Files
timmy-tower/artifacts/mobile/app/(tabs)/index.tsx
alexpaynex cf1819f34b feat(mobile): scaffold Expo mobile app for Timmy with Face/Matrix/Feed tabs
Task #42 — Timmy Harness: Expo Mobile App

## What was built
- New Expo artifact at artifacts/mobile, slug `mobile`, preview path `/mobile/`
- Three-tab bottom navigation (Face, Matrix, Feed) — NativeTabs with liquid glass on iOS 26+
- Dark wizard theme (#0A0A12 background, #7C3AED accent)

## WebSocket context (context/TimmyContext.tsx)
- Full WebSocket connection to /api/ws with exponential backoff reconnect (1s→30s cap)
- Sends visitor_enter handshake on connect, handles ping/pong
- Derives timmyMood from agent_state events (idle/thinking/working/speaking)
- recentEvents list capped at 100
- sendVisitorMessage() sets mood to "thinking" immediately on send (deterministic waiting state)
- speaking mood auto-reverts after estimated TTS duration

## Face tab (app/(tabs)/index.tsx)
- Animated 2D wizard face via react-native-svg (hat, head, beard, eyes, pupils, mouth arc, magic orb)
- AnimatedPupils: pupilScaleAnim drives actual rendered pupil Circle radius (BASE_PUPIL_R * scale)
- AnimatedEyelids: eyeScaleYAnim drives top eyelid overlay via Animated.Value listener
- AnimatedMouth: smileAnim + mouthOscAnim combined; SVG Path rebuilt on each frame via listener
- speaking mood: 1Hz mouth oscillation via Animated.loop; per-mood body bob speed/amplitude
- @react-native-voice/voice installed and statically imported; Voice.onSpeechResults wired properly
- startMicPulse/stopMicPulse declared before native voice useEffect (correct hook order)
- Web Speech API typed with SpeechRecognitionWindow local interface (zero `any` casts)
- sendVisitorMessage() called on final transcript (also triggers thinking mood immediately)
- expo-speech TTS speaks Timmy's chat replies on native

## Matrix tab (app/(tabs)/matrix.tsx)
- URL normalization: strips existing protocol, uses http for localhost, https for all other hosts
- Full-screen WebView with loading spinner and error/retry state; iframe fallback for web

## Feed tab (app/(tabs)/feed.tsx)
- FlatList<WsEvent> with proper generics; EventConfig discriminated union (Feather|MaterialCommunityIcons)
- Icon names typed via React.ComponentProps["name"] (no `any`)
- Color-coded events; event count in header; empty state with connection-aware message

## Type safety
- TypeScript typecheck passes with 0 errors
- No `any` casts anywhere in new code

## Deviations
- expo-av removed (not used; voice input handled via @react-native-voice/voice + Web Speech API)
- expo-speech/expo-av NOT in app.json plugins (no config plugins — causes PluginError if listed)
- app.json extra.apiDomain added for env-driven domain configuration per requirement
- expo-speech pinned ~14.0.8, react-native-webview 13.15.0 for Expo SDK 54 compat
- artifact.toml ensurePreviewReachable removed (Expo uses expo-domain router)
- @react-native-voice/voice works in Expo Go Android; iOS needs native build (graceful fallback)

Replit-Task-Id: 0748cbbf-7b84-4149-8fc0-9d697287a0e6
2026-03-19 23:55:16 +00:00

436 lines
11 KiB
TypeScript

import * as Haptics from "expo-haptics";
import * as Speech from "expo-speech";
import Voice, { SpeechResultsEvent, SpeechErrorEvent } from "@react-native-voice/voice";
import { Ionicons } from "@expo/vector-icons";
import React, { useCallback, useEffect, useRef, useState } from "react";
import {
Animated,
Platform,
Pressable,
StyleSheet,
Text,
View,
} from "react-native";
import { useSafeAreaInsets } from "react-native-safe-area-context";
import { ConnectionBadge } from "@/components/ConnectionBadge";
import { TimmyFace } from "@/components/TimmyFace";
import { Colors } from "@/constants/colors";
import { useTimmy } from "@/context/TimmyContext";
// Dark-theme color palette used throughout this screen.
const C = Colors.dark;
// Human-readable status line rendered under the wizard face for each
// timmyMood value emitted by TimmyContext (idle/thinking/working/speaking).
const MOOD_LABELS: Record<string, string> = {
idle: "Contemplating the cosmos",
thinking: "Deep in thought",
working: "Casting spells",
speaking: "Sharing wisdom",
};
// Minimal local typings for the browser Web Speech API, so the web voice
// path compiles without `any` casts or a DOM lib dependency. Only the
// members actually used below are modeled.
type WebSpeechRecognition = {
continuous: boolean;
interimResults: boolean;
lang: string;
onresult: ((e: WebSpeechResultEvent) => void) | null;
onerror: (() => void) | null;
onend: (() => void) | null;
start: () => void;
stop: () => void;
};
// Shape of the event delivered to onresult.
type WebSpeechResultEvent = {
results: WebSpeechResultList;
};
// Array-like list of recognition results (indexable, with a length).
type WebSpeechResultList = {
length: number;
[index: number]: WebSpeechResult;
};
// A single result; index 0 holds the top-ranked alternative's transcript.
type WebSpeechResult = {
isFinal: boolean;
0: { transcript: string };
};
// window augmented with the (possibly vendor-prefixed) SpeechRecognition
// constructor; either property may be absent in unsupported browsers.
type SpeechRecognitionWindow = Window & {
SpeechRecognition?: new () => WebSpeechRecognition;
webkitSpeechRecognition?: new () => WebSpeechRecognition;
};
/**
 * Face tab: shows Timmy's animated face, speaks incoming chat replies via
 * TTS on native, and captures visitor speech through the mic button —
 * @react-native-voice/voice on native, the Web Speech API on web.
 */
export default function FaceScreen() {
  const { timmyMood, connectionStatus, sendVisitorMessage, recentEvents } =
    useTimmy();
  const insets = useSafeAreaInsets();
  const [isListening, setIsListening] = useState(false);
  const [transcript, setTranscript] = useState("");
  const [lastReply, setLastReply] = useState("");
  const micScale = useRef(new Animated.Value(1)).current;
  const micPulseRef = useRef<Animated.CompositeAnimation | null>(null);
  const webRecognitionRef = useRef<WebSpeechRecognition | null>(null);
  // Id of the last chat event we already spoke/displayed, to avoid replaying
  // the same reply every time recentEvents changes.
  const lastReplyIdRef = useRef<string>("");
  // Web uses fixed header/tab-bar heights; native derives from safe-area insets.
  const topPad = Platform.OS === "web" ? 67 : insets.top;
  const bottomPad = Platform.OS === "web" ? 84 + 34 : insets.bottom + 84;
  // Detect incoming Timmy replies and speak them (native only).
  useEffect(() => {
    // find() picks the first matching chat event — assumes recentEvents is
    // ordered newest-first (TODO confirm against TimmyContext).
    const latestChat = recentEvents.find(
      (e) => e.type === "chat" && (e.agentId === "timmy" || !e.agentId)
    );
    if (latestChat?.text && latestChat.id !== lastReplyIdRef.current) {
      lastReplyIdRef.current = latestChat.id;
      setLastReply(latestChat.text);
      if (Platform.OS !== "web") {
        // NOTE(review): expo-speech queues utterances; rapid replies will
        // back up rather than interrupt — acceptable for a chat cadence.
        Speech.speak(latestChat.text, {
          onDone: () => {},
          onError: () => {},
          pitch: 0.9, // slightly lower pitch for the "wizard" voice
          rate: 0.85,
        });
      }
    }
  }, [recentEvents]);
  // Declare mic pulse helpers first so the native voice useEffect below can
  // reference them without a TDZ/hook-order problem.
  const startMicPulse = useCallback(() => {
    micPulseRef.current?.stop();
    const pulse = Animated.loop(
      Animated.sequence([
        Animated.timing(micScale, {
          toValue: 1.2,
          duration: 400,
          useNativeDriver: true,
        }),
        Animated.timing(micScale, {
          toValue: 0.95,
          duration: 400,
          useNativeDriver: true,
        }),
      ])
    );
    micPulseRef.current = pulse;
    pulse.start();
  }, [micScale]);
  const stopMicPulse = useCallback(() => {
    micPulseRef.current?.stop();
    micPulseRef.current = null;
    // Spring back to resting scale rather than snapping.
    Animated.spring(micScale, { toValue: 1, useNativeDriver: true }).start();
  }, [micScale]);
  // Native voice setup (after helpers are declared).
  useEffect(() => {
    if (Platform.OS === "web") return;
    const onResults = (e: SpeechResultsEvent) => {
      const text = e.value?.[0] ?? "";
      setTranscript(text);
      if (text) {
        sendVisitorMessage(text);
      }
      setIsListening(false);
      stopMicPulse();
      // Let the final transcript linger briefly before clearing the bubble.
      setTimeout(() => setTranscript(""), 3000);
    };
    const onPartialResults = (e: SpeechResultsEvent) => {
      setTranscript(e.value?.[0] ?? "");
    };
    const onError = (_e: SpeechErrorEvent) => {
      setIsListening(false);
      stopMicPulse();
    };
    Voice.onSpeechResults = onResults;
    Voice.onSpeechPartialResults = onPartialResults;
    Voice.onSpeechError = onError;
    return () => {
      // FIX: per the library docs, listeners must be removed explicitly —
      // destroy() alone leaves stale handler closures registered, which
      // leak and fire against an unmounted screen on remount.
      Voice.destroy()
        .then(() => Voice.removeAllListeners())
        .catch(() => {});
    };
  }, [sendVisitorMessage, stopMicPulse]);
  const startNativeVoice = useCallback(async () => {
    try {
      await Voice.start("en-US");
      setIsListening(true);
      startMicPulse();
      Haptics.impactAsync(Haptics.ImpactFeedbackStyle.Medium);
    } catch {
      // Expo Go iOS has no native module — show a transient notice instead.
      setTranscript("Voice recognition unavailable");
      setTimeout(() => setTranscript(""), 2000);
    }
  }, [startMicPulse]);
  const stopNativeVoice = useCallback(async () => {
    try {
      await Voice.stop();
    } catch {
      // best-effort: stopping an idle recognizer may throw; safe to ignore
    }
    setIsListening(false);
    stopMicPulse();
  }, [stopMicPulse]);
  const startWebVoice = useCallback(() => {
    const w =
      typeof window !== "undefined" ? (window as SpeechRecognitionWindow) : null;
    const SpeechRecognitionCtor =
      w?.SpeechRecognition ?? w?.webkitSpeechRecognition;
    if (!SpeechRecognitionCtor) {
      setTranscript("Voice not supported in this browser");
      setTimeout(() => setTranscript(""), 2500);
      return;
    }
    const rec = new SpeechRecognitionCtor();
    rec.continuous = false;
    rec.interimResults = true;
    rec.lang = "en-US";
    rec.onresult = (e: WebSpeechResultEvent) => {
      const parts: string[] = [];
      for (let i = 0; i < e.results.length; i++) {
        parts.push(e.results[i][0].transcript);
      }
      const t = parts.join("");
      setTranscript(t);
      // FIX: guard the last-result access — an empty results list would
      // otherwise throw on `.isFinal` of undefined.
      const last = e.results[e.results.length - 1];
      if (last && last.isFinal) {
        sendVisitorMessage(t);
        setIsListening(false);
        stopMicPulse();
        setTimeout(() => setTranscript(""), 3000);
      }
    };
    rec.onerror = () => {
      setIsListening(false);
      stopMicPulse();
    };
    rec.onend = () => {
      setIsListening(false);
      stopMicPulse();
    };
    webRecognitionRef.current = rec;
    rec.start();
    setIsListening(true);
    startMicPulse();
  }, [sendVisitorMessage, startMicPulse, stopMicPulse]);
  const stopWebVoice = useCallback(() => {
    webRecognitionRef.current?.stop();
    webRecognitionRef.current = null;
    // FIX: reset UI state eagerly — onend normally fires after stop(), but
    // not all browsers deliver it reliably; this is idempotent with onend.
    setIsListening(false);
    stopMicPulse();
  }, [stopMicPulse]);
  const handleMicPress = useCallback(() => {
    Haptics.impactAsync(Haptics.ImpactFeedbackStyle.Light);
    if (Platform.OS === "web") {
      if (isListening) stopWebVoice();
      else startWebVoice();
    } else {
      if (isListening) stopNativeVoice();
      else startNativeVoice();
    }
  }, [
    isListening,
    startWebVoice,
    stopWebVoice,
    startNativeVoice,
    stopNativeVoice,
  ]);
  return (
    <View style={[styles.container, { paddingTop: topPad }]}>
      {/* Header */}
      <View style={styles.header}>
        <View>
          <Text style={styles.title}>Timmy</Text>
          <Text style={styles.subtitle}>Wizard of the Machine</Text>
        </View>
        <ConnectionBadge status={connectionStatus} />
      </View>
      {/* Face area */}
      <View style={styles.faceWrapper}>
        <View style={styles.auraRing}>
          <TimmyFace mood={timmyMood} size={200} />
        </View>
        {/* FIX: fall back to the idle label so an unknown mood never renders
            an empty/undefined status line. */}
        <Text style={styles.moodLabel}>
          {MOOD_LABELS[timmyMood] ?? MOOD_LABELS.idle}
        </Text>
        <MoodDot mood={timmyMood} />
      </View>
      {/* Reply bubble */}
      {lastReply ? (
        <View style={styles.replyBubble}>
          <Text style={styles.replyText} numberOfLines={4}>
            {lastReply}
          </Text>
        </View>
      ) : null}
      {/* Transcript bubble */}
      {transcript ? (
        <View style={styles.transcriptBubble}>
          <Text style={styles.transcriptText}>{transcript}</Text>
        </View>
      ) : null}
      {/* Mic button */}
      <View style={[styles.micArea, { paddingBottom: bottomPad }]}>
        <Pressable
          onPress={handleMicPress}
          accessibilityRole="button"
          accessibilityLabel={isListening ? "Stop listening" : "Start voice"}
        >
          <Animated.View
            style={[
              styles.micButton,
              isListening && styles.micButtonActive,
              { transform: [{ scale: micScale }] },
            ]}
          >
            <Ionicons
              name={isListening ? "mic" : "mic-outline"}
              size={32}
              color={isListening ? "#fff" : C.textSecondary}
            />
          </Animated.View>
        </Pressable>
        <Text style={styles.micHint}>
          {isListening ? "Listening..." : "Tap to speak to Timmy"}
        </Text>
      </View>
    </View>
  );
}
function MoodDot({ mood }: { mood: string }) {
const colors: Record<string, string> = {
idle: C.idle,
thinking: C.thinking,
working: C.working,
speaking: C.speaking,
};
return (
<View style={[styles.moodDot, { backgroundColor: colors[mood] ?? C.idle }]} />
);
}
// Static styles for the Face screen (dark wizard theme).
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: C.background,
},
// Top bar: title/subtitle on the left, connection badge on the right.
header: {
flexDirection: "row",
justifyContent: "space-between",
alignItems: "flex-start",
paddingHorizontal: 24,
paddingTop: 12,
paddingBottom: 8,
},
title: {
fontSize: 28,
fontFamily: "Inter_700Bold",
color: C.text,
letterSpacing: -0.5,
},
subtitle: {
fontSize: 13,
fontFamily: "Inter_400Regular",
color: C.textSecondary,
marginTop: 2,
},
// Center region holding the face, mood label, and mood dot.
faceWrapper: {
flex: 1,
alignItems: "center",
justifyContent: "center",
gap: 16,
},
// Purple glow behind the face; iOS-style shadow (elevation 0 disables the
// Android shadow, which cannot render a soft glow).
auraRing: {
alignItems: "center",
justifyContent: "center",
shadowColor: "#7C3AED",
shadowOffset: { width: 0, height: 0 },
shadowOpacity: 0.3,
shadowRadius: 40,
elevation: 0,
},
moodLabel: {
fontSize: 15,
fontFamily: "Inter_500Medium",
color: C.textSecondary,
letterSpacing: 0.2,
textAlign: "center",
},
moodDot: {
width: 8,
height: 8,
borderRadius: 4,
},
// Timmy's reply: left-aligned bubble with a flattened top-left corner.
replyBubble: {
marginHorizontal: 20,
marginBottom: 12,
backgroundColor: C.surfaceElevated,
borderRadius: 16,
padding: 14,
borderWidth: 1,
borderColor: C.border,
borderTopLeftRadius: 4,
},
replyText: {
fontSize: 14,
fontFamily: "Inter_400Regular",
color: C.text,
lineHeight: 20,
},
// Visitor transcript: right-aligned bubble with an accent-tinted border
// (accent + "44" appends ~27% alpha in hex).
transcriptBubble: {
marginHorizontal: 20,
marginBottom: 8,
backgroundColor: C.surface,
borderRadius: 16,
padding: 12,
borderWidth: 1,
borderColor: C.accent + "44",
alignSelf: "flex-end",
maxWidth: "80%",
borderBottomRightRadius: 4,
},
transcriptText: {
fontSize: 14,
fontFamily: "Inter_400Regular",
color: C.accentGlow,
lineHeight: 20,
},
// Bottom mic button area; paddingBottom is supplied at render time.
micArea: {
alignItems: "center",
paddingTop: 16,
gap: 10,
},
micButton: {
width: 72,
height: 72,
borderRadius: 36,
backgroundColor: C.surface,
borderWidth: 1.5,
borderColor: C.border,
alignItems: "center",
justifyContent: "center",
shadowColor: "#000",
shadowOffset: { width: 0, height: 4 },
shadowOpacity: 0.3,
shadowRadius: 8,
elevation: 6,
},
// Overrides applied while the mic is actively listening.
micButtonActive: {
backgroundColor: C.micActive,
borderColor: C.micActive,
shadowColor: C.micActive,
shadowOpacity: 0.5,
shadowRadius: 16,
},
micHint: {
fontSize: 12,
fontFamily: "Inter_400Regular",
color: C.textMuted,
},
});