Files
timmy-tower/artifacts/mobile/app/(tabs)/index.tsx
2026-03-23 20:20:52 +00:00

475 lines
12 KiB
TypeScript

import * as Haptics from "expo-haptics";
import * as Speech from "expo-speech";
import Voice, { SpeechResultsEvent, SpeechErrorEvent } from "@react-native-voice/voice";
import { Ionicons } from "@expo/vector-icons";
import React, { useCallback, useEffect, useRef, useState } from "react";
import {
Animated,
Platform,
Pressable,
StyleSheet,
Text,
View,
} from "react-native";
import { useSafeAreaInsets } from "react-native-safe-area-context";
import { ConnectionBadge } from "@/components/ConnectionBadge";
import { JobSubmissionSheet } from "@/components/JobSubmissionSheet";
import { TimmyFace } from "@/components/TimmyFace";
import { Colors } from "@/constants/colors";
import { useTimmy } from "@/context/TimmyContext";
// Dark-theme palette shorthand used throughout this screen.
const C = Colors.dark;

// Human-readable status line rendered under the face for each mood value
// coming from TimmyContext. Keys mirror the moods handled by MoodDot below.
// NOTE(review): typed as Record<string, string>, so an unknown mood yields
// undefined and the label renders empty — presumably intentional; confirm.
const MOOD_LABELS: Record<string, string> = {
  idle: "Contemplating the cosmos",
  thinking: "Deep in thought",
  working: "Casting spells",
  speaking: "Sharing wisdom",
};
// Minimal structural typings for the browser Web Speech API (not part of the
// default TS DOM lib configured here). Only the members this screen actually
// uses are modeled.
type WebSpeechRecognition = {
  continuous: boolean;
  interimResults: boolean;
  lang: string;
  onresult: ((e: WebSpeechResultEvent) => void) | null;
  onerror: (() => void) | null;
  onend: (() => void) | null;
  start: () => void;
  stop: () => void;
};

// Event payload delivered to `onresult`.
type WebSpeechResultEvent = {
  results: WebSpeechResultList;
};

// Array-like list of recognition results (indexable, with `length`).
type WebSpeechResultList = {
  length: number;
  [index: number]: WebSpeechResult;
};

// A single result; index 0 holds the top-ranked alternative transcript.
type WebSpeechResult = {
  isFinal: boolean;
  0: { transcript: string };
};

// `window` augmented with the standard and WebKit-prefixed constructors
// (Chrome/Safari expose only the prefixed form).
type SpeechRecognitionWindow = Window & {
  SpeechRecognition?: new () => WebSpeechRecognition;
  webkitSpeechRecognition?: new () => WebSpeechRecognition;
};
/**
 * Main "face" screen: shows Timmy's animated face, speaks incoming chat
 * replies aloud (native TTS only), and lets the visitor talk to Timmy via
 * native speech recognition (@react-native-voice/voice) or the browser
 * Web Speech API, plus a button to open the paid-job submission sheet.
 */
export default function FaceScreen() {
  const { timmyMood, connectionStatus, sendVisitorMessage, recentEvents } =
    useTimmy();
  const insets = useSafeAreaInsets();

  // UI state.
  const [isListening, setIsListening] = useState(false);
  const [transcript, setTranscript] = useState("");
  const [lastReply, setLastReply] = useState("");
  const [jobSheetVisible, setJobSheetVisible] = useState(false);

  // Handles that must survive re-renders without retriggering effects.
  const micScale = useRef(new Animated.Value(1)).current;
  const micPulseRef = useRef<Animated.CompositeAnimation | null>(null);
  const webRecognitionRef = useRef<WebSpeechRecognition | null>(null);
  const lastReplyIdRef = useRef<string>("");
  // Pending "clear the transcript bubble" timer, tracked so it can be
  // cancelled (FIX: previously untracked timers could fire setState after
  // unmount and pile up across sessions).
  const transcriptTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null);

  // Web has no safe-area insets; use fixed paddings there.
  const topPad = Platform.OS === "web" ? 67 : insets.top;
  const bottomPad = Platform.OS === "web" ? 84 + 34 : insets.bottom + 84;

  // Schedule the transcript bubble to clear after `delayMs`, replacing any
  // previously scheduled clear.
  const scheduleTranscriptClear = useCallback((delayMs: number) => {
    if (transcriptTimerRef.current !== null) {
      clearTimeout(transcriptTimerRef.current);
    }
    transcriptTimerRef.current = setTimeout(() => setTranscript(""), delayMs);
  }, []);

  // Detect incoming Timmy replies (newest first in recentEvents — TODO
  // confirm ordering against TimmyContext) and speak them; TTS is skipped on
  // web where expo-speech is not used.
  useEffect(() => {
    const latestChat = recentEvents.find(
      (e) => e.type === "chat" && (e.agentId === "timmy" || !e.agentId)
    );
    if (latestChat?.text && latestChat.id !== lastReplyIdRef.current) {
      lastReplyIdRef.current = latestChat.id;
      setLastReply(latestChat.text);
      if (Platform.OS !== "web") {
        Speech.speak(latestChat.text, {
          onDone: () => {},
          onError: () => {},
          pitch: 0.9,
          rate: 0.85,
        });
      }
    }
  }, [recentEvents]);

  // Mic pulse helpers — declared before the native-voice effect that
  // references them.
  const startMicPulse = useCallback(() => {
    micPulseRef.current?.stop();
    const pulse = Animated.loop(
      Animated.sequence([
        Animated.timing(micScale, {
          toValue: 1.2,
          duration: 400,
          useNativeDriver: true,
        }),
        Animated.timing(micScale, {
          toValue: 0.95,
          duration: 400,
          useNativeDriver: true,
        }),
      ])
    );
    micPulseRef.current = pulse;
    pulse.start();
  }, [micScale]);

  const stopMicPulse = useCallback(() => {
    micPulseRef.current?.stop();
    micPulseRef.current = null;
    // Spring back to resting size.
    Animated.spring(micScale, { toValue: 1, useNativeDriver: true }).start();
  }, [micScale]);

  // Native voice recognition wiring (no-op on web).
  useEffect(() => {
    if (Platform.OS === "web") return;
    const onResults = (e: SpeechResultsEvent) => {
      const text = e.value?.[0] ?? "";
      setTranscript(text);
      if (text) {
        sendVisitorMessage(text);
      }
      setIsListening(false);
      stopMicPulse();
      scheduleTranscriptClear(3000);
    };
    const onPartialResults = (e: SpeechResultsEvent) => {
      setTranscript(e.value?.[0] ?? "");
    };
    const onError = (_e: SpeechErrorEvent) => {
      setIsListening(false);
      stopMicPulse();
    };
    Voice.onSpeechResults = onResults;
    Voice.onSpeechPartialResults = onPartialResults;
    Voice.onSpeechError = onError;
    return () => {
      // FIX: also detach the handlers assigned above; destroy() alone leaves
      // stale listeners registered. This is the teardown the library's docs
      // prescribe: Voice.destroy().then(Voice.removeAllListeners).
      Voice.destroy()
        .then(() => Voice.removeAllListeners())
        .catch(() => {});
    };
  }, [sendVisitorMessage, stopMicPulse, scheduleTranscriptClear]);

  // FIX: unmount cleanup — cancel the transcript timer, stop the looping
  // pulse animation, and stop any in-flight web recognition session.
  // Previously all three leaked past unmount.
  useEffect(() => {
    return () => {
      if (transcriptTimerRef.current !== null) {
        clearTimeout(transcriptTimerRef.current);
      }
      micPulseRef.current?.stop();
      webRecognitionRef.current?.stop();
      webRecognitionRef.current = null;
    };
  }, []);

  const startNativeVoice = useCallback(async () => {
    try {
      await Voice.start("en-US");
      setIsListening(true);
      startMicPulse();
      // Fire-and-forget haptic feedback.
      void Haptics.impactAsync(Haptics.ImpactFeedbackStyle.Medium);
    } catch {
      setTranscript("Voice recognition unavailable");
      scheduleTranscriptClear(2000);
    }
  }, [startMicPulse, scheduleTranscriptClear]);

  const stopNativeVoice = useCallback(async () => {
    try {
      await Voice.stop();
    } catch {
      // Best-effort: stop() can reject if recognition already ended.
    }
    setIsListening(false);
    stopMicPulse();
  }, [stopMicPulse]);

  // Browser SpeechRecognition (Chrome/Safari expose the webkit-prefixed form).
  const startWebVoice = useCallback(() => {
    const w =
      typeof window !== "undefined" ? (window as SpeechRecognitionWindow) : null;
    const SpeechRecognitionCtor =
      w?.SpeechRecognition ?? w?.webkitSpeechRecognition;
    if (!SpeechRecognitionCtor) {
      setTranscript("Voice not supported in this browser");
      scheduleTranscriptClear(2500);
      return;
    }
    const rec = new SpeechRecognitionCtor();
    rec.continuous = false;
    rec.interimResults = true;
    rec.lang = "en-US";
    rec.onresult = (e: WebSpeechResultEvent) => {
      // Concatenate all result segments into one running transcript.
      const parts: string[] = [];
      for (let i = 0; i < e.results.length; i++) {
        parts.push(e.results[i][0].transcript);
      }
      const t = parts.join("");
      setTranscript(t);
      if (e.results[e.results.length - 1].isFinal) {
        sendVisitorMessage(t);
        setIsListening(false);
        stopMicPulse();
        scheduleTranscriptClear(3000);
      }
    };
    rec.onerror = () => {
      setIsListening(false);
      stopMicPulse();
    };
    rec.onend = () => {
      setIsListening(false);
      stopMicPulse();
    };
    webRecognitionRef.current = rec;
    rec.start();
    setIsListening(true);
    startMicPulse();
  }, [sendVisitorMessage, startMicPulse, stopMicPulse, scheduleTranscriptClear]);

  const stopWebVoice = useCallback(() => {
    webRecognitionRef.current?.stop();
    webRecognitionRef.current = null;
  }, []);

  // Toggle listening, routed to the platform-appropriate implementation.
  const handleMicPress = useCallback(() => {
    void Haptics.impactAsync(Haptics.ImpactFeedbackStyle.Light);
    if (Platform.OS === "web") {
      if (isListening) stopWebVoice();
      else startWebVoice();
    } else {
      if (isListening) void stopNativeVoice();
      else void startNativeVoice();
    }
  }, [
    isListening,
    startWebVoice,
    stopWebVoice,
    startNativeVoice,
    stopNativeVoice,
  ]);

  return (
    <View style={[styles.container, { paddingTop: topPad }]}>
      {/* Header */}
      <View style={styles.header}>
        <View>
          <Text style={styles.title}>Timmy</Text>
          <Text style={styles.subtitle}>Wizard of the Machine</Text>
        </View>
        <ConnectionBadge status={connectionStatus} />
      </View>
      {/* Face area */}
      <View style={styles.faceWrapper}>
        <View style={styles.auraRing}>
          <TimmyFace mood={timmyMood} size={200} />
        </View>
        <Text style={styles.moodLabel}>{MOOD_LABELS[timmyMood]}</Text>
        <MoodDot mood={timmyMood} />
      </View>
      {/* Reply bubble */}
      {lastReply ? (
        <View style={styles.replyBubble}>
          <Text style={styles.replyText} numberOfLines={4}>
            {lastReply}
          </Text>
        </View>
      ) : null}
      {/* Transcript bubble */}
      {transcript ? (
        <View style={styles.transcriptBubble}>
          <Text style={styles.transcriptText}>{transcript}</Text>
        </View>
      ) : null}
      {/* Action buttons */}
      <View style={[styles.micArea, { paddingBottom: bottomPad }]}>
        <View style={styles.actionRow}>
          <Pressable
            onPress={handleMicPress}
            accessibilityRole="button"
            accessibilityLabel={isListening ? "Stop listening" : "Start voice"}
          >
            <Animated.View
              style={[
                styles.micButton,
                isListening && styles.micButtonActive,
                { transform: [{ scale: micScale }] },
              ]}
            >
              <Ionicons
                name={isListening ? "mic" : "mic-outline"}
                size={32}
                color={isListening ? "#fff" : C.textSecondary}
              />
            </Animated.View>
          </Pressable>
          <Pressable
            onPress={() => setJobSheetVisible(true)}
            accessibilityRole="button"
            accessibilityLabel="Submit paid job"
          >
            <View style={styles.jobButton}>
              <Ionicons name="flash" size={26} color={C.jobStarted} />
            </View>
          </Pressable>
        </View>
        <Text style={styles.micHint}>
          {isListening ? "Listening..." : "Tap mic to speak \u00B7 bolt to submit a job"}
        </Text>
      </View>
      <JobSubmissionSheet
        visible={jobSheetVisible}
        onClose={() => setJobSheetVisible(false)}
      />
    </View>
  );
}
function MoodDot({ mood }: { mood: string }) {
const colors: Record<string, string> = {
idle: C.idle,
thinking: C.thinking,
working: C.working,
speaking: C.speaking,
};
return (
<View style={[styles.moodDot, { backgroundColor: colors[mood] ?? C.idle }]} />
);
}
const styles = StyleSheet.create({
  // Screen scaffold.
  container: {
    flex: 1,
    backgroundColor: C.background,
  },
  // Title row: "Timmy" heading on the left, connection badge on the right.
  header: {
    flexDirection: "row",
    justifyContent: "space-between",
    alignItems: "flex-start",
    paddingHorizontal: 24,
    paddingTop: 12,
    paddingBottom: 8,
  },
  title: {
    fontSize: 28,
    fontFamily: "Inter_700Bold",
    color: C.text,
    letterSpacing: -0.5,
  },
  subtitle: {
    fontSize: 13,
    fontFamily: "Inter_400Regular",
    color: C.textSecondary,
    marginTop: 2,
  },
  // Centered face area; consumes all remaining vertical space.
  faceWrapper: {
    flex: 1,
    alignItems: "center",
    justifyContent: "center",
    gap: 16,
  },
  // Purple glow behind the face (iOS shadow only; elevation 0 keeps Android flat).
  auraRing: {
    alignItems: "center",
    justifyContent: "center",
    shadowColor: "#7C3AED",
    shadowOffset: { width: 0, height: 0 },
    shadowOpacity: 0.3,
    shadowRadius: 40,
    elevation: 0,
  },
  // Mood status line under the face.
  moodLabel: {
    fontSize: 15,
    fontFamily: "Inter_500Medium",
    color: C.textSecondary,
    letterSpacing: 0.2,
    textAlign: "center",
  },
  // 8pt circular mood indicator (colored by MoodDot).
  moodDot: {
    width: 8,
    height: 8,
    borderRadius: 4,
  },
  // Timmy's reply: left-side speech bubble (flattened top-left corner).
  replyBubble: {
    marginHorizontal: 20,
    marginBottom: 12,
    backgroundColor: C.surfaceElevated,
    borderRadius: 16,
    padding: 14,
    borderWidth: 1,
    borderColor: C.border,
    borderTopLeftRadius: 4,
  },
  replyText: {
    fontSize: 14,
    fontFamily: "Inter_400Regular",
    color: C.text,
    lineHeight: 20,
  },
  // Visitor transcript: right-aligned bubble (flattened bottom-right corner);
  // "44" hex suffix gives the accent border ~27% alpha.
  transcriptBubble: {
    marginHorizontal: 20,
    marginBottom: 8,
    backgroundColor: C.surface,
    borderRadius: 16,
    padding: 12,
    borderWidth: 1,
    borderColor: C.accent + "44",
    alignSelf: "flex-end",
    maxWidth: "80%",
    borderBottomRightRadius: 4,
  },
  transcriptText: {
    fontSize: 14,
    fontFamily: "Inter_400Regular",
    color: C.accentGlow,
    lineHeight: 20,
  },
  // Bottom action area holding the mic/job buttons and the hint text.
  micArea: {
    alignItems: "center",
    paddingTop: 16,
    gap: 10,
  },
  actionRow: {
    flexDirection: "row",
    alignItems: "center",
    gap: 20,
  },
  // 52pt round "submit job" button with a job-colored glow ("66" ≈ 40% alpha border).
  jobButton: {
    width: 52,
    height: 52,
    borderRadius: 26,
    backgroundColor: C.surface,
    borderWidth: 1.5,
    borderColor: C.jobStarted + "66",
    alignItems: "center",
    justifyContent: "center",
    shadowColor: C.jobStarted,
    shadowOffset: { width: 0, height: 2 },
    shadowOpacity: 0.25,
    shadowRadius: 6,
    elevation: 4,
  },
  // 72pt round mic button, idle appearance.
  micButton: {
    width: 72,
    height: 72,
    borderRadius: 36,
    backgroundColor: C.surface,
    borderWidth: 1.5,
    borderColor: C.border,
    alignItems: "center",
    justifyContent: "center",
    shadowColor: "#000",
    shadowOffset: { width: 0, height: 4 },
    shadowOpacity: 0.3,
    shadowRadius: 8,
    elevation: 6,
  },
  // Overrides layered onto micButton while listening (active glow).
  micButtonActive: {
    backgroundColor: C.micActive,
    borderColor: C.micActive,
    shadowColor: C.micActive,
    shadowOpacity: 0.5,
    shadowRadius: 16,
  },
  micHint: {
    fontSize: 12,
    fontFamily: "Inter_400Regular",
    color: C.textMuted,
  },
});