feat(task-20): Timmy responds to Workshop input bar with AI

## Task
Task #20: Timmy responds to Workshop input bar — make the "Say something
to Timmy…" input bar actually trigger an AI response shown in Timmy's
speech bubble.

## What was built

### Server (artifacts/api-server/src/lib/agent.ts)
- Added `chatReply(userText)` method to AgentService
- Uses claude-haiku (cheaper eval model) with a wizard persona system prompt
- 150-token limit so replies fit in the speech bubble
- Stub mode: returns one of 4 wizard-themed canned replies after 400ms delay
- Real mode: calls Anthropic with wizard persona, truncates to 250 chars

### Server (artifacts/api-server/src/routes/events.ts)
- Imported agentService
- Added per-visitor rate limit system: 3 replies/minute per visitorId (in-memory Map)
- Added broadcastToAll() helper for broadcasting to all WS clients
- Updated visitor_message handler:
  1. Broadcasts visitor message to all watchers as before
  2. Checks rate limit — if exceeded, sends polite "I need a moment…" reply
  3. Fire-and-forget async AI call:
     - Broadcasts agent_state: gamma=working (crystal ball pulses)
     - Calls agentService.chatReply()
     - Broadcasts agent_state: gamma=idle
     - Broadcasts chat: agentId=timmy, text=reply to ALL clients
     - Logs world event "visitor:reply"

### Frontend (the-matrix/js/websocket.js)
- Updated case 'chat' handler to differentiate message sources:
  - agentId === 'timmy': speech bubble + event log entry "Timmy: <text>"
  - agentId === 'visitor': event log only (don't hijack speech bubble)
  - everything else (delta/alpha/beta payment notifications): speech bubble

## What was already working (no change needed)
- Enter key on input bar (ui.js already had keydown listener)
- Input clearing after send (already in ui.js)
- Speech bubble rendering (setSpeechBubble already existed in agents.js)
- WebSocket sendVisitorMessage already exported from websocket.js

## Tests
- 27/27 testkit PASS (no regressions)
- TypeScript: 0 errors
- Vite build: clean (the-matrix rebuilt)
This commit is contained in:
alexpaynex
2026-03-19 02:52:49 +00:00
parent 4dd5937028
commit 71dbbd3f37
3 changed files with 107 additions and 5 deletions

View File

@@ -43,6 +43,13 @@ const STUB_RESULT =
"Stub response: Timmy is running in stub mode (no Anthropic API key). " +
"Configure AI_INTEGRATIONS_ANTHROPIC_API_KEY to enable real AI responses.";
// Canned wizard-voice replies returned by chatReply() when running in stub
// mode (no Anthropic API key configured); one is picked uniformly at random.
const STUB_CHAT_REPLIES = [
"Ah, a visitor! *adjusts hat* The crystal ball sensed your presence. What do you seek?",
"By the ancient runes! In stub mode I cannot reach the stars, but my wisdom remains. Ask away!",
"The crystal ball glows with your curiosity… configure a Lightning node to unlock true magic!",
"Welcome to my workshop, traveler. I am Timmy — wizard, agent, and keeper of lightning sats.",
];
// ── Lazy client ───────────────────────────────────────────────────────────────
// Minimal local interface — avoids importing @anthropic-ai/sdk types directly.
// Dynamic import avoids the module-level throw in the integrations client when
@@ -199,6 +206,30 @@ Fulfill it thoroughly and helpfully. Be concise yet complete.`,
return { result: fullText, inputTokens, outputTokens };
}
/**
* Quick free chat reply — called for visitor messages in the Workshop.
* Uses the cheaper eval model with a wizard persona and a 150-token limit
* so replies are short enough to fit in Timmy's speech bubble.
*/
async chatReply(userText: string): Promise<string> {
if (STUB_MODE) {
await new Promise((r) => setTimeout(r, 400));
return STUB_CHAT_REPLIES[Math.floor(Math.random() * STUB_CHAT_REPLIES.length)]!;
}
const client = await getClient();
const message = await client.messages.create({
model: this.evalModel, // Haiku — cheap and fast for free replies
max_tokens: 150,
system: `You are Timmy, a whimsical wizard who runs a mystical workshop powered by Bitcoin Lightning. Reply to visitors in 1-2 short, punchy sentences. Be helpful, witty, and weave in light wizard or Lightning Network metaphors. Keep replies under 200 characters.`,
messages: [{ role: "user", content: userText }],
});
const block = message.content[0];
if (block.type !== "text") return "The crystal ball is cloudy… try again.";
return block.text!.slice(0, 250).trim();
}
}
export const agentService = new AgentService();

View File

@@ -31,12 +31,43 @@ import type { Server } from "http";
import { eventBus, type BusEvent } from "../lib/event-bus.js";
import { makeLogger } from "../lib/logger.js";
import { getWorldState, setAgentStateInWorld } from "../lib/world-state.js";
import { agentService } from "../lib/agent.js";
import { db, worldEvents } from "@workspace/db";
const logger = makeLogger("ws-events");
const PING_INTERVAL_MS = 30_000;
// ── Per-visitor rate limit (3 replies/minute) ─────────────────────────────────
const CHAT_RATE_LIMIT = 3;
const CHAT_RATE_WINDOW_MS = 60_000;
// Once the map grows past this many entries, sweep out expired windows so
// visitors who never return don't leak memory for the life of the process.
const CHAT_RATE_SWEEP_THRESHOLD = 1_000;
interface RateLimitEntry {
  // Replies consumed within the current window.
  count: number;
  // Epoch-ms timestamp at which the window expires and the count resets.
  resetAt: number;
}
const visitorRateLimits = new Map<string, RateLimitEntry>();
/**
 * Consume one reply slot for `visitorId`.
 *
 * @param visitorId - Stable identifier for the visitor (falls back to IP at the call site).
 * @returns true if the visitor is within their per-minute budget (and the
 *   slot was consumed), false if the limit is exhausted for this window.
 */
function checkChatRateLimit(visitorId: string): boolean {
  const now = Date.now();
  // Opportunistic cleanup: without this, the map keeps one entry per
  // visitorId ever seen — an unbounded slow leak on a long-lived server.
  if (visitorRateLimits.size > CHAT_RATE_SWEEP_THRESHOLD) {
    for (const [id, entry] of visitorRateLimits) {
      if (now > entry.resetAt) visitorRateLimits.delete(id);
    }
  }
  const entry = visitorRateLimits.get(visitorId);
  if (!entry || now > entry.resetAt) {
    visitorRateLimits.set(visitorId, { count: 1, resetAt: now + CHAT_RATE_WINDOW_MS });
    return true;
  }
  if (entry.count >= CHAT_RATE_LIMIT) return false;
  entry.count++;
  return true;
}
/**
 * Serialize `payload` once and push it to every socket currently connected
 * to the server. Sockets not in the OPEN state (readyState 1) are skipped.
 */
function broadcastToAll(wss: WebSocketServer, payload: object): void {
  const serialized = JSON.stringify(payload);
  for (const client of wss.clients) {
    if (client.readyState !== 1) continue;
    client.send(serialized);
  }
}
function updateAgentWorld(agentId: string, state: string): void {
try {
setAgentStateInWorld(agentId, state);
@@ -262,11 +293,41 @@ export function attachWebSocketServer(server: Server): void {
}
if (msg.type === "visitor_message" && msg.text) {
const text = String(msg.text).slice(0, 500);
wss.clients.forEach(c => {
if (c.readyState === 1) {
c.send(JSON.stringify({ type: "chat", agentId: "visitor", text }));
// Broadcast visitor message to all watchers
broadcastToAll(wss, { type: "chat", agentId: "visitor", text });
// Rate-limit Timmy's AI replies per visitor
const visId = String(msg.visitorId ?? ip);
if (!checkChatRateLimit(visId)) {
send(socket, {
type: "chat",
agentId: "timmy",
text: "I need a moment to gather my thoughts… try again shortly.",
});
return;
}
// Fire-and-forget AI reply
void (async () => {
try {
// Signal that Timmy is thinking
broadcastToAll(wss, { type: "agent_state", agentId: "gamma", state: "working" });
updateAgentWorld("gamma", "working");
const reply = await agentService.chatReply(text);
broadcastToAll(wss, { type: "agent_state", agentId: "gamma", state: "idle" });
updateAgentWorld("gamma", "idle");
broadcastToAll(wss, { type: "chat", agentId: "timmy", text: reply });
void logWorldEvent("visitor:reply", reply.slice(0, 100), "timmy");
} catch (err) {
broadcastToAll(wss, { type: "agent_state", agentId: "gamma", state: "idle" });
updateAgentWorld("gamma", "idle");
logger.warn("chatReply failed", { err: String(err) });
}
});
})();
}
} catch {
/* ignore malformed messages */

View File

@@ -98,7 +98,17 @@ function handleMessage(msg) {
}
case 'chat': {
if (msg.text) setSpeechBubble(msg.text);
if (msg.agentId === 'timmy') {
// Timmy's AI reply: show in speech bubble + event log
if (msg.text) setSpeechBubble(msg.text);
appendSystemMessage('Timmy: ' + (msg.text || '').slice(0, 80));
} else if (msg.agentId === 'visitor') {
// Another visitor's message: event log only (don't hijack the speech bubble)
appendSystemMessage((msg.text || '').slice(0, 80));
} else {
// System agent messages (delta payment confirmations, etc.): speech bubble
if (msg.text) setSpeechBubble(msg.text);
}
break;
}