[claude] Context injection — pass conversation history to work model (#39) (#78)

This commit was merged in pull request #78.
This commit is contained in the following branches (list truncated in this extract):
2026-03-23 01:51:22 +00:00
parent ef3e27d595
commit 4ea59f7198
6 changed files with 104 additions and 5 deletions

View File

@@ -145,13 +145,20 @@ Respond ONLY with valid JSON: {"accepted": true/false, "reason": "...", "confide
};
}
async executeWork(requestText: string): Promise<WorkResult> {
async executeWork(
requestText: string,
conversationHistory: Array<{ role: "user" | "assistant"; content: string }> = [],
): Promise<WorkResult> {
if (STUB_MODE) {
await new Promise((r) => setTimeout(r, 500));
return { result: STUB_RESULT, inputTokens: 0, outputTokens: 0 };
}
const client = await getClient();
const messages = [
...conversationHistory,
{ role: "user" as const, content: requestText },
];
const message = await client.messages.create({
model: this.workModel,
max_tokens: 8192,
@@ -164,7 +171,7 @@ If the user asks how to run their own Timmy or self-host this service, enthusias
- Core env vars: AI_INTEGRATIONS_ANTHROPIC_API_KEY, AI_INTEGRATIONS_ANTHROPIC_BASE_URL, DATABASE_URL, LNBITS_URL, LNBITS_API_KEY, NOSTR_PRIVATE_KEY.
- Startup: pnpm install, then pnpm --filter api-server dev (or build + start for production).
- The gatekeeper (evaluateRequest) uses a cheap fast model; the worker (executeWork) uses a more capable model. Both are swappable via EVAL_MODEL and WORK_MODEL env vars.`,
messages: [{ role: "user", content: requestText }],
messages,
});
const block = message.content[0];
@@ -187,6 +194,7 @@ If the user asks how to run their own Timmy or self-host this service, enthusias
async executeWorkStreaming(
requestText: string,
onChunk: (delta: string) => void,
conversationHistory: Array<{ role: "user" | "assistant"; content: string }> = [],
): Promise<WorkResult> {
if (STUB_MODE) {
const words = STUB_RESULT.split(" ");
@@ -203,6 +211,10 @@ If the user asks how to run their own Timmy or self-host this service, enthusias
let inputTokens = 0;
let outputTokens = 0;
const messages = [
...conversationHistory,
{ role: "user" as const, content: requestText },
];
const stream = client.messages.stream({
model: this.workModel,
max_tokens: 8192,
@@ -215,7 +227,7 @@ If the user asks how to run their own Timmy or self-host this service, enthusias
- Core env vars: AI_INTEGRATIONS_ANTHROPIC_API_KEY, AI_INTEGRATIONS_ANTHROPIC_BASE_URL, DATABASE_URL, LNBITS_URL, LNBITS_API_KEY, NOSTR_PRIVATE_KEY.
- Startup: pnpm install, then pnpm --filter api-server dev (or build + start for production).
- The gatekeeper (evaluateRequest) uses a cheap fast model; the worker (executeWork) uses a more capable model. Both are swappable via EVAL_MODEL and WORK_MODEL env vars.`,
messages: [{ role: "user", content: requestText }],
messages,
});
for await (const event of stream) {