- btc-oracle.ts: CoinGecko BTC/USD fetch (60s cache), usdToSats() helper, fallback to BTC_PRICE_USD_FALLBACK env var (default $100k), 5s abort timeout
- pricing.ts: Full rewrite — per-model token rates (Haiku/Sonnet, env-var overridable), DO infra amortisation, originator margin %, estimateInputTokens(), estimateOutputTokens() by request tier, calculateActualCostUsd() for post-work ledger, async calculateWorkFeeSats() → WorkFeeBreakdown
- agent.ts: WorkResult now includes inputTokens + outputTokens from Anthropic usage; workModel/evalModel exposed as readonly public; EVAL_MODEL/WORK_MODEL env var support
- jobs.ts: Work invoice creation calls pricingService.calculateWorkFeeSats() async; stores estimatedCostUsd/marginPct/btcPriceUsd on job; after executeWork stores actualInputTokens/actualOutputTokens/actualCostUsd; GET response includes pricingBreakdown (awaiting_work_payment) and costLedger (complete)
- lib/db/src/schema/jobs.ts: 6 new real/integer columns for cost tracking; schema pushed
- openapi.yaml: PricingBreakdown + CostLedger schemas added to JobStatusResponse
- replit.md: 17 new env vars documented in Cost-based work fee pricing section
78 lines
2.5 KiB
TypeScript
import { anthropic } from "@workspace/integrations-anthropic-ai";
|
|
|
|
export interface EvalResult {
|
|
accepted: boolean;
|
|
reason: string;
|
|
}
|
|
|
|
export interface WorkResult {
|
|
result: string;
|
|
inputTokens: number;
|
|
outputTokens: number;
|
|
}
|
|
|
|
export interface AgentConfig {
|
|
evalModel?: string;
|
|
workModel?: string;
|
|
}
|
|
|
|
export class AgentService {
|
|
readonly evalModel: string;
|
|
readonly workModel: string;
|
|
|
|
constructor(config?: AgentConfig) {
|
|
this.evalModel = config?.evalModel ?? process.env.EVAL_MODEL ?? "claude-haiku-4-5";
|
|
this.workModel = config?.workModel ?? process.env.WORK_MODEL ?? "claude-sonnet-4-6";
|
|
}
|
|
|
|
async evaluateRequest(requestText: string): Promise<EvalResult> {
|
|
const message = await anthropic.messages.create({
|
|
model: this.evalModel,
|
|
max_tokens: 8192,
|
|
system: `You are Timmy, an AI agent gatekeeper. Evaluate whether a request is acceptable to act on.
|
|
ACCEPT if the request is: clear enough to act on, ethical, lawful, and within the capability of a general-purpose AI.
|
|
REJECT if the request is: harmful, illegal, unethical, incoherent, or spam.
|
|
Respond ONLY with valid JSON: {"accepted": true, "reason": "..."} or {"accepted": false, "reason": "..."}`,
|
|
messages: [{ role: "user", content: `Evaluate this request: ${requestText}` }],
|
|
});
|
|
|
|
const block = message.content[0];
|
|
if (block.type !== "text") {
|
|
throw new Error("Unexpected non-text response from eval model");
|
|
}
|
|
|
|
let parsed: { accepted: boolean; reason: string };
|
|
try {
|
|
const raw = block.text.replace(/^```(?:json)?\s*/i, "").replace(/\s*```$/, "").trim();
|
|
parsed = JSON.parse(raw) as { accepted: boolean; reason: string };
|
|
} catch {
|
|
throw new Error(`Failed to parse eval JSON: ${block.text}`);
|
|
}
|
|
|
|
return { accepted: Boolean(parsed.accepted), reason: parsed.reason ?? "" };
|
|
}
|
|
|
|
async executeWork(requestText: string): Promise<WorkResult> {
|
|
const message = await anthropic.messages.create({
|
|
model: this.workModel,
|
|
max_tokens: 8192,
|
|
system: `You are Timmy, a capable AI agent. A user has paid for you to handle their request.
|
|
Fulfill it thoroughly and helpfully. Be concise yet complete.`,
|
|
messages: [{ role: "user", content: requestText }],
|
|
});
|
|
|
|
const block = message.content[0];
|
|
if (block.type !== "text") {
|
|
throw new Error("Unexpected non-text response from work model");
|
|
}
|
|
|
|
return {
|
|
result: block.text,
|
|
inputTokens: message.usage.input_tokens,
|
|
outputTokens: message.usage.output_tokens,
|
|
};
|
|
}
|
|
}
|
|
|
|
// Default shared instance; model IDs are resolved from env vars / defaults at module load.
export const agentService = new AgentService();
|