diff --git a/artifacts/api-server/package.json b/artifacts/api-server/package.json index 5e5e90d..1521dcd 100644 --- a/artifacts/api-server/package.json +++ b/artifacts/api-server/package.json @@ -11,6 +11,7 @@ "dependencies": { "@workspace/db": "workspace:*", "@workspace/api-zod": "workspace:*", + "@workspace/integrations-anthropic-ai": "workspace:*", "drizzle-orm": "catalog:", "express": "^5", "cookie-parser": "^1.4.7", diff --git a/artifacts/api-server/src/lib/agent.ts b/artifacts/api-server/src/lib/agent.ts new file mode 100644 index 0000000..2757d94 --- /dev/null +++ b/artifacts/api-server/src/lib/agent.ts @@ -0,0 +1,71 @@ +import { anthropic } from "@workspace/integrations-anthropic-ai"; + +const EVAL_MODEL = "claude-haiku-4-5"; +const WORK_MODEL = "claude-sonnet-4-6"; + +export interface EvalResult { + approved: boolean; + reason: string; +} + +export async function evaluateRequest(request: string): Promise<EvalResult> { + const message = await anthropic.messages.create({ + model: EVAL_MODEL, + max_tokens: 8192, + system: `You are Timmy, an AI agent gatekeeper. Your job is to evaluate user requests. 
+A request should be APPROVED if it is: +- Clear and specific enough to act on +- Ethical, lawful, and not harmful +- Within the capabilities of a general-purpose AI assistant + +A request should be REJECTED if it is: +- Harmful, illegal, or unethical +- Completely incoherent or impossible to act on +- Spam or an attempt to abuse the system + +Respond ONLY with valid JSON in this exact format: +{"approved": true, "reason": "Brief explanation"} +or +{"approved": false, "reason": "Brief explanation of why it was rejected"}`, messages: [ { role: "user", content: `Evaluate this request: ${request}`, }, ], }); + + const block = message.content[0]; + if (block.type !== "text") { + throw new Error("Unexpected response type from eval model"); + } + + try { + const parsed = JSON.parse(block.text) as { approved: boolean; reason: string }; + return { approved: Boolean(parsed.approved), reason: parsed.reason ?? "" }; + } catch { + throw new Error(`Failed to parse eval response: ${block.text}`); + } +} + +export async function executeRequest(request: string): Promise<string> { + const message = await anthropic.messages.create({ + model: WORK_MODEL, + max_tokens: 8192, + system: `You are Timmy, a capable AI agent. A user has paid for you to handle their request. +Do your best to fulfill it thoroughly and helpfully. 
Be concise yet complete.`, messages: [ { role: "user", content: request, }, ], }); + + const block = message.content[0]; + if (block.type !== "text") { + throw new Error("Unexpected response type from work model"); + } + + return block.text; +} diff --git a/artifacts/api-server/src/lib/lnbits.ts b/artifacts/api-server/src/lib/lnbits.ts new file mode 100644 index 0000000..af4c570 --- /dev/null +++ b/artifacts/api-server/src/lib/lnbits.ts @@ -0,0 +1,87 @@ +const LNBITS_URL = process.env.LNBITS_URL; +const LNBITS_API_KEY = process.env.LNBITS_API_KEY; + +function getBaseUrl(): string { + if (!LNBITS_URL) { + throw new Error("LNBITS_URL environment variable is not set"); + } + return LNBITS_URL.replace(/\/$/, ""); +} + +function getHeaders(): Record<string, string> { + if (!LNBITS_API_KEY) { + throw new Error("LNBITS_API_KEY environment variable is not set"); + } + return { + "Content-Type": "application/json", + "X-Api-Key": LNBITS_API_KEY, + }; +} + +export interface LNbitsInvoice { + paymentHash: string; + paymentRequest: string; +} + +export interface LNbitsInvoiceStatus { + paid: boolean; + paidAt?: Date; +} + +export async function createInvoice( + amountSats: number, + memo: string, +): Promise<LNbitsInvoice> { + const baseUrl = getBaseUrl(); + const response = await fetch(`${baseUrl}/api/v1/payments`, { + method: "POST", + headers: getHeaders(), + body: JSON.stringify({ + out: false, + amount: amountSats, + memo, + }), + }); + + if (!response.ok) { + const body = await response.text(); + throw new Error(`LNbits createInvoice failed (${response.status}): ${body}`); + } + + const data = (await response.json()) as { + payment_hash: string; + payment_request: string; + }; + + return { + paymentHash: data.payment_hash, + paymentRequest: data.payment_request, + }; +} + +export async function checkInvoicePaid( + paymentHash: string, +): Promise<LNbitsInvoiceStatus> { + const baseUrl = getBaseUrl(); + const response = await fetch(`${baseUrl}/api/v1/payments/${paymentHash}`, { + method: "GET", + headers: 
getHeaders(), + }); + + if (!response.ok) { + const body = await response.text(); + throw new Error(`LNbits checkInvoice failed (${response.status}): ${body}`); + } + + const data = (await response.json()) as { + paid: boolean; + details?: { time?: number }; + }; + + return { + paid: data.paid, + paidAt: data.paid && data.details?.time + ? new Date(data.details.time * 1000) + : undefined, + }; +} diff --git a/artifacts/api-server/src/lib/pricing.ts b/artifacts/api-server/src/lib/pricing.ts new file mode 100644 index 0000000..cf7c5ef --- /dev/null +++ b/artifacts/api-server/src/lib/pricing.ts @@ -0,0 +1,8 @@ +export const EVAL_FEE_SATS = 10; + +export function computeWorkFeeSats(request: string): number { + const len = request.trim().length; + if (len <= 100) return 50; + if (len <= 300) return 100; + return 250; +} diff --git a/artifacts/api-server/tsconfig.json b/artifacts/api-server/tsconfig.json index b60e718..8db0f1a 100644 --- a/artifacts/api-server/tsconfig.json +++ b/artifacts/api-server/tsconfig.json @@ -12,6 +12,9 @@ }, { "path": "../../lib/api-zod" + }, + { + "path": "../../lib/integrations-anthropic-ai" } ] } diff --git a/lib/db/src/schema/conversations.ts b/lib/db/src/schema/conversations.ts new file mode 100644 index 0000000..991a823 --- /dev/null +++ b/lib/db/src/schema/conversations.ts @@ -0,0 +1,17 @@ +import { pgTable, serial, text, timestamp } from "drizzle-orm/pg-core"; +import { createInsertSchema } from "drizzle-zod"; +import { z } from "zod/v4"; + +export const conversations = pgTable("conversations", { + id: serial("id").primaryKey(), + title: text("title").notNull(), + createdAt: timestamp("created_at", { withTimezone: true }).defaultNow().notNull(), +}); + +export const insertConversationSchema = createInsertSchema(conversations).omit({ + id: true, + createdAt: true, +}); + +export type Conversation = typeof conversations.$inferSelect; +export type InsertConversation = z.infer<typeof insertConversationSchema>; diff --git a/lib/db/src/schema/index.ts 
b/lib/db/src/schema/index.ts index 3c00e79..73acb67 100644 --- a/lib/db/src/schema/index.ts +++ b/lib/db/src/schema/index.ts @@ -1,20 +1,4 @@ -// Export your models here. Add one export per file -// export * from "./posts"; -// -// Each model/table should ideally be split into different files. -// Each model/table should define a Drizzle table, insert schema, and types: -// -// import { pgTable, text, serial } from "drizzle-orm/pg-core"; -// import { createInsertSchema } from "drizzle-zod"; -// import { z } from "zod/v4"; -// -// export const postsTable = pgTable("posts", { -// id: serial("id").primaryKey(), -// title: text("title").notNull(), -// }); -// -// export const insertPostSchema = createInsertSchema(postsTable).omit({ id: true }); -// export type InsertPost = z.infer<typeof insertPostSchema>; -// export type Post = typeof postsTable.$inferSelect; - -export {} \ No newline at end of file +export * from "./jobs"; +export * from "./invoices"; +export * from "./conversations"; +export * from "./messages"; diff --git a/lib/db/src/schema/invoices.ts b/lib/db/src/schema/invoices.ts new file mode 100644 index 0000000..187848b --- /dev/null +++ b/lib/db/src/schema/invoices.ts @@ -0,0 +1,26 @@ +import { pgTable, text, integer, boolean, timestamp } from "drizzle-orm/pg-core"; +import { createInsertSchema } from "drizzle-zod"; +import { z } from "zod/v4"; + +export const INVOICE_TYPES = ["eval", "work"] as const; +export type InvoiceType = (typeof INVOICE_TYPES)[number]; + +export const invoices = pgTable("invoices", { + id: text("id").primaryKey(), + jobId: text("job_id").notNull(), + paymentHash: text("payment_hash").notNull().unique(), + paymentRequest: text("payment_request").notNull(), + amountSats: integer("amount_sats").notNull(), + type: text("type").$type<InvoiceType>().notNull(), + paid: boolean("paid").notNull().default(false), + createdAt: timestamp("created_at", { withTimezone: true }).defaultNow().notNull(), + paidAt: timestamp("paid_at", { withTimezone: true }), +}); + +export const 
insertInvoiceSchema = createInsertSchema(invoices).omit({ + createdAt: true, + paidAt: true, +}); + +export type Invoice = typeof invoices.$inferSelect; +export type InsertInvoice = z.infer<typeof insertInvoiceSchema>; diff --git a/lib/db/src/schema/jobs.ts b/lib/db/src/schema/jobs.ts new file mode 100644 index 0000000..1819c45 --- /dev/null +++ b/lib/db/src/schema/jobs.ts @@ -0,0 +1,38 @@ +import { pgTable, text, timestamp, integer } from "drizzle-orm/pg-core"; +import { createInsertSchema } from "drizzle-zod"; +import { z } from "zod/v4"; + +export const JOB_STATES = [ + "awaiting_eval_payment", + "evaluating", + "rejected", + "awaiting_work_payment", + "executing", + "complete", + "failed", +] as const; + +export type JobState = (typeof JOB_STATES)[number]; + +export const jobs = pgTable("jobs", { + id: text("id").primaryKey(), + request: text("request").notNull(), + state: text("state").$type<JobState>().notNull().default("awaiting_eval_payment"), + evalInvoiceId: text("eval_invoice_id"), + workInvoiceId: text("work_invoice_id"), + evalAmountSats: integer("eval_amount_sats").notNull(), + workAmountSats: integer("work_amount_sats"), + rejectionReason: text("rejection_reason"), + result: text("result"), + errorMessage: text("error_message"), + createdAt: timestamp("created_at", { withTimezone: true }).defaultNow().notNull(), + updatedAt: timestamp("updated_at", { withTimezone: true }).defaultNow().notNull(), +}); + +export const insertJobSchema = createInsertSchema(jobs).omit({ + createdAt: true, + updatedAt: true, +}); + +export type Job = typeof jobs.$inferSelect; +export type InsertJob = z.infer<typeof insertJobSchema>; diff --git a/lib/db/src/schema/messages.ts b/lib/db/src/schema/messages.ts new file mode 100644 index 0000000..2efa045 --- /dev/null +++ b/lib/db/src/schema/messages.ts @@ -0,0 +1,23 @@ +import { integer, pgTable, serial, text, timestamp } from "drizzle-orm/pg-core"; +import { createInsertSchema } from "drizzle-zod"; +import { z } from "zod/v4"; + +import { conversations } from "./conversations"; + 
+export const messages = pgTable("messages", { + id: serial("id").primaryKey(), + conversationId: integer("conversation_id") + .notNull() + .references(() => conversations.id, { onDelete: "cascade" }), + role: text("role").notNull(), + content: text("content").notNull(), + createdAt: timestamp("created_at", { withTimezone: true }).defaultNow().notNull(), +}); + +export const insertMessageSchema = createInsertSchema(messages).omit({ + id: true, + createdAt: true, +}); + +export type Message = typeof messages.$inferSelect; +export type InsertMessage = z.infer<typeof insertMessageSchema>; diff --git a/lib/integrations-anthropic-ai/package.json b/lib/integrations-anthropic-ai/package.json new file mode 100644 index 0000000..23f69c0 --- /dev/null +++ b/lib/integrations-anthropic-ai/package.json @@ -0,0 +1,15 @@ +{ + "name": "@workspace/integrations-anthropic-ai", + "version": "0.0.0", + "private": true, + "type": "module", + "exports": { + ".": "./src/index.ts", + "./batch": "./src/batch/index.ts" + }, + "dependencies": { + "@anthropic-ai/sdk": "^0.78.0", + "p-limit": "^7.3.0", + "p-retry": "^7.1.1" + } +} diff --git a/lib/integrations-anthropic-ai/src/batch/index.ts b/lib/integrations-anthropic-ai/src/batch/index.ts new file mode 100644 index 0000000..f84c2c1 --- /dev/null +++ b/lib/integrations-anthropic-ai/src/batch/index.ts @@ -0,0 +1,6 @@ +export { + batchProcess, + batchProcessWithSSE, + isRateLimitError, + type BatchOptions, +} from "./utils"; diff --git a/lib/integrations-anthropic-ai/src/batch/utils.ts b/lib/integrations-anthropic-ai/src/batch/utils.ts new file mode 100644 index 0000000..5c74981 --- /dev/null +++ b/lib/integrations-anthropic-ai/src/batch/utils.ts @@ -0,0 +1,140 @@ +import pLimit from "p-limit"; +import pRetry from "p-retry"; + +/** + * Batch Processing Utilities + * + * Generic batch processing with built-in rate limiting and automatic retries. + * Use for any task that requires processing multiple items through an LLM or external API. 
+ * + * USAGE: + * ```typescript + * import { batchProcess } from "@workspace/integrations-anthropic-ai/batch"; + * import { anthropic } from "@workspace/integrations-anthropic-ai"; + * + * const results = await batchProcess( + * artworks, + * async (artwork) => { + * const message = await anthropic.messages.create({ + * model: "claude-sonnet-4-6", + * max_tokens: 8192, + * messages: [{ role: "user", content: `Categorize: ${artwork.name}` }], + * }); + * const block = message.content[0]; + * return block.type === "text" ? block.text : ""; + * }, + * { concurrency: 2, retries: 5 } + * ); + * ``` + */ + +export interface BatchOptions { + concurrency?: number; + retries?: number; + minTimeout?: number; + maxTimeout?: number; + onProgress?: (completed: number, total: number, item: unknown) => void; +} + +export function isRateLimitError(error: unknown): boolean { + const errorMsg = error instanceof Error ? error.message : String(error); + return ( + errorMsg.includes("429") || + errorMsg.includes("RATELIMIT_EXCEEDED") || + errorMsg.toLowerCase().includes("quota") || + errorMsg.toLowerCase().includes("rate limit") + ); +} + +export async function batchProcess<T, R>( + items: T[], + processor: (item: T, index: number) => Promise<R>, + options: BatchOptions = {} ): Promise<R[]> { + const { + concurrency = 2, + retries = 7, + minTimeout = 2000, + maxTimeout = 128000, + onProgress, + } = options; + + const limit = pLimit(concurrency); + let completed = 0; + + const promises = items.map((item, index) => + limit(() => + pRetry( + async () => { + try { + const result = await processor(item, index); + completed++; + onProgress?.(completed, items.length, item); + return result; + } catch (error: unknown) { + if (isRateLimitError(error)) { + throw error; + } + throw new pRetry.AbortError( + error instanceof Error ? 
error : new Error(String(error)) + ) + } + }, + { retries, minTimeout, maxTimeout, factor: 2 } + ) + ) + ); + + return Promise.all(promises); +} + +export async function batchProcessWithSSE<T, R>( + items: T[], + processor: (item: T, index: number) => Promise<R>, + sendEvent: (event: { type: string; [key: string]: unknown }) => void, + options: Omit<BatchOptions, "concurrency"> = {} ): Promise<R[]> { + const { retries = 5, minTimeout = 1000, maxTimeout = 15000 } = options; + + sendEvent({ type: "started", total: items.length }); + + const results: R[] = []; + let errors = 0; + + for (let index = 0; index < items.length; index++) { + const item = items[index]; + sendEvent({ type: "processing", index, item }); + + try { + const result = await pRetry( + () => processor(item, index), + { + retries, + minTimeout, + maxTimeout, + factor: 2, + onFailedAttempt: (error) => { + if (!isRateLimitError(error)) { + throw new pRetry.AbortError( + error instanceof Error ? error : new Error(String(error)) + ); + } + }, + } + ); + results.push(result); + sendEvent({ type: "progress", index, result }); + } catch (error) { + errors++; + results.push(undefined as R); + sendEvent({ + type: "progress", + index, + error: error instanceof Error ? error.message : "Processing failed", + }); + } + } + + sendEvent({ type: "complete", processed: items.length, errors }); + return results; +} diff --git a/lib/integrations-anthropic-ai/src/client.ts b/lib/integrations-anthropic-ai/src/client.ts new file mode 100644 index 0000000..8de684f --- /dev/null +++ b/lib/integrations-anthropic-ai/src/client.ts @@ -0,0 +1,18 @@ +import Anthropic from "@anthropic-ai/sdk"; + +if (!process.env.AI_INTEGRATIONS_ANTHROPIC_BASE_URL) { + throw new Error( + "AI_INTEGRATIONS_ANTHROPIC_BASE_URL must be set. Did you forget to provision the Anthropic AI integration?", + ); +} + +if (!process.env.AI_INTEGRATIONS_ANTHROPIC_API_KEY) { + throw new Error( + "AI_INTEGRATIONS_ANTHROPIC_API_KEY must be set. 
Did you forget to provision the Anthropic AI integration?", + ); +} + +export const anthropic = new Anthropic({ + apiKey: process.env.AI_INTEGRATIONS_ANTHROPIC_API_KEY, + baseURL: process.env.AI_INTEGRATIONS_ANTHROPIC_BASE_URL, +}); diff --git a/lib/integrations-anthropic-ai/src/index.ts b/lib/integrations-anthropic-ai/src/index.ts new file mode 100644 index 0000000..9e30489 --- /dev/null +++ b/lib/integrations-anthropic-ai/src/index.ts @@ -0,0 +1,2 @@ +export { anthropic } from "./client"; +export { batchProcess, batchProcessWithSSE, isRateLimitError, type BatchOptions } from "./batch"; diff --git a/lib/integrations-anthropic-ai/tsconfig.json b/lib/integrations-anthropic-ai/tsconfig.json new file mode 100644 index 0000000..53af906 --- /dev/null +++ b/lib/integrations-anthropic-ai/tsconfig.json @@ -0,0 +1,12 @@ +{ + "extends": "../../tsconfig.base.json", + "compilerOptions": { + "composite": true, + "declarationMap": true, + "emitDeclarationOnly": true, + "outDir": "dist", + "rootDir": "src", + "types": ["node"] + }, + "include": ["src"] +} diff --git a/lib/integrations/anthropic_ai_integrations/src/server/batch/index.ts b/lib/integrations/anthropic_ai_integrations/src/server/batch/index.ts new file mode 100644 index 0000000..f84c2c1 --- /dev/null +++ b/lib/integrations/anthropic_ai_integrations/src/server/batch/index.ts @@ -0,0 +1,6 @@ +export { + batchProcess, + batchProcessWithSSE, + isRateLimitError, + type BatchOptions, +} from "./utils"; diff --git a/lib/integrations/anthropic_ai_integrations/src/server/batch/utils.ts b/lib/integrations/anthropic_ai_integrations/src/server/batch/utils.ts new file mode 100644 index 0000000..ebf1ae5 --- /dev/null +++ b/lib/integrations/anthropic_ai_integrations/src/server/batch/utils.ts @@ -0,0 +1,167 @@ +import pLimit from "p-limit"; +import pRetry from "p-retry"; + +/** + * Batch Processing Utilities + * + * This module provides a generic batch processing function with built-in + * rate limiting and automatic retries. 
Use it for any task that requires + * processing multiple items through an LLM or external API. + * + * USAGE: + * ```typescript + * import { batchProcess, isRateLimitError } from "./replit_integrations/batch"; + * + * const results = await batchProcess( + * artworks, + * async (artwork) => { + * // Your custom LLM logic here + * const response = await openai.chat.completions.create({ + * model: "gpt-5.1", + * messages: [{ role: "user", content: `Categorize: ${artwork.name}` }], + * response_format: { type: "json_object" }, + * }); + * return JSON.parse(response.choices[0]?.message?.content || "{}"); + * }, + * { concurrency: 2, retries: 5 } + * ); + * ``` + */ + +export interface BatchOptions { + /** Max concurrent requests (default: 2) */ + concurrency?: number; + /** Max retry attempts for rate limit errors (default: 7) */ + retries?: number; + /** Initial retry delay in ms (default: 2000) */ + minTimeout?: number; + /** Max retry delay in ms (default: 128000) */ + maxTimeout?: number; + /** Callback for progress updates */ + onProgress?: (completed: number, total: number, item: unknown) => void; +} + +/** + * Check if an error is a rate limit or quota violation. + * Use this in custom error handling if needed. + */ +export function isRateLimitError(error: unknown): boolean { + const errorMsg = error instanceof Error ? error.message : String(error); + return ( + errorMsg.includes("429") || + errorMsg.includes("RATELIMIT_EXCEEDED") || + errorMsg.toLowerCase().includes("quota") || + errorMsg.toLowerCase().includes("rate limit") + ); +} + +/** + * Process items in batches with rate limiting and automatic retries. 
+ * + * @param items - Array of items to process + * @param processor - Async function to process each item (write your LLM logic here) + * @param options - Concurrency and retry settings + * @returns Promise resolving to array of results in the same order as input + */ +export async function batchProcess<T, R>( + items: T[], + processor: (item: T, index: number) => Promise<R>, + options: BatchOptions = {} +): Promise<R[]> { + const { + concurrency = 2, + retries = 7, + minTimeout = 2000, + maxTimeout = 128000, + onProgress, + } = options; + + const limit = pLimit(concurrency); + let completed = 0; + + const promises = items.map((item, index) => + limit(() => + pRetry( + async () => { + try { + const result = await processor(item, index); + completed++; + onProgress?.(completed, items.length, item); + return result; + } catch (error: unknown) { + if (isRateLimitError(error)) { + throw error; // Rethrow to trigger p-retry + } + // For non-rate-limit errors, abort immediately + throw new pRetry.AbortError( + error instanceof Error ? error : new Error(String(error)) + ) + } + }, + { retries, minTimeout, maxTimeout, factor: 2 } + ) + ) + ); + + return Promise.all(promises); +} + +/** + * Process items sequentially with SSE progress streaming. + * Use this when you need real-time progress updates to the client. 
+ * + * @param items - Array of items to process + * @param processor - Async function to process each item + * @param sendEvent - Function to send SSE events to the client + * @param options - Retry settings (concurrency is always 1 for sequential) + */ +export async function batchProcessWithSSE<T, R>( + items: T[], + processor: (item: T, index: number) => Promise<R>, + sendEvent: (event: { type: string; [key: string]: unknown }) => void, + options: Omit<BatchOptions, "concurrency"> = {} +): Promise<R[]> { + const { retries = 5, minTimeout = 1000, maxTimeout = 15000 } = options; + + sendEvent({ type: "started", total: items.length }); + + const results: R[] = []; + let errors = 0; + + for (let index = 0; index < items.length; index++) { + const item = items[index]; + sendEvent({ type: "processing", index, item }); + + try { + const result = await pRetry( + () => processor(item, index), + { + retries, + minTimeout, + maxTimeout, + factor: 2, + onFailedAttempt: (error) => { + if (!isRateLimitError(error)) { + throw new pRetry.AbortError( + error instanceof Error ? error : new Error(String(error)) + ); + } + }, + } + ); + results.push(result); + sendEvent({ type: "progress", index, result }); + } catch (error) { + errors++; + results.push(undefined as R); // Placeholder for failed items + sendEvent({ + type: "progress", + index, + error: error instanceof Error ? 
error.message : "Processing failed", + }); + } + } + + sendEvent({ type: "complete", processed: items.length, errors }); + return results; +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 14f095e..a0298ba 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -169,6 +169,9 @@ importers: '@workspace/db': specifier: workspace:* version: link:../../lib/db + '@workspace/integrations-anthropic-ai': + specifier: workspace:* + version: link:../../lib/integrations-anthropic-ai cookie-parser: specifier: ^1.4.7 version: 1.4.7 @@ -427,6 +430,18 @@ importers: specifier: ^0.31.9 version: 0.31.9 + lib/integrations-anthropic-ai: + dependencies: + '@anthropic-ai/sdk': + specifier: ^0.78.0 + version: 0.78.0(zod@4.3.6) + p-limit: + specifier: ^7.3.0 + version: 7.3.0 + p-retry: + specifier: ^7.1.1 + version: 7.1.1 + scripts: devDependencies: '@types/node': @@ -438,6 +453,15 @@ importers: packages: + '@anthropic-ai/sdk@0.78.0': + resolution: {integrity: sha512-PzQhR715td/m1UaaN5hHXjYB8Gl2lF9UVhrrGrZeysiF6Rb74Wc9GCB8hzLdzmQtBd1qe89F9OptgB9Za1Ib5w==} + hasBin: true + peerDependencies: + zod: ^3.25.0 || ^4.0.0 + peerDependenciesMeta: + zod: + optional: true + '@babel/code-frame@7.29.0': resolution: {integrity: sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==} engines: {node: '>=6.9.0'} @@ -1300,6 +1324,7 @@ packages: resolution: {integrity: sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==} cpu: [x64] os: [linux] + libc: [glibc] '@scalar/helpers@0.2.18': resolution: {integrity: sha512-w1d4tpNEVZ293oB2BAgLrS0kVPUtG3eByNmOCJA5eK9vcT4D3cmsGtWjUaaqit0BQCsBFHK51rasGvSWnApYTw==} @@ -1359,6 +1384,7 @@ packages: engines: {node: '>= 20'} cpu: [x64] os: [linux] + libc: [glibc] '@tailwindcss/oxide-wasm32-wasi@4.2.1': resolution: {integrity: sha512-MGFB5cVPvshR85MTJkEvqDUnuNoysrsRxd6vnk1Lf2tbiqNlXpHYZqkqOQalydienEWOHHFyyuTSYRsLfxFJ2Q==} @@ -2065,6 +2091,10 @@ packages: resolution: {integrity: 
sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} + is-network-error@1.3.1: + resolution: {integrity: sha512-6QCxa49rQbmUWLfk0nuGqzql9U8uaV2H6279bRErPBHe/109hCzsLUBUHfbEtvLIHBd6hyXbgedBSHevm43Edw==} + engines: {node: '>=16'} + is-number@7.0.0: resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} engines: {node: '>=0.12.0'} @@ -2107,6 +2137,10 @@ packages: engines: {node: '>=6'} hasBin: true + json-schema-to-ts@3.1.1: + resolution: {integrity: sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g==} + engines: {node: '>=16'} + json-schema-traverse@1.0.0: resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} @@ -2131,6 +2165,7 @@ packages: engines: {node: '>= 12.0.0'} cpu: [x64] os: [linux] + libc: [glibc] lightningcss@1.31.1: resolution: {integrity: sha512-l51N2r93WmGUye3WuFoN5k10zyvrVs0qfKBhyC5ogUQ6Ew6JUSswh78mbSO+IU3nTWsyOArqPCcShdQSadghBQ==} @@ -2266,10 +2301,18 @@ packages: resolution: {integrity: sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + p-limit@7.3.0: + resolution: {integrity: sha512-7cIXg/Z0M5WZRblrsOla88S4wAK+zOQQWeBYfV3qJuJXMr+LnbYjaadrFaS0JILfEDPVqHyKnZ1Z/1d6J9VVUw==} + engines: {node: '>=20'} + p-locate@6.0.0: resolution: {integrity: sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + p-retry@7.1.1: + resolution: {integrity: sha512-J5ApzjyRkkf601HpEeykoiCvzHQjWxPAHhyjFcEUP2SWq0+35NKh8TLhpLw+Dkq5TZBFvUM6UigdE9hIVYTl5w==} + engines: {node: '>=20'} + parse-ms@4.0.0: resolution: {integrity: sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==} engines: {node: '>=18'} @@ 
-2628,6 +2671,9 @@ packages: resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} engines: {node: '>=0.6'} + ts-algebra@2.0.0: + resolution: {integrity: sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==} + tsconfck@3.1.6: resolution: {integrity: sha512-ks6Vjr/jEw0P1gmOVwutM3B7fWxoWBL2KRDb1JfqGVawBmO5UsvmWOQFGHBPl5yxYz4eERr19E6L7NMv+Fej4w==} engines: {node: ^18 || >=20} @@ -2819,6 +2865,12 @@ packages: snapshots: + '@anthropic-ai/sdk@0.78.0(zod@4.3.6)': + dependencies: + json-schema-to-ts: 3.1.1 + optionalDependencies: + zod: 4.3.6 + '@babel/code-frame@7.29.0': dependencies: '@babel/helper-validator-identifier': 7.28.5 @@ -4538,6 +4590,8 @@ snapshots: dependencies: is-extglob: 2.1.1 + is-network-error@1.3.1: {} + is-number@7.0.0: {} is-path-inside@4.0.0: {} @@ -4562,6 +4616,11 @@ snapshots: jsesc@3.1.0: {} + json-schema-to-ts@3.1.1: + dependencies: + '@babel/runtime': 7.28.6 + ts-algebra: 2.0.0 + json-schema-traverse@1.0.0: {} json5@2.2.3: {} @@ -4727,10 +4786,18 @@ snapshots: dependencies: yocto-queue: 1.2.2 + p-limit@7.3.0: + dependencies: + yocto-queue: 1.2.2 + p-locate@6.0.0: dependencies: p-limit: 4.0.0 + p-retry@7.1.1: + dependencies: + is-network-error: 1.3.1 + parse-ms@4.0.0: {} parseurl@1.3.3: {} @@ -5071,6 +5138,8 @@ snapshots: toidentifier@1.0.1: {} + ts-algebra@2.0.0: {} + tsconfck@3.1.6(typescript@5.9.3): optionalDependencies: typescript: 5.9.3 diff --git a/tsconfig.json b/tsconfig.json index 49df732..cffbdb0 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -11,6 +11,12 @@ }, { "path": "./lib/api-zod" + }, + { + "path": "./lib/integrations-anthropic-ai" + }, + { + "path": "./artifacts/api-server" } ] }