feat: Gemini AI integration — conversations, messages, image gen

- Fixed YAML parse error (unquoted colon in description broke @scalar/json-magic)
- Converted orval.config.ts → orval.config.cjs (fixes orval v8 TypeScript config loading)
- Codegen now works: zod schemas + React Query hooks regenerated with Gemini types
- Added Gemini tag, 4 path groups, 8 schemas to openapi.yaml
- lib/integrations-gemini-ai wired: tsconfig refs, api-server package.json dep
- Created routes/gemini.ts: CRUD conversations/messages + SSE chat stream + image gen
- Mounted /gemini router in routes/index.ts
This commit is contained in:
Replit Agent
2026-03-20 02:41:12 +00:00
parent cdb104e34f
commit e86dab0d65
54 changed files with 3620 additions and 28 deletions

View File

@@ -0,0 +1,6 @@
// Public surface of the batch-processing utilities.
export { batchProcess, batchProcessWithSSE, isRateLimitError } from "./utils";
export type { BatchOptions } from "./utils";

View File

@@ -0,0 +1,139 @@
import pLimit from "p-limit";
import pRetry from "p-retry";
/**
* Batch Processing Utilities
*
* Generic batch processing with built-in rate limiting and automatic retries.
* Use for any task that requires processing multiple items through an LLM or external API.
*
* USAGE:
* ```typescript
* import { batchProcess } from "@workspace/integrations-gemini-ai/batch";
* import { ai } from "@workspace/integrations-gemini-ai";
*
* const results = await batchProcess(
* artworks,
* async (artwork) => {
* const response = await ai.models.generateContent({
* model: "gemini-2.5-flash",
* contents: [{ role: "user", parts: [{ text: `Categorize: ${artwork.name}` }] }],
* config: { responseMimeType: "application/json" },
* });
* return JSON.parse(response.text ?? "{}");
* },
* { concurrency: 2, retries: 5 }
* );
* ```
*/
/**
 * Tuning knobs for batchProcess / batchProcessWithSSE.
 * Defaults differ per function; see each field's note.
 */
export interface BatchOptions {
  /** Max items processed at the same time (batchProcess default: 2). */
  concurrency?: number;
  /** Retry attempts per item after the first failure (defaults: 7 in batchProcess, 5 in batchProcessWithSSE). */
  retries?: number;
  /** Initial backoff delay in ms before the first retry (defaults: 2000 / 1000). */
  minTimeout?: number;
  /** Upper bound in ms for the exponential backoff (defaults: 128000 / 15000). */
  maxTimeout?: number;
  /** Invoked after each successful item with the running success count, total, and the item. Not supported by batchProcessWithSSE. */
  onProgress?: (completed: number, total: number, item: unknown) => void;
}
/**
 * Heuristically classify an error as a rate-limit / quota failure by
 * inspecting its message text.
 *
 * Matches: a literal "429", the uppercase "RATELIMIT_EXCEEDED" marker, or
 * (case-insensitively) the words "quota" / "rate limit".
 *
 * @param error Anything thrown; non-Error values are stringified first.
 * @returns true when the message looks like a rate-limit error.
 */
export function isRateLimitError(error: unknown): boolean {
  const message = error instanceof Error ? error.message : String(error);
  if (message.includes("429") || message.includes("RATELIMIT_EXCEEDED")) {
    return true;
  }
  const lowered = message.toLowerCase();
  return lowered.includes("quota") || lowered.includes("rate limit");
}
/**
 * Process `items` through `processor` with bounded concurrency and
 * exponential-backoff retries.
 *
 * Retry policy: only failures matching isRateLimitError() are retried; any
 * other error is wrapped in pRetry.AbortError so retrying stops immediately
 * and the error propagates. Because results are awaited with Promise.all,
 * a single item that ultimately fails rejects the whole batch.
 *
 * @param items Items to process; results preserve this order.
 * @param processor Async worker; receives each item and its index.
 * @param options Tuning; defaults here: concurrency 2, retries 7,
 *   backoff starting at 2s and doubling up to 128s per attempt.
 * @returns Results in the same order as `items`.
 */
export async function batchProcess<T, R>(
  items: T[],
  processor: (item: T, index: number) => Promise<R>,
  options: BatchOptions = {}
): Promise<R[]> {
  const {
    concurrency = 2,
    retries = 7,
    minTimeout = 2000,
    maxTimeout = 128000,
    onProgress,
  } = options;
  // p-limit gates how many processor calls are in flight across all items.
  const limit = pLimit(concurrency);
  // Running count of successes only; failed items never increment it.
  let completed = 0;
  const promises = items.map((item, index) =>
    limit(() =>
      pRetry(
        async () => {
          try {
            const result = await processor(item, index);
            completed++;
            // Progress reports the cumulative success count, not the index.
            onProgress?.(completed, items.length, item);
            return result;
          } catch (error: unknown) {
            if (isRateLimitError(error)) {
              // Transient: rethrow plainly so pRetry schedules another attempt.
              throw error;
            }
            // Anything else is treated as permanent — abort further retries.
            // NOTE(review): assumes a p-retry version that exposes AbortError
            // on the default export; confirm against the installed version.
            throw new pRetry.AbortError(
              error instanceof Error ? error : new Error(String(error))
            );
          }
        },
        { retries, minTimeout, maxTimeout, factor: 2 }
      )
    )
  );
  return Promise.all(promises);
}
/**
 * Sequential (one item at a time) batch processing that streams progress
 * events through `sendEvent`, intended for SSE endpoints.
 *
 * Event sequence:
 *   { type: "started", total }              once, before any work
 *   { type: "processing", index, item }     before each item
 *   { type: "progress", index, result }     after a success
 *   { type: "progress", index, error }      after a failure (same event type;
 *                                           consumers must check which of
 *                                           `result` / `error` is present)
 *   { type: "complete", processed, errors } once, at the end
 *
 * Unlike batchProcess(), a failing item does NOT reject the batch: its slot
 * in the returned array is `undefined` (cast to R), so callers should treat
 * entries as possibly undefined whenever `errors > 0`.
 */
export async function batchProcessWithSSE<T, R>(
  items: T[],
  processor: (item: T, index: number) => Promise<R>,
  sendEvent: (event: { type: string; [key: string]: unknown }) => void,
  options: Omit<BatchOptions, "concurrency" | "onProgress"> = {}
): Promise<R[]> {
  // Tighter backoff than batchProcess: 5 retries, 1s doubling up to 15s.
  const { retries = 5, minTimeout = 1000, maxTimeout = 15000 } = options;
  sendEvent({ type: "started", total: items.length });
  const results: R[] = [];
  let errors = 0;
  for (let index = 0; index < items.length; index++) {
    const item = items[index];
    sendEvent({ type: "processing", index, item });
    try {
      const result = await pRetry(
        () => processor(item, index),
        {
          retries,
          minTimeout,
          maxTimeout,
          factor: 2,
          onFailedAttempt: (error) => {
            if (!isRateLimitError(error)) {
              // Throwing from onFailedAttempt stops the retry loop, so only
              // rate-limit errors get retried; everything else fails fast.
              // NOTE(review): assumes p-retry aborts when onFailedAttempt
              // throws and exposes AbortError on the default export — confirm
              // against the installed p-retry version.
              throw new pRetry.AbortError(
                error instanceof Error ? error : new Error(String(error))
              );
            }
          },
        }
      );
      results.push(result);
      sendEvent({ type: "progress", index, result });
    } catch (error) {
      errors++;
      // Keep positional alignment with `items` by filling the slot with
      // undefined; the `as R` cast hides this from the type system.
      results.push(undefined as R);
      sendEvent({
        type: "progress",
        index,
        error: error instanceof Error ? error.message : "Processing failed",
      });
    }
  }
  sendEvent({ type: "complete", processed: items.length, errors });
  return results;
}

View File

@@ -0,0 +1,21 @@
import { GoogleGenAI } from "@google/genai";
/**
 * Read a required environment variable, failing fast at module load with the
 * provisioning hint when the Gemini integration has not been configured.
 */
function requireEnv(name: string): string {
  const value = process.env[name];
  if (!value) {
    throw new Error(
      `${name} must be set. Did you forget to provision the Gemini AI integration?`,
    );
  }
  return value;
}

// Validate in a fixed order: base URL first, then API key.
const geminiBaseUrl = requireEnv("AI_INTEGRATIONS_GEMINI_BASE_URL");
const geminiApiKey = requireEnv("AI_INTEGRATIONS_GEMINI_API_KEY");

/** Shared Gemini client configured for the integration's proxy endpoint. */
export const ai = new GoogleGenAI({
  apiKey: geminiApiKey,
  httpOptions: {
    // NOTE(review): empty apiVersion looks intentional for the proxied
    // base URL — confirm with the integration's requirements.
    apiVersion: "",
    baseUrl: geminiBaseUrl,
  },
});

View File

@@ -0,0 +1,47 @@
import { GoogleGenAI, Modality } from "@google/genai";
/**
 * Fetch a mandatory environment variable or abort module load with the
 * provisioning hint for the Gemini integration.
 */
function mustGetEnv(name: string): string {
  const value = process.env[name];
  if (value) {
    return value;
  }
  throw new Error(
    `${name} must be set. Did you forget to provision the Gemini AI integration?`,
  );
}

// Check order preserved: base URL is validated before the API key.
const baseUrl = mustGetEnv("AI_INTEGRATIONS_GEMINI_BASE_URL");
const apiKey = mustGetEnv("AI_INTEGRATIONS_GEMINI_API_KEY");

/** Gemini client routed through the integration's base URL. */
export const ai = new GoogleGenAI({
  apiKey,
  httpOptions: {
    // NOTE(review): empty apiVersion appears deliberate for the proxied
    // endpoint — verify against the integration docs.
    apiVersion: "",
    baseUrl,
  },
});
/**
 * Generate a single image from a text prompt with the Gemini image model.
 *
 * @param prompt Text description of the desired image.
 * @returns Base64-encoded image bytes and a MIME type (falls back to "image/png").
 * @throws Error when the response contains no inline image data.
 */
export async function generateImage(
  prompt: string
): Promise<{ b64_json: string; mimeType: string }> {
  const response = await ai.models.generateContent({
    model: "gemini-2.5-flash-image",
    contents: [{ role: "user", parts: [{ text: prompt }] }],
    config: {
      responseModalities: [Modality.TEXT, Modality.IMAGE],
    },
  });
  // Responses can interleave text and image parts; take the first image part.
  const allParts = response.candidates?.[0]?.content?.parts ?? [];
  const firstImage = allParts.find(
    (part: { inlineData?: { data?: string; mimeType?: string } }) =>
      Boolean(part.inlineData)
  );
  const data = firstImage?.inlineData?.data;
  if (!data) {
    throw new Error("No image data in response");
  }
  return {
    b64_json: data,
    mimeType: firstImage?.inlineData?.mimeType || "image/png",
  };
}

View File

@@ -0,0 +1 @@
export { ai, generateImage } from "./client";

View File

@@ -0,0 +1,3 @@
// Package entry point: client, image generation, and batch utilities.
export { ai } from "./client";
export { generateImage } from "./image";
export { batchProcess, batchProcessWithSSE, isRateLimitError } from "./batch";
export type { BatchOptions } from "./batch";