forked from Rockachopa/Timmy-time-dashboard
feat: implement v1 API endpoints for iPad app
This commit is contained in:
@@ -28,6 +28,7 @@ from dashboard.routes.agents import router as agents_router
|
||||
from dashboard.routes.briefing import router as briefing_router
|
||||
from dashboard.routes.calm import router as calm_router
|
||||
from dashboard.routes.chat_api import router as chat_api_router
|
||||
from dashboard.routes.chat_api_v1 import router as chat_api_v1_router
|
||||
from dashboard.routes.db_explorer import router as db_explorer_router
|
||||
from dashboard.routes.discord import router as discord_router
|
||||
from dashboard.routes.experiments import router as experiments_router
|
||||
@@ -483,6 +484,7 @@ app.include_router(grok_router)
|
||||
app.include_router(models_router)
|
||||
app.include_router(models_api_router)
|
||||
app.include_router(chat_api_router)
|
||||
app.include_router(chat_api_v1_router)
|
||||
app.include_router(thinking_router)
|
||||
app.include_router(calm_router)
|
||||
app.include_router(tasks_router)
|
||||
|
||||
205
src/dashboard/routes/chat_api_v1.py
Normal file
205
src/dashboard/routes/chat_api_v1.py
Normal file
@@ -0,0 +1,205 @@
|
||||
"""Version 1 (v1) JSON REST API for the Timmy Time iPad app.
|
||||
|
||||
This module implements the specific endpoints required by the native
|
||||
iPad app as defined in the project specification.
|
||||
|
||||
Endpoints:
|
||||
POST /api/v1/chat — Streaming SSE chat response
|
||||
GET /api/v1/chat/history — Retrieve chat history with limit
|
||||
POST /api/v1/upload — Multipart file upload with auto-detection
|
||||
GET /api/v1/status — Detailed system and model status
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import uuid
|
||||
import json
|
||||
import asyncio
|
||||
from datetime import datetime, UTC
|
||||
from pathlib import Path
|
||||
from typing import Optional, List
|
||||
|
||||
from fastapi import APIRouter, File, HTTPException, Request, UploadFile, Query
|
||||
from fastapi.responses import JSONResponse, StreamingResponse
|
||||
|
||||
from config import settings, APP_START_TIME
|
||||
from dashboard.store import message_log
|
||||
from timmy.session import chat_with_tools, _get_agent
|
||||
from dashboard.routes.health import _check_ollama
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix="/api/v1", tags=["chat-api-v1"])
|
||||
|
||||
_UPLOAD_DIR = str(Path(settings.repo_root) / "data" / "chat-uploads")
|
||||
_MAX_UPLOAD_SIZE = 50 * 1024 * 1024 # 50 MB
|
||||
|
||||
|
||||
# ── POST /api/v1/chat ─────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
@router.post("/chat")
async def api_v1_chat(request: Request):
    """Accept a JSON chat payload and return a streaming SSE response.

    Request body:
        {
            "message": "string",
            "session_id": "string",
            "attachments": ["id1", "id2"]
        }

    Response:
        text/event-stream (SSE)
    """
    try:
        payload = await request.json()
    except Exception as exc:
        logger.warning("Chat v1 API JSON parse error: %s", exc)
        return JSONResponse(status_code=400, content={"error": "Invalid JSON"})

    user_message = payload.get("message")
    session = payload.get("session_id", "ipad-app")
    attachment_ids = payload.get("attachments", [])

    if not user_message:
        return JSONResponse(status_code=400, content={"error": "message is required"})

    # Build a system preamble so the agent knows the current local time,
    # which client is talking, and which uploads accompany the message.
    header = [
        f"[System: Current date/time is "
        f"{datetime.now().strftime('%A, %B %d, %Y at %I:%M %p')}]\n",
        "[System: iPad App client]\n",
    ]
    if attachment_ids:
        header.append(f"[System: Attachments: {', '.join(attachment_ids)}]\n")
    header.append("\n")
    full_prompt = "".join(header) + user_message

    async def event_generator():
        """Yield agent output as SSE `data:` events, then a [DONE] sentinel."""
        try:
            agent = _get_agent()
            # Streaming mode: forward each chunk to the client as it arrives.
            async for chunk in agent.arun(full_prompt, stream=True, session_id=session):
                # Agno chunks can be strings or RunOutput objects.
                text = chunk.content if hasattr(chunk, "content") else str(chunk)
                if text:
                    yield f"data: {json.dumps({'text': text})}\n\n"
            yield "data: [DONE]\n\n"
        except Exception as exc:
            # Errors mid-stream are reported in-band; the HTTP response has
            # already started, so a status code can no longer be changed.
            logger.error("SSE stream error: %s", exc)
            yield f"data: {json.dumps({'error': str(exc)})}\n\n"

    return StreamingResponse(event_generator(), media_type="text/event-stream")
|
||||
|
||||
|
||||
# ── GET /api/v1/chat/history ──────────────────────────────────────────────────
|
||||
|
||||
|
||||
@router.get("/chat/history")
async def api_v1_chat_history(
    session_id: str = Query("ipad-app"),
    limit: int = Query(50, ge=1, le=100)
):
    """Return recent chat history for a specific session."""
    # NOTE(review): message_log has no per-session filtering yet, so
    # session_id is accepted but ignored; the most recent `limit`
    # messages across all sessions are returned instead.
    recent = message_log.all()[-limit:]
    return {
        "messages": [
            {
                "role": entry.role,
                "content": entry.content,
                "timestamp": entry.timestamp,
                "source": entry.source,
            }
            for entry in recent
        ]
    }
|
||||
|
||||
|
||||
# ── POST /api/v1/upload ───────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _detect_media_type(mime_type: str, ext: str) -> str:
    """Classify an upload as "image", "audio", or "document".

    Uses the client-supplied MIME type first, falling back to the file
    extension for formats iOS commonly sends without a specific type.
    """
    if mime_type.startswith("image/") or ext in (".jpg", ".jpeg", ".png", ".heic"):
        return "image"
    if mime_type.startswith("audio/") or ext in (".m4a", ".mp3", ".wav", ".caf"):
        return "audio"
    return "document"


@router.post("/upload")
async def api_v1_upload(file: UploadFile = File(...)):
    """Accept a file upload, auto-detect type, and return metadata.

    Response:
        {
            "id": "string",
            "type": "image|audio|document|url",
            "summary": "string",
            "metadata": {...}
        }

    Raises:
        HTTPException 400 if the file name escapes the upload directory.
        HTTPException 413 if the upload exceeds the 50 MB limit.
    """
    os.makedirs(_UPLOAD_DIR, exist_ok=True)

    file_id = uuid.uuid4().hex[:12]
    safe_name = os.path.basename(file.filename or "upload")
    stored_name = f"{file_id}-{safe_name}"
    file_path = os.path.join(_UPLOAD_DIR, stored_name)

    # Verify the resolved path stays within the upload directory.
    # A plain startswith() prefix test is unsafe ("/x/uploads-evil"
    # matches "/x/uploads"), so compare the resolved parent exactly.
    resolved = Path(file_path).resolve()
    upload_root = Path(_UPLOAD_DIR).resolve()
    if resolved.parent != upload_root:
        raise HTTPException(status_code=400, detail="Invalid file name")

    # Read in bounded chunks so an oversized upload is rejected without
    # first buffering the entire body in memory.
    chunks: list[bytes] = []
    total = 0
    while chunk := await file.read(1024 * 1024):
        total += len(chunk)
        if total > _MAX_UPLOAD_SIZE:
            raise HTTPException(status_code=413, detail="File too large (max 50 MB)")
        chunks.append(chunk)
    contents = b"".join(chunks)

    with open(file_path, "wb") as f:
        f.write(contents)

    # Auto-detect type based on extension/mime.
    mime_type = file.content_type or "application/octet-stream"
    ext = os.path.splitext(safe_name)[1].lower()
    media_type = _detect_media_type(mime_type, ext)

    # Placeholder for actual processing (OCR, Whisper, etc.)
    summary = f"Uploaded {media_type}: {safe_name}"

    return {
        "id": file_id,
        "type": media_type,
        "summary": summary,
        "url": f"/uploads/{stored_name}",
        "metadata": {
            "fileName": safe_name,
            "mimeType": mime_type,
            "size": len(contents),
        },
    }
|
||||
|
||||
|
||||
# ── GET /api/v1/status ────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
@router.get("/status")
async def api_v1_status():
    """Detailed system and model status."""
    health = await _check_ollama()
    healthy = health.status == "healthy"

    # Uptime measured from the process start timestamp, rendered as "Hh Mm".
    elapsed = (datetime.now(UTC) - APP_START_TIME).total_seconds()
    hours = int(elapsed // 3600)
    minutes = int((elapsed % 3600) // 60)

    return {
        "timmy": "online" if healthy else "offline",
        "model": settings.ollama_model,
        "ollama": "running" if healthy else "stopped",
        "uptime": f"{hours}h {minutes}m",
        "version": "2.0.0-v1-api",
    }
|
||||
52
tests/test_api_v1.py
Normal file
52
tests/test_api_v1.py
Normal file
@@ -0,0 +1,52 @@
|
||||
import sys
import os
from pathlib import Path

# Make the project's src/ importable regardless of where the repository is
# checked out. (The previous hard-coded /home/ubuntu/... absolute path only
# worked on one specific machine.)
src_path = str(Path(__file__).resolve().parents[1] / "src")
if src_path not in sys.path:
    sys.path.insert(0, src_path)

from fastapi.testclient import TestClient

try:
    from dashboard.app import app
    print("✓ Successfully imported dashboard.app")
except ImportError as e:
    print(f"✗ Failed to import dashboard.app: {e}")
    sys.exit(1)

# Shared in-process HTTP client used by every test below.
client = TestClient(app)
|
||||
|
||||
def test_v1_status():
    """The status endpoint responds 200 and reports model/uptime fields."""
    resp = client.get("/api/v1/status")
    assert resp.status_code == 200
    body = resp.json()
    for key in ("timmy", "model", "uptime"):
        assert key in body
|
||||
|
||||
def test_v1_chat_history():
    """The history endpoint responds 200 with a messages list."""
    resp = client.get("/api/v1/chat/history")
    assert resp.status_code == 200
    assert "messages" in resp.json()
|
||||
|
||||
def test_v1_upload_fail():
    """Uploading with no file attached is rejected with 422."""
    resp = client.post("/api/v1/upload")
    # FastAPI validation: missing required File(...) → Unprocessable Entity.
    assert resp.status_code == 422
|
||||
|
||||
if __name__ == "__main__":
    # Minimal table-driven runner so the file can be executed directly
    # (outside pytest); exits non-zero on the first failure.
    print("Running API v1 tests...")
    checks = [
        (test_v1_status, "✓ Status test passed"),
        (test_v1_chat_history, "✓ History test passed"),
        (test_v1_upload_fail, "✓ Upload failure test passed"),
    ]
    try:
        for check, success_msg in checks:
            check()
            print(success_msg)
        print("All tests passed!")
    except Exception as e:
        print(f"Test failed: {e}")
        sys.exit(1)
|
||||
Reference in New Issue
Block a user