1
0
This repository has been archived on 2026-03-24. You can view files and clone it. You cannot open issues or pull requests or push a commit.
Files
Timmy-time-dashboard/src/dashboard/routes/agents.py

239 lines
7.8 KiB
Python
Raw Normal View History

import json
import logging
from datetime import datetime

from fastapi import APIRouter, Form, HTTPException, Request
from fastapi.responses import HTMLResponse

from dashboard.store import message_log
from dashboard.templating import templates
from timmy.session import _clean_response, chat_with_tools, continue_chat
from timmy.tool_safety import (
    format_action_description,
    get_impact_level,
)
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/agents", tags=["agents"])
MAX_MESSAGE_LENGTH = 10_000 # chars — reject before hitting the model
# In-memory store for paused runs (approval_id -> run context).
# Each entry holds the RunOutput, the RunRequirement ref, and tool metadata.
_pending_runs: dict[str, dict] = {}
@router.get("")
async def list_agents():
"""Return registered agents."""
from config import settings
return {
"agents": [
{
"id": "default",
"name": settings.agent_name,
"status": "idle",
"capabilities": "chat,reasoning,research,planning",
Claude/remove persona system f vgt m (#126) * Remove persona system, identity, and all Timmy references Strip the codebase to pure orchestration logic: - Delete TIMMY_IDENTITY.md and memory/self/identity.md - Gut brain/identity.py to no-op stubs (empty returns) - Remove all system prompts reinforcing Timmy's character, faith, sovereignty, sign-off ("Sir, affirmative"), and agent roster - Replace identity-laden prompts with generic local-AI-assistant prompts - Remove "You work for Timmy" from all sub-agent system prompts - Rename PersonaTools → AgentTools, PERSONA_TOOLKITS → AGENT_TOOLKITS - Replace "timmy" agent ID with "orchestrator" across routes, marketplace, tools catalog, and orchestrator class - Strip Timmy references from config comments, templates, telegram bot, chat API, and dashboard UI - Delete tests/brain/test_identity.py entirely - Fix all test assertions that checked for persona identity content 729 tests pass (2 pre-existing failures in test_calm.py unrelated). https://claude.ai/code/session_01LjQGUE6nk9W9674zaxrYxy * Add Taskosaur (PM + AI task execution) to docker-compose Spins up Taskosaur alongside the dashboard on `docker compose up`: - postgres:16-alpine (port 5432, Taskosaur DB) - redis:7-alpine (Bull queue backend) - taskosaur (ports 3000 API / 3001 UI) - dashboard now depends_on taskosaur healthy - TASKOSAUR_API_URL injected into dashboard environment Dashboard can reach Taskosaur at http://taskosaur:3000/api on the internal network. Frontend UI accessible at http://localhost:3001. https://claude.ai/code/session_01LjQGUE6nk9W9674zaxrYxy --------- Co-authored-by: Claude <noreply@anthropic.com>
2026-03-04 12:00:49 -05:00
"type": "local",
"model": settings.ollama_model,
"backend": "ollama",
"version": "1.0.0",
}
]
}
@router.get("/default/panel", response_class=HTMLResponse)
async def agent_panel(request: Request):
Claude/remove persona system f vgt m (#126) * Remove persona system, identity, and all Timmy references Strip the codebase to pure orchestration logic: - Delete TIMMY_IDENTITY.md and memory/self/identity.md - Gut brain/identity.py to no-op stubs (empty returns) - Remove all system prompts reinforcing Timmy's character, faith, sovereignty, sign-off ("Sir, affirmative"), and agent roster - Replace identity-laden prompts with generic local-AI-assistant prompts - Remove "You work for Timmy" from all sub-agent system prompts - Rename PersonaTools → AgentTools, PERSONA_TOOLKITS → AGENT_TOOLKITS - Replace "timmy" agent ID with "orchestrator" across routes, marketplace, tools catalog, and orchestrator class - Strip Timmy references from config comments, templates, telegram bot, chat API, and dashboard UI - Delete tests/brain/test_identity.py entirely - Fix all test assertions that checked for persona identity content 729 tests pass (2 pre-existing failures in test_calm.py unrelated). https://claude.ai/code/session_01LjQGUE6nk9W9674zaxrYxy * Add Taskosaur (PM + AI task execution) to docker-compose Spins up Taskosaur alongside the dashboard on `docker compose up`: - postgres:16-alpine (port 5432, Taskosaur DB) - redis:7-alpine (Bull queue backend) - taskosaur (ports 3000 API / 3001 UI) - dashboard now depends_on taskosaur healthy - TASKOSAUR_API_URL injected into dashboard environment Dashboard can reach Taskosaur at http://taskosaur:3000/api on the internal network. Frontend UI accessible at http://localhost:3001. https://claude.ai/code/session_01LjQGUE6nk9W9674zaxrYxy --------- Co-authored-by: Claude <noreply@anthropic.com>
2026-03-04 12:00:49 -05:00
"""Chat panel — for HTMX main-panel swaps."""
return templates.TemplateResponse(request, "partials/agent_panel_chat.html", {"agent": None})
@router.get("/default/history", response_class=HTMLResponse)
async def get_history(request: Request):
return templates.TemplateResponse(
request,
"partials/history.html",
{"messages": message_log.all()},
)
@router.delete("/default/history", response_class=HTMLResponse)
async def clear_history(request: Request):
message_log.clear()
return templates.TemplateResponse(
request,
"partials/history.html",
{"messages": []},
)
@router.post("/default/chat", response_class=HTMLResponse)
async def chat_agent(request: Request, message: str = Form(...)):
"""Chat — synchronous response with native Agno tool confirmation."""
message = message.strip()
if not message:
from fastapi import HTTPException
raise HTTPException(status_code=400, detail="Message cannot be empty")
if len(message) > MAX_MESSAGE_LENGTH:
from fastapi import HTTPException
raise HTTPException(status_code=422, detail="Message too long")
timestamp = datetime.now().strftime("%H:%M:%S")
response_text = None
error_text = None
try:
run_output = await chat_with_tools(message)
except Exception as exc:
logger.error("Chat error: %s", exc)
error_text = f"Chat error: {exc}"
run_output = None
# Check if Agno paused the run for tool confirmation
tool_actions = []
if run_output is not None:
status = getattr(run_output, "status", None)
is_paused = status == "PAUSED" or str(status) == "RunStatus.paused"
if is_paused and getattr(run_output, "active_requirements", None):
for req in run_output.active_requirements:
if getattr(req, "needs_confirmation", False):
te = req.tool_execution
tool_name = getattr(te, "tool_name", "unknown")
tool_args = getattr(te, "tool_args", {}) or {}
from timmy.approvals import create_item
item = create_item(
title=f"Dashboard: {tool_name}",
description=format_action_description(tool_name, tool_args),
proposed_action=json.dumps({"tool": tool_name, "args": tool_args}),
impact=get_impact_level(tool_name),
)
_pending_runs[item.id] = {
"run_output": run_output,
"requirement": req,
"tool_name": tool_name,
"tool_args": tool_args,
}
tool_actions.append(
{
"approval_id": item.id,
"tool_name": tool_name,
"description": format_action_description(tool_name, tool_args),
"impact": get_impact_level(tool_name),
}
)
raw_content = run_output.content if hasattr(run_output, "content") else ""
response_text = _clean_response(raw_content or "")
if not response_text and not tool_actions:
response_text = None # let error template show if needed
message_log.append(role="user", content=message, timestamp=timestamp, source="browser")
if response_text:
message_log.append(
role="agent", content=response_text, timestamp=timestamp, source="browser"
)
elif error_text:
message_log.append(role="error", content=error_text, timestamp=timestamp, source="browser")
return templates.TemplateResponse(
request,
"partials/chat_message.html",
{
"user_message": message,
"response": response_text,
"error": error_text,
"timestamp": timestamp,
"task_id": None,
"queue_info": None,
"tool_actions": tool_actions,
},
)
@router.post("/default/tool/{approval_id}/approve", response_class=HTMLResponse)
async def approve_tool(request: Request, approval_id: str):
"""Confirm a paused tool and resume execution via Agno."""
from timmy.approvals import approve
pending = _pending_runs.pop(approval_id, None)
if not pending:
return HTMLResponse(
"<p class='text-danger'>Action not found or already processed.</p>",
status_code=404,
)
approve(approval_id)
tool_name = pending["tool_name"]
# Confirm the requirement — Agno will execute the tool on continue_run
req = pending["requirement"]
req.confirm()
try:
result_run = await continue_chat(pending["run_output"])
# Extract tool result from the resumed run
tool_result = ""
for te in getattr(result_run, "tools", None) or []:
if getattr(te, "tool_name", None) == tool_name and getattr(te, "result", None):
tool_result = te.result
break
if not tool_result:
tool_result = getattr(result_run, "content", None) or "Tool executed successfully."
except Exception as exc:
logger.error("Tool execution failed: %s", exc)
tool_result = f"Error: {exc}"
return templates.TemplateResponse(
request,
"partials/chat_tool_result.html",
{
"approval_id": approval_id,
"tool_name": tool_name,
"status": "approved",
"result": str(tool_result)[:2000],
},
)
@router.post("/default/tool/{approval_id}/reject", response_class=HTMLResponse)
async def reject_tool(request: Request, approval_id: str):
"""Reject a pending tool action."""
from timmy.approvals import reject
pending = _pending_runs.pop(approval_id, None)
tool_name = "action"
if pending:
tool_name = pending["tool_name"]
req = pending["requirement"]
req.reject(note="User rejected from dashboard")
# Resume so the agent knows the tool was rejected
try:
await continue_chat(pending["run_output"])
except Exception as exc:
logger.warning("Agent tool rejection error: %s", exc)
pass
reject(approval_id)
return templates.TemplateResponse(
request,
"partials/chat_tool_result.html",
{
"approval_id": approval_id,
"tool_name": tool_name,
"status": "rejected",
"result": "",
},
)