forked from Rockachopa/Timmy-time-dashboard
feat: task queue system with startup drain and backlogging (#76)
* feat: add task queue system for Timmy - all work goes through the queue - Add queue position tracking to task_queue models with task_type field - Add TaskProcessor class that consumes tasks from queue one at a time - Modify chat route to queue all messages for async processing - Chat responses get 'high' priority to jump ahead of thought tasks - Add queue status API endpoints for position polling - Update UI to show queue position (x/y) and current task banner - Replace thinking loop with task-based approach - thoughts are queued tasks - Push responses to user via WebSocket instead of immediate HTTP response - Add database migrations for existing tables * feat: Timmy drains task queue on startup, backlogs unhandleable tasks On spin-up, Timmy now iterates through all pending/approved tasks immediately instead of waiting for the polling loop. Tasks without a registered handler or with permanent errors are moved to a new BACKLOGGED status with a reason, keeping the queue clear for work Timmy can actually do. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> --------- Co-authored-by: Alexander Payne <apayne@MM.local> Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
committed by
GitHub
parent
849b5b1a8d
commit
5b6d33e05a
@@ -84,29 +84,139 @@ async def _briefing_scheduler() -> None:
|
||||
async def _thinking_loop() -> None:
    """Background task: Timmy's default thinking thread.

    Starts shortly after server boot and runs on a configurable cadence
    (``settings.thinking_interval_seconds``). Timmy ponders his existence,
    recent swarm activity, scripture, creative ideas, or continues a
    previous train of thought.

    Instead of thinking directly, this creates thought tasks in the queue
    for the task processor to handle. This ensures all of Timmy's work
    goes through the unified task system.
    """
    from swarm.task_queue.models import create_task
    from datetime import datetime

    await asyncio.sleep(10)  # Let server finish starting before first thought

    while True:
        try:
            # Queue a thought task; the task processor's "thought" handler
            # performs the actual thinking. (The previous direct
            # `await thinking_engine.think_once()` call here duplicated the
            # work the queued task triggers and contradicted the stated
            # design, so it has been removed.)
            now = datetime.now()
            create_task(
                title=f"Thought: {now.strftime('%A %B %d, %I:%M %p')}",
                description="Continue thinking about your existence, recent events, scripture, creative ideas, or a previous thread of thought.",
                assigned_to="timmy",
                created_by="timmy",  # Self-generated
                priority="low",  # chat responses use "high" and jump ahead
                requires_approval=False,
                auto_approve=True,
                task_type="thought",
            )
            logger.debug("Created thought task in queue")
        except Exception as exc:
            # Best-effort loop: log and keep ticking rather than dying.
            logger.error("Thinking loop error: %s", exc)

        await asyncio.sleep(settings.thinking_interval_seconds)
|
||||
|
||||
|
||||
async def _task_processor_loop() -> None:
    """Background task: Timmy's task queue processor.

    On startup, drains all pending/approved tasks immediately — iterating
    through the queue and processing what can be handled, backlogging what
    can't. Then enters the steady-state polling loop via
    ``task_processor.run_loop``.
    """
    from swarm.task_processor import task_processor
    from timmy.session import chat as timmy_chat
    from datetime import datetime

    await asyncio.sleep(5)  # Let server finish starting

    def handle_chat_response(task):
        """Handler for chat_response tasks - calls Timmy and returns response."""
        try:
            # Prefix the message with wall-clock time so Timmy can reason
            # about "now"; task.description carries the user's message.
            now = datetime.now()
            context = f"[System: Current date/time is {now.strftime('%A, %B %d, %Y at %I:%M %p')}]\n\n"
            response = timmy_chat(context + task.description)

            # Push response to user via WebSocket. Best-effort: a missing
            # or broken WS layer must not fail the chat task itself.
            try:
                from infrastructure.ws_manager.handler import ws_manager

                # NOTE(review): asyncio.create_task requires a running event
                # loop in this thread; if task_processor invokes handlers in
                # a worker thread this raises RuntimeError and the push is
                # silently skipped below — confirm handler execution model.
                asyncio.create_task(
                    ws_manager.broadcast(
                        "timmy_response",
                        {
                            "task_id": task.id,
                            "response": response,
                        },
                    )
                )
            except Exception as e:
                logger.debug("Failed to push response via WS: %s", e)

            return response
        except Exception as e:
            logger.error("Chat response failed: %s", e)
            return f"Error: {str(e)}"

    def handle_thought(task):
        """Handler for thought tasks - Timmy's internal thinking."""
        from timmy.thinking import thinking_engine

        try:
            # NOTE(review): _thinking_loop awaits think_once(); if it is a
            # coroutine function, calling it without await here yields a
            # coroutine object and str(result) is just its repr — confirm
            # whether think_once is sync or async.
            result = thinking_engine.think_once()
            return str(result) if result else "Thought completed"
        except Exception as e:
            logger.error("Thought processing failed: %s", e)
            return f"Error: {str(e)}"

    # Register handlers; task types without a handler are backlogged by
    # the startup drain instead of clogging the queue.
    task_processor.register_handler("chat_response", handle_chat_response)
    task_processor.register_handler("thought", handle_thought)
    task_processor.register_handler("internal", handle_thought)

    # ── Startup drain: iterate through all pending tasks immediately ──
    logger.info("Draining task queue on startup…")
    try:
        summary = await task_processor.drain_queue()
        if summary["processed"] or summary["backlogged"]:
            logger.info(
                "Startup drain: %d processed, %d backlogged, %d skipped, %d failed",
                summary["processed"],
                summary["backlogged"],
                summary["skipped"],
                summary["failed"],
            )

            # Notify via WebSocket so the dashboard updates (best-effort).
            try:
                from infrastructure.ws_manager.handler import ws_manager

                asyncio.create_task(
                    ws_manager.broadcast_json(
                        {
                            "type": "task_event",
                            "event": "startup_drain_complete",
                            "summary": summary,
                        }
                    )
                )
            except Exception:
                pass
    except Exception as exc:
        # A failed drain should not prevent steady-state processing.
        logger.error("Startup drain failed: %s", exc)

    # ── Steady-state: poll for new tasks ──
    logger.info("Task processor entering steady-state loop")
    await task_processor.run_loop(interval_seconds=3.0)
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI):
|
||||
task = asyncio.create_task(_briefing_scheduler())
|
||||
|
||||
# Register Timmy in the swarm registry so it shows up alongside other agents
|
||||
from swarm import registry as swarm_registry
|
||||
|
||||
swarm_registry.register(
|
||||
name="Timmy",
|
||||
capabilities="chat,reasoning,research,planning",
|
||||
@@ -115,6 +225,7 @@ async def lifespan(app: FastAPI):
|
||||
|
||||
# Log swarm recovery summary (reconciliation ran during coordinator init)
|
||||
from swarm.coordinator import coordinator as swarm_coordinator
|
||||
|
||||
rec = swarm_coordinator._recovery_summary
|
||||
if rec["tasks_failed"] or rec["agents_offlined"]:
|
||||
logger.info(
|
||||
@@ -138,6 +249,7 @@ async def lifespan(app: FastAPI):
|
||||
# Log system startup event so the Events page is never empty
|
||||
try:
|
||||
from swarm.event_log import log_event, EventType
|
||||
|
||||
log_event(
|
||||
EventType.SYSTEM_INFO,
|
||||
source="coordinator",
|
||||
@@ -148,6 +260,7 @@ async def lifespan(app: FastAPI):
|
||||
|
||||
# Auto-bootstrap MCP tools
|
||||
from mcp.bootstrap import auto_bootstrap, get_bootstrap_status
|
||||
|
||||
try:
|
||||
registered = auto_bootstrap()
|
||||
if registered:
|
||||
@@ -157,6 +270,7 @@ async def lifespan(app: FastAPI):
|
||||
|
||||
# Initialise Spark Intelligence engine
|
||||
from spark.engine import spark_engine
|
||||
|
||||
if spark_engine.enabled:
|
||||
logger.info("Spark Intelligence active — event capture enabled")
|
||||
|
||||
@@ -169,10 +283,17 @@ async def lifespan(app: FastAPI):
|
||||
settings.thinking_interval_seconds,
|
||||
)
|
||||
|
||||
# Start Timmy's task queue processor (skip in test mode)
|
||||
task_processor_task = None
|
||||
if os.environ.get("TIMMY_TEST_MODE") != "1":
|
||||
task_processor_task = asyncio.create_task(_task_processor_loop())
|
||||
logger.info("Task queue processor started")
|
||||
|
||||
# Auto-start chat integrations (skip silently if unconfigured)
|
||||
from integrations.telegram_bot.bot import telegram_bot
|
||||
from integrations.chat_bridge.vendors.discord import discord_bot
|
||||
from integrations.chat_bridge.registry import platform_registry
|
||||
|
||||
platform_registry.register(discord_bot)
|
||||
|
||||
if settings.telegram_token:
|
||||
@@ -195,6 +316,12 @@ async def lifespan(app: FastAPI):
|
||||
await thinking_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
if task_processor_task:
|
||||
task_processor_task.cancel()
|
||||
try:
|
||||
await task_processor_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
task.cancel()
|
||||
try:
|
||||
await task
|
||||
@@ -272,4 +399,5 @@ async def index(request: Request):
|
||||
async def shortcuts_setup():
    """Return the Siri Shortcuts setup guide."""
    from integrations.shortcuts.siri import get_setup_guide

    guide = get_setup_guide()
    return guide
|
||||
|
||||
Reference in New Issue
Block a user