Compare commits

..

1 Commits

Author SHA1 Message Date
kimi
bf57da87b3 refactor: break up delegate_to_kimi into helpers
Extract _find_kimi_cli, _resolve_workdir, and _run_kimi from the 68-line
delegate_to_kimi function, reducing it to ~15 lines.

Fixes #635

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-20 16:54:41 -04:00
19 changed files with 331 additions and 645 deletions

View File

@@ -84,7 +84,6 @@ class Settings(BaseSettings):
# Only used when explicitly enabled and query complexity warrants it.
grok_enabled: bool = False
xai_api_key: str = ""
xai_base_url: str = "https://api.x.ai/v1"
grok_default_model: str = "grok-3-fast"
grok_max_sats_per_query: int = 200
grok_free: bool = False # Skip Lightning invoice when user has own API key

View File

@@ -377,78 +377,73 @@ def _startup_background_tasks() -> list[asyncio.Task]:
]
def _try_prune(label: str, prune_fn, days: int) -> None:
"""Run a prune function, log results, swallow errors."""
try:
pruned = prune_fn()
if pruned:
logger.info(
"%s auto-prune: removed %d entries older than %d days",
label,
pruned,
days,
)
except Exception as exc:
logger.debug("%s auto-prune skipped: %s", label, exc)
def _check_vault_size() -> None:
"""Warn if the memory vault exceeds the configured size limit."""
try:
vault_path = Path(settings.repo_root) / "memory" / "notes"
if vault_path.exists():
total_bytes = sum(f.stat().st_size for f in vault_path.rglob("*") if f.is_file())
total_mb = total_bytes / (1024 * 1024)
if total_mb > settings.memory_vault_max_mb:
logger.warning(
"Memory vault (%.1f MB) exceeds limit (%d MB) — consider archiving old notes",
total_mb,
settings.memory_vault_max_mb,
)
except Exception as exc:
logger.debug("Vault size check skipped: %s", exc)
def _startup_pruning() -> None:
"""Auto-prune old memories, thoughts, and events on startup."""
if settings.memory_prune_days > 0:
from timmy.memory_system import prune_memories
try:
from timmy.memory_system import prune_memories
_try_prune(
"Memory",
lambda: prune_memories(
pruned = prune_memories(
older_than_days=settings.memory_prune_days,
keep_facts=settings.memory_prune_keep_facts,
),
settings.memory_prune_days,
)
)
if pruned:
logger.info(
"Memory auto-prune: removed %d entries older than %d days",
pruned,
settings.memory_prune_days,
)
except Exception as exc:
logger.debug("Memory auto-prune skipped: %s", exc)
if settings.thoughts_prune_days > 0:
from timmy.thinking import thinking_engine
try:
from timmy.thinking import thinking_engine
_try_prune(
"Thought",
lambda: thinking_engine.prune_old_thoughts(
pruned = thinking_engine.prune_old_thoughts(
keep_days=settings.thoughts_prune_days,
keep_min=settings.thoughts_prune_keep_min,
),
settings.thoughts_prune_days,
)
)
if pruned:
logger.info(
"Thought auto-prune: removed %d entries older than %d days",
pruned,
settings.thoughts_prune_days,
)
except Exception as exc:
logger.debug("Thought auto-prune skipped: %s", exc)
if settings.events_prune_days > 0:
from swarm.event_log import prune_old_events
try:
from swarm.event_log import prune_old_events
_try_prune(
"Event",
lambda: prune_old_events(
pruned = prune_old_events(
keep_days=settings.events_prune_days,
keep_min=settings.events_prune_keep_min,
),
settings.events_prune_days,
)
)
if pruned:
logger.info(
"Event auto-prune: removed %d entries older than %d days",
pruned,
settings.events_prune_days,
)
except Exception as exc:
logger.debug("Event auto-prune skipped: %s", exc)
if settings.memory_vault_max_mb > 0:
_check_vault_size()
try:
vault_path = Path(settings.repo_root) / "memory" / "notes"
if vault_path.exists():
total_bytes = sum(f.stat().st_size for f in vault_path.rglob("*") if f.is_file())
total_mb = total_bytes / (1024 * 1024)
if total_mb > settings.memory_vault_max_mb:
logger.warning(
"Memory vault (%.1f MB) exceeds limit (%d MB) — consider archiving old notes",
total_mb,
settings.memory_vault_max_mb,
)
except Exception as exc:
logger.debug("Vault size check skipped: %s", exc)
async def _shutdown_cleanup(

View File

@@ -175,12 +175,18 @@ class CSRFMiddleware(BaseHTTPMiddleware):
return await call_next(request)
# Token validation failed and path is not exempt
# Resolve the endpoint from routes BEFORE executing to avoid side effects
endpoint = self._resolve_endpoint(request)
if endpoint and is_csrf_exempt(endpoint):
return await call_next(request)
# We still need to call the app to check if the endpoint is decorated
# with @csrf_exempt, so we'll let it through and check after routing
response = await call_next(request)
# Endpoint is not exempt and token validation failed — reject without executing
# After routing, check if the endpoint is marked as exempt
endpoint = request.scope.get("endpoint")
if endpoint and is_csrf_exempt(endpoint):
# Endpoint is marked as exempt, allow the response
return response
# Endpoint is not exempt and token validation failed
# Return 403 error
return JSONResponse(
status_code=403,
content={
@@ -190,42 +196,6 @@ class CSRFMiddleware(BaseHTTPMiddleware):
},
)
def _resolve_endpoint(self, request: Request) -> Callable | None:
"""Resolve the endpoint for a request without executing it.
Walks the app chain to find routes, then matches against the request
scope. This allows checking @csrf_exempt before the handler runs
(avoiding side effects on CSRF rejection).
Returns:
The endpoint callable if found, None otherwise.
"""
try:
from starlette.routing import Match
# Walk the middleware/app chain to find something with routes
routes = None
current = self.app
for _ in range(10): # Safety limit
routes = getattr(current, "routes", None)
if routes:
break
current = getattr(current, "app", None)
if current is None:
break
if not routes:
return None
scope = dict(request.scope)
for route in routes:
match, child_scope = route.matches(scope)
if match == Match.FULL:
return child_scope.get("endpoint")
except Exception:
logger.debug("Failed to resolve endpoint for CSRF check")
return None
def _is_likely_exempt(self, path: str) -> bool:
"""Check if a path is likely to be CSRF exempt.

View File

@@ -1,4 +1,4 @@
from datetime import UTC, date, datetime
from datetime import date, datetime
from enum import StrEnum
from sqlalchemy import JSON, Boolean, Column, Date, DateTime, Index, Integer, String
@@ -40,13 +40,8 @@ class Task(Base):
deferred_at = Column(DateTime, nullable=True)
# Timestamps
created_at = Column(DateTime, default=lambda: datetime.now(UTC), nullable=False)
updated_at = Column(
DateTime,
default=lambda: datetime.now(UTC),
onupdate=lambda: datetime.now(UTC),
nullable=False,
)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)
__table_args__ = (Index("ix_task_state_order", "state", "sort_order"),)
@@ -64,4 +59,4 @@ class JournalEntry(Base):
gratitude = Column(String(500), nullable=True)
energy_level = Column(Integer, nullable=True) # User-reported, 1-10
created_at = Column(DateTime, default=lambda: datetime.now(UTC), nullable=False)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

View File

@@ -1,5 +1,5 @@
import logging
from datetime import UTC, date, datetime
from datetime import date, datetime
from fastapi import APIRouter, Depends, Form, HTTPException, Request
from fastapi.responses import HTMLResponse
@@ -226,7 +226,7 @@ async def post_evening_ritual(
)
for task in active_tasks:
task.state = TaskState.DEFERRED # Or DONE, depending on desired archiving logic
task.deferred_at = datetime.now(UTC)
task.deferred_at = datetime.utcnow()
db.add(task)
db.commit()
@@ -279,7 +279,7 @@ async def start_task(
raise HTTPException(status_code=404, detail="Task not found")
task.state = TaskState.NOW
task.started_at = datetime.now(UTC)
task.started_at = datetime.utcnow()
db.add(task)
db.commit()
@@ -309,7 +309,7 @@ async def complete_task(
raise HTTPException(status_code=404, detail="Task not found")
task.state = TaskState.DONE
task.completed_at = datetime.now(UTC)
task.completed_at = datetime.utcnow()
db.add(task)
db.commit()
@@ -338,7 +338,7 @@ async def defer_task(
raise HTTPException(status_code=404, detail="Task not found")
task.state = TaskState.DEFERRED
task.deferred_at = datetime.now(UTC)
task.deferred_at = datetime.utcnow()
db.add(task)
db.commit()

View File

@@ -5,7 +5,7 @@ import sqlite3
import uuid
from collections.abc import Generator
from contextlib import closing, contextmanager
from datetime import UTC, datetime
from datetime import datetime
from pathlib import Path
from fastapi import APIRouter, Form, HTTPException, Request
@@ -219,7 +219,7 @@ async def create_task_form(
raise HTTPException(status_code=400, detail="Task title cannot be empty")
task_id = str(uuid.uuid4())
now = datetime.now(UTC).isoformat()
now = datetime.utcnow().isoformat()
priority = priority if priority in VALID_PRIORITIES else "normal"
with _get_db() as db:
@@ -287,7 +287,7 @@ async def modify_task(
async def _set_status(request: Request, task_id: str, new_status: str):
"""Helper to update status and return refreshed task card."""
completed_at = (
datetime.now(UTC).isoformat() if new_status in ("completed", "vetoed", "failed") else None
datetime.utcnow().isoformat() if new_status in ("completed", "vetoed", "failed") else None
)
with _get_db() as db:
db.execute(
@@ -316,7 +316,7 @@ async def api_create_task(request: Request):
raise HTTPException(422, "title is required")
task_id = str(uuid.uuid4())
now = datetime.now(UTC).isoformat()
now = datetime.utcnow().isoformat()
priority = body.get("priority", "normal")
if priority not in VALID_PRIORITIES:
priority = "normal"
@@ -358,7 +358,7 @@ async def api_update_status(task_id: str, request: Request):
raise HTTPException(422, f"Invalid status. Must be one of: {VALID_STATUSES}")
completed_at = (
datetime.now(UTC).isoformat() if new_status in ("completed", "vetoed", "failed") else None
datetime.utcnow().isoformat() if new_status in ("completed", "vetoed", "failed") else None
)
with _get_db() as db:
db.execute(

View File

@@ -5,7 +5,7 @@ import sqlite3
import uuid
from collections.abc import Generator
from contextlib import closing, contextmanager
from datetime import UTC, datetime
from datetime import datetime
from pathlib import Path
from fastapi import APIRouter, Form, HTTPException, Request
@@ -144,7 +144,7 @@ async def submit_work_order(
related_files: str = Form(""),
):
wo_id = str(uuid.uuid4())
now = datetime.now(UTC).isoformat()
now = datetime.utcnow().isoformat()
priority = priority if priority in PRIORITIES else "medium"
category = category if category in CATEGORIES else "suggestion"
@@ -211,7 +211,7 @@ async def active_partial(request: Request):
async def _update_status(request: Request, wo_id: str, new_status: str, **extra):
completed_at = (
datetime.now(UTC).isoformat() if new_status in ("completed", "rejected") else None
datetime.utcnow().isoformat() if new_status in ("completed", "rejected") else None
)
with _get_db() as db:
sets = ["status=?", "completed_at=COALESCE(?, completed_at)"]

View File

@@ -149,52 +149,6 @@ def _log_error_event(
logger.debug("Failed to log error event: %s", log_exc)
def _build_report_description(
exc: Exception,
source: str,
context: dict | None,
error_hash: str,
tb_str: str,
affected_file: str,
affected_line: int,
git_ctx: dict,
) -> str:
"""Build the markdown description for a bug report task."""
parts = [
f"**Error:** {type(exc).__name__}: {str(exc)}",
f"**Source:** {source}",
f"**File:** {affected_file}:{affected_line}",
f"**Git:** {git_ctx.get('branch', '?')} @ {git_ctx.get('commit', '?')}",
f"**Time:** {datetime.now(UTC).isoformat()}",
f"**Hash:** {error_hash}",
]
if context:
ctx_str = ", ".join(f"{k}={v}" for k, v in context.items())
parts.append(f"**Context:** {ctx_str}")
parts.append(f"\n**Stack Trace:**\n```\n{tb_str[:2000]}\n```")
return "\n".join(parts)
def _log_bug_report_created(source: str, task_id: str, error_hash: str, title: str) -> None:
"""Log a BUG_REPORT_CREATED event (best-effort)."""
try:
from swarm.event_log import EventType, log_event
log_event(
EventType.BUG_REPORT_CREATED,
source=source,
task_id=task_id,
data={
"error_hash": error_hash,
"title": title[:100],
},
)
except Exception as exc:
logger.warning("Bug report event log error: %s", exc)
def _create_bug_report(
exc: Exception,
source: str,
@@ -210,20 +164,25 @@ def _create_bug_report(
from swarm.task_queue.models import create_task
title = f"[BUG] {type(exc).__name__}: {str(exc)[:80]}"
description = _build_report_description(
exc,
source,
context,
error_hash,
tb_str,
affected_file,
affected_line,
git_ctx,
)
description_parts = [
f"**Error:** {type(exc).__name__}: {str(exc)}",
f"**Source:** {source}",
f"**File:** {affected_file}:{affected_line}",
f"**Git:** {git_ctx.get('branch', '?')} @ {git_ctx.get('commit', '?')}",
f"**Time:** {datetime.now(UTC).isoformat()}",
f"**Hash:** {error_hash}",
]
if context:
ctx_str = ", ".join(f"{k}={v}" for k, v in context.items())
description_parts.append(f"**Context:** {ctx_str}")
description_parts.append(f"\n**Stack Trace:**\n```\n{tb_str[:2000]}\n```")
task = create_task(
title=title,
description=description,
description="\n".join(description_parts),
assigned_to="default",
created_by="system",
priority="normal",
@@ -231,9 +190,24 @@ def _create_bug_report(
auto_approve=True,
task_type="bug_report",
)
task_id = task.id
_log_bug_report_created(source, task.id, error_hash, title)
return task.id
try:
from swarm.event_log import EventType, log_event
log_event(
EventType.BUG_REPORT_CREATED,
source=source,
task_id=task_id,
data={
"error_hash": error_hash,
"title": title[:100],
},
)
except Exception as exc:
logger.warning("Bug report screenshot error: %s", exc)
return task_id
except Exception as task_exc:
logger.debug("Failed to create bug report task: %s", task_exc)

View File

@@ -64,7 +64,7 @@ class EventBus:
@bus.subscribe("agent.task.*")
async def handle_task(event: Event):
logger.debug("Task event: %s", event.data)
logger.debug(f"Task event: {event.data}")
await bus.publish(Event(
type="agent.task.assigned",

View File

@@ -221,56 +221,65 @@ class CascadeRouter:
raise RuntimeError("PyYAML not installed")
content = self.config_path.read_text()
# Expand environment variables
content = self._expand_env_vars(content)
data = yaml.safe_load(content)
self.config = self._parse_router_config(data)
self._load_providers(data)
# Load cascade settings
cascade = data.get("cascade", {})
# Load fallback chains
fallback_chains = data.get("fallback_chains", {})
# Load multi-modal settings
multimodal = data.get("multimodal", {})
self.config = RouterConfig(
timeout_seconds=cascade.get("timeout_seconds", 30),
max_retries_per_provider=cascade.get("max_retries_per_provider", 2),
retry_delay_seconds=cascade.get("retry_delay_seconds", 1),
circuit_breaker_failure_threshold=cascade.get("circuit_breaker", {}).get(
"failure_threshold", 5
),
circuit_breaker_recovery_timeout=cascade.get("circuit_breaker", {}).get(
"recovery_timeout", 60
),
circuit_breaker_half_open_max_calls=cascade.get("circuit_breaker", {}).get(
"half_open_max_calls", 2
),
auto_pull_models=multimodal.get("auto_pull", True),
fallback_chains=fallback_chains,
)
# Load providers
for p_data in data.get("providers", []):
# Skip disabled providers
if not p_data.get("enabled", False):
continue
provider = Provider(
name=p_data["name"],
type=p_data["type"],
enabled=p_data.get("enabled", True),
priority=p_data.get("priority", 99),
url=p_data.get("url"),
api_key=p_data.get("api_key"),
base_url=p_data.get("base_url"),
models=p_data.get("models", []),
)
# Check if provider is actually available
if self._check_provider_available(provider):
self.providers.append(provider)
else:
logger.warning("Provider %s not available, skipping", provider.name)
# Sort by priority
self.providers.sort(key=lambda p: p.priority)
except Exception as exc:
logger.error("Failed to load config: %s", exc)
def _parse_router_config(self, data: dict) -> RouterConfig:
"""Build a RouterConfig from parsed YAML data."""
cascade = data.get("cascade", {})
cb = cascade.get("circuit_breaker", {})
multimodal = data.get("multimodal", {})
return RouterConfig(
timeout_seconds=cascade.get("timeout_seconds", 30),
max_retries_per_provider=cascade.get("max_retries_per_provider", 2),
retry_delay_seconds=cascade.get("retry_delay_seconds", 1),
circuit_breaker_failure_threshold=cb.get("failure_threshold", 5),
circuit_breaker_recovery_timeout=cb.get("recovery_timeout", 60),
circuit_breaker_half_open_max_calls=cb.get("half_open_max_calls", 2),
auto_pull_models=multimodal.get("auto_pull", True),
fallback_chains=data.get("fallback_chains", {}),
)
def _load_providers(self, data: dict) -> None:
"""Load, filter, and sort providers from parsed YAML data."""
for p_data in data.get("providers", []):
if not p_data.get("enabled", False):
continue
provider = Provider(
name=p_data["name"],
type=p_data["type"],
enabled=p_data.get("enabled", True),
priority=p_data.get("priority", 99),
url=p_data.get("url"),
api_key=p_data.get("api_key"),
base_url=p_data.get("base_url"),
models=p_data.get("models", []),
)
if self._check_provider_available(provider):
self.providers.append(provider)
else:
logger.warning("Provider %s not available, skipping", provider.name)
self.providers.sort(key=lambda p: p.priority)
def _expand_env_vars(self, content: str) -> str:
"""Expand ${VAR} syntax in YAML content.
@@ -759,7 +768,7 @@ class CascadeRouter:
client = openai.AsyncOpenAI(
api_key=provider.api_key,
base_url=provider.base_url or settings.xai_base_url,
base_url=provider.base_url or "https://api.x.ai/v1",
timeout=httpx.Timeout(300.0),
)

View File

@@ -99,27 +99,23 @@ class GrokBackend:
def _get_client(self):
"""Create OpenAI client configured for xAI endpoint."""
from config import settings
import httpx
from openai import OpenAI
return OpenAI(
api_key=self._api_key,
base_url=settings.xai_base_url,
base_url="https://api.x.ai/v1",
timeout=httpx.Timeout(300.0),
)
async def _get_async_client(self):
"""Create async OpenAI client configured for xAI endpoint."""
from config import settings
import httpx
from openai import AsyncOpenAI
return AsyncOpenAI(
api_key=self._api_key,
base_url=settings.xai_base_url,
base_url="https://api.x.ai/v1",
timeout=httpx.Timeout(300.0),
)

View File

@@ -30,7 +30,7 @@ import shutil
import sqlite3
import uuid
from contextlib import closing
from datetime import UTC, datetime
from datetime import datetime
from pathlib import Path
import httpx
@@ -196,7 +196,7 @@ def _bridge_to_work_order(title: str, body: str, category: str) -> None:
body,
category,
"timmy-thinking",
datetime.now(UTC).isoformat(),
datetime.utcnow().isoformat(),
),
)
conn.commit()
@@ -204,61 +204,15 @@ def _bridge_to_work_order(title: str, body: str, category: str) -> None:
logger.debug("Work order bridge failed: %s", exc)
async def _ensure_issue_session():
"""Get or create the cached MCP session, connecting if needed.
Returns the connected ``MCPTools`` instance.
"""
from agno.tools.mcp import MCPTools
global _issue_session
if _issue_session is None:
_issue_session = MCPTools(
server_params=_gitea_server_params(),
timeout_seconds=settings.mcp_timeout,
)
if not getattr(_issue_session, "_connected", False):
await _issue_session.connect()
_issue_session._connected = True
return _issue_session
def _build_issue_body(body: str) -> str:
"""Append the auto-filing signature to the issue body."""
full_body = body
if full_body:
full_body += "\n\n"
full_body += "---\n*Auto-filed by Timmy's thinking engine*"
return full_body
def _build_issue_args(title: str, full_body: str) -> dict:
"""Build MCP tool arguments for ``issue_write`` with method=create."""
owner, repo = settings.gitea_repo.split("/", 1)
return {
"method": "create",
"owner": owner,
"repo": repo,
"title": title,
"body": full_body,
}
def _category_from_labels(labels: str) -> str:
"""Derive a work-order category from comma-separated label names."""
label_list = [tag.strip() for tag in labels.split(",") if tag.strip()] if labels else []
return "bug" if "bug" in label_list else "suggestion"
async def create_gitea_issue_via_mcp(title: str, body: str = "", labels: str = "") -> str:
"""File a Gitea issue via the MCP server (standalone, no LLM loop).
Used by the thinking engine's ``_maybe_file_issues()`` post-hook.
Manages its own MCPTools session with lazy connect + graceful failure.
Uses ``tools.session.call_tool()`` for direct MCP invocation — the
``MCPTools`` wrapper itself does not expose ``call_tool()``.
Args:
title: Issue title.
body: Issue body (markdown).
@@ -271,13 +225,46 @@ async def create_gitea_issue_via_mcp(title: str, body: str = "", labels: str = "
return "Gitea integration is not configured."
try:
session = await _ensure_issue_session()
full_body = _build_issue_body(body)
args = _build_issue_args(title, full_body)
from agno.tools.mcp import MCPTools
result = await session.session.call_tool("issue_write", arguments=args)
global _issue_session
_bridge_to_work_order(title, body, _category_from_labels(labels))
if _issue_session is None:
_issue_session = MCPTools(
server_params=_gitea_server_params(),
timeout_seconds=settings.mcp_timeout,
)
# Ensure connected
if not getattr(_issue_session, "_connected", False):
await _issue_session.connect()
_issue_session._connected = True
# Append auto-filing signature
full_body = body
if full_body:
full_body += "\n\n"
full_body += "---\n*Auto-filed by Timmy's thinking engine*"
# Parse owner/repo from settings
owner, repo = settings.gitea_repo.split("/", 1)
# Build tool arguments — gitea-mcp uses issue_write with method="create"
args = {
"method": "create",
"owner": owner,
"repo": repo,
"title": title,
"body": full_body,
}
# Call via the underlying MCP session (MCPTools doesn't expose call_tool)
result = await _issue_session.session.call_tool("issue_write", arguments=args)
# Bridge to local work order
label_list = [tag.strip() for tag in labels.split(",") if tag.strip()] if labels else []
category = "bug" if "bug" in label_list else "suggestion"
_bridge_to_work_order(title, body, category)
logger.info("Created Gitea issue via MCP: %s", title[:60])
return f"Created issue: {title}\n{result}"

View File

@@ -46,64 +46,6 @@ DB_PATH = PROJECT_ROOT / "data" / "memory.db"
# ───────────────────────────────────────────────────────────────────────────────
_DEFAULT_HOT_MEMORY_TEMPLATE = """\
# Timmy Hot Memory
> Working RAM — always loaded, ~300 lines max, pruned monthly
> Last updated: {date}
---
## Current Status
**Agent State:** Operational
**Mode:** Development
**Active Tasks:** 0
**Pending Decisions:** None
---
## Standing Rules
1. **Sovereignty First** — No cloud dependencies
2. **Local-Only Inference** — Ollama on localhost
3. **Privacy by Design** — Telemetry disabled
4. **Tool Minimalism** — Use tools only when necessary
5. **Memory Discipline** — Write handoffs at session end
---
## Agent Roster
| Agent | Role | Status |
|-------|------|--------|
| Timmy | Core | Active |
---
## User Profile
**Name:** (not set)
**Interests:** (to be learned)
---
## Key Decisions
(none yet)
---
## Pending Actions
- [ ] Learn user's name
---
*Prune date: {prune_date}*
"""
@contextmanager
def get_connection() -> Generator[sqlite3.Connection, None, None]:
"""Get database connection to unified memory database."""
@@ -790,12 +732,66 @@ class HotMemory:
logger.debug(
"HotMemory._create_default() - creating default MEMORY.md for backward compatibility"
)
now = datetime.now(UTC)
content = _DEFAULT_HOT_MEMORY_TEMPLATE.format(
date=now.strftime("%Y-%m-%d"),
prune_date=now.replace(day=25).strftime("%Y-%m-%d"),
default_content = """# Timmy Hot Memory
> Working RAM — always loaded, ~300 lines max, pruned monthly
> Last updated: {date}
---
## Current Status
**Agent State:** Operational
**Mode:** Development
**Active Tasks:** 0
**Pending Decisions:** None
---
## Standing Rules
1. **Sovereignty First** — No cloud dependencies
2. **Local-Only Inference** — Ollama on localhost
3. **Privacy by Design** — Telemetry disabled
4. **Tool Minimalism** — Use tools only when necessary
5. **Memory Discipline** — Write handoffs at session end
---
## Agent Roster
| Agent | Role | Status |
|-------|------|--------|
| Timmy | Core | Active |
---
## User Profile
**Name:** (not set)
**Interests:** (to be learned)
---
## Key Decisions
(none yet)
---
## Pending Actions
- [ ] Learn user's name
---
*Prune date: {prune_date}*
""".format(
date=datetime.now(UTC).strftime("%Y-%m-%d"),
prune_date=(datetime.now(UTC).replace(day=25)).strftime("%Y-%m-%d"),
)
self.path.write_text(content)
self.path.write_text(default_content)
logger.info("HotMemory: Created default MEMORY.md")

View File

@@ -392,26 +392,31 @@ def _build_insights(
return insights or ["Conversations look healthy. Keep up the good work."]
def _format_recurring_topics(repeated: list[tuple[str, int]]) -> list[str]:
"""Format the recurring-topics section of a reflection report."""
if repeated:
lines = ["### Recurring Topics"]
for word, count in repeated:
lines.append(f'- "{word}" ({count} mentions)')
lines.append("")
return lines
return ["### Recurring Topics\nNo strong patterns detected.\n"]
def self_reflect(limit: int = 30) -> str:
"""Review recent conversations and reflect on Timmy's own behavior.
Scans past session entries for patterns: low-confidence responses,
errors, repeated topics, and conversation quality signals. Returns
a structured reflection that Timmy can use to improve.
def _assemble_report(
entries: list[dict],
errors: list[dict],
timmy_msgs: list[dict],
user_msgs: list[dict],
low_conf: list[dict],
repeated: list[tuple[str, int]],
) -> str:
"""Assemble the full self-reflection report from analyzed data."""
Args:
limit: How many recent entries to review (default 30).
Returns:
A formatted self-reflection report.
"""
sl = get_session_logger()
sl.flush()
entries = sl.get_recent_entries(limit=limit)
if not entries:
return "No conversation history to reflect on yet."
_messages, errors, timmy_msgs, user_msgs = _categorize_entries(entries)
low_conf = _find_low_confidence(timmy_msgs)
repeated = _find_repeated_topics(user_msgs)
# Build reflection report
sections: list[str] = ["## Self-Reflection Report\n"]
sections.append(
f"Reviewed {len(entries)} recent entries: "
@@ -441,37 +446,16 @@ def _assemble_report(
)
)
sections.extend(_format_recurring_topics(repeated))
if repeated:
sections.append("### Recurring Topics")
for word, count in repeated:
sections.append(f'- "{word}" ({count} mentions)')
sections.append("")
else:
sections.append("### Recurring Topics\nNo strong patterns detected.\n")
sections.append("### Insights")
for insight in _build_insights(low_conf, errors, repeated):
sections.append(f"- {insight}")
return "\n".join(sections)
def self_reflect(limit: int = 30) -> str:
"""Review recent conversations and reflect on Timmy's own behavior.
Scans past session entries for patterns: low-confidence responses,
errors, repeated topics, and conversation quality signals. Returns
a structured reflection that Timmy can use to improve.
Args:
limit: How many recent entries to review (default 30).
Returns:
A formatted self-reflection report.
"""
sl = get_session_logger()
sl.flush()
entries = sl.get_recent_entries(limit=limit)
if not entries:
return "No conversation history to reflect on yet."
_messages, errors, timmy_msgs, user_msgs = _categorize_entries(entries)
low_conf = _find_low_confidence(timmy_msgs)
repeated = _find_repeated_topics(user_msgs)
return _assemble_report(entries, errors, timmy_msgs, user_msgs, low_conf, repeated)

View File

@@ -90,14 +90,17 @@ def list_swarm_agents() -> dict[str, Any]:
def _find_kimi_cli() -> str | None:
"""Return the path to the kimi CLI binary, or None if not installed."""
"""Return the path to the kimi CLI, or None if not installed."""
import shutil
return shutil.which("kimi")
def _resolve_workdir(working_directory: str) -> str | dict[str, Any]:
"""Return a validated working directory path, or an error dict."""
"""Resolve and validate the working directory.
Returns the resolved path string, or an error dict if invalid.
"""
from pathlib import Path
from config import settings
@@ -120,7 +123,7 @@ def _run_kimi(cmd: list[str], workdir: str) -> dict[str, Any]:
cmd,
capture_output=True,
text=True,
timeout=300, # 5 minute timeout for coding tasks
timeout=300,
cwd=workdir,
)

View File

@@ -86,40 +86,6 @@ def _pip_snapshot(mood: str, confidence: float) -> dict:
return pip_familiar.snapshot().to_dict()
def _resolve_mood(state) -> str:
"""Map cognitive mood/engagement to a presence mood string."""
if state.engagement == "idle" and state.mood == "settled":
return "calm"
return _MOOD_MAP.get(state.mood, "calm")
def _resolve_confidence(state) -> float:
"""Compute normalised confidence from cognitive tracker state."""
if state._confidence_count > 0:
raw = state._confidence_sum / state._confidence_count
else:
raw = 0.7
return round(max(0.0, min(1.0, raw)), 2)
def _build_active_threads(state) -> list[dict]:
"""Convert active commitments into presence thread dicts."""
return [
{"type": "thinking", "ref": c[:80], "status": "active"}
for c in state.active_commitments[:10]
]
def _build_environment() -> dict:
"""Return the environment section using local wall-clock time."""
local_now = datetime.now()
return {
"time_of_day": _time_of_day(local_now.hour),
"local_time": local_now.strftime("%-I:%M %p"),
"day_of_week": local_now.strftime("%A"),
}
def get_state_dict() -> dict:
"""Build presence state dict from current cognitive state.
@@ -132,19 +98,37 @@ def get_state_dict() -> dict:
state = cognitive_tracker.get_state()
now = datetime.now(UTC)
mood = _resolve_mood(state)
confidence = _resolve_confidence(state)
# Map cognitive mood to presence mood
mood = _MOOD_MAP.get(state.mood, "calm")
if state.engagement == "idle" and state.mood == "settled":
mood = "calm"
# Confidence from cognitive tracker
if state._confidence_count > 0:
confidence = state._confidence_sum / state._confidence_count
else:
confidence = 0.7
# Build active threads from commitments
threads = []
for commitment in state.active_commitments[:10]:
threads.append({"type": "thinking", "ref": commitment[:80], "status": "active"})
# Activity
activity = _ACTIVITY_MAP.get(state.engagement, "idle")
# Environment
local_now = datetime.now()
return {
"version": 1,
"liveness": now.strftime("%Y-%m-%dT%H:%M:%SZ"),
"current_focus": state.focus_topic or "",
"active_threads": _build_active_threads(state),
"active_threads": threads,
"recent_events": [],
"concerns": [],
"mood": mood,
"confidence": confidence,
"confidence": round(max(0.0, min(1.0, confidence)), 2),
"energy": round(_current_energy(), 2),
"identity": {
"name": "Timmy",
@@ -159,7 +143,11 @@ def get_state_dict() -> dict:
"visitor_present": False,
"conversation_turns": state.conversation_depth,
},
"environment": _build_environment(),
"environment": {
"time_of_day": _time_of_day(local_now.hour),
"local_time": local_now.strftime("%-I:%M %p"),
"day_of_week": local_now.strftime("%A"),
},
"familiar": _pip_snapshot(mood, confidence),
"meta": {
"schema_version": 1,

View File

@@ -1,100 +0,0 @@
"""Tests that CSRF rejection does NOT execute the endpoint handler.
Regression test for #626: the middleware was calling call_next() before
checking @csrf_exempt, causing side effects even on CSRF-rejected requests.
"""
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from dashboard.middleware.csrf import CSRFMiddleware, csrf_exempt
class TestCSRFNoSideEffects:
"""Verify endpoints are NOT executed when CSRF validation fails."""
@pytest.fixture(autouse=True)
def enable_csrf(self):
"""Re-enable CSRF for these tests."""
from config import settings
original = settings.timmy_disable_csrf
settings.timmy_disable_csrf = False
yield
settings.timmy_disable_csrf = original
def test_protected_endpoint_not_executed_on_csrf_failure(self):
"""A protected endpoint must NOT run when CSRF token is missing.
Before the fix, the middleware called call_next() to resolve the
endpoint, executing its side effects before returning 403.
"""
app = FastAPI()
app.add_middleware(CSRFMiddleware)
side_effect_log = []
@app.post("/transfer")
def transfer_money():
side_effect_log.append("money_transferred")
return {"message": "transferred"}
client = TestClient(app)
response = client.post("/transfer")
assert response.status_code == 403
assert side_effect_log == [], (
"Endpoint was executed despite CSRF failure — side effects occurred!"
)
def test_csrf_exempt_endpoint_still_executes(self):
"""A @csrf_exempt endpoint should still execute without a CSRF token."""
app = FastAPI()
app.add_middleware(CSRFMiddleware)
side_effect_log = []
@app.post("/webhook-handler")
@csrf_exempt
def webhook_handler():
side_effect_log.append("webhook_processed")
return {"message": "processed"}
client = TestClient(app)
response = client.post("/webhook-handler")
assert response.status_code == 200
assert side_effect_log == ["webhook_processed"]
def test_exempt_and_protected_no_cross_contamination(self):
"""Mixed exempt/protected: only exempt endpoints execute without tokens."""
app = FastAPI()
app.add_middleware(CSRFMiddleware)
execution_log = []
@app.post("/safe-webhook")
@csrf_exempt
def safe_webhook():
execution_log.append("safe")
return {"message": "safe"}
@app.post("/dangerous-action")
def dangerous_action():
execution_log.append("dangerous")
return {"message": "danger"}
client = TestClient(app)
# Exempt endpoint runs
resp1 = client.post("/safe-webhook")
assert resp1.status_code == 200
# Protected endpoint blocked WITHOUT executing
resp2 = client.post("/dangerous-action")
assert resp2.status_code == 403
assert execution_log == ["safe"], (
f"Expected only 'safe' execution, got: {execution_log}"
)

View File

@@ -5,13 +5,11 @@ from datetime import UTC, datetime, timedelta
from unittest.mock import patch
from infrastructure.error_capture import (
_build_report_description,
_create_bug_report,
_dedup_cache,
_extract_traceback_info,
_get_git_context,
_is_duplicate,
_log_bug_report_created,
_log_error_event,
_notify_bug_report,
_record_to_session,
@@ -233,68 +231,6 @@ class TestLogErrorEvent:
_log_error_event(e, "test", "abc123", "file.py", 42, {"branch": "main"})
class TestBuildReportDescription:
"""Test _build_report_description helper."""
def test_includes_error_info(self):
try:
raise RuntimeError("desc test")
except RuntimeError as e:
desc = _build_report_description(
e,
"test_src",
None,
"hash1",
"tb...",
"file.py",
10,
{"branch": "main"},
)
assert "RuntimeError" in desc
assert "test_src" in desc
assert "file.py:10" in desc
assert "hash1" in desc
def test_includes_context_when_provided(self):
try:
raise RuntimeError("ctx desc")
except RuntimeError as e:
desc = _build_report_description(
e,
"src",
{"path": "/api"},
"h",
"tb",
"f.py",
1,
{},
)
assert "path=/api" in desc
def test_omits_context_when_none(self):
try:
raise RuntimeError("no ctx")
except RuntimeError as e:
desc = _build_report_description(
e,
"src",
None,
"h",
"tb",
"f.py",
1,
{},
)
assert "**Context:**" not in desc
class TestLogBugReportCreated:
"""Test _log_bug_report_created helper."""
def test_does_not_crash_on_missing_deps(self):
_log_bug_report_created("test", "task-1", "hash1", "title")
class TestCreateBugReport:
"""Test _create_bug_report helper."""

View File

@@ -6,9 +6,6 @@ import pytest
from timmy.mcp_tools import (
_bridge_to_work_order,
_build_issue_args,
_build_issue_body,
_category_from_labels,
_generate_avatar_image,
_parse_command,
close_mcp_sessions,
@@ -135,49 +132,6 @@ def test_filesystem_mcp_returns_tools():
assert "/home/user/project" in params_kwargs["args"]
# ---------------------------------------------------------------------------
# _build_issue_body / _build_issue_args / _category_from_labels
# ---------------------------------------------------------------------------
def test_build_issue_body_appends_signature():
"""_build_issue_body appends the auto-filing signature."""
result = _build_issue_body("Some description")
assert result.startswith("Some description\n\n")
assert "Auto-filed by Timmy" in result
def test_build_issue_body_empty():
"""_build_issue_body handles empty body."""
result = _build_issue_body("")
assert result.startswith("---\n")
def test_build_issue_args():
"""_build_issue_args returns correct MCP arguments."""
with patch("timmy.mcp_tools.settings") as mock_settings:
mock_settings.gitea_repo = "owner/repo"
result = _build_issue_args("Title", "Body")
assert result == {
"method": "create",
"owner": "owner",
"repo": "repo",
"title": "Title",
"body": "Body",
}
def test_category_from_labels_bug():
"""_category_from_labels returns 'bug' when labels contain bug."""
assert _category_from_labels("bug, enhancement") == "bug"
def test_category_from_labels_default():
"""_category_from_labels returns 'suggestion' by default."""
assert _category_from_labels("enhancement") == "suggestion"
assert _category_from_labels("") == "suggestion"
# ---------------------------------------------------------------------------
# create_gitea_issue_via_mcp
# ---------------------------------------------------------------------------