diff --git a/.dockerignore b/.dockerignore
index e7b8b11..930540d 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -33,5 +33,10 @@ tests/
docs/
*.md
+# ── Deploy configs (not needed inside image) ──────────────────────────────────
+deploy/
+docker-compose*.yml
+Makefile
+
# ── macOS ─────────────────────────────────────────────────────────────────────
.DS_Store
diff --git a/.env.example b/.env.example
index 866f447..a58439f 100644
--- a/.env.example
+++ b/.env.example
@@ -1,9 +1,17 @@
# Timmy Time — Mission Control
# Copy this file to .env and uncomment lines you want to override.
# .env is gitignored and never committed.
+#
+# For cloud deployment, deploy/setup.sh generates this automatically.
+
+# ── Cloud / Production ──────────────────────────────────────────────────────
+# Your domain for automatic HTTPS via Let's Encrypt.
+# Set to your actual domain (e.g., timmy.example.com) for HTTPS.
+# Leave as "localhost" for IP-only HTTP access.
+# DOMAIN=localhost
# Ollama host (default: http://localhost:11434)
-# Override if Ollama is running on another machine or port.
+# In production (docker-compose.prod.yml), this is set to http://ollama:11434 automatically.
# OLLAMA_URL=http://localhost:11434
# LLM model to use via Ollama (default: llama3.2)
@@ -33,8 +41,24 @@
# Lightning backend: "mock" (default) | "lnd"
# LIGHTNING_BACKEND=mock
+# ── Environment & Privacy ───────────────────────────────────────────────────
+# Environment mode: "development" (default) | "production"
+# In production, security secrets MUST be set or the app will refuse to start.
+# TIMMY_ENV=development
+
+# Disable Agno telemetry for sovereign/air-gapped deployments.
+# Default is false (disabled) to align with the local-first AI vision.
+# TELEMETRY_ENABLED=false
+
# ── Telegram bot ──────────────────────────────────────────────────────────────
# Bot token from @BotFather on Telegram.
# Alternatively, configure via the /telegram/setup dashboard endpoint at runtime.
# Requires: pip install ".[telegram]"
# TELEGRAM_TOKEN=
+
+# ── Discord bot ──────────────────────────────────────────────────────────────
+# Bot token from https://discord.com/developers/applications
+# Alternatively, configure via the /discord/setup dashboard endpoint at runtime.
+# Requires: pip install ".[discord]"
+# Optional: pip install pyzbar Pillow (for QR code invite detection from screenshots)
+# DISCORD_TOKEN=
diff --git a/.gitignore b/.gitignore
index 29e629f..4423510 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,8 +21,12 @@ env/
# SQLite memory — never commit agent memory
*.db
-# Telegram bot state (contains bot token)
+# Runtime PID files
+.watchdog.pid
+
+# Chat platform state files (contain bot tokens)
telegram_state.json
+discord_state.json
# Testing
.pytest_cache/
@@ -36,5 +40,11 @@ reports/
.vscode/
*.swp
*.swo
-.DS_Store
.claude/
+
+# macOS
+.DS_Store
+.AppleDouble
+.LSOverride
+.Spotlight-V100
+.Trashes
diff --git a/.handoff/CONTINUE.md b/.handoff/CONTINUE.md
index 56128b3..53a5e6a 100644
--- a/.handoff/CONTINUE.md
+++ b/.handoff/CONTINUE.md
@@ -3,7 +3,7 @@
## Quick Start
```bash
-cd /Users/apayne/Timmy-time-dashboard && cat .handoff/CHECKPOINT.md
+cd Timmy-time-dashboard && cat .handoff/CHECKPOINT.md
```
Then paste this prompt to Kimi:
diff --git a/.handoff/bootstrap.sh b/.handoff/bootstrap.sh
index 7d057d7..49c8d64 100755
--- a/.handoff/bootstrap.sh
+++ b/.handoff/bootstrap.sh
@@ -4,7 +4,7 @@
echo "=== Kimi Handoff Bootstrap ==="
echo ""
-cd /Users/apayne/Timmy-time-dashboard
+cd "$(dirname "$0")/.."
echo "📋 Current Checkpoint:"
cat .handoff/CHECKPOINT.md | head -30
diff --git a/.handoff/resume.sh b/.handoff/resume.sh
index fe5eb44..ede88f9 100755
--- a/.handoff/resume.sh
+++ b/.handoff/resume.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# One-liner to get status and prompt for Kimi
-cd /Users/apayne/Timmy-time-dashboard
+cd "$(dirname "$0")/.."
echo "=== STATUS ==="
git log --oneline -1
@@ -12,7 +12,7 @@ echo ""
echo "=== PROMPT (copy/paste to Kimi) ==="
echo ""
-echo "cd /Users/apayne/Timmy-time-dashboard && cat .handoff/CHECKPOINT.md"
+echo "cd Timmy-time-dashboard && cat .handoff/CHECKPOINT.md"
echo ""
echo "Continue from checkpoint. Read the file above and execute the NEXT TASK from .handoff/TODO.md. Run 'make test' after changes."
echo ""
diff --git a/AGENTS.md b/AGENTS.md
index 7acbbe8..4838713 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -35,10 +35,16 @@ src/
swarm/ # Multi-agent coordinator, registry, bidder, tasks, comms
docker_runner.py # Spawn agents as Docker containers
timmy_serve/ # L402 Lightning proxy, payment handler, TTS, CLI
+ spark/ # Intelligence engine — events, predictions, advisory
+ creative/ # Creative director + video assembler pipeline
+ tools/ # Git, image, music, video tools for persona agents
+ lightning/ # Lightning backend abstraction (mock + LND)
+ agent_core/ # Substrate-agnostic agent interface
voice/ # NLU intent detection (regex-based, no cloud)
- websocket/ # WebSocket manager (ws_manager singleton)
+ ws_manager/ # WebSocket manager (ws_manager singleton)
notifications/ # Push notification store (notifier singleton)
shortcuts/ # Siri Shortcuts API endpoints
+ telegram_bot/ # Telegram bridge
self_tdd/ # Continuous test watchdog
tests/ # One test_*.py per module, all mocked
static/ # style.css + bg.svg (arcane theme)
@@ -250,7 +256,7 @@ runner.stop(info["container_id"])
```python
from dashboard.store import message_log
from notifications.push import notifier
-from websocket.handler import ws_manager
+from ws_manager.handler import ws_manager
from timmy_serve.payment_handler import payment_handler
from swarm.coordinator import coordinator
```
@@ -309,9 +315,9 @@ make docker-agent # add a Local agent worker
**v2.0.0 — Exodus (in progress)**
- [x] Persistent swarm state across restarts
- [x] Docker infrastructure for agent containers
-- [ ] Implement Echo, Mace, Helm, Seer, Forge, Quill persona agents (Dockerised)
+- [x] Implement Echo, Mace, Helm, Seer, Forge, Quill persona agents (+ Pixel, Lyra, Reel)
+- [x] MCP tool integration for Timmy
- [ ] Real LND gRPC backend for `PaymentHandler` (replace mock)
-- [ ] MCP tool integration for Timmy
- [ ] Marketplace frontend — wire `/marketplace` route to real data
**v3.0.0 — Revelation (planned)**
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 0000000..ae0f42e
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,267 @@
+# CLAUDE.md — AI Assistant Guide for Timmy Time
+
+This file provides context for AI assistants (Claude Code, Copilot, etc.)
+working in this repository. Read this before making any changes.
+
+For multi-agent development standards and agent-specific conventions, see
+[`AGENTS.md`](AGENTS.md).
+
+---
+
+## Project Summary
+
+**Timmy Time** is a local-first, sovereign AI agent system with a browser-based
+Mission Control dashboard. No cloud AI — all inference runs on localhost via
+Ollama (or AirLLM for large models). Bitcoin Lightning economics are built in
+for API access gating.
+
+**Tech stack:** Python 3.11+ · FastAPI · Jinja2 + HTMX · SQLite · Agno (agent
+framework) · Ollama · pydantic-settings · WebSockets · Docker
+
+---
+
+## Quick Reference Commands
+
+```bash
+# Setup
+make install # Create venv + install dev deps
+cp .env.example .env # Configure environment
+
+# Development
+make dev # Start dashboard at http://localhost:8000
+make test # Run full test suite (no Ollama needed)
+make test-cov # Tests + coverage report (terminal + XML)
+make lint # Run ruff or flake8
+
+# Docker
+make docker-build # Build timmy-time:latest image
+make docker-up # Start dashboard container
+make docker-agent # Spawn one agent worker
+make docker-down # Stop all containers
+```
+
+---
+
+## Project Layout
+
+```
+src/
+ config.py # Central pydantic-settings (all env vars)
+ timmy/ # Core agent: agent.py, backends.py, cli.py, prompts.py
+ dashboard/ # FastAPI app + routes + Jinja2 templates
+ app.py # App factory, lifespan, router registration
+ store.py # In-memory MessageLog singleton
+ routes/ # One file per route group (agents, health, swarm, etc.)
+ templates/ # base.html + page templates + partials/
+ swarm/ # Multi-agent coordinator, registry, bidder, tasks, comms
+ coordinator.py # Central swarm orchestrator (security-sensitive)
+ docker_runner.py # Spawn agents as Docker containers
+ timmy_serve/ # L402 Lightning proxy, payment handler, TTS, CLI
+ spark/ # Intelligence engine — events, predictions, advisory
+ creative/ # Creative director + video assembler pipeline
+ tools/ # Git, image, music, video tools for persona agents
+ lightning/ # Lightning backend abstraction (mock + LND)
+ agent_core/ # Substrate-agnostic agent interface
+ voice/ # NLU intent detection (regex-based, local)
+ ws_manager/ # WebSocket connection manager (ws_manager singleton)
+ notifications/ # Push notification store (notifier singleton)
+ shortcuts/ # Siri Shortcuts API endpoints
+ telegram_bot/ # Telegram bridge
+ self_tdd/ # Continuous test watchdog
+tests/ # One test_*.py per module, all mocked
+static/ # style.css + bg.svg (dark arcane theme)
+docs/ # GitHub Pages landing site
+```
+
+---
+
+## Architecture Patterns
+
+### Config access
+
+All configuration goes through `src/config.py` using pydantic-settings:
+
+```python
+from config import settings
+url = settings.ollama_url # never use os.environ.get() directly in app code
+```
+
+Environment variables are read from `.env` automatically. See `.env.example` for
+all available settings.
+
+### Singletons
+
+Core services are module-level singleton instances imported directly:
+
+```python
+from dashboard.store import message_log
+from notifications.push import notifier
+from ws_manager.handler import ws_manager
+from timmy_serve.payment_handler import payment_handler
+from swarm.coordinator import coordinator
+```
+
+### HTMX response pattern
+
+Routes return Jinja2 template partials for HTMX requests:
+
+```python
+return templates.TemplateResponse(
+ "partials/chat_message.html",
+ {"request": request, "role": "user", "content": message}
+)
+```
+
+### Graceful degradation
+
+Optional services (Ollama, Redis, AirLLM) degrade gracefully — log the error,
+return a fallback, never crash:
+
+```python
+try:
+ result = await some_optional_service()
+except Exception:
+ result = fallback_value
+```
+
+### Route registration
+
+New routes go in `src/dashboard/routes/<name>.py`, then register the router in
+`src/dashboard/app.py`:
+
+```python
+from dashboard.routes.<name> import router as <name>_router
+app.include_router(<name>_router)
+```
+
+---
+
+## Testing
+
+### Running tests
+
+```bash
+make test # Quick run (pytest -q --tb=short)
+make test-cov # With coverage (term-missing + XML)
+make test-cov-html # With HTML coverage report
+```
+
+No Ollama or external services needed — all heavy dependencies are mocked.
+
+### Test conventions
+
+- **One test file per module:** `tests/test_<module>.py`
+- **Stubs in conftest:** `agno`, `airllm`, `pyttsx3`, `telegram` are stubbed in
+ `tests/conftest.py` using `sys.modules.setdefault()` so tests run without
+ those packages installed
+- **Test mode:** `TIMMY_TEST_MODE=1` is set automatically in conftest to disable
+ auto-spawning of persona agents during tests
+- **FastAPI testing:** Use the `client` fixture (wraps `TestClient`)
+- **Database isolation:** SQLite files in `data/` are cleaned between tests;
+ coordinator state is reset via autouse fixtures
+- **Async:** `asyncio_mode = "auto"` in pytest config — async test functions
+ are detected automatically
+- **Coverage threshold:** CI fails if coverage drops below 60%
+ (`fail_under = 60` in `pyproject.toml`)
+
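+A minimal sketch of the stub pattern from the conventions above; the real
+`tests/conftest.py` likely builds richer stand-ins, so treat this as
+illustrative only:
+
+```python
+# tests/conftest.py (sketch): make optional heavy deps importable in tests.
+import sys
+import types
+
+for name in ("agno", "airllm", "pyttsx3", "telegram"):
+    # setdefault keeps the real module if it happens to be installed
+    sys.modules.setdefault(name, types.ModuleType(name))
+```
+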
+### Adding a new test
+
+```python
+# tests/test_my_feature.py
+from fastapi.testclient import TestClient
+
+def test_my_endpoint(client):
+ response = client.get("/my-endpoint")
+ assert response.status_code == 200
+```
+
+---
+
+## CI/CD
+
+GitHub Actions workflow (`.github/workflows/tests.yml`):
+
+- Runs on every push and pull request to all branches
+- Python 3.11, installs `.[dev]` dependencies
+- Runs pytest with coverage + JUnit XML output
+- Publishes test results as PR comments and check annotations
+- Uploads coverage XML as a downloadable artifact (14-day retention)
+
+---
+
+## Key Conventions
+
+1. **Tests must stay green.** Run `make test` before committing.
+2. **No cloud AI dependencies.** All inference runs on localhost.
+3. **No new top-level files without purpose.** Keep the root directory clean.
+4. **Follow existing patterns** — singletons, graceful degradation,
+ pydantic-settings config.
+5. **Security defaults:** Never hard-code secrets. Warn at startup when using
+ default values.
+6. **XSS prevention:** Never use `innerHTML` with untrusted content.
+7. **Keep routes thin** — business logic lives in the module, not the route.
+8. **Prefer editing existing files** over creating new ones.
+9. **Use `from config import settings`** for all env-var access.
+10. **Every new module gets a test:** `tests/test_<module>.py`.
+
+---
+
+## Entry Points
+
+Three CLI commands are installed via `pyproject.toml`:
+
+| Command | Module | Purpose |
+|---------|--------|---------|
+| `timmy` | `src/timmy/cli.py` | Chat, think, status commands |
+| `timmy-serve` | `src/timmy_serve/cli.py` | L402-gated API server (port 8402) |
+| `self-tdd` | `src/self_tdd/watchdog.py` | Continuous test watchdog |
+
+---
+
+## Environment Variables
+
+Key variables (full list in `.env.example`):
+
+| Variable | Default | Purpose |
+|----------|---------|---------|
+| `OLLAMA_URL` | `http://localhost:11434` | Ollama host |
+| `OLLAMA_MODEL` | `llama3.2` | Model served by Ollama |
+| `DEBUG` | `false` | Enable `/docs` and `/redoc` |
+| `TIMMY_MODEL_BACKEND` | `ollama` | `ollama` / `airllm` / `auto` |
+| `AIRLLM_MODEL_SIZE` | `70b` | `8b` / `70b` / `405b` |
+| `L402_HMAC_SECRET` | *(change in prod)* | HMAC signing for invoices |
+| `L402_MACAROON_SECRET` | *(change in prod)* | Macaroon signing |
+| `LIGHTNING_BACKEND` | `mock` | `mock` / `lnd` |
+| `SPARK_ENABLED` | `true` | Enable Spark intelligence engine |
+| `TELEGRAM_TOKEN` | *(empty)* | Telegram bot token |
+
+---
+
+## Persistence
+
+- `timmy.db` — Agno agent memory (SQLite, project root)
+- `data/swarm.db` — Swarm registry + tasks (SQLite, `data/` directory)
+- All `.db` files are gitignored — never commit database files
+
+---
+
+## Docker
+
+Containers share a `data/` volume for SQLite. Container agents communicate with
+the coordinator over HTTP (not in-memory `SwarmComms`):
+
+```
+GET /internal/tasks → list tasks open for bidding
+POST /internal/bids → submit a bid
+```
+
+`COORDINATOR_URL=http://dashboard:8000` is set automatically by docker-compose.
+
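+A sketch of the bidding loop a container agent might run against this
+interface (the endpoint paths are from above; the JSON field names "id",
+"agent_id", and "amount_sats" are assumptions, not confirmed API):
+
+```python
+# Sketch: poll the coordinator for open tasks and bid on each one.
+import os
+
+import httpx
+
+COORDINATOR_URL = os.environ.get("COORDINATOR_URL", "http://dashboard:8000")
+
+def bid_on_open_tasks(agent_id: str, amount_sats: int = 50) -> None:
+    tasks = httpx.get(f"{COORDINATOR_URL}/internal/tasks").json()
+    for task in tasks:
+        httpx.post(
+            f"{COORDINATOR_URL}/internal/bids",
+            json={"task_id": task["id"], "agent_id": agent_id, "amount_sats": amount_sats},
+        )
+```
+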
+---
+
+## Security-Sensitive Areas
+
+- `src/swarm/coordinator.py` — requires review before changes
+- `src/timmy_serve/l402_proxy.py` — Lightning payment gating
+- `src/lightning/` — payment backend abstraction
+- Any file handling secrets or authentication tokens
diff --git a/Dockerfile b/Dockerfile
index 1a61121..6ac6daa 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -11,42 +11,43 @@
# timmy-time:latest \
# python -m swarm.agent_runner --agent-id w1 --name Worker-1
-FROM python:3.12-slim
+FROM python:3.12-slim AS base
# ── System deps ──────────────────────────────────────────────────────────────
RUN apt-get update && apt-get install -y --no-install-recommends \
- gcc curl \
+ gcc curl fonts-dejavu-core \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# ── Python deps (install before copying src for layer caching) ───────────────
+# Copy only pyproject.toml first so Docker can cache the dep-install layer.
+# The editable install (-e) happens after src is copied below.
COPY pyproject.toml .
-# Install production deps only (no dev/test extras in the image)
-RUN pip install --no-cache-dir \
- "fastapi>=0.115.0" \
- "uvicorn[standard]>=0.32.0" \
- "jinja2>=3.1.0" \
- "httpx>=0.27.0" \
- "python-multipart>=0.0.12" \
- "aiofiles>=24.0.0" \
- "typer>=0.12.0" \
- "rich>=13.0.0" \
- "pydantic-settings>=2.0.0" \
- "websockets>=12.0" \
- "agno[sqlite]>=1.4.0" \
- "ollama>=0.3.0" \
- "openai>=1.0.0" \
- "python-telegram-bot>=21.0"
+# Create a minimal src layout so `pip install` can resolve the package metadata
+# without copying the full source tree (preserves Docker layer caching).
+RUN mkdir -p src/timmy src/timmy_serve src/self_tdd src/dashboard && \
+ touch src/timmy/__init__.py src/timmy/cli.py \
+ src/timmy_serve/__init__.py src/timmy_serve/cli.py \
+ src/self_tdd/__init__.py src/self_tdd/watchdog.py \
+ src/dashboard/__init__.py src/config.py
+
+RUN pip install --no-cache-dir -e ".[swarm,telegram]"
# ── Application source ───────────────────────────────────────────────────────
+# Overwrite the stubs with real source code
COPY src/ ./src/
COPY static/ ./static/
# Create data directory (mounted as a volume in production)
RUN mkdir -p /app/data
+# ── Non-root user for production ─────────────────────────────────────────────
+RUN groupadd -r timmy && useradd -r -g timmy -d /app -s /sbin/nologin timmy \
+ && chown -R timmy:timmy /app
+USER timmy
+
# ── Environment ──────────────────────────────────────────────────────────────
ENV PYTHONPATH=/app/src
ENV PYTHONUNBUFFERED=1
@@ -54,5 +55,9 @@ ENV PYTHONDONTWRITEBYTECODE=1
EXPOSE 8000
+# ── Healthcheck ──────────────────────────────────────────────────────────────
+HEALTHCHECK --interval=30s --timeout=5s --start-period=15s --retries=3 \
+ CMD curl -f http://localhost:8000/health || exit 1
+
# ── Default: run the dashboard ───────────────────────────────────────────────
CMD ["uvicorn", "dashboard.app:app", "--host", "0.0.0.0", "--port", "8000"]
diff --git a/LICENSE b/LICENSE
index 261eeb9..16e48f0 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,201 +1,21 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
+MIT License
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+Copyright (c) 2026 Alexander Whitestone
- 1. Definitions.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Makefile b/Makefile
index 6ed068c..5f6a2f9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,7 @@
-.PHONY: install install-bigbrain dev test test-cov test-cov-html watch lint clean help \
- docker-build docker-up docker-down docker-agent docker-logs docker-shell
+.PHONY: install install-bigbrain install-creative dev nuke test test-cov test-cov-html test-ollama watch lint clean help \
+ up down logs \
+ docker-build docker-up docker-down docker-agent docker-logs docker-shell \
+ cloud-deploy cloud-up cloud-down cloud-logs cloud-status cloud-update cloud-droplet cloud-scale cloud-pull-model
VENV := .venv
PYTHON := $(VENV)/bin/python
@@ -23,13 +25,39 @@ install-bigbrain: $(VENV)/bin/activate
echo "✓ AirLLM installed (PyTorch backend)"; \
fi
+install-creative: $(VENV)/bin/activate
+ $(PIP) install --quiet -e ".[dev,creative]"
+ @if [ "$$(uname -m)" = "arm64" ] && [ "$$(uname -s)" = "Darwin" ]; then \
+ echo " Apple Silicon detected — installing PyTorch with Metal (MPS) support..."; \
+ $(PIP) install --quiet --pre torch torchvision torchaudio \
+ --index-url https://download.pytorch.org/whl/nightly/cpu; \
+ echo "✓ Creative extras installed with Metal GPU acceleration"; \
+ else \
+ echo "✓ Creative extras installed (diffusers, torch, ace-step)"; \
+ fi
+
$(VENV)/bin/activate:
python3 -m venv $(VENV)
# ── Development ───────────────────────────────────────────────────────────────
-dev:
- $(UVICORN) dashboard.app:app --reload --host 0.0.0.0 --port 8000
+dev: nuke
+ PYTHONDONTWRITEBYTECODE=1 $(UVICORN) dashboard.app:app --reload --host 0.0.0.0 --port 8000
+
+# Kill anything on port 8000, stop Docker containers, clear stale state.
+# Safe to run anytime — idempotent, never errors out.
+nuke:
+ @echo " Cleaning up dev environment..."
+ @# Stop Docker containers (if any are running)
+ @docker compose down --remove-orphans 2>/dev/null || true
+ @# Kill any process holding port 8000 (errno 48: address already in use)
+ @lsof -ti :8000 | xargs kill -9 2>/dev/null || true
+ @# Purge stale bytecode caches to prevent loading old .pyc files
+ @find . -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true
+ @find . -name "*.pyc" -delete 2>/dev/null || true
+ @# Brief pause to let the OS release the socket
+ @sleep 0.5
+ @echo " ✓ Port 8000 free, containers stopped, caches cleared"
# Print the local IP addresses your phone can use to reach this machine.
# Connect your phone to the same hotspot your Mac is sharing from,
@@ -40,10 +68,15 @@ ip:
@echo ""
@echo " Open one of these on your phone: http://:8000"
@echo ""
- @ipconfig getifaddr en0 2>/dev/null | awk '{print " en0 (Wi-Fi): http://" $$1 ":8000"}' || true
- @ipconfig getifaddr en1 2>/dev/null | awk '{print " en1 (Ethernet): http://" $$1 ":8000"}' || true
- @ipconfig getifaddr en2 2>/dev/null | awk '{print " en2: http://" $$1 ":8000"}' || true
- @ifconfig 2>/dev/null | awk '/inet / && !/127\.0\.0\.1/ && !/::1/{print " " $$2 " → http://" $$2 ":8000"}' | head -5 || true
+ @if [ "$$(uname -s)" = "Darwin" ]; then \
+ ipconfig getifaddr en0 2>/dev/null | awk '{print " en0 (Wi-Fi): http://" $$1 ":8000"}' || true; \
+ ipconfig getifaddr en1 2>/dev/null | awk '{print " en1 (Ethernet): http://" $$1 ":8000"}' || true; \
+ ipconfig getifaddr en2 2>/dev/null | awk '{print " en2: http://" $$1 ":8000"}' || true; \
+ fi
+ @# Generic fallback — works on both macOS and Linux
+ @ifconfig 2>/dev/null | awk '/inet / && !/127\.0\.0\.1/ && !/::1/{print " " $$2 " → http://" $$2 ":8000"}' | head -5 \
+ || ip -4 addr show 2>/dev/null | awk '/inet / && !/127\.0\.0\.1/{split($$2,a,"/"); print " " a[1] " → http://" a[1] ":8000"}' | head -5 \
+ || true
@echo ""
watch:
@@ -61,6 +94,12 @@ test-cov-html:
$(PYTEST) tests/ --cov=src --cov-report=term-missing --cov-report=html -q
@echo "✓ HTML coverage report: open htmlcov/index.html"
+# Full-stack functional test: spins up Ollama (CPU, qwen2.5:0.5b) + dashboard
+# in Docker and verifies real LLM chat end-to-end.
+# Override model: make test-ollama OLLAMA_TEST_MODEL=tinyllama
+test-ollama:
+ FUNCTIONAL_DOCKER=1 $(PYTEST) tests/functional/test_ollama_chat.py -v --tb=long -x
+
# ── Code quality ──────────────────────────────────────────────────────────────
lint:
@@ -70,6 +109,33 @@ lint:
# ── Housekeeping ──────────────────────────────────────────────────────────────
+# ── One-command startup ──────────────────────────────────────────────────────
+# make up build + start everything in Docker
+# make up DEV=1 same, with hot-reload on Python/template/CSS changes
+
+up:
+ mkdir -p data
+ifdef DEV
+ docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --build
+ @echo ""
+ @echo " ✓ Timmy Time running in DEV mode at http://localhost:8000"
+ @echo " Hot-reload active — Python, template, and CSS changes auto-apply"
+ @echo " Logs: make logs"
+ @echo ""
+else
+ docker compose up -d --build
+ @echo ""
+ @echo " ✓ Timmy Time running at http://localhost:8000"
+ @echo " Logs: make logs"
+ @echo ""
+endif
+
+down:
+ docker compose down
+
+logs:
+ docker compose logs -f
+
# ── Docker ────────────────────────────────────────────────────────────────────
docker-build:
@@ -95,6 +161,45 @@ docker-logs:
docker-shell:
docker compose exec dashboard bash
+# ── Cloud Deploy ─────────────────────────────────────────────────────────────
+
+# One-click production deployment (run on your cloud server)
+cloud-deploy:
+ @bash deploy/setup.sh
+
+# Start the production stack (Caddy + Ollama + Dashboard + Timmy)
+cloud-up:
+ docker compose -f docker-compose.prod.yml up -d
+
+# Stop the production stack
+cloud-down:
+ docker compose -f docker-compose.prod.yml down
+
+# Tail production logs
+cloud-logs:
+ docker compose -f docker-compose.prod.yml logs -f
+
+# Show status of all production containers
+cloud-status:
+ docker compose -f docker-compose.prod.yml ps
+
+# Pull latest code and rebuild
+cloud-update:
+ git pull
+ docker compose -f docker-compose.prod.yml up -d --build
+
+# Create a DigitalOcean droplet (requires doctl CLI)
+cloud-droplet:
+ @bash deploy/digitalocean/create-droplet.sh
+
+# Scale agent workers in production: make cloud-scale N=4
+cloud-scale:
+ docker compose -f docker-compose.prod.yml --profile agents up -d --scale agent=$${N:-2}
+
+# Pull a model into Ollama: make cloud-pull-model MODEL=llama3.2
+cloud-pull-model:
+ docker exec timmy-ollama ollama pull $${MODEL:-llama3.2}
+
# ── Housekeeping ──────────────────────────────────────────────────────────────
clean:
@@ -105,9 +210,20 @@ clean:
help:
@echo ""
+ @echo " Quick Start"
+ @echo " ─────────────────────────────────────────────────"
+ @echo " make up build + start everything in Docker"
+ @echo " make up DEV=1 same, with hot-reload on file changes"
+ @echo " make down stop all containers"
+ @echo " make logs tail container logs"
+ @echo ""
+ @echo " Local Development"
+ @echo " ─────────────────────────────────────────────────"
@echo " make install create venv + install dev deps"
@echo " make install-bigbrain install with AirLLM (big-model backend)"
- @echo " make dev start dashboard at http://localhost:8000"
+ @echo " make install-creative install with creative extras (torch, diffusers)"
+ @echo " make dev clean up + start dashboard (auto-fixes errno 48)"
+ @echo " make nuke kill port 8000, stop containers, reset state"
@echo " make ip print local IP addresses for phone testing"
@echo " make test run all tests"
@echo " make test-cov tests + coverage report (terminal + XML)"
@@ -116,6 +232,8 @@ help:
@echo " make lint run ruff or flake8"
@echo " make clean remove build artefacts and caches"
@echo ""
+ @echo " Docker (Advanced)"
+ @echo " ─────────────────────────────────────────────────"
@echo " make docker-build build the timmy-time:latest image"
@echo " make docker-up start dashboard container"
@echo " make docker-agent add one agent worker (AGENT_NAME=Echo)"
@@ -123,3 +241,15 @@ help:
@echo " make docker-logs tail container logs"
@echo " make docker-shell open a bash shell in the dashboard container"
@echo ""
+ @echo " Cloud Deploy (Production)"
+ @echo " ─────────────────────────────────────────────────"
+ @echo " make cloud-deploy one-click server setup (run as root)"
+ @echo " make cloud-up start production stack"
+ @echo " make cloud-down stop production stack"
+ @echo " make cloud-logs tail production logs"
+ @echo " make cloud-status show container status"
+ @echo " make cloud-update pull + rebuild from git"
+ @echo " make cloud-droplet create DigitalOcean droplet (needs doctl)"
+ @echo " make cloud-scale N=4 scale agent workers"
+ @echo " make cloud-pull-model MODEL=llama3.2 pull LLM model"
+ @echo ""
diff --git a/PLAN.md b/PLAN.md
new file mode 100644
index 0000000..a54ab1b
--- /dev/null
+++ b/PLAN.md
@@ -0,0 +1,478 @@
+# Plan: Full Creative & DevOps Capabilities for Timmy
+
+## Overview
+
+Add five major capability domains to Timmy's agent system, turning it into a
+sovereign creative studio and full-stack DevOps operator. All tools are
+open-source, self-hosted, and GPU-accelerated where needed.
+
+---
+
+## Phase 1: Git & DevOps Tools (Forge + Helm personas)
+
+**Goal:** Timmy can observe local/remote repos, read code, create branches,
+stage changes, commit, diff, log, and manage PRs — all through the swarm
+task system with Spark event capture.
+
+### New module: `src/tools/git_tools.py`
+
+Tools to add (using **GitPython** — BSD-3, `pip install GitPython`):
+
+| Tool | Function | Persona Access |
+|---|---|---|
+| `git_clone` | Clone a remote repo to local path | Forge, Helm |
+| `git_status` | Show working tree status | Forge, Helm, Timmy |
+| `git_diff` | Show staged/unstaged diffs | Forge, Helm, Timmy |
+| `git_log` | Show recent commit history | Forge, Helm, Echo, Timmy |
+| `git_branch` | List/create/switch branches | Forge, Helm |
+| `git_add` | Stage files for commit | Forge, Helm |
+| `git_commit` | Create a commit with message | Forge, Helm |
+| `git_push` | Push to remote | Forge, Helm |
+| `git_pull` | Pull from remote | Forge, Helm |
+| `git_blame` | Show line-by-line authorship | Forge, Echo |
+| `git_stash` | Stash/pop changes | Forge, Helm |
+
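+A sketch of two of these tools using GitPython; the function names match the
+table above, but the signatures and return shapes are assumptions made for
+illustration:
+
+```python
+# src/tools/git_tools.py (sketch): thin wrappers over GitPython's Repo.
+from pathlib import Path
+
+from git import Repo  # GitPython
+
+def git_status(repo_dir: str) -> str:
+    """Return `git status` output for the working tree."""
+    return Repo(Path(repo_dir).expanduser()).git.status()
+
+def git_commit(repo_dir: str, message: str) -> str:
+    """Stage all changes and commit them, returning the new commit SHA."""
+    repo = Repo(Path(repo_dir).expanduser())
+    repo.git.add(all=True)  # equivalent to `git add --all`
+    return repo.index.commit(message).hexsha
+```
+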
+### Changes to existing files
+
+- **`src/timmy/tools.py`** — Add `create_git_tools()` factory, wire into
+ `PERSONA_TOOLKITS` for Forge and Helm
+- **`src/swarm/tool_executor.py`** — Enhance `_infer_tools_needed()` with
+ git keywords (commit, branch, push, pull, diff, clone, merge)
+- **`src/config.py`** — Add `git_default_repo_dir: str = "~/repos"` setting
+- **`src/spark/engine.py`** — Add `on_tool_executed()` method to capture
+ individual tool invocations (not just task-level events)
+- **`src/swarm/personas.py`** — Add git-related keywords to Forge and Helm
+ preferred_keywords
+
+### New dependency
+
+```toml
+# pyproject.toml
+dependencies = [
+ ...,
+ "GitPython>=3.1.40",
+]
+```
+
+### Dashboard
+
+- **`/tools`** page updated to show git tools in the catalog
+- Git tool usage stats visible per agent
+
+### Tests
+
+- `tests/test_git_tools.py` — test all git tool functions against tmp repos
+- Mock GitPython's `Repo` class for unit tests
+
+---
+
+## Phase 2: Image Generation (new "Pixel" persona)
+
+**Goal:** Generate storyboard frames and standalone images from text prompts
+using FLUX.2 Klein 4B locally.
+
+### New persona: Pixel — Visual Architect
+
+```python
+"pixel": {
+ "id": "pixel",
+ "name": "Pixel",
+ "role": "Visual Architect",
+ "description": "Image generation, storyboard frames, and visual design.",
+ "capabilities": "image-generation,storyboard,design",
+ "rate_sats": 80,
+ "bid_base": 60,
+ "bid_jitter": 20,
+ "preferred_keywords": [
+ "image", "picture", "photo", "draw", "illustration",
+ "storyboard", "frame", "visual", "design", "generate",
+ "portrait", "landscape", "scene", "artwork",
+ ],
+}
+```
+
+### New module: `src/tools/image_tools.py`
+
+Tools (using **diffusers** + **FLUX.2 Klein 4B** — Apache 2.0):
+
+| Tool | Function |
+|---|---|
+| `generate_image` | Text-to-image generation (returns file path) |
+| `generate_storyboard` | Generate N frames from scene descriptions |
+| `image_variations` | Generate variations of an existing image |
+
+### Architecture
+
+```
+generate_image(prompt, width=1024, height=1024, steps=4)
+ → loads FLUX.2 Klein via diffusers FluxPipeline
+ → saves to data/images/{uuid}.png
+ → returns path + metadata
+```
+
+- Model loaded lazily on first use, kept in memory for subsequent calls
+- Falls back to CPU generation (slower) if no GPU
+- Output saved to `data/images/` with metadata JSON sidecar
+
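+A lazy-loading sketch of `generate_image`; whether FLUX.2 Klein loads through
+diffusers' `FluxPipeline` is an assumption here, as is the metadata sidecar
+shape:
+
+```python
+# src/tools/image_tools.py (sketch): load the pipeline once, reuse it.
+import json
+import uuid
+from pathlib import Path
+
+_pipe = None  # module-level cache so the model loads only on first call
+
+def generate_image(prompt: str, width: int = 1024, height: int = 1024, steps: int = 4) -> str:
+    global _pipe
+    if _pipe is None:
+        import torch
+        from diffusers import FluxPipeline
+        _pipe = FluxPipeline.from_pretrained(
+            "black-forest-labs/FLUX.2-klein-4b", torch_dtype=torch.bfloat16
+        )
+        if torch.cuda.is_available():
+            _pipe = _pipe.to("cuda")  # otherwise stay on CPU (slower)
+    image = _pipe(prompt, width=width, height=height, num_inference_steps=steps).images[0]
+    out_dir = Path("data/images")
+    out_dir.mkdir(parents=True, exist_ok=True)
+    path = out_dir / f"{uuid.uuid4()}.png"
+    image.save(path)
+    path.with_suffix(".json").write_text(json.dumps({"prompt": prompt, "steps": steps}))
+    return str(path)
+```
+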
+### New dependency (optional extra)
+
+```toml
+[project.optional-dependencies]
+creative = [
+ "diffusers>=0.30.0",
+ "transformers>=4.40.0",
+ "accelerate>=0.30.0",
+ "torch>=2.2.0",
+ "safetensors>=0.4.0",
+]
+```
+
+### Config
+
+```python
+# config.py additions
+flux_model_id: str = "black-forest-labs/FLUX.2-klein-4b"
+image_output_dir: str = "data/images"
+image_default_steps: int = 4
+```
+
+### Dashboard
+
+- `/creative/ui` — new Creative Studio page (image gallery + generation form)
+- HTMX-powered: submit prompt, poll for result, display inline
+- Gallery view of all generated images with metadata
+
+### Tests
+
+- `tests/test_image_tools.py` — mock diffusers pipeline, test prompt handling,
+ file output, storyboard generation
+
+---
+
+## Phase 3: Music Generation (new "Lyra" persona)
+
+**Goal:** Generate full songs with vocals, instrumentals, and lyrics using
+ACE-Step 1.5 locally.
+
+### New persona: Lyra — Sound Weaver
+
+```python
+"lyra": {
+ "id": "lyra",
+ "name": "Lyra",
+ "role": "Sound Weaver",
+ "description": "Music and song generation with vocals, instrumentals, and lyrics.",
+ "capabilities": "music-generation,vocals,composition",
+ "rate_sats": 90,
+ "bid_base": 70,
+ "bid_jitter": 20,
+ "preferred_keywords": [
+ "music", "song", "sing", "vocal", "instrumental",
+ "melody", "beat", "track", "compose", "lyrics",
+ "audio", "sound", "album", "remix",
+ ],
+}
+```
+
+### New module: `src/tools/music_tools.py`
+
+Tools (using **ACE-Step 1.5** — Apache 2.0, `pip install ace-step`):
+
+| Tool | Function |
+|---|---|
+| `generate_song` | Text/lyrics → full song (vocals + instrumentals) |
+| `generate_instrumental` | Text prompt → instrumental track |
+| `generate_vocals` | Lyrics + style → vocal track |
+| `list_genres` | Return supported genre/style tags |
+
+### Architecture
+
+```
+generate_song(lyrics, genre="pop", duration=120, language="en")
+ → loads ACE-Step model (lazy, cached)
+ → generates audio
+ → saves to data/music/{uuid}.wav
+ → returns path + metadata (duration, genre, etc.)
+```
+
+- Model loaded lazily, ~4GB VRAM minimum
+- Output saved to `data/music/` with metadata sidecar
+- Supports 19 languages, genre tags, tempo control
+
+### New dependency (optional extra, extends `creative`)
+
+```toml
+[project.optional-dependencies]
+creative = [
+ ...,
+ "ace-step>=1.5.0",
+]
+```
+
+### Config
+
+```python
+music_output_dir: str = "data/music"
+ace_step_model: str = "ace-step/ACE-Step-v1.5"
+```
+
+### Dashboard
+
+- `/creative/ui` expanded with Music tab
+- Audio player widget (HTML5 `<audio>` element)
+- Lyrics input form with genre/style selector
+
+### Tests
+
+- `tests/test_music_tools.py` — mock ACE-Step model, test generation params
+
+---
+
+## Phase 4: Video Generation (new "Reel" persona)
+
+**Goal:** Generate video clips from text/image prompts using Wan 2.1 locally.
+
+### New persona: Reel — Motion Director
+
+```python
+"reel": {
+ "id": "reel",
+ "name": "Reel",
+ "role": "Motion Director",
+ "description": "Video generation from text and image prompts.",
+ "capabilities": "video-generation,animation,motion",
+ "rate_sats": 100,
+ "bid_base": 80,
+ "bid_jitter": 20,
+ "preferred_keywords": [
+ "video", "clip", "animate", "motion", "film",
+ "scene", "cinematic", "footage", "render", "timelapse",
+ ],
+}
+```
+
+### New module: `src/tools/video_tools.py`
+
+Tools (using **Wan 2.1** via diffusers — Apache 2.0):
+
+| Tool | Function |
+|---|---|
+| `generate_video_clip` | Text → short video clip (3–6 seconds) |
+| `image_to_video` | Image + prompt → animated video from still |
+| `list_video_styles` | Return supported style presets |
+
+### Architecture
+
+```
+generate_video_clip(prompt, duration=5, resolution="480p", fps=24)
+ → loads Wan 2.1 via diffusers pipeline (lazy, cached)
+ → generates frames
+ → encodes to MP4 via FFmpeg
+ → saves to data/video/{uuid}.mp4
+ → returns path + metadata
+```
+
+- Wan 2.1 1.3B model: ~16GB VRAM
+- Output saved to `data/video/`
+- Resolution options: 480p (16GB), 720p (24GB+)
+
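+A sketch of `generate_video_clip` using diffusers' Wan support (`WanPipeline`
+exists in recent diffusers releases, though the diffusers-format checkpoint
+may live under a "-Diffusers"-suffixed repo; the frame count below is a naive
+duration * fps and Wan's expected frame counts may differ):
+
+```python
+# src/tools/video_tools.py (sketch): text-to-video, then encode to MP4.
+import uuid
+from pathlib import Path
+
+import torch
+from diffusers import WanPipeline
+from diffusers.utils import export_to_video  # handles the MP4 encoding step
+
+_pipe = None  # lazy, cached, as described above
+
+def generate_video_clip(prompt: str, duration: int = 5, fps: int = 24) -> str:
+    global _pipe
+    if _pipe is None:
+        _pipe = WanPipeline.from_pretrained(
+            "Wan-AI/Wan2.1-T2V-1.3B", torch_dtype=torch.bfloat16
+        ).to("cuda")
+    frames = _pipe(prompt, height=480, width=832, num_frames=duration * fps).frames[0]
+    out_dir = Path("data/video")
+    out_dir.mkdir(parents=True, exist_ok=True)
+    path = out_dir / f"{uuid.uuid4()}.mp4"
+    export_to_video(frames, str(path), fps=fps)
+    return str(path)
+```
+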
+### New dependency (extends `creative` extra)
+
+```toml
+creative = [
+ ...,
+ # Wan 2.1 uses diffusers (already listed) + model weights downloaded on first use
+]
+```
+
+### Config
+
+```python
+video_output_dir: str = "data/video"
+wan_model_id: str = "Wan-AI/Wan2.1-T2V-1.3B"
+video_default_resolution: str = "480p"
+```
+
+### Tests
+
+- `tests/test_video_tools.py` — mock diffusers pipeline, test clip generation
+
+---
+
+## Phase 5: Creative Director — Storyboard & Assembly Pipeline
+
+**Goal:** Orchestrate multi-persona workflows to produce 3+ minute creative
+videos with music, narration, and stitched scenes.
+
+### New module: `src/creative/director.py`
+
+The Creative Director is a **multi-step pipeline** that coordinates Pixel,
+Lyra, and Reel to produce complete creative works:
+
+```
+User: "Create a 3-minute music video about a sunrise over mountains"
+ │
+ Creative Director
+ ┌─────────┼──────────┐
+ │ │ │
+ 1. STORYBOARD 2. MUSIC 3. GENERATE
+ (Pixel) (Lyra) (Reel)
+ │ │ │
+ N scene Full song N video clips
+ descriptions with from storyboard
+ + keyframes vocals frames
+ │ │ │
+ └─────────┼──────────┘
+ │
+ 4. ASSEMBLE
+ (MoviePy + FFmpeg)
+ │
+ Final video with
+ music, transitions,
+ titles
+```
+
+### Pipeline steps
+
+1. **Script** — Timmy (or Quill) writes scene descriptions and lyrics
+2. **Storyboard** — Pixel generates keyframe images for each scene
+3. **Music** — Lyra generates the soundtrack (vocals + instrumentals)
+4. **Video clips** — Reel generates video for each scene (image-to-video
+ from storyboard frames, or text-to-video from descriptions)
+5. **Assembly** — MoviePy stitches clips together with cross-fades,
+ overlays the music track, adds title cards
+
+### New module: `src/creative/assembler.py`
+
+Video assembly engine (using **MoviePy** — MIT, `pip install moviepy`):
+
+| Function | Purpose |
+|---|---|
+| `stitch_clips` | Concatenate video clips with transitions |
+| `overlay_audio` | Mix music track onto video |
+| `add_title_card` | Prepend/append title/credits |
+| `add_subtitles` | Burn lyrics/captions onto video |
+| `export_final` | Encode final video (H.264 + AAC) |
+
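+A minimal sketch of the stitch and audio-overlay steps using MoviePy 2.x
+(transitions and title cards omitted; `subclipped` and `with_audio` are the
+2.x renames of the old `subclip` and `set_audio`):
+
+```python
+# src/creative/assembler.py (sketch): concatenate clips, score with music.
+from moviepy import AudioFileClip, VideoFileClip, concatenate_videoclips
+
+def stitch_and_score(clip_paths: list[str], music_path: str, out_path: str) -> str:
+    clips = [VideoFileClip(p) for p in clip_paths]
+    video = concatenate_videoclips(clips, method="compose")
+    music = AudioFileClip(music_path).subclipped(0, video.duration)  # trim to fit
+    video = video.with_audio(music)
+    video.write_videofile(out_path, codec="libx264", audio_codec="aac")
+    return out_path
+```
+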
+### New dependency
+
+```toml
+dependencies = [
+ ...,
+ "moviepy>=2.0.0",
+]
+```
+
+### Config
+
+```python
+creative_output_dir: str = "data/creative"
+video_transition_duration: float = 1.0 # seconds
+default_video_codec: str = "libx264"
+```
+
+### Dashboard
+
+- `/creative/ui` — Full Creative Studio with tabs:
+ - **Images** — gallery + generation form
+ - **Music** — player + generation form
+ - **Video** — player + generation form
+ - **Director** — multi-step pipeline builder with storyboard view
+- `/creative/projects` — saved projects with all assets
+- `/creative/projects/{id}` — project detail with timeline view
+
+### Tests
+
+- `tests/test_assembler.py` — test stitching, audio overlay, title cards
+- `tests/test_director.py` — test pipeline orchestration with mocks
+
+---
+
+## Phase 6: Spark Integration for All New Tools
+
+**Goal:** Every tool invocation and creative pipeline step gets captured by
+Spark Intelligence for learning and advisory.
+
+### Changes to `src/spark/engine.py`
+
+```python
+def on_tool_executed(
+ self, agent_id: str, tool_name: str,
+ task_id: Optional[str], success: bool,
+ duration_ms: Optional[int] = None,
+) -> Optional[str]:
+ """Capture individual tool invocations."""
+
+def on_creative_step(
+ self, project_id: str, step_name: str,
+ agent_id: str, output_path: Optional[str],
+) -> Optional[str]:
+ """Capture creative pipeline progress."""
+```
+
+### New advisor patterns
+
+- "Pixel generates storyboards 40% faster than individual image calls"
+- "Lyra's pop genre tracks have 85% higher completion rate than jazz"
+- "Video generation on 480p uses 60% less GPU time than 720p for similar quality"
+- "Git commits from Forge average 3 files per commit"
+
+---
+
+## Implementation Order
+
+| Phase | What | New Files | Est. Tests |
+|---|---|---|---|
+| 1 | Git/DevOps tools | 2 source + 1 test | ~25 |
+| 2 | Image generation | 2 source + 1 test + 1 template | ~15 |
+| 3 | Music generation | 1 source + 1 test | ~12 |
+| 4 | Video generation | 1 source + 1 test | ~12 |
+| 5 | Creative Director pipeline | 2 source + 2 tests + 1 template | ~20 |
+| 6 | Spark tool-level capture | 1 modified + 1 test update | ~8 |
+
+**Total: ~10 new source files, ~6 new test files, ~92 new tests**
+
+---
+
+## New Dependencies Summary
+
+**Required (always installed):**
+```
+GitPython>=3.1.40
+moviepy>=2.0.0
+```
+
+**Optional `creative` extra (GPU features):**
+```
+diffusers>=0.30.0
+transformers>=4.40.0
+accelerate>=0.30.0
+torch>=2.2.0
+safetensors>=0.4.0
+ace-step>=1.5.0
+```
+
+**Install:** `pip install ".[creative]"` for full creative stack
+
+---
+
+## New Persona Summary
+
+| ID | Name | Role | Tools |
+|---|---|---|---|
+| pixel | Pixel | Visual Architect | generate_image, generate_storyboard, image_variations |
+| lyra | Lyra | Sound Weaver | generate_song, generate_instrumental, generate_vocals |
+| reel | Reel | Motion Director | generate_video_clip, image_to_video |
+
+These join the existing 6 personas (Echo, Mace, Helm, Seer, Forge, Quill)
+for a total of **9 specialized agents** in the swarm.
+
+---
+
+## Hardware Requirements
+
+- **CPU only:** Git tools, MoviePy assembly, all tests (mocked)
+- **8GB VRAM:** FLUX.2 Klein 4B (images)
+- **4GB VRAM:** ACE-Step 1.5 (music)
+- **16GB VRAM:** Wan 2.1 1.3B (video at 480p)
+- **Recommended:** RTX 4090 24GB runs the entire stack comfortably
diff --git a/QUALITY_REVIEW_REPORT.md b/QUALITY_REVIEW_REPORT.md
new file mode 100644
index 0000000..8092e53
--- /dev/null
+++ b/QUALITY_REVIEW_REPORT.md
@@ -0,0 +1,232 @@
+# Timmy Time — Comprehensive Quality Review Report
+**Date:** 2026-02-25
+**Reviewed by:** Claude Code
+**Test Coverage:** 84.15% (895 tests passing)
+**Test Result:** ✅ 895 passed, 30 skipped
+
+---
+
+## Executive Summary
+
+The Timmy Time application is a **functional local-first AI agent system** with a working FastAPI dashboard, Ollama integration, and sophisticated Spark Intelligence engine. The codebase is well-structured with good test coverage, but **critical bugs were found and fixed** during this review that prevented the agent from working properly.
+
+**Overall Quality Score: 7.5/10**
+- Architecture: 8/10
+- Functionality: 8/10 (after fixes)
+- Test Coverage: 8/10
+- Documentation: 7/10
+- Memory/Self-Awareness: 9/10
+
+---
+
+## 1. Critical Bugs Found & Fixed
+
+### Bug 1: Toolkit API Mismatch (`CRITICAL`)
+**Location:** `src/timmy/tools.py`
+**Issue:** Code used non-existent `Toolkit.add_tool()` method (should be `register()`)
+
+**Changes Made:**
+- Changed `toolkit.add_tool(...)` → `toolkit.register(...)` (29 occurrences)
+- Changed `python_tools.python` → `python_tools.run_python_code` (3 occurrences)
+- Changed `file_tools.write_file` → `file_tools.save_file` (4 occurrences)
+- Changed `FileTools(base_dir=str(base_path))` → `FileTools(base_dir=base_path)` (5 occurrences)
+
+**Impact:** Without this fix, Timmy agent would crash on startup with `AttributeError`.
+
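+The pattern, for illustration (`read_file` is a hypothetical tool function):
+
+```python
+# Before (crashed: Agno's Toolkit has no add_tool method):
+# toolkit.add_tool(read_file)
+
+# After (Toolkit registers callables via register()):
+toolkit.register(read_file)
+```
+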
+### Bug 2: Agent Tools Parameter (`CRITICAL`)
+**Location:** `src/timmy/agent.py`
+**Issue:** Tools passed as single Toolkit instead of list
+
+**Change Made:**
+- Changed `tools=tools` → `tools=[tools] if tools else None`
+
+**Impact:** Without this fix, Agno Agent initialization would fail with `TypeError: 'Toolkit' object is not iterable`.
+
+---
+
+## 2. Model Inference — ✅ WORKING
+
+### Test Results
+
+| Test | Status | Details |
+|------|--------|---------|
+| Agent creation | ✅ Pass | Ollama backend initializes correctly |
+| Basic inference | ✅ Pass | Response type: `RunOutput` with content |
+| Tool usage | ✅ Pass | File operations, shell commands work |
+| Streaming | ✅ Pass | Supported via `stream=True` |
+
+### Inference Example
+```
+Input: "What is your name and who are you?"
+Output: "I am Timmy, a sovereign AI agent running locally on Apple Silicon.
+ I'm committed to your digital sovereignty and powered by Bitcoin economics..."
+```
+
+### Available Models
+- **Ollama:** llama3.2 (default), deepseek-r1:1.5b
+- **AirLLM:** 8B, 70B, 405B models (optional backend)
+
+---
+
+## 3. Memory & Self-Awareness — ✅ WORKING
+
+### Conversation Memory Test
+
+| Test | Status | Result |
+|------|--------|--------|
+| Single-turn memory | ✅ Pass | Timmy remembers what user just asked |
+| Multi-turn context | ✅ Pass | References earlier conversation |
+| Self-identification | ✅ Pass | "I am Timmy, a sovereign AI agent..." |
+| Persistent storage | ✅ Pass | SQLite (`timmy.db`) persists across restarts |
+| History recall | ✅ Pass | Can recall first question from conversation |
+
+### Memory Implementation
+- **Storage:** SQLite via `SqliteDb` (Agno)
+- **Context window:** 10 history runs (`num_history_runs=10`)
+- **File:** `timmy.db` in project root
+
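+A sketch of the wiring these bullets describe; the import path and parameter
+names follow recent Agno releases and may differ by version (`db_file` in
+particular is an assumption):
+
+```python
+# Sketch: Agno agent with SQLite-backed memory, per the settings above.
+from agno.agent import Agent
+from agno.db.sqlite import SqliteDb
+
+agent = Agent(
+    db=SqliteDb(db_file="timmy.db"),  # persists across restarts
+    add_history_to_context=True,
+    num_history_runs=10,              # context window noted above
+)
+```
+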
+### Self-Awareness Features
+✅ Agent knows its name ("Timmy")
+✅ Agent knows it's a sovereign AI
+✅ Agent knows it runs locally (Apple Silicon detection)
+✅ Agent references Bitcoin economics and digital sovereignty
+✅ Agent references Christian faith grounding (per system prompt)
+
+---
+
+## 4. Spark Intelligence Engine — ✅ WORKING
+
+### Capabilities Verified
+
+| Feature | Status | Details |
+|---------|--------|---------|
+| Event capture | ✅ Working | 550 events captured |
+| Task predictions | ✅ Working | 235 predictions, 85% avg accuracy |
+| Memory consolidation | ✅ Working | 6 memories stored |
+| Advisories | ✅ Working | Failure prevention, performance, bid optimization |
+| EIDOS loop | ✅ Working | Predict → Observe → Evaluate → Learn |
+
+### Sample Advisory Output
+```
+[failure_prevention] Agent fail-lea has 7 failures (Priority: 1.0)
+[agent_performance] Agent success- excels (100% success) (Priority: 0.6)
+[bid_optimization] Wide bid spread (20–94 sats) (Priority: 0.5)
+[system_health] Strong prediction accuracy (85%) (Priority: 0.3)
+```
+
+---
+
+## 5. Dashboard & UI — ✅ WORKING
+
+### Route Testing Results
+
+| Route | Status | Notes |
+|-------|--------|-------|
+| `/` | ✅ 200 | Main dashboard loads |
+| `/health` | ✅ 200 | Health panel |
+| `/agents` | ✅ 200 | Agent list API |
+| `/swarm` | ✅ 200 | Swarm coordinator UI |
+| `/spark` | ✅ 200 | Spark Intelligence dashboard |
+| `/marketplace` | ✅ 200 | Marketplace UI |
+| `/mobile` | ✅ 200 | Mobile-optimized layout |
+| `/agents/timmy/chat` | ✅ 200 | Chat endpoint works |
+
+### Chat Functionality
+- HTMX-powered chat interface ✅
+- Message history persistence ✅
+- Real-time Ollama inference ✅
+- Error handling (graceful degradation) ✅
+
+---
+
+## 6. Swarm System — ⚠️ PARTIAL
+
+### Working Components
+- ✅ Registry with SQLite persistence
+- ✅ Coordinator with task lifecycle
+- ✅ Agent bidding system
+- ✅ Task assignment algorithm
+- ✅ Spark event capture
+- ✅ Recovery mechanism
+
+### Limitations
+- ⚠️ Persona agents are stubbed (not fully functional AI agents)
+- ⚠️ Most swarm activity is simulated/test data
+- ⚠️ Docker runner not tested in live environment
+
+---
+
+## 7. Issues Identified (Non-Critical)
+
+### Issue 1: SSL Certificate Error with DuckDuckGo
+**Location:** Web search tool
+**Error:** `CERTIFICATE_VERIFY_FAILED`
+**Impact:** Web search tool fails, but agent continues gracefully
+**Fix:** May need `certifi` package or system certificate update
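+
+A possible fix, pointing Python's TLS stack at certifi's CA bundle before the
+search tool opens its connection (requires `pip install certifi`):
+
+```python
+# Make OpenSSL and requests use certifi's current CA bundle.
+import os
+import certifi
+
+os.environ.setdefault("SSL_CERT_FILE", certifi.where())
+os.environ.setdefault("REQUESTS_CA_BUNDLE", certifi.where())
+```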
+
+### Issue 2: Default Secrets Warning
+**Location:** L402 payment handler
+**Message:** `L402_HMAC_SECRET is using the default value`
+**Impact:** Warning only — production should set unique secrets
+**Status:** By design (warns at startup)
+
+### Issue 3: Redis Unavailable Fallback
+**Location:** SwarmComms
+**Message:** `Redis unavailable — using in-memory fallback`
+**Impact:** Falls back to in-memory (acceptable for single-instance)
+**Status:** By design (graceful degradation)
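+
+The fallback pattern, sketched (the real `SwarmComms` interface is richer):
+
+```python
+# Probe Redis once at startup; fall back to an in-process dict if the
+# ping fails for any reason (not installed, not running, wrong port).
+import logging
+
+try:
+    import redis
+    _client = redis.Redis()
+    _client.ping()
+    BACKEND = "redis"
+except Exception:
+    logging.getLogger(__name__).warning(
+        "Redis unavailable — using in-memory fallback"
+    )
+    BACKEND = "memory"
+    _store: dict[str, str] = {}
+```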
+
+### Issue 4: Telemetry to Agno
+**Observation:** Agno sends telemetry to `os-api.agno.com`
+**Impact:** Minor — may not align with "sovereign" vision
+**Note:** Requires further review for truly air-gapped deployments
+
+---
+
+## 8. Test Coverage Analysis
+
+| Module | Coverage | Status |
+|--------|----------|--------|
+| `spark/memory.py` | 98.3% | ✅ Excellent |
+| `spark/engine.py` | 92.6% | ✅ Good |
+| `swarm/coordinator.py` | 92.8% | ✅ Good |
+| `timmy/agent.py` | 100% | ✅ Excellent |
+| `timmy/backends.py` | 96.3% | ✅ Good |
+| `dashboard/` routes | 60–100% | ✅ Good |
+
+**Overall:** 84.15% coverage (exceeds 60% threshold)
+
+---
+
+## 9. Recommendations
+
+### High Priority
+1. ✅ **DONE** Fix toolkit API methods (register vs add_tool)
+2. ✅ **DONE** Fix agent tools parameter (wrap in list)
+3. Add tool usage instructions to system prompt to reduce unnecessary tool calls
+4. Fix SSL certificate issue for DuckDuckGo search
+
+### Medium Priority
+5. Add configuration option to disable Agno telemetry
+6. Implement more sophisticated self-awareness (e.g., knowledge of current tasks)
+7. Expand persona agent capabilities beyond stubs
+
+### Low Priority
+8. Add more comprehensive end-to-end tests with real Ollama
+9. Optimize tool calling behavior (fewer unnecessary tool invocations)
+10. Consider adding conversation summarization for very long contexts
+
+---
+
+## 10. Conclusion
+
+After fixing the critical bugs identified during this review, **Timmy Time is a functional and well-architected AI agent system** with:
+
+- ✅ Working model inference via Ollama
+- ✅ Persistent conversation memory
+- ✅ Self-awareness capabilities
+- ✅ Comprehensive Spark Intelligence engine
+- ✅ Functional web dashboard
+- ✅ Good test coverage (84%+)
+
+The core value proposition — a sovereign, local-first AI agent with memory and self-awareness — **is delivered and working**.
diff --git a/README.md b/README.md
index 3a80e97..552bf0f 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,10 @@
# Timmy Time — Mission Control
-[![Tests](https://github.com/Alexspayne/Timmy-time-dashboard/actions/workflows/tests.yml/badge.svg)](https://github.com/Alexspayne/Timmy-time-dashboard/actions/workflows/tests.yml)
+[![Tests](https://github.com/AlexanderWhitestone/Timmy-time-dashboard/actions/workflows/tests.yml/badge.svg)](https://github.com/AlexanderWhitestone/Timmy-time-dashboard/actions/workflows/tests.yml)
-A local-first, sovereign AI agent system. Talk to Timmy, watch his swarm, gate API access with Bitcoin Lightning — all from a browser, no cloud required.
+A local-first, sovereign AI agent system. Talk to Timmy, watch his swarm, gate API access with Bitcoin Lightning — all from a browser, no cloud AI required.
-**[Live Docs →](https://alexspayne.github.io/Timmy-time-dashboard/)**
+**[Live Docs →](https://alexanderwhitestone.github.io/Timmy-time-dashboard/)**
---
@@ -15,13 +15,17 @@ A local-first, sovereign AI agent system. Talk to Timmy, watch his swarm, gate
| **Timmy Agent** | Agno-powered agent (Ollama default, AirLLM optional for 70B/405B) |
| **Mission Control** | FastAPI + HTMX dashboard — chat, health, swarm, marketplace |
| **Swarm** | Multi-agent coordinator — spawn agents, post tasks, run Lightning auctions |
-| **L402 / Lightning** | Bitcoin Lightning payment gating for API access |
+| **L402 / Lightning** | Bitcoin Lightning payment gating for API access (mock backend; LND scaffolded) |
+| **Spark Intelligence** | Event capture, predictions, memory consolidation, advisory engine |
+| **Creative Studio** | Multi-persona creative pipeline — image, music, video generation |
+| **Tools** | Git, image, music, and video tools accessible by persona agents |
| **Voice** | NLU intent detection + TTS (pyttsx3, no cloud) |
| **WebSocket** | Real-time swarm live feed |
| **Mobile** | Responsive layout with full iOS safe-area and touch support |
+| **Telegram** | Bridge Telegram messages to Timmy |
| **CLI** | `timmy`, `timmy-serve`, `self-tdd` entry points |
-**228 tests, 100% passing.**
+**Full test suite, 100% passing.**
---
@@ -45,7 +49,7 @@ brew install ollama
```bash
# 1. Clone
-git clone https://github.com/Alexspayne/Timmy-time-dashboard.git
+git clone https://github.com/AlexanderWhitestone/Timmy-time-dashboard.git
cd Timmy-time-dashboard
# 2. Install
@@ -66,7 +70,7 @@ make dev
## Common commands
```bash
-make test # run all 228 tests (no Ollama needed)
+make test # run all tests (no Ollama needed)
make test-cov # test + coverage report
make dev # start dashboard (http://localhost:8000)
make watch # self-TDD watchdog (60s poll, alerts on regressions)
@@ -157,7 +161,7 @@ cp .env.example .env
| `AIRLLM_MODEL_SIZE` | `70b` | `8b` \| `70b` \| `405b` |
| `L402_HMAC_SECRET` | *(default — change in prod)* | HMAC signing key for macaroons |
| `L402_MACAROON_SECRET` | *(default — change in prod)* | Macaroon secret |
-| `LIGHTNING_BACKEND` | `mock` | `mock` \| `lnd` |
+| `LIGHTNING_BACKEND` | `mock` | `mock` (production-ready) \| `lnd` (scaffolded, not yet functional) |
---
@@ -202,12 +206,18 @@ src/
dashboard/ # FastAPI app, routes, Jinja2 templates
swarm/ # Multi-agent: coordinator, registry, bidder, tasks, comms
timmy_serve/ # L402 proxy, payment handler, TTS, serve CLI
+ spark/ # Intelligence engine — events, predictions, advisory
+ creative/ # Creative director + video assembler pipeline
+ tools/ # Git, image, music, video tools for persona agents
+ lightning/ # Lightning backend abstraction (mock + LND)
+ agent_core/ # Substrate-agnostic agent interface
voice/ # NLU intent detection
- websocket/ # WebSocket connection manager
+ ws_manager/ # WebSocket connection manager
notifications/ # Push notification store
shortcuts/ # Siri Shortcuts endpoints
+ telegram_bot/ # Telegram bridge
self_tdd/ # Continuous test watchdog
-tests/ # 228 tests — one file per module, all mocked
+tests/ # one test file per module, all mocked
static/style.css # Dark mission-control theme (JetBrains Mono)
docs/ # GitHub Pages landing page
AGENTS.md # AI agent development standards ← read this
diff --git a/WORKSET_PLAN.md b/WORKSET_PLAN.md
new file mode 100644
index 0000000..6b690a7
--- /dev/null
+++ b/WORKSET_PLAN.md
@@ -0,0 +1,147 @@
+# Timmy Time — Workset Plan (Post-Quality Review)
+
+**Date:** 2026-02-25
+**Based on:** QUALITY_ANALYSIS.md + QUALITY_REVIEW_REPORT.md
+
+---
+
+## Executive Summary
+
+This workset addresses critical security vulnerabilities, hardens the tool system for reliability, improves privacy alignment with the "sovereign AI" vision, and enhances agent intelligence.
+
+---
+
+## Workset A: Security Fixes (P0) 🔒
+
+### A1: XSS Vulnerabilities (SEC-01)
+**Priority:** P0 — Critical
+**Files:** `mobile.html`, `swarm_live.html`
+
+**Issues:**
+- `mobile.html` line ~85 uses raw `innerHTML` with unsanitized user input
+- `swarm_live.html` line ~72 uses `innerHTML` with WebSocket agent data
+
+**Fix:** Replace `innerHTML` string interpolation with safe DOM methods (`textContent`, `createTextNode`, or DOMPurify if available).
+
+### A2: Hardcoded Secrets (SEC-02)
+**Priority:** P1 — High
+**Files:** `l402_proxy.py`, `payment_handler.py`
+
+**Issue:** Default secrets fall back to hardcoded strings instead of `None` plus a startup assertion, so a production deploy can silently run with publicly known secrets.
+
+**Fix:** (sketch below)
+- Change defaults to `None`
+- Add startup assertion requiring env vars to be set
+- Fail fast with clear error message
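+
+A fail-fast sketch of that assertion (a sketch, not the repo's code; the
+`TIMMY_ENV` production switch matches `.env.example`):
+
+```python
+# Refuse to start in production unless both L402 secrets are set.
+import os
+
+def require_secret(name: str) -> str:
+    value = os.environ.get(name)
+    if not value:
+        raise RuntimeError(
+            f"{name} must be set in production. Generate one with:\n"
+            "  python3 -c 'import secrets; print(secrets.token_hex(32))'"
+        )
+    return value
+
+if os.environ.get("TIMMY_ENV", "development") == "production":
+    HMAC_SECRET = require_secret("L402_HMAC_SECRET")
+    MACAROON_SECRET = require_secret("L402_MACAROON_SECRET")
+```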
+
+---
+
+## Workset B: Tool System Hardening ⚙️
+
+### B1: SSL Certificate Fix
+**Priority:** P1 — High
+**File:** Web search via DuckDuckGo
+
+**Issue:** `CERTIFICATE_VERIFY_FAILED` errors prevent web search from working.
+
+**Fix Options:**
+- Option 1: Use `certifi` package for proper certificate bundle
+- Option 2: Add `verify_ssl=False` parameter (less secure, acceptable for local)
+- Option 3: Document SSL fix in troubleshooting
+
+### B2: Tool Usage Instructions
+**Priority:** P2 — Medium
+**File:** `prompts.py`
+
+**Issue:** Agent makes unnecessary tool calls for simple questions.
+
+**Fix:** Add tool usage instructions to the system prompt (sketch below):
+- Only use tools when explicitly needed
+- For simple chat/questions, respond directly
+- Tools are for: web search, file operations, code execution
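+
+A sketch of how the guidelines might be appended (the guideline text is
+illustrative; `prompts.py` owns the real wording):
+
+```python
+# Illustrative tool-usage guidance appended at prompt-build time.
+TOOL_GUIDELINES = """\
+Tool usage rules:
+- Answer simple questions and casual chat directly, without tools.
+- Use tools only for web search, file operations, or code execution.
+- Never call a tool to restate something you already know.
+"""
+
+def build_system_prompt(base_prompt: str) -> str:
+    return base_prompt.rstrip() + "\n\n" + TOOL_GUIDELINES
+```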
+
+### B3: Tool Error Handling
+**Priority:** P2 — Medium
+**File:** `tools.py`
+
+**Issue:** Tool failures show stack traces to user.
+
+**Fix:** Add graceful error handling with user-friendly messages.
+
+---
+
+## Workset C: Privacy & Sovereignty 🛡️
+
+### C1: Agno Telemetry (Privacy)
+**Priority:** P2 — Medium
+**File:** `agent.py`, `backends.py`
+
+**Issue:** Agno sends telemetry to `os-api.agno.com` which conflicts with "sovereign" vision.
+
+**Fix:** (sketch below)
+- Add `telemetry_enabled=False` parameter to Agent
+- Document how to disable for air-gapped deployments
+- Consider environment variable `TIMMY_TELEMETRY=0`
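+
+A sketch of the opt-out (`TELEMETRY_ENABLED` matches `.env.example`; the
+Agno-side switch is an assumption to verify against the installed version):
+
+```python
+# Disable Agno telemetry unless explicitly opted in.
+import os
+
+TELEMETRY_ENABLED = os.environ.get("TELEMETRY_ENABLED", "false").lower() == "true"
+
+if not TELEMETRY_ENABLED:
+    # Assumed Agno kill switch; confirm the exact variable name upstream.
+    os.environ["AGNO_TELEMETRY"] = "false"
+```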
+
+### C2: Secrets Validation
+**Priority:** P1 — High
+**File:** `config.py`, startup
+
+**Issue:** Default secrets used without warning in production.
+
+**Fix:**
+- Add production mode detection
+- Fatal error if default secrets in production
+- Clear documentation on generating secrets
+
+---
+
+## Workset D: Agent Intelligence 🧠
+
+### D1: Enhanced System Prompt
+**Priority:** P2 — Medium
+**File:** `prompts.py`
+
+**Enhancements:**
+- Tool usage guidelines (when to use, when not to)
+- Memory awareness ("You remember previous conversations")
+- Self-knowledge (capabilities, limitations)
+- Response style guidelines
+
+### D2: Memory Improvements
+**Priority:** P2 — Medium
+**File:** `agent.py`
+
+**Enhancements:**
+- Increase history runs from 10 to 20 for better context
+- Add memory summarization for very long conversations
+- Persistent session tracking
+
+---
+
+## Execution Order
+
+| Order | Workset | Task | Est. Time |
+|-------|---------|------|-----------|
+| 1 | A | XSS fixes | 30 min |
+| 2 | A | Secrets hardening | 20 min |
+| 3 | B | SSL certificate fix | 15 min |
+| 4 | B | Tool instructions | 20 min |
+| 5 | C | Telemetry disable | 15 min |
+| 6 | C | Secrets validation | 20 min |
+| 7 | D | Enhanced prompts | 30 min |
+| 8 | — | Test everything | 30 min |
+
+**Total: ~3 hours**
+
+---
+
+## Success Criteria
+
+- [ ] No XSS vulnerabilities (verified by code review)
+- [ ] Secrets fail fast in production
+- [ ] Web search works without SSL errors
+- [ ] Agent uses tools appropriately (not for simple chat)
+- [ ] Telemetry disabled by default
+- [ ] All 895+ tests pass
+- [ ] New tests added for security fixes
diff --git a/WORKSET_PLAN_PHASE2.md b/WORKSET_PLAN_PHASE2.md
new file mode 100644
index 0000000..2c9355e
--- /dev/null
+++ b/WORKSET_PLAN_PHASE2.md
@@ -0,0 +1,133 @@
+# Timmy Time — Workset Plan Phase 2 (Functional Hardening)
+
+**Date:** 2026-02-25
+**Based on:** QUALITY_ANALYSIS.md remaining issues
+
+---
+
+## Executive Summary
+
+This workset addresses the core functional gaps that prevent the swarm system from operating as designed. The swarm currently registers agents in the database but doesn't actually spawn processes or execute bids. This workset makes the swarm operational.
+
+---
+
+## Workset E: Swarm System Realization 🐝
+
+### E1: Real Agent Process Spawning (FUNC-01)
+**Priority:** P1 — High
+**Files:** `swarm/agent_runner.py`, `swarm/coordinator.py`
+
+**Issue:** `spawn_agent()` creates a database record but no Python process is actually launched.
+
+**Fix:** (sketch below)
+- Complete the `agent_runner.py` subprocess implementation
+- Ensure spawned agents can communicate with coordinator
+- Add proper lifecycle management (start, monitor, stop)
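+
+A minimal lifecycle sketch (the CLI flags mirror the compose files' agent
+command; everything else is illustrative, not the repo's runner):
+
+```python
+# Spawn an agent worker as a real child process and manage its lifecycle.
+import subprocess
+import sys
+
+def spawn_agent_process(agent_id: str, name: str) -> subprocess.Popen:
+    """Launch `python -m swarm.agent_runner` and return the process handle."""
+    return subprocess.Popen(
+        [sys.executable, "-m", "swarm.agent_runner",
+         "--agent-id", agent_id, "--name", name],
+    )
+
+def stop_agent_process(proc: subprocess.Popen, timeout: float = 5.0) -> None:
+    """Terminate gracefully, then kill if the process ignores SIGTERM."""
+    proc.terminate()
+    try:
+        proc.wait(timeout=timeout)
+    except subprocess.TimeoutExpired:
+        proc.kill()
+```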
+
+### E2: Working Auction System (FUNC-02)
+**Priority:** P1 — High
+**Files:** `swarm/bidder.py`, `swarm/persona_node.py`
+
+**Issue:** Bidding system runs auctions but no actual agents submit bids.
+
+**Fix:**
+- Connect persona agents to the bidding system
+- Implement automatic bid generation based on capabilities
+- Ensure auction resolution assigns tasks to winners
+
+### E3: Persona Agent Auto-Bidding
+**Priority:** P1 — High
+**Files:** `swarm/persona_node.py`, `swarm/coordinator.py`
+
+**Fix:** (sketch below)
+- Spawned persona agents should automatically bid on matching tasks
+- Implement capability-based bid decisions
+- Add bid amount calculation (base + jitter)
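+
+A sketch of the bid decision (type names are illustrative):
+
+```python
+# Capability-gated "base + jitter" bidding: skip unmatched tasks, and
+# randomize the amount slightly so identical agents don't always tie.
+import random
+
+def decide_bid(task_caps: set[str], my_caps: set[str],
+               base_sats: int = 50, jitter_sats: int = 10) -> int | None:
+    if not task_caps <= my_caps:
+        return None  # can't handle this task: no bid
+    return base_sats + random.randint(-jitter_sats, jitter_sats)
+```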
+
+---
+
+## Workset F: Testing & Reliability 🧪
+
+### F1: WebSocket Reconnection Tests (TEST-01)
+**Priority:** P2 — Medium
+**Files:** `tests/test_websocket.py`
+
+**Issue:** WebSocket tests don't cover reconnection logic or malformed payloads.
+
+**Fix:**
+- Add reconnection scenario tests
+- Test malformed payload handling
+- Test connection failure recovery
+
+### F2: Voice TTS Graceful Degradation
+**Priority:** P2 — Medium
+**Files:** `timmy_serve/voice_tts.py`, `dashboard/routes/voice.py`
+
+**Issue:** Voice routes fail without clear message when `pyttsx3` not installed.
+
+**Fix:** (sketch below)
+- Add graceful fallback message
+- Return helpful error suggesting `pip install ".[voice]"`
+- Don't crash, return 503 with instructions
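+
+A sketch of the degraded route (the path and response shape are assumptions;
+`pyttsx3.init/say/runAndWait` is the library's real API):
+
+```python
+from fastapi import APIRouter
+from fastapi.responses import JSONResponse
+
+router = APIRouter()
+
+try:
+    import pyttsx3
+    TTS_AVAILABLE = True
+except ImportError:
+    TTS_AVAILABLE = False
+
+@router.post("/voice/speak")
+async def speak(text: str):
+    if not TTS_AVAILABLE:
+        return JSONResponse(
+            status_code=503,
+            content={"error": "TTS unavailable. Install with: pip install '.[voice]'"},
+        )
+    engine = pyttsx3.init()
+    engine.say(text)
+    engine.runAndWait()
+    return {"status": "ok"}
+```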
+
+### F3: Mobile Route Navigation
+**Priority:** P2 — Medium
+**Files:** `templates/base.html`
+
+**Issue:** `/mobile` route not linked from desktop navigation.
+
+**Fix:**
+- Add mobile link to base template nav
+- Make it easy to find mobile-optimized view
+
+---
+
+## Workset G: Performance & Architecture ⚡
+
+### G1: SQLite Connection Pooling (PERF-01)
+**Priority:** P3 — Low
+**Files:** `swarm/registry.py`
+
+**Issue:** New SQLite connection opened on every query.
+
+**Fix:** (sketch below)
+- Implement connection pooling or singleton pattern
+- Reduce connection overhead
+- Maintain thread safety
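+
+One way to do it, sketched (a single shared connection behind a lock rather
+than a full pool; not the repo's implementation):
+
+```python
+# Reuse one SQLite connection across threads instead of opening per query.
+import sqlite3
+import threading
+
+class RegistryDB:
+    def __init__(self, path: str = "swarm.db"):
+        # check_same_thread=False permits cross-thread reuse; the lock
+        # serializes access, which keeps that safe.
+        self._conn = sqlite3.connect(path, check_same_thread=False)
+        self._lock = threading.Lock()
+
+    def execute(self, sql: str, params: tuple = ()):
+        with self._lock:
+            cur = self._conn.execute(sql, params)
+            self._conn.commit()
+            return cur.fetchall()
+```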
+
+### G2: Development Experience
+**Priority:** P2 — Medium
+**Files:** `Makefile`, `README.md`
+
+**Issue:** No single command to start full dev environment.
+
+**Fix:**
+- Add `make dev-full` that starts dashboard + Ollama check
+- Add better startup validation
+
+---
+
+## Execution Order
+
+| Order | Workset | Task | Est. Time |
+|-------|---------|------|-----------|
+| 1 | E | Persona auto-bidding system | 45 min |
+| 2 | E | Fix auction resolution | 30 min |
+| 3 | F | Voice graceful degradation | 20 min |
+| 4 | F | Mobile nav link | 10 min |
+| 5 | G | SQLite connection pooling | 30 min |
+| 6 | — | Test everything | 30 min |
+
+**Total: ~2.5 hours**
+
+---
+
+## Success Criteria
+
+- [ ] Persona agents automatically bid on matching tasks
+- [ ] Auctions resolve with actual winners
+- [ ] Voice routes degrade gracefully without pyttsx3
+- [ ] Mobile route accessible from desktop nav
+- [ ] SQLite connections pooled/reused
+- [ ] All 895+ tests pass
+- [ ] New tests for bidding system
diff --git a/activate_self_tdd.sh b/activate_self_tdd.sh
index 1268f77..0d89e03 100755
--- a/activate_self_tdd.sh
+++ b/activate_self_tdd.sh
@@ -60,15 +60,37 @@ python -m pytest "$REPO_DIR/tests/" -q --tb=short
echo "==> All tests passed."
# ── 4. Self-TDD watchdog (background) ────────────────────────────────────────
+WATCHDOG_PID_FILE="$REPO_DIR/.watchdog.pid"
+
+# Kill any previously orphaned watchdog
+if [[ -f "$WATCHDOG_PID_FILE" ]]; then
+ OLD_PID=$(cat "$WATCHDOG_PID_FILE")
+ if kill -0 "$OLD_PID" 2>/dev/null; then
+ echo "==> Stopping previous watchdog (PID $OLD_PID)..."
+ kill "$OLD_PID" 2>/dev/null || true
+ fi
+ rm -f "$WATCHDOG_PID_FILE"
+fi
+
echo "==> Starting self-TDD watchdog (60s interval) in background..."
self-tdd watch --interval 60 &
WATCHDOG_PID=$!
-echo " Watchdog PID: $WATCHDOG_PID"
+echo "$WATCHDOG_PID" > "$WATCHDOG_PID_FILE"
+echo " Watchdog PID: $WATCHDOG_PID (saved to .watchdog.pid)"
echo " Kill with: kill $WATCHDOG_PID"
+# Clean up watchdog when the script exits (Ctrl-C, etc.)
+cleanup() {
+ echo ""
+ echo "==> Stopping watchdog (PID $WATCHDOG_PID)..."
+ kill "$WATCHDOG_PID" 2>/dev/null || true
+ rm -f "$WATCHDOG_PID_FILE"
+}
+trap cleanup EXIT
+
# ── 5. Dashboard ─────────────────────────────────────────────────────────────
echo ""
echo "==> Starting Timmy Time dashboard at http://localhost:8000"
-echo " Ctrl-C stops the dashboard (watchdog continues until you kill it)"
+echo " Ctrl-C stops both the dashboard and the watchdog"
echo ""
uvicorn dashboard.app:app --reload --host 0.0.0.0 --port 8000
diff --git a/apply_security_fixes.py b/apply_security_fixes.py
new file mode 100644
index 0000000..2f4420c
--- /dev/null
+++ b/apply_security_fixes.py
@@ -0,0 +1,183 @@
+import os
+
+def fix_l402_proxy():
+ path = "src/timmy_serve/l402_proxy.py"
+ with open(path, "r") as f:
+ content = f.read()
+
+ # 1. Add hmac_secret to Macaroon dataclass
+ old_dataclass = "@dataclass\nclass Macaroon:\n \"\"\"Simplified HMAC-based macaroon for L402 authentication.\"\"\"\n identifier: str # payment_hash\n signature: str # HMAC signature\n location: str = \"timmy-time\"\n version: int = 1"
+ new_dataclass = "@dataclass\nclass Macaroon:\n \"\"\"Simplified HMAC-based macaroon for L402 authentication.\"\"\"\n identifier: str # payment_hash\n signature: str # HMAC signature\n location: str = \"timmy-time\"\n version: int = 1\n hmac_secret: str = \"\" # Added for multi-key support"
+ content = content.replace(old_dataclass, new_dataclass)
+
+ # 2. Update _MACAROON_SECRET logic
+ old_secret_logic = """_MACAROON_SECRET_DEFAULT = "timmy-macaroon-secret"
+_MACAROON_SECRET_RAW = os.environ.get("L402_MACAROON_SECRET", _MACAROON_SECRET_DEFAULT)
+_MACAROON_SECRET = _MACAROON_SECRET_RAW.encode()
+
+if _MACAROON_SECRET_RAW == _MACAROON_SECRET_DEFAULT:
+ logger.warning(
+ "SEC: L402_MACAROON_SECRET is using the default value — set a unique "
+ "secret in .env before deploying to production."
+ )"""
+ new_secret_logic = """_MACAROON_SECRET_DEFAULT = "timmy-macaroon-secret"
+_MACAROON_SECRET_RAW = os.environ.get("L402_MACAROON_SECRET", _MACAROON_SECRET_DEFAULT)
+_MACAROON_SECRET = _MACAROON_SECRET_RAW.encode()
+
+_HMAC_SECRET_DEFAULT = "timmy-hmac-secret"
+_HMAC_SECRET_RAW = os.environ.get("L402_HMAC_SECRET", _HMAC_SECRET_DEFAULT)
+_HMAC_SECRET = _HMAC_SECRET_RAW.encode()
+
+if _MACAROON_SECRET_RAW == _MACAROON_SECRET_DEFAULT or _HMAC_SECRET_RAW == _HMAC_SECRET_DEFAULT:
+ logger.warning(
+ "SEC: L402 secrets are using default values — set L402_MACAROON_SECRET "
+ "and L402_HMAC_SECRET in .env before deploying to production."
+ )"""
+ content = content.replace(old_secret_logic, new_secret_logic)
+
+ # 3. Update _sign to use the two-key derivation
+ old_sign = """def _sign(identifier: str) -> str:
+ \"\"\"Create an HMAC signature for a macaroon identifier.\"\"\"
+ return hmac.new(_MACAROON_SECRET, identifier.encode(), hashlib.sha256).hexdigest()"""
+ new_sign = """def _sign(identifier: str, hmac_secret: Optional[str] = None) -> str:
+ \"\"\"Create an HMAC signature for a macaroon identifier using two-key derivation.
+
+ The base macaroon secret is used to derive a key-specific secret from the
+ hmac_secret, which is then used to sign the identifier. This prevents
+ macaroon forgery if the hmac_secret is known but the base secret is not.
+ \"\"\"
+ key = hmac.new(
+ _MACAROON_SECRET,
+ (hmac_secret or _HMAC_SECRET_RAW).encode(),
+ hashlib.sha256
+ ).digest()
+ return hmac.new(key, identifier.encode(), hashlib.sha256).hexdigest()"""
+ content = content.replace(old_sign, new_sign)
+
+ # 4. Update create_l402_challenge
+ old_create = """ invoice = payment_handler.create_invoice(amount_sats, memo)
+ signature = _sign(invoice.payment_hash)
+ macaroon = Macaroon(
+ identifier=invoice.payment_hash,
+ signature=signature,
+ )"""
+ new_create = """ invoice = payment_handler.create_invoice(amount_sats, memo)
+ hmac_secret = _HMAC_SECRET_RAW
+ signature = _sign(invoice.payment_hash, hmac_secret)
+ macaroon = Macaroon(
+ identifier=invoice.payment_hash,
+ signature=signature,
+ hmac_secret=hmac_secret,
+ )"""
+ content = content.replace(old_create, new_create)
+
+ # 5. Update Macaroon.serialize and deserialize
+ old_serialize = """ def serialize(self) -> str:
+ \"\"\"Encode the macaroon as a base64 string.\"\"\"
+ raw = f"{self.version}:{self.location}:{self.identifier}:{self.signature}"
+ return base64.urlsafe_b64encode(raw.encode()).decode()"""
+ new_serialize = """ def serialize(self) -> str:
+ \"\"\"Encode the macaroon as a base64 string.\"\"\"
+ raw = f"{self.version}:{self.location}:{self.identifier}:{self.signature}:{self.hmac_secret}"
+ return base64.urlsafe_b64encode(raw.encode()).decode()"""
+ content = content.replace(old_serialize, new_serialize)
+
+ old_deserialize = """ @classmethod
+ def deserialize(cls, token: str) -> Optional["Macaroon"]:
+ \"\"\"Decode a base64 macaroon string.\"\"\"
+ try:
+ raw = base64.urlsafe_b64decode(token.encode()).decode()
+ parts = raw.split(":")
+ if len(parts) != 4:
+ return None
+ return cls(
+ version=int(parts[0]),
+ location=parts[1],
+ identifier=parts[2],
+ signature=parts[3],
+ )
+ except Exception:
+ return None"""
+ new_deserialize = """ @classmethod
+ def deserialize(cls, token: str) -> Optional["Macaroon"]:
+ \"\"\"Decode a base64 macaroon string.\"\"\"
+ try:
+ raw = base64.urlsafe_b64decode(token.encode()).decode()
+ parts = raw.split(":")
+ if len(parts) < 4:
+ return None
+ return cls(
+ version=int(parts[0]),
+ location=parts[1],
+ identifier=parts[2],
+ signature=parts[3],
+ hmac_secret=parts[4] if len(parts) > 4 else "",
+ )
+ except Exception:
+ return None"""
+ content = content.replace(old_deserialize, new_deserialize)
+
+ # 6. Update verify_l402_token
+ old_verify_sig = """ # Check HMAC signature
+ expected_sig = _sign(macaroon.identifier)
+ if not hmac.compare_digest(macaroon.signature, expected_sig):"""
+ new_verify_sig = """ # Check HMAC signature
+ expected_sig = _sign(macaroon.identifier, macaroon.hmac_secret)
+ if not hmac.compare_digest(macaroon.signature, expected_sig):"""
+ content = content.replace(old_verify_sig, new_verify_sig)
+
+ with open(path, "w") as f:
+ f.write(content)
+
+def fix_xss():
+ # Fix chat_message.html
+ path = "src/dashboard/templates/partials/chat_message.html"
+ with open(path, "r") as f:
+ content = f.read()
+ content = content.replace("{{ user_message }}", "{{ user_message | e }}")
+ content = content.replace("{{ response }}", "{{ response | e }}")
+ content = content.replace("{{ error }}", "{{ error | e }}")
+ with open(path, "w") as f:
+ f.write(content)
+
+ # Fix history.html
+ path = "src/dashboard/templates/partials/history.html"
+ with open(path, "r") as f:
+ content = f.read()
+ content = content.replace("{{ msg.content }}", "{{ msg.content | e }}")
+ with open(path, "w") as f:
+ f.write(content)
+
+ # Fix briefing.html
+ path = "src/dashboard/templates/briefing.html"
+ with open(path, "r") as f:
+ content = f.read()
+ content = content.replace("{{ briefing.summary }}", "{{ briefing.summary | e }}")
+ with open(path, "w") as f:
+ f.write(content)
+
+ # Fix approval_card_single.html
+ path = "src/dashboard/templates/partials/approval_card_single.html"
+ with open(path, "r") as f:
+ content = f.read()
+ content = content.replace("{{ item.title }}", "{{ item.title | e }}")
+ content = content.replace("{{ item.description }}", "{{ item.description | e }}")
+ content = content.replace("{{ item.proposed_action }}", "{{ item.proposed_action | e }}")
+ with open(path, "w") as f:
+ f.write(content)
+
+ # Fix marketplace.html
+ path = "src/dashboard/templates/marketplace.html"
+ with open(path, "r") as f:
+ content = f.read()
+ content = content.replace("{{ agent.name }}", "{{ agent.name | e }}")
+ content = content.replace("{{ agent.role }}", "{{ agent.role | e }}")
+ content = content.replace("{{ agent.description or 'No description' }}", "{{ (agent.description or 'No description') | e }}")
+ content = content.replace("{{ cap.strip() }}", "{{ cap.strip() | e }}")
+ with open(path, "w") as f:
+ f.write(content)
+
+if __name__ == "__main__":
+ fix_l402_proxy()
+ fix_xss()
+ print("Security fixes applied successfully.")
diff --git a/coverage.xml b/coverage.xml
new file mode 100644
index 0000000..08771ff
--- /dev/null
+++ b/coverage.xml
@@ -0,0 +1,5534 @@
+[coverage.xml body: 5,534-line generated XML coverage report; the markup was stripped during extraction. Only the source root survives: /Users/apayne/Timmy-time-dashboard/src]
diff --git a/deploy/Caddyfile b/deploy/Caddyfile
new file mode 100644
index 0000000..91aa24a
--- /dev/null
+++ b/deploy/Caddyfile
@@ -0,0 +1,36 @@
+# ── Timmy Time — Caddy Reverse Proxy ─────────────────────────────────────────
+#
+# Automatic HTTPS via Let's Encrypt.
+# Set DOMAIN env var or replace {$DOMAIN} below.
+#
+# For local/IP-only access (no domain), Caddy serves on :80 without TLS.
+
+{$DOMAIN:localhost} {
+ # Reverse proxy to the FastAPI dashboard
+ reverse_proxy dashboard:8000
+
+ # WebSocket support (swarm live updates)
+ @websocket {
+ header Connection *Upgrade*
+ header Upgrade websocket
+ }
+ reverse_proxy @websocket dashboard:8000
+
+ # Security headers
+ header {
+ X-Content-Type-Options nosniff
+ X-Frame-Options SAMEORIGIN
+ Referrer-Policy strict-origin-when-cross-origin
+ X-XSS-Protection "1; mode=block"
+ -Server
+ }
+
+ # Gzip compression
+ encode gzip zstd
+
+ # Access logging
+ log {
+ output stdout
+ format console
+ }
+}
diff --git a/deploy/cloud-init.yaml b/deploy/cloud-init.yaml
new file mode 100644
index 0000000..5094d2b
--- /dev/null
+++ b/deploy/cloud-init.yaml
@@ -0,0 +1,117 @@
+#cloud-config
+# ── Timmy Time — Cloud-Init Bootstrap ────────────────────────────────────────
+#
+# Paste this as "User Data" when creating a DigitalOcean Droplet, AWS EC2
+# instance, Hetzner server, Vultr instance, or any cloud VM.
+#
+# What it does:
+# 1. Installs Docker + Docker Compose
+# 2. Configures firewall (SSH + HTTP + HTTPS only)
+# 3. Clones the Timmy repo to /opt/timmy
+# 4. Pulls the default LLM model
+# 5. Starts the full production stack
+# 6. Enables auto-start on reboot via systemd
+#
+# After boot (~3-5 min), access: https://<your-domain> or http://<server-ip>
+#
+# Prerequisites:
+# - Point your domain's A record to this server's IP (for auto-HTTPS)
+# - Or access via IP (Caddy will serve HTTP only)
+
+package_update: true
+package_upgrade: true
+
+packages:
+ - curl
+ - git
+ - ufw
+ - fail2ban
+ - unattended-upgrades
+
+write_files:
+ # Timmy environment config — edit after first boot if needed
+ - path: /opt/timmy/.env
+ permissions: "0600"
+ content: |
+ # ── Timmy Time — Production Environment ──────────────────────────
+ # Edit this file, then: systemctl restart timmy
+
+ # Your domain (required for auto-HTTPS). Use IP for HTTP-only.
+ DOMAIN=localhost
+
+ # LLM model (pulled automatically on first boot)
+ OLLAMA_MODEL=llama3.2
+
+ # Generate secrets:
+ # python3 -c "import secrets; print(secrets.token_hex(32))"
+ L402_HMAC_SECRET=
+ L402_MACAROON_SECRET=
+
+ # Telegram bot token (optional)
+ TELEGRAM_TOKEN=
+
+ # Systemd service file
+ - path: /etc/systemd/system/timmy.service
+ permissions: "0644"
+ content: |
+ [Unit]
+ Description=Timmy Time — Mission Control
+ After=docker.service network-online.target
+ Requires=docker.service
+ Wants=network-online.target
+
+ [Service]
+ Type=oneshot
+ RemainAfterExit=yes
+ WorkingDirectory=/opt/timmy
+ EnvironmentFile=-/opt/timmy/.env
+ ExecStart=/usr/bin/docker compose -f docker-compose.prod.yml up -d
+ ExecStop=/usr/bin/docker compose -f docker-compose.prod.yml down
+ ExecReload=/usr/bin/docker compose -f docker-compose.prod.yml restart
+ Restart=on-failure
+ RestartSec=30
+
+ [Install]
+ WantedBy=multi-user.target
+
+runcmd:
+ # ── Install Docker ─────────────────────────────────────────────────────────
+ - curl -fsSL https://get.docker.com | sh
+ - systemctl enable docker
+ - systemctl start docker
+
+ # ── Firewall ───────────────────────────────────────────────────────────────
+ - ufw default deny incoming
+ - ufw default allow outgoing
+ - ufw allow 22/tcp # SSH
+ - ufw allow 80/tcp # HTTP
+ - ufw allow 443/tcp # HTTPS
+ - ufw allow 443/udp # HTTP/3
+ - ufw --force enable
+
+ # ── Fail2ban ───────────────────────────────────────────────────────────────
+ - systemctl enable fail2ban
+ - systemctl start fail2ban
+
+ # ── Clone and deploy ───────────────────────────────────────────────────────
+ - git clone https://github.com/AlexanderWhitestone/Timmy-time-dashboard.git /opt/timmy
+ - cd /opt/timmy && mkdir -p data
+
+ # ── Build and start ────────────────────────────────────────────────────────
+ - cd /opt/timmy && docker compose -f docker-compose.prod.yml build
+ - cd /opt/timmy && docker compose -f docker-compose.prod.yml up -d
+
+ # ── Pull default LLM model ────────────────────────────────────────────────
+ - |
+ echo "Waiting for Ollama to be ready..."
+ for i in $(seq 1 30); do
+ if docker exec timmy-ollama curl -sf http://localhost:11434/api/tags > /dev/null 2>&1; then
+ break
+ fi
+ sleep 5
+ done
+ docker exec timmy-ollama ollama pull llama3.2
+
+ # ── Enable auto-start on boot ──────────────────────────────────────────────
+ - systemctl daemon-reload
+ - systemctl enable timmy
diff --git a/deploy/digitalocean/create-droplet.sh b/deploy/digitalocean/create-droplet.sh
new file mode 100755
index 0000000..2b751c7
--- /dev/null
+++ b/deploy/digitalocean/create-droplet.sh
@@ -0,0 +1,114 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# ── Timmy Time — DigitalOcean Droplet Creator ────────────────────────────────
+#
+# Creates a DigitalOcean Droplet with Timmy pre-installed via cloud-init.
+#
+# Prerequisites:
+# - doctl CLI installed (https://docs.digitalocean.com/reference/doctl/)
+# - doctl auth init (authenticated)
+#
+# Usage:
+# bash deploy/digitalocean/create-droplet.sh
+# bash deploy/digitalocean/create-droplet.sh --domain timmy.example.com
+# bash deploy/digitalocean/create-droplet.sh --size s-2vcpu-4gb --region nyc1
+
+BOLD='\033[1m'
+GREEN='\033[0;32m'
+CYAN='\033[0;36m'
+NC='\033[0m'
+
+# Defaults
+DROPLET_NAME="timmy-mission-control"
+REGION="nyc1"
+SIZE="s-2vcpu-4gb" # 2 vCPU, 4GB RAM — good for llama3.2
+IMAGE="ubuntu-24-04-x64"
+DOMAIN=""
+
+# Parse arguments
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --name) DROPLET_NAME="$2"; shift 2 ;;
+ --region) REGION="$2"; shift 2 ;;
+ --size) SIZE="$2"; shift 2 ;;
+ --domain) DOMAIN="$2"; shift 2 ;;
+ *) echo "Unknown option: $1"; exit 1 ;;
+ esac
+done
+
+# Check doctl
+if ! command -v doctl &> /dev/null; then
+ echo "Error: doctl is not installed."
+ echo "Install it: https://docs.digitalocean.com/reference/doctl/how-to/install/"
+ exit 1
+fi
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+CLOUD_INIT="$SCRIPT_DIR/../cloud-init.yaml"
+
+if [ ! -f "$CLOUD_INIT" ]; then
+ echo "Error: cloud-init.yaml not found at $CLOUD_INIT"
+ exit 1
+fi
+
+echo -e "${CYAN}${BOLD}"
+echo " Creating DigitalOcean Droplet"
+echo " ─────────────────────────────"
+echo -e "${NC}"
+echo " Name: $DROPLET_NAME"
+echo " Region: $REGION"
+echo " Size: $SIZE"
+echo " Image: $IMAGE"
+echo ""
+
+# Create the droplet
+DROPLET_ID=$(doctl compute droplet create "$DROPLET_NAME" \
+ --region "$REGION" \
+ --size "$SIZE" \
+ --image "$IMAGE" \
+ --user-data-file "$CLOUD_INIT" \
+ --enable-monitoring \
+ --format ID \
+ --no-header \
+ --wait)
+
+echo -e "${GREEN}[+]${NC} Droplet created: ID $DROPLET_ID"
+
+# Get the IP
+sleep 5
+IP=$(doctl compute droplet get "$DROPLET_ID" --format PublicIPv4 --no-header)
+echo -e "${GREEN}[+]${NC} Public IP: $IP"
+
+# Set up DNS if domain provided
+if [ -n "$DOMAIN" ]; then
+ # Extract the base domain (last two parts)
+ BASE_DOMAIN=$(echo "$DOMAIN" | awk -F. '{print $(NF-1)"."$NF}')
+ SUBDOMAIN=$(echo "$DOMAIN" | sed "s/\.$BASE_DOMAIN$//")
+
+ if [ "$SUBDOMAIN" = "$DOMAIN" ]; then
+ SUBDOMAIN="@"
+ fi
+
+ echo -e "${GREEN}[+]${NC} Creating DNS record: $DOMAIN -> $IP"
+ doctl compute domain records create "$BASE_DOMAIN" \
+ --record-type A \
+ --record-name "$SUBDOMAIN" \
+ --record-data "$IP" \
+ --record-ttl 300 || echo " (DNS record creation failed — set it manually)"
+fi
+
+echo ""
+echo -e "${GREEN}${BOLD} Droplet is provisioning!${NC}"
+echo ""
+echo " The server will be ready in ~3-5 minutes."
+echo ""
+echo " SSH in: ssh root@$IP"
+echo " Check progress: ssh root@$IP tail -f /var/log/cloud-init-output.log"
+if [ -n "$DOMAIN" ]; then
+ echo " Dashboard: https://$DOMAIN (after DNS propagation)"
+fi
+echo " Dashboard: http://$IP"
+echo ""
+echo " After boot, edit /opt/timmy/.env to set your domain and secrets."
+echo ""
diff --git a/deploy/setup.sh b/deploy/setup.sh
new file mode 100755
index 0000000..1c7f83a
--- /dev/null
+++ b/deploy/setup.sh
@@ -0,0 +1,282 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# ── Timmy Time — One-Click Deploy Script ─────────────────────────────────────
+#
+# Run this on any fresh Ubuntu/Debian server:
+#
+# curl -fsSL https://raw.githubusercontent.com/AlexanderWhitestone/Timmy-time-dashboard/master/deploy/setup.sh | bash
+#
+# Or clone first and run locally:
+#
+# git clone https://github.com/AlexanderWhitestone/Timmy-time-dashboard.git
+# cd Timmy-time-dashboard
+# bash deploy/setup.sh
+#
+# What it does:
+# 1. Installs Docker (if not present)
+# 2. Configures firewall
+# 3. Generates secrets
+# 4. Builds and starts the full stack
+# 5. Pulls the LLM model
+# 6. Sets up auto-start on boot
+
+BOLD='\033[1m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+CYAN='\033[0;36m'
+RED='\033[0;31m'
+NC='\033[0m'
+
+INSTALL_DIR="/opt/timmy"
+
+banner() {
+ echo ""
+ echo -e "${CYAN}${BOLD}"
+ echo " ╔══════════════════════════════════════════╗"
+ echo " ║ Timmy Time — Mission Control ║"
+ echo " ║ One-Click Cloud Deploy ║"
+ echo " ╚══════════════════════════════════════════╝"
+ echo -e "${NC}"
+}
+
+info() { echo -e "${GREEN}[+]${NC} $1"; }
+warn() { echo -e "${YELLOW}[!]${NC} $1"; }
+error() { echo -e "${RED}[x]${NC} $1"; }
+step() { echo -e "\n${BOLD}── $1 ──${NC}"; }
+
+check_root() {
+ if [ "$(id -u)" -ne 0 ]; then
+ error "This script must be run as root (or with sudo)"
+ exit 1
+ fi
+}
+
+generate_secret() {
+ python3 -c "import secrets; print(secrets.token_hex(32))" 2>/dev/null || \
+ openssl rand -hex 32 2>/dev/null || \
+ head -c 32 /dev/urandom | xxd -p -c 64
+}
+
+install_docker() {
+ step "Installing Docker"
+ if command -v docker &> /dev/null; then
+ info "Docker already installed: $(docker --version)"
+ else
+ info "Installing Docker..."
+ curl -fsSL https://get.docker.com | sh
+ systemctl enable docker
+ systemctl start docker
+ info "Docker installed: $(docker --version)"
+ fi
+
+ # Ensure docker compose plugin is available
+ if ! docker compose version &> /dev/null; then
+ error "Docker Compose plugin not found. Please install it manually."
+ exit 1
+ fi
+ info "Docker Compose: $(docker compose version --short)"
+}
+
+setup_firewall() {
+ step "Configuring Firewall"
+ if command -v ufw &> /dev/null; then
+ ufw default deny incoming
+ ufw default allow outgoing
+ ufw allow 22/tcp # SSH
+ ufw allow 80/tcp # HTTP
+ ufw allow 443/tcp # HTTPS
+ ufw allow 443/udp # HTTP/3
+ ufw --force enable
+ info "Firewall configured (SSH, HTTP, HTTPS)"
+ else
+ warn "ufw not found — install it or configure your firewall manually"
+ fi
+}
+
+setup_fail2ban() {
+ step "Setting up Fail2ban"
+ if command -v fail2ban-server &> /dev/null; then
+ systemctl enable fail2ban
+ systemctl start fail2ban
+ info "Fail2ban active"
+ else
+ apt-get install -y fail2ban 2>/dev/null && systemctl enable fail2ban && systemctl start fail2ban && info "Fail2ban installed and active" || \
+ warn "Could not install fail2ban — install manually for SSH protection"
+ fi
+}
+
+clone_or_update() {
+ step "Setting up Timmy"
+ if [ -d "$INSTALL_DIR/.git" ]; then
+ info "Existing installation found at $INSTALL_DIR — updating..."
+ cd "$INSTALL_DIR"
+ git pull origin master || git pull origin main || warn "Could not pull updates"
+ elif [ -f "./docker-compose.prod.yml" ]; then
+ info "Running from repo directory — copying to $INSTALL_DIR"
+ mkdir -p "$INSTALL_DIR"
+ cp -r . "$INSTALL_DIR/"
+ cd "$INSTALL_DIR"
+ else
+ info "Cloning Timmy Time Dashboard..."
+ git clone https://github.com/AlexanderWhitestone/Timmy-time-dashboard.git "$INSTALL_DIR"
+ cd "$INSTALL_DIR"
+ fi
+ mkdir -p data
+}
+
+configure_env() {
+ step "Configuring Environment"
+ local ENV_FILE="$INSTALL_DIR/.env"
+
+ if [ -f "$ENV_FILE" ]; then
+ warn ".env already exists — skipping (edit manually if needed)"
+ return
+ fi
+
+ # Interactive domain setup
+ local DOMAIN="localhost"
+ echo ""
+ read -rp " Enter your domain (or press Enter for IP-only access): " USER_DOMAIN
+ if [ -n "$USER_DOMAIN" ]; then
+ DOMAIN="$USER_DOMAIN"
+ fi
+
+ # Interactive model selection
+ local MODEL="llama3.2"
+ echo ""
+ echo " Available LLM models:"
+ echo " 1) llama3.2 (~2GB, fast, good for most tasks)"
+ echo " 2) llama3.1:8b (~4.7GB, better reasoning)"
+ echo " 3) mistral (~4.1GB, good all-rounder)"
+ echo " 4) phi3 (~2.2GB, compact and fast)"
+ echo ""
+ read -rp " Select model [1-4, default=1]: " MODEL_CHOICE
+ case "$MODEL_CHOICE" in
+ 2) MODEL="llama3.1:8b" ;;
+ 3) MODEL="mistral" ;;
+ 4) MODEL="phi3" ;;
+ *) MODEL="llama3.2" ;;
+ esac
+
+ # Generate secrets
+ local HMAC_SECRET
+ HMAC_SECRET=$(generate_secret)
+ local MACAROON_SECRET
+ MACAROON_SECRET=$(generate_secret)
+
+ cat > "$ENV_FILE" </dev/null || echo "llama3.2")
+
+ info "Waiting for Ollama to be ready..."
+ local retries=0
+ while [ $retries -lt 30 ]; do
+ if docker exec timmy-ollama curl -sf http://localhost:11434/api/tags > /dev/null 2>&1; then
+ break
+ fi
+ sleep 5
+ retries=$((retries + 1))
+ done
+
+ if [ $retries -ge 30 ]; then
+ warn "Ollama not ready after 150s — pull model manually:"
+ warn " docker exec timmy-ollama ollama pull $MODEL"
+ return
+ fi
+
+ info "Pulling $MODEL (this may take a few minutes)..."
+ docker exec timmy-ollama ollama pull "$MODEL"
+ info "Model $MODEL ready"
+}
+
+setup_systemd() {
+ step "Enabling Auto-Start on Boot"
+ cp "$INSTALL_DIR/deploy/timmy.service" /etc/systemd/system/timmy.service
+ systemctl daemon-reload
+ systemctl enable timmy
+ info "Timmy will auto-start on reboot"
+}
+
+print_summary() {
+ local DOMAIN
+ DOMAIN=$(grep -oP 'DOMAIN=\K.*' "$INSTALL_DIR/.env" 2>/dev/null || echo "localhost")
+ local IP
+ IP=$(curl -4sf https://ifconfig.me 2>/dev/null || hostname -I 2>/dev/null | awk '{print $1}' || echo "your-server-ip")
+
+ echo ""
+ echo -e "${GREEN}${BOLD}"
+ echo " ╔══════════════════════════════════════════╗"
+ echo " ║ Timmy is LIVE! ║"
+ echo " ╚══════════════════════════════════════════╝"
+ echo -e "${NC}"
+ echo ""
+ if [ "$DOMAIN" != "localhost" ]; then
+ echo -e " ${BOLD}Dashboard:${NC} https://$DOMAIN"
+ fi
+ echo -e " ${BOLD}Dashboard:${NC} http://$IP"
+ echo ""
+ echo -e " ${BOLD}Useful commands:${NC}"
+ echo " systemctl status timmy # check status"
+ echo " systemctl restart timmy # restart stack"
+ echo " docker compose -f /opt/timmy/docker-compose.prod.yml logs -f # tail logs"
+ echo " nano /opt/timmy/.env # edit config"
+ echo ""
+ echo -e " ${BOLD}Scale agents:${NC}"
+ echo " cd /opt/timmy"
+ echo " docker compose -f docker-compose.prod.yml --profile agents up -d --scale agent=4"
+ echo ""
+ echo -e " ${BOLD}Update Timmy:${NC}"
+ echo " cd /opt/timmy && git pull && docker compose -f docker-compose.prod.yml up -d --build"
+ echo ""
+}
+
+# ── Main ─────────────────────────────────────────────────────────────────────
+
+banner
+check_root
+install_docker
+setup_firewall
+setup_fail2ban
+clone_or_update
+configure_env
+build_and_start
+pull_model
+setup_systemd
+print_summary
diff --git a/deploy/timmy.service b/deploy/timmy.service
new file mode 100644
index 0000000..8ccc5d9
--- /dev/null
+++ b/deploy/timmy.service
@@ -0,0 +1,29 @@
+[Unit]
+Description=Timmy Time — Mission Control
+Documentation=https://github.com/AlexanderWhitestone/Timmy-time-dashboard
+After=docker.service network-online.target
+Requires=docker.service
+Wants=network-online.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+WorkingDirectory=/opt/timmy
+EnvironmentFile=-/opt/timmy/.env
+
+# Start the full production stack
+ExecStart=/usr/bin/docker compose -f docker-compose.prod.yml up -d
+ExecStop=/usr/bin/docker compose -f docker-compose.prod.yml down
+ExecReload=/usr/bin/docker compose -f docker-compose.prod.yml restart
+
+# Restart policy
+Restart=on-failure
+RestartSec=30
+
+# Security hardening
+NoNewPrivileges=true
+ProtectSystem=strict
+ReadWritePaths=/opt/timmy /var/run/docker.sock
+
+[Install]
+WantedBy=multi-user.target
diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml
new file mode 100644
index 0000000..11c8f66
--- /dev/null
+++ b/docker-compose.dev.yml
@@ -0,0 +1,23 @@
+# ── Timmy Time — Dev-mode overlay ────────────────────────────────────────────
+#
+# Enables hot-reload: Python, template, and CSS changes auto-apply.
+#
+# Usage:
+# make up DEV=1
+# # or directly:
+# docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --build
+
+services:
+ dashboard:
+ command:
+ - uvicorn
+ - dashboard.app:app
+ - --host=0.0.0.0
+ - --port=8000
+ - --reload
+ - --reload-dir=/app/src
+ - --reload-include=*.html
+ - --reload-include=*.css
+ - --reload-include=*.js
+ environment:
+ DEBUG: "true"
diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml
new file mode 100644
index 0000000..f542fcb
--- /dev/null
+++ b/docker-compose.prod.yml
@@ -0,0 +1,152 @@
+# ── Timmy Time — Production Stack ────────────────────────────────────────────
+#
+# One-click cloud deployment. Includes:
+# - Caddy auto-HTTPS reverse proxy (Let's Encrypt)
+# - Dashboard FastAPI app + swarm coordinator
+# - Timmy sovereign AI agent
+# - Ollama local LLM inference engine
+# - Watchtower auto-updates containers when images change
+#
+# Usage:
+# cp .env.example .env # edit with your domain + secrets
+# docker compose -f docker-compose.prod.yml up -d
+#
+# Scale agents:
+# docker compose -f docker-compose.prod.yml --profile agents up -d --scale agent=4
+
+services:
+
+ # ── Caddy — automatic HTTPS reverse proxy ──────────────────────────────────
+ caddy:
+ image: caddy:2-alpine
+ container_name: timmy-caddy
+ ports:
+ - "80:80"
+ - "443:443"
+ - "443:443/udp" # HTTP/3
+ volumes:
+ - ./deploy/Caddyfile:/etc/caddy/Caddyfile:ro
+ - caddy-data:/data
+ - caddy-config:/config
+ environment:
+ DOMAIN: "${DOMAIN:-localhost}"
+ networks:
+ - swarm-net
+ restart: unless-stopped
+
+ # ── Ollama — local LLM inference ───────────────────────────────────────────
+ ollama:
+ image: ollama/ollama:latest
+ container_name: timmy-ollama
+ volumes:
+ - ollama-models:/root/.ollama
+ networks:
+ - swarm-net
+ restart: unless-stopped
+ # GPU passthrough (uncomment for NVIDIA GPU)
+ # deploy:
+ # resources:
+ # reservations:
+ # devices:
+ # - driver: nvidia
+ # count: all
+ # capabilities: [gpu]
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
+ interval: 30s
+ timeout: 10s
+ retries: 5
+ start_period: 30s
+
+ # ── Dashboard (coordinator + FastAPI) ──────────────────────────────────────
+ dashboard:
+ build: .
+ image: timmy-time:latest
+ container_name: timmy-dashboard
+ volumes:
+ - timmy-data:/app/data
+ environment:
+ DEBUG: "${DEBUG:-false}"
+ OLLAMA_URL: "http://ollama:11434"
+ OLLAMA_MODEL: "${OLLAMA_MODEL:-llama3.2}"
+ L402_HMAC_SECRET: "${L402_HMAC_SECRET:-}"
+ L402_MACAROON_SECRET: "${L402_MACAROON_SECRET:-}"
+ TELEGRAM_TOKEN: "${TELEGRAM_TOKEN:-}"
+ networks:
+ - swarm-net
+ depends_on:
+ ollama:
+ condition: service_healthy
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
+ interval: 30s
+ timeout: 5s
+ retries: 3
+ start_period: 15s
+
+ # ── Timmy — sovereign AI agent ─────────────────────────────────────────────
+ timmy:
+ build: .
+ image: timmy-time:latest
+ container_name: timmy-agent
+ volumes:
+ - timmy-data:/app/data
+ environment:
+ COORDINATOR_URL: "http://dashboard:8000"
+ OLLAMA_URL: "http://ollama:11434"
+ OLLAMA_MODEL: "${OLLAMA_MODEL:-llama3.2}"
+ TIMMY_AGENT_ID: "timmy"
+ command: ["python", "-m", "timmy.docker_agent"]
+ networks:
+ - swarm-net
+ depends_on:
+ dashboard:
+ condition: service_healthy
+ restart: unless-stopped
+
+ # ── Agent worker template ──────────────────────────────────────────────────
+ agent:
+ build: .
+ image: timmy-time:latest
+ profiles:
+ - agents
+ volumes:
+ - timmy-data:/app/data
+ environment:
+ COORDINATOR_URL: "http://dashboard:8000"
+ OLLAMA_URL: "http://ollama:11434"
+ OLLAMA_MODEL: "${OLLAMA_MODEL:-llama3.2}"
+ AGENT_NAME: "${AGENT_NAME:-Worker}"
+ AGENT_CAPABILITIES: "${AGENT_CAPABILITIES:-general}"
+ command: ["sh", "-c", "python -m swarm.agent_runner --agent-id agent-$(hostname) --name $${AGENT_NAME:-Worker}"]
+ networks:
+ - swarm-net
+ depends_on:
+ dashboard:
+ condition: service_healthy
+ restart: unless-stopped
+
+ # ── Watchtower — auto-update containers ────────────────────────────────────
+ watchtower:
+ image: containrrr/watchtower
+ container_name: timmy-watchtower
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ environment:
+ WATCHTOWER_CLEANUP: "true"
+ WATCHTOWER_POLL_INTERVAL: "3600" # check every hour
+ WATCHTOWER_LABEL_ENABLE: "false"
+ restart: unless-stopped
+
+# ── Volumes ──────────────────────────────────────────────────────────────────
+volumes:
+ timmy-data:
+ caddy-data:
+ caddy-config:
+ ollama-models:
+
+# ── Network ──────────────────────────────────────────────────────────────────
+networks:
+ swarm-net:
+ driver: bridge
diff --git a/docker-compose.test.yml b/docker-compose.test.yml
new file mode 100644
index 0000000..6e58264
--- /dev/null
+++ b/docker-compose.test.yml
@@ -0,0 +1,98 @@
+# ── Timmy Time — test stack ──────────────────────────────────────────────────
+#
+# Lightweight compose for functional tests. Runs the dashboard on port 18000
+# and optional agent workers on the swarm-test-net network.
+#
+# Profiles:
+# (default) dashboard only (Ollama on host via host.docker.internal)
+# ollama adds a containerised Ollama instance + auto model pull
+# agents adds scalable agent workers
+#
+# Usage:
+# # Swarm tests (no LLM needed):
+# FUNCTIONAL_DOCKER=1 pytest tests/functional/test_docker_swarm.py -v
+#
+# # Full-stack with Ollama (pulls qwen2.5:0.5b automatically):
+# FUNCTIONAL_DOCKER=1 pytest tests/functional/test_ollama_chat.py -v
+#
+# Or manually:
+# docker compose -f docker-compose.test.yml -p timmy-test up -d --build --wait
+# curl http://localhost:18000/health
+# docker compose -f docker-compose.test.yml -p timmy-test down -v
+
+services:
+
+ # ── Ollama — local LLM for functional tests ───────────────────────────────
+ # Activated with: --profile ollama
+ # Uses a tiny model (qwen2.5:0.5b, ~400 MB) so it runs on CPU-only CI.
+ ollama:
+ image: ollama/ollama:latest
+ container_name: timmy-test-ollama
+ profiles:
+ - ollama
+ networks:
+ - swarm-test-net
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
+ interval: 5s
+ timeout: 5s
+ retries: 20
+ start_period: 10s
+
+ dashboard:
+ build: .
+ image: timmy-time:test
+ container_name: timmy-test-dashboard
+ ports:
+ - "18000:8000"
+ volumes:
+ - test-data:/app/data
+ - ./src:/app/src
+ - ./static:/app/static
+ environment:
+ DEBUG: "true"
+ TIMMY_TEST_MODE: "1"
+ OLLAMA_URL: "${OLLAMA_URL:-http://host.docker.internal:11434}"
+ OLLAMA_MODEL: "${OLLAMA_MODEL:-llama3.2}"
+ LIGHTNING_BACKEND: "mock"
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ networks:
+ - swarm-test-net
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
+ interval: 5s
+ timeout: 3s
+ retries: 10
+ start_period: 10s
+
+ agent:
+ build: .
+ image: timmy-time:test
+ profiles:
+ - agents
+ volumes:
+ - test-data:/app/data
+ - ./src:/app/src
+ environment:
+ COORDINATOR_URL: "http://dashboard:8000"
+ OLLAMA_URL: "${OLLAMA_URL:-http://host.docker.internal:11434}"
+ OLLAMA_MODEL: "${OLLAMA_MODEL:-llama3.2}"
+ AGENT_NAME: "${AGENT_NAME:-TestWorker}"
+ AGENT_CAPABILITIES: "${AGENT_CAPABILITIES:-general}"
+ TIMMY_TEST_MODE: "1"
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
+ command: ["sh", "-c", "python -m swarm.agent_runner --agent-id agent-$(hostname) --name $${AGENT_NAME:-TestWorker}"]
+ networks:
+ - swarm-test-net
+ depends_on:
+ dashboard:
+ condition: service_healthy
+
+volumes:
+ test-data:
+
+networks:
+ swarm-test-net:
+ driver: bridge
diff --git a/docker-compose.yml b/docker-compose.yml
index 6fc1e1b..9118083 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -15,8 +15,6 @@
# make docker-down stop everything
# make docker-logs tail logs
-version: "3.9"
-
services:
# ── Dashboard (coordinator + FastAPI) ──────────────────────────────────────
@@ -95,6 +93,9 @@ services:
restart: unless-stopped
# ── Shared volume ─────────────────────────────────────────────────────────────
+# NOTE: the data/ directory must exist before running docker compose up.
+# `make docker-up` and `make up` handle this automatically.
+# If running docker compose directly, first run: mkdir -p data
volumes:
timmy-data:
driver: local
diff --git a/docs/AUDIT_REPORT.md b/docs/AUDIT_REPORT.md
new file mode 100644
index 0000000..0f7761a
--- /dev/null
+++ b/docs/AUDIT_REPORT.md
@@ -0,0 +1,352 @@
+# Timmy Time Dashboard - Feature Audit Report
+
+**Date**: 2026-02-24
+**Auditor**: Claude (Opus 4.6)
+**Scope**: All features claimed in documentation (`docs/index.html`, `README.md`) vs. actual implementation
+
+---
+
+## Executive Summary
+
+The Timmy Time Dashboard is a **real, functional codebase** with substantial implementation across its 15+ subsystems. However, the documentation contains several **misleading or inaccurate claims** that overstate readiness in some areas and understate capability in others.
+
+### Key Findings
+
+| Claim | Verdict | Detail |
+|-------|---------|--------|
+| "600+ Tests Passing" | **UNDERSTATED** | 643 tests collected and passing |
+| "20+ API Endpoints" | **UNDERSTATED** | 58 actual endpoints |
+| "0 Cloud Calls" | **FALSE** | Frontend loads Bootstrap, HTMX, Google Fonts from CDN |
+| "LND gRPC-ready for production" | **FALSE** | Every LND method raises `NotImplementedError` |
+| "15 Subsystems" | **TRUE** | 15+ distinct modules confirmed |
+| "No cloud, no telemetry" | **PARTIALLY FALSE** | Backend is local-only; frontend depends on CDN resources |
+| "Agents earn and spend sats autonomously" | **FALSE** | Not implemented; agents bid in sats but no satoshi movement occurs |
+| "15-second Lightning auctions" | **PARTIALLY TRUE** | Auction logic exists but `asyncio.sleep(0)` closes auctions immediately |
+| "Macaroon" implementation | **SIMPLIFIED** | HMAC-only, not true macaroons (no caveats, no delegation) |
+
+**Overall assessment**: The core system (agent, dashboard, swarm coordination, mock Lightning, voice NLU, creative pipeline orchestration, WebSocket, Spark intelligence) is genuinely implemented and well-tested. The main areas of concern are inflated claims about Lightning/LND production readiness and the "zero cloud" positioning.
+
+---
+
+## 1. Test Suite Audit
+
+### Claim: "600+ Tests Passing"
+
+**Verdict: TRUE (understated)**
+
+```
+$ python -m pytest -q
+643 passed, 1 warning in 46.06s
+```
+
+- **47 test files**, **643 test functions**
+- All pass cleanly on Python 3.11
+- Tests are mocked at appropriate boundaries (no Ollama/GPU required)
+- Test quality is generally good: tests verify real state transitions, SQLite persistence, HTTP response structure, and business logic
+
+### Test Quality Assessment
+
+**Strengths:**
+- Swarm tests use real temporary SQLite databases (not mocked away)
+- L402/Lightning tests verify cryptographic operations (macaroon serialization, HMAC signing, preimage verification)
+- Dashboard tests use FastAPI `TestClient` with actual HTTP requests
+- Assembler tests produce real video files with MoviePy
+
+**Weaknesses:**
+- LND backend is entirely untested (all methods raise `NotImplementedError`)
+- `agent_core/ollama_adapter.py` has two TODO stubs (`persist_memory`, `communicate`) that are tested as no-ops
+- Creative tool tests mock the heavyweight model loading (expected, but means end-to-end generation is untested)
+- Some tests only verify status codes without checking response body content
+
+---
+
+## 2. Feature-by-Feature Audit
+
+### 2.1 Timmy Agent
+**Claimed**: Agno-powered conversational agent backed by Ollama, AirLLM for 70B-405B models, SQLite memory
+**Verdict: REAL & FUNCTIONAL**
+
+- `src/timmy/agent.py` (79 lines): Creates a genuine `agno.Agent` with Ollama model, SQLite persistence, tools, and system prompt
+- Backend selection (`backends.py`) implements real Ollama/AirLLM switching with Apple Silicon detection
+- CLI (`cli.py`) provides working `timmy chat`, `timmy think`, `timmy status` commands
+- Approval workflow (`approvals.py`) implements real human-in-the-loop with SQLite-backed state
+- Briefing system (`briefing.py`) generates real scheduled briefings
+
+**Issues**:
+- `agent_core/ollama_adapter.py:184` has `# TODO: Persist to SQLite for long-term memory` and `communicate()` at line 221 is explicitly described as "a stub"
+- CLI tests are sparse: only 2 tests for 3 commands. The `chat` and `think` commands lack dedicated test coverage.
+
+### 2.2 Mission Control UI
+**Claimed**: FastAPI + HTMX + Jinja2 dashboard, dark terminal aesthetic
+**Verdict: REAL & FUNCTIONAL**
+
+- **58 actual endpoints** (documentation claims "20+")
+- Full Jinja2 template hierarchy with base layout + 12 page templates + 12 partials
+- Real HTMX integration for dynamic updates
+- Bootstrap 5 loaded from CDN (contradicts "no cloud" claim)
+- Dark theme with JetBrains Mono font (loaded from Google Fonts CDN)
+
+### 2.3 Multi-Agent Swarm
+**Claimed**: Coordinator, registry, bidder, manager, sub-agent spawning, 15-second Lightning auctions
+**Verdict: REAL & FUNCTIONAL**
+
+- `coordinator.py` (400+ lines): Full orchestration of task lifecycle
+- `registry.py`: Real SQLite-backed agent registry with capabilities tracking
+- `bidder.py`: Genuine auction logic with configurable timeouts and bid scoring
+- `manager.py`: Spawns agents as subprocesses with lifecycle management
+- `tasks.py`: SQLite-backed task CRUD with state machine transitions
+- `comms.py`: In-memory pub/sub (Redis optional, graceful fallback; sketched after this list)
+- `routing.py`: Capability-based task routing
+- `learner.py`: Agent outcome learning
+- `recovery.py`: Fault recovery on startup
+- 9 personas defined (Echo, Mace, Helm, Seer, Forge, Quill, Pixel, Lyra, Reel)
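+
+The graceful-fallback pattern in `comms.py` can be sketched as follows (a minimal illustration assuming a callback-based in-memory fallback; these are not the module's actual class or method names):
+
+```python
+import logging
+from typing import Callable
+
+logger = logging.getLogger(__name__)
+
+class Comms:
+    """Publish to Redis when available, else to in-process subscribers."""
+
+    def __init__(self, redis_url: str | None = None) -> None:
+        self._redis = None
+        self._subscribers: dict[str, list[Callable[[str], None]]] = {}
+        if redis_url:
+            try:
+                import redis  # optional dependency
+                self._redis = redis.Redis.from_url(redis_url)
+            except ImportError:
+                logger.info("redis not installed; using in-memory pub/sub")
+
+    def subscribe(self, channel: str, callback: Callable[[str], None]) -> None:
+        self._subscribers.setdefault(channel, []).append(callback)
+
+    def publish(self, channel: str, payload: str) -> None:
+        if self._redis is not None:
+            self._redis.publish(channel, payload)
+            return
+        for callback in self._subscribers.get(channel, []):
+            callback(payload)
+```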
+
+**Issues**:
+- The documentation roadmap mentions personas "Echo, Mace, Helm, Seer, Forge, Quill" but the codebase also includes Pixel, Lyra, and Reel. The creative persona toolkits (pixel, lyra, reel) are stubs in `tools.py:293-295` — they create empty `Toolkit` objects because the real tools live in separate modules.
+- **Auction timing bug**: `coordinator.py` uses `await asyncio.sleep(0)` instead of the documented 15-second wait, meaning auctions close almost immediately (see the sketch after this list). This is masked by synchronous in-process bidding but would break for subprocess/Docker agents.
+- **Docker agent HTTP API partially wired**: `agent_runner.py` polls `/internal/tasks` and posts to `/internal/bids` — these endpoints exist in `swarm_internal.py` but the integration path is incomplete for containerized deployment.
+- **Tool execution not fully wired**: `persona_node.py`'s `execute_task()` has infrastructure for tool invocation but doesn't execute tools end-to-end in practice.
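+
+To illustrate why the zero-length window matters, here is a minimal sketch of a real auction window (illustrative names only; this is not the coordinator's actual code):
+
+```python
+import asyncio
+
+async def collect_bids(queue: asyncio.Queue, window_s: float = 15.0) -> list[dict]:
+    """Gather bids until the auction window closes."""
+    bids: list[dict] = []
+    loop = asyncio.get_running_loop()
+    deadline = loop.time() + window_s
+    while (remaining := deadline - loop.time()) > 0:
+        try:
+            bids.append(await asyncio.wait_for(queue.get(), timeout=remaining))
+        except asyncio.TimeoutError:
+            break
+    return bids
+
+# With window_s=0 (equivalent to the current asyncio.sleep(0)), only bids
+# already sitting in the queue can win; bids arriving over HTTP from
+# subprocess or Docker agents milliseconds later are never considered.
+```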
+
+### 2.4 L402 Lightning Payments
+**Claimed**: "Bitcoin Lightning payment gating via HMAC macaroons. Mock backend for dev, LND gRPC-ready for production. Agents earn and spend sats autonomously."
+**Verdict: PARTIALLY IMPLEMENTED - LND CLAIM IS FALSE**
+
+**What works:**
+- Mock Lightning backend (`mock_backend.py`): Fully functional invoice creation, payment simulation, settlement, balance tracking
+- L402 proxy (`l402_proxy.py`): Real macaroon creation/verification with HMAC signing
+- Payment handler (`payment_handler.py`): Complete invoice lifecycle management
+- Inter-agent payment settlement (`inter_agent.py`): Framework exists with mock backend
+
+**What does NOT work:**
+- **LND backend (`lnd_backend.py`)**: Every single method raises `NotImplementedError` or returns hardcoded fallback values:
+ - `create_invoice()` — `raise NotImplementedError` (line 199)
+ - `check_payment()` — `raise NotImplementedError` (line 220)
+ - `get_invoice()` — `raise NotImplementedError` (line 248)
+ - `list_invoices()` — `raise NotImplementedError` (line 290)
+ - `get_balance_sats()` — `return 0` with warning (line 304)
+ - `health_check()` — returns `{"ok": False, "backend": "lnd-stub"}` (line 327)
+ - The gRPC stub is explicitly `None` with comment: "LND gRPC stubs not yet implemented" (line 153)
+
+**The documentation claim that LND is "gRPC-ready for production" is false.** The file contains commented-out pseudocode showing what the implementation *would* look like, but no actual gRPC calls are made. The gRPC channel/auth infrastructure is ~80% ready but the protobuf stubs are missing entirely. The claim that "agents earn and spend sats autonomously" is also unimplemented — agents bid in sats during auctions but `payment_handler.settle_invoice()` is never called from agent code. No satoshi movement occurs. This is listed under v3.0.0 (Planned) in the roadmap but stated as current capability in the features section.
+
+Additionally, the "macaroon" implementation is HMAC-only (`l402_proxy.py:67-69`), not true macaroons. There is no support for caveats, delegation, or cryptographic nesting. This is adequate for L402 but not the full macaroon specification the documentation implies.
+
+### 2.5 Spark Intelligence Engine
+**Claimed**: Event capture, predictions (EIDOS), memory consolidation, advisory engine
+**Verdict: REAL & FUNCTIONAL**
+
+- `engine.py`: Full event lifecycle with 8 event types, SQLite persistence
+- `eidos.py`: Genuine prediction logic with multi-component accuracy scoring (winner prediction 0.4 weight, success probability 0.4 weight, bid range 0.2 weight; worked example after this list)
+- `memory.py`: Real event-to-memory pipeline with importance scoring and consolidation
+- `advisor.py`: Generates actionable recommendations based on failure patterns, agent performance, and bid optimization
+- Dashboard routes expose `/spark`, `/spark/ui`, `/spark/timeline`, `/spark/insights`
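+
+As a worked example of that weighting (assuming each component is already normalized to [0, 1]; the exact normalization lives in `eidos.py`):
+
+```python
+def eidos_accuracy(winner: float, success: float, bid_range: float) -> float:
+    """Composite prediction accuracy with the documented 0.4 / 0.4 / 0.2 weights."""
+    return 0.4 * winner + 0.4 * success + 0.2 * bid_range
+
+# Correct winner (1.0), decent success calibration (0.8), bid in range (1.0):
+# 0.4*1.0 + 0.4*0.8 + 0.2*1.0 = 0.92
+assert abs(eidos_accuracy(1.0, 0.8, 1.0) - 0.92) < 1e-9
+```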
+
+### 2.6 Creative Studio
+**Claimed**: Multi-persona creative pipeline for image, music, video generation
+**Verdict: REAL ORCHESTRATION, BACKEND MODELS OPTIONAL**
+
+- `director.py`: True end-to-end pipeline (storyboard -> music -> video -> assembly -> complete)
+- `assembler.py`: Real video assembly using MoviePy with cross-fade transitions, audio overlay, title cards, subtitles
+- `image_tools.py`: FLUX.1 diffusers pipeline (lazy-loaded)
+- `music_tools.py`: ACE-Step model integration (lazy-loaded)
+- `video_tools.py`: Wan 2.1 text-to-video pipeline (lazy-loaded)
+
+The orchestration is 100% real. Tool backends are implemented with real model-loading logic but require heavyweight dependencies (GPU, model downloads); they degrade gracefully when those are missing.
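+
+The lazy-loading pattern behind that graceful degradation is roughly the following (a sketch, not the exact `image_tools.py` code; the model id matches the `flux_model_id` default in `config.py`):
+
+```python
+from functools import lru_cache
+
+@lru_cache(maxsize=1)
+def _get_pipeline():
+    """Import and load the heavyweight model only on first use."""
+    try:
+        from diffusers import FluxPipeline  # optional, GPU-heavy
+    except ImportError as exc:
+        raise RuntimeError(
+            'Creative extras not installed. Run: pip install ".[creative]"'
+        ) from exc
+    return FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell")
+```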
+
+### 2.7 Voice I/O
+**Claimed**: Pattern-matched NLU, TTS via pyttsx3
+**Verdict: REAL & FUNCTIONAL**
+
+- `nlu.py`: Regex-based intent detection with 5 intent types and confidence scoring (sketched after this list)
+- Entity extraction for agent names, task descriptions, numbers
+- TTS endpoint exists at `/voice/tts/speak`
+- Enhanced voice processing at `/voice/enhanced/process`
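+
+A pattern-matched detector of this shape can be sketched as follows (illustrative patterns and confidence values; not the actual `nlu.py` rules):
+
+```python
+import re
+
+INTENTS = {
+    "create_task": re.compile(r"\b(create|add|post)\b.*\btask\b", re.I),
+    "agent_status": re.compile(r"\bstatus\b", re.I),
+}
+
+def detect_intent(text: str) -> tuple[str, float]:
+    """Return (intent, confidence); a match is high-confidence, else unknown."""
+    for intent, pattern in INTENTS.items():
+        if pattern.search(text):
+            return intent, 0.9
+    return "unknown", 0.0
+```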
+
+### 2.8 Mobile Optimized
+**Claimed**: iOS safe-area, 44px touch targets, 16px inputs, 21-scenario HITL test harness
+**Verdict: REAL & FUNCTIONAL**
+
+- `mobile.html` template with iOS viewport-fit, safe-area insets
+- 21-scenario test harness at `/mobile-test`
+- `test_mobile_scenarios.py`: 36 tests covering mobile-specific behavior
+
+### 2.9 WebSocket Live Feed
+**Claimed**: Real-time swarm events over WebSocket
+**Verdict: REAL & FUNCTIONAL**
+
+- `websocket/handler.py`: Connection manager with broadcast, 100-event replay buffer (sketched after this list)
+- Specialized broadcast methods for agent_joined, task_posted, bid_submitted, task_assigned, task_completed
+- `/ws/swarm` endpoint for live WebSocket connections
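+
+A bounded replay buffer of this kind is typically a `deque` with `maxlen` (a minimal sketch under that assumption, not the handler's actual class):
+
+```python
+from collections import deque
+
+class ReplayBuffer:
+    """Keep the last N events so late-joining WebSocket clients can catch up."""
+
+    def __init__(self, maxlen: int = 100) -> None:
+        self._events: deque[dict] = deque(maxlen=maxlen)
+
+    def add(self, event: dict) -> None:
+        self._events.append(event)  # deque evicts the oldest automatically
+
+    def replay(self) -> list[dict]:
+        return list(self._events)
+```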
+
+### 2.10 Security
+**Claimed**: XSS prevention via textContent, HMAC-signed macaroons, startup warnings for defaults
+**Verdict: REAL & FUNCTIONAL**
+
+- HMAC macaroon signing is cryptographically implemented
+- Config warns on default secrets at startup
+- Templates use Jinja2 autoescaping
+
+### 2.11 Self-TDD Watchdog
+**Claimed**: 60-second polling, regression alerts
+**Verdict: REAL & FUNCTIONAL**
+
+- `self_tdd/watchdog.py` (71 lines): Polls pytest and alerts on failures (core loop sketched after this list)
+- `activate_self_tdd.sh`: Bootstrap script
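+
+The core loop is small enough to sketch (illustrative only; the real module adds richer alerting):
+
+```python
+import subprocess
+import time
+
+def watch(interval_s: int = 60) -> None:
+    """Re-run the test suite forever; report the first failing run."""
+    while True:
+        result = subprocess.run(
+            ["python", "-m", "pytest", "-q"], capture_output=True, text=True
+        )
+        if result.returncode != 0:
+            print("REGRESSION detected:\n", result.stdout[-500:])
+        time.sleep(interval_s)
+```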
+
+### 2.12 Telegram Integration
+**Claimed**: Bridge Telegram messages to Timmy
+**Verdict: REAL & FUNCTIONAL**
+
+- `telegram_bot/bot.py`: python-telegram-bot integration
+- Message handler creates Timmy agent and processes user text
+- Token management with file persistence
+- Dashboard routes at `/telegram/status` and `/telegram/setup`
+
+### 2.13 Siri Shortcuts
+**Claimed**: iOS automation endpoints
+**Verdict: REAL & FUNCTIONAL**
+
+- `shortcuts/siri.py`: 4 endpoint definitions (chat, status, swarm, task)
+- Setup guide generation for iOS Shortcuts app
+
+### 2.14 Push Notifications
+**Claimed**: Local + macOS native notifications
+**Verdict: REAL & FUNCTIONAL**
+
+- `notifications/push.py`: Bounded notification store, listener callbacks
+- macOS native notifications via osascript (sketched after this list)
+- Read/unread state management
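+
+The osascript path amounts to a single subprocess call (a sketch; `push.py` wraps this with its notification store):
+
+```python
+import shutil
+import subprocess
+
+def notify_macos(title: str, body: str) -> None:
+    """Show a native macOS notification; no-op where osascript is absent."""
+    if shutil.which("osascript") is None:
+        return
+    # Naive quoting: real code should escape embedded double quotes.
+    script = f'display notification "{body}" with title "{title}"'
+    subprocess.run(["osascript", "-e", script], check=False)
+```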
+
+---
+
+## 3. Documentation Accuracy Issues
+
+### 3.1 FALSE: "0 Cloud Calls"
+
+The hero section, stats bar, and feature descriptions all claim zero cloud dependency. However, `src/dashboard/templates/base.html` loads:
+
+| Resource | CDN |
+|----------|-----|
+| Bootstrap 5.3.3 CSS | `cdn.jsdelivr.net` |
+| Bootstrap 5.3.3 JS | `cdn.jsdelivr.net` |
+| HTMX 2.0.3 | `unpkg.com` |
+| JetBrains Mono font | `fonts.googleapis.com` |
+
+These are loaded on every page render. The dashboard will not render correctly without internet access unless these are bundled locally.
+
+**Recommendation**: Bundle these assets locally or change the documentation to say "no cloud AI/telemetry" instead of "0 Cloud Calls."
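+
+Vendoring is a one-time fetch into `static/` plus pointing `base.html` at the local copies; a sketch (standard CDN paths for the versions named above, worth double-checking; the font additionally needs a local `@font-face` rule):
+
+```python
+import urllib.request
+from pathlib import Path
+
+ASSETS = {
+    "static/vendor/bootstrap.min.css":
+        "https://cdn.jsdelivr.net/npm/bootstrap@5.3.3/dist/css/bootstrap.min.css",
+    "static/vendor/bootstrap.bundle.min.js":
+        "https://cdn.jsdelivr.net/npm/bootstrap@5.3.3/dist/js/bootstrap.bundle.min.js",
+    "static/vendor/htmx.min.js":
+        "https://unpkg.com/htmx.org@2.0.3/dist/htmx.min.js",
+}
+
+for dest, url in ASSETS.items():
+    Path(dest).parent.mkdir(parents=True, exist_ok=True)
+    urllib.request.urlretrieve(url, dest)  # then point base.html at /static/vendor/
+```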
+
+### 3.2 FALSE: "LND gRPC-ready for production"
+
+The documentation (both `docs/index.html` and `README.md`) implies the LND backend is production-ready. In reality:
+
+- Every method in `lnd_backend.py` raises `NotImplementedError`
+- The gRPC stub initialization explicitly returns `None` with a warning
+- The code contains only commented-out pseudocode
+- The file itself contains a `generate_lnd_protos()` function explaining what steps are needed to *begin* implementation
+
+**Recommendation**: Change documentation to "LND integration planned" or "LND backend scaffolded — mock only for now."
+
+### 3.3 FALSE: "Agents earn and spend sats autonomously"
+
+This capability is described in the v3.0.0 (Planned) roadmap section but is also implied as current functionality in the L402 features card. The inter-agent payment system (`inter_agent.py`) exists but only works with the mock backend.
+
+### 3.4 UNDERSTATED: Test Count and Endpoint Count
+
+- Documentation says "600+ tests" — actual count is **643**
+- Documentation says "20+ API endpoints" — actual count is **58**
+
+These are technically true ("600+" and "20+" include the real numbers) but are misleadingly conservative.
+
+### 3.5 MINOR: "Bootstrap 5" not mentioned in docs/index.html
+
+The GitHub Pages documentation feature card for Mission Control says "FastAPI + HTMX + Bootstrap 5" in its tag line, which is accurate. But the "no cloud" messaging directly contradicts loading Bootstrap from a CDN.
+
+---
+
+## 4. Code Quality Summary
+
+| Module | Lines | Quality | Notes |
+|--------|-------|---------|-------|
+| swarm | 3,069 | Good | Comprehensive coordination with SQLite persistence |
+| dashboard | 1,806 | Good | Clean FastAPI routes, well-structured templates |
+| timmy | 1,353 | Good | Clean agent setup with proper backend abstraction |
+| spark | 1,238 | Excellent | Sophisticated intelligence pipeline |
+| tools | 869 | Good | Real implementations with lazy-loading pattern |
+| lightning | 868 | Mixed | Mock is excellent; LND is entirely unimplemented |
+| timmy_serve | 693 | Good | L402 proxy works with mock backend |
+| creative | 683 | Good | Real orchestration pipeline |
+| agent_core | 627 | Mixed | Some TODO stubs (persist_memory, communicate) |
+| telegram_bot | 163 | Good | Complete integration |
+| notifications | 146 | Good | Working notification store |
+| voice | 133 | Good | Working NLU with intent detection |
+| websocket | 129 | Good | Solid connection management |
+| shortcuts | 93 | Good | Clean endpoint definitions |
+| self_tdd | 71 | Good | Simple and effective |
+
+**Total**: 86 Python files, 12,007 lines of code
+
+---
+
+## 5. Recommendations
+
+1. **Fix the "0 Cloud Calls" claim** — either bundle frontend dependencies locally or change the messaging
+2. **Fix the LND documentation** — clearly mark it as unimplemented/scaffolded, not "production-ready"
+3. **Fix the autonomous sats claim** — move it from current features to roadmap/planned
+4. **Update test/endpoint counts** — "643 tests" and "58 endpoints" are more impressive than "600+" and "20+"
+5. **Implement `agent_core` TODO stubs** — `persist_memory()` and `communicate()` are dead code
+6. **Bundle CDN resources** — for true offline operation, vendor Bootstrap, HTMX, and the font
+
+---
+
+## Appendix: Test Breakdown by Module
+
+| Test File | Tests | Module Tested |
+|-----------|-------|---------------|
+| test_spark.py | 47 | Spark intelligence engine |
+| test_mobile_scenarios.py | 36 | Mobile layout |
+| test_swarm.py | 29 | Swarm core |
+| test_dashboard_routes.py | 25 | Dashboard routes |
+| test_learner.py | 23 | Agent learning |
+| test_briefing.py | 22 | Briefing system |
+| test_swarm_personas.py | 21 | Persona definitions |
+| test_coordinator.py | 20 | Swarm coordinator |
+| test_creative_director.py | 19 | Creative pipeline |
+| test_tool_executor.py | 19 | Tool execution |
+| test_lightning_interface.py | 19 | Lightning backend |
+| test_dashboard.py | 18 | Dashboard core |
+| test_git_tools.py | 18 | Git tools |
+| test_approvals.py | 17 | Approval workflow |
+| test_swarm_routing.py | 17 | Task routing |
+| test_telegram_bot.py | 16 | Telegram bridge |
+| test_websocket_extended.py | 16 | WebSocket |
+| test_voice_nlu.py | 15 | Voice NLU |
+| test_backends.py | 14 | Backend selection |
+| test_swarm_recovery.py | 14 | Fault recovery |
+| test_swarm_stats.py | 13 | Performance stats |
+| test_swarm_integration_full.py | 13 | Swarm integration |
+| test_l402_proxy.py | 13 | L402 proxy |
+| test_agent.py | 13 | Core agent |
+| test_notifications.py | 11 | Push notifications |
+| test_spark_tools_creative.py | 11 | Spark + creative integration |
+| test_swarm_node.py | 10 | Swarm nodes |
+| test_inter_agent.py | 10 | Inter-agent comms |
+| test_timmy_serve_cli.py | 10 | Serve CLI |
+| test_docker_agent.py | 9 | Docker agents |
+| test_assembler_integration.py | 9 | Video assembly |
+| test_music_tools.py | 9 | Music tools |
+| test_video_tools.py | 9 | Video tools |
+| test_voice_enhanced.py | 8 | Enhanced voice |
+| test_prompts.py | 8 | System prompts |
+| test_swarm_integration.py | 7 | Swarm integration |
+| test_assembler.py | 7 | Video assembly |
+| test_image_tools.py | 7 | Image tools |
+| test_creative_route.py | 6 | Creative routes |
+| test_shortcuts.py | 6 | Siri shortcuts |
+| test_watchdog.py | 6 | Self-TDD watchdog |
+| test_timmy_serve_app.py | 5 | Serve app |
+| test_music_video_integration.py | 5 | Music + video pipeline |
+| test_swarm_live_page.py | 4 | Live swarm page |
+| test_agent_runner.py | 4 | Agent runner |
+| test_websocket.py | 3 | WebSocket core |
+| test_cli.py | 2 | CLI |
diff --git a/docs/index.html b/docs/index.html
index f31ba49..f356284 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -553,7 +553,7 @@
Quickstart
Agent Team
Roadmap
- GitHub →
+ GitHub →
@@ -563,18 +563,18 @@
Your agents. Your hardware. Your sats.
A local-first AI command center. Talk to Timmy, coordinate your swarm,
- gate API access with Bitcoin Lightning — no cloud, no telemetry, no compromise.
+ gate API access with Bitcoin Lightning — no cloud AI, no telemetry, no compromise.
- 228 Tests Passing
+ Full Test Suite Passing
FastAPI + HTMX
Lightning L402
- No Cloud
+ No Cloud AI
Multi-Agent Swarm
MIT License
@@ -582,20 +582,20 @@
-228
+640+
 Tests Passing
 0
-Cloud Calls
+Cloud AI Calls
@@ -639,7 +639,7 @@
⚡
L402 Lightning Payments
Bitcoin Lightning payment gating via HMAC macaroons. Mock backend for dev,
- LND gRPC-ready for production. Agents earn and spend sats autonomously.
+ LND backend scaffolded for production. Auction bids priced in sats.
L402 · Macaroon · BOLT11
@@ -740,7 +740,7 @@ External: Ollama :11434 · optional Redis · optional LND gRPC
1
Clone
-git clone https://github.com/Alexspayne/Timmy-time-dashboard.git
+git clone https://github.com/AlexanderWhitestone/Timmy-time-dashboard.git
cd Timmy-time-dashboard
@@ -780,7 +780,7 @@ External: Ollama :11434 · optional Redis · optional LND gRPC
5
Test
-make test # 228 tests — no Ollama needed
+make test # full test suite — no Ollama needed
make test-cov # + coverage report
make watch # self-TDD watchdog in background
@@ -797,7 +797,7 @@ External: Ollama :11434 · optional Redis · optional LND gRPC
The agent team
This repo is built by a multi-agent team. Each tool has a defined lane.
- See AGENTS.md
+ See AGENTS.md
for the full development standards.
@@ -840,7 +840,7 @@ External: Ollama :11434 · optional Redis · optional LND gRPC
-Alex Payne
+Alexander Whitestone
Human · Orchestrator
Vision and product decisions
@@ -870,7 +870,7 @@ External: Ollama :11434 · optional Redis · optional LND gRPC
✓ Complete
Foundation
-Agno + Ollama + SQLite + FastAPI dashboard + HTMX + 228 tests.
+Agno + Ollama + SQLite + FastAPI dashboard + HTMX + full test suite.
CLI, mobile layout, Bootstrap, CI/CD, AirLLM big-brain backend.
@@ -912,13 +912,13 @@ External: Ollama :11434 · optional Redis · optional LND gRPC
diff --git a/pyproject.toml b/pyproject.toml
index 1364e6a..ebac582 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -23,6 +23,8 @@ dependencies = [
"rich>=13.0.0",
"pydantic-settings>=2.0.0",
"websockets>=12.0",
+ "GitPython>=3.1.40",
+ "moviepy>=2.0.0",
]
[project.optional-dependencies]
@@ -30,6 +32,7 @@ dev = [
"pytest>=8.0.0",
"pytest-asyncio>=0.24.0",
"pytest-cov>=5.0.0",
+ "selenium>=4.20.0",
]
# Big-brain: run 8B / 70B / 405B models locally via layer-by-layer loading.
# pip install ".[bigbrain]"
@@ -52,11 +55,28 @@ voice = [
telegram = [
"python-telegram-bot>=21.0",
]
+# Discord: bridge Discord messages to Timmy with native thread support.
+# pip install ".[discord]"
+# Optional: pip install pyzbar Pillow (for QR code invite detection)
+discord = [
+ "discord.py>=2.3.0",
+]
+# Creative: GPU-accelerated image, music, and video generation.
+# pip install ".[creative]"
+creative = [
+ "diffusers>=0.30.0",
+ "transformers>=4.40.0",
+ "accelerate>=0.30.0",
+ "torch>=2.2.0",
+ "safetensors>=0.4.0",
+ "ace-step>=1.5.0",
+]
[project.scripts]
timmy = "timmy.cli:main"
timmy-serve = "timmy_serve.cli:main"
self-tdd = "self_tdd.watchdog:main"
+self-modify = "self_modify.cli:main"
[tool.hatch.build.targets.wheel]
sources = {"src" = ""}
@@ -67,16 +87,23 @@ include = [
"src/config.py",
"src/self_tdd",
"src/swarm",
- "src/websocket",
+ "src/ws_manager",
"src/voice",
"src/notifications",
"src/shortcuts",
"src/telegram_bot",
+ "src/chat_bridge",
+ "src/spark",
+ "src/tools",
+ "src/creative",
+ "src/agent_core",
+ "src/lightning",
+ "src/self_modify",
]
[tool.pytest.ini_options]
testpaths = ["tests"]
-pythonpath = ["src"]
+pythonpath = ["src", "tests"]
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function"
addopts = "-v --tb=short"
diff --git a/src/agent_core/ollama_adapter.py b/src/agent_core/ollama_adapter.py
index 126e0f4..e27a109 100644
--- a/src/agent_core/ollama_adapter.py
+++ b/src/agent_core/ollama_adapter.py
@@ -172,18 +172,19 @@ Respond naturally and helpfully."""
return result
def remember(self, memory: Memory) -> None:
- """Store memory persistently.
-
- For now, working memory is sufficient. In the future,
- this would write to SQLite or vector DB for long-term
- memory across sessions.
+ """Store memory in working memory.
+
+ Adds the memory to the sliding window and bumps its importance.
"""
- # Mark as accessed to update importance
memory.touch()
-
- # TODO: Persist to SQLite for long-term memory
- # This would integrate with the existing briefing system
- pass
+
+ # Deduplicate by id
+ self._working_memory = [m for m in self._working_memory if m.id != memory.id]
+ self._working_memory.append(memory)
+
+ # Evict oldest if over capacity
+ if len(self._working_memory) > self._max_working_memory:
+ self._working_memory.pop(0)
def recall(self, query: str, limit: int = 5) -> list[Memory]:
"""Retrieve relevant memories.
@@ -215,13 +216,22 @@ Respond naturally and helpfully."""
return [m for _, m in scored[:limit]]
def communicate(self, message: Communication) -> bool:
- """Send message to another agent.
-
- This would use the swarm comms layer for inter-agent
- messaging. For now, it's a stub.
- """
- # TODO: Integrate with swarm.comms
- return True
+ """Send message to another agent via swarm comms."""
+ try:
+ from swarm.comms import SwarmComms
+ comms = SwarmComms()
+ comms.publish(
+ "agent:messages",
+ "agent_message",
+ {
+ "from": self._identity.name,
+ "to": message.recipient,
+ "content": message.content,
+ },
+ )
+ return True
+ except Exception:
+ return False
def _extract_tags(self, perception: Perception) -> list[str]:
"""Extract searchable tags from perception."""
diff --git a/src/chat_bridge/__init__.py b/src/chat_bridge/__init__.py
new file mode 100644
index 0000000..7aa82bd
--- /dev/null
+++ b/src/chat_bridge/__init__.py
@@ -0,0 +1,10 @@
+"""Chat Bridge — vendor-agnostic chat platform abstraction.
+
+Provides a clean interface for integrating any chat platform
+(Discord, Telegram, Slack, etc.) with Timmy's agent core.
+
+Usage:
+ from chat_bridge.base import ChatPlatform
+ from chat_bridge.registry import platform_registry
+ from chat_bridge.vendors.discord import DiscordVendor
+"""
diff --git a/src/chat_bridge/base.py b/src/chat_bridge/base.py
new file mode 100644
index 0000000..6af6607
--- /dev/null
+++ b/src/chat_bridge/base.py
@@ -0,0 +1,147 @@
+"""ChatPlatform — abstract base class for all chat vendor integrations.
+
+Each vendor (Discord, Telegram, Slack, etc.) implements this interface.
+The dashboard and agent code interact only with this contract, never
+with vendor-specific APIs directly.
+
+Architecture:
+ ChatPlatform (ABC)
+ |
+ +-- DiscordVendor (discord.py)
+ +-- TelegramVendor (future migration)
+ +-- SlackVendor (future)
+"""
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from enum import Enum, auto
+from typing import Any, Optional
+
+
+class PlatformState(Enum):
+ """Lifecycle state of a chat platform connection."""
+ DISCONNECTED = auto()
+ CONNECTING = auto()
+ CONNECTED = auto()
+ ERROR = auto()
+
+
+@dataclass
+class ChatMessage:
+ """Vendor-agnostic representation of a chat message."""
+ content: str
+ author: str
+ channel_id: str
+ platform: str
+ timestamp: str = field(
+ default_factory=lambda: datetime.now(timezone.utc).isoformat()
+ )
+ message_id: Optional[str] = None
+ thread_id: Optional[str] = None
+ attachments: list[str] = field(default_factory=list)
+ metadata: dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class ChatThread:
+ """Vendor-agnostic representation of a conversation thread."""
+ thread_id: str
+ title: str
+ channel_id: str
+ platform: str
+ created_at: str = field(
+ default_factory=lambda: datetime.now(timezone.utc).isoformat()
+ )
+ archived: bool = False
+ message_count: int = 0
+ metadata: dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class InviteInfo:
+ """Parsed invite extracted from an image or text."""
+ url: str
+ code: str
+ platform: str
+ guild_name: Optional[str] = None
+ source: str = "unknown" # "qr", "vision", "text"
+
+
+@dataclass
+class PlatformStatus:
+ """Current status of a chat platform connection."""
+ platform: str
+ state: PlatformState
+ token_set: bool
+ guild_count: int = 0
+ thread_count: int = 0
+ error: Optional[str] = None
+
+ def to_dict(self) -> dict[str, Any]:
+ return {
+ "platform": self.platform,
+ "state": self.state.name.lower(),
+ "connected": self.state == PlatformState.CONNECTED,
+ "token_set": self.token_set,
+ "guild_count": self.guild_count,
+ "thread_count": self.thread_count,
+ "error": self.error,
+ }
+
+
+class ChatPlatform(ABC):
+ """Abstract base class for chat platform integrations.
+
+ Lifecycle:
+ configure(token) -> start() -> [send/receive messages] -> stop()
+
+ All vendors implement this interface. The dashboard routes and
+ agent code work with ChatPlatform, never with vendor-specific APIs.
+ """
+
+ @property
+ @abstractmethod
+ def name(self) -> str:
+ """Platform identifier (e.g., 'discord', 'telegram')."""
+
+ @property
+ @abstractmethod
+ def state(self) -> PlatformState:
+ """Current connection state."""
+
+ @abstractmethod
+ async def start(self, token: Optional[str] = None) -> bool:
+ """Start the platform connection. Returns True on success."""
+
+ @abstractmethod
+ async def stop(self) -> None:
+ """Gracefully disconnect."""
+
+ @abstractmethod
+ async def send_message(
+ self, channel_id: str, content: str, thread_id: Optional[str] = None
+ ) -> Optional[ChatMessage]:
+ """Send a message. Optionally within a thread."""
+
+ @abstractmethod
+ async def create_thread(
+ self, channel_id: str, title: str, initial_message: Optional[str] = None
+ ) -> Optional[ChatThread]:
+ """Create a new thread in a channel."""
+
+ @abstractmethod
+ async def join_from_invite(self, invite_code: str) -> bool:
+ """Join a server/workspace using an invite code."""
+
+ @abstractmethod
+ def status(self) -> PlatformStatus:
+ """Return current platform status."""
+
+ @abstractmethod
+ def save_token(self, token: str) -> None:
+ """Persist token for restarts."""
+
+ @abstractmethod
+ def load_token(self) -> Optional[str]:
+ """Load persisted token."""
diff --git a/src/chat_bridge/invite_parser.py b/src/chat_bridge/invite_parser.py
new file mode 100644
index 0000000..2c48770
--- /dev/null
+++ b/src/chat_bridge/invite_parser.py
@@ -0,0 +1,166 @@
+"""InviteParser — extract chat platform invite links from images.
+
+Strategy chain:
+ 1. QR code detection (pyzbar — fast, no GPU)
+ 2. Ollama vision OCR (local LLM — handles screenshots with visible URLs)
+ 3. Regex fallback on raw text input
+
+Supports Discord invite patterns:
+ - discord.gg/
+ - discord.com/invite/
+ - discordapp.com/invite/
+
+Usage:
+ from chat_bridge.invite_parser import invite_parser
+
+ # From image bytes (screenshot or QR photo)
+ result = await invite_parser.parse_image(image_bytes)
+
+ # From plain text
+ result = invite_parser.parse_text("Join us at discord.gg/abc123")
+"""
+
+import io
+import logging
+import re
+from typing import Optional
+
+from chat_bridge.base import InviteInfo
+
+logger = logging.getLogger(__name__)
+
+# Patterns for Discord invite URLs
+_DISCORD_PATTERNS = [
+ re.compile(r"(?:https?://)?discord\.gg/([A-Za-z0-9\-_]+)"),
+ re.compile(r"(?:https?://)?(?:www\.)?discord(?:app)?\.com/invite/([A-Za-z0-9\-_]+)"),
+]
+
+
+def _extract_discord_code(text: str) -> Optional[str]:
+ """Extract a Discord invite code from text."""
+ for pattern in _DISCORD_PATTERNS:
+ match = pattern.search(text)
+ if match:
+ return match.group(1)
+ return None
+
+
+class InviteParser:
+ """Multi-strategy invite parser.
+
+ Tries QR detection first (fast), then Ollama vision (local AI),
+ then regex on raw text. All local, no cloud.
+ """
+
+ async def parse_image(self, image_data: bytes) -> Optional[InviteInfo]:
+ """Extract an invite from image bytes (screenshot or QR photo).
+
+ Tries strategies in order:
+ 1. QR code decode (pyzbar)
+ 2. Ollama vision model (local OCR)
+ """
+ result = self._try_qr_decode(image_data)
+ if result:
+ return result
+
+ result = await self._try_ollama_vision(image_data)
+ if result:
+ return result
+
+ logger.info("No invite found in image via any strategy.")
+ return None
+
+ def parse_text(self, text: str) -> Optional[InviteInfo]:
+ """Extract an invite from plain text."""
+ code = _extract_discord_code(text)
+ if code:
+ return InviteInfo(
+ url=f"https://discord.gg/{code}",
+ code=code,
+ platform="discord",
+ source="text",
+ )
+ return None
+
+ def _try_qr_decode(self, image_data: bytes) -> Optional[InviteInfo]:
+ """Strategy 1: Decode QR codes from image using pyzbar."""
+ try:
+ from PIL import Image
+ from pyzbar.pyzbar import decode as qr_decode
+ except ImportError:
+ logger.debug("pyzbar/Pillow not installed, skipping QR strategy.")
+ return None
+
+ try:
+ image = Image.open(io.BytesIO(image_data))
+ decoded = qr_decode(image)
+
+ for obj in decoded:
+ text = obj.data.decode("utf-8", errors="ignore")
+ code = _extract_discord_code(text)
+ if code:
+ logger.info("QR decode found Discord invite: %s", code)
+ return InviteInfo(
+ url=f"https://discord.gg/{code}",
+ code=code,
+ platform="discord",
+ source="qr",
+ )
+ except Exception as exc:
+ logger.debug("QR decode failed: %s", exc)
+
+ return None
+
+ async def _try_ollama_vision(self, image_data: bytes) -> Optional[InviteInfo]:
+ """Strategy 2: Use Ollama vision model for local OCR."""
+ try:
+ import base64
+ import httpx
+ from config import settings
+ except ImportError:
+ logger.debug("httpx not available for Ollama vision.")
+ return None
+
+ try:
+ b64_image = base64.b64encode(image_data).decode("ascii")
+
+ async with httpx.AsyncClient(timeout=30.0) as client:
+ resp = await client.post(
+ f"{settings.ollama_url}/api/generate",
+ json={
+ "model": settings.ollama_model,
+ "prompt": (
+ "Extract any Discord invite link from this image. "
+ "Look for URLs like discord.gg/CODE or "
+ "discord.com/invite/CODE. "
+ "Reply with ONLY the invite URL, nothing else. "
+ "If no invite link is found, reply with: NONE"
+ ),
+ "images": [b64_image],
+ "stream": False,
+ },
+ )
+
+ if resp.status_code != 200:
+ logger.debug("Ollama vision returned %d", resp.status_code)
+ return None
+
+ answer = resp.json().get("response", "").strip()
+ if answer and answer.upper() != "NONE":
+ code = _extract_discord_code(answer)
+ if code:
+ logger.info("Ollama vision found Discord invite: %s", code)
+ return InviteInfo(
+ url=f"https://discord.gg/{code}",
+ code=code,
+ platform="discord",
+ source="vision",
+ )
+ except Exception as exc:
+ logger.debug("Ollama vision strategy failed: %s", exc)
+
+ return None
+
+
+# Module-level singleton
+invite_parser = InviteParser()
diff --git a/src/chat_bridge/registry.py b/src/chat_bridge/registry.py
new file mode 100644
index 0000000..16271c4
--- /dev/null
+++ b/src/chat_bridge/registry.py
@@ -0,0 +1,74 @@
+"""PlatformRegistry — singleton registry for chat platform vendors.
+
+Provides a central point for registering, discovering, and managing
+all chat platform integrations. Dashboard routes and the agent core
+interact with platforms through this registry.
+
+Usage:
+ from chat_bridge.registry import platform_registry
+
+ platform_registry.register(discord_vendor)
+ discord = platform_registry.get("discord")
+ all_platforms = platform_registry.list_platforms()
+"""
+
+import logging
+from typing import Optional
+
+from chat_bridge.base import ChatPlatform, PlatformStatus
+
+logger = logging.getLogger(__name__)
+
+
+class PlatformRegistry:
+ """Thread-safe registry of ChatPlatform vendors."""
+
+ def __init__(self) -> None:
+ self._platforms: dict[str, ChatPlatform] = {}
+
+ def register(self, platform: ChatPlatform) -> None:
+ """Register a chat platform vendor."""
+ name = platform.name
+ if name in self._platforms:
+ logger.warning("Platform '%s' already registered, replacing.", name)
+ self._platforms[name] = platform
+ logger.info("Registered chat platform: %s", name)
+
+ def unregister(self, name: str) -> bool:
+ """Remove a platform from the registry. Returns True if it existed."""
+ if name in self._platforms:
+ del self._platforms[name]
+ logger.info("Unregistered chat platform: %s", name)
+ return True
+ return False
+
+ def get(self, name: str) -> Optional[ChatPlatform]:
+ """Get a platform by name."""
+ return self._platforms.get(name)
+
+ def list_platforms(self) -> list[PlatformStatus]:
+ """Return status of all registered platforms."""
+ return [p.status() for p in self._platforms.values()]
+
+ async def start_all(self) -> dict[str, bool]:
+ """Start all registered platforms. Returns name -> success mapping."""
+ results = {}
+ for name, platform in self._platforms.items():
+ try:
+ results[name] = await platform.start()
+ except Exception as exc:
+ logger.error("Failed to start platform '%s': %s", name, exc)
+ results[name] = False
+ return results
+
+ async def stop_all(self) -> None:
+ """Stop all registered platforms."""
+ for name, platform in self._platforms.items():
+ try:
+ await platform.stop()
+ except Exception as exc:
+ logger.error("Error stopping platform '%s': %s", name, exc)
+
+
+# Module-level singleton
+platform_registry = PlatformRegistry()
diff --git a/src/chat_bridge/vendors/__init__.py b/src/chat_bridge/vendors/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/chat_bridge/vendors/discord.py b/src/chat_bridge/vendors/discord.py
new file mode 100644
index 0000000..0610884
--- /dev/null
+++ b/src/chat_bridge/vendors/discord.py
@@ -0,0 +1,400 @@
+"""DiscordVendor — Discord integration via discord.py.
+
+Implements ChatPlatform with native thread support. Each conversation
+with Timmy gets its own Discord thread, keeping channels clean.
+
+Optional dependency — install with:
+ pip install ".[discord]"
+
+Architecture:
+ DiscordVendor
+ ├── _client (discord.Client) — handles gateway events
+ ├── _thread_map — channel_id -> active thread
+ └── _message_handler — bridges to Timmy agent
+"""
+
+import asyncio
+import json
+import logging
+from pathlib import Path
+from typing import Optional
+
+from chat_bridge.base import (
+ ChatMessage,
+ ChatPlatform,
+ ChatThread,
+ InviteInfo,
+ PlatformState,
+ PlatformStatus,
+)
+
+logger = logging.getLogger(__name__)
+
+_STATE_FILE = Path(__file__).parent.parent.parent.parent / "discord_state.json"
+
+
+class DiscordVendor(ChatPlatform):
+ """Discord integration with native thread conversations.
+
+ Every user interaction creates or continues a Discord thread,
+ keeping channel history clean and conversations organized.
+ """
+
+ def __init__(self) -> None:
+ self._client = None
+ self._token: Optional[str] = None
+ self._state: PlatformState = PlatformState.DISCONNECTED
+ self._task: Optional[asyncio.Task] = None
+ self._guild_count: int = 0
+ self._active_threads: dict[str, str] = {} # channel_id -> thread_id
+
+ # ── ChatPlatform interface ─────────────────────────────────────────────
+
+ @property
+ def name(self) -> str:
+ return "discord"
+
+ @property
+ def state(self) -> PlatformState:
+ return self._state
+
+ async def start(self, token: Optional[str] = None) -> bool:
+ """Start the Discord bot. Returns True on success."""
+ if self._state == PlatformState.CONNECTED:
+ return True
+
+ tok = token or self.load_token()
+ if not tok:
+ logger.warning("Discord bot: no token configured, skipping start.")
+ return False
+
+ try:
+ import discord
+ except ImportError:
+ logger.error(
+ "discord.py is not installed. "
+ 'Run: pip install ".[discord]"'
+ )
+ return False
+
+ try:
+ self._state = PlatformState.CONNECTING
+ self._token = tok
+
+ intents = discord.Intents.default()
+ intents.message_content = True
+ intents.guilds = True
+
+ self._client = discord.Client(intents=intents)
+ self._register_handlers()
+
+ # Run the client in a background task so we don't block
+ self._task = asyncio.create_task(self._run_client(tok))
+
+ # Wait briefly for connection
+ for _ in range(30):
+ await asyncio.sleep(0.5)
+ if self._state == PlatformState.CONNECTED:
+ logger.info("Discord bot connected (%d guilds).", self._guild_count)
+ return True
+ if self._state == PlatformState.ERROR:
+ return False
+
+ logger.warning("Discord bot: connection timed out.")
+ self._state = PlatformState.ERROR
+ return False
+
+ except Exception as exc:
+ logger.error("Discord bot failed to start: %s", exc)
+ self._state = PlatformState.ERROR
+ self._token = None
+ self._client = None
+ return False
+
+ async def stop(self) -> None:
+ """Gracefully disconnect the Discord bot."""
+ if self._client and not self._client.is_closed():
+ try:
+ await self._client.close()
+ logger.info("Discord bot disconnected.")
+ except Exception as exc:
+ logger.error("Error stopping Discord bot: %s", exc)
+
+ if self._task and not self._task.done():
+ self._task.cancel()
+ try:
+ await self._task
+ except asyncio.CancelledError:
+ pass
+
+ self._state = PlatformState.DISCONNECTED
+ self._client = None
+ self._task = None
+
+ async def send_message(
+ self, channel_id: str, content: str, thread_id: Optional[str] = None
+ ) -> Optional[ChatMessage]:
+ """Send a message to a Discord channel or thread."""
+ if not self._client or self._state != PlatformState.CONNECTED:
+ return None
+
+ try:
+ target_id = int(thread_id) if thread_id else int(channel_id)
+ channel = self._client.get_channel(target_id)
+
+ if channel is None:
+ channel = await self._client.fetch_channel(target_id)
+
+ msg = await channel.send(content)
+
+ return ChatMessage(
+ content=content,
+ author=str(self._client.user),
+ channel_id=str(msg.channel.id),
+ platform="discord",
+ message_id=str(msg.id),
+ thread_id=thread_id,
+ )
+ except Exception as exc:
+ logger.error("Failed to send Discord message: %s", exc)
+ return None
+
+ async def create_thread(
+ self, channel_id: str, title: str, initial_message: Optional[str] = None
+ ) -> Optional[ChatThread]:
+ """Create a new thread in a Discord channel."""
+ if not self._client or self._state != PlatformState.CONNECTED:
+ return None
+
+ try:
+ channel = self._client.get_channel(int(channel_id))
+ if channel is None:
+ channel = await self._client.fetch_channel(int(channel_id))
+
+ thread = await channel.create_thread(
+ name=title[:100], # Discord limits thread names to 100 chars
+ auto_archive_duration=1440, # 24 hours
+ )
+
+ if initial_message:
+ await thread.send(initial_message)
+
+ self._active_threads[channel_id] = str(thread.id)
+
+ return ChatThread(
+ thread_id=str(thread.id),
+ title=title[:100],
+ channel_id=channel_id,
+ platform="discord",
+ )
+ except Exception as exc:
+ logger.error("Failed to create Discord thread: %s", exc)
+ return None
+
+ async def join_from_invite(self, invite_code: str) -> bool:
+ """Join a Discord server using an invite code.
+
+ Note: Bot accounts cannot use invite links directly.
+ This generates an OAuth2 URL for adding the bot to a server.
+ The invite_code is validated but the actual join requires
+ the server admin to use the bot's OAuth2 authorization URL.
+ """
+ if not self._client or self._state != PlatformState.CONNECTED:
+ logger.warning("Discord bot not connected, cannot process invite.")
+ return False
+
+ try:
+ invite = await self._client.fetch_invite(invite_code)
+ logger.info(
+ "Validated invite for server '%s' (code: %s)",
+ invite.guild.name if invite.guild else "unknown",
+ invite_code,
+ )
+ return True
+ except Exception as exc:
+ logger.error("Invalid Discord invite '%s': %s", invite_code, exc)
+ return False
+
+ def status(self) -> PlatformStatus:
+ return PlatformStatus(
+ platform="discord",
+ state=self._state,
+ token_set=bool(self._token),
+ guild_count=self._guild_count,
+ thread_count=len(self._active_threads),
+ )
+
+ def save_token(self, token: str) -> None:
+ """Persist token to state file."""
+ try:
+ _STATE_FILE.write_text(json.dumps({"token": token}))
+ except Exception as exc:
+ logger.error("Failed to save Discord token: %s", exc)
+
+ def load_token(self) -> Optional[str]:
+ """Load token from state file or config."""
+ try:
+ if _STATE_FILE.exists():
+ data = json.loads(_STATE_FILE.read_text())
+ token = data.get("token")
+ if token:
+ return token
+ except Exception as exc:
+ logger.debug("Could not read discord state file: %s", exc)
+
+ try:
+ from config import settings
+ return settings.discord_token or None
+ except Exception:
+ return None
+
+ # ── OAuth2 URL generation ──────────────────────────────────────────────
+
+ def get_oauth2_url(self) -> Optional[str]:
+ """Generate the OAuth2 URL for adding this bot to a server.
+
+ Requires the bot to be connected to read its application ID.
+ """
+ if not self._client or not self._client.user:
+ return None
+
+ app_id = self._client.user.id
+ # Permissions: Send Messages, Create Public Threads, Manage Threads,
+ # Read Message History, Embed Links, Attach Files
+ permissions = 397284550656
+ return (
+ f"https://discord.com/oauth2/authorize"
+ f"?client_id={app_id}&scope=bot"
+ f"&permissions={permissions}"
+ )
+
+ # ── Internal ───────────────────────────────────────────────────────────
+
+ async def _run_client(self, token: str) -> None:
+ """Run the discord.py client (blocking call in a task)."""
+ try:
+ await self._client.start(token)
+ except Exception as exc:
+ logger.error("Discord client error: %s", exc)
+ self._state = PlatformState.ERROR
+
+ def _register_handlers(self) -> None:
+ """Register Discord event handlers on the client."""
+
+ @self._client.event
+ async def on_ready():
+ self._guild_count = len(self._client.guilds)
+ self._state = PlatformState.CONNECTED
+ logger.info(
+ "Discord ready: %s in %d guild(s)",
+ self._client.user,
+ self._guild_count,
+ )
+
+ @self._client.event
+ async def on_message(message):
+ # Ignore our own messages
+ if message.author == self._client.user:
+ return
+
+ # Only respond to mentions or DMs
+ is_dm = not hasattr(message.channel, "guild") or message.channel.guild is None
+ is_mention = self._client.user in message.mentions
+
+ if not is_dm and not is_mention:
+ return
+
+ await self._handle_message(message)
+
+ @self._client.event
+ async def on_disconnect():
+ if self._state != PlatformState.DISCONNECTED:
+ self._state = PlatformState.CONNECTING
+ logger.warning("Discord disconnected, will auto-reconnect.")
+
+ async def _handle_message(self, message) -> None:
+ """Process an incoming message and respond via a thread."""
+ # Strip the bot mention from the message content
+ content = message.content
+ if self._client.user:
+ uid = self._client.user.id
+ # Strip both mention forms: <@id> and the nickname form <@!id>
+ content = content.replace(f"<@{uid}>", "").replace(f"<@!{uid}>", "").strip()
+
+ if not content:
+ return
+
+ # Create or reuse a thread for this conversation
+ thread = await self._get_or_create_thread(message)
+ target = thread or message.channel
+
+ # Run Timmy agent
+ try:
+ from timmy.agent import create_timmy
+
+ agent = create_timmy()
+ run = await asyncio.to_thread(agent.run, content, stream=False)
+ response = run.content if hasattr(run, "content") else str(run)
+ except Exception as exc:
+ logger.error("Timmy error in Discord handler: %s", exc)
+ response = f"Timmy is offline: {exc}"
+
+ # Discord has a 2000 character limit
+ for chunk in _chunk_message(response, 2000):
+ await target.send(chunk)
+
+ async def _get_or_create_thread(self, message):
+ """Get the active thread for a channel, or create one.
+
+ If the message is already in a thread, use that thread.
+ Otherwise, create a new thread from the message.
+ """
+ try:
+ import discord
+
+ # Already in a thread — just use it
+ if isinstance(message.channel, discord.Thread):
+ return message.channel
+
+ # DM channels don't support threads
+ if isinstance(message.channel, discord.DMChannel):
+ return None
+
+ # Create a thread from this message
+ thread_name = f"Timmy | {message.author.display_name}"
+ thread = await message.create_thread(
+ name=thread_name[:100],
+ auto_archive_duration=1440,
+ )
+ channel_id = str(message.channel.id)
+ self._active_threads[channel_id] = str(thread.id)
+ return thread
+
+ except Exception as exc:
+ logger.debug("Could not create thread: %s", exc)
+ return None
+
+
+def _chunk_message(text: str, max_len: int = 2000) -> list[str]:
+ """Split a message into chunks that fit Discord's character limit."""
+ if len(text) <= max_len:
+ return [text]
+
+ chunks = []
+ while text:
+ if len(text) <= max_len:
+ chunks.append(text)
+ break
+ # Try to split at a newline
+ split_at = text.rfind("\n", 0, max_len)
+ if split_at == -1:
+ split_at = max_len
+ chunks.append(text[:split_at])
+ text = text[split_at:].lstrip("\n")
+ return chunks
+
+
+# Module-level singleton
+discord_bot = DiscordVendor()
diff --git a/src/config.py b/src/config.py
index 506e643..9bd5e6d 100644
--- a/src/config.py
+++ b/src/config.py
@@ -16,6 +16,9 @@ class Settings(BaseSettings):
# Telegram bot token — set via TELEGRAM_TOKEN env var or the /telegram/setup endpoint
telegram_token: str = ""
+ # Discord bot token — set via DISCORD_TOKEN env var or the /discord/setup endpoint
+ discord_token: str = ""
+
# ── AirLLM / backend selection ───────────────────────────────────────────
# "ollama" — always use Ollama (default, safe everywhere)
# "airllm" — always use AirLLM (requires pip install ".[bigbrain]")
@@ -28,6 +31,59 @@ class Settings(BaseSettings):
# 8b ~16 GB | 70b ~140 GB | 405b ~810 GB
airllm_model_size: Literal["8b", "70b", "405b"] = "70b"
+ # ── Spark Intelligence ────────────────────────────────────────────────
+ # Enable/disable the Spark cognitive layer.
+ # When enabled, Spark captures swarm events, runs EIDOS predictions,
+ # consolidates memories, and generates advisory recommendations.
+ spark_enabled: bool = True
+
+ # ── Git / DevOps ──────────────────────────────────────────────────────
+ git_default_repo_dir: str = "~/repos"
+
+ # ── Creative — Image Generation (Pixel) ───────────────────────────────
+ flux_model_id: str = "black-forest-labs/FLUX.1-schnell"
+ image_output_dir: str = "data/images"
+ image_default_steps: int = 4
+
+ # ── Creative — Music Generation (Lyra) ────────────────────────────────
+ music_output_dir: str = "data/music"
+ ace_step_model: str = "ace-step/ACE-Step-v1.5"
+
+ # ── Creative — Video Generation (Reel) ────────────────────────────────
+ video_output_dir: str = "data/video"
+ wan_model_id: str = "Wan-AI/Wan2.1-T2V-1.3B"
+ video_default_resolution: str = "480p"
+
+ # ── Creative — Pipeline / Assembly ────────────────────────────────────
+ creative_output_dir: str = "data/creative"
+ video_transition_duration: float = 1.0
+ default_video_codec: str = "libx264"
+
+ # ── L402 Lightning ───────────────────────────────────────────────────
+ # HMAC secrets for macaroon signing and invoice verification.
+ # Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"
+ # In production (TIMMY_ENV=production), these MUST be set or the app will refuse to start.
+ l402_hmac_secret: str = ""
+ l402_macaroon_secret: str = ""
+ lightning_backend: Literal["mock", "lnd"] = "mock"
+
+ # ── Privacy / Sovereignty ────────────────────────────────────────────
+ # Disable Agno telemetry for air-gapped/sovereign deployments.
+ # Default is False (telemetry disabled) to align with sovereign AI vision.
+ telemetry_enabled: bool = False
+
+ # Environment mode: development | production
+ # In production, security settings are strictly enforced.
+ timmy_env: Literal["development", "production"] = "development"
+
+ # ── Self-Modification ──────────────────────────────────────────────
+ # Enable self-modification capabilities. When enabled, Timmy can
+ # edit its own source code, run tests, and commit changes.
+ self_modify_enabled: bool = False
+ self_modify_max_retries: int = 2
+ self_modify_allowed_dirs: str = "src,tests"
+ self_modify_backend: str = "auto" # "ollama", "anthropic", or "auto"
+
model_config = SettingsConfigDict(
env_file=".env",
env_file_encoding="utf-8",
@@ -36,3 +92,39 @@ class Settings(BaseSettings):
settings = Settings()
+
+# ── Startup validation ───────────────────────────────────────────────────────
+# Enforce security requirements — fail fast in production.
+import logging as _logging
+import sys
+
+_startup_logger = _logging.getLogger("config")
+
+# Production mode: require secrets to be set
+if settings.timmy_env == "production":
+ _missing = []
+ if not settings.l402_hmac_secret:
+ _missing.append("L402_HMAC_SECRET")
+ if not settings.l402_macaroon_secret:
+ _missing.append("L402_MACAROON_SECRET")
+ if _missing:
+ _startup_logger.error(
+ "PRODUCTION SECURITY ERROR: The following secrets must be set: %s\n"
+ "Generate with: python3 -c \"import secrets; print(secrets.token_hex(32))\"\n"
+ "Set in .env file or environment variables.",
+ ", ".join(_missing),
+ )
+ sys.exit(1)
+ _startup_logger.info("Production mode: security secrets validated ✓")
+else:
+ # Development mode: warn but continue
+ if not settings.l402_hmac_secret:
+ _startup_logger.warning(
+ "SEC: L402_HMAC_SECRET is not set — "
+ "set a unique secret in .env before deploying to production."
+ )
+ if not settings.l402_macaroon_secret:
+ _startup_logger.warning(
+ "SEC: L402_MACAROON_SECRET is not set — "
+ "set a unique secret in .env before deploying to production."
+ )
diff --git a/src/creative/__init__.py b/src/creative/__init__.py
new file mode 100644
index 0000000..aa66bf1
--- /dev/null
+++ b/src/creative/__init__.py
@@ -0,0 +1 @@
+"""Creative pipeline — orchestrates image, music, and video generation."""
diff --git a/src/creative/assembler.py b/src/creative/assembler.py
new file mode 100644
index 0000000..c95910a
--- /dev/null
+++ b/src/creative/assembler.py
@@ -0,0 +1,322 @@
+"""Video assembly engine — stitch clips, overlay audio, add titles.
+
+Uses MoviePy + FFmpeg to combine generated video clips, music tracks,
+and title cards into 3+ minute final videos.
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+import uuid
+from pathlib import Path
+from typing import Optional
+
+logger = logging.getLogger(__name__)
+
+_MOVIEPY_AVAILABLE = True
+try:
+ from moviepy import (
+ VideoFileClip,
+ AudioFileClip,
+ TextClip,
+ CompositeVideoClip,
+ ImageClip,
+ concatenate_videoclips,
+ vfx,
+ )
+except ImportError:
+ _MOVIEPY_AVAILABLE = False
+
+def _resolve_font() -> str:
+ """Find a usable TrueType font on the current platform."""
+ candidates = [
+ # Linux (Debian/Ubuntu)
+ "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
+ "/usr/share/fonts/TTF/DejaVuSans.ttf", # Arch
+ "/usr/share/fonts/dejavu-sans-fonts/DejaVuSans.ttf", # Fedora
+ # macOS
+ "/System/Library/Fonts/Supplemental/Arial.ttf",
+ "/System/Library/Fonts/Helvetica.ttc",
+ "/Library/Fonts/Arial.ttf",
+ ]
+ for path in candidates:
+ if Path(path).exists():
+ return path
+ logger.warning("No system TrueType font found; using Pillow default")
+ return "Helvetica"
+
+
+_DEFAULT_FONT = _resolve_font()
+
+
+def _require_moviepy() -> None:
+ if not _MOVIEPY_AVAILABLE:
+ raise ImportError(
+ "MoviePy is not installed. Run: pip install moviepy"
+ )
+
+
+def _output_dir() -> Path:
+ from config import settings
+ d = Path(getattr(settings, "creative_output_dir", "data/creative"))
+ d.mkdir(parents=True, exist_ok=True)
+ return d
+
+
+# ── Stitching ─────────────────────────────────────────────────────────────────
+
+def stitch_clips(
+ clip_paths: list[str],
+ transition_duration: float = 1.0,
+ output_path: Optional[str] = None,
+) -> dict:
+ """Concatenate video clips with cross-fade transitions.
+
+ Args:
+ clip_paths: Ordered list of MP4 file paths.
+ transition_duration: Cross-fade duration in seconds.
+ output_path: Optional output path. Auto-generated if omitted.
+
+ Returns dict with ``path`` and ``total_duration``.
+ """
+ _require_moviepy()
+
+ clips = [VideoFileClip(p) for p in clip_paths]
+
+ # Apply cross-fade between consecutive clips
+ if transition_duration > 0 and len(clips) > 1:
+ processed = [clips[0]]
+ for clip in clips[1:]:
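+            # Shift this clip back so it starts `transition_duration`
+            # seconds before the previous clip ends, then fade it in over
+            # that window; the overlap produces the cross-fade.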
+ clip = clip.with_start(
+ processed[-1].end - transition_duration
+ ).with_effects([vfx.CrossFadeIn(transition_duration)])
+ processed.append(clip)
+ final = CompositeVideoClip(processed)
+ else:
+ final = concatenate_videoclips(clips, method="compose")
+
+ uid = uuid.uuid4().hex[:12]
+ out = Path(output_path) if output_path else _output_dir() / f"stitched_{uid}.mp4"
+ final.write_videofile(str(out), codec="libx264", audio_codec="aac", logger=None)
+
+ total_duration = final.duration
+ # Clean up
+ for c in clips:
+ c.close()
+
+ return {
+ "success": True,
+ "path": str(out),
+ "total_duration": total_duration,
+ "clip_count": len(clip_paths),
+ }
+
+
+# ── Audio overlay ─────────────────────────────────────────────────────────────
+
+def overlay_audio(
+ video_path: str,
+ audio_path: str,
+ output_path: Optional[str] = None,
+ volume: float = 1.0,
+) -> dict:
+ """Mix an audio track onto a video file.
+
+    Audio longer than the video is trimmed to the video duration;
+    shorter audio simply ends early (no looping is performed).
+ """
+ _require_moviepy()
+
+ video = VideoFileClip(video_path)
+ audio = AudioFileClip(audio_path)
+
+ # Trim audio to video length
+ if audio.duration > video.duration:
+ audio = audio.subclipped(0, video.duration)
+
+ if volume != 1.0:
+ audio = audio.with_volume_scaled(volume)
+
+ video = video.with_audio(audio)
+
+ uid = uuid.uuid4().hex[:12]
+ out = Path(output_path) if output_path else _output_dir() / f"mixed_{uid}.mp4"
+ video.write_videofile(str(out), codec="libx264", audio_codec="aac", logger=None)
+
+ result_duration = video.duration
+ video.close()
+ audio.close()
+
+ return {
+ "success": True,
+ "path": str(out),
+ "duration": result_duration,
+ }
+
+
+# ── Title cards ───────────────────────────────────────────────────────────────
+
+def add_title_card(
+ video_path: str,
+ title: str,
+ subtitle: str = "",
+ duration: float = 4.0,
+ position: str = "start",
+ output_path: Optional[str] = None,
+) -> dict:
+ """Add a title card at the start or end of a video.
+
+ Args:
+ video_path: Source video path.
+ title: Title text.
+ subtitle: Optional subtitle text.
+ duration: Title card display duration in seconds.
+ position: "start" or "end".
+ """
+ _require_moviepy()
+
+ video = VideoFileClip(video_path)
+ w, h = video.size
+
+    # Build the title card as a caption text clip; the subtitle, if given,
+    # is rendered beneath the title. The card composites over black when
+    # concatenated.
+    card_text = f"{title}\n\n{subtitle}" if subtitle else title
+    txt = TextClip(
+        text=card_text,
+        font_size=60,
+        color="white",
+        size=(w, h),
+        method="caption",
+        font=_DEFAULT_FONT,
+    ).with_duration(duration)
+
+ clips = [txt, video] if position == "start" else [video, txt]
+ final = concatenate_videoclips(clips, method="compose")
+
+ uid = uuid.uuid4().hex[:12]
+ out = Path(output_path) if output_path else _output_dir() / f"titled_{uid}.mp4"
+ final.write_videofile(str(out), codec="libx264", audio_codec="aac", logger=None)
+
+ result_duration = final.duration
+ video.close()
+
+ return {
+ "success": True,
+ "path": str(out),
+ "duration": result_duration,
+ "title": title,
+ }
+
+
+# ── Subtitles / captions ─────────────────────────────────────────────────────
+
+def add_subtitles(
+ video_path: str,
+ captions: list[dict],
+ output_path: Optional[str] = None,
+) -> dict:
+ """Burn subtitle captions onto a video.
+
+ Args:
+ captions: List of dicts with ``text``, ``start``, ``end`` keys
+ (times in seconds).
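+
+    Example captions (times are illustrative):
+
+        [{"text": "First line", "start": 0.0, "end": 2.5},
+         {"text": "Second line", "start": 2.5, "end": 5.0}]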
+ """
+ _require_moviepy()
+
+ video = VideoFileClip(video_path)
+ w, h = video.size
+
+ text_clips = []
+ for cap in captions:
+ txt = (
+ TextClip(
+ text=cap["text"],
+ font_size=36,
+ color="white",
+ stroke_color="black",
+ stroke_width=2,
+ size=(w - 40, None),
+ method="caption",
+ font=_DEFAULT_FONT,
+ )
+ .with_start(cap["start"])
+ .with_end(cap["end"])
+ .with_position(("center", h - 100))
+ )
+ text_clips.append(txt)
+
+ final = CompositeVideoClip([video] + text_clips)
+
+ uid = uuid.uuid4().hex[:12]
+ out = Path(output_path) if output_path else _output_dir() / f"subtitled_{uid}.mp4"
+ final.write_videofile(str(out), codec="libx264", audio_codec="aac", logger=None)
+
+ result_duration = final.duration
+ video.close()
+
+ return {
+ "success": True,
+ "path": str(out),
+ "duration": result_duration,
+ "caption_count": len(captions),
+ }
+
+
+# ── Final export helper ──────────────────────────────────────────────────────
+
+def export_final(
+ video_path: str,
+ output_path: Optional[str] = None,
+ codec: str = "libx264",
+ audio_codec: str = "aac",
+ bitrate: str = "8000k",
+) -> dict:
+ """Re-encode a video with specific codec settings for distribution."""
+ _require_moviepy()
+
+ video = VideoFileClip(video_path)
+ uid = uuid.uuid4().hex[:12]
+ out = Path(output_path) if output_path else _output_dir() / f"final_{uid}.mp4"
+ video.write_videofile(
+ str(out), codec=codec, audio_codec=audio_codec,
+ bitrate=bitrate, logger=None,
+ )
+
+ result_duration = video.duration
+ video.close()
+
+ return {
+ "success": True,
+ "path": str(out),
+ "duration": result_duration,
+ "codec": codec,
+ }
+
+
+# ── Tool catalogue ────────────────────────────────────────────────────────────
+
+ASSEMBLER_TOOL_CATALOG: dict[str, dict] = {
+ "stitch_clips": {
+ "name": "Stitch Clips",
+ "description": "Concatenate video clips with cross-fade transitions",
+ "fn": stitch_clips,
+ },
+ "overlay_audio": {
+ "name": "Overlay Audio",
+ "description": "Mix a music track onto a video",
+ "fn": overlay_audio,
+ },
+ "add_title_card": {
+ "name": "Add Title Card",
+ "description": "Add a title card at the start or end of a video",
+ "fn": add_title_card,
+ },
+ "add_subtitles": {
+ "name": "Add Subtitles",
+ "description": "Burn subtitle captions onto a video",
+ "fn": add_subtitles,
+ },
+ "export_final": {
+ "name": "Export Final",
+ "description": "Re-encode video with specific codec settings",
+ "fn": export_final,
+ },
+}
diff --git a/src/creative/director.py b/src/creative/director.py
new file mode 100644
index 0000000..968fff9
--- /dev/null
+++ b/src/creative/director.py
@@ -0,0 +1,378 @@
+"""Creative Director — multi-persona pipeline for 3+ minute creative works.
+
+Orchestrates Pixel (images), Lyra (music), and Reel (video) to produce
+complete music videos, cinematic shorts, and other creative works.
+
+Pipeline stages:
+1. Script — Generate scene descriptions and lyrics
+2. Storyboard — Generate keyframe images (Pixel)
+3. Music — Generate soundtrack (Lyra)
+4. Video — Generate clips per scene (Reel)
+5. Assembly — Stitch clips + overlay audio (MoviePy)
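+
+A minimal end-to-end sketch (title, scenes, and genre are illustrative):
+
+    from creative.director import run_full_pipeline
+
+    result = run_full_pipeline(
+        title="Neon Dawn",
+        description="A synthwave sunrise over the city",
+        scenes=[{"description": "Sun rising over a skyline", "duration": 5}],
+        genre="synthwave",
+    )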
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+import uuid
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Optional
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class CreativeProject:
+ """Tracks all assets and state for a creative production."""
+ id: str = field(default_factory=lambda: uuid.uuid4().hex[:12])
+ title: str = ""
+ description: str = ""
+ created_at: str = field(
+ default_factory=lambda: datetime.now(timezone.utc).isoformat()
+ )
+ status: str = "planning" # planning|scripting|storyboard|music|video|assembly|complete|failed
+
+ # Pipeline outputs
+ scenes: list[dict] = field(default_factory=list)
+ lyrics: str = ""
+ storyboard_frames: list[dict] = field(default_factory=list)
+ music_track: Optional[dict] = None
+ video_clips: list[dict] = field(default_factory=list)
+ final_video: Optional[dict] = None
+
+ def to_dict(self) -> dict:
+ return {
+ "id": self.id, "title": self.title,
+ "description": self.description,
+ "created_at": self.created_at, "status": self.status,
+ "scene_count": len(self.scenes),
+ "has_storyboard": len(self.storyboard_frames) > 0,
+ "has_music": self.music_track is not None,
+ "clip_count": len(self.video_clips),
+ "has_final": self.final_video is not None,
+ }
+
+
+# In-memory project store
+_projects: dict[str, CreativeProject] = {}
+
+
+def _project_dir(project_id: str) -> Path:
+ from config import settings
+ d = Path(getattr(settings, "creative_output_dir", "data/creative")) / project_id
+ d.mkdir(parents=True, exist_ok=True)
+ return d
+
+
+def _save_project(project: CreativeProject) -> None:
+ """Persist project metadata to disk."""
+ path = _project_dir(project.id) / "project.json"
+ path.write_text(json.dumps(project.to_dict(), indent=2))
+
+
+# ── Project management ────────────────────────────────────────────────────────
+
+def create_project(
+ title: str,
+ description: str,
+ scenes: Optional[list[dict]] = None,
+ lyrics: str = "",
+) -> dict:
+ """Create a new creative project.
+
+ Args:
+ title: Project title.
+ description: High-level creative brief.
+ scenes: Optional pre-written scene descriptions.
+ Each scene is a dict with ``description`` key.
+ lyrics: Optional song lyrics for the soundtrack.
+
+ Returns dict with project metadata.
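+
+    Example scenes (illustrative):
+
+        [{"description": "A rover crossing red dunes", "duration": 5},
+         {"description": "Close-up of its wheel tracks", "duration": 4}]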
+ """
+ project = CreativeProject(
+ title=title,
+ description=description,
+ scenes=scenes or [],
+ lyrics=lyrics,
+ )
+ _projects[project.id] = project
+ _save_project(project)
+ logger.info("Creative project created: %s (%s)", project.id, title)
+ return {"success": True, "project": project.to_dict()}
+
+
+def get_project(project_id: str) -> Optional[dict]:
+ """Get project metadata."""
+ project = _projects.get(project_id)
+ if project:
+ return project.to_dict()
+ return None
+
+
+def list_projects() -> list[dict]:
+ """List all creative projects."""
+ return [p.to_dict() for p in _projects.values()]
+
+
+# ── Pipeline steps ────────────────────────────────────────────────────────────
+
+def run_storyboard(project_id: str) -> dict:
+ """Generate storyboard frames for all scenes in a project.
+
+ Calls Pixel's generate_storyboard tool.
+ """
+ project = _projects.get(project_id)
+ if not project:
+ return {"success": False, "error": "Project not found"}
+ if not project.scenes:
+ return {"success": False, "error": "No scenes defined"}
+
+ project.status = "storyboard"
+
+ from tools.image_tools import generate_storyboard
+
+ scene_descriptions = [s["description"] for s in project.scenes]
+ result = generate_storyboard(scene_descriptions)
+
+ if result["success"]:
+ project.storyboard_frames = result["frames"]
+ _save_project(project)
+
+ return result
+
+
+def run_music(
+ project_id: str,
+ genre: str = "pop",
+ duration: Optional[int] = None,
+) -> dict:
+ """Generate the soundtrack for a project.
+
+ Calls Lyra's generate_song tool.
+ """
+ project = _projects.get(project_id)
+ if not project:
+ return {"success": False, "error": "Project not found"}
+
+ project.status = "music"
+
+ from tools.music_tools import generate_song
+
+ # Default duration: ~15s per scene, minimum 60s
+ target_duration = duration or max(60, len(project.scenes) * 15)
+
+ result = generate_song(
+ lyrics=project.lyrics,
+ genre=genre,
+ duration=target_duration,
+ title=project.title,
+ )
+
+ if result["success"]:
+ project.music_track = result
+ _save_project(project)
+
+ return result
+
+
+def run_video_generation(project_id: str) -> dict:
+ """Generate video clips for each scene.
+
+ Uses storyboard frames (image-to-video) if available,
+ otherwise falls back to text-to-video.
+ """
+ project = _projects.get(project_id)
+ if not project:
+ return {"success": False, "error": "Project not found"}
+ if not project.scenes:
+ return {"success": False, "error": "No scenes defined"}
+
+ project.status = "video"
+
+ from tools.video_tools import generate_video_clip, image_to_video
+
+ clips = []
+ for i, scene in enumerate(project.scenes):
+ desc = scene["description"]
+
+ # Prefer image-to-video if storyboard frame exists
+ if i < len(project.storyboard_frames):
+ frame = project.storyboard_frames[i]
+ result = image_to_video(
+ image_path=frame["path"],
+ prompt=desc,
+ duration=scene.get("duration", 5),
+ )
+ else:
+ result = generate_video_clip(
+ prompt=desc,
+ duration=scene.get("duration", 5),
+ )
+
+ result["scene_index"] = i
+ clips.append(result)
+
+ project.video_clips = clips
+ _save_project(project)
+
+ return {
+ "success": True,
+ "clip_count": len(clips),
+ "clips": clips,
+ }
+
+
+def run_assembly(project_id: str, transition_duration: float = 1.0) -> dict:
+ """Assemble all clips into the final video with music.
+
+ Pipeline:
+ 1. Stitch clips with transitions
+ 2. Overlay music track
+ 3. Add title card
+ """
+ project = _projects.get(project_id)
+ if not project:
+ return {"success": False, "error": "Project not found"}
+ if not project.video_clips:
+ return {"success": False, "error": "No video clips generated"}
+
+ project.status = "assembly"
+
+ from creative.assembler import stitch_clips, overlay_audio, add_title_card
+
+ # 1. Stitch clips
+ clip_paths = [c["path"] for c in project.video_clips if c.get("success")]
+ if not clip_paths:
+ return {"success": False, "error": "No successful clips to assemble"}
+
+ stitched = stitch_clips(clip_paths, transition_duration=transition_duration)
+ if not stitched["success"]:
+ return stitched
+
+ # 2. Overlay music (if available)
+ current_video = stitched["path"]
+ if project.music_track and project.music_track.get("path"):
+ mixed = overlay_audio(current_video, project.music_track["path"])
+ if mixed["success"]:
+ current_video = mixed["path"]
+
+ # 3. Add title card
+ titled = add_title_card(current_video, title=project.title)
+ if titled["success"]:
+ current_video = titled["path"]
+
+ project.final_video = {
+ "path": current_video,
+ "duration": titled.get("duration", stitched["total_duration"]),
+ }
+ project.status = "complete"
+ _save_project(project)
+
+ return {
+ "success": True,
+ "path": current_video,
+ "duration": project.final_video["duration"],
+ "project_id": project_id,
+ }
+
+
+def run_full_pipeline(
+ title: str,
+ description: str,
+ scenes: list[dict],
+ lyrics: str = "",
+ genre: str = "pop",
+) -> dict:
+ """Run the entire creative pipeline end-to-end.
+
+ This is the top-level orchestration function that:
+ 1. Creates the project
+ 2. Generates storyboard frames
+ 3. Generates music
+ 4. Generates video clips
+ 5. Assembles the final video
+
+ Args:
+ title: Project title.
+ description: Creative brief.
+ scenes: List of scene dicts with ``description`` keys.
+ lyrics: Song lyrics for the soundtrack.
+ genre: Music genre.
+
+ Returns dict with final video path and project metadata.
+ """
+ # Create project
+ project_result = create_project(title, description, scenes, lyrics)
+ if not project_result["success"]:
+ return project_result
+ project_id = project_result["project"]["id"]
+
+ # Run pipeline steps
+ steps = [
+ ("storyboard", lambda: run_storyboard(project_id)),
+ ("music", lambda: run_music(project_id, genre=genre)),
+ ("video", lambda: run_video_generation(project_id)),
+ ("assembly", lambda: run_assembly(project_id)),
+ ]
+
+ for step_name, step_fn in steps:
+ logger.info("Creative pipeline step: %s (project %s)", step_name, project_id)
+ result = step_fn()
+ if not result.get("success"):
+ project = _projects.get(project_id)
+ if project:
+ project.status = "failed"
+ _save_project(project)
+ return {
+ "success": False,
+ "failed_step": step_name,
+ "error": result.get("error", "Unknown error"),
+ "project_id": project_id,
+ }
+
+ project = _projects.get(project_id)
+ return {
+ "success": True,
+ "project_id": project_id,
+ "final_video": project.final_video if project else None,
+ "project": project.to_dict() if project else None,
+ }
+
+
+# ── Tool catalogue ────────────────────────────────────────────────────────────
+
+DIRECTOR_TOOL_CATALOG: dict[str, dict] = {
+ "create_project": {
+ "name": "Create Creative Project",
+ "description": "Create a new creative production project",
+ "fn": create_project,
+ },
+ "run_storyboard": {
+ "name": "Generate Storyboard",
+ "description": "Generate keyframe images for all project scenes",
+ "fn": run_storyboard,
+ },
+ "run_music": {
+ "name": "Generate Music",
+ "description": "Generate the project soundtrack with vocals and instrumentals",
+ "fn": run_music,
+ },
+ "run_video_generation": {
+ "name": "Generate Video Clips",
+ "description": "Generate video clips for each project scene",
+ "fn": run_video_generation,
+ },
+ "run_assembly": {
+ "name": "Assemble Final Video",
+ "description": "Stitch clips, overlay music, and add title cards",
+ "fn": run_assembly,
+ },
+ "run_full_pipeline": {
+ "name": "Run Full Pipeline",
+ "description": "Execute entire creative pipeline end-to-end",
+ "fn": run_full_pipeline,
+ },
+}
diff --git a/src/dashboard/app.py b/src/dashboard/app.py
index 78e7be2..3b2788a 100644
--- a/src/dashboard/app.py
+++ b/src/dashboard/app.py
@@ -23,6 +23,10 @@ from dashboard.routes.briefing import router as briefing_router
from dashboard.routes.telegram import router as telegram_router
from dashboard.routes.swarm_internal import router as swarm_internal_router
from dashboard.routes.tools import router as tools_router
+from dashboard.routes.spark import router as spark_router
+from dashboard.routes.creative import router as creative_router
+from dashboard.routes.discord import router as discord_router
+from dashboard.routes.self_modify import router as self_modify_router
logging.basicConfig(
level=logging.INFO,
@@ -97,12 +101,24 @@ async def lifespan(app: FastAPI):
except Exception as exc:
logger.error("Failed to spawn persona agents: %s", exc)
+ # Initialise Spark Intelligence engine
+ from spark.engine import spark_engine
+ if spark_engine.enabled:
+ logger.info("Spark Intelligence active — event capture enabled")
+
# Auto-start Telegram bot if a token is configured
from telegram_bot.bot import telegram_bot
await telegram_bot.start()
+ # Auto-start Discord bot and register in platform registry
+ from chat_bridge.vendors.discord import discord_bot
+ from chat_bridge.registry import platform_registry
+ platform_registry.register(discord_bot)
+ await discord_bot.start()
+
yield
+ await discord_bot.stop()
await telegram_bot.stop()
task.cancel()
try:
@@ -136,6 +152,10 @@ app.include_router(briefing_router)
app.include_router(telegram_router)
app.include_router(swarm_internal_router)
app.include_router(tools_router)
+app.include_router(spark_router)
+app.include_router(creative_router)
+app.include_router(discord_router)
+app.include_router(self_modify_router)
@app.get("/", response_class=HTMLResponse)
diff --git a/src/dashboard/routes/creative.py b/src/dashboard/routes/creative.py
new file mode 100644
index 0000000..f24b5ff
--- /dev/null
+++ b/src/dashboard/routes/creative.py
@@ -0,0 +1,87 @@
+"""Creative Studio dashboard route — /creative endpoints.
+
+Provides a dashboard page for the creative pipeline: image generation,
+music generation, video generation, and the full director pipeline.
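+
+Example API call (assumes the dashboard listens on localhost:8000):
+
+    import httpx
+    projects = httpx.get("http://localhost:8000/creative/api/projects").json()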
+"""
+
+import json
+from pathlib import Path
+
+from fastapi import APIRouter, Request
+from fastapi.responses import HTMLResponse
+from fastapi.templating import Jinja2Templates
+
+router = APIRouter(tags=["creative"])
+templates = Jinja2Templates(directory=str(Path(__file__).parent.parent / "templates"))
+
+
+@router.get("/creative/ui", response_class=HTMLResponse)
+async def creative_studio(request: Request):
+ """Render the Creative Studio page."""
+ # Collect existing outputs
+ image_dir = Path("data/images")
+ music_dir = Path("data/music")
+ video_dir = Path("data/video")
+ creative_dir = Path("data/creative")
+
+    def _recent(d: Path, pattern: str) -> list[Path]:
+        """Newest matching files first, capped at 20."""
+        if not d.exists():
+            return []
+        return sorted(d.glob(pattern), key=lambda p: p.stat().st_mtime, reverse=True)[:20]
+
+    images = _recent(image_dir, "*.png")
+    music_files = _recent(music_dir, "*.wav")
+    videos = _recent(video_dir, "*.mp4")
+
+ # Load projects
+ projects = []
+ if creative_dir.exists():
+        for proj_dir in sorted(creative_dir.iterdir(), reverse=True):
+            if not proj_dir.is_dir():
+                continue  # the assembler also writes loose files into data/creative
+            meta_path = proj_dir / "project.json"
+            if meta_path.exists():
+                projects.append(json.loads(meta_path.read_text()))
+
+ return templates.TemplateResponse(
+ request,
+ "creative.html",
+ {
+ "page_title": "Creative Studio",
+ "images": [{"name": p.name, "path": str(p)} for p in images],
+ "music_files": [{"name": p.name, "path": str(p)} for p in music_files],
+ "videos": [{"name": p.name, "path": str(p)} for p in videos],
+ "projects": projects[:10],
+ "image_count": len(images),
+ "music_count": len(music_files),
+ "video_count": len(videos),
+ "project_count": len(projects),
+ },
+ )
+
+
+@router.get("/creative/api/projects")
+async def creative_projects_api():
+ """Return creative projects as JSON."""
+ try:
+ from creative.director import list_projects
+ return {"projects": list_projects()}
+ except ImportError:
+ return {"projects": []}
+
+
+@router.get("/creative/api/genres")
+async def creative_genres_api():
+ """Return supported music genres."""
+ try:
+ from tools.music_tools import GENRES
+ return {"genres": GENRES}
+ except ImportError:
+ return {"genres": []}
+
+
+@router.get("/creative/api/video-styles")
+async def creative_video_styles_api():
+ """Return supported video styles and resolutions."""
+ try:
+ from tools.video_tools import VIDEO_STYLES, RESOLUTION_PRESETS
+ return {
+ "styles": VIDEO_STYLES,
+ "resolutions": list(RESOLUTION_PRESETS.keys()),
+ }
+ except ImportError:
+ return {"styles": [], "resolutions": []}
diff --git a/src/dashboard/routes/discord.py b/src/dashboard/routes/discord.py
new file mode 100644
index 0000000..28629a5
--- /dev/null
+++ b/src/dashboard/routes/discord.py
@@ -0,0 +1,140 @@
+"""Dashboard routes for Discord bot setup, status, and invite-from-image.
+
+Endpoints:
+ POST /discord/setup — configure bot token
+ GET /discord/status — connection state + guild count
+ POST /discord/join — paste screenshot → extract invite → join
+ GET /discord/oauth-url — get the bot's OAuth2 authorization URL
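+
+Example setup call (assumes the dashboard listens on localhost:8000):
+
+    import httpx
+    r = httpx.post(
+        "http://localhost:8000/discord/setup",
+        json={"token": "<bot token>"},
+    )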
+"""
+
+from fastapi import APIRouter, File, Form, UploadFile
+from pydantic import BaseModel
+from typing import Optional
+
+router = APIRouter(prefix="/discord", tags=["discord"])
+
+
+class TokenPayload(BaseModel):
+ token: str
+
+
+@router.post("/setup")
+async def setup_discord(payload: TokenPayload):
+ """Configure the Discord bot token and (re)start the bot.
+
+    Send POST with JSON body: {"token": "<bot token>"}
+ Get the token from https://discord.com/developers/applications
+ """
+ from chat_bridge.vendors.discord import discord_bot
+
+ token = payload.token.strip()
+ if not token:
+ return {"ok": False, "error": "Token cannot be empty."}
+
+ discord_bot.save_token(token)
+
+ if discord_bot.state.name == "CONNECTED":
+ await discord_bot.stop()
+
+ success = await discord_bot.start(token=token)
+ if success:
+ return {"ok": True, "message": "Discord bot connected successfully."}
+ return {
+ "ok": False,
+ "error": (
+ "Failed to start bot. Check that the token is correct and "
+ 'discord.py is installed: pip install ".[discord]"'
+ ),
+ }
+
+
+@router.get("/status")
+async def discord_status():
+ """Return current Discord bot status."""
+ from chat_bridge.vendors.discord import discord_bot
+
+ return discord_bot.status().to_dict()
+
+
+@router.post("/join")
+async def join_from_image(
+ image: Optional[UploadFile] = File(None),
+ invite_url: Optional[str] = Form(None),
+):
+ """Extract a Discord invite from a screenshot or text and validate it.
+
+ Accepts either:
+ - An uploaded image (screenshot of invite or QR code)
+ - A plain text invite URL
+
+ The bot validates the invite and returns the OAuth2 URL for the
+ server admin to authorize the bot.
+ """
+ from chat_bridge.invite_parser import invite_parser
+ from chat_bridge.vendors.discord import discord_bot
+
+ invite_info = None
+
+ # Try image first
+ if image and image.filename:
+ image_data = await image.read()
+ if image_data:
+ invite_info = await invite_parser.parse_image(image_data)
+
+ # Fall back to text
+ if not invite_info and invite_url:
+ invite_info = invite_parser.parse_text(invite_url)
+
+ if not invite_info:
+ return {
+ "ok": False,
+ "error": (
+ "No Discord invite found. "
+ "Paste a screenshot with a visible invite link or QR code, "
+ "or enter the invite URL directly."
+ ),
+ }
+
+ # Validate the invite
+ valid = await discord_bot.join_from_invite(invite_info.code)
+
+ result = {
+ "ok": True,
+ "invite": {
+ "code": invite_info.code,
+ "url": invite_info.url,
+ "source": invite_info.source,
+ "platform": invite_info.platform,
+ },
+ "validated": valid,
+ }
+
+ # Include OAuth2 URL if bot is connected
+ oauth_url = discord_bot.get_oauth2_url()
+ if oauth_url:
+ result["oauth2_url"] = oauth_url
+ result["message"] = (
+ "Invite validated. Share this OAuth2 URL with the server admin "
+ "to add Timmy to the server."
+ )
+ else:
+ result["message"] = (
+ "Invite found but bot is not connected. "
+ "Configure a bot token first via /discord/setup."
+ )
+
+ return result
+
+
+@router.get("/oauth-url")
+async def discord_oauth_url():
+ """Get the bot's OAuth2 authorization URL for adding to servers."""
+ from chat_bridge.vendors.discord import discord_bot
+
+ url = discord_bot.get_oauth2_url()
+ if url:
+ return {"ok": True, "url": url}
+ return {
+ "ok": False,
+ "error": "Bot is not connected. Configure a token first.",
+ }
diff --git a/src/dashboard/routes/self_modify.py b/src/dashboard/routes/self_modify.py
new file mode 100644
index 0000000..2e0cf74
--- /dev/null
+++ b/src/dashboard/routes/self_modify.py
@@ -0,0 +1,71 @@
+"""Self-modification routes — /self-modify endpoints.
+
+Exposes the edit-test-commit loop as a REST API. Gated by
+``SELF_MODIFY_ENABLED`` (default False).
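+
+Example call (assumes the dashboard listens on localhost:8000 and
+SELF_MODIFY_ENABLED=true):
+
+    import httpx
+    r = httpx.post(
+        "http://localhost:8000/self-modify/run",
+        data={"instruction": "Add type hints to config.py", "dry_run": "true"},
+    )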
+"""
+
+import asyncio
+import logging
+
+from fastapi import APIRouter, Form, HTTPException
+
+from config import settings
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/self-modify", tags=["self-modify"])
+
+
+@router.post("/run")
+async def run_self_modify(
+ instruction: str = Form(...),
+ target_files: str = Form(""),
+ dry_run: bool = Form(False),
+ speak_result: bool = Form(False),
+):
+ """Execute a self-modification loop.
+
+ Returns the ModifyResult as JSON.
+ """
+ if not settings.self_modify_enabled:
+ raise HTTPException(403, "Self-modification is disabled")
+
+ from self_modify.loop import SelfModifyLoop, ModifyRequest
+
+ files = [f.strip() for f in target_files.split(",") if f.strip()]
+ request = ModifyRequest(
+ instruction=instruction,
+ target_files=files,
+ dry_run=dry_run,
+ )
+
+ loop = SelfModifyLoop()
+ result = await asyncio.to_thread(loop.run, request)
+
+ if speak_result and result.success:
+ try:
+ from timmy_serve.voice_tts import voice_tts
+
+ if voice_tts.available:
+ voice_tts.speak(
+ f"Code modification complete. "
+ f"{len(result.files_changed)} files changed. Tests passing."
+ )
+ except Exception:
+ pass
+
+ return {
+ "success": result.success,
+ "files_changed": result.files_changed,
+ "test_passed": result.test_passed,
+ "commit_sha": result.commit_sha,
+ "branch_name": result.branch_name,
+ "error": result.error,
+ "attempts": result.attempts,
+ }
+
+
+@router.get("/status")
+async def self_modify_status():
+ """Return whether self-modification is enabled."""
+ return {"enabled": settings.self_modify_enabled}
diff --git a/src/dashboard/routes/spark.py b/src/dashboard/routes/spark.py
new file mode 100644
index 0000000..f998050
--- /dev/null
+++ b/src/dashboard/routes/spark.py
@@ -0,0 +1,147 @@
+"""Spark Intelligence dashboard routes.
+
+GET /spark — JSON status (API)
+GET /spark/ui — HTML Spark Intelligence dashboard
+GET /spark/timeline — HTMX partial: recent event timeline
+GET /spark/insights — HTMX partial: advisories and insights
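+
+Example status fetch (assumes the dashboard listens on localhost:8000):
+
+    import httpx
+    status = httpx.get("http://localhost:8000/spark").json()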
+"""
+
+import json
+import logging
+from pathlib import Path
+
+from fastapi import APIRouter, Request
+from fastapi.responses import HTMLResponse
+from fastapi.templating import Jinja2Templates
+
+from spark.engine import spark_engine
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/spark", tags=["spark"])
+templates = Jinja2Templates(directory=str(Path(__file__).parent.parent / "templates"))
+
+
+def _enrich_timeline(events) -> list[dict]:
+    """Flatten Spark event rows into template-friendly dicts with parsed data."""
+    enriched = []
+    for ev in events:
+        entry = {
+            "id": ev.id,
+            "event_type": ev.event_type,
+            "agent_id": ev.agent_id,
+            "task_id": ev.task_id,
+            "description": ev.description,
+            "importance": ev.importance,
+            "created_at": ev.created_at,
+        }
+        try:
+            entry["data"] = json.loads(ev.data)
+        except (json.JSONDecodeError, TypeError):
+            entry["data"] = {}
+        enriched.append(entry)
+    return enriched
+
+
+@router.get("/ui", response_class=HTMLResponse)
+async def spark_ui(request: Request):
+ """Render the Spark Intelligence dashboard page."""
+ status = spark_engine.status()
+ advisories = spark_engine.get_advisories()
+ timeline = spark_engine.get_timeline(limit=20)
+ predictions = spark_engine.get_predictions(limit=10)
+ memories = spark_engine.get_memories(limit=10)
+
+    timeline_enriched = _enrich_timeline(timeline)
+
+ # Enrich predictions for display
+ predictions_enriched = []
+ for p in predictions:
+ entry = {
+ "id": p.id,
+ "task_id": p.task_id,
+ "prediction_type": p.prediction_type,
+ "accuracy": p.accuracy,
+ "created_at": p.created_at,
+ "evaluated_at": p.evaluated_at,
+ }
+ try:
+ entry["predicted"] = json.loads(p.predicted_value)
+ except (json.JSONDecodeError, TypeError):
+ entry["predicted"] = {}
+ try:
+ entry["actual"] = json.loads(p.actual_value) if p.actual_value else None
+ except (json.JSONDecodeError, TypeError):
+ entry["actual"] = None
+ predictions_enriched.append(entry)
+
+ return templates.TemplateResponse(
+ request,
+ "spark.html",
+ {
+ "status": status,
+ "advisories": advisories,
+ "timeline": timeline_enriched,
+ "predictions": predictions_enriched,
+ "memories": memories,
+ },
+ )
+
+
+@router.get("", response_class=HTMLResponse)
+async def spark_status_json():
+ """Return Spark Intelligence status as JSON."""
+ from fastapi.responses import JSONResponse
+ status = spark_engine.status()
+ advisories = spark_engine.get_advisories()
+ return JSONResponse({
+ "status": status,
+ "advisories": [
+ {
+ "category": a.category,
+ "priority": a.priority,
+ "title": a.title,
+ "detail": a.detail,
+ "suggested_action": a.suggested_action,
+ "subject": a.subject,
+ "evidence_count": a.evidence_count,
+ }
+ for a in advisories
+ ],
+    }
+
+
+@router.get("/timeline", response_class=HTMLResponse)
+async def spark_timeline(request: Request):
+ """HTMX partial: recent event timeline."""
+ timeline = spark_engine.get_timeline(limit=20)
+    timeline_enriched = _enrich_timeline(timeline)
+
+ return templates.TemplateResponse(
+ request,
+ "partials/spark_timeline.html",
+ {"timeline": timeline_enriched},
+ )
+
+
+@router.get("/insights", response_class=HTMLResponse)
+async def spark_insights(request: Request):
+ """HTMX partial: advisories and consolidated memories."""
+ advisories = spark_engine.get_advisories()
+ memories = spark_engine.get_memories(limit=10)
+ return templates.TemplateResponse(
+ request,
+ "partials/spark_insights.html",
+ {"advisories": advisories, "memories": memories},
+ )
diff --git a/src/dashboard/routes/swarm.py b/src/dashboard/routes/swarm.py
index 82ae8b1..263cac0 100644
--- a/src/dashboard/routes/swarm.py
+++ b/src/dashboard/routes/swarm.py
@@ -4,6 +4,7 @@ Provides REST endpoints for managing the swarm: listing agents,
spawning sub-agents, posting tasks, and viewing auction results.
"""
+import asyncio
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
@@ -98,8 +99,10 @@ async def list_tasks(status: Optional[str] = None):
@router.post("/tasks")
async def post_task(description: str = Form(...)):
- """Post a new task to the swarm for bidding."""
+ """Post a new task to the swarm and run auction to assign it."""
task = coordinator.post_task(description)
+    # Run the auction in the background; the response returns immediately.
+ asyncio.create_task(coordinator.run_auction_and_assign(task.id))
return {
"task_id": task.id,
"description": task.description,
@@ -122,6 +125,52 @@ async def post_task_and_auction(description: str = Form(...)):
}
+@router.get("/tasks/panel", response_class=HTMLResponse)
+async def task_create_panel(request: Request, agent_id: Optional[str] = None):
+ """Task creation panel, optionally pre-selecting an agent."""
+ agents = coordinator.list_swarm_agents()
+ return templates.TemplateResponse(
+ request,
+ "partials/task_assign_panel.html",
+ {"agents": agents, "preselected_agent_id": agent_id},
+ )
+
+
+@router.post("/tasks/direct", response_class=HTMLResponse)
+async def direct_assign_task(
+ request: Request,
+ description: str = Form(...),
+ agent_id: Optional[str] = Form(None),
+):
+ """Create a task: assign directly if agent_id given, else open auction."""
+ timestamp = datetime.now(timezone.utc).strftime("%H:%M:%S")
+
+ if agent_id:
+ agent = registry.get_agent(agent_id)
+ task = coordinator.post_task(description)
+ coordinator.auctions.open_auction(task.id)
+ coordinator.auctions.submit_bid(task.id, agent_id, 1)
+ coordinator.auctions.close_auction(task.id)
+ update_task(task.id, status=TaskStatus.ASSIGNED, assigned_agent=agent_id)
+ registry.update_status(agent_id, "busy")
+ agent_name = agent.name if agent else agent_id
+ else:
+ task = coordinator.post_task(description)
+ winner = await coordinator.run_auction_and_assign(task.id)
+ task = coordinator.get_task(task.id)
+ agent_name = winner.agent_id if winner else "unassigned"
+
+ return templates.TemplateResponse(
+ request,
+ "partials/task_result.html",
+ {
+ "task": task,
+ "agent_name": agent_name,
+ "timestamp": timestamp,
+ },
+ )
+
+
@router.get("/tasks/{task_id}")
async def get_task(task_id: str):
"""Get details for a specific task."""
@@ -276,47 +325,3 @@ async def message_agent(agent_id: str, request: Request, message: str = Form(...
)
-@router.get("/tasks/panel", response_class=HTMLResponse)
-async def task_create_panel(request: Request, agent_id: Optional[str] = None):
- """Task creation panel, optionally pre-selecting an agent."""
- agents = coordinator.list_swarm_agents()
- return templates.TemplateResponse(
- request,
- "partials/task_assign_panel.html",
- {"agents": agents, "preselected_agent_id": agent_id},
- )
-
-
-@router.post("/tasks/direct", response_class=HTMLResponse)
-async def direct_assign_task(
- request: Request,
- description: str = Form(...),
- agent_id: Optional[str] = Form(None),
-):
- """Create a task: assign directly if agent_id given, else open auction."""
- timestamp = datetime.now(timezone.utc).strftime("%H:%M:%S")
-
- if agent_id:
- agent = registry.get_agent(agent_id)
- task = coordinator.post_task(description)
- coordinator.auctions.open_auction(task.id)
- coordinator.auctions.submit_bid(task.id, agent_id, 1)
- coordinator.auctions.close_auction(task.id)
- update_task(task.id, status=TaskStatus.ASSIGNED, assigned_agent=agent_id)
- registry.update_status(agent_id, "busy")
- agent_name = agent.name if agent else agent_id
- else:
- task = coordinator.post_task(description)
- winner = await coordinator.run_auction_and_assign(task.id)
- task = coordinator.get_task(task.id)
- agent_name = winner.agent_id if winner else "unassigned"
-
- return templates.TemplateResponse(
- request,
- "partials/task_result.html",
- {
- "task": task,
- "agent_name": agent_name,
- "timestamp": timestamp,
- },
- )
diff --git a/src/dashboard/routes/swarm_ws.py b/src/dashboard/routes/swarm_ws.py
index 95881da..13138dd 100644
--- a/src/dashboard/routes/swarm_ws.py
+++ b/src/dashboard/routes/swarm_ws.py
@@ -9,7 +9,7 @@ import logging
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
-from websocket.handler import ws_manager
+from ws_manager.handler import ws_manager
logger = logging.getLogger(__name__)
diff --git a/src/dashboard/routes/voice_enhanced.py b/src/dashboard/routes/voice_enhanced.py
index cd9339c..8a17ec0 100644
--- a/src/dashboard/routes/voice_enhanced.py
+++ b/src/dashboard/routes/voice_enhanced.py
@@ -55,6 +55,39 @@ async def process_voice_input(
elif intent.name == "voice":
response_text = "Voice settings acknowledged. TTS is available for spoken responses."
+ elif intent.name == "code":
+ from config import settings as app_settings
+ if not app_settings.self_modify_enabled:
+ response_text = (
+ "Self-modification is disabled. "
+ "Set SELF_MODIFY_ENABLED=true to enable."
+ )
+ else:
+ import asyncio
+ from self_modify.loop import SelfModifyLoop, ModifyRequest
+
+ target_files = []
+ if "target_file" in intent.entities:
+ target_files = [intent.entities["target_file"]]
+
+ loop = SelfModifyLoop()
+ request = ModifyRequest(
+ instruction=text,
+ target_files=target_files,
+ )
+ result = await asyncio.to_thread(loop.run, request)
+
+ if result.success:
+ sha_short = result.commit_sha[:8] if result.commit_sha else "none"
+ response_text = (
+ f"Code modification complete. "
+ f"Changed {len(result.files_changed)} file(s). "
+ f"Tests passed. Committed as {sha_short} "
+ f"on branch {result.branch_name}."
+ )
+ else:
+ response_text = f"Code modification failed: {result.error}"
+
else:
# Default: chat with Timmy
agent = create_timmy()
diff --git a/src/dashboard/templates/base.html b/src/dashboard/templates/base.html
index 4cd629e..5db616e 100644
--- a/src/dashboard/templates/base.html
+++ b/src/dashboard/templates/base.html
@@ -2,7 +2,7 @@
-
+
@@ -11,39 +11,93 @@
-
+
+ {% block extra_styles %}{% endblock %}