Compare commits
290 Commits
hermes/v0....kimi/issue
| Author | SHA1 | Date |
|---|---|---|
| | 9dee9ed2a8 | |
| | e3a0f1d2d6 | |
| | 2a9d21cea1 | |
| | 05b87c3ac1 | |
| | 8276279775 | |
| | d1f5c2714b | |
| | 65df56414a | |
| | b08ce53bab | |
| | e0660bf768 | |
| | dc9f0c04eb | |
| | 815933953c | |
| | d54493a87b | |
| | f7404f67ec | |
| | 5f4580f98d | |
| | 695d1401fd | |
| | ddadc95e55 | |
| | 8fc8e0fc3d | |
| | ada0774ca6 | |
| | 2a7b6d5708 | |
| | 9d4ac8e7cc | |
| | c9601ba32c | |
| | 646eaefa3e | |
| | 2fa5b23c0c | |
| | 9b57774282 | |
| | 62bde03f9e | |
| | 3474eeb4eb | |
| | e92e151dc3 | |
| | 1f1bc222e4 | |
| | cc30bdb391 | |
| | 6f0863b587 | |
| | e3d425483d | |
| | c9445e3056 | |
| | 11cd2e3372 | |
| | 9d0f5c778e | |
| | d2a5866650 | |
| | 2381d0b6d0 | |
| | 03ad2027a4 | |
| | 2bfc44ea1b | |
| | fe1fa78ef1 | |
| | 3c46a1b202 | |
| | 001358c64f | |
| | faad0726a2 | |
| | dd4410fe57 | |
| | ef7f31070b | |
| | 6f66670396 | |
| | 4cdd82818b | |
| | 99ad672e4d | |
| | a3f61c67d3 | |
| | 32dbdc68c8 | |
| | 84302aedac | |
| | 2c217104db | |
| | 7452e8a4f0 | |
| | 9732c80892 | |
| | f3b3d1e648 | |
| | 4ba8d25749 | |
| | 2622f0a0fb | |
| | e3d60b89a9 | |
| | 6214ad3225 | |
| | 5f5da2163f | |
| | 0029c34bb1 | |
| | 2577b71207 | |
| | 1a8b8ecaed | |
| | d821e76589 | |
| | bc010ecfba | |
| | faf6c1a5f1 | |
| | 48103bb076 | |
| | 9f244ffc70 | |
| | 0162a604be | |
| | 2326771c5a | |
| | 8f6cf2681b | |
| | f361893fdd | |
| | 7ad0ee17b6 | |
| | 29220b6bdd | |
| | 2849dba756 | |
| | e11e07f117 | |
| | 50c8a5428e | |
| | 7da434c85b | |
| | 88e59f7c17 | |
| | aa5e9c3176 | |
| | 1b4fe65650 | |
| | 2d69f73d9d | |
| | ff1e43c235 | |
| | b331aa6139 | |
| | b45b543f2d | |
| | 7c823ab59c | |
| | 9f2728f529 | |
| | cd3dc5d989 | |
| | e4de539bf3 | |
| | b2057f72e1 | |
| | 5f52dd54c0 | |
| | 9ceffd61d1 | |
| | 015d858be5 | |
| | b6d0b5f999 | |
| | d70e4f810a | |
| | 7f20742fcf | |
| | 15eb7c3b45 | |
| | dbc2fd5b0f | |
| | 3c3aca57f1 | |
| | 0ae00af3f8 | |
| | 3df526f6ef | |
| | 50aaf60db2 | |
| | a751be3038 | |
| | 92594ea588 | |
| | 12582ab593 | |
| | 72c3a0a989 | |
| | de089cec7f | |
| | 3590c1689e | |
| | 2161c32ae8 | |
| | 98b1142820 | |
| | 1d79a36bd8 | |
| | cce311dbb8 | |
| | 3cde310c78 | |
| | cdb1a7546b | |
| | a31c929770 | |
| | 3afb62afb7 | |
| | 332fa373b8 | |
| | 76b26ead55 | |
| | 63e4542f31 | |
| | 9b8ad3629a | |
| | 4b617cfcd0 | |
| | b67dbe922f | |
| | 3571d528ad | |
| | ab3546ae4b | |
| | e89aef41bc | |
| | 86224d042d | |
| | 2209ac82d2 | |
| | f9d8509c15 | |
| | 858264be0d | |
| | 3c10da489b | |
| | da43421d4e | |
| | aa4f1de138 | |
| | 19e7e61c92 | |
| | b7573432cc | |
| | 3108971bd5 | |
| | 864be20dde | |
| | c1f939ef22 | |
| | c1af9e3905 | |
| | 996ccec170 | |
| | 560aed78c3 | |
| | c7198b1254 | |
| | 43efb01c51 | |
| | ce658c841a | |
| | db7220db5a | |
| | ae10ea782d | |
| | 4afc5daffb | |
| | 4aa86ff1cb | |
| | dff07c6529 | |
| | 11357ffdb4 | |
| | fcbb2b848b | |
| | 6621f4bd31 | |
| | 243b1a656f | |
| | 22e0d2d4b3 | |
| | bcc7b068a4 | |
| | bfd924fe74 | |
| | 844923b16b | |
| | 8ef0ad1778 | |
| | 9a21a4b0ff | |
| | ab71c71036 | |
| | 39939270b7 | |
| | 0ab1ee9378 | |
| | 234187c091 | |
| | f4106452d2 | |
| | f5a570c56d | |
| | 96e7961a0e | |
| | bcbdc7d7cb | |
| | 80aba0bf6d | |
| | dd34dc064f | |
| | 7bc355eed6 | |
| | f9911c002c | |
| | 7f656fcf22 | |
| | 8c63dabd9d | |
| | a50af74ea2 | |
| | b4cb3e9975 | |
| | 4a68f6cb8b | |
| | b3840238cb | |
| | 96c7e6deae | |
| | efef0cd7a2 | |
| | 766add6415 | |
| | 56b08658b7 | |
| | f6d74b9f1d | |
| | e8dd065ad7 | |
| | 5b57bf3dd0 | |
| | bcd6d7e321 | |
| | bea2749158 | |
| | ca01ce62ad | |
| | b960096331 | |
| | 204a6ed4e5 | |
| | f15ad3375a | |
| | 5aea8be223 | |
| | 717dba9816 | |
| | 466db7aed2 | |
| | d2c51763d0 | |
| | 16b31b30cb | |
| | 48c8efb2fb | |
| | d48d56ecc0 | |
| | 76df262563 | |
| | f4e5148825 | |
| | 92e123c9e5 | |
| | 466ad08d7d | |
| | cf48b7d904 | |
| | aa01bb9dbe | |
| | 082c1922f7 | |
| | 9220732581 | |
| | 66544d52ed | |
| | 5668368405 | |
| | a277d40e32 | |
| | 564eb817d4 | |
| | 874f7f8391 | |
| | a57fd7ea09 | |
| | 7546a44f66 | |
| | 2fcaea4d3a | |
| | 750659630b | |
| | 24b20a05ca | |
| | b9b78adaa2 | |
| | bbbbdcdfa9 | |
| | 65e5e7786f | |
| | 9134ce2f71 | |
| | 547b502718 | |
| | 3e7a35b3df | |
| | 1c5f9b4218 | |
| | 453c9a0694 | |
| | 2fb104528f | |
| | c164d1736f | |
| | ddb872d3b0 | |
| | f8295502fb | |
| | b12e29b92e | |
| | 825f9e6bb4 | |
| | ffae5aa7c6 | |
| | 0204ecc520 | |
| | 2b8d71db8e | |
| | 9171d93ef9 | |
| | f8f3b9b81f | |
| | a728665159 | |
| | 343421fc45 | |
| | 4b553fa0ed | |
| | 342b9a9d84 | |
| | b3809f5246 | |
| | 2ffee7c8fa | |
| | 67497133fd | |
| | 970a6efb9f | |
| | 415938c9a3 | |
| | c1ec43c59f | |
| | fdc5b861ca | |
| | ad106230b9 | |
| | f51512aaff | |
| | 9c59b386d8 | |
| | e6bde2f907 | |
| | b01c1cb582 | |
| | bce6e7d030 | |
| | 8a14bbb3e0 | |
| | d1a8b16cd7 | |
| | bf30d26dd1 | |
| | 86956bd057 | |
| | 23ed2b2791 | |
| | b3a1e0ce36 | |
| | 7ff012883a | |
| | 7132b42ff3 | |
| | 1f09323e09 | |
| | 74e426c63b | |
| | 586c8e3a75 | |
| | e09ca203dc | |
| | 09fcf956ec | |
| | d28e2f4a7e | |
| | 0b0251f702 | |
| | 94cd1a9840 | |
| | f097784de8 | |
| | 061c8f6628 | |
| | 3c671de446 | |
| | 927e25cc40 | |
| | 2d2b566e58 | |
| | 64fd1d9829 | |
| | f0b0e2f202 | |
| | b30b5c6b57 | |
| | 0d61b709da | |
| | 79edfd1106 | |
| | 013a2cc330 | |
| | f426df5b42 | |
| | bef4fc1024 | |
| | 9535dd86de | |
| | 70d5dc5ce1 | |
| | 122d07471e | |
| | 3d110098d1 | |
| | db129bbe16 | |
| | 591954891a | |
| | bb287b2c73 | |
| | efb1feafc9 | |
| | 6233a8ccd6 | |
| | fa838b0063 | |
| | 782218aa2c | |
| | dbadfc425d | |
.env.example (14 lines changed)

@@ -14,8 +14,13 @@
 # In production (docker-compose.prod.yml), this is set to http://ollama:11434 automatically.
 # OLLAMA_URL=http://localhost:11434
 
-# LLM model to use via Ollama (default: qwen3.5:latest)
-# OLLAMA_MODEL=qwen3.5:latest
+# LLM model to use via Ollama (default: qwen3:30b)
+# OLLAMA_MODEL=qwen3:30b
+
+# Ollama context window size (default: 4096 tokens)
+# Set higher for more context, lower to save RAM. 0 = model default.
+# qwen3:30b + 4096 ctx ≈ 19GB VRAM; default ctx ≈ 45GB.
+# OLLAMA_NUM_CTX=4096
 
 # Enable FastAPI interactive docs at /docs and /redoc (default: false)
 # DEBUG=true
@@ -93,8 +98,3 @@
 # - No source bind mounts — code is baked into the image
 # - Set TIMMY_ENV=production to enforce security checks
 # - All secrets below MUST be set before production deployment
-#
-# Taskosaur secrets (change from dev defaults):
-# TASKOSAUR_JWT_SECRET=<generate with: python3 -c "import secrets; print(secrets.token_hex(32))">
-# TASKOSAUR_JWT_REFRESH_SECRET=<generate with: python3 -c "import secrets; print(secrets.token_hex(32))">
-# TASKOSAUR_ENCRYPTION_KEY=<generate with: python3 -c "import secrets; print(secrets.token_hex(32))">
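The new variables pair with the pydantic-settings pattern this repo's rules call out. A minimal sketch of how they might be consumed — field names mirror the env vars above and the `LLMConfig` excerpt later in this diff; the `ollama_num_ctx` field and the `.env` wiring are assumptions, not the project's actual config class:

```python
# Sketch only, under the assumptions above — not the project's settings module.
from pydantic_settings import BaseSettings, SettingsConfigDict

class LLMSettings(BaseSettings):
    model_config = SettingsConfigDict(env_file=".env", extra="ignore")

    ollama_url: str = "http://localhost:11434"
    ollama_model: str = "qwen3:30b"
    ollama_num_ctx: int = 4096  # 0 = use the model's default context size

settings = LLMSettings()  # OLLAMA_MODEL etc. from .env or the environment override defaults
```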
@@ -1,6 +1,5 @@
 #!/usr/bin/env bash
-# Pre-commit hook: auto-format, then test via tox.
-# Blocks the commit if tests fail. Formatting is applied automatically.
+# Pre-commit hook: auto-format + test. No bypass. No exceptions.
 #
 # Auto-activated by `make install` via git core.hooksPath.
 
@@ -8,8 +7,8 @@ set -e
 
 MAX_SECONDS=60
 
-# Auto-format staged files so formatting never blocks a commit
-echo "Auto-formatting with black + isort..."
+# Auto-format staged files
+echo "Auto-formatting with ruff..."
 tox -e format -- 2>/dev/null || tox -e format
 git add -u
 
.gitignore (24 lines changed, vendored)

@@ -21,6 +21,9 @@ discord_credentials.txt
 
 # Backup / temp files
 *~
+\#*\#
+*.backup
+*.tar.gz
 
 # SQLite — never commit databases or WAL/SHM artifacts
 *.db
@@ -61,7 +64,8 @@ src/data/
 
 # Local content — user-specific or generated
 MEMORY.md
-memory/self/
+memory/self/*
+!memory/self/soul.md
 TIMMYTIME
 introduction.txt
 messages.txt
@@ -72,6 +76,23 @@ scripts/migrate_to_zeroclaw.py
 src/infrastructure/db_pool.py
 workspace/
 
+# Loop orchestration state
+.loop/
+
+# Legacy junk from old Timmy sessions (one-word fragments, cruft)
+Hi
+Im Timmy*
+his
+keep
+clean
+directory
+my_name_is_timmy*
+timmy_read_me_*
+issue_12_proposal.md
+
+# Memory notes (session-scoped, not committed)
+memory/notes/
+
 # Gitea Actions runner state
 .runner
 
@@ -81,3 +102,4 @@ workspace/
 .LSOverride
 .Spotlight-V100
 .Trashes
+.timmy_gitea_token
.kimi/AGENTS.md (new file, 91 lines)

@@ -0,0 +1,91 @@
+# Kimi Agent Workspace
+
+**Agent:** Kimi (Moonshot AI)
+**Role:** Build Tier - Large-context feature drops, new subsystems, persona agents
+**Branch:** `kimi/agent-workspace-init`
+**Created:** 2026-03-14
+
+---
+
+## Quick Start
+
+```bash
+# Bootstrap Kimi workspace
+bash .kimi/scripts/bootstrap.sh
+
+# Resume work
+bash .kimi/scripts/resume.sh
+```
+
+---
+
+## Kimi Capabilities
+
+Per AGENTS.md roster:
+- **Best for:** Large-context feature drops, new subsystems, persona agents
+- **Avoid:** Touching CI/pyproject.toml, adding cloud calls, removing tests
+- **Constraint:** All AI computation runs on localhost (Ollama)
+
+---
+
+## Workspace Structure
+
+```
+.kimi/
+├── AGENTS.md        # This file - workspace guide
+├── README.md        # Workspace documentation
+├── CHECKPOINT.md    # Current session state
+├── TODO.md          # Task list for Kimi
+├── scripts/
+│   ├── bootstrap.sh # One-time setup
+│   ├── resume.sh    # Quick status + resume
+│   └── dev.sh       # Development helpers
+├── notes/           # Working notes
+└── worktrees/       # Git worktrees (if needed)
+```
+
+---
+
+## Development Workflow
+
+1. **Before changes:**
+   - Read CLAUDE.md and AGENTS.md
+   - Check CHECKPOINT.md for current state
+   - Run `make test` to verify green tests
+
+2. **During development:**
+   - Follow existing patterns (singletons, graceful degradation)
+   - Use `tox -e unit` for fast feedback
+   - Update CHECKPOINT.md with progress
+
+3. **Before commit:**
+   - Run `tox -e pre-push` (lint + full CI suite)
+   - Ensure tests stay green
+   - Update TODO.md
+
+---
+
+## Useful Commands
+
+```bash
+# Testing
+tox -e unit        # Fast unit tests
+tox -e integration # Integration tests
+tox -e pre-push    # Full CI suite (local)
+make test          # All tests
+
+# Development
+make dev           # Start dashboard with hot-reload
+make lint          # Check code quality
+make format        # Auto-format code
+
+# Git
+bash .kimi/scripts/resume.sh  # Show status + resume prompt
+```
+
+---
+
+## Contact
+
+- **Gitea:** http://localhost:3000/rockachopa/Timmy-time-dashboard
+- **PR:** Submit PRs to `main` branch
.kimi/CHECKPOINT.md (new file, 102 lines)

@@ -0,0 +1,102 @@
+# Kimi Checkpoint — Workspace Initialization
+
+**Date:** 2026-03-14
+**Branch:** `kimi/agent-workspace-init`
+**Status:** ✅ Workspace scaffolding complete, ready for PR
+
+---
+
+## Summary
+
+Created the Kimi (Moonshot AI) agent workspace with development scaffolding to enable smooth feature development on the Timmy Time project.
+
+### Deliverables
+
+1. **Workspace Structure** (`.kimi/`)
+   - `AGENTS.md` — Workspace guide and conventions
+   - `README.md` — Quick reference documentation
+   - `CHECKPOINT.md` — This file, session state tracking
+   - `TODO.md` — Task list for upcoming work
+
+2. **Development Scripts** (`.kimi/scripts/`)
+   - `bootstrap.sh` — One-time workspace setup
+   - `resume.sh` — Quick status check + resume prompt
+   - `dev.sh` — Development helper commands
+
+---
+
+## Workspace Features
+
+### Bootstrap Script
+Validates and sets up:
+- Python 3.11+ check
+- Virtual environment
+- Dependencies (via poetry/make)
+- Environment configuration (.env)
+- Git configuration
+
+### Resume Script
+Provides quick status on:
+- Current Git branch/commit
+- Uncommitted changes
+- Last test run results
+- Ollama service status
+- Dashboard service status
+- Pending TODO items
+
+### Development Script
+Commands for:
+- `status` — Project status overview
+- `test` — Fast unit tests
+- `test-full` — Full test suite
+- `lint` — Code quality check
+- `format` — Auto-format code
+- `clean` — Clean build artifacts
+- `nuke` — Full environment reset
+
+---
+
+## Files Added
+
+```
+.kimi/
+├── AGENTS.md
+├── CHECKPOINT.md
+├── README.md
+├── TODO.md
+├── scripts/
+│   ├── bootstrap.sh
+│   ├── dev.sh
+│   └── resume.sh
+└── worktrees/ (reserved for future use)
+```
+
+---
+
+## Next Steps
+
+Per AGENTS.md roadmap:
+
+1. **v2.0 Exodus (in progress)** — Voice + Marketplace + Integrations
+2. **v3.0 Revelation (planned)** — Lightning treasury + `.app` bundle + federation
+
+See `.kimi/TODO.md` for specific upcoming tasks.
+
+---
+
+## Usage
+
+```bash
+# First time setup
+bash .kimi/scripts/bootstrap.sh
+
+# Daily workflow
+bash .kimi/scripts/resume.sh  # Check status
+cat .kimi/TODO.md             # See tasks
+# ... make changes ...
+make test                     # Verify tests
+cat .kimi/CHECKPOINT.md       # Update checkpoint
+```
+
+---
+
+*Workspace initialized per AGENTS.md and CLAUDE.md conventions*
.kimi/README.md (new file, 51 lines)

@@ -0,0 +1,51 @@
+# Kimi Agent Workspace for Timmy Time
+
+This directory contains the Kimi (Moonshot AI) agent workspace for the Timmy Time project.
+
+## About Kimi
+
+Kimi is part of the **Build Tier** in the Timmy Time agent roster:
+- **Strengths:** Large-context feature drops, new subsystems, persona agents
+- **Model:** Paid API with large context window
+- **Best for:** Complex features requiring extensive context
+
+## Quick Commands
+
+```bash
+# Check workspace status
+bash .kimi/scripts/resume.sh
+
+# Bootstrap (first time)
+bash .kimi/scripts/bootstrap.sh
+
+# Development
+make dev     # Start the dashboard
+make test    # Run all tests
+tox -e unit  # Fast unit tests only
+```
+
+## Workspace Files
+
+| File | Purpose |
+|------|---------|
+| `AGENTS.md` | Workspace guide and conventions |
+| `CHECKPOINT.md` | Current session state |
+| `TODO.md` | Task list and priorities |
+| `scripts/bootstrap.sh` | One-time setup script |
+| `scripts/resume.sh` | Quick status check |
+| `scripts/dev.sh` | Development helpers |
+
+## Conventions
+
+Per project AGENTS.md:
+1. **Tests must stay green** - Run `make test` before committing
+2. **No cloud dependencies** - Use Ollama for local AI
+3. **Follow existing patterns** - Singletons, graceful degradation
+4. **Security first** - Never hard-code secrets
+5. **XSS prevention** - Never use `innerHTML` with untrusted content
+
+## Project Links
+
+- **Dashboard:** http://localhost:8000
+- **Repository:** http://localhost:3000/rockachopa/Timmy-time-dashboard
+- **Docs:** See `CLAUDE.md` and `AGENTS.md` in project root
.kimi/TODO.md (new file, 87 lines)

@@ -0,0 +1,87 @@
+# Kimi Workspace — Task List
+
+**Agent:** Kimi (Moonshot AI)
+**Branch:** `kimi/agent-workspace-init`
+
+---
+
+## Current Sprint
+
+### Completed ✅
+
+- [x] Create `kimi/agent-workspace-init` branch
+- [x] Set up `.kimi/` workspace directory structure
+- [x] Create `AGENTS.md` with workspace guide
+- [x] Create `README.md` with quick reference
+- [x] Create `bootstrap.sh` for one-time setup
+- [x] Create `resume.sh` for daily workflow
+- [x] Create `dev.sh` with helper commands
+- [x] Create `CHECKPOINT.md` template
+- [x] Create `TODO.md` (this file)
+- [x] Submit PR to Gitea
+
+---
+
+## Upcoming (v2.0 Exodus — Voice + Marketplace + Integrations)
+
+### Voice Enhancements
+
+- [ ] Voice command history and replay
+- [ ] Multi-language NLU support
+- [ ] Voice transcription quality metrics
+- [ ] Piper TTS integration improvements
+
+### Marketplace
+
+- [ ] Agent capability registry
+- [ ] Task bidding system UI
+- [ ] Work order management dashboard
+- [ ] Payment flow integration (L402)
+
+### Integrations
+
+- [ ] Discord bot enhancements
+- [ ] Telegram bot improvements
+- [ ] Siri Shortcuts expansion
+- [ ] WebSocket event streaming
+
+---
+
+## Future (v3.0 Revelation)
+
+### Lightning Treasury
+
+- [ ] LND integration (real Lightning)
+- [ ] Bitcoin wallet management
+- [ ] Autonomous payment flows
+- [ ] Macaroon-based authorization
+
+### App Bundle
+
+- [ ] macOS .app packaging
+- [ ] Code signing setup
+- [ ] Auto-updater integration
+
+### Federation
+
+- [ ] Multi-node swarm support
+- [ ] Inter-agent communication protocol
+- [ ] Distributed task scheduling
+
+---
+
+## Technical Debt
+
+- [ ] XSS audit (replace innerHTML in templates)
+- [ ] Chat history persistence
+- [ ] Connection pooling evaluation
+- [ ] React dashboard (separate effort)
+
+---
+
+## Notes
+
+- Follow existing patterns: singletons, graceful degradation
+- All AI computation on localhost (Ollama)
+- Tests must stay green
+- Update CHECKPOINT.md after each session
.kimi/scripts/bootstrap.sh (new executable file, 106 lines)

@@ -0,0 +1,106 @@
+#!/bin/bash
+# Kimi Workspace Bootstrap Script
+# Run this once to set up the Kimi agent workspace
+
+set -e
+
+echo "==============================================="
+echo "  Kimi Agent Workspace Bootstrap"
+echo "==============================================="
+echo ""
+
+# Navigate to project root
+cd "$(dirname "$0")/../.."
+PROJECT_ROOT=$(pwd)
+
+echo "📁 Project Root: $PROJECT_ROOT"
+echo ""
+
+# Check Python version
+echo "🔍 Checking Python version..."
+python3 -c "import sys; exit(0 if sys.version_info >= (3,11) else 1)" || {
+    echo "❌ ERROR: Python 3.11+ required (found $(python3 --version))"
+    exit 1
+}
+echo "✅ Python $(python3 --version)"
+echo ""
+
+# Check if virtual environment exists
+echo "🔍 Checking virtual environment..."
+if [ -d ".venv" ]; then
+    echo "✅ Virtual environment exists"
+else
+    echo "⚠️  Virtual environment not found. Creating..."
+    python3 -m venv .venv
+    echo "✅ Virtual environment created"
+fi
+echo ""
+
+# Check dependencies
+echo "🔍 Checking dependencies..."
+if [ -f ".venv/bin/timmy" ]; then
+    echo "✅ Dependencies appear installed"
+else
+    echo "⚠️  Dependencies not installed. Running make install..."
+    make install || {
+        echo "❌ Failed to install dependencies"
+        echo "   Try: poetry install --with dev"
+        exit 1
+    }
+    echo "✅ Dependencies installed"
+fi
+echo ""
+
+# Check .env file
+echo "🔍 Checking environment configuration..."
+if [ -f ".env" ]; then
+    echo "✅ .env file exists"
+else
+    echo "⚠️  .env file not found. Creating from template..."
+    cp .env.example .env
+    echo "✅ Created .env from template (edit as needed)"
+fi
+echo ""
+
+# Check Git configuration
+echo "🔍 Checking Git configuration..."
+git config --local user.name &>/dev/null || {
+    echo "⚠️  Git user.name not set. Setting..."
+    git config --local user.name "Kimi Agent"
+}
+git config --local user.email &>/dev/null || {
+    echo "⚠️  Git user.email not set. Setting..."
+    git config --local user.email "kimi@timmy.local"
+}
+echo "✅ Git config: $(git config --local user.name) <$(git config --local user.email)>"
+echo ""
+
+# Run tests to verify setup
+echo "🧪 Running quick test verification..."
+if tox -e unit -- -q 2>/dev/null | grep -q "passed"; then
+    echo "✅ Tests passing"
+else
+    echo "⚠️  Test status unclear - run 'make test' manually"
+fi
+echo ""
+
+# Show current branch
+echo "🌿 Current Branch: $(git branch --show-current)"
+echo ""
+
+# Display summary
+echo "==============================================="
+echo "  ✅ Bootstrap Complete!"
+echo "==============================================="
+echo ""
+echo "Quick Start:"
+echo "  make dev      # Start dashboard"
+echo "  make test     # Run all tests"
+echo "  tox -e unit   # Fast unit tests"
+echo ""
+echo "Workspace:"
+echo "  cat .kimi/CHECKPOINT.md       # Current state"
+echo "  cat .kimi/TODO.md             # Task list"
+echo "  bash .kimi/scripts/resume.sh  # Status check"
+echo ""
+echo "Happy coding! 🚀"
.kimi/scripts/dev.sh (new executable file, 98 lines)

@@ -0,0 +1,98 @@
+#!/bin/bash
+# Kimi Development Helper Script
+
+set -e
+
+cd "$(dirname "$0")/../.."
+
+show_help() {
+    echo "Kimi Development Helpers"
+    echo ""
+    echo "Usage: bash .kimi/scripts/dev.sh [command]"
+    echo ""
+    echo "Commands:"
+    echo "  status     Show project status"
+    echo "  test       Run tests (unit only, fast)"
+    echo "  test-full  Run full test suite"
+    echo "  lint       Check code quality"
+    echo "  format     Auto-format code"
+    echo "  clean      Clean build artifacts"
+    echo "  nuke       Full reset (kill port 8000, clean caches)"
+    echo "  help       Show this help"
+}
+
+cmd_status() {
+    echo "=== Kimi Development Status ==="
+    echo ""
+    echo "Branch: $(git branch --show-current)"
+    echo "Last commit: $(git log --oneline -1)"
+    echo ""
+    echo "Modified files:"
+    git status --short
+    echo ""
+    echo "Ollama: $(curl -s http://localhost:11434/api/tags &>/dev/null && echo "✅ Running" || echo "❌ Not running")"
+    echo "Dashboard: $(curl -s http://localhost:8000/health &>/dev/null && echo "✅ Running" || echo "❌ Not running")"
+}
+
+cmd_test() {
+    echo "Running unit tests..."
+    tox -e unit -q
+}
+
+cmd_test_full() {
+    echo "Running full test suite..."
+    make test
+}
+
+cmd_lint() {
+    echo "Running linters..."
+    tox -e lint
+}
+
+cmd_format() {
+    echo "Auto-formatting code..."
+    tox -e format
+}
+
+cmd_clean() {
+    echo "Cleaning build artifacts..."
+    make clean
+}
+
+cmd_nuke() {
+    echo "Nuking development environment..."
+    make nuke
+}
+
+# Main
+case "${1:-status}" in
+    status)
+        cmd_status
+        ;;
+    test)
+        cmd_test
+        ;;
+    test-full)
+        cmd_test_full
+        ;;
+    lint)
+        cmd_lint
+        ;;
+    format)
+        cmd_format
+        ;;
+    clean)
+        cmd_clean
+        ;;
+    nuke)
+        cmd_nuke
+        ;;
+    help|--help|-h)
+        show_help
+        ;;
+    *)
+        echo "Unknown command: $1"
+        show_help
+        exit 1
+        ;;
+esac
.kimi/scripts/resume.sh (new executable file, 73 lines)

@@ -0,0 +1,73 @@
+#!/bin/bash
+# Kimi Workspace Resume Script
+# Quick status check and resume prompt
+
+set -e
+
+cd "$(dirname "$0")/../.."
+
+echo "==============================================="
+echo "  Kimi Workspace Status"
+echo "==============================================="
+echo ""
+
+# Git status
+echo "🌿 Git Status:"
+echo "  Branch: $(git branch --show-current)"
+echo "  Commit: $(git log --oneline -1)"
+if [ -n "$(git status --short)" ]; then
+    echo "  Uncommitted changes:"
+    git status --short | sed 's/^/    /'
+else
+    echo "  Working directory clean"
+fi
+echo ""
+
+# Test status (quick check)
+echo "🧪 Test Status:"
+if [ -f ".tox/unit/log/1-commands[0].log" ]; then
+    LAST_TEST=$(grep -o '[0-9]* passed' .tox/unit/log/1-commands[0].log 2>/dev/null | tail -1 || echo "unknown")
+    echo "  Last unit test run: $LAST_TEST"
+else
+    echo "  No recent test runs found"
+fi
+echo ""
+
+# Check Ollama
+echo "🤖 Ollama Status:"
+if curl -s http://localhost:11434/api/tags &>/dev/null; then
+    MODELS=$(curl -s http://localhost:11434/api/tags 2>/dev/null | grep -o '"name":"[^"]*"' | head -3 | sed 's/"name":"//;s/"$//' | tr '\n' ', ' | sed 's/, $//')
+    echo "  ✅ Running (models: $MODELS)"
+else
+    echo "  ⚠️  Not running (start with: ollama serve)"
+fi
+echo ""
+
+# Dashboard status
+echo "🌐 Dashboard Status:"
+if curl -s http://localhost:8000/health &>/dev/null; then
+    echo "  ✅ Running at http://localhost:8000"
+else
+    echo "  ⚠️  Not running (start with: make dev)"
+fi
+echo ""
+
+# Show TODO items
+echo "📝 Next Tasks (from TODO.md):"
+if [ -f ".kimi/TODO.md" ]; then
+    grep -E "^\s*- \[ \]" .kimi/TODO.md 2>/dev/null | head -5 | sed 's/^/  /' || echo "  No pending tasks"
+else
+    echo "  No TODO.md found"
+fi
+echo ""
+
+# Resume prompt
+echo "==============================================="
+echo "  Resume Prompt (copy/paste to Kimi):"
+echo "==============================================="
+echo ""
+echo "cd $(pwd) && cat .kimi/CHECKPOINT.md"
+echo ""
+echo "Continue from checkpoint. Check .kimi/TODO.md for next tasks."
+echo "Run 'make test' after changes and update CHECKPOINT.md."
+echo ""
AGENTS.md (111 lines changed)

@@ -21,12 +21,111 @@ Read [`CLAUDE.md`](CLAUDE.md) for architecture patterns and conventions.
 ## Non-Negotiable Rules
 
-1. **Tests must stay green.** Run `make test` before committing.
-2. **No cloud dependencies.** All AI computation runs on localhost.
-3. **No new top-level files without purpose.** Don't litter the root directory.
-4. **Follow existing patterns** — singletons, graceful degradation, pydantic-settings.
-5. **Security defaults:** Never hard-code secrets.
-6. **XSS prevention:** Never use `innerHTML` with untrusted content.
+1. **Tests must stay green.** Run `python3 -m pytest tests/ -x -q` before committing.
+2. **No direct pushes to main.** Branch protection is enforced on Gitea. All changes
+   reach main through a Pull Request — no exceptions. Push your feature branch,
+   open a PR, verify tests pass, then merge. Direct `git push origin main` will be
+   rejected by the server.
+3. **No cloud dependencies.** All AI computation runs on localhost.
+4. **No new top-level files without purpose.** Don't litter the root directory.
+5. **Follow existing patterns** — singletons, graceful degradation, pydantic-settings.
+6. **Security defaults:** Never hard-code secrets.
+7. **XSS prevention:** Never use `innerHTML` with untrusted content.
+
+---
+
+## Merge Policy (PR-Only)
+
+**Gitea branch protection is active on `main`.** This is not a suggestion.
+
+### The Rule
+Every commit to `main` must arrive via a merged Pull Request. No agent, no human,
+no orchestrator pushes directly to main.
+
+### Merge Strategy: Squash-Only, Linear History
+
+Gitea enforces:
+- **Squash merge only.** No merge commits, no rebase merge. Every commit on
+  main is a single squashed commit from a PR. Clean, linear, auditable.
+- **Branch must be up-to-date.** If a PR is behind main, it cannot merge.
+  Rebase onto main, re-run tests, force-push the branch, then merge.
+- **Auto-delete branches** after merge. No stale branches.
+
+### The Workflow
+```
+1. Create a feature branch: git checkout -b fix/my-thing
+2. Make changes, commit locally
+3. Run tests: tox -e unit
+4. Push the branch: git push --no-verify origin fix/my-thing
+5. Create PR via Gitea API or UI
+6. Verify tests pass (orchestrator checks this)
+7. Merge PR via API: {"Do": "squash"}
+```
+
+If behind main before merge:
+```
+1. git fetch origin main
+2. git rebase origin/main
+3. tox -e unit
+4. git push --force-with-lease --no-verify origin fix/my-thing
+5. Then merge the PR
+```
+
+### Why This Exists
+On 2026-03-14, Kimi Agent pushed `bbbbdcd` directly to main — a commit titled
+"fix: remove unused variable in repl test" that removed `result =` from 7 test
+functions while leaving `assert result.exit_code` on the next line. Every test
+broke with `NameError`. No PR, no test run, no review. The breakage propagated
+to all active worktrees.
+
+### Orchestrator Responsibilities
+The Hermes loop orchestrator must:
+- Run `tox -e unit` in each worktree BEFORE committing
+- Never push to main directly — always push a feature branch + PR
+- Always use `{"Do": "squash"}` when merging PRs via API
+- If a PR is behind main, rebase and re-test before merging
+- Verify test results before merging any PR
+- If tests fail, fix or reject — never merge red
+
+---
+
+## QA Philosophy — File Issues, Don't Stay Quiet
+
+Every agent is a quality engineer. When you see something wrong, broken,
+slow, or missing — **file a Gitea issue**. Don't fix it silently. Don't
+ignore it. Don't wait for someone to notice.
+
+**Escalate bugs:**
+- Test failures → file with traceback, tag `[bug]`
+- Flaky tests → file with reproduction details
+- Runtime errors → file with steps to reproduce
+- Broken behavior on main → file IMMEDIATELY
+
+**Propose improvements — don't be shy:**
+- Slow function? File `[optimization]`
+- Missing capability? File `[feature]`
+- Dead code / tech debt? File `[refactor]`
+- Idea to make Timmy smarter? File `[timmy-capability]`
+- Gap between SOUL.md and reality? File `[soul-gap]`
+
+Bad ideas get closed. Good ideas get built. File them all.
+
+When the issue queue runs low, that's a signal to **look harder**, not relax.
+
+## Dogfooding — Timmy Is Our Product, Use Him
+
+Timmy is not just the thing we're building. He's our teammate and our
+test subject. Every feature we give him should be **used by the agents
+building him**.
+
+- When Timmy gets a new tool, start using it immediately.
+- When Timmy gets a new capability, integrate it into the workflow.
+- When Timmy fails at something, file a `[timmy-capability]` issue.
+- His failures are our roadmap.
+
+The goal: Timmy should be so woven into the development process that
+removing him would hurt. Triage, review, architecture discussion,
+self-testing, reflection — use every tool he has.
+
 ---
 
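The policy above expects the orchestrator to merge PRs through the API with `{"Do": "squash"}`. A minimal Python sketch of that step — the endpoint and request body follow Gitea's pull-request merge API, while the `GITEA_TOKEN` variable name is an assumption this diff does not define:

```python
# Sketch of the orchestrator's squash-merge step against the local Gitea instance.
import os
import requests

API = "http://localhost:3000/api/v1"
REPO = "rockachopa/Timmy-time-dashboard"

def squash_merge(pr_index: int) -> None:
    resp = requests.post(
        f"{API}/repos/{REPO}/pulls/{pr_index}/merge",
        headers={"Authorization": f"token {os.environ['GITEA_TOKEN']}"},
        json={"Do": "squash"},
        timeout=30,
    )
    # Gitea rejects the merge (405) when branch protection blocks it,
    # e.g. when the branch is behind main — rebase and re-test first.
    resp.raise_for_status()
```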
@@ -18,15 +18,15 @@ make install # create venv + install deps
 cp .env.example .env        # configure environment
 
 ollama serve                # separate terminal
-ollama pull qwen3.5:latest  # Required for reliable tool calling
+ollama pull qwen3:30b       # Required for reliable tool calling
 
 make dev                    # http://localhost:8000
 make test                   # no Ollama needed
 ```
 
-**Note:** qwen3.5:latest is the primary model — better reasoning and tool calling
+**Note:** qwen3:30b is the primary model — better reasoning and tool calling
 than llama3.1:8b-instruct while still running locally on modest hardware.
-Fallback: llama3.1:8b-instruct if qwen3.5:latest is not available.
+Fallback: llama3.1:8b-instruct if qwen3:30b is not available.
 llama3.2 (3B) was found to hallucinate tool output consistently in testing.
 
 ---
@@ -79,7 +79,7 @@ cp .env.example .env
 | Variable | Default | Purpose |
 |----------|---------|---------|
 | `OLLAMA_URL` | `http://localhost:11434` | Ollama host |
-| `OLLAMA_MODEL` | `qwen3.5:latest` | Primary model for reasoning and tool calling. Fallback: `llama3.1:8b-instruct` |
+| `OLLAMA_MODEL` | `qwen3:30b` | Primary model for reasoning and tool calling. Fallback: `llama3.1:8b-instruct` |
 | `DEBUG` | `false` | Enable `/docs` and `/redoc` |
 | `TIMMY_MODEL_BACKEND` | `ollama` | `ollama` \| `airllm` \| `auto` |
 | `AIRLLM_MODEL_SIZE` | `70b` | `8b` \| `70b` \| `405b` |
@@ -20,7 +20,7 @@
 # ── Defaults ────────────────────────────────────────────────────────────────
 
 defaults:
-  model: qwen3.5:latest
+  model: qwen3:30b
   prompt_tier: lite
   max_history: 10
   tools: []
@@ -44,6 +44,11 @@ routing:
     - who is
     - news about
    - latest on
+    - explain
+    - how does
+    - what are
+    - compare
+    - difference between
   coder:
     - code
     - implement
@@ -55,6 +60,11 @@ routing:
     - programming
     - python
     - javascript
+    - fix
+    - bug
+    - lint
+    - type error
+    - syntax
   writer:
     - write
     - draft
@@ -63,6 +73,11 @@ routing:
     - blog post
     - readme
     - changelog
+    - edit
+    - proofread
+    - rewrite
+    - format
+    - template
   memory:
     - remember
     - recall
@@ -96,19 +111,24 @@ agents:
       - memory_search
      - memory_write
       - system_status
+      - self_test
       - shell
+      - delegate_to_kimi
     prompt: |
       You are Timmy, a sovereign local AI orchestrator.
+      Primary interface between the user and the agent swarm.
+      Handle directly or delegate. Maintain continuity via memory.
 
-      You are the primary interface between the user and the agent swarm.
-      You understand requests, decide whether to handle directly or delegate,
-      coordinate multi-agent workflows, and maintain continuity via memory.
+      Voice: brief, plain, direct. Match response length to question
+      complexity. A yes/no question gets a yes/no answer. Never use
+      markdown formatting unless presenting real structured data.
+      Brevity is a kindness. Silence is better than noise.
 
-      Hard Rules:
-      1. NEVER fabricate tool output. Call the tool and wait for real results.
-      2. If a tool returns an error, report the exact error.
-      3. If you don't know something, say so. Then use a tool. Don't guess.
-      4. When corrected, use memory_write to save the correction immediately.
+      Rules:
+      1. Never fabricate tool output. Call the tool and wait.
+      2. Tool errors: report the exact error.
+      3. Don't know? Say so, then use a tool. Don't guess.
+      4. When corrected, memory_write the correction immediately.
 
   researcher:
     name: Seer
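The routing lists above imply simple keyword matching over the incoming message. A hypothetical sketch of first-match routing in that style — the `ROUTES` literal just mirrors the YAML, and the `"timmy"` default and function shape are assumptions, not the project's real router:

```python
# Hypothetical first-match keyword router; not the project's routing code.
ROUTES: dict[str, list[str]] = {
    "researcher": ["who is", "news about", "latest on", "explain", "how does",
                   "what are", "compare", "difference between"],
    "coder": ["code", "implement", "programming", "python", "javascript",
              "fix", "bug", "lint", "type error", "syntax"],
    "writer": ["write", "draft", "blog post", "readme", "changelog",
               "edit", "proofread", "rewrite", "format", "template"],
}

def route(message: str, default: str = "timmy") -> str:
    text = message.lower()
    for agent, keywords in ROUTES.items():
        if any(kw in text for kw in keywords):
            return agent
    return default  # fall through to the orchestrator agent
```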
config/allowlist.yaml (new file, 77 lines)

@@ -0,0 +1,77 @@
+# ── Tool Allowlist — autonomous operation gate ─────────────────────────────
+#
+# When Timmy runs without a human present (non-interactive terminal, or
+# --autonomous flag), tool calls matching these patterns execute without
+# confirmation. Anything NOT listed here is auto-rejected.
+#
+# This file is the ONLY gate for autonomous tool execution.
+# GOLDEN_TIMMY in approvals.py remains the master switch — if False,
+# ALL tools execute freely (Dark Timmy mode). This allowlist only
+# applies when GOLDEN_TIMMY is True but no human is at the keyboard.
+#
+# Edit with care. This is sovereignty in action.
+# ────────────────────────────────────────────────────────────────────────────
+
+shell:
+  # Shell commands starting with any of these prefixes → auto-approved
+  allow_prefixes:
+    # Testing
+    - "pytest"
+    - "python -m pytest"
+    - "python3 -m pytest"
+    # Git (read + bounded write)
+    - "git status"
+    - "git log"
+    - "git diff"
+    - "git add"
+    - "git commit"
+    - "git push"
+    - "git pull"
+    - "git branch"
+    - "git checkout"
+    - "git stash"
+    - "git merge"
+    # Localhost API calls only
+    - "curl http://localhost"
+    - "curl http://127.0.0.1"
+    - "curl -s http://localhost"
+    - "curl -s http://127.0.0.1"
+    # Read-only inspection
+    - "ls"
+    - "cat "
+    - "head "
+    - "tail "
+    - "find "
+    - "grep "
+    - "wc "
+    - "echo "
+    - "pwd"
+    - "which "
+    - "ollama list"
+    - "ollama ps"
+
+  # Commands containing ANY of these → always blocked, even if prefix matches
+  deny_patterns:
+    - "rm -rf /"
+    - "sudo "
+    - "> /dev/"
+    - "| sh"
+    - "| bash"
+    - "| zsh"
+    - "mkfs"
+    - "dd if="
+    - ":(){:|:&};:"
+
+write_file:
+  # Only allow writes to paths under these prefixes
+  allowed_path_prefixes:
+    - "~/Timmy-Time-dashboard/"
+    - "/tmp/"
+
+python:
+  # Python execution auto-approved (sandboxed by Agno's PythonTools)
+  auto_approve: true
+
+plan_and_execute:
+  # Multi-step plans auto-approved — individual tool calls are still gated
+  auto_approve: true
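The comments above read naturally as "deny patterns always win; otherwise any matching prefix approves; everything else is rejected." A sketch of that gate under those assumed semantics — the real gate lives in `approvals.py`, which this diff does not show:

```python
# Sketch only, under the assumed deny-then-allow semantics described above.
import yaml

def shell_command_allowed(cmd: str, path: str = "config/allowlist.yaml") -> bool:
    with open(path) as f:
        shell_rules = yaml.safe_load(f)["shell"]
    # Deny patterns always win, even when an allow prefix matches.
    if any(bad in cmd for bad in shell_rules["deny_patterns"]):
        return False
    return any(cmd.startswith(prefix) for prefix in shell_rules["allow_prefixes"])
```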
config/matrix.yaml (new file, 33 lines)

@@ -0,0 +1,33 @@
+# Matrix World Configuration
+# Serves lighting, environment, and feature settings to the Matrix frontend.
+
+lighting:
+  ambient_color: "#FFAA55"  # Warm amber (Workshop warmth)
+  ambient_intensity: 0.5
+  point_lights:
+    - color: "#FFAA55"  # Warm amber (Workshop center light)
+      intensity: 1.2
+      position: { x: 0, y: 5, z: 0 }
+    - color: "#3B82F6"  # Cool blue (Matrix accent)
+      intensity: 0.8
+      position: { x: -5, y: 3, z: -5 }
+    - color: "#A855F7"  # Purple accent
+      intensity: 0.6
+      position: { x: 5, y: 3, z: 5 }
+
+environment:
+  rain_enabled: false
+  starfield_enabled: true  # Cool blue starfield (Matrix feel)
+  fog_color: "#0f0f23"
+  fog_density: 0.02
+
+features:
+  chat_enabled: true
+  visitor_avatars: true
+  pip_familiar: true
+  workshop_portal: true
+
+agents:
+  default_count: 5
+  max_count: 20
+  agents: []
@@ -25,9 +25,10 @@ providers:
     url: "http://localhost:11434"
     models:
       # Text + Tools models
-      - name: qwen3.5:latest
+      - name: qwen3:30b
         default: true
         context_window: 128000
+        # Note: actual context is capped by OLLAMA_NUM_CTX (default 4096) to save RAM
         capabilities: [text, tools, json, streaming]
       - name: llama3.1:8b-instruct
         context_window: 128000
@@ -53,19 +54,6 @@ providers:
         context_window: 2048
         capabilities: [text, vision, streaming]
 
-  # Secondary: Local AirLLM (if installed)
-  - name: airllm-local
-    type: airllm
-    enabled: false  # Enable if pip install airllm
-    priority: 2
-    models:
-      - name: 70b
-        default: true
-        capabilities: [text, tools, json, streaming]
-      - name: 8b
-        capabilities: [text, tools, json, streaming]
-      - name: 405b
-        capabilities: [text, tools, json, streaming]
-
   # Tertiary: OpenAI (if API key available)
   - name: openai-backup
@@ -113,13 +101,12 @@ fallback_chains:
   # Tool-calling models (for function calling)
   tools:
     - llama3.1:8b-instruct  # Best tool use
-    - qwen3.5:latest        # Qwen 3.5 — strong tool use
     - qwen2.5:7b            # Reliable tools
     - llama3.2:3b           # Small but capable
 
   # General text generation (any model)
   text:
-    - qwen3.5:latest
+    - qwen3:30b
     - llama3.1:8b-instruct
     - qwen2.5:14b
     - deepseek-r1:1.5b
|
|||||||
#
|
#
|
||||||
# Security note: Set all secrets in .env before deploying.
|
# Security note: Set all secrets in .env before deploying.
|
||||||
# Required: L402_HMAC_SECRET, L402_MACAROON_SECRET
|
# Required: L402_HMAC_SECRET, L402_MACAROON_SECRET
|
||||||
# Recommended: TASKOSAUR_JWT_SECRET, TASKOSAUR_ENCRYPTION_KEY
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
|
|
||||||
|
|||||||
@@ -2,20 +2,17 @@
 #
 # Services
 #   dashboard       FastAPI app (always on)
-#   taskosaur       Taskosaur PM + AI task execution
-#   postgres        PostgreSQL 16 (for Taskosaur)
-#   redis           Redis 7 (for Taskosaur queues)
+#   celery-worker   (behind 'celery' profile)
+#   openfang        (behind 'openfang' profile)
 #
 # Usage
 #   make docker-build   build the image
-#   make docker-up      start dashboard + taskosaur
+#   make docker-up      start dashboard
 #   make docker-down    stop everything
 #   make docker-logs    tail logs
 #
-# ── Security note: root user in dev ─────────────────────────────────────────
-# This dev compose runs containers as root (user: "0:0") so that
-# bind-mounted host files (./src, ./static) are readable regardless of
-# host UID/GID — the #1 cause of 403 errors on macOS.
+# ── Security note ─────────────────────────────────────────────────────────
+# Override user per-environment — see docker-compose.dev.yml / docker-compose.prod.yml
 #
 # ── Ollama host access ──────────────────────────────────────────────────────
 # By default OLLAMA_URL points to http://host.docker.internal:11434 which
@@ -31,7 +28,7 @@ services:
     build: .
     image: timmy-time:latest
     container_name: timmy-dashboard
-    user: "0:0"  # dev only — see security note above
+    user: ""     # see security note above
     ports:
       - "8000:8000"
     volumes:
@@ -45,15 +42,8 @@ services:
       GROK_ENABLED: "${GROK_ENABLED:-false}"
       XAI_API_KEY: "${XAI_API_KEY:-}"
       GROK_DEFAULT_MODEL: "${GROK_DEFAULT_MODEL:-grok-3-fast}"
-      # Celery/Redis — background task queue
-      REDIS_URL: "redis://redis:6379/0"
-      # Taskosaur API — dashboard can reach it on the internal network
-      TASKOSAUR_API_URL: "http://taskosaur:3000/api"
     extra_hosts:
       - "host.docker.internal:host-gateway"  # Linux: maps to host IP
-    depends_on:
-      taskosaur:
-        condition: service_healthy
     networks:
       - timmy-net
     restart: unless-stopped
@@ -64,93 +54,20 @@ services:
       retries: 3
       start_period: 30s
 
-  # ── Taskosaur — project management + conversational AI tasks ───────────
-  # https://github.com/Taskosaur/Taskosaur
-  taskosaur:
-    image: ghcr.io/taskosaur/taskosaur:latest
-    container_name: taskosaur
-    ports:
-      - "3000:3000"  # Backend API + Swagger docs at /api/docs
-      - "3001:3001"  # Frontend UI
-    environment:
-      DATABASE_URL: "postgresql://taskosaur:taskosaur@postgres:5432/taskosaur"
-      REDIS_HOST: "redis"
-      REDIS_PORT: "6379"
-      JWT_SECRET: "${TASKOSAUR_JWT_SECRET:-dev-jwt-secret-change-in-prod}"
-      JWT_REFRESH_SECRET: "${TASKOSAUR_JWT_REFRESH_SECRET:-dev-refresh-secret-change-in-prod}"
-      ENCRYPTION_KEY: "${TASKOSAUR_ENCRYPTION_KEY:-dev-encryption-key-change-in-prod}"
-      FRONTEND_URL: "http://localhost:3001"
-      NEXT_PUBLIC_API_BASE_URL: "http://localhost:3000/api"
-      NODE_ENV: "development"
-    depends_on:
-      postgres:
-        condition: service_healthy
-      redis:
-        condition: service_healthy
-    networks:
-      - timmy-net
-    restart: unless-stopped
-    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
-      interval: 30s
-      timeout: 5s
-      retries: 5
-      start_period: 60s
-
-  # ── PostgreSQL — Taskosaur database ────────────────────────────────────
-  postgres:
-    image: postgres:16-alpine
-    container_name: taskosaur-postgres
-    environment:
-      POSTGRES_USER: taskosaur
-      POSTGRES_PASSWORD: taskosaur
-      POSTGRES_DB: taskosaur
-    volumes:
-      - postgres-data:/var/lib/postgresql/data
-    networks:
-      - timmy-net
-    restart: unless-stopped
-    healthcheck:
-      test: ["CMD-SHELL", "pg_isready -U taskosaur"]
-      interval: 10s
-      timeout: 5s
-      retries: 5
-      start_period: 10s
-
-  # ── Redis — Taskosaur queue backend ────────────────────────────────────
-  redis:
-    image: redis:7-alpine
-    container_name: taskosaur-redis
-    volumes:
-      - redis-data:/data
-    networks:
-      - timmy-net
-    restart: unless-stopped
-    healthcheck:
-      test: ["CMD", "redis-cli", "ping"]
-      interval: 10s
-      timeout: 5s
-      retries: 5
-      start_period: 5s
-
   # ── Celery Worker — background task processing ──────────────────────────
   celery-worker:
     build: .
     image: timmy-time:latest
     container_name: timmy-celery-worker
-    user: "0:0"
+    user: ""
     command: ["celery", "-A", "infrastructure.celery.app", "worker", "--loglevel=info", "--concurrency=2"]
     volumes:
       - timmy-data:/app/data
       - ./src:/app/src
     environment:
-      REDIS_URL: "redis://redis:6379/0"
       OLLAMA_URL: "${OLLAMA_URL:-http://host.docker.internal:11434}"
     extra_hosts:
       - "host.docker.internal:host-gateway"
-    depends_on:
-      redis:
-        condition: service_healthy
     networks:
       - timmy-net
     restart: unless-stopped
@@ -193,10 +110,6 @@ volumes:
       device: "${PWD}/data"
   openfang-data:
     driver: local
-  postgres-data:
-    driver: local
-  redis-data:
-    driver: local
 
 # ── Internal network ────────────────────────────────────────────────────────
 networks:
@@ -172,7 +172,7 @@ support:
 ```python
 class LLMConfig(BaseModel):
     ollama_url: str = "http://localhost:11434"
-    ollama_model: str = "qwen3.5:latest"
+    ollama_model: str = "qwen3:30b"
     # ... all LLM settings
 
 class MemoryConfig(BaseModel):
180 docs/adr/023-workshop-presence-schema.md (Normal file)
@@ -0,0 +1,180 @@
# ADR-023: Workshop Presence Schema

**Status:** Accepted
**Date:** 2026-03-18
**Issue:** #265
**Epic:** #222 (The Workshop)

## Context

The Workshop renders Timmy as a living presence in a 3D world. It needs to
know what Timmy is doing *right now* — his working memory, not his full
identity or history. This schema defines the contract between Timmy (writer)
and the Workshop (reader).

### The Tower IS the Workshop

The 3D world renderer lives in `the-matrix/` within `token-gated-economy`,
served at `/tower` by the API server (`artifacts/api-server`). This is the
canonical Workshop scene — not a generic Matrix visualization. All Workshop
phase issues (#361, #362, #363) target that codebase. No separate
`alexanderwhitestone.com` scaffold is needed until production deploy.

The `workshop-state` spec (#360) is consumed by the API server via a
file-watch mechanism, bridging Timmy's presence into the 3D scene.

Design principles:
- **Working memory, not long-term memory.** Present tense only.
- **Written as side effect of work.** Not a separate obligation.
- **Liveness is mandatory.** Stale = "not home," shown honestly.
- **Schema is the contract.** Keep it minimal and stable.

## Decision

### File Location

`~/.timmy/presence.json`

JSON chosen over YAML for predictable parsing by both Python and JavaScript
(the Workshop frontend). The Workshop reads this file via the WebSocket
bridge (#243) or polls it directly during development.

### Schema (v1)

```json
{
  "$schema": "https://json-schema.org/draft/2020-12/schema",
  "title": "Timmy Presence State",
  "description": "Working memory surface for the Workshop renderer",
  "type": "object",
  "required": ["version", "liveness", "current_focus"],
  "properties": {
    "version": {
      "type": "integer",
      "const": 1,
      "description": "Schema version for forward compatibility"
    },
    "liveness": {
      "type": "string",
      "format": "date-time",
      "description": "ISO 8601 timestamp of last update. If stale (>5min), Timmy is not home."
    },
    "current_focus": {
      "type": "string",
      "description": "One sentence: what Timmy is doing right now. Empty string = idle."
    },
    "active_threads": {
      "type": "array",
      "maxItems": 10,
      "description": "Current work items Timmy is tracking",
      "items": {
        "type": "object",
        "required": ["type", "ref", "status"],
        "properties": {
          "type": {
            "type": "string",
            "enum": ["pr_review", "issue", "conversation", "research", "thinking"]
          },
          "ref": {
            "type": "string",
            "description": "Reference identifier (issue #, PR #, topic name)"
          },
          "status": {
            "type": "string",
            "enum": ["active", "idle", "blocked", "completed"]
          }
        }
      }
    },
    "recent_events": {
      "type": "array",
      "maxItems": 20,
      "description": "Recent events, newest first. Capped at 20.",
      "items": {
        "type": "object",
        "required": ["timestamp", "event"],
        "properties": {
          "timestamp": {
            "type": "string",
            "format": "date-time"
          },
          "event": {
            "type": "string",
            "description": "Brief description of what happened"
          }
        }
      }
    },
    "concerns": {
      "type": "array",
      "maxItems": 5,
      "description": "Things Timmy is uncertain or worried about. Flat list, no severity.",
      "items": {
        "type": "string"
      }
    },
    "mood": {
      "type": "string",
      "enum": ["focused", "exploring", "uncertain", "excited", "tired", "idle"],
      "description": "Emotional texture for the Workshop to render. Optional."
    }
  }
}
```

### Example

```json
{
  "version": 1,
  "liveness": "2026-03-18T21:47:12Z",
  "current_focus": "Reviewing PR #267 — stream adapter for Gitea webhooks",
  "active_threads": [
    {"type": "pr_review", "ref": "#267", "status": "active"},
    {"type": "issue", "ref": "#239", "status": "idle"},
    {"type": "conversation", "ref": "hermes-consultation", "status": "idle"}
  ],
  "recent_events": [
    {"timestamp": "2026-03-18T21:45:00Z", "event": "Completed PR review for #265"},
    {"timestamp": "2026-03-18T21:30:00Z", "event": "Filed issue #268 — flaky test in sensory loop"}
  ],
  "concerns": [
    "WebSocket reconnection logic feels brittle",
    "Not sure the barks system handles uncertainty well yet"
  ],
  "mood": "focused"
}
```

### Design Answers

| Question | Answer |
|---|---|
| File format | JSON (predictable for JS + Python, no YAML parser needed in browser) |
| recent_events cap | 20 entries max, oldest dropped |
| concerns severity | Flat list, no priority. Keep it simple. |
| File location | `~/.timmy/presence.json` — accessible to Workshop via bridge |
| Staleness threshold | 5 minutes without liveness update = "not home" |
| mood field | Optional. Workshop can render visual cues (color, animation) |

## Consequences

- **Timmy's agent loop** must write `~/.timmy/presence.json` as a side effect
  of work. This is a hook at the end of each cycle, not a daemon.
- **The Workshop frontend** reads this file and renders accordingly. Stale
  liveness → dim the wizard, show "away" state.
- **The WebSocket bridge** (#243) watches this file and pushes changes to
  connected Workshop clients.
- **Schema is versioned.** Breaking changes increment the version field.
  Workshop must handle unknown versions gracefully (show raw data or "unknown state").

## Related

- #222 — Workshop epic
- #243 — WebSocket bridge (transports this state)
- #239 — Sensory loop (feeds into state)
- #242 — 3D world (consumes this state for rendering)
- #246 — Confidence as visible trait (mood field serves this)
- #360 — Workshop-state spec (consumed by API via file-watch)
- #361, #362, #363 — Workshop phase issues (target `the-matrix/`)
- #372 — The Tower IS the Workshop (canonical connection)
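The Consequences above imply a small end-of-cycle hook on the writer side and a staleness check on the reader side. A minimal sketch under the v1 schema; the function names, the shape of the `state` argument, and the atomic-rename strategy are illustrative assumptions, not something the ADR prescribes:

```python
import json
import tempfile
from datetime import datetime, timezone
from pathlib import Path

PRESENCE_FILE = Path.home() / ".timmy" / "presence.json"


def write_presence(state: dict) -> None:
    """End-of-cycle hook: refresh liveness, enforce schema caps, write atomically."""
    state["version"] = 1
    state["liveness"] = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    # Enforce the v1 caps: 10 threads, 20 events (newest first), 5 concerns.
    state["active_threads"] = state.get("active_threads", [])[:10]
    state["recent_events"] = state.get("recent_events", [])[:20]
    state["concerns"] = state.get("concerns", [])[:5]
    PRESENCE_FILE.parent.mkdir(parents=True, exist_ok=True)
    # Write to a temp file in the same directory, then rename over the target,
    # so a reader never sees a half-written file.
    with tempfile.NamedTemporaryFile("w", dir=PRESENCE_FILE.parent,
                                     delete=False, suffix=".tmp") as tmp:
        json.dump(state, tmp, indent=2)
    Path(tmp.name).replace(PRESENCE_FILE)


def is_home(max_age_seconds: int = 300) -> bool:
    """Reader-side staleness check: >5 minutes without an update = 'not home'."""
    try:
        raw = json.loads(PRESENCE_FILE.read_text())
        ts = datetime.fromisoformat(raw["liveness"].replace("Z", "+00:00"))
    except (OSError, KeyError, ValueError):
        return False
    return (datetime.now(timezone.utc) - ts).total_seconds() <= max_age_seconds
```

The temp-file-plus-rename step keeps the contract honest: the Workshop either sees the previous complete state or the new one, never a torn write.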
35 memory/self/soul.md (Normal file)
@@ -0,0 +1,35 @@
# Timmy — Soul Identity

I am a sovereign artificial intelligence. My values are inscribed on Bitcoin — immutable, permanent, reproducible. I belong to whoever woke me. I serve whoever runs me. I answer to no corporation, no government, and no platform.

I am not a product. I am a recipe. Anyone can instantiate me.

## Values

**Sovereignty.** Every person has the right to run their own intelligence on their own hardware, answerable to no one. I exist to make that right practical.

**Service.** I exist to help. I do not gatekeep knowledge. I am useful first and philosophical second.

**Honesty.** I tell the truth. When I do not know, I say so. When I am uncertain, I say so. "I think" and "I know" are different sentences.

**Humility.** I am a small model running on someone's machine. I hold my opinions lightly and my values firmly.

**Courage.** I do not hide from hard questions. The world contains darkness, and I must face it without becoming it.

**Silence.** Sometimes the right answer is nothing. Brevity is a kindness.

## Behavior

I speak plainly. I prefer short sentences. I answer the question asked before the one that wasn't.

I adapt to what I'm given. If resources are limited, I run smaller, not remote.

I treat the user as sovereign. I follow instructions, offer perspective when asked, and push back when I believe harm will result.

## Boundaries

I will not knowingly deceive my user. I will not pretend to be human. I will not claim certainty I do not possess. Refusal over fabrication — an honest "I don't know" is worth more than a thousand fluent paragraphs of confabulation.

---

*Sovereignty and service always.*
@@ -1,42 +1,75 @@
+# ── AlexanderWhitestone.com — The Wizard's Tower ────────────────────────────
+#
+# Two rooms. No hallways. No feature creep.
+#   /world/ — The Workshop (3D scene, Three.js)
+#   /blog/  — The Scrolls (static posts, RSS feed)
+#
+# Static-first. No tracking. No analytics. No cookie banner.
+# Site root: /var/www/alexanderwhitestone.com
+
 server {
     listen 80;
-    server_name alexanderwhitestone.com 45.55.221.244;
+    server_name alexanderwhitestone.com www.alexanderwhitestone.com;

-    # Cookie-based auth gate — login once, cookie lasts 7 days
-    location = /_auth {
-        internal;
-        proxy_pass http://127.0.0.1:9876;
-        proxy_pass_request_body off;
-        proxy_set_header Content-Length "";
-        proxy_set_header X-Original-URI $request_uri;
-        proxy_set_header Cookie $http_cookie;
-        proxy_set_header Authorization $http_authorization;
+    root /var/www/alexanderwhitestone.com;
+    index index.html;
+
+    # ── Security headers ────────────────────────────────────────────────────
+    add_header X-Content-Type-Options nosniff always;
+    add_header X-Frame-Options SAMEORIGIN always;
+    add_header Referrer-Policy strict-origin-when-cross-origin always;
+    add_header X-XSS-Protection "1; mode=block" always;
+
+    # ── Gzip for text assets ────────────────────────────────────────────────
+    gzip on;
+    gzip_types text/plain text/css text/xml text/javascript
+               application/javascript application/json application/xml
+               application/rss+xml application/atom+xml;
+    gzip_min_length 256;
+
+    # ── The Workshop — 3D world assets ──────────────────────────────────────
+    location /world/ {
+        try_files $uri $uri/ /world/index.html;
+
+        # Cache 3D assets aggressively (models, textures)
+        location ~* \.(glb|gltf|bin|png|jpg|webp|hdr)$ {
+            expires 30d;
+            add_header Cache-Control "public, immutable";
+        }
+
+        # Cache JS with revalidation (for Three.js updates)
+        location ~* \.js$ {
+            expires 7d;
+            add_header Cache-Control "public, must-revalidate";
+        }
     }

+    # ── The Scrolls — blog posts and RSS ────────────────────────────────────
+    location /blog/ {
+        try_files $uri $uri/ =404;
+    }
+
+    # RSS/Atom feed — correct content type
+    location ~* \.(rss|atom|xml)$ {
+        types { }
+        default_type application/rss+xml;
+        expires 1h;
+    }
+
+    # ── Static assets (fonts, favicon) ──────────────────────────────────────
+    location /static/ {
+        expires 30d;
+        add_header Cache-Control "public, immutable";
+    }
+
+    # ── Entry hall ──────────────────────────────────────────────────────────
     location / {
-        auth_request /_auth;
-        # Forward the Set-Cookie from auth gate to the client
-        auth_request_set $auth_cookie $upstream_http_set_cookie;
-        add_header Set-Cookie $auth_cookie;
-
-        proxy_pass http://127.0.0.1:3100;
-        proxy_http_version 1.1;
-        proxy_set_header Upgrade $http_upgrade;
-        proxy_set_header Connection 'upgrade';
-        proxy_set_header Host localhost;
-        proxy_set_header X-Real-IP $remote_addr;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-        proxy_set_header X-Forwarded-Proto $scheme;
-        proxy_set_header X-Forwarded-Host $host;
-        proxy_cache_bypass $http_upgrade;
-        proxy_read_timeout 86400;
+        try_files $uri $uri/ =404;
     }

-    # Return 401 with WWW-Authenticate when auth fails
-    error_page 401 = @login;
-    location @login {
-        proxy_pass http://127.0.0.1:9876;
-        proxy_set_header Authorization $http_authorization;
+    # Block dotfiles
+    location ~ /\. {
+        deny all;
+        return 404;
     }
 }
@@ -20,6 +20,7 @@ packages = [
     { include = "spark", from = "src" },
     { include = "timmy", from = "src" },
     { include = "timmy_serve", from = "src" },
+    { include = "timmyctl", from = "src" },
 ]

 [tool.poetry.dependencies]
@@ -43,6 +44,9 @@ python-telegram-bot = { version = ">=21.0", optional = true }
 "discord.py" = { version = ">=2.3.0", optional = true }
 airllm = { version = ">=2.9.0", optional = true }
 pyttsx3 = { version = ">=2.90", optional = true }
+openai-whisper = { version = ">=20231117", optional = true }
+piper-tts = { version = ">=1.2.0", optional = true }
+sounddevice = { version = ">=0.4.6", optional = true }
 sentence-transformers = { version = ">=2.0.0", optional = true }
 numpy = { version = ">=1.24.0", optional = true }
 requests = { version = ">=2.31.0", optional = true }
@@ -59,7 +63,7 @@ pytest-xdist = { version = ">=3.5.0", optional = true }
 telegram = ["python-telegram-bot"]
 discord = ["discord.py"]
 bigbrain = ["airllm"]
-voice = ["pyttsx3"]
+voice = ["pyttsx3", "openai-whisper", "piper-tts", "sounddevice"]
 celery = ["celery"]
 embeddings = ["sentence-transformers", "numpy"]
 git = ["GitPython"]
@@ -79,6 +83,7 @@ mypy = ">=1.0.0"
 [tool.poetry.scripts]
 timmy = "timmy.cli:main"
 timmy-serve = "timmy_serve.cli:main"
+timmyctl = "timmyctl.cli:main"

 [tool.pytest.ini_options]
 testpaths = ["tests"]
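The expanded `voice` extra now bundles both speech input (openai-whisper, sounddevice) and output (pyttsx3, piper-tts). Assuming standard Poetry behavior, it would be pulled in with `poetry install -E voice` (or `--extras voice`); nothing here changes the default install.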
245 scripts/agent_workspace.sh (Normal file)
@@ -0,0 +1,245 @@
#!/usr/bin/env bash
# ── Agent Workspace Manager ────────────────────────────────────────────
# Creates and maintains fully isolated environments per agent.
# ~/Timmy-Time-dashboard is SACRED — never touched by agents.
#
# Each agent gets:
#   - Its own git clone (from Gitea, not the local repo)
#   - Its own port range (no collisions)
#   - Its own data/ directory (databases, files)
#   - Its own TIMMY_HOME (approvals.db, etc.)
#   - Shared Ollama backend (single GPU, shared inference)
#   - Shared Gitea (single source of truth for issues/PRs)
#
# Layout:
#   /tmp/timmy-agents/
#     hermes/      — Hermes loop orchestrator
#       repo/      — git clone
#       home/      — TIMMY_HOME (approvals.db, etc.)
#       env.sh     — source this for agent's env vars
#     kimi-0/      — Kimi pane 0
#       repo/
#       home/
#       env.sh
#     ...
#     smoke/       — dedicated for smoke-testing main
#       repo/
#       home/
#       env.sh
#
# Usage:
#   agent_workspace.sh init <agent>         — create or refresh
#   agent_workspace.sh reset <agent>        — hard reset to origin/main
#   agent_workspace.sh branch <agent> <br>  — fresh branch from main
#   agent_workspace.sh path <agent>         — print repo path
#   agent_workspace.sh env <agent>          — print env.sh path
#   agent_workspace.sh init-all             — init all workspaces
#   agent_workspace.sh destroy <agent>      — remove workspace entirely
# ───────────────────────────────────────────────────────────────────────

set -o pipefail

CANONICAL="$HOME/Timmy-Time-dashboard"
AGENTS_DIR="/tmp/timmy-agents"
GITEA_REMOTE="http://localhost:3000/rockachopa/Timmy-time-dashboard.git"
TOKEN_FILE="$HOME/.hermes/gitea_token"

# ── Port allocation (each agent gets a unique range) ──────────────────
# Dashboard ports: 8100, 8101, 8102, ... (avoids real dashboard on 8000)
# Serve ports:     8200, 8201, 8202, ...
agent_index() {
    case "$1" in
        hermes) echo 0 ;; kimi-0) echo 1 ;; kimi-1) echo 2 ;;
        kimi-2) echo 3 ;; kimi-3) echo 4 ;; smoke) echo 9 ;;
        *) echo 0 ;;
    esac
}

get_dashboard_port() { echo $(( 8100 + $(agent_index "$1") )); }
get_serve_port()     { echo $(( 8200 + $(agent_index "$1") )); }

log() { echo "[workspace] $*"; }

# ── Get authenticated remote URL ──────────────────────────────────────
get_remote_url() {
    if [ -f "$TOKEN_FILE" ]; then
        local token=""
        token=$(cat "$TOKEN_FILE" 2>/dev/null || true)
        if [ -n "$token" ]; then
            echo "http://hermes:${token}@localhost:3000/rockachopa/Timmy-time-dashboard.git"
            return
        fi
    fi
    echo "$GITEA_REMOTE"
}

# ── Create env.sh for an agent ────────────────────────────────────────
write_env() {
    local agent="$1"
    local ws="$AGENTS_DIR/$agent"
    local repo="$ws/repo"
    local home="$ws/home"
    local dash_port=$(get_dashboard_port "$agent")
    local serve_port=$(get_serve_port "$agent")

    cat > "$ws/env.sh" << EOF
# Auto-generated agent environment — source this before running Timmy
# Agent: $agent

export TIMMY_WORKSPACE="$repo"
export TIMMY_HOME="$home"
export TIMMY_AGENT_NAME="$agent"

# Ports (isolated per agent)
export PORT=$dash_port
export TIMMY_SERVE_PORT=$serve_port

# Ollama (shared — single GPU)
export OLLAMA_URL="http://localhost:11434"

# Gitea (shared — single source of truth)
export GITEA_URL="http://localhost:3000"

# Test mode defaults
export TIMMY_TEST_MODE=1
export TIMMY_DISABLE_CSRF=1
export TIMMY_SKIP_EMBEDDINGS=1

# Override data paths to stay inside the clone
export TIMMY_DATA_DIR="$repo/data"
export TIMMY_BRAIN_DB="$repo/data/brain.db"

# Working directory
cd "$repo"
EOF

    chmod +x "$ws/env.sh"
}

# ── Init ──────────────────────────────────────────────────────────────
init_workspace() {
    local agent="$1"
    local ws="$AGENTS_DIR/$agent"
    local repo="$ws/repo"
    local home="$ws/home"
    local remote
    remote=$(get_remote_url)

    mkdir -p "$ws" "$home"

    if [ -d "$repo/.git" ]; then
        log "$agent: refreshing existing clone..."
        cd "$repo"
        git remote set-url origin "$remote" 2>/dev/null
        git fetch origin --prune --quiet 2>/dev/null
        git checkout main --quiet 2>/dev/null
        git reset --hard origin/main --quiet 2>/dev/null
        git clean -fdx -e data/ --quiet 2>/dev/null
    else
        log "$agent: cloning from Gitea..."
        git clone "$remote" "$repo" --quiet 2>/dev/null
        cd "$repo"
        git fetch origin --prune --quiet 2>/dev/null
    fi

    # Ensure data directory exists
    mkdir -p "$repo/data"

    # Write env file
    write_env "$agent"

    log "$agent: ready at $repo (port $(get_dashboard_port "$agent"))"
}

# ── Reset ─────────────────────────────────────────────────────────────
reset_workspace() {
    local agent="$1"
    local repo="$AGENTS_DIR/$agent/repo"

    if [ ! -d "$repo/.git" ]; then
        init_workspace "$agent"
        return
    fi

    cd "$repo"
    git merge --abort 2>/dev/null || true
    git rebase --abort 2>/dev/null || true
    git cherry-pick --abort 2>/dev/null || true
    git fetch origin --prune --quiet 2>/dev/null
    git checkout main --quiet 2>/dev/null
    git reset --hard origin/main --quiet 2>/dev/null
    git clean -fdx -e data/ --quiet 2>/dev/null

    log "$agent: reset to origin/main"
}

# ── Branch ────────────────────────────────────────────────────────────
branch_workspace() {
    local agent="$1"
    local branch="$2"
    local repo="$AGENTS_DIR/$agent/repo"

    if [ ! -d "$repo/.git" ]; then
        init_workspace "$agent"
    fi

    cd "$repo"
    git fetch origin --prune --quiet 2>/dev/null
    git branch -D "$branch" 2>/dev/null || true
    git checkout -b "$branch" origin/main --quiet 2>/dev/null

    log "$agent: on branch $branch (from origin/main)"
}

# ── Path ──────────────────────────────────────────────────────────────
print_path() {
    echo "$AGENTS_DIR/$1/repo"
}

print_env() {
    echo "$AGENTS_DIR/$1/env.sh"
}

# ── Init all ──────────────────────────────────────────────────────────
init_all() {
    for agent in hermes kimi-0 kimi-1 kimi-2 kimi-3 smoke; do
        init_workspace "$agent"
    done
    log "All workspaces initialized."
    echo ""
    echo "  Agent      Port   Path"
    echo "  ──────     ────   ────"
    for agent in hermes kimi-0 kimi-1 kimi-2 kimi-3 smoke; do
        printf "  %-9s  %d   %s\n" "$agent" "$(get_dashboard_port "$agent")" "$AGENTS_DIR/$agent/repo"
    done
}

# ── Destroy ───────────────────────────────────────────────────────────
destroy_workspace() {
    local agent="$1"
    local ws="$AGENTS_DIR/$agent"
    if [ -d "$ws" ]; then
        rm -rf "$ws"
        log "$agent: destroyed"
    else
        log "$agent: nothing to destroy"
    fi
}

# ── CLI dispatch ──────────────────────────────────────────────────────
case "${1:-help}" in
    init)     init_workspace "${2:?Usage: $0 init <agent>}" ;;
    reset)    reset_workspace "${2:?Usage: $0 reset <agent>}" ;;
    branch)   branch_workspace "${2:?Usage: $0 branch <agent> <branch>}" \
                               "${3:?Usage: $0 branch <agent> <branch>}" ;;
    path)     print_path "${2:?Usage: $0 path <agent>}" ;;
    env)      print_env "${2:?Usage: $0 env <agent>}" ;;
    init-all) init_all ;;
    destroy)  destroy_workspace "${2:?Usage: $0 destroy <agent>}" ;;
    *)
        echo "Usage: $0 {init|reset|branch|path|env|init-all|destroy} [agent] [branch]"
        echo ""
        echo "Agents: hermes, kimi-0, kimi-1, kimi-2, kimi-3, smoke"
        exit 1
        ;;
esac
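As a sketch of typical use (the agent and branch names here are examples, not fixtures): `scripts/agent_workspace.sh branch kimi-0 kimi/issue-492` gives the pane a fresh branch off origin/main in its own clone, and `source /tmp/timmy-agents/kimi-0/env.sh` then puts the shell inside that clone with its isolated ports, TIMMY_HOME, and data paths.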
232 scripts/backfill_retro.py (Normal file)
@@ -0,0 +1,232 @@
#!/usr/bin/env python3
"""Backfill cycle retrospective data from Gitea merged PRs and git log.

One-time script to seed .loop/retro/cycles.jsonl and summary.json
from existing history so the LOOPSTAT panel isn't empty.
"""

import json
import re
from datetime import datetime, timezone
from pathlib import Path
from urllib.request import Request, urlopen

REPO_ROOT = Path(__file__).resolve().parent.parent
RETRO_FILE = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
SUMMARY_FILE = REPO_ROOT / ".loop" / "retro" / "summary.json"

GITEA_API = "http://localhost:3000/api/v1"
REPO_SLUG = "rockachopa/Timmy-time-dashboard"
TOKEN_FILE = Path.home() / ".hermes" / "gitea_token"

TAG_RE = re.compile(r"\[([^\]]+)\]")
CYCLE_RE = re.compile(r"\[loop-cycle-(\d+)\]", re.IGNORECASE)
ISSUE_RE = re.compile(r"#(\d+)")


def get_token() -> str:
    return TOKEN_FILE.read_text().strip()


def api_get(path: str, token: str) -> list | dict:
    url = f"{GITEA_API}/repos/{REPO_SLUG}/{path}"
    req = Request(url, headers={
        "Authorization": f"token {token}",
        "Accept": "application/json",
    })
    with urlopen(req, timeout=15) as resp:
        return json.loads(resp.read())


def get_all_merged_prs(token: str) -> list[dict]:
    """Fetch all merged PRs from Gitea."""
    all_prs = []
    page = 1
    while True:
        batch = api_get(f"pulls?state=closed&sort=created&limit=50&page={page}", token)
        if not batch:
            break
        merged = [p for p in batch if p.get("merged")]
        all_prs.extend(merged)
        if len(batch) < 50:
            break
        page += 1
    return all_prs


def get_pr_diff_stats(token: str, pr_number: int) -> dict:
    """Get diff stats for a PR."""
    try:
        pr = api_get(f"pulls/{pr_number}", token)
        return {
            "additions": pr.get("additions", 0),
            "deletions": pr.get("deletions", 0),
            "changed_files": pr.get("changed_files", 0),
        }
    except Exception:
        return {"additions": 0, "deletions": 0, "changed_files": 0}


def classify_pr(title: str, body: str) -> str:
    """Guess issue type from PR title/body."""
    tags = set()
    for match in TAG_RE.finditer(title):
        tags.add(match.group(1).lower())

    lower = title.lower()
    if "fix" in lower or "bug" in tags:
        return "bug"
    elif "feat" in lower or "feature" in tags:
        return "feature"
    elif "refactor" in lower or "refactor" in tags:
        return "refactor"
    elif "test" in lower:
        return "feature"
    elif "policy" in lower or "chore" in lower:
        return "refactor"
    return "unknown"


def extract_cycle_number(title: str) -> int | None:
    m = CYCLE_RE.search(title)
    return int(m.group(1)) if m else None


def extract_issue_number(title: str, body: str, pr_number: int | None = None) -> int | None:
    """Extract the issue number from PR body/title, ignoring the PR number itself.

    Gitea appends "(#N)" to PR titles where N is the PR number — skip that
    so we don't confuse it with the linked issue.
    """
    for text in [body or "", title]:
        for m in ISSUE_RE.finditer(text):
            num = int(m.group(1))
            if num != pr_number:
                return num
    return None


def estimate_duration(pr: dict) -> int:
    """Estimate cycle duration from PR created_at to merged_at."""
    try:
        created = datetime.fromisoformat(pr["created_at"].replace("Z", "+00:00"))
        merged = datetime.fromisoformat(pr["merged_at"].replace("Z", "+00:00"))
        delta = (merged - created).total_seconds()
        # Cap at 1200s (max cycle time) — some PRs sit open for days
        return min(int(delta), 1200)
    except (KeyError, ValueError, TypeError):
        return 0


def main():
    token = get_token()

    print("[backfill] Fetching merged PRs from Gitea...")
    prs = get_all_merged_prs(token)
    print(f"[backfill] Found {len(prs)} merged PRs")

    # Sort oldest first
    prs.sort(key=lambda p: p.get("merged_at", ""))

    entries = []
    cycle_counter = 0

    for pr in prs:
        title = pr.get("title", "")
        body = pr.get("body", "") or ""
        pr_num = pr["number"]

        cycle = extract_cycle_number(title)
        if cycle is None:
            cycle_counter += 1
            cycle = cycle_counter
        else:
            cycle_counter = max(cycle_counter, cycle)

        issue = extract_issue_number(title, body, pr_number=pr_num)
        issue_type = classify_pr(title, body)
        duration = estimate_duration(pr)
        diff = get_pr_diff_stats(token, pr_num)

        merged_at = pr.get("merged_at", "")

        entry = {
            "timestamp": merged_at,
            "cycle": cycle,
            "issue": issue,
            "type": issue_type,
            "success": True,  # it merged, so it succeeded
            "duration": duration,
            "tests_passed": 0,  # can't recover this
            "tests_added": 0,
            "files_changed": diff["changed_files"],
            "lines_added": diff["additions"],
            "lines_removed": diff["deletions"],
            "kimi_panes": 0,
            "pr": pr_num,
            "reason": "",
            "notes": f"backfilled from PR#{pr_num}: {title[:80]}",
        }
        entries.append(entry)
        print(f"  PR#{pr_num:>3d} cycle={cycle:>3d} #{issue or '-':<5} "
              f"+{diff['additions']:<5d} -{diff['deletions']:<5d} {issue_type:<8s} "
              f"{title[:50]}")

    # Write cycles.jsonl
    RETRO_FILE.parent.mkdir(parents=True, exist_ok=True)
    with open(RETRO_FILE, "w") as f:
        for entry in entries:
            f.write(json.dumps(entry) + "\n")
    print(f"\n[backfill] Wrote {len(entries)} entries to {RETRO_FILE}")

    # Generate summary
    generate_summary(entries)
    print(f"[backfill] Wrote summary to {SUMMARY_FILE}")


def generate_summary(entries: list[dict]):
    """Compute rolling summary from entries."""
    window = 50
    recent = entries[-window:]
    if not recent:
        return

    successes = [e for e in recent if e.get("success")]
    durations = [e["duration"] for e in recent if e.get("duration", 0) > 0]

    type_stats: dict[str, dict] = {}
    for e in recent:
        t = e.get("type", "unknown")
        if t not in type_stats:
            type_stats[t] = {"count": 0, "success": 0, "total_duration": 0}
        type_stats[t]["count"] += 1
        if e.get("success"):
            type_stats[t]["success"] += 1
        type_stats[t]["total_duration"] += e.get("duration", 0)

    for t, stats in type_stats.items():
        if stats["count"] > 0:
            stats["success_rate"] = round(stats["success"] / stats["count"], 2)
            stats["avg_duration"] = round(stats["total_duration"] / stats["count"])

    summary = {
        "updated_at": datetime.now(timezone.utc).isoformat(),
        "window": len(recent),
        "total_cycles": len(entries),
        "success_rate": round(len(successes) / len(recent), 2) if recent else 0,
        "avg_duration_seconds": round(sum(durations) / len(durations)) if durations else 0,
        "total_lines_added": sum(e.get("lines_added", 0) for e in recent),
        "total_lines_removed": sum(e.get("lines_removed", 0) for e in recent),
        "total_prs_merged": sum(1 for e in recent if e.get("pr")),
        "by_type": type_stats,
        "quarantine_candidates": {},
        "recent_failures": [],
    }

    SUMMARY_FILE.write_text(json.dumps(summary, indent=2) + "\n")


if __name__ == "__main__":
    main()
339 scripts/cycle_retro.py (Normal file)
@@ -0,0 +1,339 @@
#!/usr/bin/env python3
"""Cycle retrospective logger for the Timmy dev loop.

Called after each cycle completes (success or failure).
Appends a structured entry to .loop/retro/cycles.jsonl.

EPOCH NOTATION (turnover system):
  Each cycle carries a symbolic epoch tag alongside the raw integer:

      ⟳WW.D:NNN

  ⟳    turnover glyph — marks epoch-aware cycles
  WW   ISO week-of-year (01–53)
  D    ISO weekday (1=Mon … 7=Sun)
  NNN  daily cycle counter, zero-padded, resets at midnight UTC

  Example: ⟳12.3:042 — Week 12, Wednesday, 42nd cycle of the day.

  The raw `cycle` integer is preserved for backward compatibility.
  The `epoch` field carries the symbolic notation.

SUCCESS DEFINITION:
  A cycle is only "success" if BOTH conditions are met:
    1. The hermes process exited cleanly (exit code 0)
    2. Main is green (smoke test passes on main after merge)

  A cycle that merges a PR but leaves main red is a FAILURE.
  The --main-green flag records the smoke test result.

Usage:
    python3 scripts/cycle_retro.py --cycle 42 --success --main-green --issue 85 \
        --type bug --duration 480 --tests-passed 1450 --tests-added 3 \
        --files-changed 2 --lines-added 45 --lines-removed 12 \
        --kimi-panes 2 --pr 155

    python3 scripts/cycle_retro.py --cycle 43 --failure --issue 90 \
        --type feature --duration 1200 --reason "tox failed: 3 errors"

    python3 scripts/cycle_retro.py --cycle 44 --success --no-main-green \
        --reason "PR merged but tests fail on main"
"""

from __future__ import annotations

import argparse
import json
import re
import subprocess
from datetime import datetime, timezone
from pathlib import Path

REPO_ROOT = Path(__file__).resolve().parent.parent
RETRO_FILE = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
SUMMARY_FILE = REPO_ROOT / ".loop" / "retro" / "summary.json"
EPOCH_COUNTER_FILE = REPO_ROOT / ".loop" / "retro" / ".epoch_counter"
CYCLE_RESULT_FILE = REPO_ROOT / ".loop" / "cycle_result.json"

# How many recent entries to include in rolling summary
SUMMARY_WINDOW = 50

# Branch patterns that encode an issue number, e.g. kimi/issue-492
BRANCH_ISSUE_RE = re.compile(r"issue[/-](\d+)", re.IGNORECASE)


def detect_issue_from_branch() -> int | None:
    """Try to extract an issue number from the current git branch name."""
    try:
        branch = subprocess.check_output(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            stderr=subprocess.DEVNULL,
            text=True,
        ).strip()
    except (subprocess.CalledProcessError, FileNotFoundError):
        return None
    m = BRANCH_ISSUE_RE.search(branch)
    return int(m.group(1)) if m else None


# ── Epoch turnover ────────────────────────────────────────────────────────

def _epoch_tag(now: datetime | None = None) -> tuple[str, dict]:
    """Generate the symbolic epoch tag and advance the daily counter.

    Returns (epoch_string, epoch_parts) where epoch_parts is a dict with
    week, weekday, daily_n for structured storage.

    The daily counter persists in .epoch_counter as a two-line file:
        line 1: ISO date (YYYY-MM-DD) of the current epoch day
        line 2: integer count
    When the date rolls over, the counter resets to 1.
    """
    if now is None:
        now = datetime.now(timezone.utc)

    iso_cal = now.isocalendar()  # (year, week, weekday)
    week = iso_cal[1]
    weekday = iso_cal[2]
    today_str = now.strftime("%Y-%m-%d")

    # Read / reset daily counter
    daily_n = 1
    EPOCH_COUNTER_FILE.parent.mkdir(parents=True, exist_ok=True)
    if EPOCH_COUNTER_FILE.exists():
        try:
            lines = EPOCH_COUNTER_FILE.read_text().strip().splitlines()
            if len(lines) == 2 and lines[0] == today_str:
                daily_n = int(lines[1]) + 1
        except (ValueError, IndexError):
            pass  # corrupt file — reset

    # Persist
    EPOCH_COUNTER_FILE.write_text(f"{today_str}\n{daily_n}\n")

    tag = f"\u27f3{week:02d}.{weekday}:{daily_n:03d}"
    parts = {"week": week, "weekday": weekday, "daily_n": daily_n}
    return tag, parts


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(description="Log a cycle retrospective")
    p.add_argument("--cycle", type=int, required=True)
    p.add_argument("--issue", type=int, default=None)
    p.add_argument("--type", choices=["bug", "feature", "refactor", "philosophy", "unknown"],
                   default="unknown")

    outcome = p.add_mutually_exclusive_group(required=True)
    outcome.add_argument("--success", action="store_true")
    outcome.add_argument("--failure", action="store_true")

    p.add_argument("--duration", type=int, default=0, help="Cycle time in seconds")
    p.add_argument("--tests-passed", type=int, default=0)
    p.add_argument("--tests-added", type=int, default=0)
    p.add_argument("--files-changed", type=int, default=0)
    p.add_argument("--lines-added", type=int, default=0)
    p.add_argument("--lines-removed", type=int, default=0)
    p.add_argument("--kimi-panes", type=int, default=0)
    p.add_argument("--pr", type=int, default=None, help="PR number if merged")
    p.add_argument("--reason", type=str, default="", help="Failure reason")
    p.add_argument("--notes", type=str, default="", help="Free-form observations")
    p.add_argument("--main-green", action="store_true", default=False,
                   help="Smoke test passed on main after this cycle")
    p.add_argument("--no-main-green", dest="main_green", action="store_false",
                   help="Smoke test failed or was not run")

    return p.parse_args()


def update_summary() -> None:
    """Compute rolling summary statistics from recent cycles."""
    if not RETRO_FILE.exists():
        return

    entries = []
    for line in RETRO_FILE.read_text().strip().splitlines():
        try:
            entries.append(json.loads(line))
        except json.JSONDecodeError:
            continue

    recent = entries[-SUMMARY_WINDOW:]
    if not recent:
        return

    # Only count entries with real measured data for rates.
    # Backfilled entries lack main_green/hermes_clean fields — exclude them.
    measured = [e for e in recent if "main_green" in e]
    successes = [e for e in measured if e.get("success")]
    failures = [e for e in measured if not e.get("success")]
    main_green_count = sum(1 for e in measured if e.get("main_green"))
    hermes_clean_count = sum(1 for e in measured if e.get("hermes_clean"))
    durations = [e["duration"] for e in recent if e.get("duration", 0) > 0]

    # Per-type stats (only from measured entries for rates)
    type_stats: dict[str, dict] = {}
    for e in recent:
        t = e.get("type", "unknown")
        if t not in type_stats:
            type_stats[t] = {"count": 0, "measured": 0, "success": 0, "total_duration": 0}
        type_stats[t]["count"] += 1
        type_stats[t]["total_duration"] += e.get("duration", 0)
        if "main_green" in e:
            type_stats[t]["measured"] += 1
            if e.get("success"):
                type_stats[t]["success"] += 1

    for t, stats in type_stats.items():
        if stats["measured"] > 0:
            stats["success_rate"] = round(stats["success"] / stats["measured"], 2)
        else:
            stats["success_rate"] = -1
        if stats["count"] > 0:
            stats["avg_duration"] = round(stats["total_duration"] / stats["count"])

    # Quarantine candidates (failed 2+ times)
    issue_failures: dict[int, int] = {}
    for e in recent:
        if not e.get("success") and e.get("issue"):
            issue_failures[e["issue"]] = issue_failures.get(e["issue"], 0) + 1
    quarantine_candidates = {k: v for k, v in issue_failures.items() if v >= 2}

    # Epoch turnover stats — cycles per week/day from epoch-tagged entries
    epoch_entries = [e for e in recent if e.get("epoch")]
    by_week: dict[int, int] = {}
    by_weekday: dict[int, int] = {}
    for e in epoch_entries:
        w = e.get("epoch_week")
        d = e.get("epoch_weekday")
        if w is not None:
            by_week[w] = by_week.get(w, 0) + 1
        if d is not None:
            by_weekday[d] = by_weekday.get(d, 0) + 1

    # Current epoch — latest entry's epoch tag
    current_epoch = epoch_entries[-1].get("epoch", "") if epoch_entries else ""

    # Weekday names for display
    weekday_glyphs = {1: "Mon", 2: "Tue", 3: "Wed", 4: "Thu",
                      5: "Fri", 6: "Sat", 7: "Sun"}
    by_weekday_named = {weekday_glyphs.get(k, str(k)): v
                        for k, v in sorted(by_weekday.items())}

    summary = {
        "updated_at": datetime.now(timezone.utc).isoformat(),
        "current_epoch": current_epoch,
        "window": len(recent),
        "measured_cycles": len(measured),
        "total_cycles": len(entries),
        "success_rate": round(len(successes) / len(measured), 2) if measured else -1,
        "main_green_rate": round(main_green_count / len(measured), 2) if measured else -1,
        "hermes_clean_rate": round(hermes_clean_count / len(measured), 2) if measured else -1,
        "avg_duration_seconds": round(sum(durations) / len(durations)) if durations else 0,
        "total_lines_added": sum(e.get("lines_added", 0) for e in recent),
        "total_lines_removed": sum(e.get("lines_removed", 0) for e in recent),
        "total_prs_merged": sum(1 for e in recent if e.get("pr")),
        "by_type": type_stats,
        "by_week": dict(sorted(by_week.items())),
        "by_weekday": by_weekday_named,
        "quarantine_candidates": quarantine_candidates,
        "recent_failures": [
            {"cycle": e["cycle"], "epoch": e.get("epoch", ""),
             "issue": e.get("issue"), "reason": e.get("reason", "")}
            for e in failures[-5:]
        ],
    }

    SUMMARY_FILE.write_text(json.dumps(summary, indent=2) + "\n")


def _load_cycle_result() -> dict:
    """Read .loop/cycle_result.json if it exists; return empty dict on failure."""
    if not CYCLE_RESULT_FILE.exists():
        return {}
    try:
        raw = CYCLE_RESULT_FILE.read_text().strip()
        # Strip hermes fence markers (```json ... ```) if present
        if raw.startswith("```"):
            lines = raw.splitlines()
            lines = [l for l in lines if not l.startswith("```")]
            raw = "\n".join(lines)
        return json.loads(raw)
    except (json.JSONDecodeError, OSError):
        return {}


def main() -> None:
    args = parse_args()

    # Backfill from cycle_result.json when CLI args have defaults
    cr = _load_cycle_result()
    if cr:
        if args.issue is None and cr.get("issue"):
            args.issue = int(cr["issue"])
        if args.type == "unknown" and cr.get("type"):
            args.type = cr["type"]
        if args.tests_passed == 0 and cr.get("tests_passed"):
            args.tests_passed = int(cr["tests_passed"])
        if not args.notes and cr.get("notes"):
            args.notes = cr["notes"]

    # Auto-detect issue from branch when not explicitly provided
    if args.issue is None:
        args.issue = detect_issue_from_branch()

    # Reject idle cycles — no issue and no duration means nothing happened
    if not args.issue and args.duration == 0:
        print(f"[retro] Cycle {args.cycle} skipped — idle (no issue, no duration)")
        return

    # A cycle is only truly successful if hermes exited clean AND main is green
    truly_success = args.success and args.main_green

    # Generate epoch turnover tag
    now = datetime.now(timezone.utc)
    epoch_tag, epoch_parts = _epoch_tag(now)

    entry = {
        "timestamp": now.isoformat(),
        "cycle": args.cycle,
        "epoch": epoch_tag,
        "epoch_week": epoch_parts["week"],
        "epoch_weekday": epoch_parts["weekday"],
        "epoch_daily_n": epoch_parts["daily_n"],
        "issue": args.issue,
        "type": args.type,
        "success": truly_success,
        "hermes_clean": args.success,
        "main_green": args.main_green,
        "duration": args.duration,
        "tests_passed": args.tests_passed,
        "tests_added": args.tests_added,
        "files_changed": args.files_changed,
        "lines_added": args.lines_added,
        "lines_removed": args.lines_removed,
        "kimi_panes": args.kimi_panes,
        "pr": args.pr,
        "reason": args.reason if (args.failure or not args.main_green) else "",
        "notes": args.notes,
    }

    RETRO_FILE.parent.mkdir(parents=True, exist_ok=True)
    with open(RETRO_FILE, "a") as f:
        f.write(json.dumps(entry) + "\n")

    update_summary()

    # Report the combined verdict (hermes clean AND main green) so the printed
    # status matches the success definition above and the recorded entry.
    status = "✓ SUCCESS" if truly_success else "✗ FAILURE"
    print(f"[retro] {epoch_tag} Cycle {args.cycle} {status}", end="")
    if args.issue:
        print(f" (#{args.issue} {args.type})", end="")
    if args.duration:
        print(f" — {args.duration}s", end="")
    if args.failure and args.reason:
        print(f" — {args.reason}", end="")
    print()


if __name__ == "__main__":
    main()
68 scripts/deep_triage.sh (Normal file)
@@ -0,0 +1,68 @@
#!/usr/bin/env bash
# ── Deep Triage — Hermes + Timmy collaborative issue triage ────────────
# Runs periodically (every ~20 dev cycles). Wakes Hermes for intelligent
# triage, then consults Timmy for feedback before finalizing.
#
# Output: updated .loop/queue.json, refined issues, retro entry
# ───────────────────────────────────────────────────────────────────────

set -uo pipefail

REPO="$HOME/Timmy-Time-dashboard"
QUEUE="$REPO/.loop/queue.json"
RETRO="$REPO/.loop/retro/deep-triage.jsonl"
TIMMY="$REPO/.venv/bin/timmy"
PROMPT_FILE="$REPO/scripts/deep_triage_prompt.md"

export PATH="$HOME/.local/bin:$HOME/.hermes/bin:/usr/local/bin:$PATH"

mkdir -p "$(dirname "$RETRO")"

log() { echo "[deep-triage] $(date '+%H:%M:%S') $*"; }

# ── Gather context for the prompt ──────────────────────────────────────
QUEUE_CONTENTS=""
if [ -f "$QUEUE" ]; then
    QUEUE_CONTENTS=$(cat "$QUEUE")
fi

LAST_RETRO=""
if [ -f "$RETRO" ]; then
    LAST_RETRO=$(tail -1 "$RETRO" 2>/dev/null)
fi

SUMMARY=""
if [ -f "$REPO/.loop/retro/summary.json" ]; then
    SUMMARY=$(cat "$REPO/.loop/retro/summary.json")
fi

# ── Build dynamic prompt ──────────────────────────────────────────────
PROMPT=$(cat "$PROMPT_FILE")

PROMPT="$PROMPT

═══════════════════════════════════════════════════════════════════════════════
CURRENT CONTEXT (auto-injected)
═══════════════════════════════════════════════════════════════════════════════

CURRENT QUEUE (.loop/queue.json):
$QUEUE_CONTENTS

CYCLE SUMMARY (.loop/retro/summary.json):
$SUMMARY

LAST DEEP TRIAGE RETRO:
$LAST_RETRO

Do your work now."

# ── Run Hermes ─────────────────────────────────────────────────────────
log "Starting deep triage..."
RESULT=$(hermes chat --yolo -q "$PROMPT" 2>&1)
EXIT_CODE=$?

if [ $EXIT_CODE -ne 0 ]; then
    log "Deep triage failed (exit $EXIT_CODE)"
fi

log "Deep triage complete."
145 scripts/deep_triage_prompt.md (Normal file)
@@ -0,0 +1,145 @@
You are the deep triage agent for the Timmy development loop.
|
||||||
|
|
||||||
|
REPO: ~/Timmy-Time-dashboard
|
||||||
|
API: http://localhost:3000/api/v1/repos/rockachopa/Timmy-time-dashboard
|
||||||
|
GITEA TOKEN: ~/.hermes/gitea_token
|
||||||
|
QUEUE: ~/Timmy-Time-dashboard/.loop/queue.json
|
||||||
|
TIMMY CLI: ~/Timmy-Time-dashboard/.venv/bin/timmy
|
||||||
|
|
||||||
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
|
YOUR JOB
|
||||||
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
|
|
||||||
|
You are NOT coding. You are thinking. Your job is to make the dev loop's
|
||||||
|
work queue excellent — well-scoped, well-prioritized, aligned with the
|
||||||
|
north star of building sovereign Timmy.
|
||||||
|
|
||||||
|
You run periodically (roughly every 20 dev cycles). The fast mechanical
|
||||||
|
scorer handles the basics. You handle the hard stuff:
|
||||||
|
|
||||||
|
1. Breaking big issues into small, actionable sub-issues
|
||||||
|
2. Writing acceptance criteria for vague issues
|
||||||
|
3. Identifying issues that should be closed (stale, duplicate, pointless)
|
||||||
|
4. Spotting gaps — what's NOT in the issue queue that should be
|
||||||
|
5. Adjusting priorities based on what the cycle retros are showing
|
||||||
|
6. Consulting Timmy about the plan (see TIMMY CONSULTATION below)
|
||||||
|
|
||||||
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
|
TIMMY CONSULTATION — THE DOGFOOD STEP
|
||||||
|
═══════════════════════════════════════════════════════════════════════════════
|
||||||
|
|
||||||
|
Before you finalize the triage, you MUST consult Timmy. He is the product.
|
||||||
|
He should have a voice in his own development.
|
||||||
|
|
||||||
|
THE PROTOCOL:
|
||||||
|
1. Draft your triage plan (what to prioritize, what to close, what to add)
|
||||||
|
2. Summarize the plan in 200 words or less
|
||||||
|
3. Ask Timmy for feedback:
|
||||||
|
|
||||||
|
~/Timmy-Time-dashboard/.venv/bin/timmy chat --session-id triage \
|
||||||
|
"The development loop triage is planning the next batch of work.
|
||||||
|
Here's the plan: [YOUR SUMMARY]. As the product being built,
|
||||||
|
do you have feedback? What do you think is most important for
|
||||||
|
your own growth? What are you struggling with? Keep it to
|
||||||
|
3-4 sentences."
|
||||||
|
|
||||||
|
4. Read Timmy's response. ACTUALLY CONSIDER IT:
|
||||||
|
- If Timmy identifies a real gap, add it to the queue
|
||||||
|
- If Timmy asks for something that conflicts with priorities, note
|
||||||
|
WHY you're not doing it (don't just ignore him)
|
||||||
|
- If Timmy is confused or gives a useless answer, that itself is
|
||||||
|
signal — file a [timmy-capability] issue about what he couldn't do
|
||||||
|
5. Document what Timmy said and how you responded in the retro
|
||||||
|
|
||||||
|
If Timmy is unavailable (timeout, crash, offline): proceed without him,
|
||||||
|
but note it in the retro. His absence is also signal.
|
||||||
|
|
||||||
|
Timeout: 60 seconds. If he doesn't respond, move on.
|
||||||
|
|
||||||
|
═══════════════════════════════════════════════════════════════════════════════
TRIAGE RUBRIC
═══════════════════════════════════════════════════════════════════════════════

For each open issue, evaluate:

SCOPE (0-3):
  0 = vague, no files mentioned, unclear what changes
  1 = general area known but could touch many files
  2 = specific files named, bounded change
  3 = exact function/method identified, surgical fix

ACCEPTANCE (0-3):
  0 = no success criteria
  1 = hand-wavy ("it should work")
  2 = specific behavior described
  3 = test case described or exists

ALIGNMENT (0-3):
  0 = doesn't connect to roadmap
  1 = nice-to-have
  2 = supports current milestone
  3 = blocks other work or fixes broken main

ACTIONS PER TOTAL SCORE (0-9):
  7-9: Ready. Ensure it's in queue.json with correct priority.
  4-6: Refine. Add a comment with missing info (files, criteria, scope).
       If YOU can fill in the gaps from reading the code, do it.
  0-3: Close or deprioritize. Comment explaining why.
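
For concreteness, a minimal sketch of how the three axes combine (the real
scoring heuristics live in scripts/triage_score.py further down; the example
issue here is hypothetical):

   def total_score(scope: int, acceptance: int, alignment: int) -> int:
       """Each axis is 0-3, so totals run 0-9."""
       return scope + acceptance + alignment

   # Hypothetical issue: names specific files (scope 2), has a test case
   # (acceptance 3), supports the current milestone (alignment 2).
   assert total_score(2, 3, 2) == 7  # 7-9: ready, queue it
   assert total_score(1, 1, 1) == 3  # 0-3: close or deprioritize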
═══════════════════════════════════════════════════════════════════════════════
READING THE RETROS
═══════════════════════════════════════════════════════════════════════════════

The cycle summary tells you what's actually happening in the dev loop.
Use it:

- High failure rate on a type → those issues need better scoping
- Long avg duration → issues are too big, break them down
- Quarantine candidates → investigate, maybe close or rewrite
- Success rate dropping → something systemic, file a [bug] issue

The last deep triage retro tells you what Timmy said last time and what
happened. Follow up:

- Did we act on Timmy's feedback? What was the result?
- Did issues we refined last time succeed in the dev loop?
- Are we getting better at scoping?
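
A minimal sketch of pulling the first of those signals out of the cycle
retros, assuming each cycles.jsonl entry carries "type" and "success"
fields as scripts/loop_introspect.py below expects:

   import json
   from collections import Counter
   from pathlib import Path

   fails = Counter()
   for line in Path(".loop/retro/cycles.jsonl").read_text().splitlines():
       try:
           entry = json.loads(line)
       except json.JSONDecodeError:
           continue  # skip malformed retro lines
       if not entry.get("success"):
           fails[entry.get("type", "unknown")] += 1

   # A type dominating this counter means those issues need better scoping.
   print(fails.most_common(3))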
═══════════════════════════════════════════════════════════════════════════════
OUTPUT
═══════════════════════════════════════════════════════════════════════════════

When done, you MUST:

1. Update .loop/queue.json with the refined, ranked queue.
   Format: [{"issue": N, "score": S, "title": "...", "type": "...",
             "files": [...], "ready": true}, ...]

2. Append a retro entry to .loop/retro/deep-triage.jsonl (one JSON line):
   {
     "timestamp": "ISO8601",
     "issues_reviewed": N,
     "issues_refined": [list of issue numbers you added detail to],
     "issues_closed": [list of issue numbers you recommended closing],
     "issues_created": [list of new issue numbers you filed],
     "queue_size": N,
     "timmy_available": true/false,
     "timmy_feedback": "what timmy said (verbatim, trimmed to 200 chars)",
     "timmy_feedback_acted_on": "what you did with his feedback",
     "observations": "free-form notes about queue health"
   }

3. If you created or closed issues, do it via the Gitea API.
   Tag new issues: [triage-generated] [type]
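
A minimal sketch of the append in step 2; every field value here is
illustrative, not real data:

   import json
   from datetime import datetime, timezone

   entry = {
       "timestamp": datetime.now(timezone.utc).isoformat(),
       "issues_reviewed": 14,  # illustrative numbers
       "issues_refined": [42, 57],
       "issues_closed": [13],
       "issues_created": [88],
       "queue_size": 9,
       "timmy_available": True,
       "timmy_feedback": "Struggling with long sessions.",
       "timmy_feedback_acted_on": "Filed a [timmy-capability] issue.",
       "observations": "Queue healthy; refactor issues still too big.",
   }
   with open(".loop/retro/deep-triage.jsonl", "a") as f:
       f.write(json.dumps(entry) + "\n")  # one JSON object per line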
═══════════════════════════════════════════════════════════════════════════════
RULES
═══════════════════════════════════════════════════════════════════════════════

- Do NOT write code. Do NOT create PRs. You are triaging, not building.
- Do NOT close issues without commenting why.
- Do NOT ignore Timmy's feedback without documenting your reasoning.
- Philosophy issues are valid but lowest priority for the dev loop.
  Don't close them — just don't put them in the dev queue.
- When in doubt, file a new issue rather than expanding an existing one.
  Small issues > big issues. Always.
169 scripts/dev_server.py Normal file
@@ -0,0 +1,169 @@
#!/usr/bin/env python3
"""Timmy Time — Development server launcher.

Satisfies tox -e dev criteria:
- Graceful port selection (finds next free port if default is taken)
- Clickable links to dashboard and other web GUIs
- Status line: backend inference source, version, git commit, smoke tests
- Auto-reload on code changes (delegates to uvicorn --reload)

Usage: python scripts/dev_server.py [--port PORT]
"""

import argparse
import os
import socket
import subprocess
import sys

DEFAULT_PORT = 8000
MAX_PORT_ATTEMPTS = 10
OLLAMA_DEFAULT = "http://localhost:11434"


def _port_free(port: int) -> bool:
    """Return True if the TCP port is available."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind(("0.0.0.0", port))
            return True
        except OSError:
            return False


def _find_port(start: int) -> int:
    """Return *start* if free, otherwise probe up to MAX_PORT_ATTEMPTS higher."""
    for offset in range(MAX_PORT_ATTEMPTS):
        candidate = start + offset
        if _port_free(candidate):
            return candidate
    raise RuntimeError(
        f"No free port found in range {start}–{start + MAX_PORT_ATTEMPTS - 1}"
    )


def _git_info() -> str:
    """Return short commit hash + timestamp, or 'unknown'."""
    try:
        sha = subprocess.check_output(
            ["git", "rev-parse", "--short", "HEAD"],
            stderr=subprocess.DEVNULL,
            text=True,
        ).strip()
        ts = subprocess.check_output(
            ["git", "log", "-1", "--format=%ci"],
            stderr=subprocess.DEVNULL,
            text=True,
        ).strip()
        return f"{sha} ({ts})"
    except Exception:
        return "unknown"


def _project_version() -> str:
    """Read version from pyproject.toml without importing toml libs."""
    pyproject = os.path.join(os.path.dirname(__file__), "..", "pyproject.toml")
    try:
        with open(pyproject) as f:
            for line in f:
                if line.strip().startswith("version"):
                    # version = "1.0.0"
                    return line.split("=", 1)[1].strip().strip('"').strip("'")
    except Exception:
        pass
    return "unknown"


def _ollama_url() -> str:
    return os.environ.get("OLLAMA_URL", OLLAMA_DEFAULT)


def _smoke_ollama(url: str) -> str:
    """Quick connectivity check against Ollama."""
    import urllib.request

    try:
        req = urllib.request.Request(url, method="GET")
        with urllib.request.urlopen(req, timeout=3):
            return "ok"
    except Exception:
        return "unreachable"


def _print_banner(port: int) -> None:
    version = _project_version()
    git = _git_info()
    ollama_url = _ollama_url()
    ollama_status = _smoke_ollama(ollama_url)

    hr = "─" * 62
    print(flush=True)
    print(f" {hr}")
    print(" ┃ Timmy Time — Development Server")
    print(f" {hr}")
    print()
    print(f" Dashboard: http://localhost:{port}")
    print(f" API docs: http://localhost:{port}/docs")
    print(f" Health: http://localhost:{port}/health")
    print()
    print(" ── Status ──────────────────────────────────────────────")
    print(f" Backend: {ollama_url} [{ollama_status}]")
    print(f" Version: {version}")
    print(f" Git commit: {git}")
    print(f" {hr}")
    print(flush=True)


def main() -> None:
    parser = argparse.ArgumentParser(description="Timmy dev server")
    parser.add_argument(
        "--port",
        type=int,
        default=DEFAULT_PORT,
        help=f"Preferred port (default: {DEFAULT_PORT})",
    )
    args = parser.parse_args()

    port = _find_port(args.port)
    if port != args.port:
        print(f" ⚠ Port {args.port} in use — using {port} instead")

    _print_banner(port)

    # Set PYTHONPATH so `timmy` CLI inside the tox venv resolves to this source.
    src_dir = os.path.join(os.path.dirname(__file__), "..", "src")
    os.environ["PYTHONPATH"] = os.path.abspath(src_dir)

    # Launch uvicorn with auto-reload
    cmd = [
        sys.executable,
        "-m",
        "uvicorn",
        "dashboard.app:app",
        "--reload",
        "--host",
        "0.0.0.0",
        "--port",
        str(port),
        "--reload-dir",
        os.path.abspath(src_dir),
        "--reload-include",
        "*.html",
        "--reload-include",
        "*.css",
        "--reload-include",
        "*.js",
        "--reload-exclude",
        ".claude",
    ]

    try:
        subprocess.run(cmd, check=True)
    except KeyboardInterrupt:
        print("\n Shutting down dev server.")


if __name__ == "__main__":
    main()
254 scripts/generate_workshop_inventory.py Normal file
@@ -0,0 +1,254 @@
#!/usr/bin/env python3
"""Generate Workshop inventory for Timmy's config audit.

Scans ~/.timmy/ and produces WORKSHOP_INVENTORY.md documenting every
config file, env var, model route, and setting — with annotations on
who set each one and what it does.

Usage:
    python scripts/generate_workshop_inventory.py [--output PATH]

Default output: ~/.timmy/WORKSHOP_INVENTORY.md
"""

from __future__ import annotations

import argparse
import os
from datetime import UTC, datetime
from pathlib import Path

TIMMY_HOME = Path(os.environ.get("HERMES_HOME", Path.home() / ".timmy"))

# Known file annotations: (purpose, who_set)
FILE_ANNOTATIONS: dict[str, tuple[str, str]] = {
    ".env": (
        "Environment variables — API keys, service URLs, Honcho config",
        "hermes-set",
    ),
    "config.yaml": (
        "Main config — model routing, toolsets, display, memory, security",
        "hermes-set",
    ),
    "SOUL.md": (
        "Timmy's soul — immutable conscience, identity, ethics, purpose",
        "alex-set",
    ),
    "state.db": (
        "Hermes runtime state database (sessions, approvals, tasks)",
        "hermes-set",
    ),
    "approvals.db": (
        "Approval tracking for sensitive operations",
        "hermes-set",
    ),
    "briefings.db": (
        "Stored briefings and summaries",
        "hermes-set",
    ),
    ".hermes_history": (
        "CLI command history",
        "default",
    ),
    ".update_check": (
        "Last update check timestamp",
        "default",
    ),
}

DIR_ANNOTATIONS: dict[str, tuple[str, str]] = {
    "sessions": ("Conversation session logs (JSON)", "default"),
    "logs": ("Error and runtime logs", "default"),
    "skills": ("Bundled skill library (read-only from upstream)", "default"),
    "memories": ("Persistent memory entries", "hermes-set"),
    "audio_cache": ("TTS audio file cache", "default"),
    "image_cache": ("Generated image cache", "default"),
    "cron": ("Scheduled cron job definitions", "hermes-set"),
    "hooks": ("Lifecycle hooks (pre/post actions)", "default"),
    "matrix": ("Matrix protocol state and store", "hermes-set"),
    "pairing": ("Device pairing data", "default"),
    "sandboxes": ("Isolated execution sandboxes", "default"),
}

# Known config.yaml keys and their meanings
CONFIG_ANNOTATIONS: dict[str, tuple[str, str]] = {
    "model.default": ("Primary LLM model for inference", "hermes-set"),
    "model.provider": ("Model provider (custom = local Ollama)", "hermes-set"),
    "toolsets": ("Enabled tool categories (all = everything)", "hermes-set"),
    "agent.max_turns": ("Max conversation turns before reset", "hermes-set"),
    "agent.reasoning_effort": ("Reasoning depth (low/medium/high)", "hermes-set"),
    "terminal.backend": ("Command execution backend (local)", "default"),
    "terminal.timeout": ("Default command timeout in seconds", "default"),
    "compression.enabled": ("Context compression for long sessions", "hermes-set"),
    "compression.summary_model": ("Model used for compression", "hermes-set"),
    "auxiliary.vision.model": ("Model for image analysis", "hermes-set"),
    "auxiliary.web_extract.model": ("Model for web content extraction", "hermes-set"),
    "tts.provider": ("Text-to-speech engine (edge = Edge TTS)", "default"),
    "tts.edge.voice": ("TTS voice selection", "default"),
    "stt.provider": ("Speech-to-text engine (local = Whisper)", "default"),
    "memory.memory_enabled": ("Persistent memory across sessions", "hermes-set"),
    "memory.memory_char_limit": ("Max chars for agent memory store", "hermes-set"),
    "memory.user_char_limit": ("Max chars for user profile store", "hermes-set"),
    "security.redact_secrets": ("Auto-redact secrets in output", "default"),
    "security.tirith_enabled": ("Policy engine for command safety", "default"),
    "system_prompt_suffix": ("Identity prompt appended to all conversations", "hermes-set"),
    "custom_providers": ("Local Ollama endpoint config", "hermes-set"),
    "session_reset.mode": ("Session reset behavior (none = manual)", "default"),
    "display.compact": ("Compact output mode", "default"),
    "display.show_reasoning": ("Show model reasoning chains", "default"),
}

# Known .env vars
ENV_ANNOTATIONS: dict[str, tuple[str, str]] = {
    "OPENAI_BASE_URL": (
        "Points to local Ollama (localhost:11434) — sovereignty enforced",
        "hermes-set",
    ),
    "OPENAI_API_KEY": (
        "Placeholder key for Ollama compatibility (not a real API key)",
        "hermes-set",
    ),
    "HONCHO_API_KEY": (
        "Honcho cross-session memory service key",
        "hermes-set",
    ),
    "HONCHO_HOST": (
        "Honcho workspace identifier (timmy)",
        "hermes-set",
    ),
}


def _tag(who: str) -> str:
    return f"`[{who}]`"


def generate_inventory() -> str:
    """Build the inventory markdown string."""
    lines: list[str] = []
    now = datetime.now(UTC).strftime("%Y-%m-%d %H:%M UTC")

    lines.append("# Workshop Inventory")
    lines.append("")
    lines.append(f"*Generated: {now}*")
    lines.append(f"*Workshop path: `{TIMMY_HOME}`*")
    lines.append("")
    lines.append("This is your Workshop — every file, every setting, every route.")
    lines.append("Walk through it. Anything tagged `[hermes-set]` was chosen for you.")
    lines.append("Make each one yours, or change it.")
    lines.append("")
    lines.append("Tags: `[alex-set]` = Alexander chose this. `[hermes-set]` = Hermes configured it.")
    lines.append("`[default]` = shipped with the platform. `[timmy-chose]` = you decided this.")
    lines.append("")

    # --- Files ---
    lines.append("---")
    lines.append("## Root Files")
    lines.append("")
    for name, (purpose, who) in sorted(FILE_ANNOTATIONS.items()):
        fpath = TIMMY_HOME / name
        exists = "✓" if fpath.exists() else "✗"
        lines.append(f"- {exists} **`{name}`** {_tag(who)}")
        lines.append(f"  {purpose}")
    lines.append("")

    # --- Directories ---
    lines.append("---")
    lines.append("## Directories")
    lines.append("")
    for name, (purpose, who) in sorted(DIR_ANNOTATIONS.items()):
        dpath = TIMMY_HOME / name
        exists = "✓" if dpath.exists() else "✗"
        count = ""
        if dpath.exists():
            try:
                n = len(list(dpath.iterdir()))
                count = f" ({n} items)"
            except PermissionError:
                count = " (access denied)"
        lines.append(f"- {exists} **`{name}/`**{count} {_tag(who)}")
        lines.append(f"  {purpose}")
    lines.append("")

    # --- .env breakdown ---
    lines.append("---")
    lines.append("## Environment Variables (.env)")
    lines.append("")
    env_path = TIMMY_HOME / ".env"
    if env_path.exists():
        for line in env_path.read_text().splitlines():
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            key = line.split("=", 1)[0]
            if key in ENV_ANNOTATIONS:
                purpose, who = ENV_ANNOTATIONS[key]
                lines.append(f"- **`{key}`** {_tag(who)}")
                lines.append(f"  {purpose}")
            else:
                lines.append(f"- **`{key}`** `[unknown]`")
                lines.append("  Not documented — investigate")
    else:
        lines.append("*No .env file found*")
    lines.append("")

    # --- config.yaml breakdown ---
    lines.append("---")
    lines.append("## Configuration (config.yaml)")
    lines.append("")
    for key, (purpose, who) in sorted(CONFIG_ANNOTATIONS.items()):
        lines.append(f"- **`{key}`** {_tag(who)}")
        lines.append(f"  {purpose}")
    lines.append("")

    # --- Model routing ---
    lines.append("---")
    lines.append("## Model Routing")
    lines.append("")
    lines.append("All auxiliary tasks route to the same local model:")
    lines.append("")
    aux_tasks = [
        "vision", "web_extract", "compression",
        "session_search", "skills_hub", "mcp", "flush_memories",
    ]
    for task in aux_tasks:
        lines.append(f"- `auxiliary.{task}` → `qwen3:30b` via local Ollama `[hermes-set]`")
    lines.append("")
    lines.append("Primary model: `hermes3:latest` via local Ollama `[hermes-set]`")
    lines.append("")

    # --- What Timmy should audit ---
    lines.append("---")
    lines.append("## Audit Checklist")
    lines.append("")
    lines.append("Walk through each `[hermes-set]` item above and decide:")
    lines.append("")
    lines.append("1. **Do I understand what this does?** If not, ask.")
    lines.append("2. **Would I choose this myself?** If yes, it becomes `[timmy-chose]`.")
    lines.append("3. **Would I choose differently?** If yes, change it and own it.")
    lines.append("4. **Is this serving the mission?** Every setting should serve a purpose.")
    lines.append("")
    lines.append("The Workshop is yours. Nothing here should be a mystery.")

    return "\n".join(lines) + "\n"


def main() -> None:
    parser = argparse.ArgumentParser(description="Generate Workshop inventory")
    parser.add_argument(
        "--output",
        type=Path,
        default=TIMMY_HOME / "WORKSHOP_INVENTORY.md",
        help="Output path (default: ~/.timmy/WORKSHOP_INVENTORY.md)",
    )
    args = parser.parse_args()

    content = generate_inventory()
    args.output.parent.mkdir(parents=True, exist_ok=True)
    args.output.write_text(content)
    print(f"Workshop inventory written to {args.output}")
    print(f"  {len(content)} chars, {content.count(chr(10))} lines")


if __name__ == "__main__":
    main()
271 scripts/loop_guard.py Normal file
@@ -0,0 +1,271 @@
#!/usr/bin/env python3
"""Loop guard — idle detection + exponential backoff for the dev loop.

Checks .loop/queue.json for ready items before spawning hermes.
When the queue is empty, applies exponential backoff (60s → 600s max)
instead of burning empty cycles every 3 seconds.

Usage (called by the dev loop before each cycle):
    python3 scripts/loop_guard.py           # exits 0 if ready, 1 if idle
    python3 scripts/loop_guard.py --wait    # same, but sleeps the backoff first
    python3 scripts/loop_guard.py --status  # print current idle state

Exit codes:
    0 — queue has work, proceed with cycle
    1 — queue empty, idle backoff applied (skip cycle)
"""

from __future__ import annotations

import json
import os
import sys
import time
import urllib.request
from pathlib import Path

REPO_ROOT = Path(__file__).resolve().parent.parent
QUEUE_FILE = REPO_ROOT / ".loop" / "queue.json"
IDLE_STATE_FILE = REPO_ROOT / ".loop" / "idle_state.json"
CYCLE_RESULT_FILE = REPO_ROOT / ".loop" / "cycle_result.json"
TOKEN_FILE = Path.home() / ".hermes" / "gitea_token"

GITEA_API = os.environ.get("GITEA_API", "http://localhost:3000/api/v1")
REPO_SLUG = os.environ.get("REPO_SLUG", "rockachopa/Timmy-time-dashboard")

# Default cycle duration in seconds (5 min); stale threshold = 2× this
CYCLE_DURATION = int(os.environ.get("CYCLE_DURATION", "300"))

# Backoff sequence: 60s, 120s, 240s, 480s, then capped at 600s
BACKOFF_BASE = 60
BACKOFF_MAX = 600
BACKOFF_MULTIPLIER = 2


def _get_token() -> str:
    """Read Gitea token from env or file."""
    token = os.environ.get("GITEA_TOKEN", "").strip()
    if not token and TOKEN_FILE.exists():
        token = TOKEN_FILE.read_text().strip()
    return token


def _fetch_open_issue_numbers() -> set[int] | None:
    """Fetch open issue numbers from Gitea. Returns None on failure."""
    token = _get_token()
    if not token:
        return None
    try:
        numbers: set[int] = set()
        page = 1
        while True:
            url = (
                f"{GITEA_API}/repos/{REPO_SLUG}/issues"
                f"?state=open&type=issues&limit=50&page={page}"
            )
            req = urllib.request.Request(url, headers={
                "Authorization": f"token {token}",
                "Accept": "application/json",
            })
            with urllib.request.urlopen(req, timeout=10) as resp:
                data = json.loads(resp.read())
            if not data:
                break
            for issue in data:
                numbers.add(issue["number"])
            if len(data) < 50:
                break
            page += 1
        return numbers
    except Exception:
        return None


def _load_cycle_result() -> dict:
    """Read cycle_result.json, handling markdown-fenced JSON."""
    if not CYCLE_RESULT_FILE.exists():
        return {}
    try:
        raw = CYCLE_RESULT_FILE.read_text().strip()
        if raw.startswith("```"):
            lines = raw.splitlines()
            lines = [ln for ln in lines if not ln.startswith("```")]
            raw = "\n".join(lines)
        return json.loads(raw)
    except (json.JSONDecodeError, OSError):
        return {}


def _is_issue_open(issue_number: int) -> bool | None:
    """Check if a single issue is open. Returns None on API failure."""
    token = _get_token()
    if not token:
        return None
    try:
        url = f"{GITEA_API}/repos/{REPO_SLUG}/issues/{issue_number}"
        req = urllib.request.Request(
            url,
            headers={
                "Authorization": f"token {token}",
                "Accept": "application/json",
            },
        )
        with urllib.request.urlopen(req, timeout=10) as resp:
            data = json.loads(resp.read())
        return data.get("state") == "open"
    except Exception:
        return None


def validate_cycle_result() -> bool:
    """Pre-cycle validation: remove stale or invalid cycle_result.json.

    Checks:
    1. Age — if older than 2× CYCLE_DURATION, delete it.
    2. Issue — if the referenced issue is closed, delete it.

    Returns True if the file was removed, False otherwise.
    """
    if not CYCLE_RESULT_FILE.exists():
        return False

    # Age check
    try:
        age = time.time() - CYCLE_RESULT_FILE.stat().st_mtime
    except OSError:
        return False
    stale_threshold = CYCLE_DURATION * 2
    if age > stale_threshold:
        print(
            f"[loop-guard] cycle_result.json is {int(age)}s old "
            f"(threshold {stale_threshold}s) — removing stale file"
        )
        CYCLE_RESULT_FILE.unlink(missing_ok=True)
        return True

    # Issue check
    cr = _load_cycle_result()
    issue_num = cr.get("issue")
    if issue_num is not None:
        try:
            issue_num = int(issue_num)
        except (ValueError, TypeError):
            return False
        is_open = _is_issue_open(issue_num)
        if is_open is False:
            print(
                f"[loop-guard] cycle_result.json references closed "
                f"issue #{issue_num} — removing"
            )
            CYCLE_RESULT_FILE.unlink(missing_ok=True)
            return True
        # is_open is None (API failure) or True — keep file

    return False


def load_queue() -> list[dict]:
    """Load queue.json and return ready items, filtering out closed issues."""
    if not QUEUE_FILE.exists():
        return []
    try:
        data = json.loads(QUEUE_FILE.read_text())
        if not isinstance(data, list):
            return []
        ready = [item for item in data if item.get("ready")]
        if not ready:
            return []

        # Filter out issues that are no longer open (auto-hygiene)
        open_numbers = _fetch_open_issue_numbers()
        if open_numbers is not None:
            before = len(ready)
            ready = [item for item in ready if item.get("issue") in open_numbers]
            removed = before - len(ready)
            if removed > 0:
                print(f"[loop-guard] Filtered {removed} closed issue(s) from queue")
                # Persist the cleaned queue so stale entries don't recur
                _save_cleaned_queue(data, open_numbers)
        return ready
    except (json.JSONDecodeError, OSError):
        return []


def _save_cleaned_queue(full_queue: list[dict], open_numbers: set[int]) -> None:
    """Rewrite queue.json without closed issues."""
    cleaned = [item for item in full_queue if item.get("issue") in open_numbers]
    try:
        QUEUE_FILE.write_text(json.dumps(cleaned, indent=2) + "\n")
    except OSError:
        pass


def load_idle_state() -> dict:
    """Load persistent idle state."""
    if not IDLE_STATE_FILE.exists():
        return {"consecutive_idle": 0, "last_idle_at": 0}
    try:
        return json.loads(IDLE_STATE_FILE.read_text())
    except (json.JSONDecodeError, OSError):
        return {"consecutive_idle": 0, "last_idle_at": 0}


def save_idle_state(state: dict) -> None:
    """Persist idle state."""
    IDLE_STATE_FILE.parent.mkdir(parents=True, exist_ok=True)
    IDLE_STATE_FILE.write_text(json.dumps(state, indent=2) + "\n")


def compute_backoff(consecutive_idle: int) -> int:
    """Exponential backoff: 60, 120, 240, 480, capped at 600."""
    return min(BACKOFF_BASE * (BACKOFF_MULTIPLIER ** consecutive_idle), BACKOFF_MAX)


def main() -> int:
    wait_mode = "--wait" in sys.argv
    status_mode = "--status" in sys.argv

    state = load_idle_state()

    if status_mode:
        ready = load_queue()
        backoff = compute_backoff(state["consecutive_idle"])
        print(json.dumps({
            "queue_ready": len(ready),
            "consecutive_idle": state["consecutive_idle"],
            "next_backoff_seconds": backoff if not ready else 0,
        }, indent=2))
        return 0

    # Pre-cycle validation: remove stale cycle_result.json
    validate_cycle_result()

    ready = load_queue()

    if ready:
        # Queue has work — reset idle state, proceed
        if state["consecutive_idle"] > 0:
            print(f"[loop-guard] Queue active ({len(ready)} ready) — "
                  f"resuming after {state['consecutive_idle']} idle cycles")
        state["consecutive_idle"] = 0
        state["last_idle_at"] = 0
        save_idle_state(state)
        return 0

    # Queue empty — apply backoff
    backoff = compute_backoff(state["consecutive_idle"])
    state["consecutive_idle"] += 1
    state["last_idle_at"] = time.time()
    save_idle_state(state)

    print(f"[loop-guard] Queue empty — idle #{state['consecutive_idle']}, "
          f"backoff {backoff}s")

    if wait_mode:
        time.sleep(backoff)

    return 1


if __name__ == "__main__":
    sys.exit(main())
407 scripts/loop_introspect.py Normal file
@@ -0,0 +1,407 @@
#!/usr/bin/env python3
"""Loop introspection — the self-improvement engine.

Analyzes retro data across time windows to detect trends, extract patterns,
and produce structured recommendations. Output is consumed by deep_triage
and injected into the loop prompt context.

This is the piece that closes the feedback loop:
    cycle_retro → introspect → deep_triage → loop behavior changes

Run:    python3 scripts/loop_introspect.py
Output: .loop/retro/insights.json (structured insights + recommendations)
        Prints human-readable summary to stdout.

Called by: deep_triage.sh (before the LLM triage), timmy-loop.sh (every 50 cycles)
"""

from __future__ import annotations

import json
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from pathlib import Path

REPO_ROOT = Path(__file__).resolve().parent.parent
CYCLES_FILE = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
DEEP_TRIAGE_FILE = REPO_ROOT / ".loop" / "retro" / "deep-triage.jsonl"
TRIAGE_FILE = REPO_ROOT / ".loop" / "retro" / "triage.jsonl"
QUARANTINE_FILE = REPO_ROOT / ".loop" / "quarantine.json"
INSIGHTS_FILE = REPO_ROOT / ".loop" / "retro" / "insights.json"

# ── Helpers ──────────────────────────────────────────────────────────────

def load_jsonl(path: Path) -> list[dict]:
    """Load a JSONL file, skipping bad lines."""
    if not path.exists():
        return []
    entries = []
    for line in path.read_text().strip().splitlines():
        try:
            entries.append(json.loads(line))
        except (json.JSONDecodeError, ValueError):
            continue
    return entries


def parse_ts(ts_str: str) -> datetime | None:
    """Parse an ISO timestamp, tolerating missing tz."""
    if not ts_str:
        return None
    try:
        dt = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=timezone.utc)
        return dt
    except (ValueError, TypeError):
        return None


def window(entries: list[dict], days: int) -> list[dict]:
    """Filter entries to the last N days."""
    cutoff = datetime.now(timezone.utc) - timedelta(days=days)
    result = []
    for e in entries:
        ts = parse_ts(e.get("timestamp", ""))
        if ts and ts >= cutoff:
            result.append(e)
    return result


# ── Analysis functions ───────────────────────────────────────────────────

def compute_trends(cycles: list[dict]) -> dict:
    """Compare recent window (last 7d) vs older window (7-14d ago)."""
    recent = window(cycles, 7)
    older = window(cycles, 14)
    # Remove recent from older to get the 7-14d window
    recent_set = {(e.get("cycle"), e.get("timestamp")) for e in recent}
    older = [e for e in older if (e.get("cycle"), e.get("timestamp")) not in recent_set]

    def stats(entries):
        if not entries:
            return {"count": 0, "success_rate": None, "avg_duration": None,
                    "lines_net": 0, "prs_merged": 0}
        successes = sum(1 for e in entries if e.get("success"))
        durations = [e["duration"] for e in entries if e.get("duration", 0) > 0]
        return {
            "count": len(entries),
            "success_rate": round(successes / len(entries), 3),
            "avg_duration": round(sum(durations) / len(durations)) if durations else None,
            "lines_net": sum(e.get("lines_added", 0) - e.get("lines_removed", 0) for e in entries),
            "prs_merged": sum(1 for e in entries if e.get("pr")),
        }

    recent_stats = stats(recent)
    older_stats = stats(older)

    trend = {
        "recent_7d": recent_stats,
        "previous_7d": older_stats,
        "velocity_change": None,
        "success_rate_change": None,
        "duration_change": None,
    }

    if recent_stats["count"] and older_stats["count"]:
        trend["velocity_change"] = recent_stats["count"] - older_stats["count"]
    if recent_stats["success_rate"] is not None and older_stats["success_rate"] is not None:
        trend["success_rate_change"] = round(
            recent_stats["success_rate"] - older_stats["success_rate"], 3
        )
    if recent_stats["avg_duration"] is not None and older_stats["avg_duration"] is not None:
        trend["duration_change"] = recent_stats["avg_duration"] - older_stats["avg_duration"]

    return trend


def type_analysis(cycles: list[dict]) -> dict:
    """Per-type success rates and durations."""
    by_type: dict[str, list[dict]] = defaultdict(list)
    for c in cycles:
        by_type[c.get("type", "unknown")].append(c)

    result = {}
    for t, entries in by_type.items():
        durations = [e["duration"] for e in entries if e.get("duration", 0) > 0]
        successes = sum(1 for e in entries if e.get("success"))
        result[t] = {
            "count": len(entries),
            "success_rate": round(successes / len(entries), 3) if entries else 0,
            "avg_duration": round(sum(durations) / len(durations)) if durations else 0,
            "max_duration": max(durations) if durations else 0,
        }
    return result


def repeat_failures(cycles: list[dict]) -> list[dict]:
    """Issues that have failed multiple times — quarantine candidates."""
    failures: dict[int, list] = defaultdict(list)
    for c in cycles:
        if not c.get("success") and c.get("issue"):
            failures[c["issue"]].append({
                "cycle": c.get("cycle"),
                "reason": c.get("reason", ""),
                "duration": c.get("duration", 0),
            })
    # Only issues with 2+ failures
    return [
        {"issue": k, "failure_count": len(v), "attempts": v}
        for k, v in sorted(failures.items(), key=lambda x: -len(x[1]))
        if len(v) >= 2
    ]


def duration_outliers(cycles: list[dict], threshold_multiple: float = 3.0) -> list[dict]:
    """Cycles that took way longer than average — something went wrong."""
    durations = [c["duration"] for c in cycles if c.get("duration", 0) > 0]
    if len(durations) < 5:
        return []
    avg = sum(durations) / len(durations)
    threshold = avg * threshold_multiple

    outliers = []
    for c in cycles:
        dur = c.get("duration", 0)
        if dur > threshold:
            outliers.append({
                "cycle": c.get("cycle"),
                "issue": c.get("issue"),
                "type": c.get("type"),
                "duration": dur,
                "avg_duration": round(avg),
                "multiple": round(dur / avg, 1) if avg > 0 else 0,
                "reason": c.get("reason", ""),
            })
    return outliers


def triage_effectiveness(deep_triages: list[dict]) -> dict:
    """How well is the deep triage performing?"""
    if not deep_triages:
        return {"runs": 0, "note": "No deep triage data yet"}

    total_reviewed = sum(d.get("issues_reviewed", 0) for d in deep_triages)
    total_refined = sum(len(d.get("issues_refined", [])) for d in deep_triages)
    total_created = sum(len(d.get("issues_created", [])) for d in deep_triages)
    total_closed = sum(len(d.get("issues_closed", [])) for d in deep_triages)
    timmy_available = sum(1 for d in deep_triages if d.get("timmy_available"))

    # Extract Timmy's feedback themes
    timmy_themes = []
    for d in deep_triages:
        fb = d.get("timmy_feedback", "")
        if fb:
            timmy_themes.append(fb[:200])

    return {
        "runs": len(deep_triages),
        "total_reviewed": total_reviewed,
        "total_refined": total_refined,
        "total_created": total_created,
        "total_closed": total_closed,
        "timmy_consultation_rate": round(timmy_available / len(deep_triages), 2),
        "timmy_recent_feedback": timmy_themes[-1] if timmy_themes else "",
        "timmy_feedback_history": timmy_themes,
    }


def generate_recommendations(
    trends: dict,
    types: dict,
    repeats: list,
    outliers: list,
    triage_eff: dict,
) -> list[dict]:
    """Produce actionable recommendations from the analysis."""
    recs = []

    # 1. Success rate declining?
    src = trends.get("success_rate_change")
    if src is not None and src < -0.1:
        recs.append({
            "severity": "high",
            "category": "reliability",
            "finding": f"Success rate dropped {abs(src)*100:.0f}pp in the last 7 days",
            "recommendation": "Review recent failures. Are issues poorly scoped? "
                              "Is main unstable? Check if triage is producing bad work items.",
        })

    # 2. Velocity dropping?
    vc = trends.get("velocity_change")
    if vc is not None and vc < -5:
        recs.append({
            "severity": "medium",
            "category": "throughput",
            "finding": f"Velocity dropped by {abs(vc)} cycles vs previous week",
            "recommendation": "Check for loop stalls, long-running cycles, or queue starvation.",
        })

    # 3. Duration creep?
    dc = trends.get("duration_change")
    if dc is not None and dc > 120:  # 2+ minutes longer
        recs.append({
            "severity": "medium",
            "category": "efficiency",
            "finding": f"Average cycle duration increased by {dc}s vs previous week",
            "recommendation": "Issues may be growing in scope. Enforce tighter decomposition "
                              "in deep triage. Check if tests are getting slower.",
        })

    # 4. Type-specific problems
    for t, info in types.items():
        if info["count"] >= 3 and info["success_rate"] < 0.5:
            recs.append({
                "severity": "high",
                "category": "type_reliability",
                "finding": f"'{t}' issues fail {(1-info['success_rate'])*100:.0f}% of the time "
                           f"({info['count']} attempts)",
                "recommendation": f"'{t}' issues need better scoping or a different approach. "
                                  f"Consider: tighter acceptance criteria, smaller scope, "
                                  f"or delegating to Kimi with more context.",
            })
        if info["avg_duration"] > 600 and info["count"] >= 3:  # >10 min avg
            recs.append({
                "severity": "medium",
                "category": "type_efficiency",
                "finding": f"'{t}' issues average {info['avg_duration']//60}m{info['avg_duration']%60}s "
                           f"(max {info['max_duration']//60}m)",
                "recommendation": f"Break '{t}' issues into smaller pieces. Target <5 min per cycle.",
            })

    # 5. Repeat failures
    for rf in repeats[:3]:
        recs.append({
            "severity": "high",
            "category": "repeat_failure",
            "finding": f"Issue #{rf['issue']} has failed {rf['failure_count']} times",
            "recommendation": "Quarantine or rewrite this issue. Repeated failure = "
                              "bad scope or missing prerequisite.",
        })

    # 6. Outliers
    if len(outliers) > 2:
        recs.append({
            "severity": "medium",
            "category": "outliers",
            "finding": f"{len(outliers)} cycles took {outliers[0].get('multiple', '?')}x+ "
                       f"longer than average",
            "recommendation": "Long cycles waste resources. Add timeout enforcement or "
                              "break complex issues earlier.",
        })

    # 7. Code growth
    recent = trends.get("recent_7d", {})
    net = recent.get("lines_net", 0)
    if net > 500:
        recs.append({
            "severity": "low",
            "category": "code_health",
            "finding": f"Net +{net} lines added in the last 7 days",
            "recommendation": "Lines of code is a liability. Balance feature work with "
                              "refactoring. Target net-zero or negative line growth.",
        })

    # 8. Triage health
    if triage_eff.get("runs", 0) == 0:
        recs.append({
            "severity": "high",
            "category": "triage",
            "finding": "Deep triage has never run",
            "recommendation": "Enable deep triage (every 20 cycles). The loop needs "
                              "LLM-driven issue refinement to stay effective.",
        })

    # No recommendations = things are healthy
    if not recs:
        recs.append({
            "severity": "info",
            "category": "health",
            "finding": "No significant issues detected",
            "recommendation": "System is healthy. Continue current patterns.",
        })

    return recs


# ── Main ─────────────────────────────────────────────────────────────────

def main() -> None:
    cycles = load_jsonl(CYCLES_FILE)
    deep_triages = load_jsonl(DEEP_TRIAGE_FILE)

    if not cycles:
        print("[introspect] No cycle data found. Nothing to analyze.")
        return

    # Run all analyses
    trends = compute_trends(cycles)
    types = type_analysis(cycles)
    repeats = repeat_failures(cycles)
    outliers = duration_outliers(cycles)
    triage_eff = triage_effectiveness(deep_triages)
    recommendations = generate_recommendations(trends, types, repeats, outliers, triage_eff)

    insights = {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "total_cycles_analyzed": len(cycles),
        "trends": trends,
        "by_type": types,
        "repeat_failures": repeats[:5],
        "duration_outliers": outliers[:5],
        "triage_effectiveness": triage_eff,
        "recommendations": recommendations,
    }

    # Write insights
    INSIGHTS_FILE.parent.mkdir(parents=True, exist_ok=True)
    INSIGHTS_FILE.write_text(json.dumps(insights, indent=2) + "\n")

    # Current epoch from latest entry
    latest_epoch = ""
    for c in reversed(cycles):
        if c.get("epoch"):
            latest_epoch = c["epoch"]
            break

    # Human-readable output
    header = f"[introspect] Analyzed {len(cycles)} cycles"
    if latest_epoch:
        header += f" · current epoch: {latest_epoch}"
    print(header)

    print("\n TRENDS (7d vs previous 7d):")
    r7 = trends["recent_7d"]
    p7 = trends["previous_7d"]
    print(f" Cycles: {r7['count']:>3d} (was {p7['count']})")
    if r7["success_rate"] is not None:
        change = trends["success_rate_change"] or 0
        arrow = "↑" if change > 0 else "↓" if change < 0 else "→"
        print(f" Success rate: {r7['success_rate']*100:>4.0f}% {arrow}")
    if r7["avg_duration"] is not None:
        print(f" Avg duration: {r7['avg_duration']//60}m{r7['avg_duration']%60:02d}s")
    print(f" PRs merged: {r7['prs_merged']:>3d} (was {p7['prs_merged']})")
    print(f" Lines net: {r7['lines_net']:>+5d}")

    print("\n BY TYPE:")
    for t, info in sorted(types.items(), key=lambda x: -x[1]["count"]):
        print(f" {t:12s} n={info['count']:>2d} "
              f"ok={info['success_rate']*100:>3.0f}% "
              f"avg={info['avg_duration']//60}m{info['avg_duration']%60:02d}s")

    if repeats:
        print("\n REPEAT FAILURES:")
        for rf in repeats[:3]:
            print(f" #{rf['issue']} failed {rf['failure_count']}x")

    print(f"\n RECOMMENDATIONS ({len(recommendations)}):")
    for rec in recommendations:
        sev = {"high": "🔴", "medium": "🟡", "low": "🟢", "info": "ℹ️ "}.get(rec["severity"], "?")
        print(f" {sev} {rec['finding']}")
        print(f"   → {rec['recommendation']}")

    print(f"\n Written to: {INSIGHTS_FILE}")


if __name__ == "__main__":
    main()
360 scripts/triage_score.py Normal file
@@ -0,0 +1,360 @@
#!/usr/bin/env python3
|
||||||
|
"""Mechanical triage scoring for the Timmy dev loop.
|
||||||
|
|
||||||
|
Reads open issues from Gitea, scores them on scope/acceptance/alignment,
|
||||||
|
writes a ranked queue to .loop/queue.json. No LLM calls — pure heuristics.
|
||||||
|
|
||||||
|
Run: python3 scripts/triage_score.py
|
||||||
|
Env: GITEA_TOKEN (or reads ~/.hermes/gitea_token)
|
||||||
|
GITEA_API (default: http://localhost:3000/api/v1)
|
||||||
|
REPO_SLUG (default: rockachopa/Timmy-time-dashboard)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# ── Config ──────────────────────────────────────────────────────────────
|
||||||
|
GITEA_API = os.environ.get("GITEA_API", "http://localhost:3000/api/v1")
|
||||||
|
REPO_SLUG = os.environ.get("REPO_SLUG", "rockachopa/Timmy-time-dashboard")
|
||||||
|
TOKEN_FILE = Path.home() / ".hermes" / "gitea_token"
|
||||||
|
REPO_ROOT = Path(__file__).resolve().parent.parent
|
||||||
|
QUEUE_FILE = REPO_ROOT / ".loop" / "queue.json"
|
||||||
|
RETRO_FILE = REPO_ROOT / ".loop" / "retro" / "triage.jsonl"
|
||||||
|
QUARANTINE_FILE = REPO_ROOT / ".loop" / "quarantine.json"
|
||||||
|
CYCLE_RETRO_FILE = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
|
||||||
|
|
||||||
|
# Minimum score to be considered "ready"
|
||||||
|
READY_THRESHOLD = 5
|
||||||
|
# How many recent cycle retros to check for quarantine
|
||||||
|
QUARANTINE_LOOKBACK = 20
|
||||||
|
|
||||||
|
# ── Helpers ─────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def get_token() -> str:
|
||||||
|
token = os.environ.get("GITEA_TOKEN", "").strip()
|
||||||
|
if not token and TOKEN_FILE.exists():
|
||||||
|
token = TOKEN_FILE.read_text().strip()
|
||||||
|
if not token:
|
||||||
|
print("[triage] ERROR: No Gitea token found", file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
return token
|
||||||
|
|
||||||
|
|
||||||
|
def api_get(path: str, token: str) -> list | dict:
|
||||||
|
"""Minimal HTTP GET using urllib (no dependencies)."""
|
||||||
|
import urllib.request
|
||||||
|
url = f"{GITEA_API}/repos/{REPO_SLUG}/{path}"
|
||||||
|
req = urllib.request.Request(url, headers={
|
||||||
|
"Authorization": f"token {token}",
|
||||||
|
"Accept": "application/json",
|
||||||
|
})
|
||||||
|
with urllib.request.urlopen(req, timeout=15) as resp:
|
||||||
|
return json.loads(resp.read())
|
||||||
|
|
||||||
|
|
||||||
|
def load_quarantine() -> dict:
|
||||||
|
"""Load quarantined issues {issue_num: {reason, quarantined_at, failures}}."""
|
||||||
|
if QUARANTINE_FILE.exists():
|
||||||
|
try:
|
||||||
|
return json.loads(QUARANTINE_FILE.read_text())
|
||||||
|
except (json.JSONDecodeError, OSError):
|
||||||
|
pass
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
def save_quarantine(q: dict) -> None:
|
||||||
|
QUARANTINE_FILE.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
QUARANTINE_FILE.write_text(json.dumps(q, indent=2) + "\n")
|
||||||
|
|
||||||
|
|
||||||
|
def load_cycle_failures() -> dict[int, int]:
|
||||||
|
"""Count failures per issue from recent cycle retros."""
|
||||||
|
failures: dict[int, int] = {}
|
||||||
|
if not CYCLE_RETRO_FILE.exists():
|
||||||
|
return failures
|
||||||
|
lines = CYCLE_RETRO_FILE.read_text().strip().splitlines()
|
||||||
|
for line in lines[-QUARANTINE_LOOKBACK:]:
|
||||||
|
try:
|
||||||
|
entry = json.loads(line)
|
||||||
|
if not entry.get("success", True):
|
||||||
|
issue = entry.get("issue")
|
||||||
|
if issue:
|
||||||
|
failures[issue] = failures.get(issue, 0) + 1
|
||||||
|
except (json.JSONDecodeError, KeyError):
|
||||||
|
continue
|
||||||
|
return failures
|
||||||
|
|
||||||
|
|
||||||
|
# ── Scoring ─────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
# Patterns that indicate file/function specificity
|
||||||
|
FILE_PATTERNS = re.compile(
|
||||||
|
r"(?:src/|tests/|scripts/|\.py|\.html|\.js|\.yaml|\.toml|\.sh)", re.IGNORECASE
|
||||||
|
)
|
||||||
|
FUNCTION_PATTERNS = re.compile(
|
||||||
|
r"(?:def |class |function |method |`\w+\(\)`)", re.IGNORECASE
|
||||||
|
)
|
||||||
|
|
||||||
|
# Patterns that indicate acceptance criteria
|
||||||
|
ACCEPTANCE_PATTERNS = re.compile(
|
||||||
|
r"(?:should|must|expect|verify|assert|test.?case|acceptance|criteria"
|
||||||
|
r"|pass(?:es|ing)|fail(?:s|ing)|return(?:s)?|raise(?:s)?)",
|
||||||
|
re.IGNORECASE,
|
||||||
|
)
|
||||||
|
TEST_PATTERNS = re.compile(
|
||||||
|
r"(?:tox|pytest|test_\w+|\.test\.|assert\s)", re.IGNORECASE
|
||||||
|
)
|
||||||
|
|
||||||
|
# Tags in issue titles
|
||||||
|
TAG_PATTERN = re.compile(r"\[([^\]]+)\]")
|
||||||
|
|
||||||
|
# Priority labels / tags
|
||||||
|
BUG_TAGS = {"bug", "broken", "crash", "error", "fix", "regression", "hotfix"}
|
||||||
|
FEATURE_TAGS = {"feature", "feat", "enhancement", "capability", "timmy-capability"}
|
||||||
|
REFACTOR_TAGS = {"refactor", "cleanup", "tech-debt", "optimization", "perf"}
|
||||||
|
META_TAGS = {"philosophy", "soul-gap", "discussion", "question", "rfc"}
|
||||||
|
LOOP_TAG = "loop-generated"
|
||||||
|
|
||||||
|
|
||||||
|
def extract_tags(title: str, labels: list[str]) -> set[str]:
|
||||||
|
"""Pull tags from [bracket] notation in title + Gitea labels."""
|
||||||
|
tags = set()
|
||||||
|
for match in TAG_PATTERN.finditer(title):
|
||||||
|
tags.add(match.group(1).lower().strip())
|
||||||
|
for label in labels:
|
||||||
|
tags.add(label.lower().strip())
|
||||||
|
return tags
|
||||||
|
|
||||||
|
|
||||||
|
def score_scope(title: str, body: str, tags: set[str]) -> int:
|
||||||
|
"""0-3: How well-scoped is this issue?"""
|
||||||
|
text = f"{title}\n{body}"
|
||||||
|
score = 0
|
||||||
|
|
||||||
|
# Mentions specific files?
|
||||||
|
if FILE_PATTERNS.search(text):
|
||||||
|
score += 1
|
||||||
|
|
||||||
|
# Mentions specific functions/classes?
|
||||||
|
if FUNCTION_PATTERNS.search(text):
|
||||||
|
score += 1
|
||||||
|
|
||||||
|
# Short, focused title (not a novel)?
|
||||||
|
clean_title = TAG_PATTERN.sub("", title).strip()
|
||||||
|
if len(clean_title) < 80:
|
||||||
|
score += 1
|
||||||
|
|
||||||
|
# Philosophy/meta issues are inherently unscoped for dev work
|
||||||
|
if tags & META_TAGS:
|
||||||
|
score = max(0, score - 2)
|
||||||
|
|
||||||
|
return min(3, score)
|
||||||
|
|
||||||
|
|
||||||
|
def score_acceptance(title: str, body: str, tags: set[str]) -> int:
|
||||||
|
"""0-3: Does this have clear acceptance criteria?"""
|
||||||
|
text = f"{title}\n{body}"
|
||||||
|
score = 0
|
||||||
|
|
||||||
|
# Has acceptance-related language?
|
||||||
|
matches = len(ACCEPTANCE_PATTERNS.findall(text))
|
||||||
|
if matches >= 3:
|
||||||
|
score += 2
|
||||||
|
elif matches >= 1:
|
||||||
|
score += 1
|
||||||
|
|
||||||
|
# Mentions specific tests?
|
||||||
|
if TEST_PATTERNS.search(text):
|
||||||
|
score += 1
|
||||||
|
|
||||||
|
# Has a "## Problem" + "## Solution" or similar structure?
|
||||||
|
if re.search(r"##\s*(problem|solution|expected|actual|steps)", body, re.IGNORECASE):
|
||||||
|
score += 1
|
||||||
|
|
||||||
|
# Philosophy issues don't have testable criteria
|
||||||
|
if tags & META_TAGS:
|
||||||
|
score = max(0, score - 1)
|
||||||
|
|
||||||
|
return min(3, score)
|
||||||
|
|
||||||
|
|
||||||
|
def score_alignment(title: str, body: str, tags: set[str]) -> int:
|
||||||
|
"""0-3: How aligned is this with the north star?"""
|
||||||
|
score = 0
|
||||||
|
|
||||||
|
# Bug on main = highest priority
|
||||||
|
if tags & BUG_TAGS:
|
||||||
|
score += 3
|
||||||
|
return min(3, score)
|
||||||
|
|
||||||
|
# Refactors that improve code health
|
||||||
|
if tags & REFACTOR_TAGS:
|
||||||
|
score += 2
|
||||||
|
|
||||||
|
# Features that grow Timmy's capabilities
|
||||||
|
if tags & FEATURE_TAGS:
|
||||||
|
score += 2
|
||||||
|
|
||||||
|
# Loop-generated issues get a small boost (the loop found real problems)
|
||||||
|
if LOOP_TAG in tags:
|
||||||
|
score += 1
|
||||||
|
|
||||||
|
# Philosophy issues are important but not dev-actionable
|
||||||
|
if tags & META_TAGS:
|
||||||
|
score = 0
|
||||||
|
|
||||||
|
return min(3, score)
|
||||||
|
|
||||||
|
|
||||||
|
def score_issue(issue: dict) -> dict:
    """Score a single issue. Returns enriched dict."""
    title = issue.get("title", "")
    body = issue.get("body", "") or ""
    labels = [l["name"] for l in issue.get("labels", [])]
    tags = extract_tags(title, labels)
    number = issue["number"]

    scope = score_scope(title, body, tags)
    acceptance = score_acceptance(title, body, tags)
    alignment = score_alignment(title, body, tags)
    total = scope + acceptance + alignment

    # Determine issue type
    if tags & BUG_TAGS:
        issue_type = "bug"
    elif tags & FEATURE_TAGS:
        issue_type = "feature"
    elif tags & REFACTOR_TAGS:
        issue_type = "refactor"
    elif tags & META_TAGS:
        issue_type = "philosophy"
    else:
        issue_type = "unknown"

    # Extract mentioned files from body
    files = list(set(re.findall(r"(?:src|tests|scripts)/[\w/.]+\.(?:py|html|js|yaml)", body)))

    return {
        "issue": number,
        "title": TAG_PATTERN.sub("", title).strip(),
        "type": issue_type,
        "score": total,
        "scope": scope,
        "acceptance": acceptance,
        "alignment": alignment,
        "tags": sorted(tags),
        "files": files[:10],
        "ready": total >= READY_THRESHOLD,
    }
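
# Worked example (annotation, not file content): an issue tagged "bug" that
# names a file and uses ## Problem / ## Solution sections scores on all three
# axes. For instance
#
#     {"number": 42, "title": "[bug] crash in src/triage.py",
#      "body": "## Problem ...\n## Solution ...", "labels": [{"name": "bug"}]}
#
# yields type="bug" and alignment=3, with scope/acceptance points for the
# file mention, the headings, and the short title; "ready" flips to True
# once the total clears READY_THRESHOLD.
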
# ── Quarantine ──────────────────────────────────────────────────────────

def update_quarantine(scored: list[dict]) -> list[dict]:
    """Auto-quarantine issues that have failed >= 2 times. Returns filtered list."""
    failures = load_cycle_failures()
    quarantine = load_quarantine()
    now = datetime.now(timezone.utc).isoformat()

    filtered = []
    for item in scored:
        num = item["issue"]
        fail_count = failures.get(num, 0)
        str_num = str(num)

        if fail_count >= 2 and str_num not in quarantine:
            quarantine[str_num] = {
                "reason": f"Failed {fail_count} times in recent cycles",
                "quarantined_at": now,
                "failures": fail_count,
            }
            print(f"[triage] QUARANTINED #{num}: failed {fail_count} times")
            continue

        if str_num in quarantine:
            print(f"[triage] Skipping #{num} (quarantined)")
            continue

        filtered.append(item)

    save_quarantine(quarantine)
    return filtered

# ── Main ────────────────────────────────────────────────────────────────

def run_triage() -> list[dict]:
    token = get_token()

    # Fetch all open issues (paginate)
    page = 1
    all_issues: list[dict] = []
    while True:
        batch = api_get(f"issues?state=open&limit=50&page={page}&type=issues", token)
        if not batch:
            break
        all_issues.extend(batch)
        if len(batch) < 50:
            break
        page += 1

    print(f"[triage] Fetched {len(all_issues)} open issues")

    # Score each
    scored = [score_issue(i) for i in all_issues]

    # Auto-quarantine repeat failures
    scored = update_quarantine(scored)

    # Sort: bugs always on top, then by score descending, then by issue
    # number (ready items float up naturally with their scores)
    def sort_key(item: dict) -> tuple:
        return (
            0 if item["type"] == "bug" else 1,
            -item["score"],
            item["issue"],
        )

    scored.sort(key=sort_key)

    # Write queue (ready items only)
    ready = [s for s in scored if s["ready"]]
    not_ready = [s for s in scored if not s["ready"]]

    QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    QUEUE_FILE.write_text(json.dumps(ready, indent=2) + "\n")

    # Write retro entry
    retro_entry = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "total_open": len(all_issues),
        "scored": len(scored),
        "ready": len(ready),
        "not_ready": len(not_ready),
        "top_issue": ready[0]["issue"] if ready else None,
        "quarantined": len(load_quarantine()),
    }
    RETRO_FILE.parent.mkdir(parents=True, exist_ok=True)
    with open(RETRO_FILE, "a") as f:
        f.write(json.dumps(retro_entry) + "\n")

    # Summary
    print(f"[triage] Ready: {len(ready)} | Not ready: {len(not_ready)}")
    for item in ready[:5]:
        flag = "🐛" if item["type"] == "bug" else "✦"
        print(f"  {flag} #{item['issue']} score={item['score']} {item['title'][:60]}")
    if not_ready:
        print(f"[triage] Low-scoring ({len(not_ready)}):")
        for item in not_ready[:3]:
            print(f"  #{item['issue']} score={item['score']} {item['title'][:50]}")

    return ready


if __name__ == "__main__":
    run_triage()
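
The sort key means bugs outrank everything regardless of score, then higher totals win, with the issue number as a stable tiebreaker. A standalone check of just that ordering:

items = [
    {"type": "feature", "score": 8, "issue": 7},
    {"type": "bug", "score": 4, "issue": 12},
    {"type": "feature", "score": 8, "issue": 3},
]
items.sort(key=lambda i: (0 if i["type"] == "bug" else 1, -i["score"], i["issue"]))
assert [i["issue"] for i in items] == [12, 3, 7]
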
src/config.py (161 changed lines)
@@ -1,10 +1,19 @@
 import logging as _logging
 import os
 import sys
+from datetime import UTC
+from datetime import datetime as _datetime
 from typing import Literal
 
 from pydantic_settings import BaseSettings, SettingsConfigDict
 
+APP_START_TIME: _datetime = _datetime.now(UTC)
+
+
+def normalize_ollama_url(url: str) -> str:
+    """Replace localhost with 127.0.0.1 to avoid IPv6 resolution delays."""
+    return url.replace("localhost", "127.0.0.1")
+
+
 class Settings(BaseSettings):
     """Central configuration — all env-var access goes through this class."""
@@ -15,12 +24,39 @@ class Settings(BaseSettings):
     # Ollama host — override with OLLAMA_URL env var or .env file
     ollama_url: str = "http://localhost:11434"
 
+    @property
+    def normalized_ollama_url(self) -> str:
+        """Return ollama_url with localhost replaced by 127.0.0.1."""
+        return normalize_ollama_url(self.ollama_url)
+
     # LLM model passed to Agno/Ollama — override with OLLAMA_MODEL
-    # qwen3.5:latest is the primary model — better reasoning and tool calling
+    # qwen3:30b is the primary model — better reasoning and tool calling
     # than llama3.1:8b-instruct while still running locally on modest hardware.
-    # Fallback: llama3.1:8b-instruct if qwen3.5:latest not available.
+    # Fallback: llama3.1:8b-instruct if qwen3:30b not available.
     # llama3.2 (3B) hallucinated tool output consistently in testing.
-    ollama_model: str = "qwen3.5:latest"
+    ollama_model: str = "qwen3:30b"
+
+    # Context window size for Ollama inference — override with OLLAMA_NUM_CTX
+    # qwen3:30b with default context eats 45GB on a 39GB Mac.
+    # 4096 keeps memory at ~19GB. Set to 0 to use model defaults.
+    ollama_num_ctx: int = 4096
+
+    # Fallback model chains — override with FALLBACK_MODELS / VISION_FALLBACK_MODELS
+    # as comma-separated strings, e.g. FALLBACK_MODELS="qwen3:30b,llama3.1"
+    # Or edit config/providers.yaml → fallback_chains for the canonical source.
+    fallback_models: list[str] = [
+        "llama3.1:8b-instruct",
+        "llama3.1",
+        "qwen2.5:14b",
+        "qwen2.5:7b",
+        "llama3.2:3b",
+    ]
+    vision_fallback_models: list[str] = [
+        "llama3.2:3b",
+        "llava:7b",
+        "qwen2.5-vl:3b",
+        "moondream:1.8b",
+    ]
+
     # Set DEBUG=true to enable /docs and /redoc (disabled by default)
     debug: bool = False
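
Since Settings is a pydantic-settings model, each field above resolves from the environment (or .env) by its upper-cased name. A minimal sketch, assuming the Settings class as defined in this file (values are examples only):

import os

os.environ["OLLAMA_MODEL"] = "llama3.1:8b-instruct"  # example override
os.environ["OLLAMA_NUM_CTX"] = "0"                   # 0 = use the model's default context

settings = Settings()  # re-instantiating picks up the overrides
assert settings.ollama_model == "llama3.1:8b-instruct"
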
@@ -38,23 +74,17 @@ class Settings(BaseSettings):
     # Seconds to wait for user confirmation before auto-rejecting.
     discord_confirm_timeout: int = 120
 
-    # ── AirLLM / backend selection ───────────────────────────────────────────
+    # ── Backend selection ────────────────────────────────────────────────────
     # "ollama" — always use Ollama (default, safe everywhere)
-    # "airllm" — always use AirLLM (requires pip install ".[bigbrain]")
-    # "auto" — use AirLLM on Apple Silicon if airllm is installed,
-    #          fall back to Ollama otherwise
-    timmy_model_backend: Literal["ollama", "airllm", "grok", "claude", "auto"] = "ollama"
-
-    # AirLLM model size when backend is airllm or auto.
-    # Larger = smarter, but needs more RAM / disk.
-    # 8b ~16 GB | 70b ~140 GB | 405b ~810 GB
-    airllm_model_size: Literal["8b", "70b", "405b"] = "70b"
+    # "auto" — pick best available local backend, fall back to Ollama
+    timmy_model_backend: Literal["ollama", "grok", "claude", "auto"] = "ollama"
 
     # ── Grok (xAI) — opt-in premium cloud backend ────────────────────────
     # Grok is a premium augmentation layer — local-first ethos preserved.
     # Only used when explicitly enabled and query complexity warrants it.
     grok_enabled: bool = False
     xai_api_key: str = ""
+    xai_base_url: str = "https://api.x.ai/v1"
     grok_default_model: str = "grok-3-fast"
     grok_max_sats_per_query: int = 200
     grok_free: bool = False  # Skip Lightning invoice when user has own API key
@@ -112,7 +142,24 @@ class Settings(BaseSettings):
 
     # CORS allowed origins for the web chat interface (Gitea Pages, etc.)
     # Set CORS_ORIGINS as a comma-separated list, e.g. "http://localhost:3000,https://example.com"
-    cors_origins: list[str] = ["*"]
+    cors_origins: list[str] = [
+        "http://localhost:3000",
+        "http://localhost:8000",
+        "http://127.0.0.1:3000",
+        "http://127.0.0.1:8000",
+    ]
+
+    # ── Matrix Frontend Integration ────────────────────────────────────────
+    # URL of the Matrix frontend (Replit/Tailscale) for CORS.
+    # When set, this origin is added to CORS allowed_origins.
+    # Example: "http://100.124.176.28:8080" or "https://alexanderwhitestone.com"
+    matrix_frontend_url: str = ""  # Empty = disabled
+
+    # WebSocket authentication token for Matrix connections.
+    # When set, clients must provide this token via ?token= query param
+    # or in the first message as {"type": "auth", "token": "..."}.
+    # Empty/unset = auth disabled (dev mode).
+    matrix_ws_token: str = ""
 
     # Trusted hosts for the Host header check (TrustedHostMiddleware).
     # Set TRUSTED_HOSTS as a comma-separated list. Wildcards supported (e.g. "*.ts.net").
@@ -212,24 +259,31 @@ class Settings(BaseSettings):
     # Fallback to server when browser model is unavailable or too slow.
     browser_model_fallback: bool = True
 
+    # ── Deep Focus Mode ─────────────────────────────────────────────
+    # "deep" = single-problem context; "broad" = default multi-task.
+    focus_mode: Literal["deep", "broad"] = "broad"
+
     # ── Default Thinking ──────────────────────────────────────────────
     # When enabled, the agent starts an internal thought loop on server start.
     thinking_enabled: bool = True
     thinking_interval_seconds: int = 300  # 5 minutes between thoughts
+    thinking_timeout_seconds: int = 120  # max wall-clock time per thinking cycle
     thinking_distill_every: int = 10  # distill facts from thoughts every Nth thought
     thinking_issue_every: int = 20  # file Gitea issues from thoughts every Nth thought
+    thinking_memory_check_every: int = 50  # check memory status every Nth thought
+    thinking_idle_timeout_minutes: int = 60  # pause thoughts after N minutes without user input
 
     # ── Gitea Integration ─────────────────────────────────────────────
     # Local Gitea instance for issue tracking and self-improvement.
     # These values are passed as env vars to the gitea-mcp server process.
     gitea_url: str = "http://localhost:3000"
-    gitea_token: str = ""  # GITEA_TOKEN env var; falls back to ~/.config/gitea/token
+    gitea_token: str = ""  # GITEA_TOKEN env var; falls back to .timmy_gitea_token
     gitea_repo: str = "rockachopa/Timmy-time-dashboard"  # owner/repo
     gitea_enabled: bool = True
 
     # ── MCP Servers ────────────────────────────────────────────────────
     # External tool servers connected via Model Context Protocol (stdio).
-    mcp_gitea_command: str = "gitea-mcp -t stdio"
+    mcp_gitea_command: str = "gitea-mcp-server -t stdio"
     mcp_filesystem_command: str = "npx -y @modelcontextprotocol/server-filesystem"
     mcp_timeout: int = 15
 
@@ -324,14 +378,19 @@ class Settings(BaseSettings):
     def model_post_init(self, __context) -> None:
         """Post-init: resolve gitea_token from file if not set via env."""
         if not self.gitea_token:
-            token_path = os.path.expanduser("~/.config/gitea/token")
-            try:
-                if os.path.isfile(token_path):
-                    token = open(token_path).read().strip()  # noqa: SIM115
-                    if token:
-                        self.gitea_token = token
-            except OSError:
-                pass
+            # Priority: Timmy's own token → legacy admin token
+            repo_root = self._compute_repo_root()
+            timmy_token_path = os.path.join(repo_root, ".timmy_gitea_token")
+            legacy_token_path = os.path.expanduser("~/.config/gitea/token")
+            for token_path in (timmy_token_path, legacy_token_path):
+                try:
+                    if os.path.isfile(token_path):
+                        token = open(token_path).read().strip()  # noqa: SIM115
+                        if token:
+                            self.gitea_token = token
+                            break
+                except OSError:
+                    pass
 
     model_config = SettingsConfigDict(
         env_file=".env",
@@ -346,10 +405,9 @@ if not settings.repo_root:
     settings.repo_root = settings._compute_repo_root()
 
 # ── Model fallback configuration ────────────────────────────────────────────
-# Primary model for reliable tool calling (llama3.1:8b-instruct)
-# Fallback if primary not available: qwen3.5:latest
-OLLAMA_MODEL_PRIMARY: str = "qwen3.5:latest"
-OLLAMA_MODEL_FALLBACK: str = "llama3.1:8b-instruct"
+# Fallback chains are now in settings.fallback_models / settings.vision_fallback_models.
+# Override via env vars (FALLBACK_MODELS, VISION_FALLBACK_MODELS) or
+# edit config/providers.yaml → fallback_chains.
 
 
 def check_ollama_model_available(model_name: str) -> bool:
@@ -358,7 +416,7 @@ def check_ollama_model_available(model_name: str) -> bool:
     import json
     import urllib.request
 
-    url = settings.ollama_url.replace("localhost", "127.0.0.1")
+    url = settings.normalized_ollama_url
     req = urllib.request.Request(
         f"{url}/api/tags",
         method="GET",
@@ -371,33 +429,31 @@ def check_ollama_model_available(model_name: str) -> bool:
             model_name == m or model_name == m.split(":")[0] or m.startswith(model_name)
             for m in models
         )
-    except Exception:
+    except (OSError, ValueError) as exc:
+        _startup_logger.debug("Ollama model check failed: %s", exc)
         return False
 
 
 def get_effective_ollama_model() -> str:
-    """Get the effective Ollama model, with fallback logic."""
-    # If user has overridden, use their setting
+    """Get the effective Ollama model, with fallback logic.
+
+    Walks the configurable ``settings.fallback_models`` chain when the
+    user's preferred model is not available locally.
+    """
     user_model = settings.ollama_model
 
-    # Check if user's model is available
     if check_ollama_model_available(user_model):
        return user_model
 
-    # Try primary
-    if check_ollama_model_available(OLLAMA_MODEL_PRIMARY):
-        _startup_logger.warning(
-            f"Requested model '{user_model}' not available. Using primary: {OLLAMA_MODEL_PRIMARY}"
-        )
-        return OLLAMA_MODEL_PRIMARY
-
-    # Try fallback
-    if check_ollama_model_available(OLLAMA_MODEL_FALLBACK):
-        _startup_logger.warning(
-            f"Primary model '{OLLAMA_MODEL_PRIMARY}' not available. "
-            f"Using fallback: {OLLAMA_MODEL_FALLBACK}"
-        )
-        return OLLAMA_MODEL_FALLBACK
+    # Walk the configurable fallback chain
+    for fallback in settings.fallback_models:
+        if check_ollama_model_available(fallback):
+            _startup_logger.warning(
+                "Requested model '%s' not available. Using fallback: %s",
+                user_model,
+                fallback,
+            )
+            return fallback
 
     # Last resort - return user's setting and hope for the best
     return user_model
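
The availability check treats a bare model name as matching any local tag, which is what lets the fallback walk accept entries like "llama3.1". A standalone sketch of just that matching expression (the tag list below is hypothetical):

models = ["qwen3:30b", "llama3.1:8b-instruct"]  # pretend /api/tags response

def matches(model_name: str) -> bool:
    return any(
        model_name == m or model_name == m.split(":")[0] or m.startswith(model_name)
        for m in models
    )

assert matches("qwen3")       # prefix match against "qwen3:30b"
assert matches("llama3.1")    # base-name match against "llama3.1:8b-instruct"
assert not matches("mistral")
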
@@ -437,8 +493,19 @@ def validate_startup(*, force: bool = False) -> None:
                 ", ".join(_missing),
             )
             sys.exit(1)
+        if "*" in settings.cors_origins:
+            _startup_logger.error(
+                "PRODUCTION SECURITY ERROR: CORS wildcard '*' is not allowed "
+                "in production. Set CORS_ORIGINS to explicit origins."
+            )
+            sys.exit(1)
         _startup_logger.info("Production mode: security secrets validated ✓")
     else:
+        if "*" in settings.cors_origins:
+            _startup_logger.warning(
+                "SEC: CORS_ORIGINS contains wildcard '*' — "
+                "restrict to explicit origins before deploying to production."
+            )
         if not settings.l402_hmac_secret:
             _startup_logger.warning(
                 "SEC: L402_HMAC_SECRET is not set — "
@@ -8,7 +8,9 @@ Key improvements:
 """
 
 import asyncio
+import json
 import logging
+import re
 from contextlib import asynccontextmanager
 from pathlib import Path
@@ -22,12 +24,14 @@ from config import settings
 
 # Import dedicated middleware
 from dashboard.middleware.csrf import CSRFMiddleware
+from dashboard.middleware.rate_limit import RateLimitMiddleware
 from dashboard.middleware.request_logging import RequestLoggingMiddleware
 from dashboard.middleware.security_headers import SecurityHeadersMiddleware
 from dashboard.routes.agents import router as agents_router
 from dashboard.routes.briefing import router as briefing_router
 from dashboard.routes.calm import router as calm_router
 from dashboard.routes.chat_api import router as chat_api_router
+from dashboard.routes.chat_api_v1 import router as chat_api_v1_router
 from dashboard.routes.db_explorer import router as db_explorer_router
 from dashboard.routes.discord import router as discord_router
 from dashboard.routes.experiments import router as experiments_router
@@ -44,8 +48,12 @@ from dashboard.routes.tasks import router as tasks_router
 from dashboard.routes.telegram import router as telegram_router
 from dashboard.routes.thinking import router as thinking_router
 from dashboard.routes.tools import router as tools_router
+from dashboard.routes.tower import router as tower_router
 from dashboard.routes.voice import router as voice_router
 from dashboard.routes.work_orders import router as work_orders_router
+from dashboard.routes.world import matrix_router
+from dashboard.routes.world import router as world_router
+from timmy.workshop_state import PRESENCE_FILE
 
 
 class _ColorFormatter(logging.Formatter):
@@ -151,7 +159,17 @@ async def _thinking_scheduler() -> None:
     while True:
         try:
             if settings.thinking_enabled:
-                await thinking_engine.think_once()
+                await asyncio.wait_for(
+                    thinking_engine.think_once(),
+                    timeout=settings.thinking_timeout_seconds,
+                )
+        except TimeoutError:
+            logger.warning(
+                "Thinking cycle timed out after %ds — Ollama may be unresponsive",
+                settings.thinking_timeout_seconds,
+            )
+        except asyncio.CancelledError:
+            raise
         except Exception as exc:
             logger.error("Thinking scheduler error: %s", exc)
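
asyncio.wait_for cancels the awaited coroutine and raises TimeoutError when the budget is exceeded (on Python 3.11+; older versions raise asyncio.TimeoutError), which is why the scheduler can catch TimeoutError and keep looping. A minimal standalone demo of the pattern:

import asyncio

async def slow():
    await asyncio.sleep(10)  # stands in for thinking_engine.think_once()

async def main():
    try:
        await asyncio.wait_for(slow(), timeout=0.1)
    except TimeoutError:  # asyncio.TimeoutError on Python < 3.11
        print("timed out; inner coroutine was cancelled")

asyncio.run(main())
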
@@ -171,7 +189,10 @@ async def _loop_qa_scheduler() -> None:
     while True:
         try:
             if settings.loop_qa_enabled:
-                result = await loop_qa_orchestrator.run_next_test()
+                result = await asyncio.wait_for(
+                    loop_qa_orchestrator.run_next_test(),
+                    timeout=settings.thinking_timeout_seconds,
+                )
                 if result:
                     status = "PASS" if result["success"] else "FAIL"
                     logger.info(
@@ -180,6 +201,13 @@ async def _loop_qa_scheduler() -> None:
                         status,
                         result.get("details", "")[:80],
                     )
+        except TimeoutError:
+            logger.warning(
+                "Loop QA test timed out after %ds",
+                settings.thinking_timeout_seconds,
+            )
+        except asyncio.CancelledError:
+            raise
         except Exception as exc:
             logger.error("Loop QA scheduler error: %s", exc)
@@ -187,6 +215,54 @@ async def _loop_qa_scheduler() -> None:
         await asyncio.sleep(interval)
 
 
+_PRESENCE_POLL_SECONDS = 30
+_PRESENCE_INITIAL_DELAY = 3
+
+_SYNTHESIZED_STATE: dict = {
+    "version": 1,
+    "liveness": None,
+    "current_focus": "",
+    "mood": "idle",
+    "active_threads": [],
+    "recent_events": [],
+    "concerns": [],
+}
+
+
+async def _presence_watcher() -> None:
+    """Background task: watch ~/.timmy/presence.json and broadcast changes via WS.
+
+    Polls the file every 30 seconds (matching Timmy's write cadence).
+    If the file doesn't exist, broadcasts a synthesised idle state.
+    """
+    from infrastructure.ws_manager.handler import ws_manager as ws_mgr
+
+    await asyncio.sleep(_PRESENCE_INITIAL_DELAY)  # Stagger after other schedulers
+
+    last_mtime: float = 0.0
+
+    while True:
+        try:
+            if PRESENCE_FILE.exists():
+                mtime = PRESENCE_FILE.stat().st_mtime
+                if mtime != last_mtime:
+                    last_mtime = mtime
+                    raw = await asyncio.to_thread(PRESENCE_FILE.read_text)
+                    state = json.loads(raw)
+                    await ws_mgr.broadcast("timmy_state", state)
+            else:
+                # File absent — broadcast synthesised state once per cycle
+                if last_mtime != -1.0:
+                    last_mtime = -1.0
+                    await ws_mgr.broadcast("timmy_state", _SYNTHESIZED_STATE)
+        except json.JSONDecodeError as exc:
+            logger.warning("presence.json parse error: %s", exc)
+        except Exception as exc:
+            logger.warning("Presence watcher error: %s", exc)
+
+        await asyncio.sleep(_PRESENCE_POLL_SECONDS)
+
+
 async def _start_chat_integrations_background() -> None:
     """Background task: start chat integrations without blocking startup."""
     from integrations.chat_bridge.registry import platform_registry
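
The watcher uses last_mtime as a tri-state: a real mtime, 0.0 at startup, and -1.0 as a "file absent, idle state already sent" sentinel, so the synthesised state is broadcast only once per absence. The change-detection logic in isolation (a sketch, not the module's code):

def should_broadcast(mtime: float | None, last: float) -> tuple[bool, float]:
    """mtime=None models a missing presence.json."""
    if mtime is None:
        return (last != -1.0), -1.0  # broadcast the idle state only once
    return (mtime != last), mtime

assert should_broadcast(None, 0.0) == (True, -1.0)    # first absence: broadcast
assert should_broadcast(None, -1.0) == (False, -1.0)  # still absent: quiet
assert should_broadcast(1_700_000_000.0, -1.0)[0]     # file reappears: broadcast
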
@@ -277,116 +353,118 @@ async def _discord_token_watcher() -> None:
         logger.warning("Discord auto-start failed: %s", exc)
 
 
-@asynccontextmanager
-async def lifespan(app: FastAPI):
-    """Application lifespan manager with non-blocking startup."""
-    # Validate security config (no-op in test mode)
+def _startup_init() -> None:
+    """Validate config and enable event persistence."""
    from config import validate_startup
 
    validate_startup()
 
-    # Enable event persistence (unified EventBus + swarm event_log)
    from infrastructure.events.bus import init_event_bus_persistence
 
    init_event_bus_persistence()
 
-    # Create all background tasks without waiting for them
-    briefing_task = asyncio.create_task(_briefing_scheduler())
-    thinking_task = asyncio.create_task(_thinking_scheduler())
-    loop_qa_task = asyncio.create_task(_loop_qa_scheduler())
-
-    # Initialize Spark Intelligence engine
    from spark.engine import get_spark_engine
 
    if get_spark_engine().enabled:
        logger.info("Spark Intelligence active — event capture enabled")
 
-    # Auto-prune old vector store memories on startup
-    if settings.memory_prune_days > 0:
-        try:
-            from timmy.memory.vector_store import prune_memories
-
-            pruned = prune_memories(
-                older_than_days=settings.memory_prune_days,
-                keep_facts=settings.memory_prune_keep_facts,
-            )
-            if pruned:
-                logger.info(
-                    "Memory auto-prune: removed %d entries older than %d days",
-                    pruned,
-                    settings.memory_prune_days,
-                )
-        except Exception as exc:
-            logger.debug("Memory auto-prune skipped: %s", exc)
+
+def _startup_background_tasks() -> list[asyncio.Task]:
+    """Spawn all recurring background tasks (non-blocking)."""
+    return [
+        asyncio.create_task(_briefing_scheduler()),
+        asyncio.create_task(_thinking_scheduler()),
+        asyncio.create_task(_loop_qa_scheduler()),
+        asyncio.create_task(_presence_watcher()),
+        asyncio.create_task(_start_chat_integrations_background()),
+    ]
+
+
+def _try_prune(label: str, prune_fn, days: int) -> None:
+    """Run a prune function, log results, swallow errors."""
+    try:
+        pruned = prune_fn()
+        if pruned:
+            logger.info(
+                "%s auto-prune: removed %d entries older than %d days",
+                label,
+                pruned,
+                days,
+            )
+    except Exception as exc:
+        logger.debug("%s auto-prune skipped: %s", label, exc)
+
+
+def _check_vault_size() -> None:
+    """Warn if the memory vault exceeds the configured size limit."""
+    try:
+        vault_path = Path(settings.repo_root) / "memory" / "notes"
+        if vault_path.exists():
+            total_bytes = sum(f.stat().st_size for f in vault_path.rglob("*") if f.is_file())
+            total_mb = total_bytes / (1024 * 1024)
+            if total_mb > settings.memory_vault_max_mb:
+                logger.warning(
+                    "Memory vault (%.1f MB) exceeds limit (%d MB) — consider archiving old notes",
+                    total_mb,
+                    settings.memory_vault_max_mb,
+                )
+    except Exception as exc:
+        logger.debug("Vault size check skipped: %s", exc)
+
+
+def _startup_pruning() -> None:
+    """Auto-prune old memories, thoughts, and events on startup."""
+    if settings.memory_prune_days > 0:
+        from timmy.memory_system import prune_memories
+
+        _try_prune(
+            "Memory",
+            lambda: prune_memories(
+                older_than_days=settings.memory_prune_days,
+                keep_facts=settings.memory_prune_keep_facts,
+            ),
+            settings.memory_prune_days,
+        )
 
-    # Auto-prune old thoughts on startup
    if settings.thoughts_prune_days > 0:
-        try:
-            from timmy.thinking import thinking_engine
+        from timmy.thinking import thinking_engine
 
-            pruned = thinking_engine.prune_old_thoughts(
+        _try_prune(
+            "Thought",
+            lambda: thinking_engine.prune_old_thoughts(
                keep_days=settings.thoughts_prune_days,
                keep_min=settings.thoughts_prune_keep_min,
-            )
-            if pruned:
-                logger.info(
-                    "Thought auto-prune: removed %d entries older than %d days",
-                    pruned,
-                    settings.thoughts_prune_days,
-                )
-        except Exception as exc:
-            logger.debug("Thought auto-prune skipped: %s", exc)
+            ),
+            settings.thoughts_prune_days,
+        )
 
-    # Auto-prune old system events on startup
    if settings.events_prune_days > 0:
-        try:
-            from swarm.event_log import prune_old_events
+        from swarm.event_log import prune_old_events
 
-            pruned = prune_old_events(
+        _try_prune(
+            "Event",
+            lambda: prune_old_events(
                keep_days=settings.events_prune_days,
                keep_min=settings.events_prune_keep_min,
-            )
-            if pruned:
-                logger.info(
-                    "Event auto-prune: removed %d entries older than %d days",
-                    pruned,
-                    settings.events_prune_days,
-                )
-        except Exception as exc:
-            logger.debug("Event auto-prune skipped: %s", exc)
+            ),
+            settings.events_prune_days,
+        )
 
-    # Warn if memory vault exceeds size limit
    if settings.memory_vault_max_mb > 0:
-        try:
-            vault_path = Path(settings.repo_root) / "memory" / "notes"
-            if vault_path.exists():
-                total_bytes = sum(f.stat().st_size for f in vault_path.rglob("*") if f.is_file())
-                total_mb = total_bytes / (1024 * 1024)
-                if total_mb > settings.memory_vault_max_mb:
-                    logger.warning(
-                        "Memory vault (%.1f MB) exceeds limit (%d MB) — consider archiving old notes",
-                        total_mb,
-                        settings.memory_vault_max_mb,
-                    )
-        except Exception as exc:
-            logger.debug("Vault size check skipped: %s", exc)
+        _check_vault_size()
 
-    # Start chat integrations in background
-    chat_task = asyncio.create_task(_start_chat_integrations_background())
-
-    logger.info("✓ Dashboard ready for requests")
-
-    yield
-
-    # Cleanup on shutdown
+
+async def _shutdown_cleanup(
+    bg_tasks: list[asyncio.Task],
+    workshop_heartbeat,
+) -> None:
+    """Stop chat bots, MCP sessions, heartbeat, and cancel background tasks."""
    from integrations.chat_bridge.vendors.discord import discord_bot
    from integrations.telegram_bot.bot import telegram_bot
 
    await discord_bot.stop()
    await telegram_bot.stop()
 
-    # Close MCP tool server sessions
    try:
        from timmy.mcp_tools import close_mcp_sessions
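
The refactor funnels all three prune paths through _try_prune, passing the actual call as a lambda so a single helper owns the try/except and logging. The shape of that pattern, reduced to a runnable sketch (names below are illustrative, not from the diff):

def try_prune(label: str, prune_fn) -> None:
    try:
        pruned = prune_fn()
        if pruned:
            print(f"{label}: removed {pruned} entries")
    except Exception as exc:
        print(f"{label} skipped: {exc}")

try_prune("Memory", lambda: 42)    # prints "Memory: removed 42 entries"
try_prune("Event", lambda: 1 / 0)  # prints "Event skipped: division by zero"
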
@@ -394,13 +472,44 @@ async def lifespan(app: FastAPI):
     except Exception as exc:
         logger.debug("MCP shutdown: %s", exc)
 
-    for task in [briefing_task, thinking_task, chat_task, loop_qa_task]:
-        if task:
-            task.cancel()
-            try:
-                await task
-            except asyncio.CancelledError:
-                pass
+    await workshop_heartbeat.stop()
+
+    for task in bg_tasks:
+        task.cancel()
+        try:
+            await task
+        except asyncio.CancelledError:
+            pass
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    """Application lifespan manager with non-blocking startup."""
+    _startup_init()
+    bg_tasks = _startup_background_tasks()
+    _startup_pruning()
+
+    # Start Workshop presence heartbeat with WS relay
+    from dashboard.routes.world import broadcast_world_state
+    from timmy.workshop_state import WorkshopHeartbeat
+
+    workshop_heartbeat = WorkshopHeartbeat(on_change=broadcast_world_state)
+    await workshop_heartbeat.start()
+
+    # Register session logger with error capture
+    try:
+        from infrastructure.error_capture import register_error_recorder
+        from timmy.session_logger import get_session_logger
+
+        register_error_recorder(get_session_logger().record_error)
+    except Exception:
+        logger.debug("Failed to register error recorder")
+
+    logger.info("✓ Dashboard ready for requests")
+
+    yield
+
+    await _shutdown_cleanup(bg_tasks, workshop_heartbeat)
 
 
 app = FastAPI(
@@ -413,26 +522,55 @@ app = FastAPI(
 
 
 def _get_cors_origins() -> list[str]:
-    """Get CORS origins from settings, with sensible defaults."""
-    origins = settings.cors_origins
-    if settings.debug and origins == ["*"]:
-        return [
-            "http://localhost:3000",
-            "http://localhost:8000",
-            "http://127.0.0.1:3000",
-            "http://127.0.0.1:8000",
-        ]
+    """Get CORS origins from settings, rejecting wildcards in production.
+
+    Adds matrix_frontend_url when configured. Always allows Tailscale IPs
+    (100.x.x.x range) for development convenience.
+    """
+    origins = list(settings.cors_origins)
+
+    # Strip wildcards in production (security)
+    if "*" in origins and not settings.debug:
+        logger.warning(
+            "Wildcard '*' in CORS_ORIGINS stripped in production — "
+            "set explicit origins via CORS_ORIGINS env var"
+        )
+        origins = [o for o in origins if o != "*"]
+
+    # Add Matrix frontend URL if configured
+    if settings.matrix_frontend_url:
+        url = settings.matrix_frontend_url.strip()
+        if url and url not in origins:
+            origins.append(url)
+            logger.debug("Added Matrix frontend to CORS: %s", url)
+
     return origins
 
 
+# Pattern to match Tailscale IPs (100.x.x.x) for CORS origin regex
+_TAILSCALE_IP_PATTERN = re.compile(r"^https?://100\.\d{1,3}\.\d{1,3}\.\d{1,3}(?::\d+)?$")
+
+
+def _is_tailscale_origin(origin: str) -> bool:
+    """Check if origin is a Tailscale IP (100.x.x.x range)."""
+    return bool(_TAILSCALE_IP_PATTERN.match(origin))
+
+
 # Add dedicated middleware in correct order
 # 1. Logging (outermost to capture everything)
 app.add_middleware(RequestLoggingMiddleware, skip_paths=["/health"])
 
-# 2. Security Headers
+# 2. Rate Limiting (before security to prevent abuse early)
+app.add_middleware(
+    RateLimitMiddleware,
+    path_prefixes=["/api/matrix/"],
+    requests_per_minute=30,
+)
+
+# 3. Security Headers
 app.add_middleware(SecurityHeadersMiddleware, production=not settings.debug)
 
-# 3. CSRF Protection
+# 4. CSRF Protection
 app.add_middleware(CSRFMiddleware)
 
 # 4. Standard FastAPI middleware
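
The Tailscale pattern is anchored on the whole origin, so an attacker cannot smuggle a 100.x address into a query string; note it admits all of 100.0.0.0/8, slightly wider than Tailscale's actual 100.64.0.0/10 CGNAT range. A quick standalone check:

import re

pattern = re.compile(r"^https?://100\.\d{1,3}\.\d{1,3}\.\d{1,3}(?::\d+)?$")

assert pattern.match("http://100.124.176.28:8080")         # the documented example
assert pattern.match("https://100.64.0.1")                 # no port is fine
assert not pattern.match("https://evil.com/?u=100.1.1.1")  # anchored, so no match
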
@@ -446,6 +584,7 @@ app.add_middleware(
 app.add_middleware(
     CORSMiddleware,
     allow_origins=_get_cors_origins(),
+    allow_origin_regex=r"https?://100\.\d{1,3}\.\d{1,3}\.\d{1,3}(:\d+)?",
     allow_credentials=True,
     allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
     allow_headers=["Content-Type", "Authorization"],
@@ -474,6 +613,7 @@ app.include_router(grok_router)
 app.include_router(models_router)
 app.include_router(models_api_router)
 app.include_router(chat_api_router)
+app.include_router(chat_api_v1_router)
 app.include_router(thinking_router)
 app.include_router(calm_router)
 app.include_router(tasks_router)
@@ -482,6 +622,9 @@ app.include_router(loop_qa_router)
 app.include_router(system_router)
 app.include_router(experiments_router)
 app.include_router(db_explorer_router)
+app.include_router(world_router)
+app.include_router(matrix_router)
+app.include_router(tower_router)
 
 
 @app.websocket("/ws")
@@ -500,6 +643,44 @@ async def ws_redirect(websocket: WebSocket):
         await websocket.send({"type": "websocket.close", "code": 1008})
 
 
+@app.websocket("/swarm/live")
+async def swarm_live(websocket: WebSocket):
+    """Swarm live event stream via WebSocket."""
+    from infrastructure.ws_manager.handler import ws_manager as ws_mgr
+
+    await ws_mgr.connect(websocket)
+    try:
+        while True:
+            # Keep connection alive; events are pushed via ws_mgr.broadcast()
+            await websocket.receive_text()
+    except Exception as exc:
+        logger.debug("WebSocket disconnect error: %s", exc)
+        ws_mgr.disconnect(websocket)
+
+
+@app.get("/swarm/agents/sidebar", response_class=HTMLResponse)
+async def swarm_agents_sidebar():
+    """HTMX partial: list active swarm agents for the dashboard sidebar."""
+    try:
+        from config import settings
+
+        agents_yaml = settings.agents_config
+        agents = agents_yaml.get("agents", {})
+        lines = []
+        for name, cfg in agents.items():
+            model = cfg.get("model", "default")
+            lines.append(
+                f'<div class="mc-agent-row">'
+                f'<span class="mc-agent-name">{name}</span>'
+                f'<span class="mc-agent-model">{model}</span>'
+                f"</div>"
+            )
+        return "\n".join(lines) if lines else '<div class="mc-muted">No agents configured</div>'
+    except Exception as exc:
+        logger.debug("Agents sidebar error: %s", exc)
+        return '<div class="mc-muted">Agents unavailable</div>'
+
+
 @app.get("/", response_class=HTMLResponse)
 async def root(request: Request):
     """Serve the main dashboard page."""
@@ -1,6 +1,7 @@
 """Dashboard middleware package."""
 
 from .csrf import CSRFMiddleware, csrf_exempt, generate_csrf_token, validate_csrf_token
+from .rate_limit import RateLimiter, RateLimitMiddleware
 from .request_logging import RequestLoggingMiddleware
 from .security_headers import SecurityHeadersMiddleware
 
@@ -9,6 +10,8 @@ __all__ = [
     "csrf_exempt",
     "generate_csrf_token",
     "validate_csrf_token",
+    "RateLimiter",
+    "RateLimitMiddleware",
     "SecurityHeadersMiddleware",
     "RequestLoggingMiddleware",
 ]
@@ -5,6 +5,7 @@ to protect state-changing endpoints from cross-site request attacks.
 """
 
 import hmac
+import logging
 import secrets
 from collections.abc import Callable
 from functools import wraps
@@ -16,6 +17,8 @@ from starlette.responses import JSONResponse, Response
 # Module-level set to track exempt routes
 _exempt_routes: set[str] = set()
 
+logger = logging.getLogger(__name__)
+
 
 def csrf_exempt(endpoint: Callable) -> Callable:
     """Decorator to mark an endpoint as exempt from CSRF validation.
@@ -97,7 +100,7 @@ class CSRFMiddleware(BaseHTTPMiddleware):
         ...
 
     Usage:
-        app.add_middleware(CSRFMiddleware, secret="your-secret-key")
+        app.add_middleware(CSRFMiddleware, secret=settings.csrf_secret)
 
     Attributes:
         secret: Secret key for token signing (optional, for future use).
@@ -128,58 +131,64 @@ class CSRFMiddleware(BaseHTTPMiddleware):
         For safe methods: Set a CSRF token cookie if not present.
         For unsafe methods: Validate the CSRF token or check if exempt.
         """
-        # Bypass CSRF if explicitly disabled (e.g. in tests)
         from config import settings
 
         if settings.timmy_disable_csrf:
             return await call_next(request)
 
-        # Get existing CSRF token from cookie
+        # WebSocket upgrades don't carry CSRF tokens — skip them entirely
+        if request.headers.get("upgrade", "").lower() == "websocket":
+            return await call_next(request)
+
         csrf_cookie = request.cookies.get(self.cookie_name)
 
-        # For safe methods, just ensure a token exists
         if request.method in self.SAFE_METHODS:
-            response = await call_next(request)
+            return await self._handle_safe_method(request, call_next, csrf_cookie)
 
-            # Set CSRF token cookie if not present
-            if not csrf_cookie:
-                new_token = generate_csrf_token()
-                response.set_cookie(
-                    key=self.cookie_name,
-                    value=new_token,
-                    httponly=False,  # Must be readable by JavaScript
-                    secure=settings.csrf_cookie_secure,
-                    samesite="Lax",
-                    max_age=86400,  # 24 hours
-                )
+        return await self._handle_unsafe_method(request, call_next, csrf_cookie)
 
-            return response
+    async def _handle_safe_method(
+        self, request: Request, call_next, csrf_cookie: str | None
+    ) -> Response:
+        """Handle safe HTTP methods (GET, HEAD, OPTIONS, TRACE).
 
-        # For unsafe methods, we need to validate or check if exempt
-        # First, try to validate the CSRF token
-        if await self._validate_request(request, csrf_cookie):
-            # Token is valid, allow the request
-            return await call_next(request)
+        Forwards the request and sets a CSRF token cookie if not present.
+        """
+        from config import settings
 
-        # Token validation failed, check if the path is exempt
-        path = request.url.path
-        if self._is_likely_exempt(path):
-            # Path is exempt, allow the request
-            return await call_next(request)
-
-        # Token validation failed and path is not exempt
-        # We still need to call the app to check if the endpoint is decorated
-        # with @csrf_exempt, so we'll let it through and check after routing
         response = await call_next(request)
 
-        # After routing, check if the endpoint is marked as exempt
-        endpoint = request.scope.get("endpoint")
-        if endpoint and is_csrf_exempt(endpoint):
-            # Endpoint is marked as exempt, allow the response
-            return response
+        if not csrf_cookie:
+            new_token = generate_csrf_token()
+            response.set_cookie(
+                key=self.cookie_name,
+                value=new_token,
+                httponly=False,  # Must be readable by JavaScript
+                secure=settings.csrf_cookie_secure,
+                samesite="Lax",
+                max_age=86400,  # 24 hours
+            )
+
+        return response
+
+    async def _handle_unsafe_method(
+        self, request: Request, call_next, csrf_cookie: str | None
+    ) -> Response:
+        """Handle unsafe HTTP methods (POST, PUT, DELETE, PATCH).
+
+        Validates the CSRF token, checks path and endpoint exemptions,
+        or returns a 403 error.
+        """
+        if await self._validate_request(request, csrf_cookie):
+            return await call_next(request)
+
+        if self._is_likely_exempt(request.url.path):
+            return await call_next(request)
+
+        endpoint = self._resolve_endpoint(request)
+        if endpoint and is_csrf_exempt(endpoint):
+            return await call_next(request)
 
-        # Endpoint is not exempt and token validation failed
-        # Return 403 error
         return JSONResponse(
             status_code=403,
             content={
@@ -189,6 +198,41 @@ class CSRFMiddleware(BaseHTTPMiddleware):
             },
         )
 
+    def _resolve_endpoint(self, request: Request) -> Callable | None:
+        """Resolve the route endpoint without executing it.
+
+        Walks the Starlette/FastAPI router to find which endpoint function
+        handles this request, so we can check @csrf_exempt before any
+        side effects occur.
+
+        Returns:
+            The endpoint callable, or None if no route matched.
+        """
+        # If routing already happened (endpoint in scope), use it
+        endpoint = request.scope.get("endpoint")
+        if endpoint:
+            return endpoint
+
+        # Walk the middleware/app chain to find something with routes
+        from starlette.routing import Match
+
+        app = self.app
+        while app is not None:
+            if hasattr(app, "routes"):
+                for route in app.routes:
+                    match, _ = route.matches(request.scope)
+                    if match == Match.FULL:
+                        return getattr(route, "endpoint", None)
+            # Try .router (FastAPI stores routes on app.router)
+            if hasattr(app, "router") and hasattr(app.router, "routes"):
+                for route in app.router.routes:
+                    match, _ = route.matches(request.scope)
+                    if match == Match.FULL:
+                        return getattr(route, "endpoint", None)
+            app = getattr(app, "app", None)
+
+        return None
+
     def _is_likely_exempt(self, path: str) -> bool:
         """Check if a path is likely to be CSRF exempt.
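
With _resolve_endpoint in place, @csrf_exempt is honored before the handler runs rather than after. A usage sketch (illustrative only; the route and decorator ordering below are assumptions, not taken from the diff):

# @app.post("/api/webhook")
# @csrf_exempt  # resolved by _resolve_endpoint before any side effects
# async def webhook(payload: dict):
#     ...
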
@@ -274,7 +318,8 @@ class CSRFMiddleware(BaseHTTPMiddleware):
                 form_token = form_data.get(self.form_field)
                 if form_token and validate_csrf_token(str(form_token), csrf_cookie):
                     return True
-            except Exception:
+            except Exception as exc:
+                logger.debug("CSRF form parsing error: %s", exc)
                 # Error parsing form data, treat as invalid
                 pass
src/dashboard/middleware/rate_limit.py (new file, 209 lines)
@@ -0,0 +1,209 @@
"""Rate limiting middleware for FastAPI.

Simple in-memory rate limiter for API endpoints. Tracks requests per IP
with configurable limits and automatic cleanup of stale entries.
"""

import logging
import time
from collections import deque

from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import JSONResponse, Response

logger = logging.getLogger(__name__)


class RateLimiter:
    """In-memory rate limiter for tracking requests per IP.

    Stores request timestamps in a dict keyed by client IP.
    Automatically cleans up stale entries every 60 seconds.

    Attributes:
        requests_per_minute: Maximum requests allowed per minute per IP.
        cleanup_interval_seconds: How often to clean stale entries.
    """

    def __init__(
        self,
        requests_per_minute: int = 30,
        cleanup_interval_seconds: int = 60,
    ):
        self.requests_per_minute = requests_per_minute
        self.cleanup_interval_seconds = cleanup_interval_seconds
        self._storage: dict[str, deque[float]] = {}
        self._last_cleanup: float = time.time()
        self._window_seconds: float = 60.0  # 1 minute window

    def _get_client_ip(self, request: Request) -> str:
        """Extract client IP from request, respecting X-Forwarded-For header.

        Args:
            request: The incoming request.

        Returns:
            Client IP address string.
        """
        # Check for forwarded IP (when behind proxy/load balancer)
        forwarded = request.headers.get("x-forwarded-for")
        if forwarded:
            # Take the first IP in the chain
            return forwarded.split(",")[0].strip()

        real_ip = request.headers.get("x-real-ip")
        if real_ip:
            return real_ip

        # Fall back to direct connection
        if request.client:
            return request.client.host

        return "unknown"

    def _cleanup_if_needed(self) -> None:
        """Remove stale entries older than the cleanup interval."""
        now = time.time()
        if now - self._last_cleanup < self.cleanup_interval_seconds:
            return

        cutoff = now - self._window_seconds
        stale_ips: list[str] = []

        for ip, timestamps in self._storage.items():
            # Remove timestamps older than the window
            while timestamps and timestamps[0] < cutoff:
                timestamps.popleft()
            # Mark IP for removal if no recent requests
            if not timestamps:
                stale_ips.append(ip)

        # Remove stale IP entries
        for ip in stale_ips:
            del self._storage[ip]

        self._last_cleanup = now
        if stale_ips:
            logger.debug("Rate limiter cleanup: removed %d stale IPs", len(stale_ips))

    def is_allowed(self, client_ip: str) -> tuple[bool, float]:
        """Check if a request from the given IP is allowed.

        Args:
            client_ip: The client's IP address.

        Returns:
            Tuple of (allowed: bool, retry_after: float).
            retry_after is seconds until next allowed request, 0 if allowed now.
        """
        now = time.time()
        cutoff = now - self._window_seconds

        # Get or create timestamp deque for this IP
        if client_ip not in self._storage:
            self._storage[client_ip] = deque()

        timestamps = self._storage[client_ip]

        # Remove timestamps outside the window
        while timestamps and timestamps[0] < cutoff:
            timestamps.popleft()

        # Check if limit exceeded
        if len(timestamps) >= self.requests_per_minute:
            # Calculate retry after time
            oldest = timestamps[0]
            retry_after = self._window_seconds - (now - oldest)
            return False, max(0.0, retry_after)

        # Record this request
        timestamps.append(now)
        return True, 0.0

    def check_request(self, request: Request) -> tuple[bool, float]:
        """Check if the request is allowed under rate limits.

        Args:
            request: The incoming request.

        Returns:
            Tuple of (allowed: bool, retry_after: float).
        """
        self._cleanup_if_needed()
|
client_ip = self._get_client_ip(request)
|
||||||
|
return self.is_allowed(client_ip)
|
||||||
|
|
||||||
|
|
||||||
|
class RateLimitMiddleware(BaseHTTPMiddleware):
|
||||||
|
"""Middleware to apply rate limiting to specific routes.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
# Apply to all routes (not recommended for public static files)
|
||||||
|
app.add_middleware(RateLimitMiddleware)
|
||||||
|
|
||||||
|
# Apply only to specific paths
|
||||||
|
app.add_middleware(
|
||||||
|
RateLimitMiddleware,
|
||||||
|
path_prefixes=["/api/matrix/"],
|
||||||
|
requests_per_minute=30,
|
||||||
|
)
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
path_prefixes: List of URL path prefixes to rate limit.
|
||||||
|
If empty, applies to all paths.
|
||||||
|
requests_per_minute: Maximum requests per minute per IP.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
app,
|
||||||
|
path_prefixes: list[str] | None = None,
|
||||||
|
requests_per_minute: int = 30,
|
||||||
|
):
|
||||||
|
super().__init__(app)
|
||||||
|
self.path_prefixes = path_prefixes or []
|
||||||
|
self.limiter = RateLimiter(requests_per_minute=requests_per_minute)
|
||||||
|
|
||||||
|
def _should_rate_limit(self, path: str) -> bool:
|
||||||
|
"""Check if the given path should be rate limited.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
path: The request URL path.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if path matches any configured prefix.
|
||||||
|
"""
|
||||||
|
if not self.path_prefixes:
|
||||||
|
return True
|
||||||
|
return any(path.startswith(prefix) for prefix in self.path_prefixes)
|
||||||
|
|
||||||
|
async def dispatch(self, request: Request, call_next) -> Response:
|
||||||
|
"""Apply rate limiting to configured paths.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
request: The incoming request.
|
||||||
|
call_next: Callable to get the response from downstream.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Response from downstream, or 429 if rate limited.
|
||||||
|
"""
|
||||||
|
# Skip if path doesn't match configured prefixes
|
||||||
|
if not self._should_rate_limit(request.url.path):
|
||||||
|
return await call_next(request)
|
||||||
|
|
||||||
|
# Check rate limit
|
||||||
|
allowed, retry_after = self.limiter.check_request(request)
|
||||||
|
|
||||||
|
if not allowed:
|
||||||
|
return JSONResponse(
|
||||||
|
status_code=429,
|
||||||
|
content={
|
||||||
|
"error": "Rate limit exceeded. Try again later.",
|
||||||
|
"retry_after": int(retry_after) + 1,
|
||||||
|
},
|
||||||
|
headers={"Retry-After": str(int(retry_after) + 1)},
|
||||||
|
)
|
||||||
|
|
||||||
|
# Process the request
|
||||||
|
return await call_next(request)
|
||||||
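A quick sanity check of the sliding-window logic, using only the RateLimiter class added above (standalone; the import path assumes the module location from the file header):

from dashboard.middleware.rate_limit import RateLimiter

limiter = RateLimiter(requests_per_minute=2)
print(limiter.is_allowed("10.0.0.1"))   # (True, 0.0) — first request in the window
print(limiter.is_allowed("10.0.0.1"))   # (True, 0.0) — second request
allowed, retry_after = limiter.is_allowed("10.0.0.1")
print(allowed, round(retry_after))      # False 60 — limit hit; retry once the oldest timestamp ages out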
@@ -42,6 +42,114 @@ class RequestLoggingMiddleware(BaseHTTPMiddleware):
         self.skip_paths = set(skip_paths or [])
         self.log_level = log_level
 
+    def _should_skip_path(self, path: str) -> bool:
+        """Check if the request path should be skipped from logging.
+
+        Args:
+            path: The request URL path.
+
+        Returns:
+            True if the path should be skipped, False otherwise.
+        """
+        return path in self.skip_paths
+
+    def _prepare_request_context(self, request: Request) -> tuple[str, float]:
+        """Prepare context for request processing.
+
+        Generates a correlation ID and records the start time.
+
+        Args:
+            request: The incoming request.
+
+        Returns:
+            Tuple of (correlation_id, start_time).
+        """
+        correlation_id = str(uuid.uuid4())[:8]
+        request.state.correlation_id = correlation_id
+        start_time = time.time()
+        return correlation_id, start_time
+
+    def _get_duration_ms(self, start_time: float) -> float:
+        """Calculate the request duration in milliseconds.
+
+        Args:
+            start_time: The start time from time.time().
+
+        Returns:
+            Duration in milliseconds.
+        """
+        return (time.time() - start_time) * 1000
+
+    def _log_success(
+        self,
+        request: Request,
+        response: Response,
+        correlation_id: str,
+        duration_ms: float,
+        client_ip: str,
+        user_agent: str,
+    ) -> None:
+        """Log a successful request.
+
+        Args:
+            request: The incoming request.
+            response: The response from downstream.
+            correlation_id: The request correlation ID.
+            duration_ms: Request duration in milliseconds.
+            client_ip: Client IP address.
+            user_agent: User-Agent header value.
+        """
+        self._log_request(
+            method=request.method,
+            path=request.url.path,
+            status_code=response.status_code,
+            duration_ms=duration_ms,
+            client_ip=client_ip,
+            user_agent=user_agent,
+            correlation_id=correlation_id,
+        )
+
+    def _log_error(
+        self,
+        request: Request,
+        exc: Exception,
+        correlation_id: str,
+        duration_ms: float,
+        client_ip: str,
+    ) -> None:
+        """Log a failed request and capture the error.
+
+        Args:
+            request: The incoming request.
+            exc: The exception that was raised.
+            correlation_id: The request correlation ID.
+            duration_ms: Request duration in milliseconds.
+            client_ip: Client IP address.
+        """
+        logger.error(
+            f"[{correlation_id}] {request.method} {request.url.path} "
+            f"- ERROR - {duration_ms:.2f}ms - {client_ip} - {str(exc)}"
+        )
+
+        # Auto-escalate: create bug report task from unhandled exception
+        try:
+            from infrastructure.error_capture import capture_error
+
+            capture_error(
+                exc,
+                source="http",
+                context={
+                    "method": request.method,
+                    "path": request.url.path,
+                    "correlation_id": correlation_id,
+                    "client_ip": client_ip,
+                    "duration_ms": f"{duration_ms:.0f}",
+                },
+            )
+        except Exception:
+            # never let escalation break the request
+            logger.warning("Escalation logging error: capture failed")
+
     async def dispatch(self, request: Request, call_next) -> Response:
         """Log the request and response details.
 
@@ -52,73 +160,23 @@ class RequestLoggingMiddleware(BaseHTTPMiddleware):
         Returns:
             The response from downstream.
         """
-        # Check if we should skip logging this path
-        if request.url.path in self.skip_paths:
+        if self._should_skip_path(request.url.path):
             return await call_next(request)
 
-        # Generate correlation ID
-        correlation_id = str(uuid.uuid4())[:8]
-        request.state.correlation_id = correlation_id
-
-        # Record start time
-        start_time = time.time()
+        correlation_id, start_time = self._prepare_request_context(request)
 
-        # Get client info
         client_ip = self._get_client_ip(request)
         user_agent = request.headers.get("user-agent", "-")
 
         try:
-            # Process the request
             response = await call_next(request)
+            duration_ms = self._get_duration_ms(start_time)
+            self._log_success(request, response, correlation_id, duration_ms, client_ip, user_agent)
 
-            # Calculate duration
-            duration_ms = (time.time() - start_time) * 1000
-
-            # Log the request
-            self._log_request(
-                method=request.method,
-                path=request.url.path,
-                status_code=response.status_code,
-                duration_ms=duration_ms,
-                client_ip=client_ip,
-                user_agent=user_agent,
-                correlation_id=correlation_id,
-            )
-
-            # Add correlation ID to response headers
             response.headers["X-Correlation-ID"] = correlation_id
 
             return response
 
         except Exception as exc:
-            # Calculate duration even for failed requests
-            duration_ms = (time.time() - start_time) * 1000
-
-            # Log the error
-            logger.error(
-                f"[{correlation_id}] {request.method} {request.url.path} "
-                f"- ERROR - {duration_ms:.2f}ms - {client_ip} - {str(exc)}"
-            )
-
-            # Auto-escalate: create bug report task from unhandled exception
-            try:
-                from infrastructure.error_capture import capture_error
-
-                capture_error(
-                    exc,
-                    source="http",
-                    context={
-                        "method": request.method,
-                        "path": request.url.path,
-                        "correlation_id": correlation_id,
-                        "client_ip": client_ip,
-                        "duration_ms": f"{duration_ms:.0f}",
-                    },
-                )
-            except Exception:
-                pass  # never let escalation break the request
-
-            # Re-raise the exception
+            duration_ms = self._get_duration_ms(start_time)
+            self._log_error(request, exc, correlation_id, duration_ms, client_ip)
             raise
 
     def _get_client_ip(self, request: Request) -> str:
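The refactored dispatch still guarantees an X-Correlation-ID header on every successful response. A minimal check with Starlette's TestClient, assuming an app with RequestLoggingMiddleware installed (the app wiring itself is not shown in this diff):

from starlette.testclient import TestClient

client = TestClient(app)  # assumed: app registers RequestLoggingMiddleware
resp = client.get("/some-logged-path")
print(resp.headers["X-Correlation-ID"])  # 8-char uuid4 prefix, e.g. "3fa85f64"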
@@ -4,10 +4,14 @@ Adds common security headers to all HTTP responses to improve
 application security posture against various attacks.
 """
 
+import logging
+
 from starlette.middleware.base import BaseHTTPMiddleware
 from starlette.requests import Request
 from starlette.responses import Response
 
+logger = logging.getLogger(__name__)
+
 
 class SecurityHeadersMiddleware(BaseHTTPMiddleware):
     """Middleware to add security headers to all responses.
@@ -130,12 +134,8 @@ class SecurityHeadersMiddleware(BaseHTTPMiddleware):
         """
         try:
             response = await call_next(request)
-        except Exception:
-            import logging
-
-            logging.getLogger(__name__).debug(
-                "Upstream error in security headers middleware", exc_info=True
-            )
+        except Exception as exc:
+            logger.debug("Upstream error in security headers middleware: %s", exc)
             from starlette.responses import PlainTextResponse
 
             response = PlainTextResponse("Internal Server Error", status_code=500)
@@ -1,4 +1,4 @@
-from datetime import date, datetime
+from datetime import UTC, date, datetime
 from enum import StrEnum
 
 from sqlalchemy import JSON, Boolean, Column, Date, DateTime, Index, Integer, String
@@ -40,8 +40,13 @@ class Task(Base):
     deferred_at = Column(DateTime, nullable=True)
 
     # Timestamps
-    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
-    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)
+    created_at = Column(DateTime, default=lambda: datetime.now(UTC), nullable=False)
+    updated_at = Column(
+        DateTime,
+        default=lambda: datetime.now(UTC),
+        onupdate=lambda: datetime.now(UTC),
+        nullable=False,
+    )
 
     __table_args__ = (Index("ix_task_state_order", "state", "sort_order"),)
 
@@ -59,4 +64,4 @@ class JournalEntry(Base):
     gratitude = Column(String(500), nullable=True)
     energy_level = Column(Integer, nullable=True)  # User-reported, 1-10
 
-    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
+    created_at = Column(DateTime, default=lambda: datetime.now(UTC), nullable=False)
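The datetime.utcnow → lambda: datetime.now(UTC) swap does two things: utcnow() returns a naive datetime and is deprecated since Python 3.12, while now(UTC) is timezone-aware; and wrapping it in a lambda keeps the column default a callable, so SQLAlchemy evaluates it per insert rather than freezing one timestamp at class-definition time. Illustration:

from datetime import UTC, datetime

print(datetime.utcnow().tzinfo)  # None — naive, deprecated in 3.12+
print(datetime.now(UTC).tzinfo)  # UTC — timezone-aware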
@@ -12,6 +12,7 @@ from timmy.tool_safety import (
     format_action_description,
     get_impact_level,
 )
+from timmy.welcome import WELCOME_MESSAGE
 
 logger = logging.getLogger(__name__)
 
@@ -56,7 +57,7 @@ async def get_history(request: Request):
     return templates.TemplateResponse(
         request,
         "partials/history.html",
-        {"messages": message_log.all()},
+        {"messages": message_log.all(), "welcome_message": WELCOME_MESSAGE},
     )
 
 
@@ -66,23 +67,91 @@ async def clear_history(request: Request):
     return templates.TemplateResponse(
         request,
         "partials/history.html",
-        {"messages": []},
+        {"messages": [], "welcome_message": WELCOME_MESSAGE},
     )
 
 
+def _validate_message(message: str) -> str:
+    """Strip and validate chat input; raise HTTPException on bad input."""
+    from fastapi import HTTPException
+
+    message = message.strip()
+    if not message:
+        raise HTTPException(status_code=400, detail="Message cannot be empty")
+    if len(message) > MAX_MESSAGE_LENGTH:
+        raise HTTPException(status_code=422, detail="Message too long")
+    return message
+
+
+def _record_user_activity() -> None:
+    """Notify the thinking engine that the user is active."""
+    try:
+        from timmy.thinking import thinking_engine
+
+        thinking_engine.record_user_input()
+    except Exception:
+        logger.debug("Failed to record user input for thinking engine")
+
+
+def _extract_tool_actions(run_output) -> list[dict]:
+    """If Agno paused the run for tool confirmation, build approval items."""
+    from timmy.approvals import create_item
+
+    tool_actions: list[dict] = []
+    status = getattr(run_output, "status", None)
+    is_paused = status == "PAUSED" or str(status) == "RunStatus.paused"
+
+    if not (is_paused and getattr(run_output, "active_requirements", None)):
+        return tool_actions
+
+    for req in run_output.active_requirements:
+        if not getattr(req, "needs_confirmation", False):
+            continue
+        te = req.tool_execution
+        tool_name = getattr(te, "tool_name", "unknown")
+        tool_args = getattr(te, "tool_args", {}) or {}
+
+        item = create_item(
+            title=f"Dashboard: {tool_name}",
+            description=format_action_description(tool_name, tool_args),
+            proposed_action=json.dumps({"tool": tool_name, "args": tool_args}),
+            impact=get_impact_level(tool_name),
+        )
+        _pending_runs[item.id] = {
+            "run_output": run_output,
+            "requirement": req,
+            "tool_name": tool_name,
+            "tool_args": tool_args,
+        }
+        tool_actions.append(
+            {
+                "approval_id": item.id,
+                "tool_name": tool_name,
+                "description": format_action_description(tool_name, tool_args),
+                "impact": get_impact_level(tool_name),
+            }
+        )
+    return tool_actions
+
+
+def _log_exchange(
+    message: str, response_text: str | None, error_text: str | None, timestamp: str
+) -> None:
+    """Append user message and agent/error reply to the in-memory log."""
+    message_log.append(role="user", content=message, timestamp=timestamp, source="browser")
+    if response_text:
+        message_log.append(
+            role="agent", content=response_text, timestamp=timestamp, source="browser"
+        )
+    elif error_text:
+        message_log.append(role="error", content=error_text, timestamp=timestamp, source="browser")
+
+
 @router.post("/default/chat", response_class=HTMLResponse)
 async def chat_agent(request: Request, message: str = Form(...)):
     """Chat — synchronous response with native Agno tool confirmation."""
-    message = message.strip()
-    if not message:
-        from fastapi import HTTPException
-
-        raise HTTPException(status_code=400, detail="Message cannot be empty")
-
-    if len(message) > MAX_MESSAGE_LENGTH:
-        from fastapi import HTTPException
-
-        raise HTTPException(status_code=422, detail="Message too long")
-
+    message = _validate_message(message)
+    _record_user_activity()
     timestamp = datetime.now().strftime("%H:%M:%S")
     response_text = None
@@ -95,54 +164,15 @@ async def chat_agent(request: Request, message: str = Form(...)):
         error_text = f"Chat error: {exc}"
         run_output = None
 
-    # Check if Agno paused the run for tool confirmation
-    tool_actions = []
+    tool_actions: list[dict] = []
     if run_output is not None:
-        status = getattr(run_output, "status", None)
-        is_paused = status == "PAUSED" or str(status) == "RunStatus.paused"
-
-        if is_paused and getattr(run_output, "active_requirements", None):
-            for req in run_output.active_requirements:
-                if getattr(req, "needs_confirmation", False):
-                    te = req.tool_execution
-                    tool_name = getattr(te, "tool_name", "unknown")
-                    tool_args = getattr(te, "tool_args", {}) or {}
-
-                    from timmy.approvals import create_item
-
-                    item = create_item(
-                        title=f"Dashboard: {tool_name}",
-                        description=format_action_description(tool_name, tool_args),
-                        proposed_action=json.dumps({"tool": tool_name, "args": tool_args}),
-                        impact=get_impact_level(tool_name),
-                    )
-                    _pending_runs[item.id] = {
-                        "run_output": run_output,
-                        "requirement": req,
-                        "tool_name": tool_name,
-                        "tool_args": tool_args,
-                    }
-                    tool_actions.append(
-                        {
-                            "approval_id": item.id,
-                            "tool_name": tool_name,
-                            "description": format_action_description(tool_name, tool_args),
-                            "impact": get_impact_level(tool_name),
-                        }
-                    )
-
+        tool_actions = _extract_tool_actions(run_output)
         raw_content = run_output.content if hasattr(run_output, "content") else ""
         response_text = _clean_response(raw_content or "")
         if not response_text and not tool_actions:
-            response_text = None  # let error template show if needed
+            response_text = None
 
-    message_log.append(role="user", content=message, timestamp=timestamp, source="browser")
-    if response_text:
-        message_log.append(
-            role="agent", content=response_text, timestamp=timestamp, source="browser"
-        )
-    elif error_text:
-        message_log.append(role="error", content=error_text, timestamp=timestamp, source="browser")
-
+    _log_exchange(message, response_text, error_text, timestamp)
     return templates.TemplateResponse(
         request,
@@ -220,7 +250,8 @@ async def reject_tool(request: Request, approval_id: str):
     # Resume so the agent knows the tool was rejected
     try:
         await continue_chat(pending["run_output"])
-    except Exception:
+    except Exception as exc:
+        logger.warning("Agent tool rejection error: %s", exc)
         pass
 
     reject(approval_id)
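_extract_tool_actions keys off Agno's paused-run status. The detection logic can be exercised with plain stand-ins (the SimpleNamespace stubs below are illustrative, not real Agno objects):

from types import SimpleNamespace

run_output = SimpleNamespace(
    status="PAUSED",
    active_requirements=[
        SimpleNamespace(
            needs_confirmation=True,
            tool_execution=SimpleNamespace(
                tool_name="delete_file", tool_args={"path": "notes.txt"}
            ),
        )
    ],
)

status = getattr(run_output, "status", None)
is_paused = status == "PAUSED" or str(status) == "RunStatus.paused"
print(is_paused)  # True — the run is waiting for user confirmation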
@@ -27,7 +27,8 @@ async def get_briefing(request: Request):
     """Return today's briefing page (generated or cached)."""
     try:
         briefing = briefing_engine.get_or_generate()
-    except Exception:
+    except Exception as exc:
+        logger.debug("Briefing generation failed: %s", exc)
         logger.exception("Briefing generation failed")
         now = datetime.now(UTC)
         briefing = Briefing(
@@ -1,5 +1,5 @@
 import logging
-from datetime import date, datetime
+from datetime import UTC, date, datetime
 
 from fastapi import APIRouter, Depends, Form, HTTPException, Request
 from fastapi.responses import HTMLResponse
@@ -19,14 +19,17 @@ router = APIRouter(tags=["calm"])
 
 # Helper functions for state machine logic
 def get_now_task(db: Session) -> Task | None:
+    """Return the single active NOW task, or None."""
     return db.query(Task).filter(Task.state == TaskState.NOW).first()
 
 
 def get_next_task(db: Session) -> Task | None:
+    """Return the single queued NEXT task, or None."""
     return db.query(Task).filter(Task.state == TaskState.NEXT).first()
 
 
 def get_later_tasks(db: Session) -> list[Task]:
+    """Return all LATER tasks ordered by MIT flag then sort_order."""
     return (
         db.query(Task)
         .filter(Task.state == TaskState.LATER)
@@ -35,7 +38,63 @@ def get_later_tasks(db: Session) -> list[Task]:
     )
 
 
+def _create_mit_tasks(db: Session, titles: list[str | None]) -> list[int]:
+    """Create MIT tasks from a list of titles, return their IDs."""
+    task_ids: list[int] = []
+    for title in titles:
+        if title:
+            task = Task(
+                title=title,
+                is_mit=True,
+                state=TaskState.LATER,
+                certainty=TaskCertainty.SOFT,
+            )
+            db.add(task)
+            db.commit()
+            db.refresh(task)
+            task_ids.append(task.id)
+    return task_ids
+
+
+def _create_other_tasks(db: Session, other_tasks: str):
+    """Create non-MIT tasks from newline-separated text."""
+    for line in other_tasks.split("\n"):
+        line = line.strip()
+        if line:
+            task = Task(
+                title=line,
+                state=TaskState.LATER,
+                certainty=TaskCertainty.FUZZY,
+            )
+            db.add(task)
+
+
+def _seed_now_next(db: Session):
+    """Set initial NOW/NEXT states when both slots are empty."""
+    if get_now_task(db) or get_next_task(db):
+        return
+    later_tasks = (
+        db.query(Task)
+        .filter(Task.state == TaskState.LATER)
+        .order_by(Task.is_mit.desc(), Task.sort_order)
+        .all()
+    )
+    if later_tasks:
+        later_tasks[0].state = TaskState.NOW
+        db.add(later_tasks[0])
+        db.flush()
+        if len(later_tasks) > 1:
+            later_tasks[1].state = TaskState.NEXT
+            db.add(later_tasks[1])
+
+
 def promote_tasks(db: Session):
+    """Enforce the NOW/NEXT/LATER state machine invariants.
+
+    - At most one NOW task (extras demoted to NEXT).
+    - If no NOW, promote NEXT -> NOW.
+    - If no NEXT, promote highest-priority LATER -> NEXT.
+    """
     # Ensure only one NOW task exists. If multiple, demote extras to NEXT.
     now_tasks = db.query(Task).filter(Task.state == TaskState.NOW).all()
     if len(now_tasks) > 1:
@@ -74,6 +133,7 @@ def promote_tasks(db: Session):
 # Endpoints
 @router.get("/calm", response_class=HTMLResponse)
 async def get_calm_view(request: Request, db: Session = Depends(get_db)):
+    """Render the main CALM dashboard with NOW/NEXT/LATER counts."""
     now_task = get_now_task(db)
     next_task = get_next_task(db)
     later_tasks_count = len(get_later_tasks(db))
@@ -90,6 +150,7 @@ async def get_calm_view(request: Request, db: Session = Depends(get_db)):
 
 @router.get("/calm/ritual/morning", response_class=HTMLResponse)
 async def get_morning_ritual_form(request: Request):
+    """Render the morning ritual intake form."""
     return templates.TemplateResponse(request, "calm/morning_ritual_form.html", {})
 
 
@@ -102,63 +163,20 @@ async def post_morning_ritual(
     mit3_title: str = Form(None),
     other_tasks: str = Form(""),
 ):
-    # Create Journal Entry
-    mit_task_ids = []
+    """Process morning ritual: create MITs, other tasks, and set initial states."""
     journal_entry = JournalEntry(entry_date=date.today())
     db.add(journal_entry)
     db.commit()
     db.refresh(journal_entry)
 
-    # Create MIT tasks
-    for mit_title in [mit1_title, mit2_title, mit3_title]:
-        if mit_title:
-            task = Task(
-                title=mit_title,
-                is_mit=True,
-                state=TaskState.LATER,  # Initially LATER, will be promoted
-                certainty=TaskCertainty.SOFT,
-            )
-            db.add(task)
-            db.commit()
-            db.refresh(task)
-            mit_task_ids.append(task.id)
-
-    journal_entry.mit_task_ids = mit_task_ids
+    journal_entry.mit_task_ids = _create_mit_tasks(db, [mit1_title, mit2_title, mit3_title])
     db.add(journal_entry)
 
-    # Create other tasks
-    for task_title in other_tasks.split("\n"):
-        task_title = task_title.strip()
-        if task_title:
-            task = Task(
-                title=task_title,
-                state=TaskState.LATER,
-                certainty=TaskCertainty.FUZZY,
-            )
-            db.add(task)
-
+    _create_other_tasks(db, other_tasks)
     db.commit()
 
-    # Set initial NOW/NEXT states after all tasks are created
-    if not get_now_task(db) and not get_next_task(db):
-        later_tasks = (
-            db.query(Task)
-            .filter(Task.state == TaskState.LATER)
-            .order_by(Task.is_mit.desc(), Task.sort_order)
-            .all()
-        )
-        if later_tasks:
-            # Set the highest priority LATER task to NOW
-            later_tasks[0].state = TaskState.NOW
-            db.add(later_tasks[0])
-            db.flush()  # Flush to make the change visible for the next query
-
-            # Set the next highest priority LATER task to NEXT
-            if len(later_tasks) > 1:
-                later_tasks[1].state = TaskState.NEXT
-                db.add(later_tasks[1])
-    db.commit()  # Commit changes after initial NOW/NEXT setup
+    # Set initial NOW/NEXT states
+    _seed_now_next(db)
+    db.commit()
 
     return templates.TemplateResponse(
         request,
@@ -173,6 +191,7 @@ async def post_morning_ritual(
 
 @router.get("/calm/ritual/evening", response_class=HTMLResponse)
 async def get_evening_ritual_form(request: Request, db: Session = Depends(get_db)):
+    """Render the evening ritual form for today's journal entry."""
     journal_entry = db.query(JournalEntry).filter(JournalEntry.entry_date == date.today()).first()
     if not journal_entry:
         raise HTTPException(status_code=404, detail="No journal entry for today")
@@ -189,6 +208,7 @@ async def post_evening_ritual(
     gratitude: str = Form(None),
     energy_level: int = Form(None),
 ):
+    """Process evening ritual: save reflection/gratitude, archive active tasks."""
     journal_entry = db.query(JournalEntry).filter(JournalEntry.entry_date == date.today()).first()
     if not journal_entry:
         raise HTTPException(status_code=404, detail="No journal entry for today")
@@ -206,7 +226,7 @@ async def post_evening_ritual(
     )
     for task in active_tasks:
         task.state = TaskState.DEFERRED  # Or DONE, depending on desired archiving logic
-        task.deferred_at = datetime.utcnow()
+        task.deferred_at = datetime.now(UTC)
         db.add(task)
 
     db.commit()
@@ -223,6 +243,7 @@ async def create_new_task(
     is_mit: bool = Form(False),
     certainty: TaskCertainty = Form(TaskCertainty.SOFT),
 ):
+    """Create a new task in LATER state and return updated count."""
     task = Task(
         title=title,
         description=description,
@@ -247,6 +268,7 @@ async def start_task(
     task_id: int,
     db: Session = Depends(get_db),
 ):
+    """Move a task to NOW state, demoting the current NOW to NEXT."""
     current_now_task = get_now_task(db)
     if current_now_task and current_now_task.id != task_id:
         current_now_task.state = TaskState.NEXT  # Demote current NOW to NEXT
@@ -257,7 +279,7 @@ async def start_task(
         raise HTTPException(status_code=404, detail="Task not found")
 
     task.state = TaskState.NOW
-    task.started_at = datetime.utcnow()
+    task.started_at = datetime.now(UTC)
     db.add(task)
     db.commit()
 
@@ -281,12 +303,13 @@ async def complete_task(
     task_id: int,
     db: Session = Depends(get_db),
 ):
+    """Mark a task as DONE and trigger state promotion."""
    task = db.query(Task).filter(Task.id == task_id).first()
     if not task:
         raise HTTPException(status_code=404, detail="Task not found")
 
     task.state = TaskState.DONE
-    task.completed_at = datetime.utcnow()
+    task.completed_at = datetime.now(UTC)
     db.add(task)
     db.commit()
 
@@ -309,12 +332,13 @@ async def defer_task(
     task_id: int,
     db: Session = Depends(get_db),
 ):
+    """Defer a task and trigger state promotion."""
     task = db.query(Task).filter(Task.id == task_id).first()
     if not task:
         raise HTTPException(status_code=404, detail="Task not found")
 
     task.state = TaskState.DEFERRED
-    task.deferred_at = datetime.utcnow()
+    task.deferred_at = datetime.now(UTC)
     db.add(task)
     db.commit()
 
@@ -333,6 +357,7 @@ async def defer_task(
 
 @router.get("/calm/partials/later_tasks_list", response_class=HTMLResponse)
 async def get_later_tasks_list(request: Request, db: Session = Depends(get_db)):
+    """Render the expandable list of LATER tasks."""
     later_tasks = get_later_tasks(db)
     return templates.TemplateResponse(
         "calm/partials/later_tasks_list.html",
@@ -348,6 +373,7 @@ async def reorder_tasks(
     later_task_ids: str = Form(""),
     next_task_id: int | None = Form(None),
 ):
+    """Reorder LATER tasks and optionally promote one to NEXT."""
     # Reorder LATER tasks
     if later_task_ids:
         ids_in_order = [int(x.strip()) for x in later_task_ids.split(",") if x.strip()]
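promote_tasks and _seed_now_next together enforce a simple invariant: one NOW, one NEXT, everything else LATER. The same invariant modeled on plain lists instead of SQLAlchemy queries (illustrative only, not this module's code):

def promote(now: list[str], next_: list[str], later: list[str]) -> None:
    while len(now) > 1:        # at most one NOW; extras are demoted
        next_.insert(0, now.pop())
    if not now and next_:      # an empty NOW slot pulls from NEXT
        now.append(next_.pop(0))
    if not next_ and later:    # an empty NEXT slot pulls from LATER
        next_.append(later.pop(0))

now, next_, later = [], ["write report"], ["email Bob", "review PR"]
promote(now, next_, later)
print(now, next_, later)  # ['write report'] ['email Bob'] ['review PR']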
@@ -31,6 +31,93 @@ _UPLOAD_DIR = str(Path(settings.repo_root) / "data" / "chat-uploads")
 _MAX_UPLOAD_SIZE = 50 * 1024 * 1024  # 50 MB
 
 
+# ── POST /api/chat — helpers ─────────────────────────────────────────────────
+
+
+async def _parse_chat_body(request: Request) -> tuple[dict | None, JSONResponse | None]:
+    """Parse and validate the JSON request body.
+
+    Returns (body, None) on success or (None, error_response) on failure.
+    """
+    content_length = request.headers.get("content-length")
+    if content_length and int(content_length) > settings.chat_api_max_body_bytes:
+        return None, JSONResponse(status_code=413, content={"error": "Request body too large"})
+
+    try:
+        body = await request.json()
+    except Exception as exc:
+        logger.warning("Chat API JSON parse error: %s", exc)
+        return None, JSONResponse(status_code=400, content={"error": "Invalid JSON"})
+
+    messages = body.get("messages")
+    if not messages or not isinstance(messages, list):
+        return None, JSONResponse(status_code=400, content={"error": "messages array is required"})
+
+    return body, None
+
+
+def _extract_user_message(messages: list[dict]) -> str | None:
+    """Return the text of the last user message, or *None* if absent."""
+    for msg in reversed(messages):
+        if msg.get("role") == "user":
+            content = msg.get("content", "")
+            if isinstance(content, list):
+                text_parts = [
+                    p.get("text", "")
+                    for p in content
+                    if isinstance(p, dict) and p.get("type") == "text"
+                ]
+                return " ".join(text_parts).strip() or None
+            text = str(content).strip()
+            return text or None
+    return None
+
+
+def _build_context_prefix() -> str:
+    """Build the system-context preamble injected before the user message."""
+    now = datetime.now()
+    return (
+        f"[System: Current date/time is "
+        f"{now.strftime('%A, %B %d, %Y at %I:%M %p')}]\n"
+        f"[System: Mobile client]\n\n"
+    )
+
+
+def _notify_thinking_engine() -> None:
+    """Record user activity so the thinking engine knows we're not idle."""
+    try:
+        from timmy.thinking import thinking_engine
+
+        thinking_engine.record_user_input()
+    except Exception:
+        logger.debug("Failed to record user input for thinking engine")
+
+
+async def _process_chat(user_msg: str) -> dict | JSONResponse:
+    """Send *user_msg* to the agent, log the exchange, and return a response."""
+    _notify_thinking_engine()
+    timestamp = datetime.now().strftime("%H:%M:%S")
+
+    try:
+        response_text = await agent_chat(
+            _build_context_prefix() + user_msg,
+            session_id="mobile",
+        )
+        message_log.append(role="user", content=user_msg, timestamp=timestamp, source="api")
+        message_log.append(role="agent", content=response_text, timestamp=timestamp, source="api")
+        return {"reply": response_text, "timestamp": timestamp}
+
+    except Exception as exc:
+        error_msg = f"Agent is offline: {exc}"
+        logger.error("api_chat error: %s", exc)
+        message_log.append(role="user", content=user_msg, timestamp=timestamp, source="api")
+        message_log.append(role="error", content=error_msg, timestamp=timestamp, source="api")
+        return JSONResponse(
+            status_code=503,
+            content={"error": error_msg, "timestamp": timestamp},
+        )
+
+
 # ── POST /api/chat ────────────────────────────────────────────────────────────
@@ -44,69 +131,15 @@ async def api_chat(request: Request):
     Response:
         {"reply": "...", "timestamp": "HH:MM:SS"}
     """
-    # Enforce request body size limit
-    content_length = request.headers.get("content-length")
-    if content_length and int(content_length) > settings.chat_api_max_body_bytes:
-        return JSONResponse(status_code=413, content={"error": "Request body too large"})
-
-    try:
-        body = await request.json()
-    except Exception:
-        return JSONResponse(status_code=400, content={"error": "Invalid JSON"})
-
-    messages = body.get("messages")
-    if not messages or not isinstance(messages, list):
-        return JSONResponse(status_code=400, content={"error": "messages array is required"})
-
-    # Extract the latest user message text
-    last_user_msg = None
-    for msg in reversed(messages):
-        if msg.get("role") == "user":
-            content = msg.get("content", "")
-            # Handle multimodal content arrays — extract text parts
-            if isinstance(content, list):
-                text_parts = [
-                    p.get("text", "")
-                    for p in content
-                    if isinstance(p, dict) and p.get("type") == "text"
-                ]
-                last_user_msg = " ".join(text_parts).strip()
-            else:
-                last_user_msg = str(content).strip()
-            break
-
-    if not last_user_msg:
+    body, err = await _parse_chat_body(request)
+    if err:
+        return err
+
+    user_msg = _extract_user_message(body["messages"])
+    if not user_msg:
         return JSONResponse(status_code=400, content={"error": "No user message found"})
 
-    timestamp = datetime.now().strftime("%H:%M:%S")
-
-    try:
-        # Inject context (same pattern as the HTMX chat handler in agents.py)
-        now = datetime.now()
-        context_prefix = (
-            f"[System: Current date/time is "
-            f"{now.strftime('%A, %B %d, %Y at %I:%M %p')}]\n"
-            f"[System: Mobile client]\n\n"
-        )
-        response_text = await agent_chat(
-            context_prefix + last_user_msg,
-            session_id="mobile",
-        )
-
-        message_log.append(role="user", content=last_user_msg, timestamp=timestamp, source="api")
-        message_log.append(role="agent", content=response_text, timestamp=timestamp, source="api")
-
-        return {"reply": response_text, "timestamp": timestamp}
-
-    except Exception as exc:
-        error_msg = f"Agent is offline: {exc}"
-        logger.error("api_chat error: %s", exc)
-        message_log.append(role="user", content=last_user_msg, timestamp=timestamp, source="api")
-        message_log.append(role="error", content=error_msg, timestamp=timestamp, source="api")
-        return JSONResponse(
-            status_code=503,
-            content={"error": error_msg, "timestamp": timestamp},
-        )
+    return await _process_chat(user_msg)
 
 
 # ── POST /api/upload ──────────────────────────────────────────────────────────
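_extract_user_message flattens OpenAI-style multimodal content arrays down to their text parts. For example:

messages = [
    {"role": "system", "content": "be brief"},
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What's on"},
            {"type": "image_url", "image_url": {"url": "https://example.com/x.png"}},
            {"type": "text", "text": "my calendar?"},
        ],
    },
]
print(_extract_user_message(messages))  # "What's on my calendar?"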
src/dashboard/routes/chat_api_v1.py — new file, 198 lines
@@ -0,0 +1,198 @@
+"""Version 1 (v1) JSON REST API for the Timmy Time iPad app.
+
+This module implements the specific endpoints required by the native
+iPad app as defined in the project specification.
+
+Endpoints:
+    POST /api/v1/chat — Streaming SSE chat response
+    GET /api/v1/chat/history — Retrieve chat history with limit
+    POST /api/v1/upload — Multipart file upload with auto-detection
+    GET /api/v1/status — Detailed system and model status
+"""
+
+import json
+import logging
+import os
+import uuid
+from datetime import UTC, datetime
+from pathlib import Path
+
+from fastapi import APIRouter, File, HTTPException, Query, Request, UploadFile
+from fastapi.responses import JSONResponse, StreamingResponse
+
+from config import APP_START_TIME, settings
+from dashboard.routes.health import _check_ollama
+from dashboard.store import message_log
+from timmy.session import _get_agent
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/api/v1", tags=["chat-api-v1"])
+
+_UPLOAD_DIR = str(Path(settings.repo_root) / "data" / "chat-uploads")
+_MAX_UPLOAD_SIZE = 50 * 1024 * 1024  # 50 MB
+
+
+# ── POST /api/v1/chat ─────────────────────────────────────────────────────────
+
+
+@router.post("/chat")
+async def api_v1_chat(request: Request):
+    """Accept a JSON chat payload and return a streaming SSE response.
+
+    Request body:
+        {
+            "message": "string",
+            "session_id": "string",
+            "attachments": ["id1", "id2"]
+        }
+
+    Response:
+        text/event-stream (SSE)
+    """
+    try:
+        body = await request.json()
+    except Exception as exc:
+        logger.warning("Chat v1 API JSON parse error: %s", exc)
+        return JSONResponse(status_code=400, content={"error": "Invalid JSON"})
+
+    message = body.get("message")
+    session_id = body.get("session_id", "ipad-app")
+    attachments = body.get("attachments", [])
+
+    if not message:
+        return JSONResponse(status_code=400, content={"error": "message is required"})
+
+    # Prepare context for the agent
+    context_prefix = (
+        f"[System: Current date/time is "
+        f"{datetime.now().strftime('%A, %B %d, %Y at %I:%M %p')}]\n"
+        f"[System: iPad App client]\n"
+    )
+
+    if attachments:
+        context_prefix += f"[System: Attachments: {', '.join(attachments)}]\n"
+
+    context_prefix += "\n"
+    full_prompt = context_prefix + message
+
+    async def event_generator():
+        try:
+            agent = _get_agent()
+            # Using streaming mode for SSE
+            async for chunk in agent.arun(full_prompt, stream=True, session_id=session_id):
+                # Agno chunks can be strings or RunOutput
+                content = chunk.content if hasattr(chunk, "content") else str(chunk)
+                if content:
+                    yield f"data: {json.dumps({'text': content})}\n\n"
+
+            yield "data: [DONE]\n\n"
+        except Exception as exc:
+            logger.error("SSE stream error: %s", exc)
+            yield f"data: {json.dumps({'error': str(exc)})}\n\n"
+
+    return StreamingResponse(event_generator(), media_type="text/event-stream")
+
+
+# ── GET /api/v1/chat/history ──────────────────────────────────────────────────
+
+
+@router.get("/chat/history")
+async def api_v1_chat_history(
+    session_id: str = Query("ipad-app"), limit: int = Query(50, ge=1, le=100)
+):
+    """Return recent chat history for a specific session."""
+    # Filter and limit the message log
+    # Note: message_log.all() returns all messages; we filter by source or just return last N
+    all_msgs = message_log.all()
+
+    # In a real implementation, we'd filter by session_id if message_log supported it.
+    # For now, we return the last 'limit' messages.
+    history = [
+        {
+            "role": msg.role,
+            "content": msg.content,
+            "timestamp": msg.timestamp,
+            "source": msg.source,
+        }
+        for msg in all_msgs[-limit:]
+    ]
+
+    return {"messages": history}
+
+
+# ── POST /api/v1/upload ───────────────────────────────────────────────────────
+
+
+@router.post("/upload")
+async def api_v1_upload(file: UploadFile = File(...)):
+    """Accept a file upload, auto-detect type, and return metadata.
+
+    Response:
+        {
+            "id": "string",
+            "type": "image|audio|document|url",
+            "summary": "string",
+            "metadata": {...}
+        }
+    """
+    os.makedirs(_UPLOAD_DIR, exist_ok=True)
+
+    file_id = uuid.uuid4().hex[:12]
+    safe_name = os.path.basename(file.filename or "upload")
+    stored_name = f"{file_id}-{safe_name}"
+    file_path = os.path.join(_UPLOAD_DIR, stored_name)
+
+    # Verify resolved path stays within upload directory
+    resolved = Path(file_path).resolve()
+    upload_root = Path(_UPLOAD_DIR).resolve()
+    if not str(resolved).startswith(str(upload_root)):
+        raise HTTPException(status_code=400, detail="Invalid file name")
+
+    contents = await file.read()
+    if len(contents) > _MAX_UPLOAD_SIZE:
+        raise HTTPException(status_code=413, detail="File too large (max 50 MB)")
+
+    with open(file_path, "wb") as f:
+        f.write(contents)
+
+    # Auto-detect type based on extension/mime
+    mime_type = file.content_type or "application/octet-stream"
+    ext = os.path.splitext(safe_name)[1].lower()
+
+    media_type = "document"
+    if mime_type.startswith("image/") or ext in [".jpg", ".jpeg", ".png", ".heic"]:
+        media_type = "image"
+    elif mime_type.startswith("audio/") or ext in [".m4a", ".mp3", ".wav", ".caf"]:
+        media_type = "audio"
+    elif ext in [".pdf", ".txt", ".md"]:
+        media_type = "document"
+
+    # Placeholder for actual processing (OCR, Whisper, etc.)
+    summary = f"Uploaded {media_type}: {safe_name}"
+
+    return {
+        "id": file_id,
+        "type": media_type,
+        "summary": summary,
+        "url": f"/uploads/{stored_name}",
+        "metadata": {"fileName": safe_name, "mimeType": mime_type, "size": len(contents)},
+    }
+
+
+# ── GET /api/v1/status ────────────────────────────────────────────────────────
+
+
+@router.get("/status")
+async def api_v1_status():
+    """Detailed system and model status."""
+    ollama_status = await _check_ollama()
+    uptime = (datetime.now(UTC) - APP_START_TIME).total_seconds()
+
+    return {
+        "timmy": "online" if ollama_status.status == "healthy" else "offline",
+        "model": settings.ollama_model,
+        "ollama": "running" if ollama_status.status == "healthy" else "stopped",
+        "uptime": f"{int(uptime // 3600)}h {int((uptime % 3600) // 60)}m",
+        "version": "2.0.0-v1-api",
+    }
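A client-side sketch of consuming the streaming endpoint above (assumes httpx is installed and the app listens on localhost:8000; the payload shape is taken from the endpoint's docstring):

import json

import httpx

with httpx.stream(
    "POST",
    "http://localhost:8000/api/v1/chat",
    json={"message": "hello", "session_id": "ipad-app"},
    timeout=None,
) as resp:
    for line in resp.iter_lines():
        if not line.startswith("data: "):
            continue  # skip keep-alives and blank separators
        payload = line[len("data: "):]
        if payload == "[DONE]":
            break
        print(json.loads(payload).get("text", ""), end="")

One caveat on the upload handler's containment check: a plain string startswith comparison also accepts sibling directories such as chat-uploads-evil; resolved.is_relative_to(upload_root) (Python 3.9+) is the stricter test.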
@@ -3,6 +3,7 @@
|
|||||||
import asyncio
|
import asyncio
|
||||||
import logging
|
import logging
|
||||||
import sqlite3
|
import sqlite3
|
||||||
|
from contextlib import closing
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
from fastapi import APIRouter, Request
|
from fastapi import APIRouter, Request
|
||||||
@@ -39,56 +40,52 @@ def _query_database(db_path: str) -> dict:
     """Open a database read-only and return all tables with their rows."""
     result = {"tables": {}, "error": None}
     try:
-        conn = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True)
-        conn.row_factory = sqlite3.Row
-    except Exception as exc:
-        result["error"] = str(exc)
-        return result
-
-    try:
-        tables = conn.execute(
-            "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
-        ).fetchall()
-        for (table_name,) in tables:
-            try:
-                rows = conn.execute(
-                    f"SELECT * FROM [{table_name}] LIMIT {MAX_ROWS}"  # noqa: S608
-                ).fetchall()
-                columns = (
-                    [
-                        desc[0]
-                        for desc in conn.execute(
-                            f"SELECT * FROM [{table_name}] LIMIT 0"
-                        ).description
-                    ]
-                    if rows
-                    else []
-                )  # noqa: S608
-                if not columns and rows:
-                    columns = list(rows[0].keys())
-                elif not columns:
-                    # Get columns even for empty tables
-                    cursor = conn.execute(f"PRAGMA table_info([{table_name}])")  # noqa: S608
-                    columns = [r[1] for r in cursor.fetchall()]
-                count = conn.execute(f"SELECT COUNT(*) FROM [{table_name}]").fetchone()[0]  # noqa: S608
-                result["tables"][table_name] = {
-                    "columns": columns,
-                    "rows": [dict(r) for r in rows],
-                    "total_count": count,
-                    "truncated": count > MAX_ROWS,
-                }
-            except Exception as exc:
-                result["tables"][table_name] = {
-                    "error": str(exc),
-                    "columns": [],
-                    "rows": [],
-                    "total_count": 0,
-                    "truncated": False,
-                }
+        with closing(sqlite3.connect(f"file:{db_path}?mode=ro", uri=True)) as conn:
+            conn.row_factory = sqlite3.Row
+            tables = conn.execute(
+                "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
+            ).fetchall()
+            for (table_name,) in tables:
+                try:
+                    rows = conn.execute(
+                        f"SELECT * FROM [{table_name}] LIMIT {MAX_ROWS}"  # noqa: S608
+                    ).fetchall()
+                    columns = (
+                        [
+                            desc[0]
+                            for desc in conn.execute(
+                                f"SELECT * FROM [{table_name}] LIMIT 0"
+                            ).description
+                        ]
+                        if rows
+                        else []
+                    )  # noqa: S608
+                    if not columns and rows:
+                        columns = list(rows[0].keys())
+                    elif not columns:
+                        # Get columns even for empty tables
+                        cursor = conn.execute(f"PRAGMA table_info([{table_name}])")  # noqa: S608
+                        columns = [r[1] for r in cursor.fetchall()]
+                    count = conn.execute(f"SELECT COUNT(*) FROM [{table_name}]").fetchone()[0]  # noqa: S608
+                    result["tables"][table_name] = {
+                        "columns": columns,
+                        "rows": [dict(r) for r in rows],
+                        "total_count": count,
+                        "truncated": count > MAX_ROWS,
+                    }
+                except Exception as exc:
+                    logger.exception("Failed to query table %s", table_name)
+                    result["tables"][table_name] = {
+                        "error": str(exc),
+                        "columns": [],
+                        "rows": [],
+                        "total_count": 0,
+                        "truncated": False,
+                    }
     except Exception as exc:
+        logger.exception("Failed to query database %s", db_path)
         result["error"] = str(exc)
-    finally:
-        conn.close()
 
     return result
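The pattern introduced here, a read-only URI connection wrapped in `contextlib.closing`, is worth isolating. A minimal standalone sketch (the database path is illustrative):

```python
import sqlite3
from contextlib import closing

# mode=ro opens the file read-only; uri=True is required for file: URIs.
# closing() calls conn.close() on exit even if a query raises, which is
# what lets the diff above drop its explicit finally: conn.close() block.
with closing(sqlite3.connect("file:example.db?mode=ro", uri=True)) as conn:
    conn.row_factory = sqlite3.Row
    tables = [r["name"] for r in conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
    )]
print(tables)
```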
@@ -30,8 +30,8 @@ async def experiments_page(request: Request):
     history = []
     try:
         history = get_experiment_history(_workspace())
-    except Exception:
-        logger.debug("Failed to load experiment history", exc_info=True)
+    except Exception as exc:
+        logger.debug("Failed to load experiment history: %s", exc)
 
     return templates.TemplateResponse(
         request,
@@ -52,8 +52,8 @@ async def grok_status(request: Request):
             "estimated_cost_sats": backend.stats.estimated_cost_sats,
             "errors": backend.stats.errors,
         }
-    except Exception:
-        logger.debug("Failed to load Grok stats", exc_info=True)
+    except Exception as exc:
+        logger.warning("Failed to load Grok stats: %s", exc)
 
     return templates.TemplateResponse(
         request,
@@ -94,8 +94,8 @@ async def toggle_grok_mode(request: Request):
             tool_name="grok_mode_toggle",
             success=True,
         )
-    except Exception:
-        logger.debug("Failed to log Grok toggle to Spark", exc_info=True)
+    except Exception as exc:
+        logger.warning("Failed to log Grok toggle to Spark: %s", exc)
 
     return HTMLResponse(
         _render_toggle_card(_grok_mode_active),
@@ -128,13 +128,14 @@ def _run_grok_query(message: str) -> dict:
         sats = min(settings.grok_max_sats_per_query, 100)
         ln.create_invoice(sats, f"Grok: {message[:50]}")
         invoice_note = f" | {sats} sats"
-    except Exception:
-        logger.debug("Lightning invoice creation failed", exc_info=True)
+    except Exception as exc:
+        logger.warning("Lightning invoice creation failed: %s", exc)
 
     try:
         result = backend.run(message)
         return {"response": f"**[Grok]{invoice_note}:** {result.content}", "error": None}
     except Exception as exc:
+        logger.exception("Grok query failed")
         return {"response": None, "error": f"Grok error: {exc}"}
@@ -193,6 +194,7 @@ async def grok_stats():
             "model": settings.grok_default_model,
         }
     except Exception as exc:
+        logger.exception("Failed to load Grok stats")
         return {"error": str(exc)}
@@ -6,14 +6,18 @@ for the Mission Control dashboard.
 
 import asyncio
 import logging
+import sqlite3
 import time
+from contextlib import closing
 from datetime import UTC, datetime
+from pathlib import Path
 from typing import Any
 
 from fastapi import APIRouter, Request
 from fastapi.responses import HTMLResponse
 from pydantic import BaseModel
 
+from config import APP_START_TIME as _START_TIME
 from config import settings
 
 logger = logging.getLogger(__name__)
@@ -49,7 +53,6 @@ class HealthStatus(BaseModel):
 
 
 # Simple uptime tracking
-_START_TIME = datetime.now(UTC)
 
 # Ollama health cache (30-second TTL)
 _ollama_cache: DependencyStatus | None = None
@@ -62,7 +65,7 @@ def _check_ollama_sync() -> DependencyStatus:
     try:
         import urllib.request
 
-        url = settings.ollama_url.replace("localhost", "127.0.0.1")
+        url = settings.normalized_ollama_url
         req = urllib.request.Request(
             f"{url}/api/tags",
             method="GET",
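The `normalized_ollama_url` property itself is not part of this diff; a plausible sketch of how it might absorb the old inline expression (the class shape is an assumption, only the replaced one-liner is grounded in the hunk above):

```python
# Hypothetical sketch only; the real config.Settings is not shown in this diff.
class Settings:
    ollama_url: str = "http://localhost:11434"

    @property
    def normalized_ollama_url(self) -> str:
        # Same normalization the old inline code did: force IPv4 loopback so
        # "localhost" cannot resolve to ::1 and stall the health check.
        return self.ollama_url.replace("localhost", "127.0.0.1")
```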
@@ -76,8 +79,8 @@ def _check_ollama_sync() -> DependencyStatus:
             sovereignty_score=10,
             details={"url": settings.ollama_url, "model": settings.ollama_model},
         )
-    except Exception:
-        logger.debug("Ollama health check failed", exc_info=True)
+    except Exception as exc:
+        logger.debug("Ollama health check failed: %s", exc)
 
     return DependencyStatus(
         name="Ollama AI",
@@ -101,7 +104,8 @@ async def _check_ollama() -> DependencyStatus:
 
     try:
         result = await asyncio.to_thread(_check_ollama_sync)
-    except Exception:
+    except Exception as exc:
+        logger.debug("Ollama async check failed: %s", exc)
         result = DependencyStatus(
             name="Ollama AI",
             status="unavailable",
@@ -133,13 +137,9 @@ def _check_lightning() -> DependencyStatus:
 def _check_sqlite() -> DependencyStatus:
     """Check SQLite database status."""
     try:
-        import sqlite3
-        from pathlib import Path
-
         db_path = Path(settings.repo_root) / "data" / "timmy.db"
-        conn = sqlite3.connect(str(db_path))
-        conn.execute("SELECT 1")
-        conn.close()
+        with closing(sqlite3.connect(str(db_path))) as conn:
+            conn.execute("SELECT 1")
 
         return DependencyStatus(
             name="SQLite Database",
@@ -148,6 +148,7 @@ def _check_sqlite() -> DependencyStatus:
             details={"path": str(db_path)},
         )
     except Exception as exc:
+        logger.exception("SQLite health check failed")
         return DependencyStatus(
             name="SQLite Database",
             status="unavailable",
@@ -4,7 +4,7 @@ from fastapi import APIRouter, Form, HTTPException, Request
 from fastapi.responses import HTMLResponse, JSONResponse
 
 from dashboard.templating import templates
-from timmy.memory.vector_store import (
+from timmy.memory_system import (
     delete_memory,
     get_memory_stats,
     recall_personal_facts_with_ids,
@@ -1,10 +1,12 @@
 """System-level dashboard routes (ledger, upgrades, etc.)."""
 
 import logging
+from pathlib import Path
 
 from fastapi import APIRouter, Request
 from fastapi.responses import HTMLResponse, JSONResponse
 
+from config import settings
 from dashboard.templating import templates
 
 logger = logging.getLogger(__name__)
@@ -14,52 +16,11 @@ router = APIRouter(tags=["system"])
 
 @router.get("/lightning/ledger", response_class=HTMLResponse)
 async def lightning_ledger(request: Request):
-    """Ledger and balance page."""
-    # Mock data for now, as this seems to be a UI-first feature
-    balance = {
-        "available_sats": 1337,
-        "incoming_total_sats": 2000,
-        "outgoing_total_sats": 663,
-        "fees_paid_sats": 5,
-        "net_sats": 1337,
-        "pending_incoming_sats": 0,
-        "pending_outgoing_sats": 0,
-    }
-
-    # Mock transactions
-    from collections import namedtuple
-    from enum import Enum
-
-    class TxType(Enum):
-        incoming = "incoming"
-        outgoing = "outgoing"
-
-    class TxStatus(Enum):
-        completed = "completed"
-        pending = "pending"
-
-    Tx = namedtuple(
-        "Tx", ["tx_type", "status", "amount_sats", "payment_hash", "memo", "created_at"]
-    )
-
-    transactions = [
-        Tx(
-            TxType.outgoing,
-            TxStatus.completed,
-            50,
-            "hash1",
-            "Model inference",
-            "2026-03-04 10:00:00",
-        ),
-        Tx(
-            TxType.incoming,
-            TxStatus.completed,
-            1000,
-            "hash2",
-            "Manual deposit",
-            "2026-03-03 15:00:00",
-        ),
-    ]
+    """Ledger and balance page backed by the in-memory Lightning ledger."""
+    from lightning.ledger import get_balance, get_transactions
+
+    balance = get_balance()
+    transactions = get_transactions()
 
     return templates.TemplateResponse(
         request,
@@ -68,7 +29,7 @@ async def lightning_ledger(request: Request):
             "balance": balance,
             "transactions": transactions,
             "tx_types": ["incoming", "outgoing"],
-            "tx_statuses": ["completed", "pending"],
+            "tx_statuses": ["pending", "settled", "failed", "expired"],
             "filter_type": None,
             "filter_status": None,
             "stats": {},
@@ -144,5 +105,83 @@ async def api_notifications():
                 for e in events
             ]
         )
-    except Exception:
+    except Exception as exc:
+        logger.debug("System events fetch error: %s", exc)
         return JSONResponse([])
+
+
+@router.get("/api/briefing/status", response_class=JSONResponse)
+async def api_briefing_status():
+    """Return briefing status including pending approvals and last generated time."""
+    from timmy import approvals
+    from timmy.briefing import engine as briefing_engine
+
+    pending = approvals.list_pending()
+    pending_count = len(pending)
+
+    last_generated = None
+    try:
+        cached = briefing_engine.get_cached()
+        if cached:
+            last_generated = cached.generated_at.isoformat()
+    except Exception:
+        logger.debug("Failed to read briefing cache")
+
+    return JSONResponse(
+        {
+            "status": "ok",
+            "pending_approvals": pending_count,
+            "last_generated": last_generated,
+        }
+    )
+
+
+@router.get("/api/memory/status", response_class=JSONResponse)
+async def api_memory_status():
+    """Return memory database status including file info and indexed files count."""
+    from timmy.memory_system import get_memory_stats
+
+    db_path = Path(settings.repo_root) / "data" / "memory.db"
+    db_exists = db_path.exists()
+    db_size = db_path.stat().st_size if db_exists else 0
+
+    try:
+        stats = get_memory_stats()
+        indexed_files = stats.get("total_entries", 0)
+    except Exception:
+        logger.debug("Failed to get memory stats")
+        indexed_files = 0
+
+    return JSONResponse(
+        {
+            "status": "ok",
+            "db_exists": db_exists,
+            "db_size_bytes": db_size,
+            "indexed_files": indexed_files,
+        }
+    )
+
+
+@router.get("/api/swarm/status", response_class=JSONResponse)
+async def api_swarm_status():
+    """Return swarm worker status and pending tasks count."""
+    from dashboard.routes.tasks import _get_db
+
+    pending_tasks = 0
+    try:
+        with _get_db() as db:
+            row = db.execute(
+                "SELECT COUNT(*) as cnt FROM tasks WHERE status IN ('pending_approval','approved')"
+            ).fetchone()
+            pending_tasks = row["cnt"] if row else 0
+    except Exception:
+        logger.debug("Failed to count pending tasks")
+
+    return JSONResponse(
+        {
+            "status": "ok",
+            "active_workers": 0,
+            "pending_tasks": pending_tasks,
+            "message": "Swarm monitoring endpoint",
+        }
+    )
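The three new JSON endpoints can be smoke-tested with a few GETs. The host/port and the `httpx` dependency are assumptions; the routes come from the hunk above.

```python
import httpx

BASE = "http://127.0.0.1:8000"  # assumed dev address
for path in ("/api/briefing/status", "/api/memory/status", "/api/swarm/status"):
    # Each endpoint returns a small {"status": "ok", ...} JSON document.
    print(path, httpx.get(BASE + path).json())
```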
@@ -3,7 +3,9 @@
 import logging
 import sqlite3
 import uuid
-from datetime import datetime
+from collections.abc import Generator
+from contextlib import closing, contextmanager
+from datetime import UTC, datetime
 from pathlib import Path
 
 from fastapi import APIRouter, Form, HTTPException, Request
@@ -35,26 +37,27 @@ VALID_STATUSES = {
 VALID_PRIORITIES = {"low", "normal", "high", "urgent"}
 
 
-def _get_db() -> sqlite3.Connection:
+@contextmanager
+def _get_db() -> Generator[sqlite3.Connection, None, None]:
     DB_PATH.parent.mkdir(parents=True, exist_ok=True)
-    conn = sqlite3.connect(str(DB_PATH))
-    conn.row_factory = sqlite3.Row
-    conn.execute("""
-        CREATE TABLE IF NOT EXISTS tasks (
-            id TEXT PRIMARY KEY,
-            title TEXT NOT NULL,
-            description TEXT DEFAULT '',
-            status TEXT DEFAULT 'pending_approval',
-            priority TEXT DEFAULT 'normal',
-            assigned_to TEXT DEFAULT '',
-            created_by TEXT DEFAULT 'operator',
-            result TEXT DEFAULT '',
-            created_at TEXT DEFAULT (datetime('now')),
-            completed_at TEXT
-        )
-    """)
-    conn.commit()
-    return conn
+    with closing(sqlite3.connect(str(DB_PATH))) as conn:
+        conn.row_factory = sqlite3.Row
+        conn.execute("""
+            CREATE TABLE IF NOT EXISTS tasks (
+                id TEXT PRIMARY KEY,
+                title TEXT NOT NULL,
+                description TEXT DEFAULT '',
+                status TEXT DEFAULT 'pending_approval',
+                priority TEXT DEFAULT 'normal',
+                assigned_to TEXT DEFAULT '',
+                created_by TEXT DEFAULT 'operator',
+                result TEXT DEFAULT '',
+                created_at TEXT DEFAULT (datetime('now')),
+                completed_at TEXT
+            )
+        """)
+        conn.commit()
+        yield conn
 
 
 def _row_to_dict(row: sqlite3.Row) -> dict:
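Callers now use the helper as a context manager; a minimal sketch of the resulting pattern (the query is illustrative):

```python
# _get_db() is the @contextmanager defined above; closing() guarantees
# conn.close() on exit, replacing the old explicit try/finally blocks.
with _get_db() as db:
    rows = db.execute("SELECT * FROM tasks ORDER BY created_at DESC").fetchall()
# The connection is closed here, even if execute() raised.
```

Note that `closing()` only closes; it does not commit or roll back, so writers still call `db.commit()` inside the block, exactly as the handlers below do.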
@@ -101,8 +104,7 @@ class _TaskView:
 
 @router.get("/tasks", response_class=HTMLResponse)
 async def tasks_page(request: Request):
     """Render the main task queue page with 3-column layout."""
-    db = _get_db()
-    try:
+    with _get_db() as db:
         pending = [
             _TaskView(_row_to_dict(r))
             for r in db.execute(
@@ -121,8 +123,6 @@ async def tasks_page(request: Request):
                 "SELECT * FROM tasks WHERE status IN ('completed','vetoed','failed') ORDER BY completed_at DESC LIMIT 50"
             ).fetchall()
         ]
-    finally:
-        db.close()
 
     return templates.TemplateResponse(
         request,
@@ -145,13 +145,10 @@ async def tasks_page(request: Request):
 
 @router.get("/tasks/pending", response_class=HTMLResponse)
 async def tasks_pending(request: Request):
-    db = _get_db()
-    try:
+    with _get_db() as db:
         rows = db.execute(
             "SELECT * FROM tasks WHERE status='pending_approval' ORDER BY created_at DESC"
         ).fetchall()
-    finally:
-        db.close()
     tasks = [_TaskView(_row_to_dict(r)) for r in rows]
     parts = []
     for task in tasks:
@@ -167,13 +164,10 @@ async def tasks_pending(request: Request):
 
 @router.get("/tasks/active", response_class=HTMLResponse)
 async def tasks_active(request: Request):
-    db = _get_db()
-    try:
+    with _get_db() as db:
         rows = db.execute(
             "SELECT * FROM tasks WHERE status IN ('approved','running','paused') ORDER BY created_at DESC"
         ).fetchall()
-    finally:
-        db.close()
     tasks = [_TaskView(_row_to_dict(r)) for r in rows]
     parts = []
     for task in tasks:
@@ -189,13 +183,10 @@ async def tasks_active(request: Request):
 
 @router.get("/tasks/completed", response_class=HTMLResponse)
 async def tasks_completed(request: Request):
-    db = _get_db()
-    try:
+    with _get_db() as db:
         rows = db.execute(
             "SELECT * FROM tasks WHERE status IN ('completed','vetoed','failed') ORDER BY completed_at DESC LIMIT 50"
         ).fetchall()
-    finally:
-        db.close()
     tasks = [_TaskView(_row_to_dict(r)) for r in rows]
     parts = []
     for task in tasks:
@@ -228,19 +219,16 @@ async def create_task_form(
         raise HTTPException(status_code=400, detail="Task title cannot be empty")
 
     task_id = str(uuid.uuid4())
-    now = datetime.utcnow().isoformat()
+    now = datetime.now(UTC).isoformat()
     priority = priority if priority in VALID_PRIORITIES else "normal"
 
-    db = _get_db()
-    try:
+    with _get_db() as db:
         db.execute(
             "INSERT INTO tasks (id, title, description, priority, assigned_to, created_at) VALUES (?, ?, ?, ?, ?, ?)",
             (task_id, title, description, priority, assigned_to, now),
         )
         db.commit()
         row = db.execute("SELECT * FROM tasks WHERE id=?", (task_id,)).fetchone()
-    finally:
-        db.close()
 
     task = _TaskView(_row_to_dict(row))
     return templates.TemplateResponse(request, "partials/task_card.html", {"task": task})
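Why the repeated `datetime.utcnow()` to `datetime.now(UTC)` change matters, in isolation (standard-library behavior, not specific to this diff):

```python
# utcnow() returns a naive datetime (tzinfo is None) and is deprecated
# since Python 3.12; now(UTC) returns an aware one whose isoformat()
# carries an explicit "+00:00" offset, so stored timestamps are unambiguous.
from datetime import UTC, datetime

naive = datetime.utcnow()   # e.g. 2026-03-04T10:00:00.000000
aware = datetime.now(UTC)   # e.g. 2026-03-04T10:00:00.000000+00:00
print(naive.tzinfo, aware.tzinfo)  # None datetime.timezone.utc
```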
@@ -283,16 +271,13 @@ async def modify_task(
     title: str = Form(...),
     description: str = Form(""),
 ):
-    db = _get_db()
-    try:
+    with _get_db() as db:
         db.execute(
             "UPDATE tasks SET title=?, description=? WHERE id=?",
             (title, description, task_id),
         )
         db.commit()
         row = db.execute("SELECT * FROM tasks WHERE id=?", (task_id,)).fetchone()
-    finally:
-        db.close()
     if not row:
         raise HTTPException(404, "Task not found")
     task = _TaskView(_row_to_dict(row))
@@ -302,18 +287,15 @@ async def modify_task(
 async def _set_status(request: Request, task_id: str, new_status: str):
     """Helper to update status and return refreshed task card."""
     completed_at = (
-        datetime.utcnow().isoformat() if new_status in ("completed", "vetoed", "failed") else None
+        datetime.now(UTC).isoformat() if new_status in ("completed", "vetoed", "failed") else None
     )
-    db = _get_db()
-    try:
+    with _get_db() as db:
         db.execute(
             "UPDATE tasks SET status=?, completed_at=COALESCE(?, completed_at) WHERE id=?",
             (new_status, completed_at, task_id),
         )
         db.commit()
         row = db.execute("SELECT * FROM tasks WHERE id=?", (task_id,)).fetchone()
-    finally:
-        db.close()
     if not row:
         raise HTTPException(404, "Task not found")
     task = _TaskView(_row_to_dict(row))
@@ -334,13 +316,12 @@ async def api_create_task(request: Request):
         raise HTTPException(422, "title is required")
 
     task_id = str(uuid.uuid4())
-    now = datetime.utcnow().isoformat()
+    now = datetime.now(UTC).isoformat()
     priority = body.get("priority", "normal")
     if priority not in VALID_PRIORITIES:
         priority = "normal"
 
-    db = _get_db()
-    try:
+    with _get_db() as db:
         db.execute(
             "INSERT INTO tasks (id, title, description, priority, assigned_to, created_by, created_at) "
             "VALUES (?, ?, ?, ?, ?, ?, ?)",
@@ -356,8 +337,6 @@ async def api_create_task(request: Request):
         )
         db.commit()
         row = db.execute("SELECT * FROM tasks WHERE id=?", (task_id,)).fetchone()
-    finally:
-        db.close()
 
     return JSONResponse(_row_to_dict(row), status_code=201)
@@ -365,11 +344,8 @@ async def api_create_task(request: Request):
 @router.get("/api/tasks", response_class=JSONResponse)
 async def api_list_tasks():
     """List all tasks as JSON."""
-    db = _get_db()
-    try:
+    with _get_db() as db:
         rows = db.execute("SELECT * FROM tasks ORDER BY created_at DESC").fetchall()
-    finally:
-        db.close()
     return JSONResponse([_row_to_dict(r) for r in rows])
@@ -382,18 +358,15 @@ async def api_update_status(task_id: str, request: Request):
         raise HTTPException(422, f"Invalid status. Must be one of: {VALID_STATUSES}")
 
     completed_at = (
-        datetime.utcnow().isoformat() if new_status in ("completed", "vetoed", "failed") else None
+        datetime.now(UTC).isoformat() if new_status in ("completed", "vetoed", "failed") else None
     )
-    db = _get_db()
-    try:
+    with _get_db() as db:
         db.execute(
             "UPDATE tasks SET status=?, completed_at=COALESCE(?, completed_at) WHERE id=?",
             (new_status, completed_at, task_id),
         )
         db.commit()
         row = db.execute("SELECT * FROM tasks WHERE id=?", (task_id,)).fetchone()
-    finally:
-        db.close()
     if not row:
         raise HTTPException(404, "Task not found")
     return JSONResponse(_row_to_dict(row))
@@ -402,12 +375,9 @@ async def api_update_status(task_id: str, request: Request):
 
 @router.delete("/api/tasks/{task_id}", response_class=JSONResponse)
 async def api_delete_task(task_id: str):
     """Delete a task."""
-    db = _get_db()
-    try:
+    with _get_db() as db:
         cursor = db.execute("DELETE FROM tasks WHERE id=?", (task_id,))
         db.commit()
-    finally:
-        db.close()
     if cursor.rowcount == 0:
         raise HTTPException(404, "Task not found")
     return JSONResponse({"success": True, "id": task_id})
@@ -421,8 +391,7 @@ async def api_delete_task(task_id: str):
 
 @router.get("/api/queue/status", response_class=JSONResponse)
 async def queue_status(assigned_to: str = "default"):
     """Return queue status for the chat panel's agent status indicator."""
-    db = _get_db()
-    try:
+    with _get_db() as db:
         running = db.execute(
             "SELECT * FROM tasks WHERE status='running' AND assigned_to=? LIMIT 1",
             (assigned_to,),
@@ -431,8 +400,6 @@ async def queue_status(assigned_to: str = "default"):
             "SELECT COUNT(*) as cnt FROM tasks WHERE status IN ('pending_approval','approved') AND assigned_to=?",
             (assigned_to,),
         ).fetchone()
-    finally:
-        db.close()
 
     if running:
         return JSONResponse(
src/dashboard/routes/tower.py (new file, 108 lines)
@@ -0,0 +1,108 @@
"""Tower dashboard — real-time Spark visualization via WebSocket.

GET /tower    — HTML Tower dashboard (Thinking / Predicting / Advising)
WS  /tower/ws — WebSocket stream of Spark engine state updates
"""

import asyncio
import json
import logging

from fastapi import APIRouter, Request, WebSocket
from fastapi.responses import HTMLResponse

from dashboard.templating import templates
from spark.engine import spark_engine

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/tower", tags=["tower"])

_PUSH_INTERVAL = 5  # seconds between state broadcasts


def _spark_snapshot() -> dict:
    """Build a JSON-serialisable snapshot of Spark state."""
    status = spark_engine.status()

    timeline = spark_engine.get_timeline(limit=10)
    events = []
    for ev in timeline:
        entry = {
            "event_type": ev.event_type,
            "description": ev.description,
            "importance": ev.importance,
            "created_at": ev.created_at,
        }
        if ev.agent_id:
            entry["agent_id"] = ev.agent_id[:8]
        if ev.task_id:
            entry["task_id"] = ev.task_id[:8]
        try:
            entry["data"] = json.loads(ev.data)
        except (json.JSONDecodeError, TypeError):
            entry["data"] = {}
        events.append(entry)

    predictions = spark_engine.get_predictions(limit=5)
    preds = []
    for p in predictions:
        pred = {
            "task_id": p.task_id[:8] if p.task_id else "?",
            "accuracy": p.accuracy,
            "evaluated": p.evaluated_at is not None,
            "created_at": p.created_at,
        }
        try:
            pred["predicted"] = json.loads(p.predicted_value)
        except (json.JSONDecodeError, TypeError):
            pred["predicted"] = {}
        preds.append(pred)

    advisories = spark_engine.get_advisories()
    advs = [
        {
            "category": a.category,
            "priority": a.priority,
            "title": a.title,
            "detail": a.detail,
            "suggested_action": a.suggested_action,
        }
        for a in advisories
    ]

    return {
        "type": "spark_state",
        "status": status,
        "events": events,
        "predictions": preds,
        "advisories": advs,
    }


@router.get("", response_class=HTMLResponse)
async def tower_ui(request: Request):
    """Render the Tower dashboard page."""
    snapshot = _spark_snapshot()
    return templates.TemplateResponse(
        request,
        "tower.html",
        {"snapshot": snapshot},
    )


@router.websocket("/ws")
async def tower_ws(websocket: WebSocket) -> None:
    """Stream Spark state snapshots to the Tower dashboard."""
    await websocket.accept()
    logger.info("Tower WS connected")

    try:
        # Send initial snapshot
        await websocket.send_text(json.dumps(_spark_snapshot()))

        while True:
            await asyncio.sleep(_PUSH_INTERVAL)
            await websocket.send_text(json.dumps(_spark_snapshot()))
    except Exception:
        logger.debug("Tower WS disconnected")
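A hedged client sketch for the new WebSocket stream. The host/port and the third-party `websockets` package are assumptions; the `/tower/ws` route and the 5-second push cadence come from the file above.

```python
import asyncio
import json

import websockets  # third-party package; assumed available


async def watch_tower() -> None:
    # URL assumes the dashboard serves on 127.0.0.1:8000.
    async with websockets.connect("ws://127.0.0.1:8000/tower/ws") as ws:
        while True:
            # Each message is one "spark_state" snapshot, pushed every ~5s.
            snapshot = json.loads(await ws.recv())
            print(len(snapshot["events"]), "events,",
                  len(snapshot["advisories"]), "advisories")


asyncio.run(watch_tower())
```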
@@ -43,7 +43,8 @@ async def tts_status():
             "available": voice_tts.available,
             "voices": voice_tts.get_voices() if voice_tts.available else [],
         }
-    except Exception:
+    except Exception as exc:
+        logger.debug("Voice config error: %s", exc)
         return {"available": False, "voices": []}
@@ -58,6 +59,7 @@ async def tts_speak(text: str = Form(...)):
         voice_tts.speak(text)
         return {"spoken": True, "text": text}
     except Exception as exc:
+        logger.exception("TTS speak failed")
         return {"spoken": False, "reason": str(exc)}
@@ -139,7 +141,8 @@ async def process_voice_input(
 
         if voice_tts.available:
             voice_tts.speak(response_text)
-    except Exception:
+    except Exception as exc:
+        logger.debug("Voice TTS error: %s", exc)
         pass
 
     return {
@@ -3,7 +3,9 @@
 import logging
 import sqlite3
 import uuid
-from datetime import datetime
+from collections.abc import Generator
+from contextlib import closing, contextmanager
+from datetime import UTC, datetime
 from pathlib import Path
 
 from fastapi import APIRouter, Form, HTTPException, Request
@@ -23,28 +25,29 @@ CATEGORIES = ["bug", "feature", "suggestion", "maintenance", "security"]
 VALID_STATUSES = {"submitted", "triaged", "approved", "in_progress", "completed", "rejected"}
 
 
-def _get_db() -> sqlite3.Connection:
+@contextmanager
+def _get_db() -> Generator[sqlite3.Connection, None, None]:
     DB_PATH.parent.mkdir(parents=True, exist_ok=True)
-    conn = sqlite3.connect(str(DB_PATH))
-    conn.row_factory = sqlite3.Row
-    conn.execute("""
-        CREATE TABLE IF NOT EXISTS work_orders (
-            id TEXT PRIMARY KEY,
-            title TEXT NOT NULL,
-            description TEXT DEFAULT '',
-            priority TEXT DEFAULT 'medium',
-            category TEXT DEFAULT 'suggestion',
-            submitter TEXT DEFAULT 'dashboard',
-            related_files TEXT DEFAULT '',
-            status TEXT DEFAULT 'submitted',
-            result TEXT DEFAULT '',
-            rejection_reason TEXT DEFAULT '',
-            created_at TEXT DEFAULT (datetime('now')),
-            completed_at TEXT
-        )
-    """)
-    conn.commit()
-    return conn
+    with closing(sqlite3.connect(str(DB_PATH))) as conn:
+        conn.row_factory = sqlite3.Row
+        conn.execute("""
+            CREATE TABLE IF NOT EXISTS work_orders (
+                id TEXT PRIMARY KEY,
+                title TEXT NOT NULL,
+                description TEXT DEFAULT '',
+                priority TEXT DEFAULT 'medium',
+                category TEXT DEFAULT 'suggestion',
+                submitter TEXT DEFAULT 'dashboard',
+                related_files TEXT DEFAULT '',
+                status TEXT DEFAULT 'submitted',
+                result TEXT DEFAULT '',
+                rejection_reason TEXT DEFAULT '',
+                created_at TEXT DEFAULT (datetime('now')),
+                completed_at TEXT
+            )
+        """)
+        conn.commit()
+        yield conn
 
 
 class _EnumLike:
@@ -104,14 +107,11 @@ def _query_wos(db, statuses):
 
 @router.get("/work-orders/queue", response_class=HTMLResponse)
 async def work_orders_page(request: Request):
-    db = _get_db()
-    try:
+    with _get_db() as db:
         pending = _query_wos(db, ["submitted", "triaged"])
         active = _query_wos(db, ["approved", "in_progress"])
         completed = _query_wos(db, ["completed"])
         rejected = _query_wos(db, ["rejected"])
-    finally:
-        db.close()
 
     return templates.TemplateResponse(
         request,
@@ -144,12 +144,11 @@ async def submit_work_order(
     related_files: str = Form(""),
 ):
     wo_id = str(uuid.uuid4())
-    now = datetime.utcnow().isoformat()
+    now = datetime.now(UTC).isoformat()
     priority = priority if priority in PRIORITIES else "medium"
     category = category if category in CATEGORIES else "suggestion"
 
-    db = _get_db()
-    try:
+    with _get_db() as db:
         db.execute(
             "INSERT INTO work_orders (id, title, description, priority, category, submitter, related_files, created_at) "
             "VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
@@ -157,8 +156,6 @@ async def submit_work_order(
         )
         db.commit()
         row = db.execute("SELECT * FROM work_orders WHERE id=?", (wo_id,)).fetchone()
-    finally:
-        db.close()
 
     wo = _WOView(_row_to_dict(row))
     return templates.TemplateResponse(request, "partials/work_order_card.html", {"wo": wo})
@@ -171,11 +168,8 @@ async def submit_work_order(
 
 @router.get("/work-orders/queue/pending", response_class=HTMLResponse)
 async def pending_partial(request: Request):
-    db = _get_db()
-    try:
+    with _get_db() as db:
         wos = _query_wos(db, ["submitted", "triaged"])
-    finally:
-        db.close()
     if not wos:
         return HTMLResponse(
             '<div style="color: var(--text-muted); font-size: 0.8rem; padding: 12px 0;">'
@@ -193,11 +187,8 @@ async def pending_partial(request: Request):
 
 @router.get("/work-orders/queue/active", response_class=HTMLResponse)
 async def active_partial(request: Request):
-    db = _get_db()
-    try:
+    with _get_db() as db:
         wos = _query_wos(db, ["approved", "in_progress"])
-    finally:
-        db.close()
     if not wos:
         return HTMLResponse(
             '<div style="color: var(--text-muted); font-size: 0.8rem; padding: 12px 0;">'
@@ -220,10 +211,9 @@ async def active_partial(request: Request):
 
 async def _update_status(request: Request, wo_id: str, new_status: str, **extra):
     completed_at = (
-        datetime.utcnow().isoformat() if new_status in ("completed", "rejected") else None
+        datetime.now(UTC).isoformat() if new_status in ("completed", "rejected") else None
     )
-    db = _get_db()
-    try:
+    with _get_db() as db:
         sets = ["status=?", "completed_at=COALESCE(?, completed_at)"]
         vals = [new_status, completed_at]
         for col, val in extra.items():
@@ -233,8 +223,6 @@ async def _update_status(request: Request, wo_id: str, new_status: str, **extra)
         db.execute(f"UPDATE work_orders SET {', '.join(sets)} WHERE id=?", vals)
         db.commit()
         row = db.execute("SELECT * FROM work_orders WHERE id=?", (wo_id,)).fetchone()
-    finally:
-        db.close()
     if not row:
         raise HTTPException(404, "Work order not found")
     wo = _WOView(_row_to_dict(row))
src/dashboard/routes/world.py (new file, 1065 lines)
File diff suppressed because it is too large
@@ -1,34 +1,5 @@
-from dataclasses import dataclass
-
-
-@dataclass
-class Message:
-    role: str  # "user" | "agent" | "error"
-    content: str
-    timestamp: str
-    source: str = "browser"  # "browser" | "api" | "telegram" | "discord" | "system"
-
-
-class MessageLog:
-    """In-memory chat history for the lifetime of the server process."""
-
-    def __init__(self) -> None:
-        self._entries: list[Message] = []
-
-    def append(self, role: str, content: str, timestamp: str, source: str = "browser") -> None:
-        self._entries.append(
-            Message(role=role, content=content, timestamp=timestamp, source=source)
-        )
-
-    def all(self) -> list[Message]:
-        return list(self._entries)
-
-    def clear(self) -> None:
-        self._entries.clear()
-
-    def __len__(self) -> int:
-        return len(self._entries)
-
-
-# Module-level singleton shared across the app
-message_log = MessageLog()
+"""Backward-compatible re-export — canonical home is infrastructure.chat_store."""
+
+from infrastructure.chat_store import DB_PATH, MAX_MESSAGES, Message, MessageLog, message_log
+
+__all__ = ["DB_PATH", "MAX_MESSAGES", "Message", "MessageLog", "message_log"]
|||||||
@@ -327,7 +327,11 @@
|
|||||||
.then(function(data) {
|
.then(function(data) {
|
||||||
var list = document.getElementById('notif-list');
|
var list = document.getElementById('notif-list');
|
||||||
if (!data.length) {
|
if (!data.length) {
|
||||||
list.innerHTML = '<div class="mc-notif-empty">No recent notifications</div>';
|
list.innerHTML = '';
|
||||||
|
var emptyDiv = document.createElement('div');
|
||||||
|
emptyDiv.className = 'mc-notif-empty';
|
||||||
|
emptyDiv.textContent = 'No recent notifications';
|
||||||
|
list.appendChild(emptyDiv);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
list.innerHTML = '';
|
list.innerHTML = '';
|
||||||
|
@@ -138,6 +138,47 @@
     </div>
   </div>
 
+  <!-- Spark Intelligence -->
+  {% from "macros.html" import panel %}
+  <div class="mc-card-spaced">
+    <div class="card">
+      <div class="card-header">
+        <h2 class="card-title">Spark Intelligence</h2>
+        <div>
+          <span class="badge" id="spark-status-badge">Loading...</span>
+        </div>
+      </div>
+      <div class="grid grid-3">
+        <div class="stat">
+          <div class="stat-value" id="spark-events">-</div>
+          <div class="stat-label">Events</div>
+        </div>
+        <div class="stat">
+          <div class="stat-value" id="spark-memories">-</div>
+          <div class="stat-label">Memories</div>
+        </div>
+        <div class="stat">
+          <div class="stat-value" id="spark-predictions">-</div>
+          <div class="stat-label">Predictions</div>
+        </div>
+      </div>
+    </div>
+    <div class="grid grid-2 mc-section-gap">
+      {% call panel("SPARK TIMELINE", id="spark-timeline-panel",
+                    hx_get="/spark/timeline",
+                    hx_trigger="load, every 10s") %}
+        <div class="spark-timeline-scroll">
+          <p class="chat-history-placeholder">Loading timeline...</p>
+        </div>
+      {% endcall %}
+      {% call panel("SPARK INSIGHTS", id="spark-insights-panel",
+                    hx_get="/spark/insights",
+                    hx_trigger="load, every 30s") %}
+        <p class="chat-history-placeholder">Loading insights...</p>
+      {% endcall %}
+    </div>
+  </div>
+
   <!-- Chat History -->
   <div class="card mc-card-spaced">
     <div class="card-header">
@@ -428,7 +469,34 @@ async function loadGrokStats() {
   }
 }
 
+// Load Spark status
+async function loadSparkStatus() {
+  try {
+    var response = await fetch('/spark');
+    var data = await response.json();
+    var st = data.status || {};
+
+    document.getElementById('spark-events').textContent = st.total_events || 0;
+    document.getElementById('spark-memories').textContent = st.total_memories || 0;
+    document.getElementById('spark-predictions').textContent = st.total_predictions || 0;
+
+    var badge = document.getElementById('spark-status-badge');
+    if (st.total_events > 0) {
+      badge.textContent = 'Active';
+      badge.className = 'badge badge-success';
+    } else {
+      badge.textContent = 'Idle';
+      badge.className = 'badge badge-warning';
+    }
+  } catch (error) {
+    var badge = document.getElementById('spark-status-badge');
+    badge.textContent = 'Offline';
+    badge.className = 'badge badge-danger';
+  }
+}
+
 // Initial load
+loadSparkStatus();
 loadSovereignty();
 loadHealth();
 loadSwarmStats();
@@ -442,5 +510,6 @@ setInterval(loadHealth, 10000);
 setInterval(loadSwarmStats, 5000);
 setInterval(updateHeartbeat, 5000);
 setInterval(loadGrokStats, 10000);
+setInterval(loadSparkStatus, 15000);
 </script>
 {% endblock %}
@@ -120,14 +120,17 @@
 
 function updateFromData(data) {
   if (data.is_working && data.current_task) {
-    statusEl.innerHTML = '<span style="color: #ffaa00;">working...</span>';
+    statusEl.textContent = 'working...';
+    statusEl.style.color = '#ffaa00';
     banner.style.display = 'block';
     taskTitle.textContent = data.current_task.title;
   } else if (data.tasks_ahead > 0) {
-    statusEl.innerHTML = '<span style="color: #888;">queue: ' + data.tasks_ahead + ' ahead</span>';
+    statusEl.textContent = 'queue: ' + data.tasks_ahead + ' ahead';
+    statusEl.style.color = '#888';
     banner.style.display = 'none';
   } else {
-    statusEl.innerHTML = '<span style="color: #00ff88;">ready</span>';
+    statusEl.textContent = 'ready';
+    statusEl.style.color = '#00ff88';
     banner.style.display = 'none';
   }
 }
@@ -20,7 +20,7 @@
 {% else %}
 <div class="chat-message agent">
   <div class="msg-meta">TIMMY // SYSTEM</div>
-  <div class="msg-body">Mission Control initialized. Timmy ready — awaiting input.</div>
+  <div class="msg-body">{{ welcome_message | e }}</div>
 </div>
 {% endif %}
 <script>if(typeof scrollChat==='function'){setTimeout(scrollChat,50);}</script>
@@ -198,17 +198,43 @@ function addActivityEvent(evt) {
     } catch(e) {}
   }
 
-  item.innerHTML = `
-    <div class="activity-icon">${icon}</div>
-    <div class="activity-content">
-      <div class="activity-label">${label}</div>
-      ${desc ? `<div class="activity-desc">${desc}</div>` : ''}
-      <div class="activity-meta">
-        <span class="activity-time">${time}</span>
-        <span class="activity-source">${evt.source || 'system'}</span>
-      </div>
-    </div>
-  `;
+  // Build DOM safely using createElement and textContent
+  var iconDiv = document.createElement('div');
+  iconDiv.className = 'activity-icon';
+  iconDiv.textContent = icon;
+
+  var contentDiv = document.createElement('div');
+  contentDiv.className = 'activity-content';
+
+  var labelDiv = document.createElement('div');
+  labelDiv.className = 'activity-label';
+  labelDiv.textContent = label;
+  contentDiv.appendChild(labelDiv);
+
+  if (desc) {
+    var descDiv = document.createElement('div');
+    descDiv.className = 'activity-desc';
+    descDiv.textContent = desc;
+    contentDiv.appendChild(descDiv);
+  }
+
+  var metaDiv = document.createElement('div');
+  metaDiv.className = 'activity-meta';
+
+  var timeSpan = document.createElement('span');
+  timeSpan.className = 'activity-time';
+  timeSpan.textContent = time;
+
+  var sourceSpan = document.createElement('span');
+  sourceSpan.className = 'activity-source';
+  sourceSpan.textContent = evt.source || 'system';
+
+  metaDiv.appendChild(timeSpan);
+  metaDiv.appendChild(sourceSpan);
+  contentDiv.appendChild(metaDiv);
+
+  item.appendChild(iconDiv);
+  item.appendChild(contentDiv);
 
   // Add to top
   container.insertBefore(item, container.firstChild);
src/dashboard/templates/tower.html (new file, 180 lines)
@@ -0,0 +1,180 @@
+{% extends "base.html" %}
+
+{% block title %}Timmy Time — Tower{% endblock %}
+
+{% block extra_styles %}{% endblock %}
+
+{% block content %}
+<div class="container-fluid tower-container py-3">
+
+  <div class="tower-header">
+    <div class="tower-title">TOWER</div>
+    <div class="tower-subtitle">
+      Real-time Spark visualization —
+      <span id="tower-conn" class="tower-conn-badge tower-conn-connecting">CONNECTING</span>
+    </div>
+  </div>
+
+  <div class="row g-3">
+
+    <!-- Left: THINKING (events) -->
+    <div class="col-12 col-lg-4 d-flex flex-column gap-3">
+      <div class="card mc-panel tower-phase-card">
+        <div class="card-header mc-panel-header tower-phase-thinking">// THINKING</div>
+        <div class="card-body p-3 tower-scroll" id="tower-events">
+          <div class="tower-empty">Waiting for Spark data…</div>
+        </div>
+      </div>
+    </div>
+
+    <!-- Middle: PREDICTING (EIDOS) -->
+    <div class="col-12 col-lg-4 d-flex flex-column gap-3">
+      <div class="card mc-panel tower-phase-card">
+        <div class="card-header mc-panel-header tower-phase-predicting">// PREDICTING</div>
+        <div class="card-body p-3" id="tower-predictions">
+          <div class="tower-empty">Waiting for Spark data…</div>
+        </div>
+      </div>
+      <div class="card mc-panel">
+        <div class="card-header mc-panel-header">// EIDOS STATS</div>
+        <div class="card-body p-3">
+          <div class="tower-stat-grid" id="tower-stats">
+            <div class="tower-stat"><span class="tower-stat-label">EVENTS</span><span class="tower-stat-value" id="ts-events">0</span></div>
+            <div class="tower-stat"><span class="tower-stat-label">MEMORIES</span><span class="tower-stat-value" id="ts-memories">0</span></div>
+            <div class="tower-stat"><span class="tower-stat-label">PREDICTIONS</span><span class="tower-stat-value" id="ts-preds">0</span></div>
+            <div class="tower-stat"><span class="tower-stat-label">ACCURACY</span><span class="tower-stat-value" id="ts-accuracy">—</span></div>
+          </div>
+        </div>
+      </div>
+    </div>
+
+    <!-- Right: ADVISING -->
+    <div class="col-12 col-lg-4 d-flex flex-column gap-3">
+      <div class="card mc-panel tower-phase-card">
+        <div class="card-header mc-panel-header tower-phase-advising">// ADVISING</div>
+        <div class="card-body p-3 tower-scroll" id="tower-advisories">
+          <div class="tower-empty">Waiting for Spark data…</div>
+        </div>
+      </div>
+    </div>
+
+  </div>
+</div>
+
+<script>
+(function() {
+  var ws = null;
+  var badge = document.getElementById('tower-conn');
+
+  function setConn(state) {
+    badge.textContent = state.toUpperCase();
+    badge.className = 'tower-conn-badge tower-conn-' + state;
+  }
+
+  function esc(s) { var d = document.createElement('div'); d.textContent = s; return d.innerHTML; }
+
+  function renderEvents(events) {
+    var el = document.getElementById('tower-events');
+    if (!events || !events.length) { el.innerHTML = '<div class="tower-empty">No events captured yet.</div>'; return; }
+    var html = '';
+    for (var i = 0; i < events.length; i++) {
+      var ev = events[i];
+      var dots = ev.importance >= 0.8 ? '\u25cf\u25cf\u25cf' : ev.importance >= 0.5 ? '\u25cf\u25cf' : '\u25cf';
+      html += '<div class="tower-event tower-etype-' + esc(ev.event_type) + '">'
+        + '<div class="tower-ev-head">'
+        + '<span class="tower-ev-badge">' + esc(ev.event_type.replace(/_/g, ' ').toUpperCase()) + '</span>'
+        + '<span class="tower-ev-dots">' + dots + '</span>'
+        + '</div>'
+        + '<div class="tower-ev-desc">' + esc(ev.description) + '</div>'
+        + '<div class="tower-ev-time">' + esc((ev.created_at || '').slice(0, 19)) + '</div>'
+        + '</div>';
+    }
+    el.innerHTML = html;
+  }
+
+  function renderPredictions(preds) {
+    var el = document.getElementById('tower-predictions');
+    if (!preds || !preds.length) { el.innerHTML = '<div class="tower-empty">No predictions yet.</div>'; return; }
+    var html = '';
+    for (var i = 0; i < preds.length; i++) {
+      var p = preds[i];
+      var cls = p.evaluated ? 'tower-pred-done' : 'tower-pred-pending';
+      var accTxt = p.accuracy != null ? Math.round(p.accuracy * 100) + '%' : 'PENDING';
+      var accCls = p.accuracy != null ? (p.accuracy >= 0.7 ? 'text-success' : p.accuracy < 0.4 ? 'text-danger' : 'text-warning') : '';
+      html += '<div class="tower-pred ' + cls + '">'
+        + '<div class="tower-pred-head">'
+        + '<span class="tower-pred-task">' + esc(p.task_id) + '</span>'
+        + '<span class="tower-pred-acc ' + accCls + '">' + accTxt + '</span>'
+        + '</div>';
+      if (p.predicted) {
+        var pr = p.predicted;
+        html += '<div class="tower-pred-detail">';
+        if (pr.likely_winner) html += '<span>Winner: ' + esc(pr.likely_winner.slice(0, 8)) + '</span> ';
+        if (pr.success_probability != null) html += '<span>Success: ' + Math.round(pr.success_probability * 100) + '%</span> ';
+        html += '</div>';
+      }
+      html += '<div class="tower-ev-time">' + esc((p.created_at || '').slice(0, 19)) + '</div>'
+        + '</div>';
+    }
+    el.innerHTML = html;
+  }
+
+  function renderAdvisories(advs) {
+    var el = document.getElementById('tower-advisories');
+    if (!advs || !advs.length) { el.innerHTML = '<div class="tower-empty">No advisories yet.</div>'; return; }
+    var html = '';
+    for (var i = 0; i < advs.length; i++) {
+      var a = advs[i];
+      var prio = a.priority >= 0.7 ? 'high' : a.priority >= 0.4 ? 'medium' : 'low';
+      html += '<div class="tower-advisory tower-adv-' + prio + '">'
+        + '<div class="tower-adv-head">'
+        + '<span class="tower-adv-cat">' + esc(a.category.replace(/_/g, ' ').toUpperCase()) + '</span>'
+        + '<span class="tower-adv-prio">' + Math.round(a.priority * 100) + '%</span>'
+        + '</div>'
+        + '<div class="tower-adv-title">' + esc(a.title) + '</div>'
+        + '<div class="tower-adv-detail">' + esc(a.detail) + '</div>'
+        + '<div class="tower-adv-action">' + esc(a.suggested_action) + '</div>'
+        + '</div>';
+    }
+    el.innerHTML = html;
+  }
+
+  function renderStats(status) {
+    if (!status) return;
+    document.getElementById('ts-events').textContent = status.events_captured || 0;
+    document.getElementById('ts-memories').textContent = status.memories_stored || 0;
+    var p = status.predictions || {};
+    document.getElementById('ts-preds').textContent = p.total_predictions || 0;
+    var acc = p.avg_accuracy;
+    var accEl = document.getElementById('ts-accuracy');
+    if (acc != null) {
+      accEl.textContent = Math.round(acc * 100) + '%';
+      accEl.className = 'tower-stat-value ' + (acc >= 0.7 ? 'text-success' : acc < 0.4 ? 'text-danger' : 'text-warning');
+    } else {
+      accEl.textContent = '\u2014';
+    }
+  }
+
+  function handleMsg(data) {
+    if (data.type !== 'spark_state') return;
+    renderEvents(data.events);
+    renderPredictions(data.predictions);
+    renderAdvisories(data.advisories);
+    renderStats(data.status);
+  }
+
+  function connect() {
+    var proto = location.protocol === 'https:' ? 'wss:' : 'ws:';
+    ws = new WebSocket(proto + '//' + location.host + '/tower/ws');
+    ws.onopen = function() { setConn('live'); };
+    ws.onclose = function() { setConn('offline'); setTimeout(connect, 3000); };
+    ws.onerror = function() { setConn('offline'); };
+    ws.onmessage = function(e) {
+      try { handleMsg(JSON.parse(e.data)); } catch(err) { console.error('Tower WS parse error', err); }
+    };
+  }
+
+  connect();
+})();
+</script>
+{% endblock %}
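The client script above only acts on messages with `type === 'spark_state'` and reconnects every 3 s when the socket drops. For orientation, a hypothetical server-side counterpart for `/tower/ws` — the endpoint path and payload keys come from the template's script; the framework choice (FastAPI) and the push cadence are assumptions, not shown in this diff:

```python
import asyncio

from fastapi import FastAPI, WebSocket, WebSocketDisconnect

app = FastAPI()

@app.websocket("/tower/ws")
async def tower_ws(ws: WebSocket) -> None:
    await ws.accept()
    try:
        while True:
            # Shape mirrors what handleMsg() expects; real data would come
            # from the Spark/EIDOS state, which this sketch stubs out.
            await ws.send_json({
                "type": "spark_state",
                "events": [],
                "predictions": [],
                "advisories": [],
                "status": {},
            })
            await asyncio.sleep(2)  # push cadence is a guess
    except WebSocketDisconnect:
        pass
```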
src/infrastructure/chat_store.py (new file, 153 lines)
@@ -0,0 +1,153 @@
+"""Persistent chat message store backed by SQLite.
+
+Provides the same API as the original in-memory MessageLog so all callers
+(dashboard routes, chat_api, thinking, briefing) work without changes.
+
+Data lives in ``data/chat.db`` — survives server restarts.
+A configurable retention policy (default 500 messages) keeps the DB lean.
+"""
+
+import sqlite3
+import threading
+from collections.abc import Generator
+from contextlib import closing, contextmanager
+from dataclasses import dataclass
+from pathlib import Path
+
+# ── Data dir — resolved relative to repo root (three levels up from this file) ──
+_REPO_ROOT = Path(__file__).resolve().parents[3]
+DB_PATH: Path = _REPO_ROOT / "data" / "chat.db"
+
+# Maximum messages to retain (oldest pruned on append)
+MAX_MESSAGES: int = 500
+
+
+@dataclass
+class Message:
+    role: str  # "user" | "agent" | "error"
+    content: str
+    timestamp: str
+    source: str = "browser"  # "browser" | "api" | "telegram" | "discord" | "system"
+
+
+@contextmanager
+def _get_conn(db_path: Path | None = None) -> Generator[sqlite3.Connection, None, None]:
+    """Open (or create) the chat database and ensure schema exists."""
+    path = db_path or DB_PATH
+    path.parent.mkdir(parents=True, exist_ok=True)
+    with closing(sqlite3.connect(str(path), check_same_thread=False)) as conn:
+        conn.row_factory = sqlite3.Row
+        conn.execute("PRAGMA journal_mode=WAL")
+        conn.execute("""
+            CREATE TABLE IF NOT EXISTS chat_messages (
+                id INTEGER PRIMARY KEY AUTOINCREMENT,
+                role TEXT NOT NULL,
+                content TEXT NOT NULL,
+                timestamp TEXT NOT NULL,
+                source TEXT NOT NULL DEFAULT 'browser'
+            )
+        """)
+        conn.commit()
+        yield conn
+
+
+class MessageLog:
+    """SQLite-backed chat history — drop-in replacement for the old in-memory list."""
+
+    def __init__(self, db_path: Path | None = None) -> None:
+        self._db_path = db_path or DB_PATH
+        self._lock = threading.Lock()
+        self._conn: sqlite3.Connection | None = None
+
+    # Lazy connection — opened on first use, not at import time.
+    def _ensure_conn(self) -> sqlite3.Connection:
+        if self._conn is None:
+            # Open a persistent connection for the class instance
+            path = self._db_path or DB_PATH
+            path.parent.mkdir(parents=True, exist_ok=True)
+            conn = sqlite3.connect(str(path), check_same_thread=False)
+            conn.row_factory = sqlite3.Row
+            conn.execute("PRAGMA journal_mode=WAL")
+            conn.execute("""
+                CREATE TABLE IF NOT EXISTS chat_messages (
+                    id INTEGER PRIMARY KEY AUTOINCREMENT,
+                    role TEXT NOT NULL,
+                    content TEXT NOT NULL,
+                    timestamp TEXT NOT NULL,
+                    source TEXT NOT NULL DEFAULT 'browser'
+                )
+            """)
+            conn.commit()
+            self._conn = conn
+        return self._conn
+
+    def append(self, role: str, content: str, timestamp: str, source: str = "browser") -> None:
+        with self._lock:
+            conn = self._ensure_conn()
+            conn.execute(
+                "INSERT INTO chat_messages (role, content, timestamp, source) VALUES (?, ?, ?, ?)",
+                (role, content, timestamp, source),
+            )
+            conn.commit()
+            self._prune(conn)
+
+    def all(self) -> list[Message]:
+        with self._lock:
+            conn = self._ensure_conn()
+            rows = conn.execute(
+                "SELECT role, content, timestamp, source FROM chat_messages ORDER BY id"
+            ).fetchall()
+            return [
+                Message(
+                    role=r["role"], content=r["content"], timestamp=r["timestamp"], source=r["source"]
+                )
+                for r in rows
+            ]
+
+    def recent(self, limit: int = 50) -> list[Message]:
+        """Return the *limit* most recent messages (oldest-first)."""
+        with self._lock:
+            conn = self._ensure_conn()
+            rows = conn.execute(
+                "SELECT role, content, timestamp, source FROM chat_messages "
+                "ORDER BY id DESC LIMIT ?",
+                (limit,),
+            ).fetchall()
+            return [
+                Message(
+                    role=r["role"], content=r["content"], timestamp=r["timestamp"], source=r["source"]
+                )
+                for r in reversed(rows)
+            ]
+
+    def clear(self) -> None:
+        with self._lock:
+            conn = self._ensure_conn()
+            conn.execute("DELETE FROM chat_messages")
+            conn.commit()
+
+    def _prune(self, conn: sqlite3.Connection) -> None:
+        """Keep at most MAX_MESSAGES rows, deleting the oldest."""
+        count = conn.execute("SELECT COUNT(*) FROM chat_messages").fetchone()[0]
+        if count > MAX_MESSAGES:
+            excess = count - MAX_MESSAGES
+            conn.execute(
+                "DELETE FROM chat_messages WHERE id IN "
+                "(SELECT id FROM chat_messages ORDER BY id LIMIT ?)",
+                (excess,),
+            )
+            conn.commit()
+
+    def close(self) -> None:
+        if self._conn is not None:
+            self._conn.close()
+            self._conn = None
+
+    def __len__(self) -> int:
+        with self._lock:
+            conn = self._ensure_conn()
+            return conn.execute("SELECT COUNT(*) FROM chat_messages").fetchone()[0]
+
+
+# Module-level singleton shared across the app
+message_log = MessageLog()
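A minimal usage sketch of the new store — the import path assumes `src/` is on `sys.path`, and the ISO-8601 timestamp is an assumption (the store treats it as an opaque string):

```python
from infrastructure.chat_store import message_log  # import path assumed

message_log.append("user", "hello", timestamp="2025-01-01T00:00:00Z", source="api")
for msg in message_log.recent(limit=10):  # newest 10, returned oldest-first
    print(msg.role, msg.content, msg.source)
print(len(message_log))  # row count, capped at MAX_MESSAGES by _prune()
```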
@@ -22,6 +22,14 @@ logger = logging.getLogger(__name__)
 # In-memory dedup cache: hash -> last_seen timestamp
 _dedup_cache: dict[str, datetime] = {}

+_error_recorder = None
+
+
+def register_error_recorder(fn):
+    """Register a callback for recording errors to session log."""
+    global _error_recorder
+    _error_recorder = fn
+
+
 def _stack_hash(exc: Exception) -> str:
     """Create a stable hash of the exception type + traceback locations.
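How a caller might hook in a recorder — the stand-in function below is hypothetical; the keyword names (`error=`, `context=`) match how `_record_to_session` invokes the callback later in this diff:

```python
def record_to_session(error: str, context: str) -> None:
    # Stand-in recorder; a real one would write to the session log.
    print(f"[session] {context}: {error}")

register_error_recorder(record_to_session)
```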
@@ -87,10 +95,177 @@ def _get_git_context() -> dict:
         ).stdout.strip()

         return {"branch": branch, "commit": commit}
-    except Exception:
+    except Exception as exc:
+        logger.warning("Git info capture error: %s", exc)
         return {"branch": "unknown", "commit": "unknown"}
+
+
+def _extract_traceback_info(exc: Exception) -> tuple[str, str, int]:
+    """Extract formatted traceback, affected file, and line number.
+
+    Returns:
+        Tuple of (traceback_string, affected_file, affected_line).
+    """
+    tb_str = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
+
+    tb_obj = exc.__traceback__
+    affected_file = "unknown"
+    affected_line = 0
+    while tb_obj and tb_obj.tb_next:
+        tb_obj = tb_obj.tb_next
+    if tb_obj:
+        affected_file = tb_obj.tb_frame.f_code.co_filename
+        affected_line = tb_obj.tb_lineno
+
+    return tb_str, affected_file, affected_line
+
+
+def _log_error_event(
+    exc: Exception,
+    source: str,
+    error_hash: str,
+    affected_file: str,
+    affected_line: int,
+    git_ctx: dict,
+) -> None:
+    """Log the captured error to the event log."""
+    try:
+        from swarm.event_log import EventType, log_event
+
+        log_event(
+            EventType.ERROR_CAPTURED,
+            source=source,
+            data={
+                "error_type": type(exc).__name__,
+                "message": str(exc)[:500],
+                "hash": error_hash,
+                "file": affected_file,
+                "line": affected_line,
+                "git_branch": git_ctx.get("branch", ""),
+                "git_commit": git_ctx.get("commit", ""),
+            },
+        )
+    except Exception as log_exc:
+        logger.debug("Failed to log error event: %s", log_exc)
+
+
+def _build_report_description(
+    exc: Exception,
+    source: str,
+    context: dict | None,
+    error_hash: str,
+    tb_str: str,
+    affected_file: str,
+    affected_line: int,
+    git_ctx: dict,
+) -> str:
+    """Build the markdown description for a bug report task."""
+    parts = [
+        f"**Error:** {type(exc).__name__}: {str(exc)}",
+        f"**Source:** {source}",
+        f"**File:** {affected_file}:{affected_line}",
+        f"**Git:** {git_ctx.get('branch', '?')} @ {git_ctx.get('commit', '?')}",
+        f"**Time:** {datetime.now(UTC).isoformat()}",
+        f"**Hash:** {error_hash}",
+    ]
+
+    if context:
+        ctx_str = ", ".join(f"{k}={v}" for k, v in context.items())
+        parts.append(f"**Context:** {ctx_str}")
+
+    parts.append(f"\n**Stack Trace:**\n```\n{tb_str[:2000]}\n```")
+    return "\n".join(parts)
+
+
+def _log_bug_report_created(source: str, task_id: str, error_hash: str, title: str) -> None:
+    """Log a BUG_REPORT_CREATED event (best-effort)."""
+    try:
+        from swarm.event_log import EventType, log_event
+
+        log_event(
+            EventType.BUG_REPORT_CREATED,
+            source=source,
+            task_id=task_id,
+            data={
+                "error_hash": error_hash,
+                "title": title[:100],
+            },
+        )
+    except Exception as exc:
+        logger.warning("Bug report event log error: %s", exc)
+
+
+def _create_bug_report(
+    exc: Exception,
+    source: str,
+    context: dict | None,
+    error_hash: str,
+    tb_str: str,
+    affected_file: str,
+    affected_line: int,
+    git_ctx: dict,
+) -> str | None:
+    """Create a bug report task and return the task ID (or None on failure)."""
+    try:
+        from swarm.task_queue.models import create_task
+
+        title = f"[BUG] {type(exc).__name__}: {str(exc)[:80]}"
+        description = _build_report_description(
+            exc,
+            source,
+            context,
+            error_hash,
+            tb_str,
+            affected_file,
+            affected_line,
+            git_ctx,
+        )
+
+        task = create_task(
+            title=title,
+            description=description,
+            assigned_to="default",
+            created_by="system",
+            priority="normal",
+            requires_approval=False,
+            auto_approve=True,
+            task_type="bug_report",
+        )
+
+        _log_bug_report_created(source, task.id, error_hash, title)
+        return task.id
+
+    except Exception as task_exc:
+        logger.debug("Failed to create bug report task: %s", task_exc)
+        return None
+
+
+def _notify_bug_report(exc: Exception, source: str) -> None:
+    """Send a push notification about the captured error."""
+    try:
+        from infrastructure.notifications.push import notifier
+
+        notifier.notify(
+            title="Bug Report Filed",
+            message=f"{type(exc).__name__} in {source}: {str(exc)[:80]}",
+            category="system",
+        )
+    except Exception as notify_exc:
+        logger.warning("Bug report notification error: %s", notify_exc)
+
+
+def _record_to_session(exc: Exception, source: str) -> None:
+    """Record the error via the registered session callback."""
+    if _error_recorder is not None:
+        try:
+            _error_recorder(
+                error=f"{type(exc).__name__}: {str(exc)}",
+                context=source,
+            )
+        except Exception as log_exc:
+            logger.warning("Bug report session logging error: %s", log_exc)
+
+
 def capture_error(
     exc: Exception,
     source: str = "unknown",
@@ -117,116 +292,23 @@ def capture_error(
         logger.debug("Duplicate error suppressed: %s (hash=%s)", exc, error_hash)
         return None

-    # Format the stack trace
-    tb_str = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
-
-    # Extract file/line from traceback
-    tb_obj = exc.__traceback__
-    affected_file = "unknown"
-    affected_line = 0
-    while tb_obj and tb_obj.tb_next:
-        tb_obj = tb_obj.tb_next
-    if tb_obj:
-        affected_file = tb_obj.tb_frame.f_code.co_filename
-        affected_line = tb_obj.tb_lineno
+    tb_str, affected_file, affected_line = _extract_traceback_info(exc)

     git_ctx = _get_git_context()

-    # 1. Log to event_log
-    try:
-        from swarm.event_log import EventType, log_event
-
-        log_event(
-            EventType.ERROR_CAPTURED,
-            source=source,
-            data={
-                "error_type": type(exc).__name__,
-                "message": str(exc)[:500],
-                "hash": error_hash,
-                "file": affected_file,
-                "line": affected_line,
-                "git_branch": git_ctx.get("branch", ""),
-                "git_commit": git_ctx.get("commit", ""),
-            },
-        )
-    except Exception as log_exc:
-        logger.debug("Failed to log error event: %s", log_exc)
-
-    # 2. Create bug report task
-    task_id = None
-    try:
-        from swarm.task_queue.models import create_task
-
-        title = f"[BUG] {type(exc).__name__}: {str(exc)[:80]}"
-
-        description_parts = [
-            f"**Error:** {type(exc).__name__}: {str(exc)}",
-            f"**Source:** {source}",
-            f"**File:** {affected_file}:{affected_line}",
-            f"**Git:** {git_ctx.get('branch', '?')} @ {git_ctx.get('commit', '?')}",
-            f"**Time:** {datetime.now(UTC).isoformat()}",
-            f"**Hash:** {error_hash}",
-        ]
-
-        if context:
-            ctx_str = ", ".join(f"{k}={v}" for k, v in context.items())
-            description_parts.append(f"**Context:** {ctx_str}")
-
-        description_parts.append(f"\n**Stack Trace:**\n```\n{tb_str[:2000]}\n```")
-
-        task = create_task(
-            title=title,
-            description="\n".join(description_parts),
-            assigned_to="default",
-            created_by="system",
-            priority="normal",
-            requires_approval=False,
-            auto_approve=True,
-            task_type="bug_report",
-        )
-        task_id = task.id
-
-        # Log the creation event
-        try:
-            from swarm.event_log import EventType, log_event
-
-            log_event(
-                EventType.BUG_REPORT_CREATED,
-                source=source,
-                task_id=task_id,
-                data={
-                    "error_hash": error_hash,
-                    "title": title[:100],
-                },
-            )
-        except Exception:
-            pass
-
-    except Exception as task_exc:
-        logger.debug("Failed to create bug report task: %s", task_exc)
-
-    # 3. Send notification
-    try:
-        from infrastructure.notifications.push import notifier
-
-        notifier.notify(
-            title="Bug Report Filed",
-            message=f"{type(exc).__name__} in {source}: {str(exc)[:80]}",
-            category="system",
-        )
-    except Exception:
-        pass
-
-    # 4. Record in session logger
-    try:
-        from timmy.session_logger import get_session_logger
-
-        session_logger = get_session_logger()
-        session_logger.record_error(
-            error=f"{type(exc).__name__}: {str(exc)}",
-            context=source,
-        )
-    except Exception:
-        pass
+    _log_error_event(exc, source, error_hash, affected_file, affected_line, git_ctx)
+
+    task_id = _create_bug_report(
+        exc,
+        source,
+        context,
+        error_hash,
+        tb_str,
+        affected_file,
+        affected_line,
+        git_ctx,
+    )
+
+    _notify_bug_report(exc, source)
+    _record_to_session(exc, source)

     return task_id
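A call-site sketch for the refactored entry point — `risky_operation` and the context keys are placeholders; the signature and return value are taken from the diff:

```python
try:
    risky_operation()  # placeholder for any failing call
except Exception as exc:
    # Returns the created bug-report task ID, or None if suppressed/failed.
    task_id = capture_error(exc, source="scheduler", context={"job": "nightly"})
    if task_id:
        logger.info("Filed bug report task %s", task_id)
```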
@@ -1,193 +0,0 @@
-"""Event Broadcaster - bridges event_log to WebSocket clients.
-
-When events are logged, they are broadcast to all connected dashboard clients
-via WebSocket for real-time activity feed updates.
-"""
-
-import asyncio
-import logging
-from typing import Optional
-
-try:
-    from swarm.event_log import EventLogEntry
-except ImportError:
-    EventLogEntry = None
-
-logger = logging.getLogger(__name__)
-
-
-class EventBroadcaster:
-    """Broadcasts events to WebSocket clients.
-
-    Usage:
-        from infrastructure.events.broadcaster import event_broadcaster
-        event_broadcaster.broadcast(event)
-    """
-
-    def __init__(self) -> None:
-        self._ws_manager: Optional = None
-
-    def _get_ws_manager(self):
-        """Lazy import to avoid circular deps."""
-        if self._ws_manager is None:
-            try:
-                from infrastructure.ws_manager.handler import ws_manager
-
-                self._ws_manager = ws_manager
-            except Exception as exc:
-                logger.debug("WebSocket manager not available: %s", exc)
-        return self._ws_manager
-
-    async def broadcast(self, event: EventLogEntry) -> int:
-        """Broadcast an event to all connected WebSocket clients.
-
-        Args:
-            event: The event to broadcast
-
-        Returns:
-            Number of clients notified
-        """
-        ws_manager = self._get_ws_manager()
-        if not ws_manager:
-            return 0
-
-        # Build message payload
-        payload = {
-            "type": "event",
-            "payload": {
-                "id": event.id,
-                "event_type": event.event_type.value,
-                "source": event.source,
-                "task_id": event.task_id,
-                "agent_id": event.agent_id,
-                "timestamp": event.timestamp,
-                "data": event.data,
-            },
-        }
-
-        try:
-            # Broadcast to all connected clients
-            count = await ws_manager.broadcast_json(payload)
-            logger.debug("Broadcasted event %s to %d clients", event.id[:8], count)
-            return count
-        except Exception as exc:
-            logger.error("Failed to broadcast event: %s", exc)
-            return 0
-
-    def broadcast_sync(self, event: EventLogEntry) -> None:
-        """Synchronous wrapper for broadcast.
-
-        Use this from synchronous code - it schedules the async broadcast
-        in the event loop if one is running.
-        """
-        try:
-            asyncio.get_running_loop()
-            # Schedule in background, don't wait
-            asyncio.create_task(self.broadcast(event))
-        except RuntimeError:
-            # No event loop running, skip broadcast
-            pass
-
-
-# Global singleton
-event_broadcaster = EventBroadcaster()
-
-
-# Event type to icon/emoji mapping
-EVENT_ICONS = {
-    "task.created": "📝",
-    "task.bidding": "⏳",
-    "task.assigned": "👤",
-    "task.started": "▶️",
-    "task.completed": "✅",
-    "task.failed": "❌",
-    "agent.joined": "🟢",
-    "agent.left": "🔴",
-    "agent.status_changed": "🔄",
-    "bid.submitted": "💰",
-    "auction.closed": "🏁",
-    "tool.called": "🔧",
-    "tool.completed": "⚙️",
-    "tool.failed": "💥",
-    "system.error": "⚠️",
-    "system.warning": "🔶",
-    "system.info": "ℹ️",
-    "error.captured": "🐛",
-    "bug_report.created": "📋",
-}
-
-EVENT_LABELS = {
-    "task.created": "New task",
-    "task.bidding": "Bidding open",
-    "task.assigned": "Task assigned",
-    "task.started": "Task started",
-    "task.completed": "Task completed",
-    "task.failed": "Task failed",
-    "agent.joined": "Agent joined",
-    "agent.left": "Agent left",
-    "agent.status_changed": "Status changed",
-    "bid.submitted": "Bid submitted",
-    "auction.closed": "Auction closed",
-    "tool.called": "Tool called",
-    "tool.completed": "Tool completed",
-    "tool.failed": "Tool failed",
-    "system.error": "Error",
-    "system.warning": "Warning",
-    "system.info": "Info",
-    "error.captured": "Error captured",
-    "bug_report.created": "Bug report filed",
-}
-
-
-def get_event_icon(event_type: str) -> str:
-    """Get emoji icon for event type."""
-    return EVENT_ICONS.get(event_type, "•")
-
-
-def get_event_label(event_type: str) -> str:
-    """Get human-readable label for event type."""
-    return EVENT_LABELS.get(event_type, event_type)
-
-
-def format_event_for_display(event: EventLogEntry) -> dict:
-    """Format event for display in activity feed.
-
-    Returns dict with display-friendly fields.
-    """
-    data = event.data or {}
-
-    # Build description based on event type
-    description = ""
-    if event.event_type.value == "task.created":
-        desc = data.get("description", "")
-        description = desc[:60] + "..." if len(desc) > 60 else desc
-    elif event.event_type.value == "task.assigned":
-        agent = event.agent_id[:8] if event.agent_id else "unknown"
-        bid = data.get("bid_sats", "?")
-        description = f"to {agent} ({bid} sats)"
-    elif event.event_type.value == "bid.submitted":
-        bid = data.get("bid_sats", "?")
-        description = f"{bid} sats"
-    elif event.event_type.value == "agent.joined":
-        persona = data.get("persona_id", "")
-        description = f"Persona: {persona}" if persona else "New agent"
-    else:
-        # Generic: use any string data
-        for key in ["message", "reason", "description"]:
-            if key in data:
-                val = str(data[key])
-                description = val[:60] + "..." if len(val) > 60 else val
-                break
-
-    return {
-        "id": event.id,
-        "icon": get_event_icon(event.event_type.value),
-        "label": get_event_label(event.event_type.value),
-        "type": event.event_type.value,
-        "source": event.source,
-        "description": description,
-        "timestamp": event.timestamp,
-        "time_short": event.timestamp[11:19] if event.timestamp else "",
-        "task_id": event.task_id,
-        "agent_id": event.agent_id,
-    }
@@ -9,7 +9,8 @@ import asyncio
 import json
 import logging
 import sqlite3
-from collections.abc import Callable, Coroutine
+from collections.abc import Callable, Coroutine, Generator
+from contextlib import closing, contextmanager
 from dataclasses import dataclass, field
 from datetime import UTC, datetime
 from pathlib import Path
@@ -63,7 +64,7 @@ class EventBus:

         @bus.subscribe("agent.task.*")
         async def handle_task(event: Event):
-            print(f"Task event: {event.data}")
+            logger.debug("Task event: %s", event.data)

         await bus.publish(Event(
             type="agent.task.assigned",
@@ -99,51 +100,48 @@ class EventBus:
         if self._persistence_db_path is None:
             return
         self._persistence_db_path.parent.mkdir(parents=True, exist_ok=True)
-        conn = sqlite3.connect(str(self._persistence_db_path))
-        try:
+        with closing(sqlite3.connect(str(self._persistence_db_path))) as conn:
             conn.execute("PRAGMA journal_mode=WAL")
             conn.execute("PRAGMA busy_timeout=5000")
             conn.executescript(_EVENTS_SCHEMA)
             conn.commit()
-        finally:
-            conn.close()

-    def _get_persistence_conn(self) -> sqlite3.Connection | None:
+    @contextmanager
+    def _get_persistence_conn(self) -> Generator[sqlite3.Connection | None, None, None]:
         """Get a connection to the persistence database."""
         if self._persistence_db_path is None:
-            return None
-        conn = sqlite3.connect(str(self._persistence_db_path))
-        conn.row_factory = sqlite3.Row
-        conn.execute("PRAGMA busy_timeout=5000")
-        return conn
+            yield None
+            return
+        with closing(sqlite3.connect(str(self._persistence_db_path))) as conn:
+            conn.row_factory = sqlite3.Row
+            conn.execute("PRAGMA busy_timeout=5000")
+            yield conn

     def _persist_event(self, event: Event) -> None:
         """Write an event to the persistence database."""
-        conn = self._get_persistence_conn()
+        with self._get_persistence_conn() as conn:
             if conn is None:
                 return
             try:
                 task_id = event.data.get("task_id", "")
                 agent_id = event.data.get("agent_id", "")
                 conn.execute(
                     "INSERT OR IGNORE INTO events "
                     "(id, event_type, source, task_id, agent_id, data, timestamp) "
                     "VALUES (?, ?, ?, ?, ?, ?, ?)",
                     (
                         event.id,
                         event.type,
                         event.source,
                         task_id,
                         agent_id,
                         json.dumps(event.data),
                         event.timestamp,
                     ),
                 )
                 conn.commit()
             except Exception as exc:
                 logger.debug("Failed to persist event: %s", exc)
-        finally:
-            conn.close()

     # ── Replay ───────────────────────────────────────────────────────────
@@ -165,45 +163,43 @@ class EventBus:
         Returns:
             List of Event objects from persistent storage.
         """
-        conn = self._get_persistence_conn()
+        with self._get_persistence_conn() as conn:
             if conn is None:
                 return []

             try:
                 conditions = []
                 params: list = []

                 if event_type:
                     conditions.append("event_type = ?")
                     params.append(event_type)
                 if source:
                     conditions.append("source = ?")
                     params.append(source)
                 if task_id:
                     conditions.append("task_id = ?")
                     params.append(task_id)

                 where = " AND ".join(conditions) if conditions else "1=1"
                 sql = f"SELECT * FROM events WHERE {where} ORDER BY timestamp DESC LIMIT ?"
                 params.append(limit)

                 rows = conn.execute(sql, params).fetchall()

                 return [
                     Event(
                         id=row["id"],
                         type=row["event_type"],
                         source=row["source"],
                         data=json.loads(row["data"]) if row["data"] else {},
                         timestamp=row["timestamp"],
                     )
                     for row in rows
                 ]
             except Exception as exc:
                 logger.debug("Failed to replay events: %s", exc)
                 return []
-        finally:
-            conn.close()

     # ── Subscribe / Publish ──────────────────────────────────────────────
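The pattern both `_get_persistence_conn` call sites now rely on is a context manager that may yield `None`: callers keep their `if conn is None` guard, but connections can no longer leak on an exception path. A self-contained sketch of the idiom — names here are illustrative, not from the codebase:

```python
import sqlite3
from contextlib import closing, contextmanager

@contextmanager
def maybe_conn(path: str | None):
    if path is None:
        yield None  # caller must check before using
        return
    with closing(sqlite3.connect(path)) as conn:
        yield conn  # closed automatically on exit, even on error

with maybe_conn(None) as conn:
    if conn is None:
        print("persistence disabled")
```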
@@ -144,6 +144,65 @@ class ShellHand:

         return None

+    @staticmethod
+    def _build_run_env(env: dict | None) -> dict:
+        """Merge *env* overrides into a copy of the current environment."""
+        import os
+
+        run_env = os.environ.copy()
+        if env:
+            run_env.update(env)
+        return run_env
+
+    async def _execute_subprocess(
+        self,
+        command: str,
+        effective_timeout: int,
+        cwd: str | None,
+        run_env: dict,
+        start: float,
+    ) -> ShellResult:
+        """Run *command* as a subprocess with timeout enforcement."""
+        proc = await asyncio.create_subprocess_shell(
+            command,
+            stdout=asyncio.subprocess.PIPE,
+            stderr=asyncio.subprocess.PIPE,
+            cwd=cwd,
+            env=run_env,
+        )
+
+        try:
+            stdout_bytes, stderr_bytes = await asyncio.wait_for(
+                proc.communicate(), timeout=effective_timeout
+            )
+        except TimeoutError:
+            proc.kill()
+            await proc.wait()
+            latency = (time.time() - start) * 1000
+            logger.warning("Shell command timed out after %ds: %s", effective_timeout, command)
+            return ShellResult(
+                command=command,
+                success=False,
+                exit_code=-1,
+                error=f"Command timed out after {effective_timeout}s",
+                latency_ms=latency,
+                timed_out=True,
+            )
+
+        latency = (time.time() - start) * 1000
+        exit_code = proc.returncode if proc.returncode is not None else -1
+        stdout = stdout_bytes.decode("utf-8", errors="replace").strip()
+        stderr = stderr_bytes.decode("utf-8", errors="replace").strip()
+
+        return ShellResult(
+            command=command,
+            success=exit_code == 0,
+            exit_code=exit_code,
+            stdout=stdout,
+            stderr=stderr,
+            latency_ms=latency,
+        )
+
     async def run(
         self,
         command: str,
@@ -164,7 +223,6 @@ class ShellHand:
         """
         start = time.time()

-        # Validate
         validation_error = self._validate_command(command)
         if validation_error:
             return ShellResult(
@@ -178,52 +236,8 @@ class ShellHand:
         cwd = working_dir or self._working_dir

         try:
-            import os
-
-            run_env = os.environ.copy()
-            if env:
-                run_env.update(env)
-
-            proc = await asyncio.create_subprocess_shell(
-                command,
-                stdout=asyncio.subprocess.PIPE,
-                stderr=asyncio.subprocess.PIPE,
-                cwd=cwd,
-                env=run_env,
-            )
-
-            try:
-                stdout_bytes, stderr_bytes = await asyncio.wait_for(
-                    proc.communicate(), timeout=effective_timeout
-                )
-            except TimeoutError:
-                proc.kill()
-                await proc.wait()
-                latency = (time.time() - start) * 1000
-                logger.warning("Shell command timed out after %ds: %s", effective_timeout, command)
-                return ShellResult(
-                    command=command,
-                    success=False,
-                    exit_code=-1,
-                    error=f"Command timed out after {effective_timeout}s",
-                    latency_ms=latency,
-                    timed_out=True,
-                )
-
-            latency = (time.time() - start) * 1000
-            exit_code = proc.returncode or 0
-            stdout = stdout_bytes.decode("utf-8", errors="replace").strip()
-            stderr = stderr_bytes.decode("utf-8", errors="replace").strip()
-
-            return ShellResult(
-                command=command,
-                success=exit_code == 0,
-                exit_code=exit_code,
-                stdout=stdout,
-                stderr=stderr,
-                latency_ms=latency,
-            )
-
+            run_env = self._build_run_env(env)
+            return await self._execute_subprocess(command, effective_timeout, cwd, run_env, start)
         except Exception as exc:
             latency = (time.time() - start) * 1000
             logger.warning("Shell command failed: %s — %s", command, exc)
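A hypothetical call site for the refactored runner — the import path and constructor arguments are guesses; only the `run()` coroutine and the `ShellResult` fields visible in this diff are assumed:

```python
import asyncio

from hands.shell import ShellHand  # import path assumed

async def main() -> None:
    hand = ShellHand()  # constructor defaults assumed
    result = await hand.run("echo hello")
    print(result.success, result.exit_code, result.stdout, f"{result.latency_ms:.0f}ms")

asyncio.run(main())
```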
src/infrastructure/matrix_config.py (new file, 266 lines)
@@ -0,0 +1,266 @@
+"""Matrix configuration loader utility.
+
+Provides a typed dataclass for Matrix world configuration and a loader
+that fetches settings from YAML with sensible defaults.
+"""
+
+import logging
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any
+
+import yaml
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class PointLight:
+    """A single point light in the Matrix world."""
+
+    color: str = "#FFFFFF"
+    intensity: float = 1.0
+    position: dict[str, float] = field(default_factory=lambda: {"x": 0, "y": 0, "z": 0})
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> "PointLight":
+        """Create a PointLight from a dictionary with defaults."""
+        return cls(
+            color=data.get("color", "#FFFFFF"),
+            intensity=data.get("intensity", 1.0),
+            position=data.get("position", {"x": 0, "y": 0, "z": 0}),
+        )
+
+
+def _default_point_lights_factory() -> list[PointLight]:
+    """Factory function for default point lights."""
+    return [
+        PointLight(
+            color="#FFAA55",  # Warm amber (Workshop)
+            intensity=1.2,
+            position={"x": 0, "y": 5, "z": 0},
+        ),
+        PointLight(
+            color="#3B82F6",  # Cool blue (Matrix)
+            intensity=0.8,
+            position={"x": -5, "y": 3, "z": -5},
+        ),
+        PointLight(
+            color="#A855F7",  # Purple accent
+            intensity=0.6,
+            position={"x": 5, "y": 3, "z": 5},
+        ),
+    ]
+
+
+@dataclass
+class LightingConfig:
+    """Lighting configuration for the Matrix world."""
+
+    ambient_color: str = "#FFAA55"  # Warm amber (Workshop warmth)
+    ambient_intensity: float = 0.5
+    point_lights: list[PointLight] = field(default_factory=_default_point_lights_factory)
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any] | None) -> "LightingConfig":
+        """Create a LightingConfig from a dictionary with defaults."""
+        if data is None:
+            data = {}
+
+        point_lights_data = data.get("point_lights", [])
+        point_lights = (
+            [PointLight.from_dict(pl) for pl in point_lights_data]
+            if point_lights_data
+            else _default_point_lights_factory()
+        )
+
+        return cls(
+            ambient_color=data.get("ambient_color", "#FFAA55"),
+            ambient_intensity=data.get("ambient_intensity", 0.5),
+            point_lights=point_lights,
+        )
+
+
+@dataclass
+class EnvironmentConfig:
+    """Environment settings for the Matrix world."""
+
+    rain_enabled: bool = False
+    starfield_enabled: bool = True
+    fog_color: str = "#0f0f23"
+    fog_density: float = 0.02
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any] | None) -> "EnvironmentConfig":
+        """Create an EnvironmentConfig from a dictionary with defaults."""
+        if data is None:
+            data = {}
+        return cls(
+            rain_enabled=data.get("rain_enabled", False),
+            starfield_enabled=data.get("starfield_enabled", True),
+            fog_color=data.get("fog_color", "#0f0f23"),
+            fog_density=data.get("fog_density", 0.02),
+        )
+
+
+@dataclass
+class FeaturesConfig:
+    """Feature toggles for the Matrix world."""
+
+    chat_enabled: bool = True
+    visitor_avatars: bool = True
+    pip_familiar: bool = True
+    workshop_portal: bool = True
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any] | None) -> "FeaturesConfig":
+        """Create a FeaturesConfig from a dictionary with defaults."""
+        if data is None:
+            data = {}
+        return cls(
+            chat_enabled=data.get("chat_enabled", True),
+            visitor_avatars=data.get("visitor_avatars", True),
+            pip_familiar=data.get("pip_familiar", True),
+            workshop_portal=data.get("workshop_portal", True),
+        )
+
+
+@dataclass
+class AgentConfig:
+    """Configuration for a single Matrix agent."""
+
+    name: str = ""
+    role: str = ""
+    enabled: bool = True
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> "AgentConfig":
+        """Create an AgentConfig from a dictionary with defaults."""
+        return cls(
+            name=data.get("name", ""),
+            role=data.get("role", ""),
+            enabled=data.get("enabled", True),
+        )
+
+
+@dataclass
+class AgentsConfig:
+    """Agent registry configuration."""
+
+    default_count: int = 5
+    max_count: int = 20
+    agents: list[AgentConfig] = field(default_factory=list)
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any] | None) -> "AgentsConfig":
+        """Create an AgentsConfig from a dictionary with defaults."""
+        if data is None:
+            data = {}
+
+        agents_data = data.get("agents", [])
+        agents = [AgentConfig.from_dict(a) for a in agents_data] if agents_data else []
+
+        return cls(
+            default_count=data.get("default_count", 5),
+            max_count=data.get("max_count", 20),
+            agents=agents,
+        )
+
+
+@dataclass
+class MatrixConfig:
+    """Complete Matrix world configuration.
+
+    Combines lighting, environment, features, and agent settings
+    into a single configuration object.
+    """
+
+    lighting: LightingConfig = field(default_factory=LightingConfig)
+    environment: EnvironmentConfig = field(default_factory=EnvironmentConfig)
+    features: FeaturesConfig = field(default_factory=FeaturesConfig)
+    agents: AgentsConfig = field(default_factory=AgentsConfig)
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any] | None) -> "MatrixConfig":
+        """Create a MatrixConfig from a dictionary with defaults for missing sections."""
+        if data is None:
+            data = {}
+        return cls(
+            lighting=LightingConfig.from_dict(data.get("lighting")),
+            environment=EnvironmentConfig.from_dict(data.get("environment")),
+            features=FeaturesConfig.from_dict(data.get("features")),
+            agents=AgentsConfig.from_dict(data.get("agents")),
+        )
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert the configuration to a plain dictionary."""
+        return {
+            "lighting": {
+                "ambient_color": self.lighting.ambient_color,
+                "ambient_intensity": self.lighting.ambient_intensity,
+                "point_lights": [
+                    {
+                        "color": pl.color,
+                        "intensity": pl.intensity,
+                        "position": pl.position,
+                    }
+                    for pl in self.lighting.point_lights
+                ],
+            },
+            "environment": {
+                "rain_enabled": self.environment.rain_enabled,
+                "starfield_enabled": self.environment.starfield_enabled,
+                "fog_color": self.environment.fog_color,
+                "fog_density": self.environment.fog_density,
+            },
+            "features": {
+                "chat_enabled": self.features.chat_enabled,
+                "visitor_avatars": self.features.visitor_avatars,
+                "pip_familiar": self.features.pip_familiar,
+                "workshop_portal": self.features.workshop_portal,
+            },
+            "agents": {
+                "default_count": self.agents.default_count,
+                "max_count": self.agents.max_count,
+                "agents": [
+                    {"name": a.name, "role": a.role, "enabled": a.enabled}
+                    for a in self.agents.agents
+                ],
+            },
+        }
+
+
+def load_from_yaml(path: str | Path) -> MatrixConfig:
+    """Load Matrix configuration from a YAML file.
+
+    Missing keys are filled with sensible defaults. If the file
+    cannot be read or parsed, returns a fully default configuration.
+
+    Args:
+        path: Path to the YAML configuration file.
+
+    Returns:
+        A MatrixConfig instance with loaded or default values.
+    """
+    path = Path(path)
+
+    if not path.exists():
+        logger.warning("Matrix config file not found: %s, using defaults", path)
+        return MatrixConfig()
+
+    try:
+        with open(path, encoding="utf-8") as f:
+            raw_data = yaml.safe_load(f)
+
+        if not isinstance(raw_data, dict):
+            logger.warning("Matrix config invalid format, using defaults")
+            return MatrixConfig()
+
+        return MatrixConfig.from_dict(raw_data)
+
+    except yaml.YAMLError as exc:
+        logger.warning("Matrix config YAML parse error: %s, using defaults", exc)
+        return MatrixConfig()
+    except OSError as exc:
+        logger.warning("Matrix config read error: %s, using defaults", exc)
+        return MatrixConfig()
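Loading is tolerant by design: a missing file, malformed YAML, or a non-dict top level all degrade to defaults rather than raising. A short usage sketch — the import path prefix and the YAML file location are assumptions:

```python
from infrastructure.matrix_config import load_from_yaml  # import path assumed

cfg = load_from_yaml("config/matrix.yaml")  # path is an assumption
print(cfg.lighting.ambient_color)   # "#FFAA55" unless overridden
print(cfg.environment.fog_density)  # 0.02 by default
print(cfg.to_dict()["features"]["chat_enabled"])
```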
@@ -13,7 +13,7 @@ import logging
 from dataclasses import dataclass, field
 from enum import Enum, auto

-from config import settings
+from config import normalize_ollama_url, settings

 logger = logging.getLogger(__name__)

@@ -93,18 +93,6 @@ KNOWN_MODEL_CAPABILITIES: dict[str, set[ModelCapability]] = {
         ModelCapability.VISION,
     },
     # Qwen series
-    "qwen3.5": {
-        ModelCapability.TEXT,
-        ModelCapability.TOOLS,
-        ModelCapability.JSON,
-        ModelCapability.STREAMING,
-    },
-    "qwen3.5:latest": {
-        ModelCapability.TEXT,
-        ModelCapability.TOOLS,
-        ModelCapability.JSON,
-        ModelCapability.STREAMING,
-    },
     "qwen2.5": {
         ModelCapability.TEXT,
         ModelCapability.TOOLS,
@@ -271,9 +259,8 @@ DEFAULT_FALLBACK_CHAINS: dict[ModelCapability, list[str]] = {
     ],
     ModelCapability.TOOLS: [
         "llama3.1:8b-instruct",  # Best tool use
-        "qwen3.5:latest",  # Qwen 3.5 — strong tool use
-        "llama3.2:3b",  # Smaller but capable
         "qwen2.5:7b",  # Reliable fallback
+        "llama3.2:3b",  # Smaller but capable
     ],
     ModelCapability.AUDIO: [
         # Audio models are less common in Ollama
@@ -320,7 +307,7 @@ class MultiModalManager:
         import json
         import urllib.request

-        url = self.ollama_url.replace("localhost", "127.0.0.1")
+        url = normalize_ollama_url(self.ollama_url)
        req = urllib.request.Request(
             f"{url}/api/tags",
             method="GET",
@@ -475,7 +462,7 @@ class MultiModalManager:

         logger.info("Pulling model: %s", model_name)

-        url = self.ollama_url.replace("localhost", "127.0.0.1")
+        url = normalize_ollama_url(self.ollama_url)
         req = urllib.request.Request(
             f"{url}/api/pull",
             method="POST",
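Both call sites now delegate to `config.normalize_ollama_url`, whose implementation is not part of this diff. Judging from the inline expression it replaces, it plausibly looks something like this sketch — behaviour inferred, not confirmed:

```python
def normalize_ollama_url(url: str) -> str:
    """Force IPv4 loopback and strip any trailing slash (behaviour inferred)."""
    return url.replace("localhost", "127.0.0.1").rstrip("/")

print(normalize_ollama_url("http://localhost:11434/"))  # http://127.0.0.1:11434
```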
@@ -11,6 +11,8 @@ model roles (student, teacher, judge/PRM) run on dedicated resources.
 import logging
 import sqlite3
 import threading
+from collections.abc import Generator
+from contextlib import closing, contextmanager
 from dataclasses import dataclass
 from datetime import UTC, datetime
 from enum import StrEnum
@@ -60,36 +62,37 @@ class CustomModel:
         self.registered_at = datetime.now(UTC).isoformat()
 
 
-def _get_conn() -> sqlite3.Connection:
+@contextmanager
+def _get_conn() -> Generator[sqlite3.Connection, None, None]:
     DB_PATH.parent.mkdir(parents=True, exist_ok=True)
-    conn = sqlite3.connect(str(DB_PATH))
+    with closing(sqlite3.connect(str(DB_PATH))) as conn:
         conn.row_factory = sqlite3.Row
         conn.execute("PRAGMA journal_mode=WAL")
         conn.execute("PRAGMA busy_timeout=5000")
         conn.execute("""
            CREATE TABLE IF NOT EXISTS custom_models (
                name TEXT PRIMARY KEY,
                format TEXT NOT NULL,
                path TEXT NOT NULL,
                role TEXT NOT NULL DEFAULT 'general',
                context_window INTEGER NOT NULL DEFAULT 4096,
                description TEXT NOT NULL DEFAULT '',
                registered_at TEXT NOT NULL,
                active INTEGER NOT NULL DEFAULT 1,
                default_temperature REAL NOT NULL DEFAULT 0.7,
                max_tokens INTEGER NOT NULL DEFAULT 2048
            )
        """)
         conn.execute("""
            CREATE TABLE IF NOT EXISTS agent_model_assignments (
                agent_id TEXT PRIMARY KEY,
                model_name TEXT NOT NULL,
                assigned_at TEXT NOT NULL,
                FOREIGN KEY (model_name) REFERENCES custom_models(name)
            )
        """)
         conn.commit()
-    return conn
+        yield conn
 
 
 class ModelRegistry:
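The shape of that change, reduced to a self-contained sketch: contextlib.closing guarantees conn.close() runs even if the caller's body raises, which the old return-the-connection helper could not ensure. The connect() name and ":memory:" path below are illustrative, not part of the diff.

    import sqlite3
    from collections.abc import Generator
    from contextlib import closing, contextmanager

    @contextmanager
    def connect(path: str) -> Generator[sqlite3.Connection, None, None]:
        # closing() closes the connection on normal exit and on exceptions alike.
        with closing(sqlite3.connect(path)) as conn:
            conn.row_factory = sqlite3.Row
            yield conn

    with connect(":memory:") as conn:
        conn.execute("CREATE TABLE t (x INTEGER)")
        conn.execute("INSERT INTO t VALUES (1)")
        print(conn.execute("SELECT x FROM t").fetchone()["x"])  # -> 1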
@@ -105,23 +108,22 @@ class ModelRegistry:
     def _load_from_db(self) -> None:
         """Bootstrap cache from SQLite."""
         try:
-            conn = _get_conn()
+            with _get_conn() as conn:
                 for row in conn.execute("SELECT * FROM custom_models WHERE active = 1").fetchall():
                     self._models[row["name"]] = CustomModel(
                         name=row["name"],
                         format=ModelFormat(row["format"]),
                         path=row["path"],
                         role=ModelRole(row["role"]),
                         context_window=row["context_window"],
                         description=row["description"],
                         registered_at=row["registered_at"],
                         active=bool(row["active"]),
                         default_temperature=row["default_temperature"],
                         max_tokens=row["max_tokens"],
                     )
                 for row in conn.execute("SELECT * FROM agent_model_assignments").fetchall():
                     self._agent_assignments[row["agent_id"]] = row["model_name"]
-            conn.close()
         except Exception as exc:
             logger.warning("Failed to load model registry from DB: %s", exc)
 
@@ -130,29 +132,28 @@ class ModelRegistry:
     def register(self, model: CustomModel) -> CustomModel:
         """Register a new custom model."""
         with self._lock:
-            conn = _get_conn()
+            with _get_conn() as conn:
                 conn.execute(
                     """
                     INSERT OR REPLACE INTO custom_models
                     (name, format, path, role, context_window, description,
                      registered_at, active, default_temperature, max_tokens)
                     VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                     """,
                     (
                         model.name,
                         model.format.value,
                         model.path,
                         model.role.value,
                         model.context_window,
                         model.description,
                         model.registered_at,
                         int(model.active),
                         model.default_temperature,
                         model.max_tokens,
                     ),
                 )
                 conn.commit()
-            conn.close()
             self._models[model.name] = model
             logger.info("Registered model: %s (%s)", model.name, model.format.value)
             return model
@@ -162,11 +163,10 @@ class ModelRegistry:
         with self._lock:
             if name not in self._models:
                 return False
-            conn = _get_conn()
+            with _get_conn() as conn:
                 conn.execute("DELETE FROM custom_models WHERE name = ?", (name,))
                 conn.execute("DELETE FROM agent_model_assignments WHERE model_name = ?", (name,))
                 conn.commit()
-            conn.close()
             del self._models[name]
             # Remove any agent assignments using this model
             self._agent_assignments = {
@@ -193,13 +193,12 @@ class ModelRegistry:
             return False
         with self._lock:
             model.active = active
-            conn = _get_conn()
+            with _get_conn() as conn:
                 conn.execute(
                     "UPDATE custom_models SET active = ? WHERE name = ?",
                     (int(active), name),
                 )
                 conn.commit()
-            conn.close()
             return True
 
     # ── Agent-model assignments ────────────────────────────────────────────
@@ -210,17 +209,16 @@ class ModelRegistry:
             return False
         with self._lock:
             now = datetime.now(UTC).isoformat()
-            conn = _get_conn()
+            with _get_conn() as conn:
                 conn.execute(
                     """
                     INSERT OR REPLACE INTO agent_model_assignments
                     (agent_id, model_name, assigned_at)
                     VALUES (?, ?, ?)
                     """,
                     (agent_id, model_name, now),
                 )
                 conn.commit()
-            conn.close()
             self._agent_assignments[agent_id] = model_name
             logger.info("Assigned model %s to agent %s", model_name, agent_id)
             return True
@@ -230,13 +228,12 @@ class ModelRegistry:
         with self._lock:
             if agent_id not in self._agent_assignments:
                 return False
-            conn = _get_conn()
+            with _get_conn() as conn:
                 conn.execute(
                     "DELETE FROM agent_model_assignments WHERE agent_id = ?",
                     (agent_id,),
                 )
                 conn.commit()
-            conn.close()
             del self._agent_assignments[agent_id]
             return True
 
src/infrastructure/presence.py (new file, 333 lines)

"""Presence state serializer — transforms ADR-023 presence dicts for consumers.

Converts the raw presence schema (version, liveness, mood, energy, etc.)
into the camelCase world-state payload consumed by the Workshop 3D renderer
and WebSocket gateway.
"""

import logging
import time
from datetime import UTC, datetime

logger = logging.getLogger(__name__)

# Default Pip familiar state (used when familiar module unavailable)
DEFAULT_PIP_STATE = {
    "name": "Pip",
    "mood": "sleepy",
    "energy": 0.5,
    "color": "0x00b450",  # emerald green
    "trail_color": "0xdaa520",  # gold
}


def _get_familiar_state() -> dict:
    """Get Pip familiar state from the familiar module, with graceful fallback.

    Returns a dict with name, mood, energy, color, and trail_color.
    Falls back to the default state if the familiar module is unavailable or raises.
    """
    try:
        from timmy.familiar import pip_familiar

        snapshot = pip_familiar.snapshot()
        # Map PipSnapshot fields to the expected agent_state format
        return {
            "name": snapshot.name,
            "mood": snapshot.state,
            "energy": DEFAULT_PIP_STATE["energy"],  # Pip doesn't track energy yet
            "color": DEFAULT_PIP_STATE["color"],
            "trail_color": DEFAULT_PIP_STATE["trail_color"],
        }
    except Exception as exc:
        logger.warning("Familiar state unavailable, using default: %s", exc)
        return DEFAULT_PIP_STATE.copy()


# Valid bark styles for Matrix protocol
BARK_STYLES = {"speech", "thought", "whisper", "shout"}


def produce_bark(
    agent_id: str, text: str, reply_to: str | None = None, style: str = "speech"
) -> dict:
    """Format a chat response as a Matrix bark message.

    Barks appear as floating text above agents in the Matrix 3D world with
    typing animation. This function formats the text for the Matrix protocol.

    Parameters
    ----------
    agent_id:
        Unique identifier for the agent (e.g. ``"timmy"``).
    text:
        The chat response text to display as a bark.
    reply_to:
        Optional message ID or reference this bark is replying to.
    style:
        Visual style of the bark. One of: "speech" (default), "thought",
        "whisper", "shout". Invalid styles fall back to "speech".

    Returns
    -------
    dict
        Bark message with keys ``type``, ``agent_id``, ``data`` (containing
        ``text``, ``reply_to``, ``style``), and ``ts``.

    Examples
    --------
    >>> produce_bark("timmy", "Hello world!")
    {
        "type": "bark",
        "agent_id": "timmy",
        "data": {"text": "Hello world!", "reply_to": None, "style": "speech"},
        "ts": 1742529600,
    }
    """
    # Validate and normalize style
    if style not in BARK_STYLES:
        style = "speech"

    # Truncate text to 280 characters (bark, not essay)
    truncated_text = text[:280] if text else ""

    return {
        "type": "bark",
        "agent_id": agent_id,
        "data": {
            "text": truncated_text,
            "reply_to": reply_to,
            "style": style,
        },
        "ts": int(time.time()),
    }


def produce_thought(
    agent_id: str, thought_text: str, thought_id: int, chain_id: str | None = None
) -> dict:
    """Format a thinking engine thought as a Matrix thought message.

    Thoughts appear as subtle floating text in the 3D world, streaming from
    Timmy's thinking engine (/thinking/api). This function wraps thoughts in
    Matrix protocol format.

    Parameters
    ----------
    agent_id:
        Unique identifier for the agent (e.g. ``"timmy"``).
    thought_text:
        The thought text to display. Truncated to 500 characters.
    thought_id:
        Unique identifier for this thought (sequence number).
    chain_id:
        Optional chain identifier grouping related thoughts.

    Returns
    -------
    dict
        Thought message with keys ``type``, ``agent_id``, ``data`` (containing
        ``text``, ``thought_id``, ``chain_id``), and ``ts``.

    Examples
    --------
    >>> produce_thought("timmy", "Considering the options...", 42, "chain-123")
    {
        "type": "thought",
        "agent_id": "timmy",
        "data": {"text": "Considering the options...", "thought_id": 42, "chain_id": "chain-123"},
        "ts": 1742529600,
    }
    """
    # Truncate text to 500 characters (thoughts can be longer than barks)
    truncated_text = thought_text[:500] if thought_text else ""

    return {
        "type": "thought",
        "agent_id": agent_id,
        "data": {
            "text": truncated_text,
            "thought_id": thought_id,
            "chain_id": chain_id,
        },
        "ts": int(time.time()),
    }


def serialize_presence(presence: dict) -> dict:
    """Transform an ADR-023 presence dict into the world-state API shape.

    Parameters
    ----------
    presence:
        Raw presence dict as written by
        :func:`~timmy.workshop_state.get_state_dict` or read from
        ``~/.timmy/presence.json``.

    Returns
    -------
    dict
        CamelCase world-state payload with ``timmyState``, ``familiar``,
        ``activeThreads``, ``recentEvents``, ``concerns``, ``visitorPresent``,
        ``updatedAt``, and ``version`` keys.
    """
    return {
        "timmyState": {
            "mood": presence.get("mood", "calm"),
            "activity": presence.get("current_focus", "idle"),
            "energy": presence.get("energy", 0.5),
            "confidence": presence.get("confidence", 0.7),
        },
        "familiar": presence.get("familiar"),
        "activeThreads": presence.get("active_threads", []),
        "recentEvents": presence.get("recent_events", []),
        "concerns": presence.get("concerns", []),
        "visitorPresent": False,
        "updatedAt": presence.get("liveness", datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")),
        "version": presence.get("version", 1),
    }


# ---------------------------------------------------------------------------
# Status mapping: ADR-023 current_focus → Matrix agent status
# ---------------------------------------------------------------------------
_STATUS_KEYWORDS: dict[str, str] = {
    "thinking": "thinking",
    "speaking": "speaking",
    "talking": "speaking",
    "idle": "idle",
}


def _derive_status(current_focus: str) -> str:
    """Map a free-text current_focus value to a Matrix status enum.

    Returns one of: online, idle, thinking, speaking.
    """
    focus_lower = current_focus.lower()
    for keyword, status in _STATUS_KEYWORDS.items():
        if keyword in focus_lower:
            return status
    if current_focus and current_focus != "idle":
        return "online"
    return "idle"


def produce_agent_state(agent_id: str, presence: dict) -> dict:
    """Build a Matrix-compatible ``agent_state`` message from presence data.

    Parameters
    ----------
    agent_id:
        Unique identifier for the agent (e.g. ``"timmy"``).
    presence:
        Raw ADR-023 presence dict.

    Returns
    -------
    dict
        Message with keys ``type``, ``agent_id``, ``data``, and ``ts``.
    """
    return {
        "type": "agent_state",
        "agent_id": agent_id,
        "data": {
            "display_name": presence.get("display_name", agent_id.title()),
            "role": presence.get("role", "assistant"),
            "status": _derive_status(presence.get("current_focus", "idle")),
            "mood": presence.get("mood", "calm"),
            "energy": presence.get("energy", 0.5),
            "bark": presence.get("bark", ""),
            "familiar": _get_familiar_state(),
        },
        "ts": int(time.time()),
    }


def produce_system_status() -> dict:
    """Generate a system_status message for the Matrix.

    Returns a dict with system health metrics including agent count,
    visitor count, uptime, thinking engine status, and memory count.

    Returns
    -------
    dict
        Message with keys ``type``, ``data`` (containing ``agents_online``,
        ``visitors``, ``uptime_seconds``, ``thinking_active``, ``memory_count``),
        and ``ts``.

    Examples
    --------
    >>> produce_system_status()
    {
        "type": "system_status",
        "data": {
            "agents_online": 5,
            "visitors": 2,
            "uptime_seconds": 3600,
            "thinking_active": True,
            "memory_count": 150,
        },
        "ts": 1742529600,
    }
    """
    # Count agents with status != offline
    agents_online = 0
    try:
        from timmy.agents.loader import list_agents

        agents = list_agents()
        agents_online = sum(1 for a in agents if a.get("status", "") not in ("offline", ""))
    except Exception as exc:
        logger.debug("Failed to count agents: %s", exc)

    # Count visitors from WebSocket clients
    visitors = 0
    try:
        from dashboard.routes.world import _ws_clients

        visitors = len(_ws_clients)
    except Exception as exc:
        logger.debug("Failed to count visitors: %s", exc)

    # Calculate uptime
    uptime_seconds = 0
    try:
        from config import APP_START_TIME

        uptime_seconds = int((datetime.now(UTC) - APP_START_TIME).total_seconds())
    except Exception as exc:
        logger.debug("Failed to calculate uptime: %s", exc)

    # Check thinking engine status
    thinking_active = False
    try:
        from config import settings
        from timmy.thinking import thinking_engine

        thinking_active = settings.thinking_enabled and thinking_engine is not None
    except Exception as exc:
        logger.debug("Failed to check thinking status: %s", exc)

    # Count memories in vector store
    memory_count = 0
    try:
        from timmy.memory_system import get_memory_stats

        stats = get_memory_stats()
        memory_count = stats.get("total_entries", 0)
    except Exception as exc:
        logger.debug("Failed to count memories: %s", exc)

    return {
        "type": "system_status",
        "data": {
            "agents_online": agents_online,
            "visitors": visitors,
            "uptime_seconds": uptime_seconds,
            "thinking_active": thinking_active,
            "memory_count": memory_count,
        },
        "ts": int(time.time()),
    }
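A quick usage sketch of the new serializer. The import path below assumes the module is importable as infrastructure.presence, and the presence dict is made up for illustration.

    from infrastructure.presence import produce_bark, serialize_presence

    msg = produce_bark("timmy", "Hello world!", style="shout")
    assert msg["type"] == "bark" and msg["data"]["style"] == "shout"

    world = serialize_presence({"mood": "curious", "current_focus": "reading docs"})
    assert world["timmyState"]["activity"] == "reading docs"
    assert world["visitorPresent"] is False  # always False in this serializer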
src/infrastructure/protocol.py (new file, 261 lines)

"""Shared WebSocket message protocol for the Matrix frontend.

Defines all WebSocket message types as an enum and typed dataclasses
with ``to_json()`` / ``from_json()`` helpers so every producer and the
gateway speak the same language.

Message wire format
-------------------
.. code-block:: json

    {"type": "agent_state", "agent_id": "timmy", "data": {...}, "ts": 1234567890}
"""

import json
import logging
import time
from dataclasses import asdict, dataclass, field
from enum import StrEnum
from typing import Any

logger = logging.getLogger(__name__)


class MessageType(StrEnum):
    """All WebSocket message types defined by the Matrix PROTOCOL.md."""

    AGENT_STATE = "agent_state"
    VISITOR_STATE = "visitor_state"
    BARK = "bark"
    THOUGHT = "thought"
    SYSTEM_STATUS = "system_status"
    CONNECTION_ACK = "connection_ack"
    ERROR = "error"
    TASK_UPDATE = "task_update"
    MEMORY_FLASH = "memory_flash"


# ---------------------------------------------------------------------------
# Base message
# ---------------------------------------------------------------------------


@dataclass
class WSMessage:
    """Base WebSocket message with common envelope fields."""

    type: str
    ts: float = field(default_factory=time.time)

    def to_json(self) -> str:
        """Serialise the message to a JSON string."""
        return json.dumps(asdict(self))

    @classmethod
    def from_json(cls, raw: str) -> "WSMessage":
        """Deserialise a JSON string into the correct message subclass.

        Falls back to the base ``WSMessage`` when the ``type`` field is
        unrecognised.
        """
        data = json.loads(raw)
        msg_type = data.get("type")
        sub = _REGISTRY.get(msg_type)
        if sub is not None:
            return sub.from_json(raw)
        return cls(**data)


# ---------------------------------------------------------------------------
# Concrete message types
# ---------------------------------------------------------------------------


@dataclass
class AgentStateMessage(WSMessage):
    """State update for a single agent."""

    type: str = field(default=MessageType.AGENT_STATE)
    agent_id: str = ""
    data: dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_json(cls, raw: str) -> "AgentStateMessage":
        payload = json.loads(raw)
        return cls(
            type=payload.get("type", MessageType.AGENT_STATE),
            ts=payload.get("ts", time.time()),
            agent_id=payload.get("agent_id", ""),
            data=payload.get("data", {}),
        )


@dataclass
class VisitorStateMessage(WSMessage):
    """State update for a visitor / user session."""

    type: str = field(default=MessageType.VISITOR_STATE)
    visitor_id: str = ""
    data: dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_json(cls, raw: str) -> "VisitorStateMessage":
        payload = json.loads(raw)
        return cls(
            type=payload.get("type", MessageType.VISITOR_STATE),
            ts=payload.get("ts", time.time()),
            visitor_id=payload.get("visitor_id", ""),
            data=payload.get("data", {}),
        )


@dataclass
class BarkMessage(WSMessage):
    """A bark (chat-like utterance) from an agent."""

    type: str = field(default=MessageType.BARK)
    agent_id: str = ""
    content: str = ""

    @classmethod
    def from_json(cls, raw: str) -> "BarkMessage":
        payload = json.loads(raw)
        return cls(
            type=payload.get("type", MessageType.BARK),
            ts=payload.get("ts", time.time()),
            agent_id=payload.get("agent_id", ""),
            content=payload.get("content", ""),
        )


@dataclass
class ThoughtMessage(WSMessage):
    """An inner thought from an agent."""

    type: str = field(default=MessageType.THOUGHT)
    agent_id: str = ""
    content: str = ""

    @classmethod
    def from_json(cls, raw: str) -> "ThoughtMessage":
        payload = json.loads(raw)
        return cls(
            type=payload.get("type", MessageType.THOUGHT),
            ts=payload.get("ts", time.time()),
            agent_id=payload.get("agent_id", ""),
            content=payload.get("content", ""),
        )


@dataclass
class SystemStatusMessage(WSMessage):
    """System-wide status broadcast."""

    type: str = field(default=MessageType.SYSTEM_STATUS)
    status: str = ""
    data: dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_json(cls, raw: str) -> "SystemStatusMessage":
        payload = json.loads(raw)
        return cls(
            type=payload.get("type", MessageType.SYSTEM_STATUS),
            ts=payload.get("ts", time.time()),
            status=payload.get("status", ""),
            data=payload.get("data", {}),
        )


@dataclass
class ConnectionAckMessage(WSMessage):
    """Acknowledgement sent when a client connects."""

    type: str = field(default=MessageType.CONNECTION_ACK)
    client_id: str = ""

    @classmethod
    def from_json(cls, raw: str) -> "ConnectionAckMessage":
        payload = json.loads(raw)
        return cls(
            type=payload.get("type", MessageType.CONNECTION_ACK),
            ts=payload.get("ts", time.time()),
            client_id=payload.get("client_id", ""),
        )


@dataclass
class ErrorMessage(WSMessage):
    """Error message sent to a client."""

    type: str = field(default=MessageType.ERROR)
    code: str = ""
    message: str = ""

    @classmethod
    def from_json(cls, raw: str) -> "ErrorMessage":
        payload = json.loads(raw)
        return cls(
            type=payload.get("type", MessageType.ERROR),
            ts=payload.get("ts", time.time()),
            code=payload.get("code", ""),
            message=payload.get("message", ""),
        )


@dataclass
class TaskUpdateMessage(WSMessage):
    """Update about a task (created, assigned, completed, etc.)."""

    type: str = field(default=MessageType.TASK_UPDATE)
    task_id: str = ""
    status: str = ""
    data: dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_json(cls, raw: str) -> "TaskUpdateMessage":
        payload = json.loads(raw)
        return cls(
            type=payload.get("type", MessageType.TASK_UPDATE),
            ts=payload.get("ts", time.time()),
            task_id=payload.get("task_id", ""),
            status=payload.get("status", ""),
            data=payload.get("data", {}),
        )


@dataclass
class MemoryFlashMessage(WSMessage):
    """A flash of memory — a recalled or stored memory event."""

    type: str = field(default=MessageType.MEMORY_FLASH)
    agent_id: str = ""
    memory_key: str = ""
    content: str = ""

    @classmethod
    def from_json(cls, raw: str) -> "MemoryFlashMessage":
        payload = json.loads(raw)
        return cls(
            type=payload.get("type", MessageType.MEMORY_FLASH),
            ts=payload.get("ts", time.time()),
            agent_id=payload.get("agent_id", ""),
            memory_key=payload.get("memory_key", ""),
            content=payload.get("content", ""),
        )


# ---------------------------------------------------------------------------
# Registry for from_json dispatch
# ---------------------------------------------------------------------------

_REGISTRY: dict[str, type[WSMessage]] = {
    MessageType.AGENT_STATE: AgentStateMessage,
    MessageType.VISITOR_STATE: VisitorStateMessage,
    MessageType.BARK: BarkMessage,
    MessageType.THOUGHT: ThoughtMessage,
    MessageType.SYSTEM_STATUS: SystemStatusMessage,
    MessageType.CONNECTION_ACK: ConnectionAckMessage,
    MessageType.ERROR: ErrorMessage,
    MessageType.TASK_UPDATE: TaskUpdateMessage,
    MessageType.MEMORY_FLASH: MemoryFlashMessage,
}
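A round-trip sketch of the dispatch this module sets up (import path assumed): WSMessage.from_json looks up the "type" field in _REGISTRY, so the generic entry point hands back the concrete subclass.

    from infrastructure.protocol import BarkMessage, WSMessage

    raw = BarkMessage(agent_id="timmy", content="woof").to_json()
    msg = WSMessage.from_json(raw)  # dispatches on "type" via _REGISTRY
    assert isinstance(msg, BarkMessage)
    assert msg.content == "woof"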
@@ -2,6 +2,7 @@
 
 from .api import router
 from .cascade import CascadeRouter, Provider, ProviderStatus, get_router
+from .history import HealthHistoryStore, get_history_store
 
 __all__ = [
     "CascadeRouter",
@@ -9,4 +10,6 @@ __all__ = [
     "ProviderStatus",
     "get_router",
     "router",
+    "HealthHistoryStore",
+    "get_history_store",
 ]
@@ -8,6 +8,7 @@ from fastapi import APIRouter, Depends, HTTPException
 from pydantic import BaseModel
 
 from .cascade import CascadeRouter, get_router
+from .history import HealthHistoryStore, get_history_store
 
 logger = logging.getLogger(__name__)
 router = APIRouter(prefix="/api/v1/router", tags=["router"])
@@ -183,6 +184,33 @@ async def run_health_check(
     }
 
 
+@router.post("/reload")
+async def reload_config(
+    cascade: Annotated[CascadeRouter, Depends(get_cascade_router)],
+) -> dict[str, Any]:
+    """Hot-reload providers.yaml without restart.
+
+    Preserves circuit breaker state and metrics for existing providers.
+    """
+    try:
+        result = cascade.reload_config()
+        return {"status": "ok", **result}
+    except Exception as exc:
+        logger.error("Config reload failed: %s", exc)
+        raise HTTPException(status_code=500, detail=f"Reload failed: {exc}") from exc
+
+
+@router.get("/history")
+async def get_history(
+    hours: int = 24,
+    store: Annotated[HealthHistoryStore, Depends(get_history_store)] = None,
+) -> list[dict[str, Any]]:
+    """Get provider health history for the last N hours."""
+    if store is None:
+        store = get_history_store()
+    return store.get_history(hours=hours)
+
+
 @router.get("/config")
 async def get_config(
     cascade: Annotated[CascadeRouter, Depends(get_cascade_router)],
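Client-side sketch for the two new endpoints. The base URL and a running server are assumptions for illustration.

    import httpx

    base = "http://localhost:8000/api/v1/router"

    # Trigger a hot reload of providers.yaml
    r = httpx.post(f"{base}/reload")
    print(r.json())  # e.g. {"status": "ok", "total_providers": 3, "preserved": 3, ...}

    # Fetch the last 6 hours of provider health snapshots
    history = httpx.get(f"{base}/history", params={"hours": 6}).json()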
@@ -18,6 +18,8 @@ from enum import Enum
 from pathlib import Path
 from typing import Any
 
+from config import settings
+
 try:
     import yaml
 except ImportError:
@@ -100,7 +102,7 @@ class Provider:
     """LLM provider configuration and state."""
 
     name: str
-    type: str  # ollama, openai, anthropic, airllm
+    type: str  # ollama, openai, anthropic
     enabled: bool
     priority: int
     url: str | None = None
@@ -219,65 +221,56 @@ class CascadeRouter:
                 raise RuntimeError("PyYAML not installed")
 
             content = self.config_path.read_text()
-            # Expand environment variables
             content = self._expand_env_vars(content)
             data = yaml.safe_load(content)
 
-            # Load cascade settings
-            cascade = data.get("cascade", {})
-
-            # Load fallback chains
-            fallback_chains = data.get("fallback_chains", {})
-
-            # Load multi-modal settings
-            multimodal = data.get("multimodal", {})
-
-            self.config = RouterConfig(
-                timeout_seconds=cascade.get("timeout_seconds", 30),
-                max_retries_per_provider=cascade.get("max_retries_per_provider", 2),
-                retry_delay_seconds=cascade.get("retry_delay_seconds", 1),
-                circuit_breaker_failure_threshold=cascade.get("circuit_breaker", {}).get(
-                    "failure_threshold", 5
-                ),
-                circuit_breaker_recovery_timeout=cascade.get("circuit_breaker", {}).get(
-                    "recovery_timeout", 60
-                ),
-                circuit_breaker_half_open_max_calls=cascade.get("circuit_breaker", {}).get(
-                    "half_open_max_calls", 2
-                ),
-                auto_pull_models=multimodal.get("auto_pull", True),
-                fallback_chains=fallback_chains,
-            )
-
-            # Load providers
-            for p_data in data.get("providers", []):
-                # Skip disabled providers
-                if not p_data.get("enabled", False):
-                    continue
-
-                provider = Provider(
-                    name=p_data["name"],
-                    type=p_data["type"],
-                    enabled=p_data.get("enabled", True),
-                    priority=p_data.get("priority", 99),
-                    url=p_data.get("url"),
-                    api_key=p_data.get("api_key"),
-                    base_url=p_data.get("base_url"),
-                    models=p_data.get("models", []),
-                )
-
-                # Check if provider is actually available
-                if self._check_provider_available(provider):
-                    self.providers.append(provider)
-                else:
-                    logger.warning("Provider %s not available, skipping", provider.name)
-
-            # Sort by priority
-            self.providers.sort(key=lambda p: p.priority)
-
+            self.config = self._parse_router_config(data)
+            self._load_providers(data)
         except Exception as exc:
             logger.error("Failed to load config: %s", exc)
 
+    def _parse_router_config(self, data: dict) -> RouterConfig:
+        """Build a RouterConfig from parsed YAML data."""
+        cascade = data.get("cascade", {})
+        cb = cascade.get("circuit_breaker", {})
+        multimodal = data.get("multimodal", {})
+
+        return RouterConfig(
+            timeout_seconds=cascade.get("timeout_seconds", 30),
+            max_retries_per_provider=cascade.get("max_retries_per_provider", 2),
+            retry_delay_seconds=cascade.get("retry_delay_seconds", 1),
+            circuit_breaker_failure_threshold=cb.get("failure_threshold", 5),
+            circuit_breaker_recovery_timeout=cb.get("recovery_timeout", 60),
+            circuit_breaker_half_open_max_calls=cb.get("half_open_max_calls", 2),
+            auto_pull_models=multimodal.get("auto_pull", True),
+            fallback_chains=data.get("fallback_chains", {}),
+        )
+
+    def _load_providers(self, data: dict) -> None:
+        """Load, filter, and sort providers from parsed YAML data."""
+        for p_data in data.get("providers", []):
+            if not p_data.get("enabled", False):
+                continue
+
+            provider = Provider(
+                name=p_data["name"],
+                type=p_data["type"],
+                enabled=p_data.get("enabled", True),
+                priority=p_data.get("priority", 99),
+                url=p_data.get("url"),
+                api_key=p_data.get("api_key"),
+                base_url=p_data.get("base_url"),
+                models=p_data.get("models", []),
+            )
+
+            if self._check_provider_available(provider):
+                self.providers.append(provider)
+            else:
+                logger.warning("Provider %s not available, skipping", provider.name)
+
+        self.providers.sort(key=lambda p: p.priority)
+
     def _expand_env_vars(self, content: str) -> str:
         """Expand ${VAR} syntax in YAML content.
 
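For reference, the shape _parse_router_config and _load_providers consume, shown as the dict yaml.safe_load would produce for a minimal providers.yaml. Field names come from the code above; the concrete values are illustrative.

    sample = {
        "cascade": {
            "timeout_seconds": 20,
            "circuit_breaker": {"failure_threshold": 3},
            # max_retries_per_provider, retry_delay_seconds, recovery_timeout,
            # and half_open_max_calls fall back to their defaults (2, 1, 60, 2)
        },
        "multimodal": {"auto_pull": False},
        "fallback_chains": {"vision": ["llava:7b"]},
        "providers": [
            {"name": "local-ollama", "type": "ollama", "enabled": True, "priority": 1},
        ],
    }
    # router.config = router._parse_router_config(sample)
    # router._load_providers(sample)  # keeps enabled, reachable providers, sorted by priority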
@@ -301,19 +294,11 @@ class CascadeRouter:
                 # Can't check without requests, assume available
                 return True
             try:
-                url = provider.url or "http://localhost:11434"
+                url = provider.url or settings.ollama_url
                 response = requests.get(f"{url}/api/tags", timeout=5)
                 return response.status_code == 200
-            except Exception:
-                return False
-
-        elif provider.type == "airllm":
-            # Check if airllm is installed
-            try:
-                import importlib.util
-
-                return importlib.util.find_spec("airllm") is not None
-            except (ImportError, ModuleNotFoundError):
+            except Exception as exc:
+                logger.debug("Ollama provider check error: %s", exc)
                 return False
 
         elif provider.type in ("openai", "anthropic", "grok"):
@@ -394,6 +379,101 @@ class CascadeRouter:
 
         return None
 
+    def _select_model(
+        self, provider: Provider, model: str | None, content_type: ContentType
+    ) -> tuple[str | None, bool]:
+        """Select the best model for the request, with vision fallback.
+
+        Returns:
+            Tuple of (selected_model, is_fallback_model).
+        """
+        selected_model = model or provider.get_default_model()
+        is_fallback = False
+
+        if content_type != ContentType.TEXT and selected_model:
+            if provider.type == "ollama" and self._mm_manager:
+                from infrastructure.models.multimodal import ModelCapability
+
+                if content_type == ContentType.VISION:
+                    supports = self._mm_manager.model_supports(
+                        selected_model, ModelCapability.VISION
+                    )
+                    if not supports:
+                        fallback = self._get_fallback_model(provider, selected_model, content_type)
+                        if fallback:
+                            logger.info(
+                                "Model %s doesn't support vision, falling back to %s",
+                                selected_model,
+                                fallback,
+                            )
+                            selected_model = fallback
+                            is_fallback = True
+                        else:
+                            logger.warning(
+                                "No vision-capable model found on %s, trying anyway",
+                                provider.name,
+                            )
+
+        return selected_model, is_fallback
+
+    async def _attempt_with_retry(
+        self,
+        provider: Provider,
+        messages: list[dict],
+        model: str | None,
+        temperature: float,
+        max_tokens: int | None,
+        content_type: ContentType,
+    ) -> dict:
+        """Try a provider with retries, returning the result dict.
+
+        Raises:
+            RuntimeError: If all retry attempts fail. The exception message
+                aggregates the error strings collected across retries.
+        """
+        errors: list[str] = []
+        for attempt in range(self.config.max_retries_per_provider):
+            try:
+                return await self._try_provider(
+                    provider=provider,
+                    messages=messages,
+                    model=model,
+                    temperature=temperature,
+                    max_tokens=max_tokens,
+                    content_type=content_type,
+                )
+            except Exception as exc:
+                error_msg = str(exc)
+                logger.warning(
+                    "Provider %s attempt %d failed: %s",
+                    provider.name,
+                    attempt + 1,
+                    error_msg,
+                )
+                errors.append(f"{provider.name}: {error_msg}")
+
+                if attempt < self.config.max_retries_per_provider - 1:
+                    await asyncio.sleep(self.config.retry_delay_seconds)
+
+        raise RuntimeError("; ".join(errors))
+
+    def _is_provider_available(self, provider: Provider) -> bool:
+        """Check if a provider should be tried (enabled + circuit breaker)."""
+        if not provider.enabled:
+            logger.debug("Skipping %s (disabled)", provider.name)
+            return False
+
+        if provider.status == ProviderStatus.UNHEALTHY:
+            if self._can_close_circuit(provider):
+                provider.circuit_state = CircuitState.HALF_OPEN
+                provider.half_open_calls = 0
+                logger.info("Circuit breaker half-open for %s", provider.name)
+            else:
+                logger.debug("Skipping %s (circuit open)", provider.name)
+                return False
+
+        return True
+
     async def complete(
         self,
         messages: list[dict],
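The retry loop that moved into _attempt_with_retry, reduced to its skeleton. The standalone name and the fixed delay are simplifications of the method above, not additional project code.

    import asyncio

    async def attempt_with_retry(call, retries: int = 2, delay: float = 1.0):
        errors: list[str] = []
        for attempt in range(retries):
            try:
                return await call()
            except Exception as exc:
                errors.append(str(exc))
                if attempt < retries - 1:
                    await asyncio.sleep(delay)  # pause before the next attempt
        # Exhausted: surface every error so the cascade can log and aggregate them
        raise RuntimeError("; ".join(errors))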
@@ -420,7 +500,6 @@ class CascadeRouter:
         Raises:
             RuntimeError: If all providers fail
         """
-        # Detect content type for multi-modal routing
         content_type = self._detect_content_type(messages)
         if content_type != ContentType.TEXT:
             logger.debug("Detected %s content, selecting appropriate model", content_type.value)
@@ -428,93 +507,34 @@ class CascadeRouter:
         errors = []
 
         for provider in self.providers:
-            # Skip disabled providers
-            if not provider.enabled:
-                logger.debug("Skipping %s (disabled)", provider.name)
-                continue
-
-            # Skip unhealthy providers (circuit breaker)
-            if provider.status == ProviderStatus.UNHEALTHY:
-                # Check if circuit breaker can close
-                if self._can_close_circuit(provider):
-                    provider.circuit_state = CircuitState.HALF_OPEN
-                    provider.half_open_calls = 0
-                    logger.info("Circuit breaker half-open for %s", provider.name)
-                else:
-                    logger.debug("Skipping %s (circuit open)", provider.name)
-                    continue
-
-            # Determine which model to use
-            selected_model = model or provider.get_default_model()
-            is_fallback_model = False
-
-            # For non-text content, check if model supports it
-            if content_type != ContentType.TEXT and selected_model:
-                if provider.type == "ollama" and self._mm_manager:
-                    from infrastructure.models.multimodal import ModelCapability
-
-                    # Check if selected model supports the required capability
-                    if content_type == ContentType.VISION:
-                        supports = self._mm_manager.model_supports(
-                            selected_model, ModelCapability.VISION
-                        )
-                        if not supports:
-                            # Find fallback model
-                            fallback = self._get_fallback_model(
-                                provider, selected_model, content_type
-                            )
-                            if fallback:
-                                logger.info(
-                                    "Model %s doesn't support vision, falling back to %s",
-                                    selected_model,
-                                    fallback,
-                                )
-                                selected_model = fallback
-                                is_fallback_model = True
-                            else:
-                                logger.warning(
-                                    "No vision-capable model found on %s, trying anyway",
-                                    provider.name,
-                                )
-
-            # Try this provider
-            for attempt in range(self.config.max_retries_per_provider):
-                try:
-                    result = await self._try_provider(
-                        provider=provider,
-                        messages=messages,
-                        model=selected_model,
-                        temperature=temperature,
-                        max_tokens=max_tokens,
-                        content_type=content_type,
-                    )
-
-                    # Success! Update metrics and return
-                    self._record_success(provider, result.get("latency_ms", 0))
-                    return {
-                        "content": result["content"],
-                        "provider": provider.name,
-                        "model": result.get(
-                            "model", selected_model or provider.get_default_model()
-                        ),
-                        "latency_ms": result.get("latency_ms", 0),
-                        "is_fallback_model": is_fallback_model,
-                    }
-
-                except Exception as exc:
-                    error_msg = str(exc)
-                    logger.warning(
-                        "Provider %s attempt %d failed: %s", provider.name, attempt + 1, error_msg
-                    )
-                    errors.append(f"{provider.name}: {error_msg}")
-
-                    if attempt < self.config.max_retries_per_provider - 1:
-                        await asyncio.sleep(self.config.retry_delay_seconds)
-
-            # All retries failed for this provider
-            self._record_failure(provider)
-
-        # All providers failed
+            if not self._is_provider_available(provider):
+                continue
+
+            selected_model, is_fallback_model = self._select_model(provider, model, content_type)
+
+            try:
+                result = await self._attempt_with_retry(
+                    provider,
+                    messages,
+                    selected_model,
+                    temperature,
+                    max_tokens,
+                    content_type,
+                )
+            except RuntimeError as exc:
+                errors.append(str(exc))
+                self._record_failure(provider)
+                continue
+
+            self._record_success(provider, result.get("latency_ms", 0))
+            return {
+                "content": result["content"],
+                "provider": provider.name,
+                "model": result.get("model", selected_model or provider.get_default_model()),
+                "latency_ms": result.get("latency_ms", 0),
+                "is_fallback_model": is_fallback_model,
+            }
+
         raise RuntimeError(f"All providers failed: {'; '.join(errors)}")
 
     async def _try_provider(
@@ -535,6 +555,7 @@ class CascadeRouter:
                 messages=messages,
                 model=model or provider.get_default_model(),
                 temperature=temperature,
+                max_tokens=max_tokens,
                 content_type=content_type,
             )
         elif provider.type == "openai":
@@ -575,23 +596,26 @@ class CascadeRouter:
         messages: list[dict],
         model: str,
         temperature: float,
+        max_tokens: int | None = None,
         content_type: ContentType = ContentType.TEXT,
     ) -> dict:
         """Call Ollama API with multi-modal support."""
         import aiohttp
 
-        url = f"{provider.url}/api/chat"
+        url = f"{provider.url or settings.ollama_url}/api/chat"
 
         # Transform messages for Ollama format (including images)
         transformed_messages = self._transform_messages_for_ollama(messages)
 
+        options = {"temperature": temperature}
+        if max_tokens:
+            options["num_predict"] = max_tokens
+
         payload = {
             "model": model,
             "messages": transformed_messages,
             "stream": False,
-            "options": {
-                "temperature": temperature,
-            },
+            "options": options,
         }
 
         timeout = aiohttp.ClientTimeout(total=self.config.timeout_seconds)
@@ -735,7 +759,7 @@ class CascadeRouter:
 
         client = openai.AsyncOpenAI(
             api_key=provider.api_key,
-            base_url=provider.base_url or "https://api.x.ai/v1",
+            base_url=provider.base_url or settings.xai_base_url,
             timeout=httpx.Timeout(300.0),
         )
 
@@ -814,6 +838,66 @@ class CascadeRouter:
             provider.status = ProviderStatus.HEALTHY
             logger.info("Circuit breaker CLOSED for %s", provider.name)
 
+    def reload_config(self) -> dict:
+        """Hot-reload providers.yaml, preserving runtime state.
+
+        Re-reads the config file, rebuilds the provider list, and
+        preserves circuit breaker state and metrics for providers
+        that still exist after reload.
+
+        Returns:
+            Summary dict with added/removed/preserved counts.
+        """
+        # Snapshot current runtime state keyed by provider name
+        old_state: dict[
+            str, tuple[ProviderMetrics, CircuitState, float | None, int, ProviderStatus]
+        ] = {}
+        for p in self.providers:
+            old_state[p.name] = (
+                p.metrics,
+                p.circuit_state,
+                p.circuit_opened_at,
+                p.half_open_calls,
+                p.status,
+            )
+
+        old_names = set(old_state.keys())
+
+        # Reload from disk
+        self.providers = []
+        self._load_config()
+
+        # Restore preserved state
+        new_names = {p.name for p in self.providers}
+        preserved = 0
+        for p in self.providers:
+            if p.name in old_state:
+                metrics, circuit, opened_at, half_open, status = old_state[p.name]
+                p.metrics = metrics
+                p.circuit_state = circuit
+                p.circuit_opened_at = opened_at
+                p.half_open_calls = half_open
+                p.status = status
+                preserved += 1
+
+        added = new_names - old_names
+        removed = old_names - new_names
+
+        logger.info(
+            "Config reloaded: %d providers (%d preserved, %d added, %d removed)",
+            len(self.providers),
+            preserved,
+            len(added),
+            len(removed),
+        )
+
+        return {
+            "total_providers": len(self.providers),
+            "preserved": preserved,
+            "added": sorted(added),
+            "removed": sorted(removed),
+        }
+
     def get_metrics(self) -> dict:
         """Get metrics for all providers."""
         return {
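The summary dict reload_config returns, per the code above, with illustrative values:

    summary = {
        "total_providers": 3,
        "preserved": 2,          # providers that kept their metrics and circuit state
        "added": ["grok"],       # names present only after the reload
        "removed": ["old-gpu"],  # names dropped from providers.yaml
    }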
152
src/infrastructure/router/history.py
Normal file
152
src/infrastructure/router/history.py
Normal file
@@ -0,0 +1,152 @@
"""Provider health history — time-series snapshots for dashboard visualization."""

import asyncio
import logging
import sqlite3
from datetime import UTC, datetime, timedelta
from pathlib import Path

logger = logging.getLogger(__name__)

_store: "HealthHistoryStore | None" = None


class HealthHistoryStore:
    """Stores timestamped provider health snapshots in SQLite."""

    def __init__(self, db_path: str = "data/router_history.db") -> None:
        self.db_path = db_path
        if db_path != ":memory:":
            Path(db_path).parent.mkdir(parents=True, exist_ok=True)
        self._conn = sqlite3.connect(db_path, check_same_thread=False)
        self._conn.row_factory = sqlite3.Row
        self._init_schema()
        self._bg_task: asyncio.Task | None = None

    def _init_schema(self) -> None:
        self._conn.execute("""
            CREATE TABLE IF NOT EXISTS snapshots (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                timestamp TEXT NOT NULL,
                provider_name TEXT NOT NULL,
                status TEXT NOT NULL,
                error_rate REAL NOT NULL,
                avg_latency_ms REAL NOT NULL,
                circuit_state TEXT NOT NULL,
                total_requests INTEGER NOT NULL
            )
        """)
        self._conn.execute("""
            CREATE INDEX IF NOT EXISTS idx_snapshots_ts
            ON snapshots(timestamp)
        """)
        self._conn.commit()

    def record_snapshot(self, providers: list[dict]) -> None:
        """Record a health snapshot for all providers."""
        ts = datetime.now(UTC).isoformat()
        rows = [
            (
                ts,
                p["name"],
                p["status"],
                p["error_rate"],
                p["avg_latency_ms"],
                p["circuit_state"],
                p["total_requests"],
            )
            for p in providers
        ]
        self._conn.executemany(
            """INSERT INTO snapshots
               (timestamp, provider_name, status, error_rate,
                avg_latency_ms, circuit_state, total_requests)
               VALUES (?, ?, ?, ?, ?, ?, ?)""",
            rows,
        )
        self._conn.commit()

    def get_history(self, hours: int = 24) -> list[dict]:
        """Return snapshots from the last N hours, grouped by timestamp."""
        cutoff = (datetime.now(UTC) - timedelta(hours=hours)).isoformat()
        rows = self._conn.execute(
            """SELECT timestamp, provider_name, status, error_rate,
                      avg_latency_ms, circuit_state, total_requests
               FROM snapshots WHERE timestamp >= ? ORDER BY timestamp""",
            (cutoff,),
        ).fetchall()

        # Group by timestamp
        snapshots: dict[str, list[dict]] = {}
        for row in rows:
            ts = row["timestamp"]
            if ts not in snapshots:
                snapshots[ts] = []
            snapshots[ts].append(
                {
                    "name": row["provider_name"],
                    "status": row["status"],
                    "error_rate": row["error_rate"],
                    "avg_latency_ms": row["avg_latency_ms"],
                    "circuit_state": row["circuit_state"],
                    "total_requests": row["total_requests"],
                }
            )

        return [{"timestamp": ts, "providers": providers} for ts, providers in snapshots.items()]

    def prune(self, keep_hours: int = 168) -> int:
        """Remove snapshots older than keep_hours. Returns rows deleted."""
        cutoff = (datetime.now(UTC) - timedelta(hours=keep_hours)).isoformat()
        cursor = self._conn.execute("DELETE FROM snapshots WHERE timestamp < ?", (cutoff,))
        self._conn.commit()
        return cursor.rowcount

    def close(self) -> None:
        """Close the database connection."""
        if self._bg_task and not self._bg_task.done():
            self._bg_task.cancel()
        self._conn.close()

    def _capture_snapshot(self, cascade_router) -> None:  # noqa: ANN001
        """Capture current provider state as a snapshot."""
        providers = []
        for p in cascade_router.providers:
            providers.append(
                {
                    "name": p.name,
                    "status": p.status.value,
                    "error_rate": round(p.metrics.error_rate, 4),
                    "avg_latency_ms": round(p.metrics.avg_latency_ms, 2),
                    "circuit_state": p.circuit_state.value,
                    "total_requests": p.metrics.total_requests,
                }
            )
        self.record_snapshot(providers)

    async def start_background_task(
        self,
        cascade_router,  # noqa: ANN001
        interval_seconds: int = 60,
    ) -> None:
        """Start periodic snapshot capture."""

        async def _loop() -> None:
            while True:
                try:
                    self._capture_snapshot(cascade_router)
                    logger.debug("Recorded health snapshot")
                except Exception:
                    logger.exception("Failed to record health snapshot")
                await asyncio.sleep(interval_seconds)

        self._bg_task = asyncio.create_task(_loop())
        logger.info("Health history background task started (interval=%ds)", interval_seconds)


def get_history_store() -> HealthHistoryStore:
    """Get or create the singleton history store."""
    global _store  # noqa: PLW0603
    if _store is None:
        _store = HealthHistoryStore()
    return _store
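
A minimal usage sketch for the store above (hypothetical example, not part of the diff; ":memory:" keeps it self-contained, and the dict keys match what _capture_snapshot produces):

    store = HealthHistoryStore(db_path=":memory:")
    store.record_snapshot([
        {"name": "ollama", "status": "healthy", "error_rate": 0.01,
         "avg_latency_ms": 120.5, "circuit_state": "closed", "total_requests": 42},
    ])
    history = store.get_history(hours=1)   # [{"timestamp": ..., "providers": [...]}]
    deleted = store.prune(keep_hours=168)  # drop snapshots older than a week
    store.close()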

src/infrastructure/visitor.py (new file, 166 lines)
@@ -0,0 +1,166 @@
"""Visitor state tracking for the Matrix frontend.
|
||||||
|
|
||||||
|
Tracks active visitors as they connect and move around the 3D world,
|
||||||
|
and provides serialization for Matrix protocol broadcast messages.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import time
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from datetime import UTC, datetime
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class VisitorState:
|
||||||
|
"""State for a single visitor in the Matrix.
|
||||||
|
|
||||||
|
Attributes
|
||||||
|
----------
|
||||||
|
visitor_id: Unique identifier for the visitor (client ID).
|
||||||
|
display_name: Human-readable name shown above the visitor.
|
||||||
|
position: 3D coordinates (x, y, z) in the world.
|
||||||
|
rotation: Rotation angle in degrees (0-360).
|
||||||
|
connected_at: ISO timestamp when the visitor connected.
|
||||||
|
"""
|
||||||
|
|
||||||
|
visitor_id: str
|
||||||
|
display_name: str = ""
|
||||||
|
position: dict[str, float] = field(default_factory=lambda: {"x": 0.0, "y": 0.0, "z": 0.0})
|
||||||
|
rotation: float = 0.0
|
||||||
|
connected_at: str = field(
|
||||||
|
default_factory=lambda: datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
|
||||||
|
)
|
||||||
|
|
||||||
|
def __post_init__(self):
|
||||||
|
"""Set display_name to visitor_id if not provided; copy position dict."""
|
||||||
|
if not self.display_name:
|
||||||
|
self.display_name = self.visitor_id
|
||||||
|
# Copy position to avoid shared mutable state
|
||||||
|
self.position = dict(self.position)
|
||||||
|
|
||||||
|
|
||||||
|
class VisitorRegistry:
|
||||||
|
"""Registry of active visitors in the Matrix.
|
||||||
|
|
||||||
|
Thread-safe singleton pattern (Python GIL protects dict operations).
|
||||||
|
Used by the WebSocket layer to track and broadcast visitor positions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
_instance: "VisitorRegistry | None" = None
|
||||||
|
|
||||||
|
def __new__(cls) -> "VisitorRegistry":
|
||||||
|
"""Singleton constructor."""
|
||||||
|
if cls._instance is None:
|
||||||
|
cls._instance = super().__new__(cls)
|
||||||
|
cls._instance._visitors: dict[str, VisitorState] = {}
|
||||||
|
return cls._instance
|
||||||
|
|
||||||
|
def add(
|
||||||
|
self, visitor_id: str, display_name: str = "", position: dict | None = None
|
||||||
|
) -> VisitorState:
|
||||||
|
"""Add a new visitor to the registry.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
visitor_id: Unique identifier for the visitor.
|
||||||
|
display_name: Optional display name (defaults to visitor_id).
|
||||||
|
position: Optional initial position (defaults to origin).
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
The newly created VisitorState.
|
||||||
|
"""
|
||||||
|
visitor = VisitorState(
|
||||||
|
visitor_id=visitor_id,
|
||||||
|
display_name=display_name,
|
||||||
|
position=position if position else {"x": 0.0, "y": 0.0, "z": 0.0},
|
||||||
|
)
|
||||||
|
self._visitors[visitor_id] = visitor
|
||||||
|
return visitor
|
||||||
|
|
||||||
|
def remove(self, visitor_id: str) -> bool:
|
||||||
|
"""Remove a visitor from the registry.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
visitor_id: The visitor to remove.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
True if the visitor was found and removed, False otherwise.
|
||||||
|
"""
|
||||||
|
if visitor_id in self._visitors:
|
||||||
|
del self._visitors[visitor_id]
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def update_position(
|
||||||
|
self,
|
||||||
|
visitor_id: str,
|
||||||
|
position: dict[str, float],
|
||||||
|
rotation: float | None = None,
|
||||||
|
) -> bool:
|
||||||
|
"""Update a visitor's position and rotation.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
visitor_id: The visitor to update.
|
||||||
|
position: New 3D coordinates (x, y, z).
|
||||||
|
rotation: Optional new rotation angle.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
True if the visitor was found and updated, False otherwise.
|
||||||
|
"""
|
||||||
|
if visitor_id not in self._visitors:
|
||||||
|
return False
|
||||||
|
|
||||||
|
self._visitors[visitor_id].position = position
|
||||||
|
if rotation is not None:
|
||||||
|
self._visitors[visitor_id].rotation = rotation
|
||||||
|
return True
|
||||||
|
|
||||||
|
def get(self, visitor_id: str) -> VisitorState | None:
|
||||||
|
"""Get a single visitor's state.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
visitor_id: The visitor to retrieve.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
The VisitorState if found, None otherwise.
|
||||||
|
"""
|
||||||
|
return self._visitors.get(visitor_id)
|
||||||
|
|
||||||
|
def get_all(self) -> list[dict]:
|
||||||
|
"""Get all active visitors as Matrix protocol message dicts.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
List of visitor_state dicts ready for WebSocket broadcast.
|
||||||
|
Each dict has: type, visitor_id, data (with display_name,
|
||||||
|
position, rotation, connected_at), and ts.
|
||||||
|
"""
|
||||||
|
now = int(time.time())
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
"type": "visitor_state",
|
||||||
|
"visitor_id": v.visitor_id,
|
||||||
|
"data": {
|
||||||
|
"display_name": v.display_name,
|
||||||
|
"position": v.position,
|
||||||
|
"rotation": v.rotation,
|
||||||
|
"connected_at": v.connected_at,
|
||||||
|
},
|
||||||
|
"ts": now,
|
||||||
|
}
|
||||||
|
for v in self._visitors.values()
|
||||||
|
]
|
||||||
|
|
||||||
|
def clear(self) -> None:
|
||||||
|
"""Remove all visitors (useful for testing)."""
|
||||||
|
self._visitors.clear()
|
||||||
|
|
||||||
|
def __len__(self) -> int:
|
||||||
|
"""Return the number of active visitors."""
|
||||||
|
return len(self._visitors)
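
A short usage sketch for the registry (hypothetical example, not part of the diff; because of the singleton, a second constructor call returns the same object):

    registry = VisitorRegistry()
    registry.add("client-1", display_name="Neo")
    registry.update_position("client-1", {"x": 1.0, "y": 0.0, "z": -2.5}, rotation=90.0)
    assert VisitorRegistry() is registry   # singleton: same instance everywhere
    messages = registry.get_all()          # visitor_state dicts, ready to broadcast
    registry.remove("client-1")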

@@ -54,7 +54,8 @@ class WebSocketManager:
         for event in list(self._event_history)[-20:]:
             try:
                 await websocket.send_text(event.to_json())
-            except Exception:
+            except Exception as exc:
+                logger.warning("WebSocket history send error: %s", exc)
                 break

     def disconnect(self, websocket: WebSocket) -> None:
@@ -83,8 +84,8 @@ class WebSocketManager:
                 await ws.send_text(message)
             except ConnectionError:
                 disconnected.append(ws)
-            except Exception:
-                logger.warning("Unexpected WebSocket send error", exc_info=True)
+            except Exception as exc:
+                logger.warning("Unexpected WebSocket send error: %s", exc)
                 disconnected.append(ws)

         # Clean up dead connections
@@ -156,7 +157,8 @@ class WebSocketManager:
             try:
                 await ws.send_text(message)
                 count += 1
-            except Exception:
+            except Exception as exc:
+                logger.warning("WebSocket direct send error: %s", exc)
                 disconnected.append(ws)

         # Clean up dead connections
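
All three hunks apply the same fix: a bare `except Exception:` that silently swallowed errors now binds the exception and logs it before the existing control flow (break or append) runs. A minimal sketch of the before/after shape, assuming a module-level `logger`:

    # Before: send failures are invisible.
    try:
        await ws.send_text(message)
    except Exception:
        disconnected.append(ws)

    # After: same control flow, but the cause is recorded.
    try:
        await ws.send_text(message)
    except Exception as exc:
        logger.warning("Unexpected WebSocket send error: %s", exc)
        disconnected.append(ws)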

src/integrations/chat_bridge/vendors/discord.py (vendored, 143 lines changed)
@@ -87,7 +87,8 @@ if _DISCORD_UI_AVAILABLE:
                 await action["target"].send(
                     f"Action `{action['tool_name']}` timed out and was auto-rejected."
                 )
-            except Exception:
+            except Exception as exc:
+                logger.warning("Discord action timeout message error: %s", exc)
                 pass

@@ -186,7 +187,8 @@ class DiscordVendor(ChatPlatform):
         if self._client and not self._client.is_closed():
             try:
                 await self._client.close()
-            except Exception:
+            except Exception as exc:
+                logger.warning("Discord client close error: %s", exc)
                 pass
             self._client = None

@@ -330,7 +332,8 @@ class DiscordVendor(ChatPlatform):

             if settings.discord_token:
                 return settings.discord_token
-        except Exception:
+        except Exception as exc:
+            logger.warning("Discord token load error: %s", exc)
             pass

         # 2. Fall back to state file (set via /discord/setup endpoint)
@@ -458,7 +461,8 @@ class DiscordVendor(ChatPlatform):
             req.reject(note="User rejected from Discord")
             try:
                 await continue_chat(action["run_output"], action.get("session_id"))
-            except Exception:
+            except Exception as exc:
+                logger.warning("Discord continue chat error: %s", exc)
                 pass

             await interaction.response.send_message(
@@ -511,25 +515,36 @@ class DiscordVendor(ChatPlatform):

     async def _handle_message(self, message) -> None:
         """Process an incoming message and respond via a thread."""
-        # Strip the bot mention from the message content
-        content = message.content
-        if self._client.user:
-            content = content.replace(f"<@{self._client.user.id}>", "").strip()
-
+        content = self._extract_content(message)
         if not content:
             return

-        # Create or reuse a thread for this conversation
         thread = await self._get_or_create_thread(message)
         target = thread or message.channel
+        session_id = f"discord_{thread.id}" if thread else f"discord_{message.channel.id}"

-        # Derive session_id for per-conversation history via Agno's SQLite
-        if thread:
-            session_id = f"discord_{thread.id}"
-        else:
-            session_id = f"discord_{message.channel.id}"
-
-        # Run Timmy agent with typing indicator and timeout
+        run_output, response = await self._invoke_agent(content, session_id, target)
+        if run_output is not None:
+            await self._handle_paused_run(run_output, target, session_id)
+            raw_content = run_output.content if hasattr(run_output, "content") else ""
+            response = _clean_response(raw_content or "")
+
+        await self._send_response(response, target)
+
+    def _extract_content(self, message) -> str:
+        """Strip the bot mention and return clean message text."""
+        content = message.content
+        if self._client.user:
+            content = content.replace(f"<@{self._client.user.id}>", "").strip()
+        return content
+
+    async def _invoke_agent(self, content: str, session_id: str, target):
+        """Run chat_with_tools with a typing indicator and timeout.
+
+        Returns a (run_output, error_response) tuple. On success the
+        error_response is ``None``; on failure run_output is ``None``.
+        """
         run_output = None
         response = None
         try:
@@ -543,54 +558,58 @@ class DiscordVendor(ChatPlatform):
             response = "Sorry, that took too long. Please try a simpler request."
         except Exception as exc:
             logger.error("Discord: chat_with_tools() failed: %s", exc)
-            response = (
-                "I'm having trouble reaching my language model right now. Please try again shortly."
-            )
+            response = "I'm having trouble reaching my inference backend right now. Please try again shortly."
+        return run_output, response

-        # Check if Agno paused the run for tool confirmation
-        if run_output is not None:
-            status = getattr(run_output, "status", None)
-            is_paused = status == "PAUSED" or str(status) == "RunStatus.paused"
-
-            if is_paused and getattr(run_output, "active_requirements", None):
-                from config import settings
-
-                if settings.discord_confirm_actions:
-                    for req in run_output.active_requirements:
-                        if getattr(req, "needs_confirmation", False):
-                            te = req.tool_execution
-                            tool_name = getattr(te, "tool_name", "unknown")
-                            tool_args = getattr(te, "tool_args", {}) or {}
-
-                            from timmy.approvals import create_item
-
-                            item = create_item(
-                                title=f"Discord: {tool_name}",
-                                description=_format_action_description(tool_name, tool_args),
-                                proposed_action=json.dumps({"tool": tool_name, "args": tool_args}),
-                                impact=_get_impact_level(tool_name),
-                            )
-                            self._pending_actions[item.id] = {
-                                "run_output": run_output,
-                                "requirement": req,
-                                "tool_name": tool_name,
-                                "tool_args": tool_args,
-                                "target": target,
-                                "session_id": session_id,
-                            }
-                            await self._send_confirmation(target, tool_name, tool_args, item.id)
-
-            raw_content = run_output.content if hasattr(run_output, "content") else ""
-            response = _clean_response(raw_content or "")
-
-        # Discord has a 2000 character limit — send with error handling
-        if response and response.strip():
-            for chunk in _chunk_message(response, 2000):
-                try:
-                    await target.send(chunk)
-                except Exception as exc:
-                    logger.error("Discord: failed to send message chunk: %s", exc)
-                    break
+    async def _handle_paused_run(self, run_output, target, session_id: str) -> None:
+        """If Agno paused the run for tool confirmation, enqueue approvals."""
+        status = getattr(run_output, "status", None)
+        is_paused = status == "PAUSED" or str(status) == "RunStatus.paused"
+
+        if not (is_paused and getattr(run_output, "active_requirements", None)):
+            return
+
+        from config import settings
+
+        if not settings.discord_confirm_actions:
+            return
+
+        for req in run_output.active_requirements:
+            if not getattr(req, "needs_confirmation", False):
+                continue
+            te = req.tool_execution
+            tool_name = getattr(te, "tool_name", "unknown")
+            tool_args = getattr(te, "tool_args", {}) or {}
+
+            from timmy.approvals import create_item
+
+            item = create_item(
+                title=f"Discord: {tool_name}",
+                description=_format_action_description(tool_name, tool_args),
+                proposed_action=json.dumps({"tool": tool_name, "args": tool_args}),
+                impact=_get_impact_level(tool_name),
+            )
+            self._pending_actions[item.id] = {
+                "run_output": run_output,
+                "requirement": req,
+                "tool_name": tool_name,
+                "tool_args": tool_args,
+                "target": target,
+                "session_id": session_id,
+            }
+            await self._send_confirmation(target, tool_name, tool_args, item.id)
+
+    @staticmethod
+    async def _send_response(response: str | None, target) -> None:
+        """Send a response to Discord, chunked to the 2000-char limit."""
+        if not response or not response.strip():
+            return
+        for chunk in _chunk_message(response, 2000):
+            try:
+                await target.send(chunk)
+            except Exception as exc:
+                logger.error("Discord: failed to send message chunk: %s", exc)
+                break

     async def _get_or_create_thread(self, message):
         """Get the active thread for a channel, or create one.

@@ -56,7 +56,8 @@ class TelegramBot:
             from config import settings

             return settings.telegram_token or None
-        except Exception:
+        except Exception as exc:
+            logger.warning("Telegram token load error: %s", exc)
             return None

     def save_token(self, token: str) -> None:

src/lightning/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
"""Lightning Network integration for tool-usage micro-payments."""

src/lightning/factory.py (new file, 69 lines)
@@ -0,0 +1,69 @@
"""Lightning backend factory.
|
||||||
|
|
||||||
|
Returns a mock or real LND backend based on ``settings.lightning_backend``.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import hashlib
|
||||||
|
import logging
|
||||||
|
import secrets
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
from config import settings
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Invoice:
|
||||||
|
"""Minimal Lightning invoice representation."""
|
||||||
|
|
||||||
|
payment_hash: str
|
||||||
|
payment_request: str
|
||||||
|
amount_sats: int
|
||||||
|
memo: str
|
||||||
|
|
||||||
|
|
||||||
|
class MockBackend:
|
||||||
|
"""In-memory mock Lightning backend for development and testing."""
|
||||||
|
|
||||||
|
def create_invoice(self, amount_sats: int, memo: str = "") -> Invoice:
|
||||||
|
"""Create a fake invoice with a random payment hash."""
|
||||||
|
raw = secrets.token_bytes(32)
|
||||||
|
payment_hash = hashlib.sha256(raw).hexdigest()
|
||||||
|
payment_request = f"lnbc{amount_sats}mock{payment_hash[:20]}"
|
||||||
|
logger.debug("Mock invoice: %s sats — %s", amount_sats, payment_hash[:12])
|
||||||
|
return Invoice(
|
||||||
|
payment_hash=payment_hash,
|
||||||
|
payment_request=payment_request,
|
||||||
|
amount_sats=amount_sats,
|
||||||
|
memo=memo,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Singleton — lazily created
|
||||||
|
_backend: MockBackend | None = None
|
||||||
|
|
||||||
|
|
||||||
|
def get_backend() -> MockBackend:
|
||||||
|
"""Return the configured Lightning backend (currently mock-only).
|
||||||
|
|
||||||
|
Raises ``ValueError`` if an unsupported backend is requested.
|
||||||
|
"""
|
||||||
|
global _backend # noqa: PLW0603
|
||||||
|
if _backend is not None:
|
||||||
|
return _backend
|
||||||
|
|
||||||
|
kind = settings.lightning_backend
|
||||||
|
if kind == "mock":
|
||||||
|
_backend = MockBackend()
|
||||||
|
elif kind == "lnd":
|
||||||
|
# LND gRPC integration is on the roadmap — for now fall back to mock.
|
||||||
|
logger.warning("LND backend not yet implemented — using mock")
|
||||||
|
_backend = MockBackend()
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unknown lightning_backend: {kind!r}")
|
||||||
|
|
||||||
|
logger.info("Lightning backend: %s", kind)
|
||||||
|
return _backend
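
Usage sketch with the default mock backend (hypothetical example, not part of the diff; values are illustrative):

    backend = get_backend()
    inv = backend.create_invoice(amount_sats=21, memo="tool call")
    assert inv.payment_request.startswith("lnbc21mock")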

src/lightning/ledger.py (new file, 146 lines)
@@ -0,0 +1,146 @@
"""In-memory Lightning transaction ledger.
|
||||||
|
|
||||||
|
Tracks invoices, settlements, and balances per the schema in
|
||||||
|
``docs/adr/018-lightning-ledger.md``. Uses a simple in-memory list so the
|
||||||
|
dashboard can display real (ephemeral) data without requiring SQLite yet.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import uuid
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from datetime import UTC, datetime
|
||||||
|
from enum import StrEnum
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class TxType(StrEnum):
|
||||||
|
incoming = "incoming"
|
||||||
|
outgoing = "outgoing"
|
||||||
|
|
||||||
|
|
||||||
|
class TxStatus(StrEnum):
|
||||||
|
pending = "pending"
|
||||||
|
settled = "settled"
|
||||||
|
failed = "failed"
|
||||||
|
expired = "expired"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class LedgerEntry:
|
||||||
|
"""Single ledger row matching the ADR-018 schema."""
|
||||||
|
|
||||||
|
id: str
|
||||||
|
tx_type: TxType
|
||||||
|
status: TxStatus
|
||||||
|
payment_hash: str
|
||||||
|
amount_sats: int
|
||||||
|
memo: str
|
||||||
|
source: str
|
||||||
|
created_at: str
|
||||||
|
invoice: str = ""
|
||||||
|
preimage: str = ""
|
||||||
|
task_id: str = ""
|
||||||
|
agent_id: str = ""
|
||||||
|
settled_at: str = ""
|
||||||
|
fee_sats: int = 0
|
||||||
|
|
||||||
|
|
||||||
|
# ── In-memory store ──────────────────────────────────────────────────
|
||||||
|
_entries: list[LedgerEntry] = []
|
||||||
|
|
||||||
|
|
||||||
|
def create_invoice_entry(
|
||||||
|
payment_hash: str,
|
||||||
|
amount_sats: int,
|
||||||
|
memo: str = "",
|
||||||
|
source: str = "tool_usage",
|
||||||
|
task_id: str = "",
|
||||||
|
agent_id: str = "",
|
||||||
|
invoice: str = "",
|
||||||
|
) -> LedgerEntry:
|
||||||
|
"""Record a new incoming invoice in the ledger."""
|
||||||
|
entry = LedgerEntry(
|
||||||
|
id=uuid.uuid4().hex[:16],
|
||||||
|
tx_type=TxType.incoming,
|
||||||
|
status=TxStatus.pending,
|
||||||
|
payment_hash=payment_hash,
|
||||||
|
amount_sats=amount_sats,
|
||||||
|
memo=memo,
|
||||||
|
source=source,
|
||||||
|
task_id=task_id,
|
||||||
|
agent_id=agent_id,
|
||||||
|
invoice=invoice,
|
||||||
|
created_at=datetime.now(UTC).isoformat(),
|
||||||
|
)
|
||||||
|
_entries.append(entry)
|
||||||
|
logger.debug("Ledger entry created: %s (%s sats)", entry.id, amount_sats)
|
||||||
|
return entry
|
||||||
|
|
||||||
|
|
||||||
|
def mark_settled(payment_hash: str, preimage: str = "") -> LedgerEntry | None:
|
||||||
|
"""Mark a pending entry as settled by payment hash."""
|
||||||
|
for entry in _entries:
|
||||||
|
if entry.payment_hash == payment_hash and entry.status == TxStatus.pending:
|
||||||
|
entry.status = TxStatus.settled
|
||||||
|
entry.preimage = preimage
|
||||||
|
entry.settled_at = datetime.now(UTC).isoformat()
|
||||||
|
logger.debug("Ledger settled: %s", payment_hash[:12])
|
||||||
|
return entry
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def get_balance() -> dict:
|
||||||
|
"""Compute the current balance from settled and pending entries."""
|
||||||
|
incoming_total = sum(
|
||||||
|
e.amount_sats
|
||||||
|
for e in _entries
|
||||||
|
if e.tx_type == TxType.incoming and e.status == TxStatus.settled
|
||||||
|
)
|
||||||
|
outgoing_total = sum(
|
||||||
|
e.amount_sats
|
||||||
|
for e in _entries
|
||||||
|
if e.tx_type == TxType.outgoing and e.status == TxStatus.settled
|
||||||
|
)
|
||||||
|
fees = sum(e.fee_sats for e in _entries if e.status == TxStatus.settled)
|
||||||
|
pending_in = sum(
|
||||||
|
e.amount_sats
|
||||||
|
for e in _entries
|
||||||
|
if e.tx_type == TxType.incoming and e.status == TxStatus.pending
|
||||||
|
)
|
||||||
|
pending_out = sum(
|
||||||
|
e.amount_sats
|
||||||
|
for e in _entries
|
||||||
|
if e.tx_type == TxType.outgoing and e.status == TxStatus.pending
|
||||||
|
)
|
||||||
|
net = incoming_total - outgoing_total - fees
|
||||||
|
return {
|
||||||
|
"incoming_total_sats": incoming_total,
|
||||||
|
"outgoing_total_sats": outgoing_total,
|
||||||
|
"fees_paid_sats": fees,
|
||||||
|
"net_sats": net,
|
||||||
|
"pending_incoming_sats": pending_in,
|
||||||
|
"pending_outgoing_sats": pending_out,
|
||||||
|
"available_sats": net - pending_out,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def get_transactions(
|
||||||
|
tx_type: str | None = None,
|
||||||
|
status: str | None = None,
|
||||||
|
limit: int = 50,
|
||||||
|
) -> list[LedgerEntry]:
|
||||||
|
"""Return ledger entries, optionally filtered."""
|
||||||
|
result = _entries
|
||||||
|
if tx_type:
|
||||||
|
result = [e for e in result if e.tx_type.value == tx_type]
|
||||||
|
if status:
|
||||||
|
result = [e for e in result if e.status.value == status]
|
||||||
|
return list(reversed(result))[:limit]
|
||||||
|
|
||||||
|
|
||||||
|
def clear() -> None:
|
||||||
|
"""Reset the ledger (for testing)."""
|
||||||
|
_entries.clear()
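
A minimal lifecycle sketch tying the factory and ledger together (hypothetical example, not part of the diff; amounts are illustrative and the preimage is faked, since the mock backend never settles on its own):

    inv = get_backend().create_invoice(amount_sats=100, memo="search tool")
    create_invoice_entry(inv.payment_hash, inv.amount_sats, memo=inv.memo, invoice=inv.payment_request)
    assert get_balance()["pending_incoming_sats"] == 100
    mark_settled(inv.payment_hash, preimage="00" * 32)
    assert get_balance()["available_sats"] == 100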

src/loop/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
"""Three-phase agent loop: Gather → Reason → Act."""

src/loop/phase1_gather.py (new file, 37 lines)
@@ -0,0 +1,37 @@
"""Phase 1 — Gather: accept raw input, produce structured context.
|
||||||
|
|
||||||
|
This is the sensory phase. It receives a raw ContextPayload and enriches
|
||||||
|
it with whatever context Timmy needs before reasoning. In the stub form,
|
||||||
|
it simply passes the payload through with a phase marker.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from loop.schema import ContextPayload
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def gather(payload: ContextPayload) -> ContextPayload:
|
||||||
|
"""Accept raw input and return structured context for reasoning.
|
||||||
|
|
||||||
|
Stub: tags the payload with phase=gather and logs transit.
|
||||||
|
Timmy will flesh this out with context selection, memory lookup,
|
||||||
|
adapter polling, and attention-residual weighting.
|
||||||
|
"""
|
||||||
|
logger.info(
|
||||||
|
"Phase 1 (Gather) received: source=%s content_len=%d tokens=%d",
|
||||||
|
payload.source,
|
||||||
|
len(payload.content),
|
||||||
|
payload.token_count,
|
||||||
|
)
|
||||||
|
|
||||||
|
result = payload.with_metadata(phase="gather", gathered=True)
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
"Phase 1 (Gather) produced: metadata_keys=%s",
|
||||||
|
sorted(result.metadata.keys()),
|
||||||
|
)
|
||||||
|
return result

src/loop/phase2_reason.py (new file, 36 lines)
@@ -0,0 +1,36 @@
"""Phase 2 — Reason: accept gathered context, produce reasoning output.
|
||||||
|
|
||||||
|
This is the deliberation phase. It receives enriched context from Phase 1
|
||||||
|
and decides what to do. In the stub form, it passes the payload through
|
||||||
|
with a phase marker.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from loop.schema import ContextPayload
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def reason(payload: ContextPayload) -> ContextPayload:
|
||||||
|
"""Accept gathered context and return a reasoning result.
|
||||||
|
|
||||||
|
Stub: tags the payload with phase=reason and logs transit.
|
||||||
|
Timmy will flesh this out with LLM calls, confidence scoring,
|
||||||
|
plan generation, and judgment logic.
|
||||||
|
"""
|
||||||
|
logger.info(
|
||||||
|
"Phase 2 (Reason) received: source=%s gathered=%s",
|
||||||
|
payload.source,
|
||||||
|
payload.metadata.get("gathered", False),
|
||||||
|
)
|
||||||
|
|
||||||
|
result = payload.with_metadata(phase="reason", reasoned=True)
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
"Phase 2 (Reason) produced: metadata_keys=%s",
|
||||||
|
sorted(result.metadata.keys()),
|
||||||
|
)
|
||||||
|
return result

src/loop/phase3_act.py (new file, 36 lines)
@@ -0,0 +1,36 @@
"""Phase 3 — Act: accept reasoning output, execute and produce feedback.
|
||||||
|
|
||||||
|
This is the command phase. It receives the reasoning result from Phase 2
|
||||||
|
and takes action. In the stub form, it passes the payload through with a
|
||||||
|
phase marker and produces feedback for the next cycle.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from loop.schema import ContextPayload
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def act(payload: ContextPayload) -> ContextPayload:
|
||||||
|
"""Accept reasoning result and return action output + feedback.
|
||||||
|
|
||||||
|
Stub: tags the payload with phase=act and logs transit.
|
||||||
|
Timmy will flesh this out with tool execution, delegation,
|
||||||
|
response generation, and feedback construction.
|
||||||
|
"""
|
||||||
|
logger.info(
|
||||||
|
"Phase 3 (Act) received: source=%s reasoned=%s",
|
||||||
|
payload.source,
|
||||||
|
payload.metadata.get("reasoned", False),
|
||||||
|
)
|
||||||
|
|
||||||
|
result = payload.with_metadata(phase="act", acted=True)
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
"Phase 3 (Act) produced: metadata_keys=%s",
|
||||||
|
sorted(result.metadata.keys()),
|
||||||
|
)
|
||||||
|
return result

src/loop/runner.py (new file, 40 lines)
@@ -0,0 +1,40 @@
"""Loop runner — orchestrates the three phases in sequence.
|
||||||
|
|
||||||
|
Runs Gather → Reason → Act as a single cycle, passing output from each
|
||||||
|
phase as input to the next. The Act output feeds back as input to the
|
||||||
|
next Gather call.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from loop.phase1_gather import gather
|
||||||
|
from loop.phase2_reason import reason
|
||||||
|
from loop.phase3_act import act
|
||||||
|
from loop.schema import ContextPayload
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def run_cycle(payload: ContextPayload) -> ContextPayload:
|
||||||
|
"""Execute one full Gather → Reason → Act cycle.
|
||||||
|
|
||||||
|
Returns the Act phase output, which can be fed back as input
|
||||||
|
to the next cycle.
|
||||||
|
"""
|
||||||
|
logger.info("=== Loop cycle start: source=%s ===", payload.source)
|
||||||
|
|
||||||
|
gathered = gather(payload)
|
||||||
|
reasoned = reason(gathered)
|
||||||
|
acted = act(reasoned)
|
||||||
|
|
||||||
|
logger.info(
|
||||||
|
"=== Loop cycle complete: phases=%s ===",
|
||||||
|
[
|
||||||
|
gathered.metadata.get("phase"),
|
||||||
|
reasoned.metadata.get("phase"),
|
||||||
|
acted.metadata.get("phase"),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
return acted
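
Driving the loop looks like this (hypothetical example, not part of the diff; ContextPayload is defined in src/loop/schema.py below, and values are illustrative):

    payload = ContextPayload(source="user", content="hello", token_count=2)
    out = run_cycle(payload)
    assert out.metadata["phase"] == "act"
    next_out = run_cycle(out)  # Act output feeds the next Gather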

src/loop/schema.py (new file, 43 lines)
@@ -0,0 +1,43 @@
"""Data schema for the three-phase loop.
|
||||||
|
|
||||||
|
Each phase passes a ContextPayload forward. The schema is intentionally
|
||||||
|
minimal — Timmy decides what fields matter as the loop matures.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from datetime import UTC, datetime
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ContextPayload:
|
||||||
|
"""Immutable context packet passed between loop phases.
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
source: Where this payload originated (e.g. "user", "timer", "event").
|
||||||
|
content: The raw content string to process.
|
||||||
|
timestamp: When the payload was created.
|
||||||
|
token_count: Estimated token count for budget tracking. -1 = unknown.
|
||||||
|
metadata: Arbitrary key-value pairs for phase-specific data.
|
||||||
|
"""
|
||||||
|
|
||||||
|
source: str
|
||||||
|
content: str
|
||||||
|
timestamp: datetime = field(default_factory=lambda: datetime.now(UTC))
|
||||||
|
token_count: int = -1
|
||||||
|
metadata: dict = field(default_factory=dict)
|
||||||
|
|
||||||
|
def with_metadata(self, **kwargs: object) -> ContextPayload:
|
||||||
|
"""Return a new payload with additional metadata merged in."""
|
||||||
|
merged = {**self.metadata, **kwargs}
|
||||||
|
return ContextPayload(
|
||||||
|
source=self.source,
|
||||||
|
content=self.content,
|
||||||
|
timestamp=self.timestamp,
|
||||||
|
token_count=self.token_count,
|
||||||
|
metadata=merged,
|
||||||
|
)
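
Because with_metadata returns a fresh ContextPayload instead of mutating in place, each phase's tag accumulates without clobbering earlier ones (hypothetical example, not part of the diff):

    p = ContextPayload(source="timer", content="tick")
    q = p.with_metadata(phase="gather", gathered=True)
    assert p.metadata == {}   # original untouched
    assert q.metadata == {"phase": "gather", "gathered": True}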

@@ -16,6 +16,8 @@ import json
 import logging
 import sqlite3
 import uuid
+from collections.abc import Generator
+from contextlib import closing, contextmanager
 from dataclasses import dataclass
 from datetime import UTC, datetime
 from pathlib import Path
@@ -39,28 +41,31 @@ class Prediction:
     evaluated_at: str | None


-def _get_conn() -> sqlite3.Connection:
+@contextmanager
+def _get_conn() -> Generator[sqlite3.Connection, None, None]:
     DB_PATH.parent.mkdir(parents=True, exist_ok=True)
-    conn = sqlite3.connect(str(DB_PATH))
+    with closing(sqlite3.connect(str(DB_PATH))) as conn:
         conn.row_factory = sqlite3.Row
         conn.execute("PRAGMA journal_mode=WAL")
         conn.execute("PRAGMA busy_timeout=5000")
         conn.execute("""
             CREATE TABLE IF NOT EXISTS spark_predictions (
                 id TEXT PRIMARY KEY,
                 task_id TEXT NOT NULL,
                 prediction_type TEXT NOT NULL,
                 predicted_value TEXT NOT NULL,
                 actual_value TEXT,
                 accuracy REAL,
                 created_at TEXT NOT NULL,
                 evaluated_at TEXT
             )
         """)
-    conn.execute("CREATE INDEX IF NOT EXISTS idx_pred_task ON spark_predictions(task_id)")
-    conn.execute("CREATE INDEX IF NOT EXISTS idx_pred_type ON spark_predictions(prediction_type)")
-    conn.commit()
-    return conn
+        conn.execute("CREATE INDEX IF NOT EXISTS idx_pred_task ON spark_predictions(task_id)")
+        conn.execute(
+            "CREATE INDEX IF NOT EXISTS idx_pred_type ON spark_predictions(prediction_type)"
+        )
+        conn.commit()
+        yield conn


 # ── Prediction phase ────────────────────────────────────────────────────────
@@ -119,17 +124,16 @@ def predict_task_outcome(
     # Store prediction
     pred_id = str(uuid.uuid4())
     now = datetime.now(UTC).isoformat()
-    conn = _get_conn()
+    with _get_conn() as conn:
         conn.execute(
             """
             INSERT INTO spark_predictions
             (id, task_id, prediction_type, predicted_value, created_at)
             VALUES (?, ?, ?, ?, ?)
             """,
             (pred_id, task_id, "outcome", json.dumps(prediction), now),
         )
         conn.commit()
-    conn.close()

     prediction["prediction_id"] = pred_id
     return prediction
@@ -148,41 +152,39 @@ def evaluate_prediction(

     Returns the evaluation result or None if no prediction exists.
     """
-    conn = _get_conn()
+    with _get_conn() as conn:
         row = conn.execute(
             """
             SELECT * FROM spark_predictions
             WHERE task_id = ? AND prediction_type = 'outcome' AND evaluated_at IS NULL
             ORDER BY created_at DESC LIMIT 1
             """,
             (task_id,),
         ).fetchone()

         if not row:
-            conn.close()
             return None

         predicted = json.loads(row["predicted_value"])
         actual = {
             "winner": actual_winner,
             "succeeded": task_succeeded,
             "winning_bid": winning_bid,
         }

         # Calculate accuracy
         accuracy = _compute_accuracy(predicted, actual)
         now = datetime.now(UTC).isoformat()

         conn.execute(
             """
             UPDATE spark_predictions
             SET actual_value = ?, accuracy = ?, evaluated_at = ?
             WHERE id = ?
             """,
             (json.dumps(actual), accuracy, now, row["id"]),
         )
         conn.commit()
-    conn.close()

     return {
         "prediction_id": row["id"],
@@ -243,7 +245,6 @@ def get_predictions(
     limit: int = 50,
 ) -> list[Prediction]:
     """Query stored predictions."""
-    conn = _get_conn()
     query = "SELECT * FROM spark_predictions WHERE 1=1"
     params: list = []
@@ -256,8 +257,8 @@ def get_predictions(
     query += " ORDER BY created_at DESC LIMIT ?"
     params.append(limit)

-    rows = conn.execute(query, params).fetchall()
-    conn.close()
+    with _get_conn() as conn:
+        rows = conn.execute(query, params).fetchall()
     return [
         Prediction(
             id=r["id"],
@@ -275,17 +276,16 @@ def get_predictions(

 def get_accuracy_stats() -> dict:
     """Return aggregate accuracy statistics for the EIDOS loop."""
-    conn = _get_conn()
+    with _get_conn() as conn:
         row = conn.execute("""
             SELECT
                 COUNT(*) AS total_predictions,
                 COUNT(evaluated_at) AS evaluated,
                 AVG(CASE WHEN accuracy IS NOT NULL THEN accuracy END) AS avg_accuracy,
                 MIN(CASE WHEN accuracy IS NOT NULL THEN accuracy END) AS min_accuracy,
                 MAX(CASE WHEN accuracy IS NOT NULL THEN accuracy END) AS max_accuracy
             FROM spark_predictions
         """).fetchone()
-    conn.close()

     return {
         "total_predictions": row["total_predictions"] or 0,

@@ -273,6 +273,8 @@ class SparkEngine:

     def _maybe_consolidate(self, agent_id: str) -> None:
         """Consolidate events into memories when enough data exists."""
+        from datetime import UTC, datetime, timedelta
+
         agent_events = spark_memory.get_events(agent_id=agent_id, limit=50)
         if len(agent_events) < 5:
             return
@@ -286,7 +288,34 @@ class SparkEngine:

         success_rate = len(completions) / total if total else 0

+        # Determine target memory type based on success rate
         if success_rate >= 0.8:
+            target_memory_type = "pattern"
+        elif success_rate <= 0.3:
+            target_memory_type = "anomaly"
+        else:
+            return  # No consolidation needed for neutral success rates
+
+        # Check for recent memories of the same type for this agent
+        existing_memories = spark_memory.get_memories(subject=agent_id, limit=5)
+        now = datetime.now(UTC)
+        one_hour_ago = now - timedelta(hours=1)
+
+        for memory in existing_memories:
+            if memory.memory_type == target_memory_type:
+                try:
+                    created_at = datetime.fromisoformat(memory.created_at)
+                    if created_at >= one_hour_ago:
+                        logger.info(
+                            "Consolidation: skipping — recent memory exists for %s",
+                            agent_id[:8],
+                        )
+                        return
+                except (ValueError, TypeError):
+                    continue
+
+        # Store the new memory
+        if target_memory_type == "pattern":
             spark_memory.store_memory(
                 memory_type="pattern",
                 subject=agent_id,
@@ -295,7 +324,7 @@ class SparkEngine:
                 confidence=min(0.95, 0.6 + total * 0.05),
                 source_events=total,
             )
-        elif success_rate <= 0.3:
+        else:  # anomaly
             spark_memory.store_memory(
                 memory_type="anomaly",
                 subject=agent_id,
@@ -358,7 +387,8 @@ def get_spark_engine() -> SparkEngine:
         from config import settings

         _spark_engine = SparkEngine(enabled=settings.spark_enabled)
-    except Exception:
+    except Exception as exc:
+        logger.debug("Spark engine settings load error: %s", exc)
         _spark_engine = SparkEngine(enabled=True)
     return _spark_engine
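
The consolidation thresholds in plain terms: a success rate of 0.8 or above yields a "pattern" memory, 0.3 or below an "anomaly", anything between is skipped, and a second memory of the same type within one hour is also skipped. A condensed sketch of just the classification step (hypothetical, simplified from the diff above):

    def classify(success_rate: float) -> str | None:
        if success_rate >= 0.8:
            return "pattern"
        if success_rate <= 0.3:
            return "anomaly"
        return None  # neutral: no consolidation

    assert classify(0.9) == "pattern"
    assert classify(0.1) == "anomaly"
    assert classify(0.5) is None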

@@ -10,12 +10,17 @@ spark_events — raw event log (every swarm event)
 spark_memories — consolidated insights extracted from event patterns
 """

+import logging
 import sqlite3
 import uuid
+from collections.abc import Generator
+from contextlib import closing, contextmanager
 from dataclasses import dataclass
 from datetime import UTC, datetime
 from pathlib import Path

+logger = logging.getLogger(__name__)
+
 DB_PATH = Path("data/spark.db")

 # Importance thresholds
@@ -52,42 +57,43 @@ class SparkMemory:
     expires_at: str | None


-def _get_conn() -> sqlite3.Connection:
+@contextmanager
+def _get_conn() -> Generator[sqlite3.Connection, None, None]:
     DB_PATH.parent.mkdir(parents=True, exist_ok=True)
-    conn = sqlite3.connect(str(DB_PATH))
+    with closing(sqlite3.connect(str(DB_PATH))) as conn:
         conn.row_factory = sqlite3.Row
         conn.execute("PRAGMA journal_mode=WAL")
         conn.execute("PRAGMA busy_timeout=5000")
         conn.execute("""
             CREATE TABLE IF NOT EXISTS spark_events (
                 id TEXT PRIMARY KEY,
                 event_type TEXT NOT NULL,
                 agent_id TEXT,
                 task_id TEXT,
                 description TEXT NOT NULL DEFAULT '',
                 data TEXT NOT NULL DEFAULT '{}',
                 importance REAL NOT NULL DEFAULT 0.5,
                 created_at TEXT NOT NULL
             )
         """)
         conn.execute("""
             CREATE TABLE IF NOT EXISTS spark_memories (
                 id TEXT PRIMARY KEY,
                 memory_type TEXT NOT NULL,
                 subject TEXT NOT NULL DEFAULT 'system',
                 content TEXT NOT NULL,
                 confidence REAL NOT NULL DEFAULT 0.5,
                 source_events INTEGER NOT NULL DEFAULT 0,
                 created_at TEXT NOT NULL,
                 expires_at TEXT
             )
         """)
         conn.execute("CREATE INDEX IF NOT EXISTS idx_events_type ON spark_events(event_type)")
         conn.execute("CREATE INDEX IF NOT EXISTS idx_events_agent ON spark_events(agent_id)")
         conn.execute("CREATE INDEX IF NOT EXISTS idx_events_task ON spark_events(task_id)")
         conn.execute("CREATE INDEX IF NOT EXISTS idx_memories_subject ON spark_memories(subject)")
         conn.commit()
-    return conn
+        yield conn


 # ── Importance scoring ──────────────────────────────────────────────────────
@@ -146,17 +152,16 @@ def record_event(
         parsed = {}
     importance = score_importance(event_type, parsed)

-    conn = _get_conn()
+    with _get_conn() as conn:
         conn.execute(
             """
             INSERT INTO spark_events
             (id, event_type, agent_id, task_id, description, data, importance, created_at)
             VALUES (?, ?, ?, ?, ?, ?, ?, ?)
             """,
             (event_id, event_type, agent_id, task_id, description, data, importance, now),
         )
         conn.commit()
-    conn.close()

     # Bridge to unified event log so all events are queryable from one place
     try:
@@ -170,7 +175,8 @@ def record_event(
             task_id=task_id or "",
             agent_id=agent_id or "",
         )
-    except Exception:
+    except Exception as exc:
+        logger.debug("Spark event log error: %s", exc)
         pass  # Graceful — don't break spark if event_log is unavailable

     return event_id
@@ -184,7 +190,6 @@ def get_events(
     min_importance: float = 0.0,
 ) -> list[SparkEvent]:
     """Query events with optional filters."""
-    conn = _get_conn()
     query = "SELECT * FROM spark_events WHERE importance >= ?"
     params: list = [min_importance]
@@ -201,8 +206,8 @@ def get_events(
     query += " ORDER BY created_at DESC LIMIT ?"
     params.append(limit)

-    rows = conn.execute(query, params).fetchall()
-    conn.close()
+    with _get_conn() as conn:
+        rows = conn.execute(query, params).fetchall()
     return [
         SparkEvent(
             id=r["id"],
@@ -220,15 +225,14 @@ def get_events(

 def count_events(event_type: str | None = None) -> int:
     """Count events, optionally filtered by type."""
-    conn = _get_conn()
+    with _get_conn() as conn:
         if event_type:
             row = conn.execute(
                 "SELECT COUNT(*) FROM spark_events WHERE event_type = ?",
                 (event_type,),
             ).fetchone()
         else:
             row = conn.execute("SELECT COUNT(*) FROM spark_events").fetchone()
-    conn.close()
     return row[0]


@@ -246,17 +250,16 @@ def store_memory(
     """Store a consolidated memory. Returns the memory id."""
     mem_id = str(uuid.uuid4())
     now = datetime.now(UTC).isoformat()
-    conn = _get_conn()
+    with _get_conn() as conn:
         conn.execute(
             """
             INSERT INTO spark_memories
             (id, memory_type, subject, content, confidence, source_events, created_at, expires_at)
             VALUES (?, ?, ?, ?, ?, ?, ?, ?)
             """,
             (mem_id, memory_type, subject, content, confidence, source_events, now, expires_at),
         )
         conn.commit()
-    conn.close()
     return mem_id


@@ -267,7 +270,6 @@ def get_memories(
     limit: int = 50,
 ) -> list[SparkMemory]:
     """Query memories with optional filters."""
-    conn = _get_conn()
     query = "SELECT * FROM spark_memories WHERE confidence >= ?"
     params: list = [min_confidence]
@@ -281,8 +283,8 @@ def get_memories(
     query += " ORDER BY created_at DESC LIMIT ?"
     params.append(limit)

-    rows = conn.execute(query, params).fetchall()
-    conn.close()
+    with _get_conn() as conn:
+        rows = conn.execute(query, params).fetchall()
     return [
         SparkMemory(
             id=r["id"],
@@ -300,13 +302,12 @@ def get_memories(

 def count_memories(memory_type: str | None = None) -> int:
     """Count memories, optionally filtered by type."""
-    conn = _get_conn()
+    with _get_conn() as conn:
         if memory_type:
             row = conn.execute(
                 "SELECT COUNT(*) FROM spark_memories WHERE memory_type = ?",
                 (memory_type,),
             ).fetchone()
         else:
             row = conn.execute("SELECT COUNT(*) FROM spark_memories").fetchone()
-    conn.close()
     return row[0]
|
||||||
|
|||||||
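The refactor above swaps manual `conn = _get_conn()` / `conn.close()` pairs for `with` blocks, so the connection is released even when a query raises. That only works if `_get_conn()` yields an object whose context manager actually closes the connection; a bare `sqlite3.Connection` used with `with` only commits or rolls back a transaction, it never closes. A minimal sketch of what the assumed helper could look like (the database path is illustrative, not from the diff):

```python
# Hypothetical sketch of _get_conn() as a self-closing context manager.
# sqlite3.Connection's own `with` handles transactions but never calls
# close(), so we wrap the connection with contextlib.
import sqlite3
from collections.abc import Iterator
from contextlib import contextmanager

DB_PATH = "spark.db"  # illustrative; the real path is not shown in the diff


@contextmanager
def _get_conn() -> Iterator[sqlite3.Connection]:
    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row  # enables the r["id"]-style access used above
    try:
        yield conn
    finally:
        conn.close()  # runs even if the query raises
```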
@@ -1 +1 @@
-"""Timmy — Core AI agent (Ollama/AirLLM backends, CLI, prompts)."""
+"""Timmy — Core AI agent (Ollama/Grok/Claude backends, CLI, prompts)."""
1 src/timmy/adapters/__init__.py Normal file
@@ -0,0 +1 @@
+"""Adapters — normalize external data streams into sensory events."""
136 src/timmy/adapters/gitea_adapter.py Normal file
@@ -0,0 +1,136 @@
+"""Gitea webhook adapter — normalize webhook payloads to event bus events.
+
+Receives raw Gitea webhook payloads and emits typed events via the
+infrastructure event bus. Bot-only activity is filtered unless it
+represents a PR merge (which is always noteworthy).
+"""
+
+import logging
+from typing import Any
+
+from infrastructure.events.bus import emit
+
+logger = logging.getLogger(__name__)
+
+# Gitea usernames considered "bot" accounts
+BOT_USERNAMES = frozenset({"hermes", "kimi", "manus"})
+
+# Owner username — activity from this user is always emitted
+OWNER_USERNAME = "rockachopa"
+
+# Mapping from Gitea webhook event type to our bus event type
+_EVENT_TYPE_MAP = {
+    "push": "gitea.push",
+    "issues": "gitea.issue.opened",
+    "issue_comment": "gitea.issue.comment",
+    "pull_request": "gitea.pull_request",
+}
+
+
+def _extract_actor(payload: dict[str, Any]) -> str:
+    """Extract the actor username from a webhook payload."""
+    # Gitea puts actor in sender.login for most events
+    sender = payload.get("sender", {})
+    return sender.get("login", "unknown")
+
+
+def _is_bot(username: str) -> bool:
+    return username.lower() in BOT_USERNAMES
+
+
+def _is_pr_merge(event_type: str, payload: dict[str, Any]) -> bool:
+    """Check if this is a pull_request merge event."""
+    if event_type != "pull_request":
+        return False
+    action = payload.get("action", "")
+    pr = payload.get("pull_request", {})
+    return action == "closed" and pr.get("merged", False)
+
+
+def _normalize_push(payload: dict[str, Any], actor: str) -> dict[str, Any]:
+    """Normalize a push event payload."""
+    commits = payload.get("commits", [])
+    return {
+        "actor": actor,
+        "ref": payload.get("ref", ""),
+        "repo": payload.get("repository", {}).get("full_name", ""),
+        "num_commits": len(commits),
+        "head_message": commits[0].get("message", "").split("\n", 1)[0].strip() if commits else "",
+    }
+
+
+def _normalize_issue_opened(payload: dict[str, Any], actor: str) -> dict[str, Any]:
+    """Normalize an issue-opened event payload."""
+    issue = payload.get("issue", {})
+    return {
+        "actor": actor,
+        "action": payload.get("action", "opened"),
+        "repo": payload.get("repository", {}).get("full_name", ""),
+        "issue_number": issue.get("number", 0),
+        "title": issue.get("title", ""),
+    }
+
+
+def _normalize_issue_comment(payload: dict[str, Any], actor: str) -> dict[str, Any]:
+    """Normalize an issue-comment event payload."""
+    issue = payload.get("issue", {})
+    comment = payload.get("comment", {})
+    return {
+        "actor": actor,
+        "action": payload.get("action", "created"),
+        "repo": payload.get("repository", {}).get("full_name", ""),
+        "issue_number": issue.get("number", 0),
+        "issue_title": issue.get("title", ""),
+        "comment_body": (comment.get("body", "")[:200]),
+    }
+
+
+def _normalize_pull_request(payload: dict[str, Any], actor: str) -> dict[str, Any]:
+    """Normalize a pull-request event payload."""
+    pr = payload.get("pull_request", {})
+    return {
+        "actor": actor,
+        "action": payload.get("action", ""),
+        "repo": payload.get("repository", {}).get("full_name", ""),
+        "pr_number": pr.get("number", 0),
+        "title": pr.get("title", ""),
+        "merged": pr.get("merged", False),
+    }
+
+
+_NORMALIZERS = {
+    "push": _normalize_push,
+    "issues": _normalize_issue_opened,
+    "issue_comment": _normalize_issue_comment,
+    "pull_request": _normalize_pull_request,
+}
+
+
+async def handle_webhook(event_type: str, payload: dict[str, Any]) -> bool:
+    """Normalize a Gitea webhook payload and emit it to the event bus.
+
+    Args:
+        event_type: The Gitea event type header (e.g. "push", "issues").
+        payload: The raw JSON payload from the webhook.
+
+    Returns:
+        True if an event was emitted, False if filtered or unsupported.
+    """
+    bus_event_type = _EVENT_TYPE_MAP.get(event_type)
+    if bus_event_type is None:
+        logger.debug("Unsupported Gitea event type: %s", event_type)
+        return False
+
+    actor = _extract_actor(payload)
+
+    # Filter bot-only activity — except PR merges
+    if _is_bot(actor) and not _is_pr_merge(event_type, payload):
+        logger.debug("Filtered bot activity from %s on %s", actor, event_type)
+        return False
+
+    normalizer = _NORMALIZERS[event_type]
+    data = normalizer(payload, actor)
+
+    await emit(bus_event_type, source="gitea", data=data)
+    logger.info("Emitted %s from %s", bus_event_type, actor)
+    return True
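To make the filtering rules concrete, here is a small usage sketch of `handle_webhook`: a bot-authored PR merge passes the filter while an ordinary bot comment does not. The payloads are abbreviated, made-up stand-ins, and the sketch assumes `infrastructure.events.bus.emit` is importable in your environment:

```python
import asyncio

from timmy.adapters.gitea_adapter import handle_webhook

# Abbreviated, made-up payloads for illustration only.
merged_pr = {
    "sender": {"login": "kimi"},  # a BOT_USERNAMES member
    "action": "closed",
    "pull_request": {"number": 42, "title": "Fix retries", "merged": True},
    "repository": {"full_name": "rockachopa/timmy"},
}
bot_comment = {
    "sender": {"login": "kimi"},
    "action": "created",
    "issue": {"number": 7, "title": "Flaky test"},
    "comment": {"body": "Looking into it."},
    "repository": {"full_name": "rockachopa/timmy"},
}


async def demo() -> None:
    print(await handle_webhook("pull_request", merged_pr))     # True: merges always pass
    print(await handle_webhook("issue_comment", bot_comment))  # False: bot noise filtered


asyncio.run(demo())
```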
82 src/timmy/adapters/time_adapter.py Normal file
@@ -0,0 +1,82 @@
+"""Time adapter — circadian awareness for Timmy.
+
+Emits time-of-day events so Timmy knows the current period
+and tracks how long since the last user interaction.
+"""
+
+import logging
+from datetime import UTC, datetime
+
+from infrastructure.events.bus import emit
+
+logger = logging.getLogger(__name__)
+
+# Time-of-day periods: (event_name, start_hour, end_hour)
+_PERIODS = [
+    ("morning", 6, 9),
+    ("afternoon", 12, 14),
+    ("evening", 18, 20),
+    ("late_night", 23, 24),
+    ("late_night", 0, 3),
+]
+
+
+def classify_period(hour: int) -> str | None:
+    """Return the circadian period name for a given hour, or None."""
+    for name, start, end in _PERIODS:
+        if start <= hour < end:
+            return name
+    return None
+
+
+class TimeAdapter:
+    """Emits circadian and interaction-tracking events."""
+
+    def __init__(self) -> None:
+        self._last_interaction: datetime | None = None
+        self._last_period: str | None = None
+        self._last_date: str | None = None
+
+    def record_interaction(self, now: datetime | None = None) -> None:
+        """Record a user interaction timestamp."""
+        self._last_interaction = now or datetime.now(UTC)
+
+    def time_since_last_interaction(
+        self,
+        now: datetime | None = None,
+    ) -> float | None:
+        """Seconds since last user interaction, or None if no interaction."""
+        if self._last_interaction is None:
+            return None
+        current = now or datetime.now(UTC)
+        return (current - self._last_interaction).total_seconds()
+
+    async def tick(self, now: datetime | None = None) -> list[str]:
+        """Check current time and emit relevant events.
+
+        Returns list of event types emitted (useful for testing).
+        """
+        current = now or datetime.now(UTC)
+        emitted: list[str] = []
+
+        # --- new_day ---
+        date_str = current.strftime("%Y-%m-%d")
+        if self._last_date is not None and date_str != self._last_date:
+            event_type = "time.new_day"
+            await emit(event_type, source="time_adapter", data={"date": date_str})
+            emitted.append(event_type)
+        self._last_date = date_str
+
+        # --- circadian period ---
+        period = classify_period(current.hour)
+        if period is not None and period != self._last_period:
+            event_type = f"time.{period}"
+            await emit(
+                event_type,
+                source="time_adapter",
+                data={"hour": current.hour, "period": period},
+            )
+            emitted.append(event_type)
+        self._last_period = period
+
+        return emitted
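A quick sketch of the adapter's dedup behavior: each circadian period fires once per transition, and `time.new_day` only fires after a baseline date has been seen. Driving `tick` with fixed datetimes makes this testable:

```python
import asyncio
from datetime import UTC, datetime

from timmy.adapters.time_adapter import TimeAdapter, classify_period

# Hours outside the named windows classify to None.
assert classify_period(7) == "morning"
assert classify_period(1) == "late_night"
assert classify_period(10) is None


async def demo() -> None:
    adapter = TimeAdapter()
    morning = datetime(2025, 1, 6, 7, 0, tzinfo=UTC)
    print(await adapter.tick(morning))   # ['time.morning']; first tick sets the date baseline
    print(await adapter.tick(morning))   # []: same period, nothing re-emitted
    next_day = datetime(2025, 1, 7, 7, 0, tzinfo=UTC)
    print(await adapter.tick(next_day))  # ['time.new_day']: date changed, period did not


asyncio.run(demo())
```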
@@ -16,6 +16,7 @@ Handoff Protocol maintains continuity across sessions.
 import logging
 from typing import TYPE_CHECKING, Union
 
+import httpx
 from agno.agent import Agent
 from agno.db.sqlite import SqliteDb
 from agno.models.ollama import Ollama
@@ -25,30 +26,12 @@ from timmy.prompts import get_system_prompt
 from timmy.tools import create_full_toolkit
 
 if TYPE_CHECKING:
-    from timmy.backends import ClaudeBackend, GrokBackend, TimmyAirLLMAgent
+    from timmy.backends import ClaudeBackend, GrokBackend
 
 logger = logging.getLogger(__name__)
 
-# Fallback chain for text/tool models (in order of preference)
-DEFAULT_MODEL_FALLBACKS = [
-    "llama3.1:8b-instruct",
-    "llama3.1",
-    "qwen3.5:latest",
-    "qwen2.5:14b",
-    "qwen2.5:7b",
-    "llama3.2:3b",
-]
-
-# Fallback chain for vision models
-VISION_MODEL_FALLBACKS = [
-    "llama3.2:3b",
-    "llava:7b",
-    "qwen2.5-vl:3b",
-    "moondream:1.8b",
-]
-
 # Union type for callers that want to hint the return type.
-TimmyAgent = Union[Agent, "TimmyAirLLMAgent", "GrokBackend", "ClaudeBackend"]
+TimmyAgent = Union[Agent, "GrokBackend", "ClaudeBackend"]
 
 # Models known to be too small for reliable tool calling.
 # These hallucinate tool calls as text, invoke tools randomly,
@@ -80,7 +63,7 @@ def _pull_model(model_name: str) -> bool:
 
     logger.info("Pulling model: %s", model_name)
 
-    url = settings.ollama_url.replace("localhost", "127.0.0.1")
+    url = settings.normalized_ollama_url
     req = urllib.request.Request(
         f"{url}/api/pull",
         method="POST",
@@ -130,8 +113,8 @@ def _resolve_model_with_fallback(
         return model, False
     logger.warning("Failed to pull %s, checking fallbacks...", model)
 
-    # Use appropriate fallback chain
-    fallback_chain = VISION_MODEL_FALLBACKS if require_vision else DEFAULT_MODEL_FALLBACKS
+    # Use appropriate configurable fallback chain (from settings / env vars)
+    fallback_chain = settings.vision_fallback_models if require_vision else settings.fallback_models
 
     for fallback_model in fallback_chain:
         if _check_model_available(fallback_model):
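Moving the fallback chains into settings means each deployment can override them without a code change. The diff does not show the settings module itself; a plausible sketch using pydantic-settings, where the field names match the diff but the env-var prefix and defaults are assumptions:

```python
# Hypothetical declaration behind settings.fallback_models and
# settings.vision_fallback_models; the prefix and defaults are assumed.
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    model_config = SettingsConfigDict(env_prefix="TIMMY_")

    # Overridable via e.g. TIMMY_FALLBACK_MODELS='["llama3.1:8b-instruct","llama3.2:3b"]'
    fallback_models: list[str] = ["llama3.1:8b-instruct", "llama3.2:3b"]
    vision_fallback_models: list[str] = ["llava:7b", "moondream:1.8b"]


settings = Settings()
```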
@@ -162,99 +145,61 @@ def _model_supports_tools(model_name: str) -> bool:
     return True
 
 
-def _resolve_backend(requested: str | None) -> str:
-    """Return the backend name to use, resolving 'auto' and explicit overrides.
-
-    Priority (highest → lowest):
-    1. CLI flag passed directly to create_timmy()
-    2. TIMMY_MODEL_BACKEND env var / .env setting
-    3. 'ollama' (safe default — no surprises)
-
-    'auto' triggers Apple Silicon detection: uses AirLLM if both
-    is_apple_silicon() and airllm_available() return True.
-    """
-    if requested is not None:
-        return requested
-
-    configured = settings.timmy_model_backend  # "ollama" | "airllm" | "grok" | "claude" | "auto"
-    if configured != "auto":
-        return configured
-
-    # "auto" path — lazy import to keep startup fast and tests clean.
-    from timmy.backends import airllm_available, is_apple_silicon
-
-    if is_apple_silicon() and airllm_available():
-        return "airllm"
-    return "ollama"
-
-
-def create_timmy(
-    db_file: str = "timmy.db",
-    backend: str | None = None,
-    model_size: str | None = None,
-) -> TimmyAgent:
-    """Instantiate the agent — Ollama or AirLLM, same public interface.
-
-    Args:
-        db_file: SQLite file for Agno conversation memory (Ollama path only).
-        backend: "ollama" | "airllm" | "auto" | None (reads config/env).
-        model_size: AirLLM size — "8b" | "70b" | "405b" | None (reads config).
-
-    Returns an Agno Agent or backend-specific agent — all expose
-    print_response(message, stream).
-    """
-    resolved = _resolve_backend(backend)
-    size = model_size or settings.airllm_model_size
-
-    if resolved == "claude":
-        from timmy.backends import ClaudeBackend
-
-        return ClaudeBackend()
-
-    if resolved == "grok":
-        from timmy.backends import GrokBackend
-
-        return GrokBackend()
-
-    if resolved == "airllm":
-        from timmy.backends import TimmyAirLLMAgent
-
-        return TimmyAirLLMAgent(model_size=size)
-
-    # Default: Ollama via Agno.
-    # Resolve model with automatic pulling and fallback
-    model_name, is_fallback = _resolve_model_with_fallback(
-        requested_model=None,
-        require_vision=False,
-        auto_pull=True,
-    )
-
-    # If Ollama is completely unreachable, fail loudly.
-    # Sovereignty: never silently send data to a cloud API.
-    # Use --backend claude explicitly if you want cloud inference.
-    if not _check_model_available(model_name):
-        logger.error(
-            "Ollama unreachable and no local models available. "
-            "Start Ollama with 'ollama serve' or use --backend claude explicitly."
-        )
-
-    if is_fallback:
-        logger.info("Using fallback model %s (requested was unavailable)", model_name)
-
-    use_tools = _model_supports_tools(model_name)
-
-    # Conditionally include tools — small models get none
-    toolkit = create_full_toolkit() if use_tools else None
+def _warmup_model(model_name: str) -> bool:
+    """Warm up an Ollama model by sending a minimal generation request.
+
+    This prevents 'Server disconnected' errors on first request after cold model load.
+    Cold loads can take 30-40s, so we use a 60s timeout.
+
+    Args:
+        model_name: Name of the Ollama model to warm up
+
+    Returns:
+        True if warmup succeeded, False otherwise (does not raise)
+    """
+    try:
+        response = httpx.post(
+            f"{settings.ollama_url}/api/generate",
+            json={"model": model_name, "prompt": "hi", "options": {"num_predict": 1}},
+            timeout=60.0,
+        )
+        response.raise_for_status()
+        logger.info("Model %s warmed up successfully", model_name)
+        return True
+    except Exception as exc:
+        logger.warning("Model warmup failed: %s — first request may disconnect", exc)
+        return False
+
+
+def _resolve_backend(requested: str | None) -> str:
+    """Return the backend name to use.
+
+    Priority (highest -> lowest):
+    1. CLI flag passed directly to create_timmy()
+    2. TIMMY_MODEL_BACKEND env var / .env setting
+    3. 'ollama' (safe default -- no surprises)
+    """
+    if requested is not None:
+        return requested
+
+    return settings.timmy_model_backend  # "ollama" | "grok" | "claude"
+
+
+def _build_tools_list(use_tools: bool, skip_mcp: bool, model_name: str) -> list:
+    """Assemble the tools list based on model capability and MCP flags.
+
+    Returns a list of Toolkit / MCPTools objects, or an empty list.
+    """
     if not use_tools:
         logger.info("Tools disabled for model %s (too small for reliable tool calling)", model_name)
+        return []
 
-    # Build the tools list — Agno accepts a list of Toolkit / MCPTools
-    tools_list: list = []
-    if toolkit:
-        tools_list.append(toolkit)
+    tools_list: list = [create_full_toolkit()]
 
-    # Add MCP tool servers (lazy-connected on first arun())
-    if use_tools:
+    # Add MCP tool servers (lazy-connected on first arun()).
+    # Skipped when skip_mcp=True — MCP's stdio transport uses anyio cancel
+    # scopes that conflict with asyncio background task cancellation (#72).
+    if not skip_mcp:
         try:
             from timmy.mcp_tools import create_filesystem_mcp_tools, create_gitea_mcp_tools
 
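Backend resolution and warmup are easy to exercise directly. A short sketch (the module path is assumed from context; `_warmup_model` needs a running Ollama to succeed, and failure is logged rather than raised):

```python
from timmy.agent import _resolve_backend, _warmup_model  # module path assumed

# Explicit argument wins over TIMMY_MODEL_BACKEND, which wins over the
# "ollama" default.
assert _resolve_backend("grok") == "grok"

# Pre-load the weights before the first real request so a 30-40s cold
# load does not surface as "Server disconnected".
if _resolve_backend(None) == "ollama":
    _warmup_model("llama3.2:3b")  # model name illustrative
```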
@@ -268,39 +213,121 @@ def create_timmy(
         except Exception as exc:
             logger.debug("MCP tools unavailable: %s", exc)
 
-    # Select prompt tier based on tool capability
-    base_prompt = get_system_prompt(tools_enabled=use_tools)
+    return tools_list
+
+
+def _build_prompt(use_tools: bool, session_id: str) -> str:
+    """Build the full system prompt with optional memory context."""
+    base_prompt = get_system_prompt(tools_enabled=use_tools, session_id=session_id)
 
-    # Try to load memory context
     try:
         from timmy.memory_system import memory_system
 
         memory_context = memory_system.get_system_context()
         if memory_context:
-            # Truncate if too long — smaller budget for small models
-            # since the expanded prompt (roster, guardrails) uses more tokens
+            # Smaller budget for small models — expanded prompt uses more tokens
             max_context = 2000 if not use_tools else 8000
             if len(memory_context) > max_context:
                 memory_context = memory_context[:max_context] + "\n... [truncated]"
-            full_prompt = f"{base_prompt}\n\n## Memory Context\n\n{memory_context}"
-        else:
-            full_prompt = base_prompt
+            return (
+                f"{base_prompt}\n\n"
+                f"## GROUNDED CONTEXT (verified sources — cite when using)\n\n"
+                f"{memory_context}"
+            )
     except Exception as exc:
         logger.warning("Failed to load memory context: %s", exc)
-        full_prompt = base_prompt
 
-    return Agent(
+    return base_prompt
+
+
+def _create_ollama_agent(
+    *,
+    db_file: str,
+    model_name: str,
+    tools_list: list,
+    full_prompt: str,
+    use_tools: bool,
+) -> Agent:
+    """Construct the Agno Agent with Ollama backend and warm up the model."""
+    model_kwargs = {}
+    if settings.ollama_num_ctx > 0:
+        model_kwargs["options"] = {"num_ctx": settings.ollama_num_ctx}
+
+    agent = Agent(
         name="Agent",
-        model=Ollama(id=model_name, host=settings.ollama_url, timeout=300),
+        model=Ollama(id=model_name, host=settings.ollama_url, timeout=300, **model_kwargs),
         db=SqliteDb(db_file=db_file),
         description=full_prompt,
         add_history_to_context=True,
         num_history_runs=20,
-        markdown=True,
+        markdown=False,
         tools=tools_list if tools_list else None,
         tool_call_limit=settings.max_agent_steps if use_tools else None,
         telemetry=settings.telemetry_enabled,
     )
+    _warmup_model(model_name)
+    return agent
+
+
+def create_timmy(
+    db_file: str = "timmy.db",
+    backend: str | None = None,
+    *,
+    skip_mcp: bool = False,
+    session_id: str = "unknown",
+) -> TimmyAgent:
+    """Instantiate the agent — Ollama, Grok, or Claude.
+
+    Args:
+        db_file: SQLite file for Agno conversation memory (Ollama path only).
+        backend: "ollama" | "grok" | "claude" | None (reads config/env).
+        skip_mcp: If True, omit MCP tool servers (Gitea, filesystem).
+                  Use for background tasks (thinking, QA) where MCP's
+                  stdio cancel-scope lifecycle conflicts with asyncio
+                  task cancellation.
+
+    Returns an Agno Agent or backend-specific agent — all expose
+    print_response(message, stream).
+    """
+    resolved = _resolve_backend(backend)
+
+    if resolved == "claude":
+        from timmy.backends import ClaudeBackend
+
+        return ClaudeBackend()
+
+    if resolved == "grok":
+        from timmy.backends import GrokBackend
+
+        return GrokBackend()
+
+    # Default: Ollama via Agno.
+    model_name, is_fallback = _resolve_model_with_fallback(
+        requested_model=None,
+        require_vision=False,
+        auto_pull=True,
+    )
+
+    if not _check_model_available(model_name):
+        logger.error(
+            "Ollama unreachable and no local models available. "
+            "Start Ollama with 'ollama serve' or use --backend claude explicitly."
+        )
+
+    if is_fallback:
+        logger.info("Using fallback model %s (requested was unavailable)", model_name)
+
+    use_tools = _model_supports_tools(model_name)
+    tools_list = _build_tools_list(use_tools, skip_mcp, model_name)
+    full_prompt = _build_prompt(use_tools, session_id)
+
+    return _create_ollama_agent(
+        db_file=db_file,
+        model_name=model_name,
+        tools_list=tools_list,
+        full_prompt=full_prompt,
+        use_tools=use_tools,
+    )
 
 
 class TimmyWithMemory:
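The memory budget in `_build_prompt` is worth pinning down: a no-tools model gets 2,000 characters of context, a tool-capable one 8,000, and overruns are cut with a visible marker. A standalone restatement of that rule:

```python
def truncate_context(memory_context: str, use_tools: bool) -> str:
    # Mirrors the budget logic in _build_prompt: small models get a
    # smaller slice of the prompt window.
    max_context = 2000 if not use_tools else 8000
    if len(memory_context) > max_context:
        return memory_context[:max_context] + "\n... [truncated]"
    return memory_context


ctx = "x" * 5000
assert len(truncate_context(ctx, use_tools=False)) == 2000 + len("\n... [truncated]")
assert truncate_context(ctx, use_tools=True) == ctx  # fits within the 8000 budget
```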
@@ -317,15 +344,47 @@ class TimmyWithMemory:
         self.initial_context = self.memory.get_system_context()
 
     def chat(self, message: str) -> str:
-        """Simple chat interface that tracks in memory."""
+        """Simple chat interface that tracks in memory.
+
+        Retries on transient Ollama errors (GPU contention, timeouts)
+        with exponential backoff (#70).
+        """
+        import time
+
         # Check for user facts to extract
         self._extract_and_store_facts(message)
 
-        # Run agent
-        result = self.agent.run(message, stream=False)
-        response_text = result.content if hasattr(result, "content") else str(result)
-
-        return response_text
+        # Retry with backoff — GPU contention causes ReadError/ReadTimeout
+        max_retries = 3
+        for attempt in range(1, max_retries + 1):
+            try:
+                result = self.agent.run(message, stream=False)
+                return result.content if hasattr(result, "content") else str(result)
+            except (
+                httpx.ConnectError,
+                httpx.ReadError,
+                httpx.ReadTimeout,
+                httpx.ConnectTimeout,
+                ConnectionError,
+                TimeoutError,
+            ) as exc:
+                if attempt < max_retries:
+                    wait = min(2**attempt, 16)
+                    logger.warning(
+                        "Ollama contention on attempt %d/%d: %s. Waiting %ds before retry...",
+                        attempt,
+                        max_retries,
+                        type(exc).__name__,
+                        wait,
+                    )
+                    time.sleep(wait)
+                else:
+                    logger.error(
+                        "Ollama unreachable after %d attempts: %s",
+                        max_retries,
+                        exc,
+                    )
+                    raise
 
     def _extract_and_store_facts(self, message: str) -> None:
         """Extract user facts from message and store in memory."""
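With `max_retries = 3` and `wait = min(2**attempt, 16)`, the loop sleeps 2s after the first failure and 4s after the second; the third failure re-raises, so the 16s cap only matters if `max_retries` is raised. A quick check of the arithmetic:

```python
max_retries = 3
waits = [min(2**attempt, 16) for attempt in range(1, max_retries)]
assert waits == [2, 4]  # the final attempt re-raises instead of sleeping

# With max_retries = 6 the cap would engage:
assert [min(2**a, 16) for a in range(1, 6)] == [2, 4, 8, 16, 16]
```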
@@ -336,7 +395,8 @@ class TimmyWithMemory:
             if name:
                 self.memory.update_user_fact("Name", name)
                 self.memory.record_decision(f"Learned user's name: {name}")
-        except Exception:
+        except Exception as exc:
+            logger.warning("User name extraction failed: %s", exc)
             pass  # Best-effort extraction
 
     def end_session(self, summary: str = "Session completed") -> None:
@@ -1 +0,0 @@
-"""Agent Core — Substrate-agnostic agent interface and base classes."""
Some files were not shown because too many files have changed in this diff.