Compare commits (38 commits)
fix/csrf-c...kimi/issue
| SHA1 |
|---|
| 919a011cae |
| a95cf806c8 |
| 19367d6e41 |
| 7e983fcdb3 |
| 46f89d59db |
| e3a0f1d2d6 |
| 2a9d21cea1 |
| 05b87c3ac1 |
| 8276279775 |
| d1f5c2714b |
| 65df56414a |
| b08ce53bab |
| e0660bf768 |
| dc9f0c04eb |
| 815933953c |
| d54493a87b |
| f7404f67ec |
| 5f4580f98d |
| 695d1401fd |
| ddadc95e55 |
| 8fc8e0fc3d |
| ada0774ca6 |
| 2a7b6d5708 |
| 9d4ac8e7cc |
| c9601ba32c |
| 646eaefa3e |
| 2fa5b23c0c |
| 9b57774282 |
| 62bde03f9e |
| 3474eeb4eb |
| e92e151dc3 |
| 1f1bc222e4 |
| cc30bdb391 |
| 6f0863b587 |
| e3d425483d |
| c9445e3056 |
| 11cd2e3372 |
| 9d0f5c778e |

config/matrix.yaml — new file (33 lines)
@@ -0,0 +1,33 @@
# Matrix World Configuration
# Serves lighting, environment, and feature settings to the Matrix frontend.

lighting:
  ambient_color: "#FFAA55"  # Warm amber (Workshop warmth)
  ambient_intensity: 0.5
  point_lights:
    - color: "#FFAA55"  # Warm amber (Workshop center light)
      intensity: 1.2
      position: { x: 0, y: 5, z: 0 }
    - color: "#3B82F6"  # Cool blue (Matrix accent)
      intensity: 0.8
      position: { x: -5, y: 3, z: -5 }
    - color: "#A855F7"  # Purple accent
      intensity: 0.6
      position: { x: 5, y: 3, z: 5 }

environment:
  rain_enabled: false
  starfield_enabled: true  # Cool blue starfield (Matrix feel)
  fog_color: "#0f0f23"
  fog_density: 0.02

features:
  chat_enabled: true
  visitor_avatars: true
  pip_familiar: true
  workshop_portal: true

agents:
  default_count: 5
  max_count: 20
  agents: []

config/quests.yaml — new file (178 lines)
@@ -0,0 +1,178 @@
# ── Token Quest System Configuration ─────────────────────────────────────────
#
# Quests are special objectives that agents (and humans) can complete for
# bonus tokens. Each quest has:
# - id: Unique identifier
# - name: Display name
# - description: What the quest requires
# - reward_tokens: Number of tokens awarded on completion
# - criteria: Detection rules for completion
# - enabled: Whether this quest is active
# - repeatable: Whether this quest can be completed multiple times
# - cooldown_hours: Minimum hours between completions (if repeatable)
#
# Quest Types:
# - issue_count: Complete when N issues matching criteria are closed
# - issue_reduce: Complete when open issue count drops by N
# - docs_update: Complete when documentation files are updated
# - test_improve: Complete when test coverage/cases improve
# - daily_run: Complete Daily Run session objectives
# - custom: Special quests with manual completion
#
# ── Active Quests ─────────────────────────────────────────────────────────────

quests:
  # ── Daily Run & Test Improvement Quests ───────────────────────────────────

  close_flaky_tests:
    id: close_flaky_tests
    name: Flaky Test Hunter
    description: Close 3 issues labeled "flaky-test"
    reward_tokens: 150
    type: issue_count
    enabled: true
    repeatable: true
    cooldown_hours: 24
    criteria:
      issue_labels:
        - flaky-test
      target_count: 3
      issue_state: closed
      lookback_days: 7
    notification_message: "Quest Complete! You closed 3 flaky-test issues and earned {tokens} tokens."

  reduce_p1_issues:
    id: reduce_p1_issues
    name: Priority Firefighter
    description: Reduce open P1 Daily Run issues by 2
    reward_tokens: 200
    type: issue_reduce
    enabled: true
    repeatable: true
    cooldown_hours: 48
    criteria:
      issue_labels:
        - layer:triage
        - P1
      target_reduction: 2
      lookback_days: 3
    notification_message: "Quest Complete! You reduced P1 issues by 2 and earned {tokens} tokens."

  improve_test_coverage:
    id: improve_test_coverage
    name: Coverage Champion
    description: Improve test coverage by 5% or add 10 new test cases
    reward_tokens: 300
    type: test_improve
    enabled: true
    repeatable: false
    criteria:
      coverage_increase_percent: 5
      min_new_tests: 10
    notification_message: "Quest Complete! You improved test coverage and earned {tokens} tokens."

  complete_daily_run_session:
    id: complete_daily_run_session
    name: Daily Runner
    description: Successfully complete 5 Daily Run sessions in a week
    reward_tokens: 250
    type: daily_run
    enabled: true
    repeatable: true
    cooldown_hours: 168  # 1 week
    criteria:
      min_sessions: 5
      lookback_days: 7
    notification_message: "Quest Complete! You completed 5 Daily Run sessions and earned {tokens} tokens."

  # ── Documentation & Maintenance Quests ────────────────────────────────────

  improve_automation_docs:
    id: improve_automation_docs
    name: Documentation Hero
    description: Improve documentation for automations (update 3+ doc files)
    reward_tokens: 100
    type: docs_update
    enabled: true
    repeatable: true
    cooldown_hours: 72
    criteria:
      file_patterns:
        - "docs/**/*.md"
        - "**/README.md"
        - "timmy_automations/**/*.md"
      min_files_changed: 3
      lookback_days: 7
    notification_message: "Quest Complete! You improved automation docs and earned {tokens} tokens."

  close_micro_fixes:
    id: close_micro_fixes
    name: Micro Fix Master
    description: Close 5 issues labeled "layer:micro-fix"
    reward_tokens: 125
    type: issue_count
    enabled: true
    repeatable: true
    cooldown_hours: 24
    criteria:
      issue_labels:
        - layer:micro-fix
      target_count: 5
      issue_state: closed
      lookback_days: 7
    notification_message: "Quest Complete! You closed 5 micro-fix issues and earned {tokens} tokens."

  # ── Special Achievements ──────────────────────────────────────────────────

  first_contribution:
    id: first_contribution
    name: First Steps
    description: Make your first contribution (close any issue)
    reward_tokens: 50
    type: issue_count
    enabled: true
    repeatable: false
    criteria:
      target_count: 1
      issue_state: closed
      lookback_days: 30
    notification_message: "Welcome! You completed your first contribution and earned {tokens} tokens."

  bug_squasher:
    id: bug_squasher
    name: Bug Squasher
    description: Close 10 issues labeled "bug"
    reward_tokens: 500
    type: issue_count
    enabled: true
    repeatable: true
    cooldown_hours: 168  # 1 week
    criteria:
      issue_labels:
        - bug
      target_count: 10
      issue_state: closed
      lookback_days: 7
    notification_message: "Quest Complete! You squashed 10 bugs and earned {tokens} tokens."

# ── Quest System Settings ───────────────────────────────────────────────────

settings:
  # Enable/disable quest notifications
  notifications_enabled: true

  # Maximum number of concurrent active quests per agent
  max_concurrent_quests: 5

  # Auto-detect quest completions on Daily Run metrics update
  auto_detect_on_daily_run: true

  # Gitea issue labels that indicate quest-related work
  quest_work_labels:
    - layer:triage
    - layer:micro-fix
    - layer:tests
    - layer:economy
    - flaky-test
    - bug
    - documentation
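
The `criteria` blocks above are declarative; a minimal sketch of how an `issue_count` quest might be evaluated against closed Gitea issues (the function and the simplified issue shape are illustrative, not the project's actual detector):

```python
from datetime import datetime, timedelta, timezone

def issue_count_quest_complete(quest: dict, closed_issues: list[dict]) -> bool:
    """Check an issue_count quest against already-fetched closed issues.

    Each item in `closed_issues` is assumed to carry `labels` (list[str])
    and `closed_at` (tz-aware datetime) — a simplified view of Gitea's
    issue payload, not its full schema.
    """
    criteria = quest["criteria"]
    required = set(criteria.get("issue_labels", []))
    cutoff = datetime.now(timezone.utc) - timedelta(days=criteria["lookback_days"])

    # An issue counts if it closed within the lookback window and
    # carries every required label.
    matching = [
        issue for issue in closed_issues
        if issue["closed_at"] >= cutoff and required.issubset(set(issue["labels"]))
    ]
    return len(matching) >= criteria["target_count"]
```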

config/stress_modes.yaml — new file (98 lines)
@@ -0,0 +1,98 @@
# ── System Stress Modes Configuration ────────────────────────────────────────
#
# This configuration defines how token rewards adapt based on system stress.
# When the system detects elevated stress (flaky tests, growing backlog,
# CI failures), quest rewards are adjusted to incentivize agents to focus
# on the most critical areas.
#
# ── How It Works ─────────────────────────────────────────────────────────────
#
# 1. SIGNALS: System metrics are monitored continuously
# 2. SCORE: Weighted contributions from triggered signals create a stress score
# 3. MODE: Score determines the stress mode (calm, elevated, high)
# 4. MULTIPLIERS: Token rewards are multiplied based on the current mode
#
# ── Stress Thresholds ────────────────────────────────────────────────────────

thresholds:
  # Minimum score to enter elevated mode (0.0 - 1.0)
  elevated_min: 0.3

  # Minimum score to enter high stress mode (0.0 - 1.0)
  high_min: 0.6

# ── Stress Signals ───────────────────────────────────────────────────────────
#
# Each signal has:
# - threshold: Value at which signal is considered "triggered"
# - weight: Contribution to overall stress score (should sum to ~1.0)

signals:
  flaky_test_rate:
    threshold: 0.15  # 15% of tests showing flakiness
    weight: 0.30
    description: "Percentage of test runs that are flaky"

  p1_backlog_growth:
    threshold: 5  # 5 new P1 issues in lookback period
    weight: 0.25
    description: "Net growth in P1 priority issues over 7 days"

  ci_failure_rate:
    threshold: 0.20  # 20% of CI runs failing
    weight: 0.25
    description: "Percentage of CI runs failing in lookback period"

  open_bug_count:
    threshold: 20  # 20 open bugs
    weight: 0.20
    description: "Total open issues labeled as 'bug'"

# ── Token Multipliers ────────────────────────────────────────────────────────
#
# Multipliers are applied to quest rewards based on current stress mode.
# Values > 1.0 increase rewards, < 1.0 decrease rewards.
#
# Quest types:
# - test_improve: Test coverage/quality improvements
# - docs_update: Documentation updates
# - issue_count: Closing specific issue types
# - issue_reduce: Reducing overall issue backlog
# - daily_run: Daily Run session completion
# - custom: Special/manual quests
# - exploration: Exploratory work
# - refactor: Code refactoring

multipliers:
  calm:
    # Calm periods: incentivize maintenance and exploration
    test_improve: 1.0
    docs_update: 1.2
    issue_count: 1.0
    issue_reduce: 1.0
    daily_run: 1.0
    custom: 1.0
    exploration: 1.3
    refactor: 1.2

  elevated:
    # Elevated stress: start emphasizing stability
    test_improve: 1.2
    docs_update: 1.0
    issue_count: 1.1
    issue_reduce: 1.1
    daily_run: 1.0
    custom: 1.0
    exploration: 1.0
    refactor: 0.9  # Discourage risky changes

  high:
    # High stress: crisis mode, focus on stabilization
    test_improve: 1.5  # Strongly incentivize testing
    docs_update: 0.8  # Deprioritize docs
    issue_count: 1.3  # Reward closing issues
    issue_reduce: 1.4  # Strongly reward reducing backlog
    daily_run: 1.1
    custom: 1.0
    exploration: 0.7  # Discourage exploration
    refactor: 0.6  # Discourage refactors during crisis
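
Read together, `thresholds`, `signals`, and `multipliers` imply a simple scoring pipeline. A minimal sketch of the intended arithmetic, assuming a signal contributes its full weight once its threshold is crossed (the real scorer may scale contributions differently):

```python
def stress_mode(metrics: dict[str, float], signals: dict[str, dict],
                elevated_min: float = 0.3, high_min: float = 0.6) -> tuple[str, float]:
    """Sum the weights of triggered signals and map the score to a mode."""
    score = sum(
        cfg["weight"]
        for name, cfg in signals.items()
        if metrics.get(name, 0.0) >= cfg["threshold"]
    )
    if score >= high_min:
        return "high", score
    if score >= elevated_min:
        return "elevated", score
    return "calm", score

# Example: flaky_test_rate and ci_failure_rate both triggered
# -> 0.30 + 0.25 = 0.55 -> "elevated" mode.
# A 150-token test_improve quest would then pay 150 * 1.2 = 180 tokens.
```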

docs/research/openclaw-architecture-deployment-guide.md — new file (912 lines)
@@ -0,0 +1,912 @@
# OpenClaw Architecture, Deployment Modes, and Ollama Integration

## Research Report for Timmy Time Dashboard Project

**Issue:** #721 — [Kimi Research] OpenClaw architecture, deployment modes, and Ollama integration
**Date:** 2026-03-21
**Author:** Kimi (Moonshot AI)
**Status:** Complete

---

## Executive Summary

OpenClaw is an open-source AI agent framework that bridges messaging platforms (WhatsApp, Telegram, Slack, Discord, iMessage) to AI coding agents through a centralized gateway. Originally known as Clawdbot and Moltbot, it was rebranded to OpenClaw in early 2026. This report provides a comprehensive analysis of OpenClaw's architecture, deployment options, Ollama integration capabilities, and suitability for deployment on resource-constrained VPS environments like the Hermes DigitalOcean droplet (2GB RAM / 1 vCPU).

**Key Finding:** Running OpenClaw with local LLMs on a 2GB RAM VPS is **not recommended**. The absolute minimum for a text-only agent with external API models is 4GB RAM. For local model inference via Ollama, 8-16GB RAM is the practical minimum. A hybrid approach using OpenRouter as the primary provider with Ollama as fallback is the most viable configuration for small VPS deployments.

---

## 1. Architecture Overview

### 1.1 Core Components

OpenClaw follows a **hub-and-spoke** architecture optimized for multi-agent task execution:

```
┌─────────────────────────────────────────────────────────────────────────┐
│                          OPENCLAW ARCHITECTURE                          │
├─────────────────────────────────────────────────────────────────────────┤
│                                                                         │
│   ┌──────────────┐    ┌──────────────┐    ┌──────────────┐             │
│   │   WhatsApp   │    │   Telegram   │    │   Discord    │             │
│   │   Channel    │    │   Channel    │    │   Channel    │             │
│   └──────┬───────┘    └──────┬───────┘    └──────┬───────┘             │
│          │                   │                   │                     │
│          └───────────────────┼───────────────────┘                     │
│                              ▼                                         │
│                    ┌──────────────────┐                                │
│                    │     Gateway      │◄─────── WebSocket/API          │
│                    │   (Port 18789)   │         Control Plane          │
│                    └────────┬─────────┘                                │
│                             │                                          │
│              ┌──────────────┼──────────────┐                           │
│              ▼              ▼              ▼                           │
│        ┌──────────┐   ┌──────────┐   ┌──────────┐                      │
│        │ Agent A  │   │ Agent B  │   │ Pi Agent │                      │
│        │  (main)  │   │ (coder)  │   │(delegate)│                      │
│        └────┬─────┘   └────┬─────┘   └────┬─────┘                      │
│             │              │              │                            │
│             └──────────────┼──────────────┘                            │
│                            ▼                                           │
│                ┌────────────────────────┐                              │
│                │       LLM Router       │                              │
│                │   (Primary/Fallback)   │                              │
│                └───────────┬────────────┘                              │
│                            │                                           │
│          ┌─────────────────┼─────────────────┐                         │
│          ▼                 ▼                 ▼                         │
│     ┌─────────┐       ┌─────────┐       ┌─────────┐                    │
│     │ Ollama  │       │ OpenAI  │       │Anthropic│                    │
│     │ (local) │       │ (cloud) │       │ (cloud) │                    │
│     └─────────┘       └─────────┘       └─────────┘                    │
│          │                                              ┌─────┐        │
│          └─────────────────────────────────────────────►│ MCP │        │
│                                                         │Tools│        │
│                                                         └─────┘        │
│                                                                        │
│   ┌──────────────┐    ┌──────────────┐    ┌──────────────┐             │
│   │    Memory    │    │    Skills    │    │  Workspace   │             │
│   │  (SOUL.md)   │    │  (SKILL.md)  │    │  (sessions)  │             │
│   └──────────────┘    └──────────────┘    └──────────────┘             │
│                                                                        │
└─────────────────────────────────────────────────────────────────────────┘
```

### 1.2 Component Deep Dive

| Component | Purpose | Configuration File |
|-----------|---------|-------------------|
| **Gateway** | Central control plane, WebSocket/API server, session management | `gateway` section in `openclaw.json` |
| **Pi Agent** | Core agent runner ("command center") - schedules LLM calls, tool execution, error handling | `agents` section in `openclaw.json` |
| **Channels** | Messaging platform integrations (Telegram, WhatsApp, Slack, Discord, iMessage) | `channels` section in `openclaw.json` |
| **SOUL.md** | Agent persona definition - personality, communication style, behavioral guidelines | `~/.openclaw/workspace/SOUL.md` |
| **AGENTS.md** | Multi-agent configuration, routing rules, agent specialization definitions | `~/.openclaw/workspace/AGENTS.md` |
| **Workspace** | File system for agent state, session data, temporary files | `~/.openclaw/workspace/` |
| **Skills** | Bundled tools, prompts, configurations that teach agents specific tasks | `~/.openclaw/workspace/skills/` |
| **Sessions** | Conversation history, context persistence between interactions | `~/.openclaw/agents/<agent>/sessions/` |
| **MCP Tools** | Model Context Protocol integration for external tool access | Via `mcporter` or native MCP |

### 1.3 Agent Runner Execution Flow

According to OpenClaw documentation, a complete agent run follows these stages (a minimal sketch follows the list):

1. **Queuing** - Session-level queue (serializes same-session requests) → Global queue (controls total concurrency)
2. **Preparation** - Parse workspace, provider/model, thinking level parameters
3. **Plugin Loading** - Load relevant skills based on task context
4. **Memory Retrieval** - Fetch relevant context from SOUL.md and conversation history
5. **LLM Inference** - Send prompt to configured provider with tool definitions
6. **Tool Execution** - Execute any tool calls returned by the LLM
7. **Response Generation** - Format and return final response to the channel
8. **Memory Storage** - Persist conversation and results to session storage
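
A minimal Python sketch of stages 4-8 of that flow — illustrative structure only, not OpenClaw's actual code; all names here are hypothetical:

```python
from dataclasses import dataclass, field
from typing import Callable

@dataclass
class Reply:
    text: str
    tool_calls: list[Callable[[], str]] = field(default_factory=list)

def run_agent_turn(history: list[str], message: str,
                   llm: Callable[[list[str]], Reply]) -> str:
    """One agent turn: retrieve context, infer, drain tool calls, persist."""
    context = history + [message]            # 4. memory retrieval (simplified)
    reply = llm(context)                     # 5. LLM inference
    while reply.tool_calls:                  # 6. tool execution loop
        results = [call() for call in reply.tool_calls]
        reply = llm(context + results)       # feed tool output back to the model
    history.append(message)                  # 8. memory storage
    history.append(reply.text)
    return reply.text                        # 7. response back to the channel
```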
---
|
||||
|
||||
## 2. Deployment Modes
|
||||
|
||||
### 2.1 Comparison Matrix
|
||||
|
||||
| Deployment Mode | Best For | Setup Complexity | Resource Overhead | Stability |
|
||||
|----------------|----------|------------------|-------------------|-----------|
|
||||
| **npm global** | Development, quick testing | Low | Minimal (~200MB) | Moderate |
|
||||
| **Docker** | Production, isolation, reproducibility | Medium | Higher (~2.5GB base image) | High |
|
||||
| **Docker Compose** | Multi-service stacks, complex setups | Medium-High | Higher | High |
|
||||
| **Bare metal/systemd** | Maximum performance, dedicated hardware | High | Minimal | Moderate |
|
||||
|
||||
### 2.2 NPM Global Installation (Recommended for Quick Start)
|
||||
|
||||
```bash
|
||||
# One-line installer
|
||||
curl -fsSL https://openclaw.ai/install.sh | bash
|
||||
|
||||
# Or manual npm install
|
||||
npm install -g openclaw
|
||||
|
||||
# Initialize configuration
|
||||
openclaw onboard
|
||||
|
||||
# Start gateway
|
||||
openclaw gateway
|
||||
```
|
||||
|
||||
**Pros:**
|
||||
- Fastest setup (~30 seconds)
|
||||
- Direct access to host resources
|
||||
- Easy updates via `npm update -g openclaw`
|
||||
|
||||
**Cons:**
|
||||
- Node.js 22+ dependency required
|
||||
- No process isolation
|
||||
- Manual dependency management
|
||||
|
||||
### 2.3 Docker Deployment (Recommended for Production)
|
||||
|
||||
```bash
|
||||
# Pull and run
|
||||
docker pull openclaw/openclaw:latest
|
||||
docker run -d \
|
||||
--name openclaw \
|
||||
-p 127.0.0.1:18789:18789 \
|
||||
-v ~/.openclaw:/root/.openclaw \
|
||||
-e ANTHROPIC_API_KEY=sk-ant-... \
|
||||
openclaw/openclaw:latest
|
||||
|
||||
# Or with Docker Compose
|
||||
docker compose -f compose.yml --env-file .env up -d --build
|
||||
```
|
||||
|
||||
**Docker Compose Configuration (production-ready):**
|
||||
|
||||
```yaml
|
||||
version: '3.8'
|
||||
services:
|
||||
openclaw:
|
||||
image: openclaw/openclaw:latest
|
||||
container_name: openclaw
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "127.0.0.1:18789:18789" # Never expose to 0.0.0.0
|
||||
volumes:
|
||||
- ./openclaw-data:/root/.openclaw
|
||||
- ./workspace:/root/.openclaw/workspace
|
||||
environment:
|
||||
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
|
||||
- OPENROUTER_API_KEY=${OPENROUTER_API_KEY}
|
||||
- OLLAMA_API_KEY=ollama-local
|
||||
networks:
|
||||
- openclaw-net
|
||||
# Resource limits for small VPS
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
cpus: '1.5'
|
||||
memory: 3G
|
||||
reservations:
|
||||
cpus: '0.5'
|
||||
memory: 1G
|
||||
|
||||
networks:
|
||||
openclaw-net:
|
||||
driver: bridge
|
||||
```
|
||||
|
||||
### 2.4 Bare Metal / Systemd Installation
|
||||
|
||||
For running as a system service on Linux:
|
||||
|
||||
```bash
|
||||
# Create systemd service
|
||||
sudo tee /etc/systemd/system/openclaw.service > /dev/null <<EOF
|
||||
[Unit]
|
||||
Description=OpenClaw Gateway
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=openclaw
|
||||
Group=openclaw
|
||||
WorkingDirectory=/home/openclaw
|
||||
Environment="PATH=/usr/local/bin:/usr/bin:/bin"
|
||||
Environment="NODE_ENV=production"
|
||||
Environment="ANTHROPIC_API_KEY=sk-ant-..."
|
||||
ExecStart=/usr/local/bin/openclaw gateway
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable openclaw
|
||||
sudo systemctl start openclaw
|
||||
```
|
||||
|
||||
### 2.5 Recommended Deployment for 2GB RAM VPS
|
||||
|
||||
**⚠️ Critical Finding:** OpenClaw's official minimum is 4GB RAM. On a 2GB VPS:
|
||||
|
||||
1. **Do NOT run local LLMs** - Use external API providers exclusively
|
||||
2. **Use npm installation** - Docker overhead is too heavy
|
||||
3. **Disable browser automation** - Chromium requires 2-4GB alone
|
||||
4. **Enable swap** - Critical for preventing OOM kills
|
||||
5. **Use OpenRouter** - Cheap/free tier models reduce costs
|
||||
|
||||
**Setup script for 2GB VPS:**
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# openclaw-minimal-vps.sh
|
||||
# Setup for 2GB RAM VPS - EXTERNAL API ONLY
|
||||
|
||||
# Create 4GB swap
|
||||
sudo fallocate -l 4G /swapfile
|
||||
sudo chmod 600 /swapfile
|
||||
sudo mkswap /swapfile
|
||||
sudo swapon /swapfile
|
||||
echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
|
||||
|
||||
# Install Node.js 22
|
||||
curl -fsSL https://deb.nodesource.com/setup_22.x | sudo bash -
|
||||
sudo apt-get install -y nodejs
|
||||
|
||||
# Install OpenClaw
|
||||
npm install -g openclaw
|
||||
|
||||
# Configure for minimal resource usage
|
||||
mkdir -p ~/.openclaw
|
||||
cat > ~/.openclaw/openclaw.json <<'EOF'
|
||||
{
|
||||
"gateway": {
|
||||
"bind": "127.0.0.1",
|
||||
"port": 18789,
|
||||
"mode": "local"
|
||||
},
|
||||
"agents": {
|
||||
"defaults": {
|
||||
"model": {
|
||||
"primary": "openrouter/google/gemma-3-4b-it:free",
|
||||
"fallbacks": [
|
||||
"openrouter/meta/llama-3.1-8b-instruct:free"
|
||||
]
|
||||
},
|
||||
"maxIterations": 15,
|
||||
"timeout": 120
|
||||
}
|
||||
},
|
||||
"channels": {
|
||||
"telegram": {
|
||||
"enabled": true,
|
||||
"dmPolicy": "pairing"
|
||||
}
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
# Set OpenRouter API key
|
||||
export OPENROUTER_API_KEY="sk-or-v1-..."
|
||||
|
||||
# Start gateway
|
||||
openclaw gateway &
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3. Ollama Integration
|
||||
|
||||
### 3.1 Architecture
|
||||
|
||||
OpenClaw integrates with Ollama through its native `/api/chat` endpoint, supporting both streaming responses and tool calling simultaneously:
|
||||
|
||||
```
|
||||
┌──────────────┐ HTTP/JSON ┌──────────────┐ GGUF/CPU/GPU ┌──────────┐
|
||||
│ OpenClaw │◄───────────────────►│ Ollama │◄────────────────────►│ Local │
|
||||
│ Gateway │ /api/chat │ Server │ Model inference │ LLM │
|
||||
│ │ Port 11434 │ Port 11434 │ │ │
|
||||
└──────────────┘ └──────────────┘ └──────────┘
|
||||
```
|
||||
|
||||
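
The same endpoint can be exercised directly; a quick smoke test of tool calling against a local Ollama instance (the `get_weather` function is a stand-in for this example, not something OpenClaw defines):

```bash
curl -s http://localhost:11434/api/chat -d '{
  "model": "llama3.1:8b",
  "stream": false,
  "messages": [{"role": "user", "content": "What is the weather in Lisbon?"}],
  "tools": [{
    "type": "function",
    "function": {
      "name": "get_weather",
      "description": "Get current weather for a city",
      "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"]
      }
    }
  }]
}'
# A tool-capable model answers with message.tool_calls instead of plain content.
```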

### 3.2 Configuration

**Basic Ollama Setup:**

```bash
# Install Ollama
curl -fsSL https://ollama.com/install.sh | sh

# Start server
ollama serve

# Pull a tool-capable model
ollama pull qwen2.5-coder:7b
ollama pull llama3.1:8b

# Configure OpenClaw
export OLLAMA_API_KEY="ollama-local"  # Any non-empty string works
```

**OpenClaw Configuration for Ollama:**

```json
{
  "models": {
    "providers": {
      "ollama": {
        "baseUrl": "http://localhost:11434",
        "apiKey": "ollama-local",
        "api": "ollama",
        "models": [
          {
            "id": "qwen2.5-coder:7b",
            "name": "Qwen 2.5 Coder 7B",
            "contextWindow": 32768,
            "maxTokens": 8192,
            "cost": { "input": 0, "output": 0 }
          },
          {
            "id": "llama3.1:8b",
            "name": "Llama 3.1 8B",
            "contextWindow": 128000,
            "maxTokens": 8192,
            "cost": { "input": 0, "output": 0 }
          }
        ]
      }
    }
  },
  "agents": {
    "defaults": {
      "model": {
        "primary": "ollama/qwen2.5-coder:7b",
        "fallbacks": ["ollama/llama3.1:8b"]
      }
    }
  }
}
```

### 3.3 Context Window Requirements

**⚠️ Critical Requirement:** OpenClaw requires a minimum **64K token context window** for reliable multi-step task execution.

| Model | Parameters | Context Window | Tool Support | OpenClaw Compatible |
|-------|-----------|----------------|--------------|---------------------|
| **llama3.1** | 8B | 128K | ✅ Yes | ✅ Yes |
| **qwen2.5-coder** | 7B | 32K | ✅ Yes | ⚠️ Below minimum |
| **qwen2.5-coder** | 32B | 128K | ✅ Yes | ✅ Yes |
| **gpt-oss** | 20B | 128K | ✅ Yes | ✅ Yes |
| **glm-4.7-flash** | - | 128K | ✅ Yes | ✅ Yes |
| **deepseek-coder-v2** | 33B | 128K | ✅ Yes | ✅ Yes |
| **mistral-small3.1** | - | 128K | ✅ Yes | ✅ Yes |

**Context Window Configuration:**

For models that don't report context window via Ollama's API:

```bash
# Create custom Modelfile with extended context
cat > ~/qwen-custom.modelfile <<EOF
FROM qwen2.5-coder:7b
PARAMETER num_ctx 65536
PARAMETER temperature 0.7
EOF

# Create custom model
ollama create qwen2.5-coder-64k -f ~/qwen-custom.modelfile
```

### 3.4 Models for Small VPS (≤8B Parameters)

For resource-constrained environments (2-4GB RAM):

| Model | Quantization | RAM Required | VRAM Required | Performance |
|-------|-------------|--------------|---------------|-------------|
| **Llama 3.1 8B** | Q4_K_M | ~5GB | ~6GB | Good |
| **Llama 3.2 3B** | Q4_K_M | ~2.5GB | ~3GB | Basic |
| **Qwen 2.5 7B** | Q4_K_M | ~5GB | ~6GB | Good |
| **Qwen 2.5 3B** | Q4_K_M | ~2.5GB | ~3GB | Basic |
| **DeepSeek 7B** | Q4_K_M | ~5GB | ~6GB | Good |
| **Phi-4 4B** | Q4_K_M | ~3GB | ~4GB | Moderate |

**⚠️ Verdict for 2GB VPS:** Running local LLMs is **NOT viable**. Use external APIs only.

---

## 4. OpenRouter Integration (Fallback Strategy)

### 4.1 Overview

OpenRouter provides a unified API gateway to multiple LLM providers, enabling:
- Single API key access to 200+ models
- Automatic failover between providers
- Free tier models for cost-conscious deployments
- Unified billing and usage tracking
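
Under the hood this is an OpenAI-compatible REST API, so verifying a key and a free-tier model takes one request:

```bash
curl -s https://openrouter.ai/api/v1/chat/completions \
  -H "Authorization: Bearer $OPENROUTER_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "google/gemma-3-4b-it:free",
    "messages": [{"role": "user", "content": "Reply with OK if you can hear me."}]
  }'
```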

### 4.2 Configuration

**Environment Variable Setup:**

```bash
export OPENROUTER_API_KEY="sk-or-v1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
```

**OpenClaw Configuration:**

```json
{
  "models": {
    "providers": {
      "openrouter": {
        "apiKey": "${OPENROUTER_API_KEY}",
        "baseUrl": "https://openrouter.ai/api/v1"
      }
    }
  },
  "agents": {
    "defaults": {
      "model": {
        "primary": "openrouter/anthropic/claude-sonnet-4-6",
        "fallbacks": [
          "openrouter/google/gemini-3.1-pro",
          "openrouter/meta/llama-3.3-70b-instruct",
          "openrouter/google/gemma-3-4b-it:free"
        ]
      }
    }
  }
}
```

### 4.3 Recommended Free/Cheap Models on OpenRouter

For cost-conscious VPS deployments:

| Model | Cost | Context | Best For |
|-------|------|---------|----------|
| **google/gemma-3-4b-it:free** | Free | 128K | General tasks, simple automation |
| **meta/llama-3.1-8b-instruct:free** | Free | 128K | General tasks, longer contexts |
| **deepseek/deepseek-chat-v3.2** | $0.53/M | 64K | Code generation, reasoning |
| **xiaomi/mimo-v2-flash** | $0.40/M | 128K | Fast responses, basic tasks |
| **qwen/qwen3-coder-next** | $1.20/M | 128K | Code-focused tasks |

### 4.4 Hybrid Configuration (Recommended for Timmy)

A production-ready configuration for the Hermes VPS:

```json
{
  "models": {
    "providers": {
      "openrouter": {
        "apiKey": "${OPENROUTER_API_KEY}",
        "models": [
          {
            "id": "google/gemma-3-4b-it:free",
            "name": "Gemma 3 4B (Free)",
            "contextWindow": 131072,
            "maxTokens": 8192,
            "cost": { "input": 0, "output": 0 }
          },
          {
            "id": "deepseek/deepseek-chat-v3.2",
            "name": "DeepSeek V3.2",
            "contextWindow": 64000,
            "maxTokens": 8192,
            "cost": { "input": 0.00053, "output": 0.00053 }
          }
        ]
      },
      "ollama": {
        "baseUrl": "http://localhost:11434",
        "apiKey": "ollama-local",
        "models": [
          {
            "id": "llama3.2:3b",
            "name": "Llama 3.2 3B (Local Fallback)",
            "contextWindow": 128000,
            "maxTokens": 4096,
            "cost": { "input": 0, "output": 0 }
          }
        ]
      }
    }
  },
  "agents": {
    "defaults": {
      "model": {
        "primary": "openrouter/google/gemma-3-4b-it:free",
        "fallbacks": [
          "openrouter/deepseek/deepseek-chat-v3.2",
          "ollama/llama3.2:3b"
        ]
      },
      "maxIterations": 10,
      "timeout": 90
    }
  }
}
```

---

## 5. Hardware Constraints & VPS Viability

### 5.1 System Requirements Summary

| Component | Minimum | Recommended | Notes |
|-----------|---------|-------------|-------|
| **CPU** | 2 vCPU | 4 vCPU | Dedicated preferred over shared |
| **RAM** | 4 GB | 8 GB | 2GB risks OOM even with external APIs only |
| **Storage** | 40 GB SSD | 80 GB NVMe | Docker images are ~10-15GB |
| **Network** | 100 Mbps | 1 Gbps | For API calls and model downloads |
| **OS** | Ubuntu 22.04/Debian 12 | Ubuntu 24.04 LTS | Linux required for production |

### 5.2 2GB RAM VPS Analysis

**Can it work?** Yes, with severe limitations:

✅ **What works:**
- Text-only agents with external API providers
- Single Telegram/Discord channel
- Basic file operations and shell commands
- Headless operation without browser automation

❌ **What doesn't work:**
- Local LLM inference via Ollama
- Browser automation (Chromium needs 2-4GB)
- Multiple concurrent channels
- Python environment-heavy skills

**Required mitigations for 2GB VPS:**

```bash
# 1. Create substantial swap
sudo fallocate -l 4G /swapfile
sudo chmod 600 /swapfile
sudo mkswap /swapfile
sudo swapon /swapfile

# 2. Configure swappiness
echo 'vm.swappiness=60' | sudo tee -a /etc/sysctl.conf
sudo sysctl -p

# 3. Limit Node.js memory
export NODE_OPTIONS="--max-old-space-size=1536"

# 4. Use external APIs only - NO OLLAMA
# 5. Disable browser skills
# 6. Set conservative concurrency limits
```

### 5.3 4-bit Quantization Viability

**Qwen 2.5 7B Q4_K_M on 2GB VPS** (see the estimator sketch below):
- Model size: ~4.5GB
- RAM required at runtime: ~5-6GB
- **Verdict:** Will cause immediate OOM on 2GB VPS
- **Even with 4GB VPS:** Marginal, heavy swap usage, poor performance

**Viable models for 4GB VPS with Ollama:**
- Llama 3.2 3B Q4_K_M (~2.5GB RAM)
- Qwen 2.5 3B Q4_K_M (~2.5GB RAM)
- Phi-4 4B Q4_K_M (~3GB RAM)
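
These RAM figures follow from simple arithmetic; a rough estimator using rule-of-thumb constants rather than measured values:

```python
def q4_ram_estimate_gb(params_billion: float, kv_cache_gb: float = 1.0) -> float:
    """Rough resident-memory estimate for a Q4_K_M model under Ollama.

    Q4_K_M stores roughly 4.5-5 bits per weight; add ~1GB for KV cache
    and runtime overhead. These constants are rules of thumb, not benchmarks.
    """
    weights_gb = params_billion * 1e9 * 4.8 / 8 / 1e9  # ~4.8 bits per weight
    return weights_gb + kv_cache_gb

print(f"7B -> ~{q4_ram_estimate_gb(7):.1f} GB")  # ~5.2 GB: well over a 2GB VPS
print(f"3B -> ~{q4_ram_estimate_gb(3):.1f} GB")  # ~2.8 GB: marginal on 4GB
```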

---

## 6. Security Configuration

### 6.1 Network Ports

| Port | Purpose | Exposure |
|------|---------|----------|
| **18789/tcp** | OpenClaw Gateway (WebSocket/HTTP) | **NEVER expose to internet** |
| **11434/tcp** | Ollama API (if running locally) | Localhost only |
| **22/tcp** | SSH | Restrict to known IPs |

**⚠️ CRITICAL:** Never expose port 18789 to the public internet. Use Tailscale or SSH tunnels for remote access.
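
For the SSH-tunnel route, forwarding the loopback-bound gateway to a workstation is one command (host and user are placeholders):

```bash
# Forward local port 18789 to the gateway's loopback bind on the VPS,
# then browse http://localhost:18789 on the workstation.
ssh -N -L 18789:127.0.0.1:18789 user@your-vps-host
```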

### 6.2 Tailscale Integration

Tailscale provides zero-configuration VPN mesh for secure remote access:

```bash
# Install Tailscale
curl -fsSL https://tailscale.com/install.sh | sh
sudo tailscale up

# Get Tailscale IP
tailscale ip
# Returns: 100.x.y.z

# Configure OpenClaw to bind to Tailscale
cat > ~/.openclaw/openclaw.json <<EOF
{
  "gateway": {
    "bind": "tailnet",
    "port": 18789
  },
  "tailscale": {
    "mode": "on",
    "resetOnExit": false
  }
}
EOF
```

**Tailscale vs SSH Tunnel:**

| Feature | Tailscale | SSH Tunnel |
|---------|-----------|------------|
| Setup | Very easy | Moderate |
| Persistence | Automatic | Requires autossh |
| Multiple devices | Built-in | One tunnel per connection |
| NAT traversal | Works | Requires exposed SSH |
| Access control | Tailscale ACL | SSH keys |

### 6.3 Firewall Configuration (UFW)

```bash
# Default deny
sudo ufw default deny incoming
sudo ufw default allow outgoing

# Allow SSH
sudo ufw allow 22/tcp

# Allow Tailscale only (if using)
sudo ufw allow in on tailscale0 to any port 18789

# Block public access to OpenClaw
# (bind is 127.0.0.1, so this is defense in depth)

sudo ufw enable
```

### 6.4 Authentication Configuration

```json
{
  "gateway": {
    "bind": "127.0.0.1",
    "port": 18789,
    "auth": {
      "mode": "token",
      "token": "your-64-char-hex-token-here"
    },
    "controlUi": {
      "allowedOrigins": [
        "http://localhost:18789",
        "https://your-domain.tailnet-name.ts.net"
      ],
      "allowInsecureAuth": false,
      "dangerouslyDisableDeviceAuth": false
    }
  }
}
```

**Generate secure token:**

```bash
openssl rand -hex 32
```

### 6.5 Sandboxing Considerations

OpenClaw executes arbitrary shell commands and file operations by default. For production:

1. **Run as non-root user:**
   ```bash
   sudo useradd -r -s /bin/false openclaw
   sudo mkdir -p /home/openclaw/.openclaw
   sudo chown -R openclaw:openclaw /home/openclaw
   ```

2. **Use Docker for isolation:**
   ```bash
   docker run --security-opt=no-new-privileges \
     --cap-drop=ALL \
     --read-only \
     --tmpfs /tmp:noexec,nosuid,size=100m \
     openclaw/openclaw:latest
   ```

3. **Enable dmPolicy for channels:**
   ```json
   {
     "channels": {
       "telegram": {
         "dmPolicy": "pairing"  // Require one-time code for new contacts
       }
     }
   }
   ```

---

## 7. MCP (Model Context Protocol) Tools

### 7.1 Overview

MCP is an open standard created by Anthropic (donated to Linux Foundation in Dec 2025) that lets AI applications connect to external tools through a universal interface. Think of it as "USB-C for AI."

### 7.2 MCP vs OpenClaw Skills

| Aspect | MCP | OpenClaw Skills |
|--------|-----|-----------------|
| **Protocol** | Standardized (Anthropic) | OpenClaw-specific |
| **Isolation** | Process-isolated | Runs in agent context |
| **Security** | Higher (sandboxed) | Lower (full system access) |
| **Discovery** | Automatic via protocol | Manual via SKILL.md |
| **Ecosystem** | 10,000+ servers | 5400+ skills |

**Note:** OpenClaw currently has limited native MCP support. Use `mcporter` tool for MCP integration.

### 7.3 Using MCPorter (MCP Bridge)

```bash
# Install mcporter
clawhub install mcporter

# Configure MCP server
mcporter config add github \
  --url "https://api.github.com/mcp" \
  --token "ghp_..."

# List available tools
mcporter list

# Call MCP tool
mcporter call github.list_repos --owner "rockachopa"
```

### 7.4 Popular MCP Servers

| Server | Purpose | Integration |
|--------|---------|-------------|
| **GitHub** | Repo management, PRs, issues | `mcp-github` |
| **Slack** | Messaging, channel management | `mcp-slack` |
| **PostgreSQL** | Database queries | `mcp-postgres` |
| **Filesystem** | File operations (sandboxed) | `mcp-filesystem` |
| **Brave Search** | Web search | `mcp-brave` |

---

## 8. Recommendations for Timmy Time Dashboard

### 8.1 Deployment Strategy for Hermes VPS (2GB RAM)

Given the hardware constraints, here's the recommended approach:

**Option A: External API Only (Recommended)**
```
┌─────────────────────────────────────────┐
│          Hermes VPS (2GB RAM)           │
│   ┌─────────────────────────────────┐   │
│   │       OpenClaw Gateway          │   │
│   │      (npm global install)       │   │
│   └─────────────┬───────────────────┘   │
│                 │                       │
│                 ▼                       │
│   ┌─────────────────────────────────┐   │
│   │   OpenRouter API (Free Tier)    │   │
│   │   google/gemma-3-4b-it:free     │   │
│   └─────────────────────────────────┘   │
│                                         │
│      NO OLLAMA - insufficient RAM       │
└─────────────────────────────────────────┘
```

**Option B: Hybrid with External Ollama**
```
┌──────────────────────┐      ┌──────────────────────────┐
│   Hermes VPS (2GB)   │      │   Separate Ollama Host   │
│  ┌────────────────┐  │      │  ┌────────────────────┐  │
│  │    OpenClaw    │  │◄────►│  │   Ollama Server    │  │
│  │ (external API) │  │      │  │(8GB+ RAM required) │  │
│  └────────────────┘  │      │  └────────────────────┘  │
└──────────────────────┘      └──────────────────────────┘
```

### 8.2 Configuration Summary

```json
{
  "gateway": {
    "bind": "127.0.0.1",
    "port": 18789,
    "auth": {
      "mode": "token",
      "token": "GENERATE_WITH_OPENSSL_RAND"
    }
  },
  "models": {
    "providers": {
      "openrouter": {
        "apiKey": "${OPENROUTER_API_KEY}",
        "models": [
          {
            "id": "google/gemma-3-4b-it:free",
            "contextWindow": 131072,
            "maxTokens": 4096
          }
        ]
      }
    }
  },
  "agents": {
    "defaults": {
      "model": {
        "primary": "openrouter/google/gemma-3-4b-it:free"
      },
      "maxIterations": 10,
      "timeout": 90,
      "maxConcurrent": 2
    }
  },
  "channels": {
    "telegram": {
      "enabled": true,
      "dmPolicy": "pairing"
    }
  }
}
```

### 8.3 Migration Path (Future)

When upgrading to a larger VPS (4-8GB RAM):

1. **Phase 1:** Enable Ollama with Llama 3.2 3B as fallback
2. **Phase 2:** Add browser automation skills (requires 4GB+ RAM)
3. **Phase 3:** Enable multi-agent routing with specialized agents
4. **Phase 4:** Add MCP server integration for external tools

---

## 9. References

1. OpenClaw Official Documentation: https://docs.openclaw.ai
2. Ollama Integration Guide: https://docs.ollama.com/integrations/openclaw
3. OpenRouter Documentation: https://openrouter.ai/docs
4. MCP Specification: https://modelcontextprotocol.io
5. OpenClaw Community Discord: https://discord.gg/openclaw
6. GitHub Repository: https://github.com/openclaw/openclaw

---

## 10. Appendix: Quick Command Reference

```bash
# Installation
curl -fsSL https://openclaw.ai/install.sh | bash

# Configuration
openclaw onboard                   # Interactive setup
openclaw configure                 # Edit config
openclaw config set <key> <value>  # Set specific value

# Gateway management
openclaw gateway                   # Start gateway
openclaw gateway --verbose         # Start with logs
openclaw gateway status            # Check status
openclaw gateway restart           # Restart gateway
openclaw gateway stop              # Stop gateway

# Model management
openclaw models list               # List available models
openclaw models set <model>        # Set default model
openclaw models status             # Check model status

# Diagnostics
openclaw doctor                    # System health check
openclaw doctor --repair           # Auto-fix issues
openclaw security audit            # Security check

# Dashboard
openclaw dashboard                 # Open web UI
```

---

*End of Research Report*

pyproject.toml
@@ -20,6 +20,7 @@ packages = [
    { include = "spark", from = "src" },
    { include = "timmy", from = "src" },
    { include = "timmy_serve", from = "src" },
    { include = "timmyctl", from = "src" },
]

[tool.poetry.dependencies]
@@ -82,6 +83,7 @@ mypy = ">=1.0.0"
[tool.poetry.scripts]
timmy = "timmy.cli:main"
timmy-serve = "timmy_serve.cli:main"
timmyctl = "timmyctl.cli:main"

[tool.pytest.ini_options]
testpaths = ["tests"]

@@ -27,11 +27,15 @@ from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent
QUEUE_FILE = REPO_ROOT / ".loop" / "queue.json"
IDLE_STATE_FILE = REPO_ROOT / ".loop" / "idle_state.json"
CYCLE_RESULT_FILE = REPO_ROOT / ".loop" / "cycle_result.json"
TOKEN_FILE = Path.home() / ".hermes" / "gitea_token"

GITEA_API = os.environ.get("GITEA_API", "http://localhost:3000/api/v1")
REPO_SLUG = os.environ.get("REPO_SLUG", "rockachopa/Timmy-time-dashboard")

# Default cycle duration in seconds (5 min); stale threshold = 2× this
CYCLE_DURATION = int(os.environ.get("CYCLE_DURATION", "300"))

# Backoff sequence: 60s, 120s, 240s, 600s max
BACKOFF_BASE = 60
BACKOFF_MAX = 600
@@ -77,6 +81,89 @@ def _fetch_open_issue_numbers() -> set[int] | None:
    return None


def _load_cycle_result() -> dict:
    """Read cycle_result.json, handling markdown-fenced JSON."""
    if not CYCLE_RESULT_FILE.exists():
        return {}
    try:
        raw = CYCLE_RESULT_FILE.read_text().strip()
        if raw.startswith("```"):
            lines = raw.splitlines()
            lines = [ln for ln in lines if not ln.startswith("```")]
            raw = "\n".join(lines)
        return json.loads(raw)
    except (json.JSONDecodeError, OSError):
        return {}


def _is_issue_open(issue_number: int) -> bool | None:
    """Check if a single issue is open. Returns None on API failure."""
    token = _get_token()
    if not token:
        return None
    try:
        url = f"{GITEA_API}/repos/{REPO_SLUG}/issues/{issue_number}"
        req = urllib.request.Request(
            url,
            headers={
                "Authorization": f"token {token}",
                "Accept": "application/json",
            },
        )
        with urllib.request.urlopen(req, timeout=10) as resp:
            data = json.loads(resp.read())
        return data.get("state") == "open"
    except Exception:
        return None


def validate_cycle_result() -> bool:
    """Pre-cycle validation: remove stale or invalid cycle_result.json.

    Checks:
    1. Age — if older than 2× CYCLE_DURATION, delete it.
    2. Issue — if the referenced issue is closed, delete it.

    Returns True if the file was removed, False otherwise.
    """
    if not CYCLE_RESULT_FILE.exists():
        return False

    # Age check
    try:
        age = time.time() - CYCLE_RESULT_FILE.stat().st_mtime
    except OSError:
        return False
    stale_threshold = CYCLE_DURATION * 2
    if age > stale_threshold:
        print(
            f"[loop-guard] cycle_result.json is {int(age)}s old "
            f"(threshold {stale_threshold}s) — removing stale file"
        )
        CYCLE_RESULT_FILE.unlink(missing_ok=True)
        return True

    # Issue check
    cr = _load_cycle_result()
    issue_num = cr.get("issue")
    if issue_num is not None:
        try:
            issue_num = int(issue_num)
        except (ValueError, TypeError):
            return False
        is_open = _is_issue_open(issue_num)
        if is_open is False:
            print(
                f"[loop-guard] cycle_result.json references closed "
                f"issue #{issue_num} — removing"
            )
            CYCLE_RESULT_FILE.unlink(missing_ok=True)
            return True
        # is_open is None (API failure) or True — keep file

    return False


def load_queue() -> list[dict]:
    """Load queue.json and return ready items, filtering out closed issues."""
    if not QUEUE_FILE.exists():
@@ -150,6 +237,9 @@ def main() -> int:
        }, indent=2))
        return 0

    # Pre-cycle validation: remove stale cycle_result.json
    validate_cycle_result()

    ready = load_queue()

    if ready:

@@ -149,6 +149,18 @@ class Settings(BaseSettings):
        "http://127.0.0.1:8000",
    ]

    # ── Matrix Frontend Integration ────────────────────────────────────────
    # URL of the Matrix frontend (Replit/Tailscale) for CORS.
    # When set, this origin is added to CORS allowed_origins.
    # Example: "http://100.124.176.28:8080" or "https://alexanderwhitestone.com"
    matrix_frontend_url: str = ""  # Empty = disabled

    # WebSocket authentication token for Matrix connections.
    # When set, clients must provide this token via ?token= query param
    # or in the first message as {"type": "auth", "token": "..."}.
    # Empty/unset = auth disabled (dev mode).
    matrix_ws_token: str = ""

    # Trusted hosts for the Host header check (TrustedHostMiddleware).
    # Set TRUSTED_HOSTS as a comma-separated list. Wildcards supported (e.g. "*.ts.net").
    # Defaults include localhost + Tailscale MagicDNS. Add your Tailscale IP if needed.
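
A minimal client honoring the query-param variant of that handshake might look like this sketch (it uses the third-party `websockets` package; the URL path and token value are placeholders, not the project's confirmed endpoint):

```python
import asyncio
import websockets  # pip install websockets

async def main() -> None:
    token = "your-matrix-ws-token"  # must match settings.matrix_ws_token
    url = f"ws://localhost:8000/ws?token={token}"  # path is illustrative
    async with websockets.connect(url) as ws:
        print(await ws.recv())  # first server frame after successful auth

asyncio.run(main())
```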

@@ -10,6 +10,7 @@ Key improvements:
import asyncio
import json
import logging
import re
from contextlib import asynccontextmanager
from pathlib import Path

@@ -23,6 +24,7 @@ from config import settings

# Import dedicated middleware
from dashboard.middleware.csrf import CSRFMiddleware
from dashboard.middleware.rate_limit import RateLimitMiddleware
from dashboard.middleware.request_logging import RequestLoggingMiddleware
from dashboard.middleware.security_headers import SecurityHeadersMiddleware
from dashboard.routes.agents import router as agents_router
@@ -30,6 +32,7 @@ from dashboard.routes.briefing import router as briefing_router
from dashboard.routes.calm import router as calm_router
from dashboard.routes.chat_api import router as chat_api_router
from dashboard.routes.chat_api_v1 import router as chat_api_v1_router
from dashboard.routes.daily_run import router as daily_run_router
from dashboard.routes.db_explorer import router as db_explorer_router
from dashboard.routes.discord import router as discord_router
from dashboard.routes.experiments import router as experiments_router
@@ -40,6 +43,7 @@ from dashboard.routes.memory import router as memory_router
from dashboard.routes.mobile import router as mobile_router
from dashboard.routes.models import api_router as models_api_router
from dashboard.routes.models import router as models_router
from dashboard.routes.quests import router as quests_router
from dashboard.routes.spark import router as spark_router
from dashboard.routes.system import router as system_router
from dashboard.routes.tasks import router as tasks_router
@@ -49,6 +53,7 @@ from dashboard.routes.tools import router as tools_router
from dashboard.routes.tower import router as tower_router
from dashboard.routes.voice import router as voice_router
from dashboard.routes.work_orders import router as work_orders_router
from dashboard.routes.world import matrix_router
from dashboard.routes.world import router as world_router
from timmy.workshop_state import PRESENCE_FILE

@@ -519,25 +524,55 @@ app = FastAPI(

def _get_cors_origins() -> list[str]:
    """Get CORS origins from settings, rejecting wildcards in production."""
    origins = settings.cors_origins
    """Get CORS origins from settings, rejecting wildcards in production.

    Adds matrix_frontend_url when configured. Always allows Tailscale IPs
    (100.x.x.x range) for development convenience.
    """
    origins = list(settings.cors_origins)

    # Strip wildcards in production (security)
    if "*" in origins and not settings.debug:
        logger.warning(
            "Wildcard '*' in CORS_ORIGINS stripped in production — "
            "set explicit origins via CORS_ORIGINS env var"
        )
        origins = [o for o in origins if o != "*"]

    # Add Matrix frontend URL if configured
    if settings.matrix_frontend_url:
        url = settings.matrix_frontend_url.strip()
        if url and url not in origins:
            origins.append(url)
            logger.debug("Added Matrix frontend to CORS: %s", url)

    return origins


# Pattern to match Tailscale IPs (100.x.x.x) for CORS origin regex
_TAILSCALE_IP_PATTERN = re.compile(r"^https?://100\.\d{1,3}\.\d{1,3}\.\d{1,3}(?::\d+)?$")


def _is_tailscale_origin(origin: str) -> bool:
    """Check if origin is a Tailscale IP (100.x.x.x range)."""
    return bool(_TAILSCALE_IP_PATTERN.match(origin))


# Add dedicated middleware in correct order
# 1. Logging (outermost to capture everything)
app.add_middleware(RequestLoggingMiddleware, skip_paths=["/health"])

# 2. Security Headers
# 2. Rate Limiting (before security to prevent abuse early)
app.add_middleware(
    RateLimitMiddleware,
    path_prefixes=["/api/matrix/"],
    requests_per_minute=30,
)

# 3. Security Headers
app.add_middleware(SecurityHeadersMiddleware, production=not settings.debug)

# 3. CSRF Protection
# 4. CSRF Protection
app.add_middleware(CSRFMiddleware)

# 4. Standard FastAPI middleware
@@ -551,6 +586,7 @@ app.add_middleware(
app.add_middleware(
    CORSMiddleware,
    allow_origins=_get_cors_origins(),
    allow_origin_regex=r"https?://100\.\d{1,3}\.\d{1,3}\.\d{1,3}(:\d+)?",
    allow_credentials=True,
    allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
    allow_headers=["Content-Type", "Authorization"],
@@ -589,7 +625,10 @@ app.include_router(system_router)
app.include_router(experiments_router)
app.include_router(db_explorer_router)
app.include_router(world_router)
app.include_router(matrix_router)
app.include_router(tower_router)
app.include_router(daily_run_router)
app.include_router(quests_router)


@app.websocket("/ws")

@@ -1,6 +1,7 @@
"""Dashboard middleware package."""

from .csrf import CSRFMiddleware, csrf_exempt, generate_csrf_token, validate_csrf_token
from .rate_limit import RateLimiter, RateLimitMiddleware
from .request_logging import RequestLoggingMiddleware
from .security_headers import SecurityHeadersMiddleware

@@ -9,6 +10,8 @@ __all__ = [
    "csrf_exempt",
    "generate_csrf_token",
    "validate_csrf_token",
    "RateLimiter",
    "RateLimitMiddleware",
    "SecurityHeadersMiddleware",
    "RequestLoggingMiddleware",
]
@@ -131,7 +131,6 @@ class CSRFMiddleware(BaseHTTPMiddleware):
        For safe methods: Set a CSRF token cookie if not present.

        For unsafe methods: Validate the CSRF token or check if exempt.
        """
        # Bypass CSRF if explicitly disabled (e.g. in tests)
        from config import settings

        if settings.timmy_disable_csrf:
@@ -141,52 +140,55 @@ class CSRFMiddleware(BaseHTTPMiddleware):
        if request.headers.get("upgrade", "").lower() == "websocket":
            return await call_next(request)

        # Get existing CSRF token from cookie
        csrf_cookie = request.cookies.get(self.cookie_name)

        # For safe methods, just ensure a token exists
        if request.method in self.SAFE_METHODS:
            response = await call_next(request)
            return await self._handle_safe_method(request, call_next, csrf_cookie)

        # Set CSRF token cookie if not present
        if not csrf_cookie:
            new_token = generate_csrf_token()
            response.set_cookie(
                key=self.cookie_name,
                value=new_token,
                httponly=False,  # Must be readable by JavaScript
                secure=settings.csrf_cookie_secure,
                samesite="Lax",
                max_age=86400,  # 24 hours
            )
        return await self._handle_unsafe_method(request, call_next, csrf_cookie)

            return response
    async def _handle_safe_method(
        self, request: Request, call_next, csrf_cookie: str | None
    ) -> Response:
        """Handle safe HTTP methods (GET, HEAD, OPTIONS, TRACE).

        # For unsafe methods, we need to validate or check if exempt
        # First, try to validate the CSRF token
        if await self._validate_request(request, csrf_cookie):
            # Token is valid, allow the request
            return await call_next(request)
        Forwards the request and sets a CSRF token cookie if not present.
        """
        from config import settings

        # Token validation failed, check if the path is exempt
        path = request.url.path
        if self._is_likely_exempt(path):
            # Path is exempt, allow the request
            return await call_next(request)

        # Token validation failed and path is not exempt
        # We still need to call the app to check if the endpoint is decorated
        # with @csrf_exempt, so we'll let it through and check after routing
        response = await call_next(request)

        # After routing, check if the endpoint is marked as exempt
        endpoint = request.scope.get("endpoint")
        if endpoint and is_csrf_exempt(endpoint):
            # Endpoint is marked as exempt, allow the response
            return response
        if not csrf_cookie:
            new_token = generate_csrf_token()
            response.set_cookie(
                key=self.cookie_name,
                value=new_token,
                httponly=False,  # Must be readable by JavaScript
                secure=settings.csrf_cookie_secure,
                samesite="Lax",
                max_age=86400,  # 24 hours
            )

        return response

    async def _handle_unsafe_method(
        self, request: Request, call_next, csrf_cookie: str | None
    ) -> Response:
        """Handle unsafe HTTP methods (POST, PUT, DELETE, PATCH).

        Validates the CSRF token, checks path and endpoint exemptions,
        or returns a 403 error.
        """
        if await self._validate_request(request, csrf_cookie):
            return await call_next(request)

        if self._is_likely_exempt(request.url.path):
            return await call_next(request)

        endpoint = self._resolve_endpoint(request)
        if endpoint and is_csrf_exempt(endpoint):
            return await call_next(request)

        # Endpoint is not exempt and token validation failed
        # Return 403 error
        return JSONResponse(
            status_code=403,
            content={
@@ -196,6 +198,41 @@ class CSRFMiddleware(BaseHTTPMiddleware):
            },
        )

    def _resolve_endpoint(self, request: Request) -> Callable | None:
        """Resolve the route endpoint without executing it.

        Walks the Starlette/FastAPI router to find which endpoint function
        handles this request, so we can check @csrf_exempt before any
        side effects occur.

        Returns:
            The endpoint callable, or None if no route matched.
        """
        # If routing already happened (endpoint in scope), use it
        endpoint = request.scope.get("endpoint")
        if endpoint:
            return endpoint

        # Walk the middleware/app chain to find something with routes
        from starlette.routing import Match

        app = self.app
        while app is not None:
            if hasattr(app, "routes"):
                for route in app.routes:
                    match, _ = route.matches(request.scope)
                    if match == Match.FULL:
                        return getattr(route, "endpoint", None)
            # Try .router (FastAPI stores routes on app.router)
            if hasattr(app, "router") and hasattr(app.router, "routes"):
                for route in app.router.routes:
                    match, _ = route.matches(request.scope)
                    if match == Match.FULL:
                        return getattr(route, "endpoint", None)
            app = getattr(app, "app", None)

        return None

    def _is_likely_exempt(self, path: str) -> bool:
        """Check if a path is likely to be CSRF exempt.
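The _resolve_endpoint walk above is what lets @csrf_exempt handlers opt out before any side effects run. A minimal sketch of how the pieces might be wired together (the middleware and decorator names come from this diff; the app object and webhook route are hypothetical):

    from fastapi import FastAPI

    app = FastAPI()
    app.add_middleware(CSRFMiddleware)  # middleware from the diff above

    @app.post("/webhooks/gitea")  # hypothetical route
    @csrf_exempt  # marker found by _resolve_endpoint() before the handler runs
    async def gitea_webhook(payload: dict) -> dict:
        # External callers cannot supply a CSRF token, so this route opts out.
        return {"received": True}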
209
src/dashboard/middleware/rate_limit.py
Normal file
@@ -0,0 +1,209 @@
"""Rate limiting middleware for FastAPI.

Simple in-memory rate limiter for API endpoints. Tracks requests per IP
with configurable limits and automatic cleanup of stale entries.
"""

import logging
import time
from collections import deque

from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import JSONResponse, Response

logger = logging.getLogger(__name__)


class RateLimiter:
    """In-memory rate limiter for tracking requests per IP.

    Stores request timestamps in a dict keyed by client IP.
    Automatically cleans up stale entries every 60 seconds.

    Attributes:
        requests_per_minute: Maximum requests allowed per minute per IP.
        cleanup_interval_seconds: How often to clean stale entries.
    """

    def __init__(
        self,
        requests_per_minute: int = 30,
        cleanup_interval_seconds: int = 60,
    ):
        self.requests_per_minute = requests_per_minute
        self.cleanup_interval_seconds = cleanup_interval_seconds
        self._storage: dict[str, deque[float]] = {}
        self._last_cleanup: float = time.time()
        self._window_seconds: float = 60.0  # 1 minute window

    def _get_client_ip(self, request: Request) -> str:
        """Extract client IP from request, respecting X-Forwarded-For header.

        Args:
            request: The incoming request.

        Returns:
            Client IP address string.
        """
        # Check for forwarded IP (when behind proxy/load balancer)
        forwarded = request.headers.get("x-forwarded-for")
        if forwarded:
            # Take the first IP in the chain
            return forwarded.split(",")[0].strip()

        real_ip = request.headers.get("x-real-ip")
        if real_ip:
            return real_ip

        # Fall back to direct connection
        if request.client:
            return request.client.host

        return "unknown"

    def _cleanup_if_needed(self) -> None:
        """Remove stale entries older than the cleanup interval."""
        now = time.time()
        if now - self._last_cleanup < self.cleanup_interval_seconds:
            return

        cutoff = now - self._window_seconds
        stale_ips: list[str] = []

        for ip, timestamps in self._storage.items():
            # Remove timestamps older than the window
            while timestamps and timestamps[0] < cutoff:
                timestamps.popleft()
            # Mark IP for removal if no recent requests
            if not timestamps:
                stale_ips.append(ip)

        # Remove stale IP entries
        for ip in stale_ips:
            del self._storage[ip]

        self._last_cleanup = now
        if stale_ips:
            logger.debug("Rate limiter cleanup: removed %d stale IPs", len(stale_ips))

    def is_allowed(self, client_ip: str) -> tuple[bool, float]:
        """Check if a request from the given IP is allowed.

        Args:
            client_ip: The client's IP address.

        Returns:
            Tuple of (allowed: bool, retry_after: float).
            retry_after is seconds until next allowed request, 0 if allowed now.
        """
        now = time.time()
        cutoff = now - self._window_seconds

        # Get or create timestamp deque for this IP
        if client_ip not in self._storage:
            self._storage[client_ip] = deque()

        timestamps = self._storage[client_ip]

        # Remove timestamps outside the window
        while timestamps and timestamps[0] < cutoff:
            timestamps.popleft()

        # Check if limit exceeded
        if len(timestamps) >= self.requests_per_minute:
            # Calculate retry after time
            oldest = timestamps[0]
            retry_after = self._window_seconds - (now - oldest)
            return False, max(0.0, retry_after)

        # Record this request
        timestamps.append(now)
        return True, 0.0
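A quick sketch of the sliding window in action, using only the class above (the calls are assumed to land within the same 60 s window):

    limiter = RateLimiter(requests_per_minute=2)

    allowed, _ = limiter.is_allowed("10.0.0.1")     # True: first request
    allowed, _ = limiter.is_allowed("10.0.0.1")     # True: second request
    allowed, wait = limiter.is_allowed("10.0.0.1")  # False: limit reached
    # `wait` is the time until the oldest timestamp ages out of the window.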

    def check_request(self, request: Request) -> tuple[bool, float]:
        """Check if the request is allowed under rate limits.

        Args:
            request: The incoming request.

        Returns:
            Tuple of (allowed: bool, retry_after: float).
        """
        self._cleanup_if_needed()
        client_ip = self._get_client_ip(request)
        return self.is_allowed(client_ip)


class RateLimitMiddleware(BaseHTTPMiddleware):
    """Middleware to apply rate limiting to specific routes.

    Usage:
        # Apply to all routes (not recommended for public static files)
        app.add_middleware(RateLimitMiddleware)

        # Apply only to specific paths
        app.add_middleware(
            RateLimitMiddleware,
            path_prefixes=["/api/matrix/"],
            requests_per_minute=30,
        )

    Attributes:
        path_prefixes: List of URL path prefixes to rate limit.
            If empty, applies to all paths.
        requests_per_minute: Maximum requests per minute per IP.
    """

    def __init__(
        self,
        app,
        path_prefixes: list[str] | None = None,
        requests_per_minute: int = 30,
    ):
        super().__init__(app)
        self.path_prefixes = path_prefixes or []
        self.limiter = RateLimiter(requests_per_minute=requests_per_minute)

    def _should_rate_limit(self, path: str) -> bool:
        """Check if the given path should be rate limited.

        Args:
            path: The request URL path.

        Returns:
            True if path matches any configured prefix.
        """
        if not self.path_prefixes:
            return True
        return any(path.startswith(prefix) for prefix in self.path_prefixes)

    async def dispatch(self, request: Request, call_next) -> Response:
        """Apply rate limiting to configured paths.

        Args:
            request: The incoming request.
            call_next: Callable to get the response from downstream.

        Returns:
            Response from downstream, or 429 if rate limited.
        """
        # Skip if path doesn't match configured prefixes
        if not self._should_rate_limit(request.url.path):
            return await call_next(request)

        # Check rate limit
        allowed, retry_after = self.limiter.check_request(request)

        if not allowed:
            return JSONResponse(
                status_code=429,
                content={
                    "error": "Rate limit exceeded. Try again later.",
                    "retry_after": int(retry_after) + 1,
                },
                headers={"Retry-After": str(int(retry_after) + 1)},
            )

        # Process the request
        return await call_next(request)
@@ -42,6 +42,114 @@ class RequestLoggingMiddleware(BaseHTTPMiddleware):
        self.skip_paths = set(skip_paths or [])
        self.log_level = log_level

    def _should_skip_path(self, path: str) -> bool:
        """Check if the request path should be skipped from logging.

        Args:
            path: The request URL path.

        Returns:
            True if the path should be skipped, False otherwise.
        """
        return path in self.skip_paths

    def _prepare_request_context(self, request: Request) -> tuple[str, float]:
        """Prepare context for request processing.

        Generates a correlation ID and records the start time.

        Args:
            request: The incoming request.

        Returns:
            Tuple of (correlation_id, start_time).
        """
        correlation_id = str(uuid.uuid4())[:8]
        request.state.correlation_id = correlation_id
        start_time = time.time()
        return correlation_id, start_time

    def _get_duration_ms(self, start_time: float) -> float:
        """Calculate the request duration in milliseconds.

        Args:
            start_time: The start time from time.time().

        Returns:
            Duration in milliseconds.
        """
        return (time.time() - start_time) * 1000

    def _log_success(
        self,
        request: Request,
        response: Response,
        correlation_id: str,
        duration_ms: float,
        client_ip: str,
        user_agent: str,
    ) -> None:
        """Log a successful request.

        Args:
            request: The incoming request.
            response: The response from downstream.
            correlation_id: The request correlation ID.
            duration_ms: Request duration in milliseconds.
            client_ip: Client IP address.
            user_agent: User-Agent header value.
        """
        self._log_request(
            method=request.method,
            path=request.url.path,
            status_code=response.status_code,
            duration_ms=duration_ms,
            client_ip=client_ip,
            user_agent=user_agent,
            correlation_id=correlation_id,
        )

    def _log_error(
        self,
        request: Request,
        exc: Exception,
        correlation_id: str,
        duration_ms: float,
        client_ip: str,
    ) -> None:
        """Log a failed request and capture the error.

        Args:
            request: The incoming request.
            exc: The exception that was raised.
            correlation_id: The request correlation ID.
            duration_ms: Request duration in milliseconds.
            client_ip: Client IP address.
        """
        logger.error(
            f"[{correlation_id}] {request.method} {request.url.path} "
            f"- ERROR - {duration_ms:.2f}ms - {client_ip} - {str(exc)}"
        )

        # Auto-escalate: create bug report task from unhandled exception
        try:
            from infrastructure.error_capture import capture_error

            capture_error(
                exc,
                source="http",
                context={
                    "method": request.method,
                    "path": request.url.path,
                    "correlation_id": correlation_id,
                    "client_ip": client_ip,
                    "duration_ms": f"{duration_ms:.0f}",
                },
            )
        except Exception:
            logger.warning("Escalation logging error: capture failed")
            # never let escalation break the request

    async def dispatch(self, request: Request, call_next) -> Response:
        """Log the request and response details.

@@ -52,74 +160,23 @@ class RequestLoggingMiddleware(BaseHTTPMiddleware):
        Returns:
            The response from downstream.
        """
        # Check if we should skip logging this path
        if request.url.path in self.skip_paths:
        if self._should_skip_path(request.url.path):
            return await call_next(request)

        # Generate correlation ID
        correlation_id = str(uuid.uuid4())[:8]
        request.state.correlation_id = correlation_id

        # Record start time
        start_time = time.time()

        # Get client info
        correlation_id, start_time = self._prepare_request_context(request)
        client_ip = self._get_client_ip(request)
        user_agent = request.headers.get("user-agent", "-")

        try:
            # Process the request
            response = await call_next(request)

            # Calculate duration
            duration_ms = (time.time() - start_time) * 1000

            # Log the request
            self._log_request(
                method=request.method,
                path=request.url.path,
                status_code=response.status_code,
                duration_ms=duration_ms,
                client_ip=client_ip,
                user_agent=user_agent,
                correlation_id=correlation_id,
            )

            # Add correlation ID to response headers
            duration_ms = self._get_duration_ms(start_time)
            self._log_success(request, response, correlation_id, duration_ms, client_ip, user_agent)
            response.headers["X-Correlation-ID"] = correlation_id

            return response

        except Exception as exc:
            # Calculate duration even for failed requests
            duration_ms = (time.time() - start_time) * 1000

            # Log the error
            logger.error(
                f"[{correlation_id}] {request.method} {request.url.path} "
                f"- ERROR - {duration_ms:.2f}ms - {client_ip} - {str(exc)}"
            )

            # Auto-escalate: create bug report task from unhandled exception
            try:
                from infrastructure.error_capture import capture_error

                capture_error(
                    exc,
                    source="http",
                    context={
                        "method": request.method,
                        "path": request.url.path,
                        "correlation_id": correlation_id,
                        "client_ip": client_ip,
                        "duration_ms": f"{duration_ms:.0f}",
                    },
                )
            except Exception as exc:
                logger.debug("Escalation logging error: %s", exc)
                pass  # never let escalation break the request

            # Re-raise the exception
            duration_ms = self._get_duration_ms(start_time)
            self._log_error(request, exc, correlation_id, duration_ms, client_ip)
            raise

    def _get_client_ip(self, request: Request) -> str:
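Every logged response now carries the correlation ID, so a single request can be traced across log lines. A hedged sketch of reading it back (the app and route are hypothetical; the header name comes from the dispatch code above):

    from starlette.testclient import TestClient

    client = TestClient(app)  # any FastAPI app with RequestLoggingMiddleware installed
    resp = client.get("/api/health")  # hypothetical route
    print(resp.headers["X-Correlation-ID"])  # 8-char ID, e.g. "3fa2b91c"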
435
src/dashboard/routes/daily_run.py
Normal file
@@ -0,0 +1,435 @@
"""Daily Run metrics routes — dashboard card for triage and session metrics."""

from __future__ import annotations

import json
import logging
import os
from dataclasses import dataclass
from datetime import UTC, datetime, timedelta
from pathlib import Path
from urllib.error import HTTPError, URLError
from urllib.request import Request as UrlRequest
from urllib.request import urlopen

from fastapi import APIRouter, Request
from fastapi.responses import HTMLResponse, JSONResponse

from config import settings
from dashboard.templating import templates

logger = logging.getLogger(__name__)

router = APIRouter(tags=["daily-run"])

REPO_ROOT = Path(settings.repo_root)
CONFIG_PATH = REPO_ROOT / "timmy_automations" / "config" / "daily_run.json"

DEFAULT_CONFIG = {
    "gitea_api": "http://localhost:3000/api/v1",
    "repo_slug": "rockachopa/Timmy-time-dashboard",
    "token_file": "~/.hermes/gitea_token",
    "layer_labels_prefix": "layer:",
}

LAYER_LABELS = ["layer:triage", "layer:micro-fix", "layer:tests", "layer:economy"]


def _load_config() -> dict:
    """Load configuration from config file with fallback to defaults."""
    config = DEFAULT_CONFIG.copy()
    if CONFIG_PATH.exists():
        try:
            file_config = json.loads(CONFIG_PATH.read_text())
            if "orchestrator" in file_config:
                config.update(file_config["orchestrator"])
        except (json.JSONDecodeError, OSError) as exc:
            logger.debug("Could not load daily_run config: %s", exc)

    # Environment variable overrides
    if os.environ.get("TIMMY_GITEA_API"):
        config["gitea_api"] = os.environ.get("TIMMY_GITEA_API")
    if os.environ.get("TIMMY_REPO_SLUG"):
        config["repo_slug"] = os.environ.get("TIMMY_REPO_SLUG")
    if os.environ.get("TIMMY_GITEA_TOKEN"):
        config["token"] = os.environ.get("TIMMY_GITEA_TOKEN")

    return config


def _get_token(config: dict) -> str | None:
    """Get Gitea token from environment or file."""
    if "token" in config:
        return config["token"]

    token_file = Path(config["token_file"]).expanduser()
    if token_file.exists():
        return token_file.read_text().strip()

    return None


class GiteaClient:
    """Simple Gitea API client with graceful degradation."""

    def __init__(self, config: dict, token: str | None):
        self.api_base = config["gitea_api"].rstrip("/")
        self.repo_slug = config["repo_slug"]
        self.token = token
        self._available: bool | None = None

    def _headers(self) -> dict:
        headers = {"Accept": "application/json"}
        if self.token:
            headers["Authorization"] = f"token {self.token}"
        return headers

    def _api_url(self, path: str) -> str:
        return f"{self.api_base}/repos/{self.repo_slug}/{path}"

    def is_available(self) -> bool:
        """Check if Gitea API is reachable."""
        if self._available is not None:
            return self._available

        try:
            req = UrlRequest(
                f"{self.api_base}/version",
                headers=self._headers(),
                method="GET",
            )
            with urlopen(req, timeout=5) as resp:
                self._available = resp.status == 200
                return self._available
        except (HTTPError, URLError, TimeoutError):
            self._available = False
            return False

    def get_paginated(self, path: str, params: dict | None = None) -> list:
        """Fetch all pages of a paginated endpoint."""
        all_items = []
        page = 1
        limit = 50

        while True:
            url = self._api_url(path)
            query_parts = [f"limit={limit}", f"page={page}"]
            if params:
                for key, val in params.items():
                    query_parts.append(f"{key}={val}")
            url = f"{url}?{'&'.join(query_parts)}"

            req = UrlRequest(url, headers=self._headers(), method="GET")
            with urlopen(req, timeout=15) as resp:
                batch = json.loads(resp.read())

            if not batch:
                break

            all_items.extend(batch)
            if len(batch) < limit:
                break
            page += 1

        return all_items
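For instance, pulling every closed issue that carries one label might look like this (a sketch built from the helpers defined above; the label value is illustrative):

    config = _load_config()
    client = GiteaClient(config, _get_token(config))

    if client.is_available():
        # Walks ?page=1,2,... until an empty or short batch comes back.
        issues = client.get_paginated("issues", {"state": "closed", "labels": "layer:tests"})
        print(f"{len(issues)} closed issues fetched")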


@dataclass
class LayerMetrics:
    """Metrics for a single layer."""

    name: str
    label: str
    current_count: int
    previous_count: int

    @property
    def trend(self) -> str:
        """Return trend indicator."""
        if self.previous_count == 0:
            return "→" if self.current_count == 0 else "↑"
        diff = self.current_count - self.previous_count
        pct = (diff / self.previous_count) * 100
        if pct > 20:
            return "↑↑"
        elif pct > 5:
            return "↑"
        elif pct < -20:
            return "↓↓"
        elif pct < -5:
            return "↓"
        return "→"

    @property
    def trend_color(self) -> str:
        """Return color for trend (CSS variable name)."""
        trend = self.trend
        if trend in ("↑↑", "↑"):
            return "var(--green)"  # More work = positive
        elif trend in ("↓↓", "↓"):
            return "var(--amber)"  # Less work = caution
        return "var(--text-dim)"
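Two worked values for the +/-5 % and +/-20 % bands above, using the dataclass exactly as defined:

    m = LayerMetrics(name="tests", label="layer:tests", current_count=13, previous_count=10)
    assert m.trend == "↑↑"  # diff = +3, pct = +30%: above the 20% band

    m = LayerMetrics(name="triage", label="layer:triage", current_count=9, previous_count=10)
    assert m.trend == "↓"   # diff = -1, pct = -10%: between -5% and -20%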


@dataclass
class DailyRunMetrics:
    """Complete Daily Run metrics."""

    sessions_completed: int
    sessions_previous: int
    layers: list[LayerMetrics]
    total_touched_current: int
    total_touched_previous: int
    lookback_days: int
    generated_at: str

    @property
    def sessions_trend(self) -> str:
        """Return sessions trend indicator."""
        if self.sessions_previous == 0:
            return "→" if self.sessions_completed == 0 else "↑"
        diff = self.sessions_completed - self.sessions_previous
        pct = (diff / self.sessions_previous) * 100
        if pct > 20:
            return "↑↑"
        elif pct > 5:
            return "↑"
        elif pct < -20:
            return "↓↓"
        elif pct < -5:
            return "↓"
        return "→"

    @property
    def sessions_trend_color(self) -> str:
        """Return color for sessions trend."""
        trend = self.sessions_trend
        if trend in ("↑↑", "↑"):
            return "var(--green)"
        elif trend in ("↓↓", "↓"):
            return "var(--amber)"
        return "var(--text-dim)"


def _extract_layer(labels: list[dict]) -> str | None:
    """Extract layer label from issue labels."""
    for label in labels:
        name = label.get("name", "")
        if name.startswith("layer:"):
            return name.replace("layer:", "")
    return None


def _load_cycle_data(days: int = 14) -> dict:
    """Load cycle retrospective data for session counting."""
    retro_file = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
    if not retro_file.exists():
        return {"current": 0, "previous": 0}

    try:
        entries = []
        for line in retro_file.read_text().strip().splitlines():
            try:
                entries.append(json.loads(line))
            except json.JSONDecodeError:
                continue

        now = datetime.now(UTC)
        current_cutoff = now - timedelta(days=days)
        previous_cutoff = now - timedelta(days=days * 2)

        current_count = 0
        previous_count = 0

        for entry in entries:
            ts_str = entry.get("timestamp", "")
            if not ts_str:
                continue
            try:
                ts = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
                if ts >= current_cutoff:
                    if entry.get("success", False):
                        current_count += 1
                elif ts >= previous_cutoff:
                    if entry.get("success", False):
                        previous_count += 1
            except (ValueError, TypeError):
                continue

        return {"current": current_count, "previous": previous_count}
    except (OSError, ValueError) as exc:
        logger.debug("Failed to load cycle data: %s", exc)
        return {"current": 0, "previous": 0}
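Only the timestamp and success fields matter to _load_cycle_data, so a minimal cycles.jsonl entry it would count looks like this (illustrative; any other fields in the real file are ignored by this function):

    # .loop/retro/cycles.jsonl holds one JSON object per line, e.g.
    # {"timestamp": "2025-01-15T09:30:00Z", "success": true}
    # Successes in the last `days` land in "current"; the `days` before that in "previous".
    counts = _load_cycle_data(days=14)
    print(counts)  # e.g. {"current": 3, "previous": 1}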


def _fetch_layer_metrics(
    client: GiteaClient, lookback_days: int = 7
) -> tuple[list[LayerMetrics], int, int]:
    """Fetch metrics for each layer from Gitea issues."""
    now = datetime.now(UTC)
    current_cutoff = now - timedelta(days=lookback_days)
    previous_cutoff = now - timedelta(days=lookback_days * 2)

    layers = []
    total_current = 0
    total_previous = 0

    for layer_label in LAYER_LABELS:
        layer_name = layer_label.replace("layer:", "")
        try:
            # Fetch all issues with this layer label (both open and closed)
            issues = client.get_paginated(
                "issues",
                {"state": "all", "labels": layer_label, "limit": 100},
            )

            current_count = 0
            previous_count = 0

            for issue in issues:
                updated_at = issue.get("updated_at", "")
                if not updated_at:
                    continue
                try:
                    updated = datetime.fromisoformat(updated_at.replace("Z", "+00:00"))
                    if updated >= current_cutoff:
                        current_count += 1
                    elif updated >= previous_cutoff:
                        previous_count += 1
                except (ValueError, TypeError):
                    continue

            layers.append(
                LayerMetrics(
                    name=layer_name,
                    label=layer_label,
                    current_count=current_count,
                    previous_count=previous_count,
                )
            )
            total_current += current_count
            total_previous += previous_count

        except (HTTPError, URLError) as exc:
            logger.debug("Failed to fetch issues for %s: %s", layer_label, exc)
            layers.append(
                LayerMetrics(
                    name=layer_name,
                    label=layer_label,
                    current_count=0,
                    previous_count=0,
                )
            )

    return layers, total_current, total_previous


def _get_metrics(lookback_days: int = 7) -> DailyRunMetrics | None:
    """Get Daily Run metrics from Gitea API."""
    config = _load_config()
    token = _get_token(config)
    client = GiteaClient(config, token)

    if not client.is_available():
        logger.debug("Gitea API not available for Daily Run metrics")
        return None

    try:
        # Get layer metrics from issues
        layers, total_current, total_previous = _fetch_layer_metrics(client, lookback_days)

        # Get session data from cycle retrospectives
        cycle_data = _load_cycle_data(days=lookback_days)

        return DailyRunMetrics(
            sessions_completed=cycle_data["current"],
            sessions_previous=cycle_data["previous"],
            layers=layers,
            total_touched_current=total_current,
            total_touched_previous=total_previous,
            lookback_days=lookback_days,
            generated_at=datetime.now(UTC).isoformat(),
        )
    except Exception as exc:
        logger.debug("Error fetching Daily Run metrics: %s", exc)
        return None


@router.get("/daily-run/metrics", response_class=JSONResponse)
async def daily_run_metrics_api(lookback_days: int = 7):
    """Return Daily Run metrics as JSON API."""
    metrics = _get_metrics(lookback_days)
    if not metrics:
        return JSONResponse(
            {"error": "Gitea API unavailable", "status": "unavailable"},
            status_code=503,
        )

    # Check for quest completions based on Daily Run metrics
    quest_rewards = []
    try:
        from dashboard.routes.quests import check_daily_run_quests

        quest_rewards = await check_daily_run_quests(agent_id="system")
    except Exception as exc:
        logger.debug("Quest checking failed: %s", exc)

    return JSONResponse(
        {
            "status": "ok",
            "lookback_days": metrics.lookback_days,
            "sessions": {
                "completed": metrics.sessions_completed,
                "previous": metrics.sessions_previous,
                "trend": metrics.sessions_trend,
            },
            "layers": [
                {
                    "name": layer.name,
                    "label": layer.label,
                    "current": layer.current_count,
                    "previous": layer.previous_count,
                    "trend": layer.trend,
                }
                for layer in metrics.layers
            ],
            "totals": {
                "current": metrics.total_touched_current,
                "previous": metrics.total_touched_previous,
            },
            "generated_at": metrics.generated_at,
            "quest_rewards": quest_rewards,
        }
    )


@router.get("/daily-run/panel", response_class=HTMLResponse)
async def daily_run_panel(request: Request, lookback_days: int = 7):
    """Return Daily Run metrics panel HTML for HTMX polling."""
    metrics = _get_metrics(lookback_days)

    # Build Gitea URLs for filtered issue lists
    config = _load_config()
    repo_slug = config.get("repo_slug", "rockachopa/Timmy-time-dashboard")
    gitea_base = config.get("gitea_api", "http://localhost:3000/api/v1").replace("/api/v1", "")

    # Logbook URL (link to issues with any layer label)
    layer_labels = ",".join(LAYER_LABELS)
    logbook_url = f"{gitea_base}/{repo_slug}/issues?labels={layer_labels}&state=all"

    # Layer-specific URLs
    layer_urls = {
        layer: f"{gitea_base}/{repo_slug}/issues?labels=layer:{layer}&state=all"
        for layer in ["triage", "micro-fix", "tests", "economy"]
    }

    return templates.TemplateResponse(
        request,
        "partials/daily_run_panel.html",
        {
            "metrics": metrics,
            "logbook_url": logbook_url,
            "layer_urls": layer_urls,
            "gitea_available": metrics is not None,
        },
    )
@@ -75,6 +75,7 @@ def _query_database(db_path: str) -> dict:
                "truncated": count > MAX_ROWS,
            }
        except Exception as exc:
            logger.exception("Failed to query table %s", table_name)
            result["tables"][table_name] = {
                "error": str(exc),
                "columns": [],
@@ -83,6 +84,7 @@ def _query_database(db_path: str) -> dict:
                "truncated": False,
            }
    except Exception as exc:
        logger.exception("Failed to query database %s", db_path)
        result["error"] = str(exc)

    return result

@@ -135,6 +135,7 @@ def _run_grok_query(message: str) -> dict:
        result = backend.run(message)
        return {"response": f"**[Grok]{invoice_note}:** {result.content}", "error": None}
    except Exception as exc:
        logger.exception("Grok query failed")
        return {"response": None, "error": f"Grok error: {exc}"}


@@ -193,6 +194,7 @@ async def grok_stats():
            "model": settings.grok_default_model,
        }
    except Exception as exc:
        logger.exception("Failed to load Grok stats")
        return {"error": str(exc)}


@@ -148,6 +148,7 @@ def _check_sqlite() -> DependencyStatus:
            details={"path": str(db_path)},
        )
    except Exception as exc:
        logger.exception("SQLite health check failed")
        return DependencyStatus(
            name="SQLite Database",
            status="unavailable",
447
src/dashboard/routes/quests.py
Normal file
@@ -0,0 +1,447 @@
"""Quest system routes for agent token rewards.

Provides API endpoints for:
- Listing quests and their status
- Claiming quest rewards
- Getting quest leaderboard
- Quest progress tracking
"""

from __future__ import annotations

import logging
from typing import Any

from fastapi import APIRouter, Request
from fastapi.responses import HTMLResponse, JSONResponse
from pydantic import BaseModel

from dashboard.templating import templates
from timmy.quest_system import (
    QuestStatus,
    auto_evaluate_all_quests,
    claim_quest_reward,
    evaluate_quest_progress,
    get_active_quests,
    get_agent_quests_status,
    get_quest_definition,
    get_quest_leaderboard,
    load_quest_config,
)

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/quests", tags=["quests"])


class ClaimQuestRequest(BaseModel):
    """Request to claim a quest reward."""

    agent_id: str
    quest_id: str


class EvaluateQuestRequest(BaseModel):
    """Request to manually evaluate quest progress."""

    agent_id: str
    quest_id: str


# ---------------------------------------------------------------------------
# API Endpoints
# ---------------------------------------------------------------------------


@router.get("/api/definitions")
async def get_quest_definitions_api() -> JSONResponse:
    """Get all quest definitions.

    Returns:
        JSON list of all quest definitions with their criteria.
    """
    definitions = get_active_quests()
    return JSONResponse(
        {
            "quests": [
                {
                    "id": q.id,
                    "name": q.name,
                    "description": q.description,
                    "reward_tokens": q.reward_tokens,
                    "type": q.quest_type.value,
                    "repeatable": q.repeatable,
                    "cooldown_hours": q.cooldown_hours,
                    "criteria": q.criteria,
                }
                for q in definitions
            ]
        }
    )


@router.get("/api/status/{agent_id}")
async def get_agent_quest_status(agent_id: str) -> JSONResponse:
    """Get quest status for a specific agent.

    Returns:
        Complete quest status including progress, completion counts,
        and tokens earned.
    """
    status = get_agent_quests_status(agent_id)
    return JSONResponse(status)


@router.post("/api/claim")
async def claim_quest_reward_api(request: ClaimQuestRequest) -> JSONResponse:
    """Claim a quest reward for an agent.

    The quest must be completed but not yet claimed.
    """
    reward = claim_quest_reward(request.quest_id, request.agent_id)

    if not reward:
        return JSONResponse(
            {
                "success": False,
                "error": "Quest not completed, already claimed, or on cooldown",
            },
            status_code=400,
        )

    return JSONResponse(
        {
            "success": True,
            "reward": reward,
        }
    )
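A sketch of claiming a reward over HTTP (the host, port, and both IDs are placeholder assumptions; the path, body shape, and status codes come from the route above):

    import httpx

    resp = httpx.post(
        "http://localhost:8000/quests/api/claim",  # assumed base URL
        json={"agent_id": "some_agent", "quest_id": "some_quest"},  # hypothetical IDs
    )
    # 200 -> {"success": true, "reward": {...}}
    # 400 -> quest not completed, already claimed, or on cooldown
    print(resp.status_code, resp.json())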


@router.post("/api/evaluate")
async def evaluate_quest_api(request: EvaluateQuestRequest) -> JSONResponse:
    """Manually evaluate quest progress with provided context.

    This is useful for testing or when the quest completion
    needs to be triggered manually.
    """
    quest = get_quest_definition(request.quest_id)
    if not quest:
        return JSONResponse(
            {"success": False, "error": "Quest not found"},
            status_code=404,
        )

    # Build evaluation context based on quest type
    context = await _build_evaluation_context(quest)

    progress = evaluate_quest_progress(request.quest_id, request.agent_id, context)

    if not progress:
        return JSONResponse(
            {"success": False, "error": "Failed to evaluate quest"},
            status_code=500,
        )

    # Auto-claim if completed
    reward = None
    if progress.status == QuestStatus.COMPLETED:
        reward = claim_quest_reward(request.quest_id, request.agent_id)

    return JSONResponse(
        {
            "success": True,
            "progress": progress.to_dict(),
            "reward": reward,
            "completed": progress.status == QuestStatus.COMPLETED,
        }
    )


@router.get("/api/leaderboard")
async def get_leaderboard_api() -> JSONResponse:
    """Get the quest completion leaderboard.

    Returns agents sorted by total tokens earned.
    """
    leaderboard = get_quest_leaderboard()
    return JSONResponse(
        {
            "leaderboard": leaderboard,
        }
    )


@router.post("/api/reload")
async def reload_quest_config_api() -> JSONResponse:
    """Reload quest configuration from quests.yaml.

    Useful for applying quest changes without restarting.
    """
    definitions, quest_settings = load_quest_config()
    return JSONResponse(
        {
            "success": True,
            "quests_loaded": len(definitions),
            "settings": quest_settings,
        }
    )


# ---------------------------------------------------------------------------
# Stress Mode Endpoints
# ---------------------------------------------------------------------------


@router.get("/api/stress")
async def get_stress_status_api() -> JSONResponse:
    """Get current stress mode status and multipliers.

    Returns:
        Current stress mode, score, active signals, and multipliers
    """
    try:
        from timmy.stress_detector import (
            detect_stress_mode,
            get_stress_summary,
        )

        snapshot = detect_stress_mode()
        summary = get_stress_summary()

        return JSONResponse(
            {
                "status": "ok",
                "stress": summary,
                "raw": snapshot.to_dict(),
            }
        )
    except Exception as exc:
        logger.warning("Failed to get stress status: %s", exc)
        return JSONResponse(
            {
                "status": "error",
                "error": str(exc),
            },
            status_code=500,
        )


@router.post("/api/stress/refresh")
async def refresh_stress_detection_api() -> JSONResponse:
    """Force a fresh stress detection check.

    Normally stress is cached for 60 seconds. This endpoint
    bypasses the cache for immediate results.
    """
    try:
        from timmy.stress_detector import detect_stress_mode, get_stress_summary

        snapshot = detect_stress_mode(force_refresh=True)
        summary = get_stress_summary()

        return JSONResponse(
            {
                "status": "ok",
                "stress": summary,
                "raw": snapshot.to_dict(),
            }
        )
    except Exception as exc:
        logger.warning("Failed to refresh stress detection: %s", exc)
        return JSONResponse(
            {
                "status": "error",
                "error": str(exc),
            },
            status_code=500,
        )


# ---------------------------------------------------------------------------
# Dashboard UI Endpoints
# ---------------------------------------------------------------------------


@router.get("", response_class=HTMLResponse)
async def quests_dashboard(request: Request) -> HTMLResponse:
    """Main quests dashboard page."""
    return templates.TemplateResponse(
        request,
        "quests.html",
        {"agent_id": "current_user"},
    )


@router.get("/panel/{agent_id}", response_class=HTMLResponse)
async def quests_panel(request: Request, agent_id: str) -> HTMLResponse:
    """Quest panel for HTMX partial updates."""
    status = get_agent_quests_status(agent_id)
    return templates.TemplateResponse(
        request,
        "partials/quests_panel.html",
        {
            "agent_id": agent_id,
            "quests": status["quests"],
            "total_tokens": status["total_tokens_earned"],
            "completed_count": status["total_quests_completed"],
        },
    )


# ---------------------------------------------------------------------------
# Internal Functions
# ---------------------------------------------------------------------------


async def _build_evaluation_context(quest) -> dict[str, Any]:
    """Build evaluation context for a quest based on its type."""
    context: dict[str, Any] = {}

    if quest.quest_type.value == "issue_count":
        # Fetch closed issues with relevant labels
        context["closed_issues"] = await _fetch_closed_issues(
            quest.criteria.get("issue_labels", [])
        )

    elif quest.quest_type.value == "issue_reduce":
        # Fetch current and previous issue counts
        labels = quest.criteria.get("issue_labels", [])
        context["current_issue_count"] = await _fetch_open_issue_count(labels)
        context["previous_issue_count"] = await _fetch_previous_issue_count(
            labels, quest.criteria.get("lookback_days", 7)
        )

    elif quest.quest_type.value == "daily_run":
        # Fetch Daily Run metrics
        metrics = await _fetch_daily_run_metrics()
        context["sessions_completed"] = metrics.get("sessions_completed", 0)

    return context


async def _fetch_closed_issues(labels: list[str]) -> list[dict]:
    """Fetch closed issues matching the given labels."""
    try:
        from dashboard.routes.daily_run import GiteaClient, _load_config

        config = _load_config()
        token = _get_gitea_token(config)
        client = GiteaClient(config, token)

        if not client.is_available():
            return []

        # Build label filter
        label_filter = ",".join(labels) if labels else ""

        issues = client.get_paginated(
            "issues",
            {"state": "closed", "labels": label_filter, "limit": 100},
        )

        return issues
    except Exception as exc:
        logger.debug("Failed to fetch closed issues: %s", exc)
        return []


async def _fetch_open_issue_count(labels: list[str]) -> int:
    """Fetch count of open issues with given labels."""
    try:
        from dashboard.routes.daily_run import GiteaClient, _load_config

        config = _load_config()
        token = _get_gitea_token(config)
        client = GiteaClient(config, token)

        if not client.is_available():
            return 0

        label_filter = ",".join(labels) if labels else ""

        issues = client.get_paginated(
            "issues",
            {"state": "open", "labels": label_filter, "limit": 100},
        )

        return len(issues)
    except Exception as exc:
        logger.debug("Failed to fetch open issue count: %s", exc)
        return 0


async def _fetch_previous_issue_count(labels: list[str], lookback_days: int) -> int:
    """Fetch previous issue count (simplified - uses current for now)."""
    # This is a simplified implementation
    # In production, you'd query historical data
    return await _fetch_open_issue_count(labels)


async def _fetch_daily_run_metrics() -> dict[str, Any]:
    """Fetch Daily Run metrics."""
    try:
        from dashboard.routes.daily_run import _get_metrics

        metrics = _get_metrics(lookback_days=7)
        if metrics:
            return {
                "sessions_completed": metrics.sessions_completed,
                "sessions_previous": metrics.sessions_previous,
            }
    except Exception as exc:
        logger.debug("Failed to fetch Daily Run metrics: %s", exc)

    return {"sessions_completed": 0, "sessions_previous": 0}


def _get_gitea_token(config: dict) -> str | None:
    """Get Gitea token from config."""
    if "token" in config:
        return config["token"]

    from pathlib import Path

    token_file = Path(config.get("token_file", "~/.hermes/gitea_token")).expanduser()
    if token_file.exists():
        return token_file.read_text().strip()

    return None


# ---------------------------------------------------------------------------
# Daily Run Integration
# ---------------------------------------------------------------------------


async def check_daily_run_quests(agent_id: str = "system") -> list[dict]:
    """Check and award Daily Run related quests.

    Called by the Daily Run system when metrics are updated.

    Returns:
        List of rewards awarded
    """
    # Check if auto-detect is enabled
    _, quest_settings = load_quest_config()
    if not quest_settings.get("auto_detect_on_daily_run", True):
        return []

    # Build context from Daily Run metrics
    metrics = await _fetch_daily_run_metrics()
    context = {
        "sessions_completed": metrics.get("sessions_completed", 0),
        "sessions_previous": metrics.get("sessions_previous", 0),
    }

    # Add closed issues for issue_count quests
    active_quests = get_active_quests()
    for quest in active_quests:
        if quest.quest_type.value == "issue_count":
            labels = quest.criteria.get("issue_labels", [])
            context["closed_issues"] = await _fetch_closed_issues(labels)
            break  # Only need to fetch once

    # Evaluate all quests
    rewards = auto_evaluate_all_quests(agent_id, context)

    return rewards
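Because check_daily_run_quests is a coroutine, callers outside the event loop need asyncio; a standalone sketch using only names defined above:

    import asyncio

    async def main() -> None:
        # Evaluates every active quest against fresh Daily Run context;
        # returns [] when auto_detect_on_daily_run is switched off.
        rewards = await check_daily_run_quests(agent_id="system")
        for reward in rewards:
            print(reward)

    asyncio.run(main())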
@@ -59,6 +59,7 @@ async def tts_speak(text: str = Form(...)):
        voice_tts.speak(text)
        return {"spoken": True, "text": text}
    except Exception as exc:
        logger.exception("TTS speak failed")
        return {"spoken": False, "reason": str(exc)}


@@ -17,16 +17,221 @@ or missing.
import asyncio
import json
import logging
import math
import re
import time
from collections import deque
from datetime import UTC, datetime
from pathlib import Path
from typing import Any

from fastapi import APIRouter, WebSocket
import yaml
from fastapi import APIRouter, Request, WebSocket
from fastapi.responses import JSONResponse
from pydantic import BaseModel

from config import settings
from infrastructure.presence import produce_bark, serialize_presence
from timmy.memory_system import search_memories
from timmy.workshop_state import PRESENCE_FILE

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/world", tags=["world"])
matrix_router = APIRouter(prefix="/api/matrix", tags=["matrix"])

# ---------------------------------------------------------------------------
# Matrix Bark Endpoint — HTTP fallback for bark messages
# ---------------------------------------------------------------------------

# Rate limiting: 1 request per 3 seconds per visitor_id
_BARK_RATE_LIMIT_SECONDS = 3
_bark_last_request: dict[str, float] = {}


class BarkRequest(BaseModel):
    """Request body for POST /api/matrix/bark."""

    text: str
    visitor_id: str


@matrix_router.post("/bark")
async def post_matrix_bark(request: BarkRequest) -> JSONResponse:
    """Generate a bark response for a visitor message.

    HTTP fallback for when WebSocket isn't available. The Matrix frontend
    can POST a message and get Timmy's bark response back as JSON.

    Rate limited to 1 request per 3 seconds per visitor_id.

    Request body:
      - text: The visitor's message text
      - visitor_id: Unique identifier for the visitor (used for rate limiting)

    Returns:
      - 200: Bark message in produce_bark() format
      - 429: Rate limit exceeded (try again later)
      - 422: Invalid request (missing/invalid fields)
    """
    # Validate inputs
    text = request.text.strip() if request.text else ""
    visitor_id = request.visitor_id.strip() if request.visitor_id else ""

    if not text:
        return JSONResponse(
            status_code=422,
            content={"error": "text is required"},
        )

    if not visitor_id:
        return JSONResponse(
            status_code=422,
            content={"error": "visitor_id is required"},
        )

    # Rate limiting check
    now = time.time()
    last_request = _bark_last_request.get(visitor_id, 0)
    time_since_last = now - last_request

    if time_since_last < _BARK_RATE_LIMIT_SECONDS:
        retry_after = _BARK_RATE_LIMIT_SECONDS - time_since_last
        return JSONResponse(
            status_code=429,
            content={"error": "Rate limit exceeded. Try again later."},
            headers={"Retry-After": str(int(retry_after) + 1)},
        )

    # Record this request
    _bark_last_request[visitor_id] = now

    # Generate bark response
    try:
        reply = await _generate_bark(text)
    except Exception as exc:
        logger.warning("Bark generation failed: %s", exc)
        reply = "Hmm, my thoughts are a bit tangled right now."

    # Build bark response using produce_bark format
    bark = produce_bark(agent_id="timmy", text=reply, style="speech")

    return JSONResponse(
        content=bark,
        headers={"Cache-Control": "no-cache, no-store"},
    )
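From the frontend's side the fallback flow, including the 429 path, might look like this (the base URL is an assumption; the path, payload, and headers come from the endpoint above):

    import httpx

    def send_bark(text: str, visitor_id: str) -> dict | None:
        resp = httpx.post(
            "http://localhost:8000/api/matrix/bark",  # assumed host/port
            json={"text": text, "visitor_id": visitor_id},
        )
        if resp.status_code == 429:
            # Honour the Retry-After hint before resending.
            print("rate limited, retry in", resp.headers["Retry-After"], "s")
            return None
        resp.raise_for_status()
        return resp.json()  # produce_bark() payload; fields defined in infrastructure.presence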


# ---------------------------------------------------------------------------
# Matrix Agent Registry — serves agents to the Matrix visualization
# ---------------------------------------------------------------------------

# Agent color mapping — consistent with Matrix visual identity
_AGENT_COLORS: dict[str, str] = {
    "timmy": "#FFD700",  # Gold
    "orchestrator": "#FFD700",  # Gold
    "perplexity": "#3B82F6",  # Blue
    "replit": "#F97316",  # Orange
    "kimi": "#06B6D4",  # Cyan
    "claude": "#A855F7",  # Purple
    "researcher": "#10B981",  # Emerald
    "coder": "#EF4444",  # Red
    "writer": "#EC4899",  # Pink
    "memory": "#8B5CF6",  # Violet
    "experimenter": "#14B8A6",  # Teal
    "forge": "#EF4444",  # Red (coder alias)
    "seer": "#10B981",  # Emerald (researcher alias)
    "quill": "#EC4899",  # Pink (writer alias)
    "echo": "#8B5CF6",  # Violet (memory alias)
    "lab": "#14B8A6",  # Teal (experimenter alias)
}

# Agent shape mapping for 3D visualization
_AGENT_SHAPES: dict[str, str] = {
    "timmy": "sphere",
    "orchestrator": "sphere",
    "perplexity": "cube",
    "replit": "cylinder",
    "kimi": "dodecahedron",
    "claude": "octahedron",
    "researcher": "icosahedron",
    "coder": "cube",
    "writer": "cone",
    "memory": "torus",
    "experimenter": "tetrahedron",
    "forge": "cube",
    "seer": "icosahedron",
    "quill": "cone",
    "echo": "torus",
    "lab": "tetrahedron",
}

# Default fallback values
_DEFAULT_COLOR = "#9CA3AF"  # Gray
_DEFAULT_SHAPE = "sphere"
_DEFAULT_STATUS = "available"


def _get_agent_color(agent_id: str) -> str:
    """Get the Matrix color for an agent."""
    return _AGENT_COLORS.get(agent_id.lower(), _DEFAULT_COLOR)


def _get_agent_shape(agent_id: str) -> str:
    """Get the Matrix shape for an agent."""
    return _AGENT_SHAPES.get(agent_id.lower(), _DEFAULT_SHAPE)


def _compute_circular_positions(count: int, radius: float = 3.0) -> list[dict[str, float]]:
    """Compute circular positions for agents in the Matrix.

    Agents are arranged in a circle on the XZ plane at y=0.
    """
    positions = []
    for i in range(count):
        angle = (2 * math.pi * i) / count
        x = radius * math.cos(angle)
        z = radius * math.sin(angle)
        positions.append({"x": round(x, 2), "y": 0.0, "z": round(z, 2)})
    return positions
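For example, four agents land on the quarter points of the circle (values rounded to two decimals as in the function; tiny trigonometric residue rounds to +/-0.0):

    for pos in _compute_circular_positions(4, radius=3.0):
        print(pos)
    # {'x': 3.0, 'y': 0.0, 'z': 0.0}    angle 0
    # {'x': 0.0, 'y': 0.0, 'z': 3.0}    angle pi/2
    # {'x': -3.0, 'y': 0.0, 'z': 0.0}   angle pi
    # {'x': -0.0, 'y': 0.0, 'z': -3.0}  angle 3*pi/2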


def _build_matrix_agents_response() -> list[dict[str, Any]]:
    """Build the Matrix agent registry response.

    Reads from agents.yaml and returns agents with Matrix-compatible
    formatting including colors, shapes, and positions.
    """
    try:
        from timmy.agents.loader import list_agents

        agents = list_agents()
        if not agents:
            return []

        positions = _compute_circular_positions(len(agents))

        result = []
        for i, agent in enumerate(agents):
            agent_id = agent.get("id", "")
            result.append(
                {
                    "id": agent_id,
                    "display_name": agent.get("name", agent_id.title()),
                    "role": agent.get("role", "general"),
                    "color": _get_agent_color(agent_id),
                    "position": positions[i],
                    "shape": _get_agent_shape(agent_id),
                    "status": agent.get("status", _DEFAULT_STATUS),
                }
            )

        return result
    except Exception as exc:
        logger.warning("Failed to load agents for Matrix: %s", exc)
        return []


logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/world", tags=["world"])
@@ -149,21 +354,7 @@ def _read_presence_file() -> dict | None:

def _build_world_state(presence: dict) -> dict:
    """Transform presence dict into the world/state API response."""
    return {
        "timmyState": {
            "mood": presence.get("mood", "calm"),
            "activity": presence.get("current_focus", "idle"),
            "energy": presence.get("energy", 0.5),
            "confidence": presence.get("confidence", 0.7),
        },
        "familiar": presence.get("familiar"),
        "activeThreads": presence.get("active_threads", []),
        "recentEvents": presence.get("recent_events", []),
        "concerns": presence.get("concerns", []),
        "visitorPresent": False,
        "updatedAt": presence.get("liveness", datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")),
        "version": presence.get("version", 1),
    }
    return serialize_presence(presence)


def _get_current_state() -> dict:
@@ -224,6 +415,50 @@ async def _heartbeat(websocket: WebSocket) -> None:
    logger.debug("Heartbeat stopped — connection gone")


async def _authenticate_ws(websocket: WebSocket) -> bool:
    """Authenticate WebSocket connection using matrix_ws_token.

    Checks for token in query param ?token= first. If no query param,
    accepts the connection and waits for first message with
    {"type": "auth", "token": "..."}.

    Returns True if authenticated (or if auth is disabled).
    Returns False and closes connection with code 4001 if invalid.
    """
    token_setting = settings.matrix_ws_token

    # Auth disabled in dev mode (empty/unset token)
    if not token_setting:
        return True

    # Check query param first (can validate before accept)
    query_token = websocket.query_params.get("token", "")
    if query_token:
        if query_token == token_setting:
            return True
        # Invalid token in query param - we need to accept to close properly
        await websocket.accept()
        await websocket.close(code=4001, reason="Invalid token")
        return False

    # No query token - accept and wait for auth message
    await websocket.accept()

    # Wait for auth message as first message
    try:
        raw = await websocket.receive_text()
        data = json.loads(raw)
        if data.get("type") == "auth" and data.get("token") == token_setting:
            return True
        # Invalid auth message
        await websocket.close(code=4001, reason="Invalid token")
        return False
    except (json.JSONDecodeError, TypeError):
        # Non-JSON first message without valid token
        await websocket.close(code=4001, reason="Authentication required")
        return False
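From the client side, the first-message handshake might look like this (a sketch using the third-party websockets package; the URL and token are placeholders):

    import asyncio
    import json
    import websockets  # third-party client library, assumed installed

    async def connect_with_auth() -> None:
        async with websockets.connect("ws://localhost:8000/api/world/ws") as ws:
            # First frame must be the auth message when no ?token= query param is sent.
            await ws.send(json.dumps({"type": "auth", "token": "<matrix_ws_token>"}))
            ack = json.loads(await ws.recv())  # expect {"type": "connection_ack"}
            print(ack)

    asyncio.run(connect_with_auth())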


@router.websocket("/ws")
async def world_ws(websocket: WebSocket) -> None:
    """Accept a Workshop client and keep it alive for state broadcasts.
@@ -232,8 +467,28 @@ async def world_ws(websocket: WebSocket) -> None:
    client never starts from a blank slate. Incoming frames are parsed
    as JSON — ``visitor_message`` triggers a bark response. A background
    heartbeat ping runs every 15 s to detect dead connections early.

    Authentication:
      - If matrix_ws_token is configured, clients must provide it via
        ?token= query param or in the first message as
        {"type": "auth", "token": "..."}.
      - Invalid token results in close code 4001.
      - Valid token receives a connection_ack message.
    """
    await websocket.accept()
    # Authenticate (may accept connection internally)
    is_authed = await _authenticate_ws(websocket)
    if not is_authed:
        logger.info("World WS connection rejected — invalid token")
        return

    # Auth passed - accept if not already accepted
    if websocket.client_state.name != "CONNECTED":
        await websocket.accept()

    # Send connection_ack if auth was required
    if settings.matrix_ws_token:
        await websocket.send_text(json.dumps({"type": "connection_ack"}))

    _ws_clients.append(websocket)
    logger.info("World WS connected — %d clients", len(_ws_clients))

@@ -383,3 +638,428 @@ async def _generate_bark(visitor_text: str) -> str:
    except Exception as exc:
        logger.warning("Bark generation failed: %s", exc)
        return "Hmm, my thoughts are a bit tangled right now."


# ---------------------------------------------------------------------------
# Matrix Configuration Endpoint
# ---------------------------------------------------------------------------

# Default Matrix configuration (fallback when matrix.yaml is missing/corrupt)
_DEFAULT_MATRIX_CONFIG: dict[str, Any] = {
    "lighting": {
        "ambient_color": "#1a1a2e",
        "ambient_intensity": 0.4,
        "point_lights": [
            {"color": "#FFD700", "intensity": 1.2, "position": {"x": 0, "y": 5, "z": 0}},
            {"color": "#3B82F6", "intensity": 0.8, "position": {"x": -5, "y": 3, "z": -5}},
            {"color": "#A855F7", "intensity": 0.6, "position": {"x": 5, "y": 3, "z": 5}},
        ],
    },
    "environment": {
        "rain_enabled": False,
        "starfield_enabled": True,
        "fog_color": "#0f0f23",
        "fog_density": 0.02,
    },
    "features": {
        "chat_enabled": True,
        "visitor_avatars": True,
        "pip_familiar": True,
        "workshop_portal": True,
    },
}


def _load_matrix_config() -> dict[str, Any]:
    """Load Matrix world configuration from matrix.yaml with fallback to defaults.

    Returns a dict with sections: lighting, environment, features.
    If the config file is missing or invalid, returns sensible defaults.
    """
    try:
        config_path = Path(settings.repo_root) / "config" / "matrix.yaml"
        if not config_path.exists():
            logger.debug("matrix.yaml not found, using default config")
            return _DEFAULT_MATRIX_CONFIG.copy()

        raw = config_path.read_text()
        config = yaml.safe_load(raw)
        if not isinstance(config, dict):
            logger.warning("matrix.yaml invalid format, using defaults")
            return _DEFAULT_MATRIX_CONFIG.copy()

        # Merge with defaults to ensure all required fields exist
        result: dict[str, Any] = {
            "lighting": {
                **_DEFAULT_MATRIX_CONFIG["lighting"],
                **config.get("lighting", {}),
            },
            "environment": {
                **_DEFAULT_MATRIX_CONFIG["environment"],
                **config.get("environment", {}),
            },
            "features": {
                **_DEFAULT_MATRIX_CONFIG["features"],
                **config.get("features", {}),
            },
        }

        # Ensure point_lights is a list
        if "point_lights" in config.get("lighting", {}):
            result["lighting"]["point_lights"] = config["lighting"]["point_lights"]
        else:
            result["lighting"]["point_lights"] = _DEFAULT_MATRIX_CONFIG["lighting"]["point_lights"]

        return result
    except Exception as exc:
        logger.warning("Failed to load matrix config: %s, using defaults", exc)
        return _DEFAULT_MATRIX_CONFIG.copy()
|
||||
|
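A minimal sketch of the per-section merge semantics above, using the module's own _DEFAULT_MATRIX_CONFIG; the partial dict stands in for a parsed matrix.yaml.
# Sketch: keys present in the YAML override defaults within each section;
# missing keys fall back. The `partial` dict is illustrative.
partial = {"lighting": {"ambient_intensity": 0.9}}
merged = {**_DEFAULT_MATRIX_CONFIG["lighting"], **partial.get("lighting", {})}
assert merged["ambient_intensity"] == 0.9       # overridden by YAML
assert merged["ambient_color"] == "#1a1a2e"     # default preserved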
||||
|
||||
@matrix_router.get("/config")
|
||||
async def get_matrix_config() -> JSONResponse:
|
||||
"""Return Matrix world configuration.
|
||||
|
||||
Serves lighting presets, environment settings, and feature flags
|
||||
to the Matrix frontend so it can be config-driven rather than
|
||||
hardcoded. Reads from config/matrix.yaml with sensible defaults.
|
||||
|
||||
Response structure:
|
||||
- lighting: ambient_color, ambient_intensity, point_lights[]
|
||||
- environment: rain_enabled, starfield_enabled, fog_color, fog_density
|
||||
- features: chat_enabled, visitor_avatars, pip_familiar, workshop_portal
|
||||
"""
|
||||
config = _load_matrix_config()
|
||||
return JSONResponse(
|
||||
content=config,
|
||||
headers={"Cache-Control": "no-cache, no-store"},
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Matrix Agent Registry Endpoint
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@matrix_router.get("/agents")
|
||||
async def get_matrix_agents() -> JSONResponse:
|
||||
"""Return the agent registry for Matrix visualization.
|
||||
|
||||
Serves agents from agents.yaml with Matrix-compatible formatting:
|
||||
- id: agent identifier
|
||||
- display_name: human-readable name
|
||||
- role: functional role
|
||||
- color: hex color code for visualization
|
||||
- position: {x, y, z} coordinates in 3D space
|
||||
- shape: 3D shape type
|
||||
- status: availability status
|
||||
|
||||
Agents are arranged in a circular layout by default.
|
||||
Returns 200 with empty list if no agents configured.
|
||||
"""
|
||||
agents = _build_matrix_agents_response()
|
||||
return JSONResponse(
|
||||
content=agents,
|
||||
headers={"Cache-Control": "no-cache, no-store"},
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Matrix Thoughts Endpoint — Timmy's recent thought stream for Matrix display
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_MAX_THOUGHT_LIMIT = 50 # Maximum thoughts allowed per request
|
||||
_DEFAULT_THOUGHT_LIMIT = 10 # Default number of thoughts to return
|
||||
_MAX_THOUGHT_TEXT_LEN = 500 # Max characters for thought text
|
||||
|
||||
|
||||
def _build_matrix_thoughts_response(limit: int = _DEFAULT_THOUGHT_LIMIT) -> list[dict[str, Any]]:
|
||||
"""Build the Matrix thoughts response from the thinking engine.
|
||||
|
||||
Returns recent thoughts formatted for Matrix display:
|
||||
- id: thought UUID
|
||||
- text: thought content (truncated to 500 chars)
|
||||
- created_at: ISO-8601 timestamp
|
||||
- chain_id: parent thought ID (or null if root thought)
|
||||
|
||||
Returns empty list if thinking engine is disabled or fails.
|
||||
"""
|
||||
try:
|
||||
from timmy.thinking import thinking_engine
|
||||
|
||||
thoughts = thinking_engine.get_recent_thoughts(limit=limit)
|
||||
return [
|
||||
{
|
||||
"id": t.id,
|
||||
"text": t.content[:_MAX_THOUGHT_TEXT_LEN],
|
||||
"created_at": t.created_at,
|
||||
"chain_id": t.parent_id,
|
||||
}
|
||||
for t in thoughts
|
||||
]
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to load thoughts for Matrix: %s", exc)
|
||||
return []
|
||||
|
||||
|
||||
@matrix_router.get("/thoughts")
|
||||
async def get_matrix_thoughts(limit: int = _DEFAULT_THOUGHT_LIMIT) -> JSONResponse:
|
||||
"""Return Timmy's recent thoughts formatted for Matrix display.
|
||||
|
||||
This is the REST companion to the thought WebSocket messages,
|
||||
allowing the Matrix frontend to display what Timmy is actually
|
||||
thinking about rather than canned contextual lines.
|
||||
|
||||
Query params:
|
||||
- limit: Number of thoughts to return (default 10, max 50)
|
||||
|
||||
Response: JSON array of thought objects:
|
||||
- id: thought UUID
|
||||
- text: thought content (truncated to 500 chars)
|
||||
- created_at: ISO-8601 timestamp
|
||||
- chain_id: parent thought ID (null if root thought)
|
||||
|
||||
Returns empty array if thinking engine is disabled or fails.
|
||||
"""
|
||||
# Clamp limit to valid range
|
||||
if limit < 1:
|
||||
limit = 1
|
||||
elif limit > _MAX_THOUGHT_LIMIT:
|
||||
limit = _MAX_THOUGHT_LIMIT
|
||||
|
||||
thoughts = _build_matrix_thoughts_response(limit=limit)
|
||||
return JSONResponse(
|
||||
content=thoughts,
|
||||
headers={"Cache-Control": "no-cache, no-store"},
|
||||
)
|
||||
|
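A hypothetical caller sketch showing the server-side clamp in action; `httpx` and the /matrix route prefix are assumptions, not confirmed by this diff.
# Hypothetical caller; httpx and the /matrix prefix are assumed.
import httpx

resp = httpx.get("http://localhost:8000/matrix/thoughts", params={"limit": 999})
resp.raise_for_status()
assert len(resp.json()) <= 50   # limit is clamped to _MAX_THOUGHT_LIMIT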
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Matrix Health Endpoint — backend capability discovery
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Health check cache (5-second TTL for capability checks)
|
||||
_health_cache: dict | None = None
|
||||
_health_cache_ts: float = 0.0
|
||||
_HEALTH_CACHE_TTL = 5.0
|
||||
|
||||
|
||||
def _check_capability_thinking() -> bool:
|
||||
"""Check if thinking engine is available."""
|
||||
try:
|
||||
from timmy.thinking import thinking_engine
|
||||
|
||||
# Check if the engine has been initialized (has a db path)
|
||||
return hasattr(thinking_engine, "_db") and thinking_engine._db is not None
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _check_capability_memory() -> bool:
|
||||
"""Check if memory system is available."""
|
||||
try:
|
||||
from timmy.memory_system import HOT_MEMORY_PATH
|
||||
|
||||
return HOT_MEMORY_PATH.exists()
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _check_capability_bark() -> bool:
|
||||
"""Check if bark production is available."""
|
||||
try:
|
||||
from infrastructure.presence import produce_bark
|
||||
|
||||
return callable(produce_bark)
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _check_capability_familiar() -> bool:
|
||||
"""Check if familiar (Pip) is available."""
|
||||
try:
|
||||
from timmy.familiar import pip_familiar
|
||||
|
||||
return pip_familiar is not None
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _check_capability_lightning() -> bool:
|
||||
"""Check if Lightning payments are available."""
|
||||
# Lightning is currently disabled per health.py
|
||||
# Returns False until properly re-implemented
|
||||
return False
|
||||
|
||||
|
||||
def _build_matrix_health_response() -> dict[str, Any]:
|
||||
"""Build the Matrix health response with capability checks.
|
||||
|
||||
Performs lightweight checks (<100ms total) to determine which features
|
||||
are available. Returns 200 even if some capabilities are degraded.
|
||||
"""
|
||||
capabilities = {
|
||||
"thinking": _check_capability_thinking(),
|
||||
"memory": _check_capability_memory(),
|
||||
"bark": _check_capability_bark(),
|
||||
"familiar": _check_capability_familiar(),
|
||||
"lightning": _check_capability_lightning(),
|
||||
}
|
||||
|
||||
# Status is ok if core capabilities (thinking, memory, bark) are available
|
||||
core_caps = ["thinking", "memory", "bark"]
|
||||
core_available = all(capabilities[c] for c in core_caps)
|
||||
status = "ok" if core_available else "degraded"
|
||||
|
||||
return {
|
||||
"status": status,
|
||||
"version": "1.0.0",
|
||||
"capabilities": capabilities,
|
||||
}
|
||||
|
||||
|
||||
@matrix_router.get("/health")
|
||||
async def get_matrix_health() -> JSONResponse:
|
||||
"""Return health status and capability availability for Matrix frontend.
|
||||
|
||||
This endpoint allows the Matrix frontend to discover what backend
|
||||
capabilities are available so it can show/hide UI elements:
|
||||
- thinking: Show thought bubbles if enabled
|
||||
- memory: Show crystal ball memory search if available
|
||||
- bark: Enable visitor chat responses
|
||||
- familiar: Show Pip the familiar
|
||||
- lightning: Enable payment features
|
||||
|
||||
Response time is <100ms (no heavy checks). Returns 200 even if
|
||||
some capabilities are degraded.
|
||||
|
||||
Response:
|
||||
- status: "ok" or "degraded"
|
||||
- version: API version string
|
||||
- capabilities: dict of feature:bool
|
||||
"""
|
||||
response = _build_matrix_health_response()
|
||||
status_code = 200 # Always 200, even if degraded
|
||||
|
||||
return JSONResponse(
|
||||
content=response,
|
||||
status_code=status_code,
|
||||
headers={"Cache-Control": "no-cache, no-store"},
|
||||
)
|
||||
|
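Note that _health_cache, _health_cache_ts, and _HEALTH_CACHE_TTL are declared above but not consulted by the handler in this diff. One plausible wiring, as a sketch only:
# Sketch only: how the declared 5-second cache could wrap the builder.
def _cached_health_response() -> dict[str, Any]:
    global _health_cache, _health_cache_ts
    now = time.monotonic()
    if _health_cache is not None and now - _health_cache_ts < _HEALTH_CACHE_TTL:
        return _health_cache
    _health_cache = _build_matrix_health_response()
    _health_cache_ts = now
    return _health_cache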
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Matrix Memory Search Endpoint — visitors query Timmy's memory
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Rate limiting: 1 search per 5 seconds per IP
|
||||
_MEMORY_SEARCH_RATE_LIMIT_SECONDS = 5
|
||||
_memory_search_last_request: dict[str, float] = {}
|
||||
_MAX_MEMORY_RESULTS = 5
|
||||
_MAX_MEMORY_TEXT_LENGTH = 200
|
||||
|
||||
|
||||
def _get_client_ip(request: Request) -> str:
|
||||
"""Extract client IP from request, respecting X-Forwarded-For header."""
|
||||
# Check for forwarded IP (when behind proxy)
|
||||
forwarded = request.headers.get("X-Forwarded-For")
|
||||
if forwarded:
|
||||
# Take the first IP in the chain
|
||||
return forwarded.split(",")[0].strip()
|
||||
# Fall back to direct client IP
|
||||
if request.client:
|
||||
return request.client.host
|
||||
return "unknown"
|
||||
|
||||
|
||||
def _build_matrix_memory_response(
|
||||
memories: list,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Build the Matrix memory search response.
|
||||
|
||||
Formats memory entries for Matrix display:
|
||||
- text: truncated to 200 characters
|
||||
- relevance: 0-1 score from relevance_score
|
||||
- created_at: ISO-8601 timestamp
|
||||
- context_type: the memory type
|
||||
|
||||
Results are capped at _MAX_MEMORY_RESULTS.
|
||||
"""
|
||||
results = []
|
||||
for mem in memories[:_MAX_MEMORY_RESULTS]:
|
||||
text = mem.content
|
||||
if len(text) > _MAX_MEMORY_TEXT_LENGTH:
|
||||
text = text[:_MAX_MEMORY_TEXT_LENGTH] + "..."
|
||||
|
||||
results.append(
|
||||
{
|
||||
"text": text,
|
||||
"relevance": round(mem.relevance_score or 0.0, 4),
|
||||
"created_at": mem.timestamp,
|
||||
"context_type": mem.context_type,
|
||||
}
|
||||
)
|
||||
return results
|
||||
|
||||
|
||||
@matrix_router.get("/memory/search")
|
||||
async def get_matrix_memory_search(request: Request, q: str | None = None) -> JSONResponse:
|
||||
"""Search Timmy's memory for relevant snippets.
|
||||
|
||||
Allows Matrix visitors to query Timmy's memory ("what do you remember
|
||||
about sovereignty?"). Results appear as floating crystal-ball text
|
||||
in the Workshop room.
|
||||
|
||||
Query params:
|
||||
- q: Search query text (required)
|
||||
|
||||
Response: JSON array of memory objects:
|
||||
- text: Memory content (truncated to 200 chars)
|
||||
- relevance: Similarity score 0-1
|
||||
- created_at: ISO-8601 timestamp
|
||||
- context_type: Memory type (conversation, fact, etc.)
|
||||
|
||||
Rate limited to 1 search per 5 seconds per IP.
|
||||
|
||||
Returns:
|
||||
- 200: JSON array of memory results (max 5)
|
||||
- 400: Missing or empty query parameter
|
||||
- 429: Rate limit exceeded
|
||||
"""
|
||||
# Validate query parameter
|
||||
query = q.strip() if q else ""
|
||||
if not query:
|
||||
return JSONResponse(
|
||||
status_code=400,
|
||||
content={"error": "Query parameter 'q' is required"},
|
||||
)
|
||||
|
||||
# Rate limiting check by IP
|
||||
client_ip = _get_client_ip(request)
|
||||
now = time.time()
|
||||
last_request = _memory_search_last_request.get(client_ip, 0)
|
||||
time_since_last = now - last_request
|
||||
|
||||
if time_since_last < _MEMORY_SEARCH_RATE_LIMIT_SECONDS:
|
||||
retry_after = _MEMORY_SEARCH_RATE_LIMIT_SECONDS - time_since_last
|
||||
return JSONResponse(
|
||||
status_code=429,
|
||||
content={"error": "Rate limit exceeded. Try again later."},
|
||||
headers={"Retry-After": str(int(retry_after) + 1)},
|
||||
)
|
||||
|
||||
# Record this request
|
||||
_memory_search_last_request[client_ip] = now
|
||||
|
||||
# Search memories
|
||||
try:
|
||||
memories = search_memories(query, limit=_MAX_MEMORY_RESULTS)
|
||||
results = _build_matrix_memory_response(memories)
|
||||
except Exception as exc:
|
||||
logger.warning("Memory search failed: %s", exc)
|
||||
results = []
|
||||
|
||||
return JSONResponse(
|
||||
content=results,
|
||||
headers={"Cache-Control": "no-cache, no-store"},
|
||||
)
|
||||
|
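A hypothetical back-to-back call illustrating the limiter; `httpx` and the route path are assumptions, and it presumes no request from this client in the previous 5 s.
# Hypothetical caller; httpx and the /matrix prefix are assumed.
import httpx

base = "http://localhost:8000/matrix/memory/search"
first = httpx.get(base, params={"q": "sovereignty"})
second = httpx.get(base, params={"q": "sovereignty"})
assert first.status_code == 200
assert second.status_code == 429                # within the 5 s window
print(second.headers["Retry-After"])            # whole seconds to wait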
||||
@@ -21,6 +21,11 @@
|
||||
</div>
|
||||
{% endcall %}
|
||||
|
||||
<!-- Daily Run Metrics (HTMX polled) -->
|
||||
{% call panel("DAILY RUN", hx_get="/daily-run/panel", hx_trigger="every 60s") %}
|
||||
<div class="mc-loading-placeholder">LOADING...</div>
|
||||
{% endcall %}
|
||||
|
||||
</div>
|
||||
|
||||
<!-- Main panel — swappable via HTMX; defaults to Timmy on load -->
|
||||
|
||||
54
src/dashboard/templates/partials/daily_run_panel.html
Normal file
@@ -0,0 +1,54 @@
|
||||
<div class="card-header mc-panel-header">// DAILY RUN METRICS</div>
|
||||
<div class="card-body p-3">
|
||||
{% if not gitea_available %}
|
||||
<div class="mc-muted" style="font-size: 0.85rem; padding: 8px 0;">
|
||||
<span style="color: var(--amber);">⚠</span> Gitea API unavailable
|
||||
</div>
|
||||
{% else %}
|
||||
{% set m = metrics %}
|
||||
|
||||
<!-- Sessions summary -->
|
||||
<div class="dr-section" style="margin-bottom: 16px;">
|
||||
<div class="dr-row" style="display: flex; justify-content: space-between; align-items: center; margin-bottom: 8px;">
|
||||
<span class="dr-label" style="font-size: 0.85rem; color: var(--text-dim);">Sessions ({{ m.lookback_days }}d)</span>
|
||||
<a href="{{ logbook_url }}" target="_blank" class="dr-link" style="font-size: 0.75rem; color: var(--green); text-decoration: none;">
|
||||
Logbook →
|
||||
</a>
|
||||
</div>
|
||||
<div class="dr-stat" style="display: flex; align-items: baseline; gap: 8px;">
|
||||
<span class="dr-value" style="font-size: 1.5rem; font-weight: 600; color: var(--text-bright);">{{ m.sessions_completed }}</span>
|
||||
<span class="dr-trend" style="font-size: 0.9rem; color: {{ m.sessions_trend_color }};">{{ m.sessions_trend }}</span>
|
||||
<span class="dr-prev" style="font-size: 0.75rem; color: var(--text-dim);">vs {{ m.sessions_previous }} prev</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Layer breakdown -->
|
||||
<div class="dr-section">
|
||||
<div class="dr-label" style="font-size: 0.85rem; color: var(--text-dim); margin-bottom: 8px;">Issues by Layer</div>
|
||||
<div class="dr-layers" style="display: flex; flex-direction: column; gap: 6px;">
|
||||
{% for layer in m.layers %}
|
||||
<div class="dr-layer-row" style="display: flex; justify-content: space-between; align-items: center;">
|
||||
<a href="{{ layer_urls[layer.name] }}" target="_blank" class="dr-layer-name" style="font-size: 0.8rem; color: var(--text); text-decoration: none; text-transform: capitalize;">
|
||||
{{ layer.name.replace('-', ' ') }}
|
||||
</a>
|
||||
<div class="dr-layer-stat" style="display: flex; align-items: center; gap: 6px;">
|
||||
<span class="dr-layer-value" style="font-size: 0.9rem; font-weight: 500; color: var(--text-bright);">{{ layer.current_count }}</span>
|
||||
<span class="dr-layer-trend" style="font-size: 0.75rem; color: {{ layer.trend_color }}; width: 18px; text-align: center;">{{ layer.trend }}</span>
|
||||
</div>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Total touched -->
|
||||
<div class="dr-section" style="margin-top: 12px; padding-top: 12px; border-top: 1px solid var(--border);">
|
||||
<div class="dr-row" style="display: flex; justify-content: space-between; align-items: center;">
|
||||
<span class="dr-label" style="font-size: 0.8rem; color: var(--text-dim);">Total Issues Touched</span>
|
||||
<div class="dr-total-stat" style="display: flex; align-items: center; gap: 6px;">
|
||||
<span class="dr-total-value" style="font-size: 1rem; font-weight: 600; color: var(--text-bright);">{{ m.total_touched_current }}</span>
|
||||
<span class="dr-total-prev" style="font-size: 0.7rem; color: var(--text-dim);">/ {{ m.total_touched_previous }} prev</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
80
src/dashboard/templates/partials/quests_panel.html
Normal file
@@ -0,0 +1,80 @@
|
||||
{% from "macros.html" import panel %}
|
||||
|
||||
<div class="quests-summary mb-4">
|
||||
<div class="row">
|
||||
<div class="col-md-4">
|
||||
<div class="stat-card">
|
||||
<div class="stat-value">{{ total_tokens }}</div>
|
||||
<div class="stat-label">Tokens Earned</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-md-4">
|
||||
<div class="stat-card">
|
||||
<div class="stat-value">{{ completed_count }}</div>
|
||||
<div class="stat-label">Quests Completed</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-md-4">
|
||||
<div class="stat-card">
|
||||
<div class="stat-value">{{ quests|selectattr('enabled', 'equalto', true)|list|length }}</div>
|
||||
<div class="stat-label">Active Quests</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="quests-list">
|
||||
{% for quest in quests %}
|
||||
{% if quest.enabled %}
|
||||
<div class="quest-card quest-status-{{ quest.status }}">
|
||||
<div class="quest-header">
|
||||
<h5 class="quest-name">{{ quest.name }}</h5>
|
||||
<span class="quest-reward">+{{ quest.reward_tokens }} ⚡</span>
|
||||
</div>
|
||||
<p class="quest-description">{{ quest.description }}</p>
|
||||
|
||||
<div class="quest-progress">
|
||||
{% if quest.status == 'completed' %}
|
||||
<div class="progress">
|
||||
<div class="progress-bar bg-success" style="width: 100%"></div>
|
||||
</div>
|
||||
<span class="quest-status-badge completed">Completed</span>
|
||||
{% elif quest.status == 'claimed' %}
|
||||
<div class="progress">
|
||||
<div class="progress-bar bg-success" style="width: 100%"></div>
|
||||
</div>
|
||||
<span class="quest-status-badge claimed">Reward Claimed</span>
|
||||
{% elif quest.on_cooldown %}
|
||||
<div class="progress">
|
||||
<div class="progress-bar bg-secondary" style="width: 100%"></div>
|
||||
</div>
|
||||
<span class="quest-status-badge cooldown">
|
||||
Cooldown: {{ quest.cooldown_hours_remaining }}h remaining
|
||||
</span>
|
||||
{% else %}
|
||||
<div class="progress">
|
||||
<div class="progress-bar" style="width: {{ (quest.current_value / quest.target_value * 100)|int }}%"></div>
|
||||
</div>
|
||||
<span class="quest-progress-text">{{ quest.current_value }} / {{ quest.target_value }}</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<div class="quest-meta">
|
||||
<span class="quest-type">{{ quest.type }}</span>
|
||||
{% if quest.repeatable %}
|
||||
<span class="quest-repeatable">↻ Repeatable</span>
|
||||
{% endif %}
|
||||
{% if quest.completion_count > 0 %}
|
||||
<span class="quest-completions">Completed {{ quest.completion_count }} time{% if quest.completion_count != 1 %}s{% endif %}</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</div>
|
||||
|
||||
{% if not quests|selectattr('enabled', 'equalto', true)|list|length %}
|
||||
<div class="alert alert-info">
|
||||
No active quests available. Check back later or contact an administrator.
|
||||
</div>
|
||||
{% endif %}
|
||||
50
src/dashboard/templates/quests.html
Normal file
@@ -0,0 +1,50 @@
|
||||
{% extends "base.html" %}
|
||||
|
||||
{% block title %}Quests — Mission Control{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
<div class="container-fluid">
|
||||
<div class="row">
|
||||
<div class="col-12">
|
||||
<h1 class="mc-title">Token Quests</h1>
|
||||
<p class="mc-subtitle">Complete quests to earn bonus tokens</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row mt-4">
|
||||
<div class="col-md-8">
|
||||
<div id="quests-panel" hx-get="/quests/panel/{{ agent_id }}" hx-trigger="load, every 30s">
|
||||
<div class="mc-loading">Loading quests...</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="col-md-4">
|
||||
<div class="card mc-panel">
|
||||
<div class="card-header">
|
||||
<h5 class="mb-0">Leaderboard</h5>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<div id="leaderboard" hx-get="/quests/api/leaderboard" hx-trigger="load, every 60s">
|
||||
<div class="mc-loading">Loading leaderboard...</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="card mc-panel mt-4">
|
||||
<div class="card-header">
|
||||
<h5 class="mb-0">About Quests</h5>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<p class="mb-2">Quests are special objectives that reward tokens upon completion.</p>
|
||||
<ul class="mc-list mb-0">
|
||||
<li>Complete Daily Run sessions</li>
|
||||
<li>Close flaky-test issues</li>
|
||||
<li>Reduce P1 issue backlog</li>
|
||||
<li>Improve documentation</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
266
src/infrastructure/matrix_config.py
Normal file
@@ -0,0 +1,266 @@
|
||||
"""Matrix configuration loader utility.
|
||||
|
||||
Provides a typed dataclass for Matrix world configuration and a loader
|
||||
that fetches settings from YAML with sensible defaults.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import yaml
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PointLight:
|
||||
"""A single point light in the Matrix world."""
|
||||
|
||||
color: str = "#FFFFFF"
|
||||
intensity: float = 1.0
|
||||
position: dict[str, float] = field(default_factory=lambda: {"x": 0, "y": 0, "z": 0})
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict[str, Any]) -> "PointLight":
|
||||
"""Create a PointLight from a dictionary with defaults."""
|
||||
return cls(
|
||||
color=data.get("color", "#FFFFFF"),
|
||||
intensity=data.get("intensity", 1.0),
|
||||
position=data.get("position", {"x": 0, "y": 0, "z": 0}),
|
||||
)
|
||||
|
||||
|
||||
def _default_point_lights_factory() -> list[PointLight]:
|
||||
"""Factory function for default point lights."""
|
||||
return [
|
||||
PointLight(
|
||||
color="#FFAA55", # Warm amber (Workshop)
|
||||
intensity=1.2,
|
||||
position={"x": 0, "y": 5, "z": 0},
|
||||
),
|
||||
PointLight(
|
||||
color="#3B82F6", # Cool blue (Matrix)
|
||||
intensity=0.8,
|
||||
position={"x": -5, "y": 3, "z": -5},
|
||||
),
|
||||
PointLight(
|
||||
color="#A855F7", # Purple accent
|
||||
intensity=0.6,
|
||||
position={"x": 5, "y": 3, "z": 5},
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
@dataclass
|
||||
class LightingConfig:
|
||||
"""Lighting configuration for the Matrix world."""
|
||||
|
||||
ambient_color: str = "#FFAA55" # Warm amber (Workshop warmth)
|
||||
ambient_intensity: float = 0.5
|
||||
point_lights: list[PointLight] = field(default_factory=_default_point_lights_factory)
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict[str, Any] | None) -> "LightingConfig":
|
||||
"""Create a LightingConfig from a dictionary with defaults."""
|
||||
if data is None:
|
||||
data = {}
|
||||
|
||||
point_lights_data = data.get("point_lights", [])
|
||||
point_lights = (
|
||||
[PointLight.from_dict(pl) for pl in point_lights_data]
|
||||
if point_lights_data
|
||||
else _default_point_lights_factory()
|
||||
)
|
||||
|
||||
return cls(
|
||||
ambient_color=data.get("ambient_color", "#FFAA55"),
|
||||
ambient_intensity=data.get("ambient_intensity", 0.5),
|
||||
point_lights=point_lights,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class EnvironmentConfig:
|
||||
"""Environment settings for the Matrix world."""
|
||||
|
||||
rain_enabled: bool = False
|
||||
starfield_enabled: bool = True
|
||||
fog_color: str = "#0f0f23"
|
||||
fog_density: float = 0.02
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict[str, Any] | None) -> "EnvironmentConfig":
|
||||
"""Create an EnvironmentConfig from a dictionary with defaults."""
|
||||
if data is None:
|
||||
data = {}
|
||||
return cls(
|
||||
rain_enabled=data.get("rain_enabled", False),
|
||||
starfield_enabled=data.get("starfield_enabled", True),
|
||||
fog_color=data.get("fog_color", "#0f0f23"),
|
||||
fog_density=data.get("fog_density", 0.02),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class FeaturesConfig:
|
||||
"""Feature toggles for the Matrix world."""
|
||||
|
||||
chat_enabled: bool = True
|
||||
visitor_avatars: bool = True
|
||||
pip_familiar: bool = True
|
||||
workshop_portal: bool = True
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict[str, Any] | None) -> "FeaturesConfig":
|
||||
"""Create a FeaturesConfig from a dictionary with defaults."""
|
||||
if data is None:
|
||||
data = {}
|
||||
return cls(
|
||||
chat_enabled=data.get("chat_enabled", True),
|
||||
visitor_avatars=data.get("visitor_avatars", True),
|
||||
pip_familiar=data.get("pip_familiar", True),
|
||||
workshop_portal=data.get("workshop_portal", True),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AgentConfig:
|
||||
"""Configuration for a single Matrix agent."""
|
||||
|
||||
name: str = ""
|
||||
role: str = ""
|
||||
enabled: bool = True
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict[str, Any]) -> "AgentConfig":
|
||||
"""Create an AgentConfig from a dictionary with defaults."""
|
||||
return cls(
|
||||
name=data.get("name", ""),
|
||||
role=data.get("role", ""),
|
||||
enabled=data.get("enabled", True),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AgentsConfig:
|
||||
"""Agent registry configuration."""
|
||||
|
||||
default_count: int = 5
|
||||
max_count: int = 20
|
||||
agents: list[AgentConfig] = field(default_factory=list)
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict[str, Any] | None) -> "AgentsConfig":
|
||||
"""Create an AgentsConfig from a dictionary with defaults."""
|
||||
if data is None:
|
||||
data = {}
|
||||
|
||||
agents_data = data.get("agents", [])
|
||||
agents = [AgentConfig.from_dict(a) for a in agents_data] if agents_data else []
|
||||
|
||||
return cls(
|
||||
default_count=data.get("default_count", 5),
|
||||
max_count=data.get("max_count", 20),
|
||||
agents=agents,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class MatrixConfig:
|
||||
"""Complete Matrix world configuration.
|
||||
|
||||
Combines lighting, environment, features, and agent settings
|
||||
into a single configuration object.
|
||||
"""
|
||||
|
||||
lighting: LightingConfig = field(default_factory=LightingConfig)
|
||||
environment: EnvironmentConfig = field(default_factory=EnvironmentConfig)
|
||||
features: FeaturesConfig = field(default_factory=FeaturesConfig)
|
||||
agents: AgentsConfig = field(default_factory=AgentsConfig)
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict[str, Any] | None) -> "MatrixConfig":
|
||||
"""Create a MatrixConfig from a dictionary with defaults for missing sections."""
|
||||
if data is None:
|
||||
data = {}
|
||||
return cls(
|
||||
lighting=LightingConfig.from_dict(data.get("lighting")),
|
||||
environment=EnvironmentConfig.from_dict(data.get("environment")),
|
||||
features=FeaturesConfig.from_dict(data.get("features")),
|
||||
agents=AgentsConfig.from_dict(data.get("agents")),
|
||||
)
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
"""Convert the configuration to a plain dictionary."""
|
||||
return {
|
||||
"lighting": {
|
||||
"ambient_color": self.lighting.ambient_color,
|
||||
"ambient_intensity": self.lighting.ambient_intensity,
|
||||
"point_lights": [
|
||||
{
|
||||
"color": pl.color,
|
||||
"intensity": pl.intensity,
|
||||
"position": pl.position,
|
||||
}
|
||||
for pl in self.lighting.point_lights
|
||||
],
|
||||
},
|
||||
"environment": {
|
||||
"rain_enabled": self.environment.rain_enabled,
|
||||
"starfield_enabled": self.environment.starfield_enabled,
|
||||
"fog_color": self.environment.fog_color,
|
||||
"fog_density": self.environment.fog_density,
|
||||
},
|
||||
"features": {
|
||||
"chat_enabled": self.features.chat_enabled,
|
||||
"visitor_avatars": self.features.visitor_avatars,
|
||||
"pip_familiar": self.features.pip_familiar,
|
||||
"workshop_portal": self.features.workshop_portal,
|
||||
},
|
||||
"agents": {
|
||||
"default_count": self.agents.default_count,
|
||||
"max_count": self.agents.max_count,
|
||||
"agents": [
|
||||
{"name": a.name, "role": a.role, "enabled": a.enabled}
|
||||
for a in self.agents.agents
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def load_from_yaml(path: str | Path) -> MatrixConfig:
|
||||
"""Load Matrix configuration from a YAML file.
|
||||
|
||||
Missing keys are filled with sensible defaults. If the file
|
||||
cannot be read or parsed, returns a fully default configuration.
|
||||
|
||||
Args:
|
||||
path: Path to the YAML configuration file.
|
||||
|
||||
Returns:
|
||||
A MatrixConfig instance with loaded or default values.
|
||||
"""
|
||||
path = Path(path)
|
||||
|
||||
if not path.exists():
|
||||
logger.warning("Matrix config file not found: %s, using defaults", path)
|
||||
return MatrixConfig()
|
||||
|
||||
try:
|
||||
with open(path, encoding="utf-8") as f:
|
||||
raw_data = yaml.safe_load(f)
|
||||
|
||||
if not isinstance(raw_data, dict):
|
||||
logger.warning("Matrix config invalid format, using defaults")
|
||||
return MatrixConfig()
|
||||
|
||||
return MatrixConfig.from_dict(raw_data)
|
||||
|
||||
except yaml.YAMLError as exc:
|
||||
logger.warning("Matrix config YAML parse error: %s, using defaults", exc)
|
||||
return MatrixConfig()
|
||||
except OSError as exc:
|
||||
logger.warning("Matrix config read error: %s, using defaults", exc)
|
||||
return MatrixConfig()
|
||||
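Usage sketch for the loader; the relative path assumes the process runs from the repo root.
# Usage sketch: round-trip a config through the loader and serializer.
cfg = load_from_yaml("config/matrix.yaml")      # defaults if missing/invalid
payload = cfg.to_dict()                         # plain dict, JSON-serializable
print(payload["lighting"]["ambient_color"])     # "#FFAA55" unless overridden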
333
src/infrastructure/presence.py
Normal file
@@ -0,0 +1,333 @@
|
||||
"""Presence state serializer — transforms ADR-023 presence dicts for consumers.
|
||||
|
||||
Converts the raw presence schema (version, liveness, mood, energy, etc.)
|
||||
into the camelCase world-state payload consumed by the Workshop 3D renderer
|
||||
and WebSocket gateway.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from datetime import UTC, datetime
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Default Pip familiar state (used when familiar module unavailable)
|
||||
DEFAULT_PIP_STATE = {
|
||||
"name": "Pip",
|
||||
"mood": "sleepy",
|
||||
"energy": 0.5,
|
||||
"color": "0x00b450", # emerald green
|
||||
"trail_color": "0xdaa520", # gold
|
||||
}
|
||||
|
||||
|
||||
def _get_familiar_state() -> dict:
|
||||
"""Get Pip familiar state from familiar module, with graceful fallback.
|
||||
|
||||
Returns a dict with name, mood, energy, color, and trail_color.
|
||||
Falls back to default state if familiar module unavailable or raises.
|
||||
"""
|
||||
try:
|
||||
from timmy.familiar import pip_familiar
|
||||
|
||||
snapshot = pip_familiar.snapshot()
|
||||
# Map PipSnapshot fields to the expected agent_state format
|
||||
return {
|
||||
"name": snapshot.name,
|
||||
"mood": snapshot.state,
|
||||
"energy": DEFAULT_PIP_STATE["energy"], # Pip doesn't track energy yet
|
||||
"color": DEFAULT_PIP_STATE["color"],
|
||||
"trail_color": DEFAULT_PIP_STATE["trail_color"],
|
||||
}
|
||||
except Exception as exc:
|
||||
logger.warning("Familiar state unavailable, using default: %s", exc)
|
||||
return DEFAULT_PIP_STATE.copy()
|
||||
|
||||
|
||||
# Valid bark styles for Matrix protocol
|
||||
BARK_STYLES = {"speech", "thought", "whisper", "shout"}
|
||||
|
||||
|
||||
def produce_bark(agent_id: str, text: str, reply_to: str | None = None, style: str = "speech") -> dict:
|
||||
"""Format a chat response as a Matrix bark message.
|
||||
|
||||
Barks appear as floating text above agents in the Matrix 3D world with
|
||||
typing animation. This function formats the text for the Matrix protocol.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
agent_id:
|
||||
Unique identifier for the agent (e.g. ``"timmy"``).
|
||||
text:
|
||||
The chat response text to display as a bark.
|
||||
reply_to:
|
||||
Optional message ID or reference this bark is replying to.
|
||||
style:
|
||||
Visual style of the bark. One of: "speech" (default), "thought",
|
||||
"whisper", "shout". Invalid styles fall back to "speech".
|
||||
|
||||
Returns
|
||||
-------
|
||||
dict
|
||||
Bark message with keys ``type``, ``agent_id``, ``data`` (containing
|
||||
``text``, ``reply_to``, ``style``), and ``ts``.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> produce_bark("timmy", "Hello world!")
|
||||
{
|
||||
"type": "bark",
|
||||
"agent_id": "timmy",
|
||||
"data": {"text": "Hello world!", "reply_to": None, "style": "speech"},
|
||||
"ts": 1742529600,
|
||||
}
|
||||
"""
|
||||
# Validate and normalize style
|
||||
if style not in BARK_STYLES:
|
||||
style = "speech"
|
||||
|
||||
# Truncate text to 280 characters (bark, not essay)
|
||||
truncated_text = text[:280] if text else ""
|
||||
|
||||
return {
|
||||
"type": "bark",
|
||||
"agent_id": agent_id,
|
||||
"data": {
|
||||
"text": truncated_text,
|
||||
"reply_to": reply_to,
|
||||
"style": style,
|
||||
},
|
||||
"ts": int(time.time()),
|
||||
}
|
||||
|
||||
|
||||
def produce_thought(
|
||||
agent_id: str, thought_text: str, thought_id: int, chain_id: str | None = None
|
||||
) -> dict:
|
||||
"""Format a thinking engine thought as a Matrix thought message.
|
||||
|
||||
Thoughts appear as subtle floating text in the 3D world, streaming from
|
||||
Timmy's thinking engine (/thinking/api). This function wraps thoughts in
|
||||
Matrix protocol format.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
agent_id:
|
||||
Unique identifier for the agent (e.g. ``"timmy"``).
|
||||
thought_text:
|
||||
The thought text to display. Truncated to 500 characters.
|
||||
thought_id:
|
||||
Unique identifier for this thought (sequence number).
|
||||
chain_id:
|
||||
Optional chain identifier grouping related thoughts.
|
||||
|
||||
Returns
|
||||
-------
|
||||
dict
|
||||
Thought message with keys ``type``, ``agent_id``, ``data`` (containing
|
||||
``text``, ``thought_id``, ``chain_id``), and ``ts``.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> produce_thought("timmy", "Considering the options...", 42, "chain-123")
|
||||
{
|
||||
"type": "thought",
|
||||
"agent_id": "timmy",
|
||||
"data": {"text": "Considering the options...", "thought_id": 42, "chain_id": "chain-123"},
|
||||
"ts": 1742529600,
|
||||
}
|
||||
"""
|
||||
# Truncate text to 500 characters (thoughts can be longer than barks)
|
||||
truncated_text = thought_text[:500] if thought_text else ""
|
||||
|
||||
return {
|
||||
"type": "thought",
|
||||
"agent_id": agent_id,
|
||||
"data": {
|
||||
"text": truncated_text,
|
||||
"thought_id": thought_id,
|
||||
"chain_id": chain_id,
|
||||
},
|
||||
"ts": int(time.time()),
|
||||
}
|
||||
|
||||
|
||||
def serialize_presence(presence: dict) -> dict:
|
||||
"""Transform an ADR-023 presence dict into the world-state API shape.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
presence:
|
||||
Raw presence dict as written by
|
||||
:func:`~timmy.workshop_state.get_state_dict` or read from
|
||||
``~/.timmy/presence.json``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
dict
|
||||
CamelCase world-state payload with ``timmyState``, ``familiar``,
|
||||
``activeThreads``, ``recentEvents``, ``concerns``, ``visitorPresent``,
|
||||
``updatedAt``, and ``version`` keys.
|
||||
"""
|
||||
return {
|
||||
"timmyState": {
|
||||
"mood": presence.get("mood", "calm"),
|
||||
"activity": presence.get("current_focus", "idle"),
|
||||
"energy": presence.get("energy", 0.5),
|
||||
"confidence": presence.get("confidence", 0.7),
|
||||
},
|
||||
"familiar": presence.get("familiar"),
|
||||
"activeThreads": presence.get("active_threads", []),
|
||||
"recentEvents": presence.get("recent_events", []),
|
||||
"concerns": presence.get("concerns", []),
|
||||
"visitorPresent": False,
|
||||
"updatedAt": presence.get("liveness", datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")),
|
||||
"version": presence.get("version", 1),
|
||||
}
|
||||
|
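A worked example of the mapping, with illustrative input values:
# Illustrative input/output for serialize_presence.
presence = {"mood": "curious", "current_focus": "reviewing a PR", "energy": 0.8}
state = serialize_presence(presence)
assert state["timmyState"]["activity"] == "reviewing a PR"
assert state["timmyState"]["confidence"] == 0.7   # default when absent
assert state["visitorPresent"] is False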
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Status mapping: ADR-023 current_focus → Matrix agent status
|
||||
# ---------------------------------------------------------------------------
|
||||
_STATUS_KEYWORDS: dict[str, str] = {
|
||||
"thinking": "thinking",
|
||||
"speaking": "speaking",
|
||||
"talking": "speaking",
|
||||
"idle": "idle",
|
||||
}
|
||||
|
||||
|
||||
def _derive_status(current_focus: str) -> str:
|
||||
"""Map a free-text current_focus value to a Matrix status enum.
|
||||
|
||||
Returns one of: online, idle, thinking, speaking.
|
||||
"""
|
||||
focus_lower = current_focus.lower()
|
||||
for keyword, status in _STATUS_KEYWORDS.items():
|
||||
if keyword in focus_lower:
|
||||
return status
|
||||
if current_focus and current_focus != "idle":
|
||||
return "online"
|
||||
return "idle"
|
||||
|
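A few worked cases of the keyword mapping:
# Keyword mapping examples (free text in, status enum out).
assert _derive_status("thinking about tokens") == "thinking"
assert _derive_status("refactoring the loader") == "online"   # busy, no keyword
assert _derive_status("idle") == "idle"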
||||
|
||||
def produce_agent_state(agent_id: str, presence: dict) -> dict:
|
||||
"""Build a Matrix-compatible ``agent_state`` message from presence data.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
agent_id:
|
||||
Unique identifier for the agent (e.g. ``"timmy"``).
|
||||
presence:
|
||||
Raw ADR-023 presence dict.
|
||||
|
||||
Returns
|
||||
-------
|
||||
dict
|
||||
Message with keys ``type``, ``agent_id``, ``data``, and ``ts``.
|
||||
"""
|
||||
return {
|
||||
"type": "agent_state",
|
||||
"agent_id": agent_id,
|
||||
"data": {
|
||||
"display_name": presence.get("display_name", agent_id.title()),
|
||||
"role": presence.get("role", "assistant"),
|
||||
"status": _derive_status(presence.get("current_focus", "idle")),
|
||||
"mood": presence.get("mood", "calm"),
|
||||
"energy": presence.get("energy", 0.5),
|
||||
"bark": presence.get("bark", ""),
|
||||
"familiar": _get_familiar_state(),
|
||||
},
|
||||
"ts": int(time.time()),
|
||||
}
|
||||
|
||||
|
||||
def produce_system_status() -> dict:
|
||||
"""Generate a system_status message for the Matrix.
|
||||
|
||||
Returns a dict with system health metrics including agent count,
|
||||
visitor count, uptime, thinking engine status, and memory count.
|
||||
|
||||
Returns
|
||||
-------
|
||||
dict
|
||||
Message with keys ``type``, ``data`` (containing ``agents_online``,
|
||||
``visitors``, ``uptime_seconds``, ``thinking_active``, ``memory_count``),
|
||||
and ``ts``.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> produce_system_status()
|
||||
{
|
||||
"type": "system_status",
|
||||
"data": {
|
||||
"agents_online": 5,
|
||||
"visitors": 2,
|
||||
"uptime_seconds": 3600,
|
||||
"thinking_active": True,
|
||||
"memory_count": 150,
|
||||
},
|
||||
"ts": 1742529600,
|
||||
}
|
||||
"""
|
||||
# Count agents with status != offline
|
||||
agents_online = 0
|
||||
try:
|
||||
from timmy.agents.loader import list_agents
|
||||
|
||||
agents = list_agents()
|
||||
agents_online = sum(1 for a in agents if a.get("status", "") not in ("offline", ""))
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to count agents: %s", exc)
|
||||
|
||||
# Count visitors from WebSocket clients
|
||||
visitors = 0
|
||||
try:
|
||||
from dashboard.routes.world import _ws_clients
|
||||
|
||||
visitors = len(_ws_clients)
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to count visitors: %s", exc)
|
||||
|
||||
# Calculate uptime
|
||||
uptime_seconds = 0
|
||||
try:
|
||||
from config import APP_START_TIME
|
||||
|
||||
uptime_seconds = int((datetime.now(UTC) - APP_START_TIME).total_seconds())
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to calculate uptime: %s", exc)
|
||||
|
||||
# Check thinking engine status
|
||||
thinking_active = False
|
||||
try:
|
||||
from config import settings
|
||||
from timmy.thinking import thinking_engine
|
||||
|
||||
thinking_active = settings.thinking_enabled and thinking_engine is not None
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to check thinking status: %s", exc)
|
||||
|
||||
# Count memories in vector store
|
||||
memory_count = 0
|
||||
try:
|
||||
from timmy.memory_system import get_memory_stats
|
||||
|
||||
stats = get_memory_stats()
|
||||
memory_count = stats.get("total_entries", 0)
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to count memories: %s", exc)
|
||||
|
||||
return {
|
||||
"type": "system_status",
|
||||
"data": {
|
||||
"agents_online": agents_online,
|
||||
"visitors": visitors,
|
||||
"uptime_seconds": uptime_seconds,
|
||||
"thinking_active": thinking_active,
|
||||
"memory_count": memory_count,
|
||||
},
|
||||
"ts": int(time.time()),
|
||||
}
|
||||
261
src/infrastructure/protocol.py
Normal file
@@ -0,0 +1,261 @@
|
||||
"""Shared WebSocket message protocol for the Matrix frontend.
|
||||
|
||||
Defines all WebSocket message types as an enum and typed dataclasses
|
||||
with ``to_json()`` / ``from_json()`` helpers so every producer and the
|
||||
gateway speak the same language.
|
||||
|
||||
Message wire format
|
||||
-------------------
|
||||
.. code-block:: json
|
||||
|
||||
{"type": "agent_state", "agent_id": "timmy", "data": {...}, "ts": 1234567890}
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from enum import StrEnum
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MessageType(StrEnum):
|
||||
"""All WebSocket message types defined by the Matrix PROTOCOL.md."""
|
||||
|
||||
AGENT_STATE = "agent_state"
|
||||
VISITOR_STATE = "visitor_state"
|
||||
BARK = "bark"
|
||||
THOUGHT = "thought"
|
||||
SYSTEM_STATUS = "system_status"
|
||||
CONNECTION_ACK = "connection_ack"
|
||||
ERROR = "error"
|
||||
TASK_UPDATE = "task_update"
|
||||
MEMORY_FLASH = "memory_flash"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Base message
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
|
||||
class WSMessage:
|
||||
"""Base WebSocket message with common envelope fields."""
|
||||
|
||||
type: str
|
||||
ts: float = field(default_factory=time.time)
|
||||
|
||||
def to_json(self) -> str:
|
||||
"""Serialise the message to a JSON string."""
|
||||
return json.dumps(asdict(self))
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, raw: str) -> "WSMessage":
|
||||
"""Deserialise a JSON string into the correct message subclass.
|
||||
|
||||
Falls back to the base ``WSMessage`` when the ``type`` field is
|
||||
unrecognised.
|
||||
"""
|
||||
data = json.loads(raw)
|
||||
msg_type = data.get("type")
|
||||
sub = _REGISTRY.get(msg_type)
|
||||
if sub is not None:
|
||||
return sub.from_json(raw)
|
||||
return cls(**data)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Concrete message types
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
|
||||
class AgentStateMessage(WSMessage):
|
||||
"""State update for a single agent."""
|
||||
|
||||
type: str = field(default=MessageType.AGENT_STATE)
|
||||
agent_id: str = ""
|
||||
data: dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, raw: str) -> "AgentStateMessage":
|
||||
payload = json.loads(raw)
|
||||
return cls(
|
||||
type=payload.get("type", MessageType.AGENT_STATE),
|
||||
ts=payload.get("ts", time.time()),
|
||||
agent_id=payload.get("agent_id", ""),
|
||||
data=payload.get("data", {}),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class VisitorStateMessage(WSMessage):
|
||||
"""State update for a visitor / user session."""
|
||||
|
||||
type: str = field(default=MessageType.VISITOR_STATE)
|
||||
visitor_id: str = ""
|
||||
data: dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, raw: str) -> "VisitorStateMessage":
|
||||
payload = json.loads(raw)
|
||||
return cls(
|
||||
type=payload.get("type", MessageType.VISITOR_STATE),
|
||||
ts=payload.get("ts", time.time()),
|
||||
visitor_id=payload.get("visitor_id", ""),
|
||||
data=payload.get("data", {}),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class BarkMessage(WSMessage):
|
||||
"""A bark (chat-like utterance) from an agent."""
|
||||
|
||||
type: str = field(default=MessageType.BARK)
|
||||
agent_id: str = ""
|
||||
content: str = ""
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, raw: str) -> "BarkMessage":
|
||||
payload = json.loads(raw)
|
||||
return cls(
|
||||
type=payload.get("type", MessageType.BARK),
|
||||
ts=payload.get("ts", time.time()),
|
||||
agent_id=payload.get("agent_id", ""),
|
||||
content=payload.get("content", ""),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ThoughtMessage(WSMessage):
|
||||
"""An inner thought from an agent."""
|
||||
|
||||
type: str = field(default=MessageType.THOUGHT)
|
||||
agent_id: str = ""
|
||||
content: str = ""
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, raw: str) -> "ThoughtMessage":
|
||||
payload = json.loads(raw)
|
||||
return cls(
|
||||
type=payload.get("type", MessageType.THOUGHT),
|
||||
ts=payload.get("ts", time.time()),
|
||||
agent_id=payload.get("agent_id", ""),
|
||||
content=payload.get("content", ""),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class SystemStatusMessage(WSMessage):
|
||||
"""System-wide status broadcast."""
|
||||
|
||||
type: str = field(default=MessageType.SYSTEM_STATUS)
|
||||
status: str = ""
|
||||
data: dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, raw: str) -> "SystemStatusMessage":
|
||||
payload = json.loads(raw)
|
||||
return cls(
|
||||
type=payload.get("type", MessageType.SYSTEM_STATUS),
|
||||
ts=payload.get("ts", time.time()),
|
||||
status=payload.get("status", ""),
|
||||
data=payload.get("data", {}),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ConnectionAckMessage(WSMessage):
|
||||
"""Acknowledgement sent when a client connects."""
|
||||
|
||||
type: str = field(default=MessageType.CONNECTION_ACK)
|
||||
client_id: str = ""
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, raw: str) -> "ConnectionAckMessage":
|
||||
payload = json.loads(raw)
|
||||
return cls(
|
||||
type=payload.get("type", MessageType.CONNECTION_ACK),
|
||||
ts=payload.get("ts", time.time()),
|
||||
client_id=payload.get("client_id", ""),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ErrorMessage(WSMessage):
|
||||
"""Error message sent to a client."""
|
||||
|
||||
type: str = field(default=MessageType.ERROR)
|
||||
code: str = ""
|
||||
message: str = ""
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, raw: str) -> "ErrorMessage":
|
||||
payload = json.loads(raw)
|
||||
return cls(
|
||||
type=payload.get("type", MessageType.ERROR),
|
||||
ts=payload.get("ts", time.time()),
|
||||
code=payload.get("code", ""),
|
||||
message=payload.get("message", ""),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class TaskUpdateMessage(WSMessage):
|
||||
"""Update about a task (created, assigned, completed, etc.)."""
|
||||
|
||||
type: str = field(default=MessageType.TASK_UPDATE)
|
||||
task_id: str = ""
|
||||
status: str = ""
|
||||
data: dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, raw: str) -> "TaskUpdateMessage":
|
||||
payload = json.loads(raw)
|
||||
return cls(
|
||||
type=payload.get("type", MessageType.TASK_UPDATE),
|
||||
ts=payload.get("ts", time.time()),
|
||||
task_id=payload.get("task_id", ""),
|
||||
status=payload.get("status", ""),
|
||||
data=payload.get("data", {}),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class MemoryFlashMessage(WSMessage):
|
||||
"""A flash of memory — a recalled or stored memory event."""
|
||||
|
||||
type: str = field(default=MessageType.MEMORY_FLASH)
|
||||
agent_id: str = ""
|
||||
memory_key: str = ""
|
||||
content: str = ""
|
||||
|
||||
@classmethod
|
||||
def from_json(cls, raw: str) -> "MemoryFlashMessage":
|
||||
payload = json.loads(raw)
|
||||
return cls(
|
||||
type=payload.get("type", MessageType.MEMORY_FLASH),
|
||||
ts=payload.get("ts", time.time()),
|
||||
agent_id=payload.get("agent_id", ""),
|
||||
memory_key=payload.get("memory_key", ""),
|
||||
content=payload.get("content", ""),
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Registry for from_json dispatch
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_REGISTRY: dict[str, type[WSMessage]] = {
|
||||
MessageType.AGENT_STATE: AgentStateMessage,
|
||||
MessageType.VISITOR_STATE: VisitorStateMessage,
|
||||
MessageType.BARK: BarkMessage,
|
||||
MessageType.THOUGHT: ThoughtMessage,
|
||||
MessageType.SYSTEM_STATUS: SystemStatusMessage,
|
||||
MessageType.CONNECTION_ACK: ConnectionAckMessage,
|
||||
MessageType.ERROR: ErrorMessage,
|
||||
MessageType.TASK_UPDATE: TaskUpdateMessage,
|
||||
MessageType.MEMORY_FLASH: MemoryFlashMessage,
|
||||
}
|
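A round-trip sketch showing the registry dispatch:
# Round-trip sketch: serialize a concrete message, re-dispatch via _REGISTRY.
msg = BarkMessage(agent_id="timmy", content="Hello world!")
parsed = WSMessage.from_json(msg.to_json())   # "type" field picks the subclass
assert isinstance(parsed, BarkMessage)
assert parsed.content == "Hello world!"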
||||
166
src/infrastructure/visitor.py
Normal file
@@ -0,0 +1,166 @@
|
||||
"""Visitor state tracking for the Matrix frontend.
|
||||
|
||||
Tracks active visitors as they connect and move around the 3D world,
|
||||
and provides serialization for Matrix protocol broadcast messages.
|
||||
"""
|
||||
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import UTC, datetime
|
||||
|
||||
|
||||
@dataclass
|
||||
class VisitorState:
|
||||
"""State for a single visitor in the Matrix.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
visitor_id: Unique identifier for the visitor (client ID).
|
||||
display_name: Human-readable name shown above the visitor.
|
||||
position: 3D coordinates (x, y, z) in the world.
|
||||
rotation: Rotation angle in degrees (0-360).
|
||||
connected_at: ISO timestamp when the visitor connected.
|
||||
"""
|
||||
|
||||
visitor_id: str
|
||||
display_name: str = ""
|
||||
position: dict[str, float] = field(default_factory=lambda: {"x": 0.0, "y": 0.0, "z": 0.0})
|
||||
rotation: float = 0.0
|
||||
connected_at: str = field(
|
||||
default_factory=lambda: datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
|
||||
)
|
||||
|
||||
    def __post_init__(self):
        """Set display_name to visitor_id if not provided; copy position dict."""
        if not self.display_name:
            self.display_name = self.visitor_id
        # Copy position to avoid shared mutable state
        self.position = dict(self.position)


class VisitorRegistry:
    """Registry of active visitors in the Matrix.

    Thread-safe singleton pattern (Python GIL protects dict operations).
    Used by the WebSocket layer to track and broadcast visitor positions.
    """

    _instance: "VisitorRegistry | None" = None

    def __new__(cls) -> "VisitorRegistry":
        """Singleton constructor."""
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._visitors: dict[str, VisitorState] = {}
        return cls._instance

    def add(
        self, visitor_id: str, display_name: str = "", position: dict | None = None
    ) -> VisitorState:
        """Add a new visitor to the registry.

        Parameters
        ----------
        visitor_id: Unique identifier for the visitor.
        display_name: Optional display name (defaults to visitor_id).
        position: Optional initial position (defaults to origin).

        Returns
        -------
        The newly created VisitorState.
        """
        visitor = VisitorState(
            visitor_id=visitor_id,
            display_name=display_name,
            position=position if position else {"x": 0.0, "y": 0.0, "z": 0.0},
        )
        self._visitors[visitor_id] = visitor
        return visitor

    def remove(self, visitor_id: str) -> bool:
        """Remove a visitor from the registry.

        Parameters
        ----------
        visitor_id: The visitor to remove.

        Returns
        -------
        True if the visitor was found and removed, False otherwise.
        """
        if visitor_id in self._visitors:
            del self._visitors[visitor_id]
            return True
        return False

    def update_position(
        self,
        visitor_id: str,
        position: dict[str, float],
        rotation: float | None = None,
    ) -> bool:
        """Update a visitor's position and rotation.

        Parameters
        ----------
        visitor_id: The visitor to update.
        position: New 3D coordinates (x, y, z).
        rotation: Optional new rotation angle.

        Returns
        -------
        True if the visitor was found and updated, False otherwise.
        """
        if visitor_id not in self._visitors:
            return False

        self._visitors[visitor_id].position = position
        if rotation is not None:
            self._visitors[visitor_id].rotation = rotation
        return True

    def get(self, visitor_id: str) -> VisitorState | None:
        """Get a single visitor's state.

        Parameters
        ----------
        visitor_id: The visitor to retrieve.

        Returns
        -------
        The VisitorState if found, None otherwise.
        """
        return self._visitors.get(visitor_id)

    def get_all(self) -> list[dict]:
        """Get all active visitors as Matrix protocol message dicts.

        Returns
        -------
        List of visitor_state dicts ready for WebSocket broadcast.
        Each dict has: type, visitor_id, data (with display_name,
        position, rotation, connected_at), and ts.
        """
        now = int(time.time())
        return [
            {
                "type": "visitor_state",
                "visitor_id": v.visitor_id,
                "data": {
                    "display_name": v.display_name,
                    "position": v.position,
                    "rotation": v.rotation,
                    "connected_at": v.connected_at,
                },
                "ts": now,
            }
            for v in self._visitors.values()
        ]

    def clear(self) -> None:
        """Remove all visitors (useful for testing)."""
        self._visitors.clear()

    def __len__(self) -> int:
        """Return the number of active visitors."""
        return len(self._visitors)
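
# Illustrative usage (editor sketch, not part of the diff): how a caller such
# as a WebSocket handler might drive VisitorRegistry. The visitor ids and
# positions below are made up for demonstration.
registry = VisitorRegistry()
registry.add("vis-42", display_name="Ada", position={"x": 1.0, "y": 0.0, "z": 2.0})
registry.update_position("vis-42", {"x": 1.5, "y": 0.0, "z": 2.0}, rotation=90.0)
for msg in registry.get_all():  # ready-to-broadcast visitor_state dicts
    assert msg["type"] == "visitor_state"
assert len(registry) == 1 and VisitorRegistry() is registry  # singleton holds
registry.remove("vis-42")
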
@@ -99,11 +99,11 @@ class GrokBackend:

    def _get_client(self):
        """Create OpenAI client configured for xAI endpoint."""
        import httpx
        from openai import OpenAI

        from config import settings

        return OpenAI(
            api_key=self._api_key,
            base_url=settings.xai_base_url,
@@ -112,11 +112,11 @@ class GrokBackend:

    async def _get_async_client(self):
        """Create async OpenAI client configured for xAI endpoint."""
        import httpx
        from openai import AsyncOpenAI

        from config import settings

        return AsyncOpenAI(
            api_key=self._api_key,
            base_url=settings.xai_base_url,
@@ -264,6 +264,7 @@ class GrokBackend:
                },
            }
        except Exception as exc:
            logger.exception("Grok health check failed")
            return {
                "ok": False,
                "error": str(exc),
@@ -430,6 +431,7 @@ class ClaudeBackend:
            )
            return {"ok": True, "error": None, "backend": "claude", "model": self._model}
        except Exception as exc:
            logger.exception("Claude health check failed")
            return {"ok": False, "error": str(exc), "backend": "claude", "model": self._model}

    # ── Private helpers ───────────────────────────────────────────────────
@@ -37,6 +37,39 @@ def _is_interactive() -> bool:
    return hasattr(sys.stdin, "isatty") and sys.stdin.isatty()


def _read_message_input(message: list[str]) -> str:
    """Join CLI args into a message, reading from stdin when requested.

    Returns the final message string. Raises ``typer.Exit(1)`` when
    stdin is explicitly requested (``-``) but empty.
    """
    message_str = " ".join(message)

    if message_str == "-" or not _is_interactive():
        try:
            stdin_content = sys.stdin.read().strip()
        except (KeyboardInterrupt, EOFError):
            stdin_content = ""
        if stdin_content:
            message_str = stdin_content
        elif message_str == "-":
            typer.echo("No input provided via stdin.", err=True)
            raise typer.Exit(1)

    return message_str


def _resolve_session_id(session_id: str | None, new_session: bool) -> str:
    """Return the effective session ID for a chat invocation."""
    import uuid

    if session_id is not None:
        return session_id
    if new_session:
        return str(uuid.uuid4())
    return _CLI_SESSION_ID
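
# Illustrative precedence check (editor sketch, not part of the diff): an
# explicit --session-id wins over --new-session, which wins over the shared
# CLI session. The "abc123" value is made up.
assert _resolve_session_id("abc123", new_session=True) == "abc123"
assert _resolve_session_id(None, new_session=False) == _CLI_SESSION_ID
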
def _prompt_interactive(req, tool_name: str, tool_args: dict) -> None:
    """Display tool details and prompt the human for approval."""
    description = format_action_description(tool_name, tool_args)
@@ -143,6 +176,35 @@ def think(
    timmy.print_response(f"Think carefully about: {topic}", stream=True, session_id=_CLI_SESSION_ID)


@app.command()
def chat(
    message: list[str] = typer.Argument(
@@ -179,38 +241,13 @@ def chat(

    Read from stdin by passing "-" as the message or piping input.
    """
    message_str = _read_message_input(message)
    session_id = _resolve_session_id(session_id, new_session)
    timmy = create_timmy(backend=backend, session_id=session_id)

    # Use agent.run() so we can intercept paused runs for tool confirmation.
    run_output = timmy.run(message_str, stream=False, session_id=session_id)

    # Handle paused runs — dangerous tools need user approval
    run_output = _handle_tool_confirmation(timmy, run_output, session_id, autonomous=autonomous)

    # Print the final response
    content = run_output.content if hasattr(run_output, "content") else str(run_output)
    if content:
        from timmy.session import _clean_response
@@ -97,6 +97,7 @@ async def probe_tool_use() -> dict:
            "error_type": "empty_result",
        }
    except Exception as exc:
        logger.exception("Tool use probe failed")
        return {
            "success": False,
            "capability": cap,
@@ -129,6 +130,7 @@ async def probe_multistep_planning() -> dict:
            "error_type": "verification_failed",
        }
    except Exception as exc:
        logger.exception("Multistep planning probe failed")
        return {
            "success": False,
            "capability": cap,
@@ -151,6 +153,7 @@ async def probe_memory_write() -> dict:
            "error_type": None,
        }
    except Exception as exc:
        logger.exception("Memory write probe failed")
        return {
            "success": False,
            "capability": cap,
@@ -179,6 +182,7 @@ async def probe_memory_read() -> dict:
            "error_type": "empty_result",
        }
    except Exception as exc:
        logger.exception("Memory read probe failed")
        return {
            "success": False,
            "capability": cap,
@@ -214,6 +218,7 @@ async def probe_self_coding() -> dict:
            "error_type": "verification_failed",
        }
    except Exception as exc:
        logger.exception("Self-coding probe failed")
        return {
            "success": False,
            "capability": cap,
@@ -325,6 +330,7 @@ class LoopQAOrchestrator:
            result = await probe_fn()
        except Exception as exc:
            # Probe itself crashed — record failure and report
            logger.exception("Loop QA probe %s crashed", cap.value)
            capture_error(exc, source="loop_qa", context={"capability": cap.value})
            result = {
                "success": False,
632
src/timmy/quest_system.py
Normal file
@@ -0,0 +1,632 @@
"""Token Quest System for agent rewards.

Provides quest definitions, progress tracking, completion detection,
and token awards for agent accomplishments.

Quests are defined in config/quests.yaml and loaded at runtime.
"""

from __future__ import annotations

import logging
import time
from dataclasses import dataclass, field
from datetime import UTC, datetime, timedelta
from enum import StrEnum
from pathlib import Path
from typing import Any

import yaml

from config import settings

logger = logging.getLogger(__name__)

# Path to quest configuration
QUEST_CONFIG_PATH = Path(settings.repo_root) / "config" / "quests.yaml"


class QuestType(StrEnum):
    """Types of quests supported by the system."""

    ISSUE_COUNT = "issue_count"
    ISSUE_REDUCE = "issue_reduce"
    DOCS_UPDATE = "docs_update"
    TEST_IMPROVE = "test_improve"
    DAILY_RUN = "daily_run"
    CUSTOM = "custom"


class QuestStatus(StrEnum):
    """Status of a quest for an agent."""

    NOT_STARTED = "not_started"
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    CLAIMED = "claimed"
    EXPIRED = "expired"


@dataclass
class QuestDefinition:
    """Definition of a quest from configuration."""

    id: str
    name: str
    description: str
    reward_tokens: int
    quest_type: QuestType
    enabled: bool
    repeatable: bool
    cooldown_hours: int
    criteria: dict[str, Any]
    notification_message: str

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> QuestDefinition:
        """Create a QuestDefinition from a dictionary."""
        return cls(
            id=data["id"],
            name=data.get("name", "Unnamed Quest"),
            description=data.get("description", ""),
            reward_tokens=data.get("reward_tokens", 0),
            quest_type=QuestType(data.get("type", "custom")),
            enabled=data.get("enabled", True),
            repeatable=data.get("repeatable", False),
            cooldown_hours=data.get("cooldown_hours", 0),
            criteria=data.get("criteria", {}),
            notification_message=data.get(
                "notification_message", "Quest Complete! You earned {tokens} tokens."
            ),
        )
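
# Illustrative round-trip (editor sketch, not part of the diff): a minimal
# quest dict, shaped like one entry under the "quests:" key of quests.yaml,
# parsed into a QuestDefinition. All values here are fabricated.
_example = QuestDefinition.from_dict(
    {
        "id": "demo_quest",
        "name": "Demo Quest",
        "type": "issue_count",
        "reward_tokens": 50,
        "repeatable": True,
        "cooldown_hours": 24,
        "criteria": {"issue_labels": ["bug"], "target_count": 3},
    }
)
assert _example.quest_type is QuestType.ISSUE_COUNT
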
@dataclass
class QuestProgress:
    """Progress of a quest for a specific agent."""

    quest_id: str
    agent_id: str
    status: QuestStatus
    current_value: int = 0
    target_value: int = 0
    started_at: str = ""
    completed_at: str = ""
    claimed_at: str = ""
    completion_count: int = 0
    last_completed_at: str = ""
    metadata: dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "quest_id": self.quest_id,
            "agent_id": self.agent_id,
            "status": self.status.value,
            "current_value": self.current_value,
            "target_value": self.target_value,
            "started_at": self.started_at,
            "completed_at": self.completed_at,
            "claimed_at": self.claimed_at,
            "completion_count": self.completion_count,
            "last_completed_at": self.last_completed_at,
            "metadata": self.metadata,
        }


# In-memory storage for quest progress
_quest_progress: dict[str, QuestProgress] = {}
_quest_definitions: dict[str, QuestDefinition] = {}
_quest_settings: dict[str, Any] = {}


def _get_progress_key(quest_id: str, agent_id: str) -> str:
    """Generate a unique key for quest progress."""
    return f"{agent_id}:{quest_id}"
def load_quest_config() -> tuple[dict[str, QuestDefinition], dict[str, Any]]:
    """Load quest definitions from quests.yaml.

    Returns:
        Tuple of (quest definitions dict, settings dict)
    """
    global _quest_definitions, _quest_settings

    if not QUEST_CONFIG_PATH.exists():
        logger.warning("Quest config not found at %s", QUEST_CONFIG_PATH)
        return {}, {}

    try:
        raw = QUEST_CONFIG_PATH.read_text()
        config = yaml.safe_load(raw)

        if not isinstance(config, dict):
            logger.warning("Invalid quest config format")
            return {}, {}

        # Load quest definitions
        quests_data = config.get("quests", {})
        definitions = {}
        for quest_id, quest_data in quests_data.items():
            quest_data["id"] = quest_id
            try:
                definition = QuestDefinition.from_dict(quest_data)
                definitions[quest_id] = definition
            except (ValueError, KeyError) as exc:
                logger.warning("Failed to load quest %s: %s", quest_id, exc)

        # Load settings
        _quest_settings = config.get("settings", {})
        _quest_definitions = definitions

        logger.debug("Loaded %d quest definitions", len(definitions))
        return definitions, _quest_settings

    except (OSError, yaml.YAMLError) as exc:
        logger.warning("Failed to load quest config: %s", exc)
        return {}, {}


def get_quest_definitions() -> dict[str, QuestDefinition]:
    """Get all quest definitions, loading if necessary."""
    global _quest_definitions
    if not _quest_definitions:
        _quest_definitions, _ = load_quest_config()
    return _quest_definitions


def get_quest_definition(quest_id: str) -> QuestDefinition | None:
    """Get a specific quest definition by ID."""
    definitions = get_quest_definitions()
    return definitions.get(quest_id)


def get_active_quests() -> list[QuestDefinition]:
    """Get all enabled quest definitions."""
    definitions = get_quest_definitions()
    return [q for q in definitions.values() if q.enabled]
def get_quest_progress(quest_id: str, agent_id: str) -> QuestProgress | None:
    """Get progress for a specific quest and agent."""
    key = _get_progress_key(quest_id, agent_id)
    return _quest_progress.get(key)


def get_or_create_progress(quest_id: str, agent_id: str) -> QuestProgress:
    """Get existing progress or create new for quest/agent."""
    key = _get_progress_key(quest_id, agent_id)
    if key not in _quest_progress:
        quest = get_quest_definition(quest_id)
        if not quest:
            raise ValueError(f"Quest {quest_id} not found")

        target = _get_target_value(quest)
        _quest_progress[key] = QuestProgress(
            quest_id=quest_id,
            agent_id=agent_id,
            status=QuestStatus.NOT_STARTED,
            current_value=0,
            target_value=target,
            started_at=datetime.now(UTC).isoformat(),
        )
    return _quest_progress[key]


def _get_target_value(quest: QuestDefinition) -> int:
    """Extract target value from quest criteria."""
    criteria = quest.criteria
    if quest.quest_type == QuestType.ISSUE_COUNT:
        return criteria.get("target_count", 1)
    elif quest.quest_type == QuestType.ISSUE_REDUCE:
        return criteria.get("target_reduction", 1)
    elif quest.quest_type == QuestType.DAILY_RUN:
        return criteria.get("min_sessions", 1)
    elif quest.quest_type == QuestType.DOCS_UPDATE:
        return criteria.get("min_files_changed", 1)
    elif quest.quest_type == QuestType.TEST_IMPROVE:
        return criteria.get("min_new_tests", 1)
    return 1
def update_quest_progress(
    quest_id: str,
    agent_id: str,
    current_value: int,
    metadata: dict[str, Any] | None = None,
) -> QuestProgress:
    """Update progress for a quest."""
    progress = get_or_create_progress(quest_id, agent_id)
    progress.current_value = current_value

    if metadata:
        progress.metadata.update(metadata)

    # Check if quest is now complete
    if progress.current_value >= progress.target_value:
        if progress.status not in (QuestStatus.COMPLETED, QuestStatus.CLAIMED):
            progress.status = QuestStatus.COMPLETED
            progress.completed_at = datetime.now(UTC).isoformat()
            logger.info("Quest %s completed for agent %s", quest_id, agent_id)

    return progress


def _is_on_cooldown(progress: QuestProgress, quest: QuestDefinition) -> bool:
    """Check if a repeatable quest is on cooldown."""
    if not quest.repeatable or not progress.last_completed_at:
        return False

    if quest.cooldown_hours <= 0:
        return False

    try:
        last_completed = datetime.fromisoformat(progress.last_completed_at)
        cooldown_end = last_completed + timedelta(hours=quest.cooldown_hours)
        return datetime.now(UTC) < cooldown_end
    except (ValueError, TypeError):
        return False
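
# Worked cooldown check (editor sketch, not part of the diff): a quest with a
# 24h cooldown completed 6 hours ago is still cooling down. The agent id and
# timestamp below are fabricated; the quest id is one defined in quests.yaml.
_q = get_quest_definition("close_flaky_tests")  # repeatable, cooldown_hours=24
if _q is not None:
    _p = get_or_create_progress(_q.id, "agent-demo")
    _p.last_completed_at = (datetime.now(UTC) - timedelta(hours=6)).isoformat()
    assert _is_on_cooldown(_p, _q) is True
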
def _apply_stress_multiplier(base_reward: int, quest_type: QuestType) -> tuple[int, float]:
    """Apply stress-based multiplier to quest reward.

    Returns:
        Tuple of (adjusted_reward, multiplier_used)
    """
    try:
        from timmy.stress_detector import apply_multiplier

        # apply_multiplier returns the adjusted reward, not the multiplier itself
        adjusted = apply_multiplier(base_reward, quest_type.value)
        return adjusted, adjusted / max(base_reward, 1)
    except Exception as exc:
        logger.debug("Failed to apply stress multiplier: %s", exc)
        return base_reward, 1.0
def claim_quest_reward(quest_id: str, agent_id: str) -> dict[str, Any] | None:
    """Claim the token reward for a completed quest.

    Returns:
        Reward info dict if successful, None if not claimable
    """
    progress = get_quest_progress(quest_id, agent_id)
    if not progress:
        return None

    quest = get_quest_definition(quest_id)
    if not quest:
        return None

    # Check if quest is completed but not yet claimed
    if progress.status != QuestStatus.COMPLETED:
        return None

    # Check cooldown for repeatable quests
    if _is_on_cooldown(progress, quest):
        return None

    try:
        # Apply stress-based multiplier
        adjusted_reward, multiplier = _apply_stress_multiplier(
            quest.reward_tokens, quest.quest_type
        )

        # Award tokens via ledger
        from lightning.ledger import create_invoice_entry, mark_settled

        # Create a mock invoice for the reward
        invoice_entry = create_invoice_entry(
            payment_hash=f"quest_{quest_id}_{agent_id}_{int(time.time())}",
            amount_sats=adjusted_reward,
            memo=f"Quest reward: {quest.name}",
            source="quest_reward",
            agent_id=agent_id,
        )

        # Mark as settled immediately (quest rewards are auto-settled)
        mark_settled(invoice_entry.payment_hash, preimage=f"quest_{quest_id}")

        # Update progress
        progress.status = QuestStatus.CLAIMED
        progress.claimed_at = datetime.now(UTC).isoformat()
        progress.completion_count += 1
        progress.last_completed_at = progress.claimed_at

        # Reset for repeatable quests
        if quest.repeatable:
            progress.status = QuestStatus.NOT_STARTED
            progress.current_value = 0
            progress.completed_at = ""
            progress.claimed_at = ""

        # Build notification with multiplier info
        notification = quest.notification_message.format(tokens=adjusted_reward)
        if multiplier != 1.0:
            pct = int((multiplier - 1.0) * 100)
            if pct > 0:
                notification += f" (+{pct}% stress bonus)"
            else:
                notification += f" ({pct}% stress adjustment)"

        return {
            "quest_id": quest_id,
            "agent_id": agent_id,
            "tokens_awarded": adjusted_reward,
            "base_reward": quest.reward_tokens,
            "multiplier": round(multiplier, 2),
            "notification": notification,
            "completion_count": progress.completion_count,
        }

    except Exception as exc:
        logger.error("Failed to award quest reward: %s", exc)
        return None
def check_issue_count_quest(
    quest: QuestDefinition,
    agent_id: str,
    closed_issues: list[dict],
) -> QuestProgress | None:
    """Check progress for issue_count type quest."""
    criteria = quest.criteria
    target_labels = set(criteria.get("issue_labels", []))
    # target_count is available in criteria but not used directly here

    # Count matching issues. An empty target_labels set is a subset of every
    # label set, so quests without label criteria match all closed issues.
    matching_count = 0
    for issue in closed_issues:
        issue_labels = {label.get("name", "") for label in issue.get("labels", [])}
        if target_labels.issubset(issue_labels):
            matching_count += 1

    progress = update_quest_progress(
        quest.id, agent_id, matching_count, {"matching_issues": matching_count}
    )

    return progress


def check_issue_reduce_quest(
    quest: QuestDefinition,
    agent_id: str,
    previous_count: int,
    current_count: int,
) -> QuestProgress | None:
    """Check progress for issue_reduce type quest."""
    # target_reduction available in quest.criteria but we track actual reduction
    reduction = max(0, previous_count - current_count)

    progress = update_quest_progress(quest.id, agent_id, reduction, {"reduction": reduction})

    return progress


def check_daily_run_quest(
    quest: QuestDefinition,
    agent_id: str,
    sessions_completed: int,
) -> QuestProgress | None:
    """Check progress for daily_run type quest."""
    # min_sessions available in quest.criteria but we track actual sessions
    progress = update_quest_progress(
        quest.id, agent_id, sessions_completed, {"sessions": sessions_completed}
    )

    return progress
def evaluate_quest_progress(
    quest_id: str,
    agent_id: str,
    context: dict[str, Any],
) -> QuestProgress | None:
    """Evaluate quest progress based on quest type and context.

    Args:
        quest_id: The quest to evaluate
        agent_id: The agent to evaluate for
        context: Context data for evaluation (issues, metrics, etc.)

    Returns:
        Updated QuestProgress or None if evaluation failed
    """
    quest = get_quest_definition(quest_id)
    if not quest or not quest.enabled:
        return None

    progress = get_quest_progress(quest_id, agent_id)

    # Check cooldown for repeatable quests
    if progress and _is_on_cooldown(progress, quest):
        return progress

    try:
        if quest.quest_type == QuestType.ISSUE_COUNT:
            closed_issues = context.get("closed_issues", [])
            return check_issue_count_quest(quest, agent_id, closed_issues)

        elif quest.quest_type == QuestType.ISSUE_REDUCE:
            prev_count = context.get("previous_issue_count", 0)
            curr_count = context.get("current_issue_count", 0)
            return check_issue_reduce_quest(quest, agent_id, prev_count, curr_count)

        elif quest.quest_type == QuestType.DAILY_RUN:
            sessions = context.get("sessions_completed", 0)
            return check_daily_run_quest(quest, agent_id, sessions)

        elif quest.quest_type == QuestType.CUSTOM:
            # Custom quests require manual completion
            return progress

        else:
            logger.debug("Quest type %s not yet implemented", quest.quest_type)
            return progress

    except Exception as exc:
        logger.warning("Quest evaluation failed for %s: %s", quest_id, exc)
        return progress
def auto_evaluate_all_quests(agent_id: str, context: dict[str, Any]) -> list[dict]:
    """Evaluate all active quests for an agent and award rewards.

    Returns:
        List of reward info for newly completed quests
    """
    rewards = []
    active_quests = get_active_quests()

    for quest in active_quests:
        progress = evaluate_quest_progress(quest.id, agent_id, context)
        if progress and progress.status == QuestStatus.COMPLETED:
            # Auto-claim the reward
            reward = claim_quest_reward(quest.id, agent_id)
            if reward:
                rewards.append(reward)

    return rewards
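
# Illustrative end-of-cycle call (editor sketch, not part of the diff): the
# context keys mirror those read in evaluate_quest_progress; the agent id and
# issue data below are fabricated.
_rewards = auto_evaluate_all_quests(
    "agent-demo",
    {
        "closed_issues": [{"labels": [{"name": "flaky-test"}]}] * 3,
        "previous_issue_count": 12,
        "current_issue_count": 9,
        "sessions_completed": 1,
    },
)
for _r in _rewards:
    logger.info("%s", _r["notification"])
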
def get_agent_quests_status(agent_id: str) -> dict[str, Any]:
    """Get complete quest status for an agent."""
    definitions = get_quest_definitions()
    quests_status = []
    total_rewards = 0
    completed_count = 0

    # Get current stress mode for adjusted rewards display
    try:
        from timmy.stress_detector import get_current_stress_mode, get_multiplier

        current_mode = get_current_stress_mode()
    except Exception:
        current_mode = None

    for quest_id, quest in definitions.items():
        progress = get_quest_progress(quest_id, agent_id)
        if not progress:
            progress = get_or_create_progress(quest_id, agent_id)

        is_on_cooldown = _is_on_cooldown(progress, quest) if quest.repeatable else False

        # Calculate adjusted reward with stress multiplier
        adjusted_reward = quest.reward_tokens
        multiplier = 1.0
        if current_mode:
            try:
                multiplier = get_multiplier(quest.quest_type.value, current_mode)
                adjusted_reward = int(quest.reward_tokens * multiplier)
            except Exception:
                pass

        quest_info = {
            "quest_id": quest_id,
            "name": quest.name,
            "description": quest.description,
            "reward_tokens": quest.reward_tokens,
            "adjusted_reward": adjusted_reward,
            "multiplier": round(multiplier, 2),
            "type": quest.quest_type.value,
            "enabled": quest.enabled,
            "repeatable": quest.repeatable,
            "status": progress.status.value,
            "current_value": progress.current_value,
            "target_value": progress.target_value,
            "completion_count": progress.completion_count,
            "on_cooldown": is_on_cooldown,
            "cooldown_hours_remaining": 0,
        }

        if is_on_cooldown and progress.last_completed_at:
            try:
                last = datetime.fromisoformat(progress.last_completed_at)
                cooldown_end = last + timedelta(hours=quest.cooldown_hours)
                hours_remaining = (cooldown_end - datetime.now(UTC)).total_seconds() / 3600
                quest_info["cooldown_hours_remaining"] = round(max(0, hours_remaining), 1)
            except (ValueError, TypeError):
                pass

        quests_status.append(quest_info)
        total_rewards += progress.completion_count * quest.reward_tokens
        completed_count += progress.completion_count

    return {
        "agent_id": agent_id,
        "quests": quests_status,
        "total_tokens_earned": total_rewards,
        "total_quests_completed": completed_count,
        "active_quests_count": len([q for q in quests_status if q["enabled"]]),
        "stress_mode": current_mode.value if current_mode else None,
    }
def reset_quest_progress(quest_id: str | None = None, agent_id: str | None = None) -> int:
    """Reset quest progress. Useful for testing.

    Args:
        quest_id: Specific quest to reset, or None for all
        agent_id: Specific agent to reset, or None for all

    Returns:
        Number of progress entries reset
    """
    global _quest_progress
    count = 0

    # Keys are "<agent_id>:<quest_id>" (see _get_progress_key)
    keys_to_reset = []
    for key, _progress in _quest_progress.items():
        key_agent, key_quest = key.split(":", 1)
        if (quest_id is None or key_quest == quest_id) and (
            agent_id is None or key_agent == agent_id
        ):
            keys_to_reset.append(key)

    for key in keys_to_reset:
        del _quest_progress[key]
        count += 1

    return count
def get_quest_leaderboard() -> list[dict[str, Any]]:
    """Get a leaderboard of agents by quest completion."""
    agent_stats: dict[str, dict[str, Any]] = {}

    for _key, progress in _quest_progress.items():
        agent_id = progress.agent_id
        if agent_id not in agent_stats:
            agent_stats[agent_id] = {
                "agent_id": agent_id,
                "total_completions": 0,
                "total_tokens": 0,
                "quests_completed": set(),
            }

        quest = get_quest_definition(progress.quest_id)
        if quest:
            agent_stats[agent_id]["total_completions"] += progress.completion_count
            agent_stats[agent_id]["total_tokens"] += progress.completion_count * quest.reward_tokens
            if progress.completion_count > 0:
                agent_stats[agent_id]["quests_completed"].add(quest.id)

    leaderboard = []
    for stats in agent_stats.values():
        leaderboard.append(
            {
                "agent_id": stats["agent_id"],
                "total_completions": stats["total_completions"],
                "total_tokens": stats["total_tokens"],
                "unique_quests_completed": len(stats["quests_completed"]),
            }
        )

    # Sort by total tokens (descending)
    leaderboard.sort(key=lambda x: x["total_tokens"], reverse=True)
    return leaderboard


# Initialize on module load
load_quest_config()
565
src/timmy/stress_detector.py
Normal file
@@ -0,0 +1,565 @@
"""System stress detection for adaptive token rewards.

Monitors system signals like flakiness, backlog growth, and CI failures
to determine the current stress mode. Token rewards are then adjusted
based on the stress mode to incentivize agents to focus on critical areas.
"""

from __future__ import annotations

import json
import logging
from dataclasses import dataclass, field
from datetime import UTC, datetime, timedelta
from enum import StrEnum
from pathlib import Path
from typing import Any

import yaml

from config import settings

logger = logging.getLogger(__name__)

# Path to stress mode configuration
STRESS_CONFIG_PATH = Path(settings.repo_root) / "config" / "stress_modes.yaml"


class StressMode(StrEnum):
    """System stress modes.

    - CALM: Normal operations, incentivize exploration and refactoring
    - ELEVATED: Some stress signals detected, balance incentives
    - HIGH: Critical stress, strongly incentivize bug fixes and stabilization
    """

    CALM = "calm"
    ELEVATED = "elevated"
    HIGH = "high"
@dataclass
class StressSignal:
    """A single stress signal reading."""

    name: str
    value: float
    threshold: float
    weight: float
    timestamp: str = field(default_factory=lambda: datetime.now(UTC).isoformat())

    @property
    def is_triggered(self) -> bool:
        """Whether this signal exceeds its threshold."""
        return self.value >= self.threshold

    @property
    def contribution(self) -> float:
        """Calculate this signal's contribution to stress score."""
        if not self.is_triggered:
            return 0.0
        # Contribution is the weighted ratio of value to threshold, capped at
        # 1.0. Divide by the threshold itself (guarding only against zero) so
        # that fractional-rate thresholds such as 0.15 are not understated by
        # clamping the denominator to 1.0.
        if self.threshold <= 0:
            return self.weight
        return min(1.0, self.value / self.threshold) * self.weight
@dataclass
class StressSnapshot:
    """Complete stress assessment at a point in time."""

    mode: StressMode
    score: float
    signals: list[StressSignal]
    multipliers: dict[str, float]
    timestamp: str = field(default_factory=lambda: datetime.now(UTC).isoformat())

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "mode": self.mode.value,
            "score": round(self.score, 3),
            "signals": [
                {
                    "name": s.name,
                    "value": s.value,
                    "threshold": s.threshold,
                    "triggered": s.is_triggered,
                    "contribution": round(s.contribution, 3),
                }
                for s in self.signals
            ],
            "multipliers": self.multipliers,
            "timestamp": self.timestamp,
        }
@dataclass
class StressThresholds:
    """Thresholds for entering/exiting stress modes."""

    elevated_min: float = 0.3
    high_min: float = 0.6

    def get_mode_for_score(self, score: float) -> StressMode:
        """Determine stress mode based on score."""
        if score >= self.high_min:
            return StressMode.HIGH
        elif score >= self.elevated_min:
            return StressMode.ELEVATED
        return StressMode.CALM
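
# Boundary behavior (editor sketch, not part of the diff): scores map to
# modes with inclusive lower bounds, using the defaults above.
_t = StressThresholds()
assert _t.get_mode_for_score(0.29) is StressMode.CALM
assert _t.get_mode_for_score(0.3) is StressMode.ELEVATED
assert _t.get_mode_for_score(0.6) is StressMode.HIGH
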
# In-memory storage for stress state
_current_snapshot: StressSnapshot | None = None
_last_check_time: datetime | None = None
_config_cache: dict[str, Any] | None = None
_config_mtime: float = 0.0
def _load_stress_config() -> dict[str, Any]:
    """Load stress mode configuration from YAML.

    Returns:
        Configuration dictionary with default fallbacks
    """
    global _config_cache, _config_mtime

    # Check if config file has been modified
    if STRESS_CONFIG_PATH.exists():
        mtime = STRESS_CONFIG_PATH.stat().st_mtime
        if mtime != _config_mtime or _config_cache is None:
            try:
                raw = STRESS_CONFIG_PATH.read_text()
                _config_cache = yaml.safe_load(raw) or {}
                _config_mtime = mtime
                logger.debug("Loaded stress config from %s", STRESS_CONFIG_PATH)
            except (OSError, yaml.YAMLError) as exc:
                logger.warning("Failed to load stress config: %s", exc)
                _config_cache = {}

    if _config_cache is None:
        _config_cache = {}

    return _config_cache
def get_default_config() -> dict[str, Any]:
    """Get default stress configuration."""
    return {
        "thresholds": {
            "elevated_min": 0.3,
            "high_min": 0.6,
        },
        "signals": {
            "flaky_test_rate": {
                "threshold": 0.15,  # 15% flaky test rate
                "weight": 0.3,
                "description": "Percentage of tests that are flaky",
            },
            "p1_backlog_growth": {
                "threshold": 5,  # 5 new P1 issues
                "weight": 0.25,
                "description": "Net growth in P1 priority issues",
            },
            "ci_failure_rate": {
                "threshold": 0.2,  # 20% CI failure rate
                "weight": 0.25,
                "description": "Percentage of CI runs failing",
            },
            "open_bug_count": {
                "threshold": 20,  # 20 open bugs
                "weight": 0.2,
                "description": "Total open issues labeled as bugs",
            },
        },
        "multipliers": {
            StressMode.CALM.value: {
                "test_improve": 1.0,
                "docs_update": 1.2,  # Calm periods good for docs
                "issue_count": 1.0,
                "issue_reduce": 1.0,
                "daily_run": 1.0,
                "custom": 1.0,
                "exploration": 1.3,  # Encourage exploration
                "refactor": 1.2,  # Encourage refactoring
            },
            StressMode.ELEVATED.value: {
                "test_improve": 1.2,  # Start emphasizing tests
                "docs_update": 1.0,
                "issue_count": 1.1,
                "issue_reduce": 1.1,
                "daily_run": 1.0,
                "custom": 1.0,
                "exploration": 1.0,
                "refactor": 0.9,  # Discourage risky refactors
            },
            StressMode.HIGH.value: {
                "test_improve": 1.5,  # Strongly incentivize testing
                "docs_update": 0.8,  # Deprioritize docs
                "issue_count": 1.3,  # Reward closing issues
                "issue_reduce": 1.4,  # Strongly reward reducing backlog
                "daily_run": 1.1,
                "custom": 1.0,
                "exploration": 0.7,  # Discourage exploration
                "refactor": 0.6,  # Discourage refactors during crisis
            },
        },
    }
def _get_config_value(key_path: str, default: Any = None) -> Any:
    """Get a value from config using dot notation path."""
    config = _load_stress_config()
    keys = key_path.split(".")
    value = config
    for key in keys:
        if isinstance(value, dict):
            value = value.get(key)
        else:
            return default
    return value if value is not None else default
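
# Dot-notation lookup (editor sketch, not part of the diff): walks nested
# dicts key by key, falling back to the default on any miss. Key paths here
# follow the default config shape above.
_hi = _get_config_value("thresholds.high_min", default=0.6)
_w = _get_config_value("signals.flaky_test_rate.weight", default=0.3)
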
def _calculate_flaky_test_rate() -> float:
    """Calculate current flaky test rate from available data."""
    try:
        # Try to load from daily run metrics or test results
        test_results_path = Path(settings.repo_root) / ".loop" / "test_results.jsonl"
        if not test_results_path.exists():
            return 0.0

        # Count recent test runs and flaky results (last 7 days)
        now = datetime.now(UTC)
        cutoff = now - timedelta(days=7)

        total_runs = 0
        flaky_runs = 0

        for line in test_results_path.read_text().strip().splitlines():
            try:
                entry = json.loads(line)
                ts_str = entry.get("timestamp", "")
                if not ts_str:
                    continue
                ts = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
                if ts >= cutoff:
                    total_runs += 1
                    if entry.get("is_flaky", False):
                        flaky_runs += 1
            except (json.JSONDecodeError, ValueError):
                continue

        return flaky_runs / max(total_runs, 1)
    except Exception as exc:
        logger.debug("Failed to calculate flaky test rate: %s", exc)
        return 0.0
def _calculate_p1_backlog_growth() -> float:
    """Calculate P1 issue backlog growth (this week vs. the week before)."""
    try:
        from dashboard.routes.daily_run import GiteaClient, _load_config

        config = _load_config()
        token = config.get("token")
        client = GiteaClient(config, token)

        if not client.is_available():
            return 0.0

        # Compare P1 issues created in the last 7 days against the 7 days before
        now = datetime.now(UTC)
        cutoff_current = now - timedelta(days=7)
        cutoff_previous = now - timedelta(days=14)

        issues = client.get_paginated("issues", {"state": "all", "labels": "P1", "limit": 100})

        current_count = 0
        previous_count = 0

        for issue in issues:
            created_at = issue.get("created_at", "")
            if not created_at:
                continue
            try:
                created = datetime.fromisoformat(created_at.replace("Z", "+00:00"))
                if created >= cutoff_current:
                    current_count += 1
                elif created >= cutoff_previous:
                    previous_count += 1
            except (ValueError, TypeError):
                continue

        # Return net growth (positive means growing backlog)
        return max(0, current_count - previous_count)
    except Exception as exc:
        logger.debug("Failed to calculate P1 backlog growth: %s", exc)
        return 0.0
def _calculate_ci_failure_rate() -> float:
    """Calculate CI failure rate from recent runs."""
    try:
        # Try to get CI metrics from Gitea or local files
        ci_results_path = Path(settings.repo_root) / ".loop" / "ci_results.jsonl"
        if not ci_results_path.exists():
            return 0.0

        now = datetime.now(UTC)
        cutoff = now - timedelta(days=7)

        total_runs = 0
        failed_runs = 0

        for line in ci_results_path.read_text().strip().splitlines():
            try:
                entry = json.loads(line)
                ts_str = entry.get("timestamp", "")
                if not ts_str:
                    continue
                ts = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
                if ts >= cutoff:
                    total_runs += 1
                    if entry.get("status") != "success":
                        failed_runs += 1
            except (json.JSONDecodeError, ValueError):
                continue

        return failed_runs / max(total_runs, 1)
    except Exception as exc:
        logger.debug("Failed to calculate CI failure rate: %s", exc)
        return 0.0
def _calculate_open_bug_count() -> float:
    """Calculate current open bug count."""
    try:
        from dashboard.routes.daily_run import GiteaClient, _load_config

        config = _load_config()
        token = config.get("token")
        client = GiteaClient(config, token)

        if not client.is_available():
            return 0.0

        issues = client.get_paginated("issues", {"state": "open", "labels": "bug", "limit": 100})

        return float(len(issues))
    except Exception as exc:
        logger.debug("Failed to calculate open bug count: %s", exc)
        return 0.0
def _collect_stress_signals() -> list[StressSignal]:
    """Collect all stress signals from the system."""
    config = _load_stress_config()
    default_config = get_default_config()
    signals_config = config.get("signals", default_config["signals"])

    signals = []

    # Define signal collectors
    collectors = {
        "flaky_test_rate": _calculate_flaky_test_rate,
        "p1_backlog_growth": _calculate_p1_backlog_growth,
        "ci_failure_rate": _calculate_ci_failure_rate,
        "open_bug_count": _calculate_open_bug_count,
    }

    for signal_name, collector in collectors.items():
        signal_cfg = signals_config.get(signal_name, {})
        default_cfg = default_config["signals"].get(signal_name, {})

        try:
            value = collector()
            threshold = signal_cfg.get("threshold", default_cfg.get("threshold", 1.0))
            weight = signal_cfg.get("weight", default_cfg.get("weight", 0.25))

            signals.append(
                StressSignal(
                    name=signal_name,
                    value=value,
                    threshold=threshold,
                    weight=weight,
                )
            )
        except Exception as exc:
            logger.debug("Failed to collect signal %s: %s", signal_name, exc)

    return signals
def _calculate_stress_score(signals: list[StressSignal]) -> float:
    """Calculate overall stress score from signals.

    Score is the weighted sum of triggered signal contributions,
    normalized to the 0-1 range.
    """
    if not signals:
        return 0.0

    total_weight = sum(s.weight for s in signals)
    if total_weight == 0:
        return 0.0

    triggered_contribution = sum(s.contribution for s in signals)
    return min(1.0, triggered_contribution / total_weight)
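
# Worked example (editor sketch, not part of the diff): two signals triggered
# at exactly their thresholds contribute their full weights, so the score is
# (0.3 + 0.25) / (0.3 + 0.25 + 0.25 + 0.2) = 0.55, ELEVATED under the
# default thresholds. All readings below are fabricated.
_signals = [
    StressSignal("flaky_test_rate", value=0.15, threshold=0.15, weight=0.3),
    StressSignal("p1_backlog_growth", value=5, threshold=5, weight=0.25),
    StressSignal("ci_failure_rate", value=0.05, threshold=0.2, weight=0.25),
    StressSignal("open_bug_count", value=4, threshold=20, weight=0.2),
]
assert abs(_calculate_stress_score(_signals) - 0.55) < 1e-9
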
def _get_multipliers_for_mode(mode: StressMode) -> dict[str, float]:
    """Get token multipliers for a specific stress mode."""
    config = _load_stress_config()
    default_config = get_default_config()

    multipliers = config.get("multipliers", default_config["multipliers"])
    mode_multipliers = multipliers.get(mode.value, {})
    default_mode_multipliers = default_config["multipliers"].get(mode.value, {})

    # Merge with defaults
    result = default_mode_multipliers.copy()
    result.update(mode_multipliers)

    return result
def detect_stress_mode(
    force_refresh: bool = False,
    min_check_interval_seconds: int = 60,
) -> StressSnapshot:
    """Detect current system stress mode.

    Args:
        force_refresh: Force a new check even if recently checked
        min_check_interval_seconds: Minimum seconds between checks

    Returns:
        StressSnapshot with mode, score, signals, and multipliers
    """
    global _current_snapshot, _last_check_time

    now = datetime.now(UTC)

    # Return cached snapshot if recent and not forced
    if not force_refresh and _current_snapshot is not None and _last_check_time is not None:
        elapsed = (now - _last_check_time).total_seconds()
        if elapsed < min_check_interval_seconds:
            return _current_snapshot

    # Collect signals and calculate stress
    signals = _collect_stress_signals()
    score = _calculate_stress_score(signals)

    # Determine mode from score
    config = _load_stress_config()
    default_config = get_default_config()
    thresholds_cfg = config.get("thresholds", default_config["thresholds"])
    thresholds = StressThresholds(
        elevated_min=thresholds_cfg.get("elevated_min", 0.3),
        high_min=thresholds_cfg.get("high_min", 0.6),
    )
    mode = thresholds.get_mode_for_score(score)

    # Get multipliers for this mode
    multipliers = _get_multipliers_for_mode(mode)

    # Create snapshot
    snapshot = StressSnapshot(
        mode=mode,
        score=score,
        signals=signals,
        multipliers=multipliers,
        timestamp=now.isoformat(),
    )

    # Log mode changes before overwriting the cached snapshot; comparing after
    # caching would always compare the new mode against itself and never fire.
    previous_mode = _current_snapshot.mode if _current_snapshot is not None else None
    if previous_mode is not None and previous_mode != mode:
        logger.info(
            "Stress mode changed: %s -> %s (score: %.2f)",
            previous_mode.value,
            mode.value,
            score,
        )

    # Cache result
    _current_snapshot = snapshot
    _last_check_time = now

    return snapshot
def get_current_stress_mode() -> StressMode:
    """Get current stress mode (uses cached or fresh detection)."""
    snapshot = detect_stress_mode()
    return snapshot.mode


def get_multiplier(quest_type: str, mode: StressMode | None = None) -> float:
    """Get token multiplier for a quest type.

    Args:
        quest_type: Type of quest (test_improve, issue_count, etc.)
        mode: Specific mode to get multiplier for, or None for current

    Returns:
        Multiplier value (1.0 = normal, 1.5 = 50% bonus, etc.)
    """
    if mode is None:
        mode = get_current_stress_mode()

    multipliers = _get_multipliers_for_mode(mode)
    return multipliers.get(quest_type, 1.0)
def apply_multiplier(base_reward: int, quest_type: str) -> int:
    """Apply stress-based multiplier to a base reward.

    Args:
        base_reward: Base token reward amount
        quest_type: Type of quest for multiplier lookup

    Returns:
        Adjusted reward amount (always >= 1)
    """
    multiplier = get_multiplier(quest_type)
    adjusted = int(base_reward * multiplier)
    return max(1, adjusted)
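
# Illustrative multiplier lookups (editor sketch, not part of the diff),
# assuming no stress_modes.yaml overrides so the default tables above apply.
assert get_multiplier("test_improve", StressMode.HIGH) == 1.5
assert get_multiplier("refactor", StressMode.HIGH) == 0.6
# In HIGH mode a 100-token test_improve quest would pay int(100 * 1.5) == 150.
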
def get_stress_summary() -> dict[str, Any]:
    """Get a human-readable summary of current stress state."""
    snapshot = detect_stress_mode()

    # Generate explanation
    explanations = {
        StressMode.CALM: "System is calm. Good time for exploration and refactoring.",
        StressMode.ELEVATED: "Elevated stress detected. Focus on stability and tests.",
        StressMode.HIGH: "HIGH STRESS MODE. Prioritize bug fixes and test hardening.",
    }

    triggered_signals = [s for s in snapshot.signals if s.is_triggered]

    return {
        "mode": snapshot.mode.value,
        "score": round(snapshot.score, 3),
        "explanation": explanations.get(snapshot.mode, "Unknown mode"),
        "active_signals": [
            {
                "name": s.name,
                "value": round(s.value, 3),
                "threshold": s.threshold,
            }
            for s in triggered_signals
        ],
        "current_multipliers": snapshot.multipliers,
        "last_updated": snapshot.timestamp,
    }
def reset_stress_state() -> None:
    """Reset stress state cache (useful for testing)."""
    global _current_snapshot, _last_check_time, _config_cache, _config_mtime
    _current_snapshot = None
    _last_check_time = None
    _config_cache = None
    _config_mtime = 0.0
@@ -139,6 +139,7 @@ def _run_kimi(cmd: list[str], workdir: str) -> dict[str, Any]:
            "error": "Kimi timed out after 300s. Task may be too broad — try breaking it into smaller pieces.",
        }
    except Exception as exc:
        logger.exception("Failed to run Kimi subprocess")
        return {
            "success": False,
            "error": f"Failed to run Kimi: {exc}",

@@ -122,6 +122,7 @@ def check_ollama_health() -> dict[str, Any]:
        models = response.json().get("models", [])
        result["available_models"] = [m.get("name", "") for m in models]
    except Exception as e:
        logger.exception("Ollama health check failed")
        result["error"] = str(e)

    return result
@@ -289,6 +290,7 @@ def get_live_system_status() -> dict[str, Any]:
    try:
        result["system"] = get_system_info()
    except Exception as exc:
        logger.exception("Failed to get system info")
        result["system"] = {"error": str(exc)}

    # Task queue
@@ -301,6 +303,7 @@ def get_live_system_status() -> dict[str, Any]:
    try:
        result["memory"] = get_memory_status()
    except Exception as exc:
        logger.exception("Failed to get memory status")
        result["memory"] = {"error": str(exc)}

    # Uptime
@@ -406,4 +409,5 @@ def run_self_tests(scope: str = "fast", _repo_root: str | None = None) -> dict[s
    except subprocess.TimeoutExpired:
        return {"success": False, "error": "Test run timed out (120s limit)"}
    except Exception as exc:
        logger.exception("Self-test run failed")
        return {"success": False, "error": str(exc)}
7
src/timmyctl/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""Timmy Control Panel — CLI entry point for automations.

This package provides the `timmyctl` command-line interface for managing
Timmy automations, configuration, and daily operations.
"""

__version__ = "1.0.0"
316
src/timmyctl/cli.py
Normal file
@@ -0,0 +1,316 @@
"""Timmy Control Panel CLI — primary control surface for automations.

Usage:
    timmyctl daily-run    # Run the Daily Run orchestration
    timmyctl log-run      # Capture a Daily Run logbook entry
    timmyctl inbox        # Show what's "calling Timmy"
    timmyctl config       # Display key configuration
"""

import json
import os
from pathlib import Path
from typing import Any

import typer
import yaml
from rich.console import Console
from rich.table import Table

# Initialize Rich console for nice output
console = Console()

app = typer.Typer(
    help="Timmy Control Panel — primary control surface for automations",
    rich_markup_mode="rich",
)

# Default config paths
DEFAULT_CONFIG_DIR = Path("timmy_automations/config")
AUTOMATIONS_CONFIG = DEFAULT_CONFIG_DIR / "automations.json"
DAILY_RUN_CONFIG = DEFAULT_CONFIG_DIR / "daily_run.json"
TRIAGE_RULES_CONFIG = DEFAULT_CONFIG_DIR / "triage_rules.yaml"


def _load_json_config(path: Path) -> dict[str, Any]:
    """Load a JSON config file, returning an empty dict on error."""
    try:
        with open(path, encoding="utf-8") as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        console.print(f"[red]Error loading {path}: {e}[/red]")
        return {}


def _load_yaml_config(path: Path) -> dict[str, Any]:
    """Load a YAML config file, returning an empty dict on error."""
    try:
        with open(path, encoding="utf-8") as f:
            return yaml.safe_load(f) or {}
    except (FileNotFoundError, yaml.YAMLError) as e:
        console.print(f"[red]Error loading {path}: {e}[/red]")
        return {}


def _get_config_dir() -> Path:
    """Return the config directory path."""
    # Allow override via environment variable
    env_dir = os.environ.get("TIMMY_CONFIG_DIR")
    if env_dir:
        return Path(env_dir)
    return DEFAULT_CONFIG_DIR
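
# Config-dir override (editor sketch, not part of the diff): TIMMY_CONFIG_DIR
# takes precedence over the packaged default, which is handy in tests. The
# /tmp path below is fabricated.
os.environ["TIMMY_CONFIG_DIR"] = "/tmp/timmy-test-config"
assert _get_config_dir() == Path("/tmp/timmy-test-config")
del os.environ["TIMMY_CONFIG_DIR"]
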
@app.command()
def daily_run(
    dry_run: bool = typer.Option(
        False, "--dry-run", "-n", help="Show what would run without executing"
    ),
    verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed output"),
):
    """Run the Daily Run orchestration (agenda + summary).

    Executes the daily run workflow including:
    - Loop Guard checks
    - Cycle Retrospective
    - Triage scoring (if scheduled)
    - Loop introspection (if scheduled)
    """
    console.print("[bold green]Timmy Daily Run[/bold green]")
    console.print()

    config_path = _get_config_dir() / "daily_run.json"
    config = _load_json_config(config_path)

    if not config:
        console.print("[yellow]No daily run configuration found.[/yellow]")
        raise typer.Exit(1)

    schedules = config.get("schedules", {})
    triggers = config.get("triggers", {})

    if verbose:
        console.print(f"[dim]Config loaded from: {config_path}[/dim]")
        console.print()

    # Show the daily run schedule
    table = Table(title="Daily Run Schedules")
    table.add_column("Schedule", style="cyan")
    table.add_column("Description", style="green")
    table.add_column("Automations", style="yellow")

    for schedule_name, schedule_data in schedules.items():
        automations = schedule_data.get("automations", [])
        table.add_row(
            schedule_name,
            schedule_data.get("description", ""),
            ", ".join(automations) if automations else "—",
        )

    console.print(table)
    console.print()

    # Show triggers
    trigger_table = Table(title="Triggers")
    trigger_table.add_column("Trigger", style="cyan")
    trigger_table.add_column("Description", style="green")
    trigger_table.add_column("Automations", style="yellow")

    for trigger_name, trigger_data in triggers.items():
        automations = trigger_data.get("automations", [])
        trigger_table.add_row(
            trigger_name,
            trigger_data.get("description", ""),
            ", ".join(automations) if automations else "—",
        )

    console.print(trigger_table)
    console.print()

    if dry_run:
        console.print("[yellow]Dry run mode — no actions executed.[/yellow]")
    else:
        console.print("[green]Executing daily run automations...[/green]")
        # TODO: Implement actual automation execution
        # This would call the appropriate scripts from the automations config
        console.print("[dim]Automation execution not yet implemented.[/dim]")
@app.command()
def log_run(
    message: str = typer.Argument(..., help="Logbook entry message"),
    category: str = typer.Option(
        "general", "--category", "-c", help="Entry category (e.g., retro, todo, note)"
    ),
):
    """Capture a quick Daily Run logbook entry.

    Logs a structured entry to the daily run logbook for later review.
    Entries are timestamped and categorized automatically.
    """
    from datetime import datetime

    timestamp = datetime.now().isoformat()

    console.print("[bold green]Daily Run Log Entry[/bold green]")
    console.print()
    console.print(f"[dim]Timestamp:[/dim] {timestamp}")
    console.print(f"[dim]Category:[/dim] {category}")
    console.print(f"[dim]Message:[/dim] {message}")
    console.print()

    # TODO: Persist to actual logbook file
    # This would append to a logbook file (e.g., .loop/logbook.jsonl)
    console.print("[green]✓[/green] Entry logged (simulated)")
@app.command()
|
||||
def inbox(
|
||||
limit: int = typer.Option(10, "--limit", "-l", help="Maximum items to show"),
|
||||
include_prs: bool = typer.Option(True, "--prs/--no-prs", help="Show open PRs"),
|
||||
include_issues: bool = typer.Option(True, "--issues/--no-issues", help="Show relevant issues"),
|
||||
):
|
||||
"""Show what's "calling Timmy" — PRs, Daily Run items, alerts.
|
||||
|
||||
Displays a unified inbox of items requiring attention:
|
||||
- Open pull requests awaiting review
|
||||
- Daily run queue items
|
||||
- Alerts and notifications
|
||||
"""
|
||||
console.print("[bold green]Timmy Inbox[/bold green]")
|
||||
console.print()
|
||||
|
||||
# Load automations to show what's enabled
|
||||
config_path = _get_config_dir() / "automations.json"
|
||||
config = _load_json_config(config_path)
|
||||
|
||||
automations = config.get("automations", [])
|
||||
enabled_automations = [a for a in automations if a.get("enabled", False)]
|
||||
|
||||
    # Show automation status
    auto_table = Table(title="Active Automations")
    auto_table.add_column("ID", style="cyan")
    auto_table.add_column("Name", style="green")
    auto_table.add_column("Category", style="yellow")
    auto_table.add_column("Trigger", style="magenta")

    for auto in enabled_automations[:limit]:
        auto_table.add_row(
            auto.get("id", ""),
            auto.get("name", ""),
            auto.get("category", ""),
            # The row previously put an enabled mark ("✓"/"✗") in the Category
            # column and the category under Trigger; since only enabled
            # automations reach this loop, show category and trigger instead.
            # "trigger" is the assumed manifest key backing the Trigger column.
            auto.get("trigger", ""),
        )

    console.print(auto_table)
    console.print()

    # TODO: Fetch actual PRs from Gitea API
    if include_prs:
        pr_table = Table(title="Open Pull Requests (placeholder)")
        pr_table.add_column("#", style="cyan")
        pr_table.add_column("Title", style="green")
        pr_table.add_column("Author", style="yellow")
        pr_table.add_column("Status", style="magenta")
        pr_table.add_row("—", "[dim]No PRs fetched (Gitea API not configured)[/dim]", "—", "—")
        console.print(pr_table)
        console.print()

    # TODO: Fetch relevant issues from Gitea API
    if include_issues:
        issue_table = Table(title="Issues Calling for Attention (placeholder)")
        issue_table.add_column("#", style="cyan")
        issue_table.add_column("Title", style="green")
        issue_table.add_column("Type", style="yellow")
        issue_table.add_column("Priority", style="magenta")
        issue_table.add_row(
            "—", "[dim]No issues fetched (Gitea API not configured)[/dim]", "—", "—"
        )
        console.print(issue_table)
        console.print()


@app.command()
def config(
    key: str | None = typer.Argument(None, help="Show specific config key (e.g., 'automations')"),
    show_rules: bool = typer.Option(False, "--rules", "-r", help="Show triage rules overview"),
):
    """Display key configuration — labels, logbook issue ID, token rules overview.

    Shows the current Timmy automation configuration including:
    - Automation manifest
    - Daily run schedules
    - Triage scoring rules
    """
    console.print("[bold green]Timmy Configuration[/bold green]")
    console.print()

    config_dir = _get_config_dir()

    if key == "automations" or key is None:
        auto_config = _load_json_config(config_dir / "automations.json")
        automations = auto_config.get("automations", [])

        table = Table(title="Automations Manifest")
        table.add_column("ID", style="cyan")
        table.add_column("Name", style="green")
        table.add_column("Enabled", style="yellow")
        table.add_column("Category", style="magenta")

        for auto in automations:
            enabled = "✓" if auto.get("enabled", False) else "✗"
            table.add_row(
                auto.get("id", ""),
                auto.get("name", ""),
                enabled,
                auto.get("category", ""),
            )

        console.print(table)
        console.print()

    if key == "daily_run" or (key is None and not show_rules):
        daily_config = _load_json_config(config_dir / "daily_run.json")

        if daily_config:
            console.print("[bold]Daily Run Configuration:[/bold]")
            console.print(f"[dim]Version:[/dim] {daily_config.get('version', 'unknown')}")
            console.print(f"[dim]Description:[/dim] {daily_config.get('description', '')}")
            console.print()

    if show_rules or key == "triage_rules":
        rules_config = _load_yaml_config(config_dir / "triage_rules.yaml")

        if rules_config:
            thresholds = rules_config.get("thresholds", {})
            console.print("[bold]Triage Scoring Rules:[/bold]")
            console.print(f"  Ready threshold: {thresholds.get('ready', 'N/A')}")
            console.print(f"  Excellent threshold: {thresholds.get('excellent', 'N/A')}")
            console.print()

            scope = rules_config.get("scope", {})
            console.print("[bold]Scope Scoring:[/bold]")
            console.print(f"  Meta penalty: {scope.get('meta_penalty', 'N/A')}")
            console.print()

            alignment = rules_config.get("alignment", {})
            console.print("[bold]Alignment Scoring:[/bold]")
            console.print(f"  Bug score: {alignment.get('bug_score', 'N/A')}")
            console.print(f"  Refactor score: {alignment.get('refactor_score', 'N/A')}")
            console.print(f"  Feature score: {alignment.get('feature_score', 'N/A')}")
            console.print()

            quarantine = rules_config.get("quarantine", {})
            console.print("[bold]Quarantine Rules:[/bold]")
            console.print(f"  Failure threshold: {quarantine.get('failure_threshold', 'N/A')}")
            console.print(f"  Lookback cycles: {quarantine.get('lookback_cycles', 'N/A')}")
            console.print()


def main():
    """Entry point for the timmyctl CLI."""
    app()


if __name__ == "__main__":
    main()
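Because timmyctl is a plain Typer application, the commands above can be smoke-tested in-process rather than through a subprocess. A minimal sketch, assuming a hypothetical tools.timmyctl import path (Typer exposes the log_run function as the log-run command by default):

from typer.testing import CliRunner

from tools.timmyctl import app  # hypothetical import path for the CLI defined above

runner = CliRunner()
result = runner.invoke(app, ["log-run", "Retro: wrapped the daily run", "--category", "retro"])
assert result.exit_code == 0
assert "Daily Run Log Entry" in result.stdout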
@@ -31,6 +31,8 @@ for _mod in [
    "pyzbar.pyzbar",
    "pyttsx3",
    "sentence_transformers",
    "swarm",
    "swarm.event_log",
]:
    sys.modules.setdefault(_mod, MagicMock())


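The setdefault call is what makes this stub list safe to extend: a dependency that was already genuinely imported wins, and only truly absent modules resolve to a MagicMock. A small illustration (append_event is a hypothetical attribute; a MagicMock absorbs any call):

import sys
from unittest.mock import MagicMock

# Stub both the package and the submodule so `import swarm.event_log` succeeds.
for _mod in ("swarm", "swarm.event_log"):
    sys.modules.setdefault(_mod, MagicMock())

import swarm.event_log  # resolves to the stub, never touches a real package

swarm.event_log.append_event("demo")  # hypothetical call, absorbed by the mock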
@@ -120,3 +120,50 @@ class TestCSRFDecoratorSupport:
        # Protected endpoint should be 403
        response2 = client.post("/protected")
        assert response2.status_code == 403

    def test_csrf_exempt_endpoint_not_executed_before_check(self):
        """Regression test for #626: endpoint must NOT execute before CSRF check.

        Previously the middleware called call_next() first, executing the endpoint
        and its side effects, then checked @csrf_exempt afterward. This meant
        non-exempt endpoints would execute even when CSRF validation failed.
        """
        app = FastAPI()
        app.add_middleware(CSRFMiddleware)

        side_effect_log: list[str] = []

        @app.post("/protected-with-side-effects")
        def protected_with_side_effects():
            side_effect_log.append("executed")
            return {"message": "should not run"}

        client = TestClient(app)

        # POST without CSRF token — should be blocked with 403
        response = client.post("/protected-with-side-effects")
        assert response.status_code == 403
        # The critical assertion: the endpoint must NOT have executed
        assert side_effect_log == [], (
            "Endpoint executed before CSRF validation! Side effects occurred "
            "despite CSRF failure (see issue #626)."
        )

    def test_csrf_exempt_endpoint_does_execute(self):
        """Ensure @csrf_exempt endpoints still execute normally."""
        app = FastAPI()
        app.add_middleware(CSRFMiddleware)

        side_effect_log: list[str] = []

        @app.post("/exempt-webhook")
        @csrf_exempt
        def exempt_webhook():
            side_effect_log.append("executed")
            return {"message": "webhook ok"}

        client = TestClient(app)

        response = client.post("/exempt-webhook")
        assert response.status_code == 200
        assert side_effect_log == ["executed"]

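The fix these two tests pin down is purely an ordering change in the middleware: the exemption lookup and token validation must both happen before call_next(). A minimal sketch of that dispatch order, assuming a Starlette BaseHTTPMiddleware; _is_exempt and _validate_token are hypothetical stand-ins for whatever CSRFMiddleware actually does:

from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse


class SketchCSRFMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request, call_next):
        if request.method in ("POST", "PUT", "PATCH", "DELETE"):
            if not self._is_exempt(request) and not self._validate_token(request):
                # Reject before the endpoint (and its side effects) can run.
                return JSONResponse({"detail": "CSRF check failed"}, status_code=403)
        return await call_next(request)

    def _is_exempt(self, request) -> bool:
        # Hypothetical: the real middleware inspects the matched endpoint's
        # @csrf_exempt marker; a header stands in for that lookup here.
        return request.headers.get("x-csrf-exempt") == "1"

    def _validate_token(self, request) -> bool:
        return "x-csrf-token" in request.headers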
File diff suppressed because it is too large
509 tests/infrastructure/test_multimodal.py Normal file
@@ -0,0 +1,509 @@
"""Tests for infrastructure.models.multimodal — multi-modal model management."""

import json
from unittest.mock import MagicMock, patch

from infrastructure.models.multimodal import (
    DEFAULT_FALLBACK_CHAINS,
    KNOWN_MODEL_CAPABILITIES,
    ModelCapability,
    ModelInfo,
    MultiModalManager,
    get_model_for_capability,
    model_supports_tools,
    model_supports_vision,
    pull_model_with_fallback,
)

# ---------------------------------------------------------------------------
# ModelCapability enum
# ---------------------------------------------------------------------------


class TestModelCapability:
    def test_members_exist(self):
        assert ModelCapability.TEXT
        assert ModelCapability.VISION
        assert ModelCapability.AUDIO
        assert ModelCapability.TOOLS
        assert ModelCapability.JSON
        assert ModelCapability.STREAMING

    def test_all_members_unique(self):
        values = [m.value for m in ModelCapability]
        assert len(values) == len(set(values))


# ---------------------------------------------------------------------------
# ModelInfo dataclass
# ---------------------------------------------------------------------------


class TestModelInfo:
    def test_defaults(self):
        info = ModelInfo(name="test-model")
        assert info.name == "test-model"
        assert info.capabilities == set()
        assert info.is_available is False
        assert info.is_pulled is False
        assert info.size_mb is None
        assert info.description == ""

    def test_supports_true(self):
        info = ModelInfo(name="m", capabilities={ModelCapability.TEXT, ModelCapability.VISION})
        assert info.supports(ModelCapability.TEXT) is True
        assert info.supports(ModelCapability.VISION) is True

    def test_supports_false(self):
        info = ModelInfo(name="m", capabilities={ModelCapability.TEXT})
        assert info.supports(ModelCapability.VISION) is False


# ---------------------------------------------------------------------------
# Known model capabilities lookup table
# ---------------------------------------------------------------------------


class TestKnownModelCapabilities:
    def test_vision_models_have_vision(self):
        vision_names = [
            "llama3.2-vision",
            "llava",
            "moondream",
            "qwen2.5-vl",
        ]
        for name in vision_names:
            assert ModelCapability.VISION in KNOWN_MODEL_CAPABILITIES[name], name

    def test_text_models_lack_vision(self):
        text_only = ["deepseek-r1", "gemma2", "phi3"]
        for name in text_only:
            assert ModelCapability.VISION not in KNOWN_MODEL_CAPABILITIES[name], name

    def test_all_models_have_text(self):
        for name, caps in KNOWN_MODEL_CAPABILITIES.items():
            assert ModelCapability.TEXT in caps, f"{name} should have TEXT"


# ---------------------------------------------------------------------------
# Default fallback chains
# ---------------------------------------------------------------------------


class TestDefaultFallbackChains:
    def test_vision_chain_non_empty(self):
        assert len(DEFAULT_FALLBACK_CHAINS[ModelCapability.VISION]) > 0

    def test_tools_chain_non_empty(self):
        assert len(DEFAULT_FALLBACK_CHAINS[ModelCapability.TOOLS]) > 0

    def test_audio_chain_empty(self):
        assert DEFAULT_FALLBACK_CHAINS[ModelCapability.AUDIO] == []


# ---------------------------------------------------------------------------
# Helpers to build a manager without hitting the network
# ---------------------------------------------------------------------------


def _fake_ollama_tags(*model_names: str) -> bytes:
    """Build a JSON response mimicking Ollama /api/tags."""
    models = []
    for name in model_names:
        models.append({"name": name, "size": 4 * 1024 * 1024 * 1024, "details": {"family": "test"}})
    return json.dumps({"models": models}).encode()


def _make_manager(model_names: list[str] | None = None) -> MultiModalManager:
    """Create a MultiModalManager with mocked Ollama responses."""
    if model_names is None:
        # No models available — Ollama unreachable
        with patch("urllib.request.urlopen", side_effect=ConnectionError("no ollama")):
            return MultiModalManager(ollama_url="http://localhost:11434")

    resp = MagicMock()
    resp.__enter__ = MagicMock(return_value=resp)
    resp.__exit__ = MagicMock(return_value=False)
    resp.read.return_value = _fake_ollama_tags(*model_names)
    resp.status = 200

    with patch("urllib.request.urlopen", return_value=resp):
        return MultiModalManager(ollama_url="http://localhost:11434")

# ---------------------------------------------------------------------------
# MultiModalManager — init & refresh
# ---------------------------------------------------------------------------


class TestMultiModalManagerInit:
    def test_init_no_ollama(self):
        mgr = _make_manager(None)
        assert mgr.list_available_models() == []

    def test_init_with_models(self):
        mgr = _make_manager(["llama3.1:8b", "llava:7b"])
        names = {m.name for m in mgr.list_available_models()}
        assert names == {"llama3.1:8b", "llava:7b"}

    def test_refresh_updates_models(self):
        mgr = _make_manager([])
        assert mgr.list_available_models() == []

        resp = MagicMock()
        resp.__enter__ = MagicMock(return_value=resp)
        resp.__exit__ = MagicMock(return_value=False)
        resp.read.return_value = _fake_ollama_tags("gemma2:9b")
        resp.status = 200

        with patch("urllib.request.urlopen", return_value=resp):
            mgr.refresh()

        names = {m.name for m in mgr.list_available_models()}
        assert "gemma2:9b" in names


# ---------------------------------------------------------------------------
# _detect_capabilities
# ---------------------------------------------------------------------------


class TestDetectCapabilities:
    def test_exact_match(self):
        mgr = _make_manager(None)
        caps = mgr._detect_capabilities("llava:7b")
        assert ModelCapability.VISION in caps

    def test_base_name_match(self):
        mgr = _make_manager(None)
        caps = mgr._detect_capabilities("llava:99b")
        # "llava:99b" not in table, but "llava" is
        assert ModelCapability.VISION in caps

    def test_unknown_model_defaults_to_text(self):
        mgr = _make_manager(None)
        caps = mgr._detect_capabilities("totally-unknown-model:1b")
        assert caps == {ModelCapability.TEXT, ModelCapability.STREAMING}


# ---------------------------------------------------------------------------
# get_model_capabilities / model_supports
# ---------------------------------------------------------------------------


class TestGetModelCapabilities:
    def test_available_model(self):
        mgr = _make_manager(["llava:7b"])
        caps = mgr.get_model_capabilities("llava:7b")
        assert ModelCapability.VISION in caps

    def test_unavailable_model_uses_detection(self):
        mgr = _make_manager([])
        caps = mgr.get_model_capabilities("llava:7b")
        assert ModelCapability.VISION in caps


class TestModelSupports:
    def test_supports_true(self):
        mgr = _make_manager(["llava:7b"])
        assert mgr.model_supports("llava:7b", ModelCapability.VISION) is True

    def test_supports_false(self):
        mgr = _make_manager(["deepseek-r1:7b"])
        assert mgr.model_supports("deepseek-r1:7b", ModelCapability.VISION) is False


# ---------------------------------------------------------------------------
# get_models_with_capability
# ---------------------------------------------------------------------------


class TestGetModelsWithCapability:
    def test_returns_vision_models(self):
        mgr = _make_manager(["llava:7b", "deepseek-r1:7b"])
        vision = mgr.get_models_with_capability(ModelCapability.VISION)
        names = {m.name for m in vision}
        assert "llava:7b" in names
        assert "deepseek-r1:7b" not in names

    def test_empty_when_none_available(self):
        mgr = _make_manager(["deepseek-r1:7b"])
        vision = mgr.get_models_with_capability(ModelCapability.VISION)
        assert vision == []


# ---------------------------------------------------------------------------
# get_best_model_for
# ---------------------------------------------------------------------------


class TestGetBestModelFor:
    def test_preferred_model_with_capability(self):
        mgr = _make_manager(["llava:7b", "llama3.1:8b"])
        result = mgr.get_best_model_for(ModelCapability.VISION, preferred_model="llava:7b")
        assert result == "llava:7b"

    def test_preferred_model_without_capability_uses_fallback(self):
        mgr = _make_manager(["deepseek-r1:7b", "llava:7b"])
        # preferred doesn't have VISION, fallback chain has llava:7b
        result = mgr.get_best_model_for(ModelCapability.VISION, preferred_model="deepseek-r1:7b")
        assert result == "llava:7b"

    def test_fallback_chain_order(self):
        # First in chain: llama3.2:3b
        mgr = _make_manager(["llama3.2:3b", "llava:7b"])
        result = mgr.get_best_model_for(ModelCapability.VISION)
        assert result == "llama3.2:3b"

    def test_any_capable_model_when_no_fallback(self):
        mgr = _make_manager(["moondream:1.8b"])
        mgr._fallback_chains[ModelCapability.VISION] = []  # clear chain
        result = mgr.get_best_model_for(ModelCapability.VISION)
        assert result == "moondream:1.8b"

    def test_none_when_no_capable_model(self):
        mgr = _make_manager(["deepseek-r1:7b"])
        result = mgr.get_best_model_for(ModelCapability.VISION)
        assert result is None

    def test_preferred_model_not_available_skipped(self):
        mgr = _make_manager(["llava:7b"])
        # preferred_model "llava:13b" is not in available_models
        result = mgr.get_best_model_for(ModelCapability.VISION, preferred_model="llava:13b")
        assert result == "llava:7b"


# ---------------------------------------------------------------------------
# pull_model_with_fallback (manager method)
# ---------------------------------------------------------------------------


class TestPullModelWithFallback:
    def test_already_available(self):
        mgr = _make_manager(["llama3.1:8b"])
        model, is_fallback = mgr.pull_model_with_fallback("llama3.1:8b")
        assert model == "llama3.1:8b"
        assert is_fallback is False

    def test_pull_succeeds(self):
        mgr = _make_manager([])

        pull_resp = MagicMock()
        pull_resp.__enter__ = MagicMock(return_value=pull_resp)
        pull_resp.__exit__ = MagicMock(return_value=False)
        pull_resp.status = 200

        # After pull, refresh returns the model
        refresh_resp = MagicMock()
        refresh_resp.__enter__ = MagicMock(return_value=refresh_resp)
        refresh_resp.__exit__ = MagicMock(return_value=False)
        refresh_resp.read.return_value = _fake_ollama_tags("llama3.1:8b")
        refresh_resp.status = 200

        with patch("urllib.request.urlopen", side_effect=[pull_resp, refresh_resp]):
            model, is_fallback = mgr.pull_model_with_fallback("llama3.1:8b")
        assert model == "llama3.1:8b"
        assert is_fallback is False

    def test_pull_fails_uses_capability_fallback(self):
        mgr = _make_manager(["llava:7b"])
        with patch("urllib.request.urlopen", side_effect=ConnectionError("fail")):
            model, is_fallback = mgr.pull_model_with_fallback(
                "nonexistent-vision:1b",
                capability=ModelCapability.VISION,
            )
        assert model == "llava:7b"
        assert is_fallback is True

    def test_pull_fails_uses_default_model(self):
        mgr = _make_manager([settings_ollama_model := "llama3.1:8b"])
        with (
            patch("urllib.request.urlopen", side_effect=ConnectionError("fail")),
            patch("infrastructure.models.multimodal.settings") as mock_settings,
        ):
            mock_settings.ollama_model = settings_ollama_model
            mock_settings.ollama_url = "http://localhost:11434"
            model, is_fallback = mgr.pull_model_with_fallback("missing-model:99b")
        assert model == "llama3.1:8b"
        assert is_fallback is True

    def test_auto_pull_false_skips_pull(self):
        mgr = _make_manager([])
        with patch("infrastructure.models.multimodal.settings") as mock_settings:
            mock_settings.ollama_model = "default"
            model, is_fallback = mgr.pull_model_with_fallback("missing:1b", auto_pull=False)
        # Falls through to absolute last resort
        assert model == "missing:1b"
        assert is_fallback is False

    def test_absolute_last_resort(self):
        mgr = _make_manager([])
        with (
            patch("urllib.request.urlopen", side_effect=ConnectionError("fail")),
            patch("infrastructure.models.multimodal.settings") as mock_settings,
        ):
            mock_settings.ollama_model = "not-available"
            model, is_fallback = mgr.pull_model_with_fallback("primary:1b")
        assert model == "primary:1b"
        assert is_fallback is False


# ---------------------------------------------------------------------------
# _pull_model
# ---------------------------------------------------------------------------


class TestPullModel:
    def test_pull_success(self):
        mgr = _make_manager([])

        pull_resp = MagicMock()
        pull_resp.__enter__ = MagicMock(return_value=pull_resp)
        pull_resp.__exit__ = MagicMock(return_value=False)
        pull_resp.status = 200

        refresh_resp = MagicMock()
        refresh_resp.__enter__ = MagicMock(return_value=refresh_resp)
        refresh_resp.__exit__ = MagicMock(return_value=False)
        refresh_resp.read.return_value = _fake_ollama_tags("new-model:1b")
        refresh_resp.status = 200

        with patch("urllib.request.urlopen", side_effect=[pull_resp, refresh_resp]):
            assert mgr._pull_model("new-model:1b") is True

    def test_pull_network_error(self):
        mgr = _make_manager([])
        with patch("urllib.request.urlopen", side_effect=ConnectionError("offline")):
            assert mgr._pull_model("any-model:1b") is False


# ---------------------------------------------------------------------------
# configure_fallback_chain / get_fallback_chain
# ---------------------------------------------------------------------------


class TestFallbackChainConfig:
    def test_configure_and_get(self):
        mgr = _make_manager(None)
        mgr.configure_fallback_chain(ModelCapability.VISION, ["model-a", "model-b"])
        assert mgr.get_fallback_chain(ModelCapability.VISION) == ["model-a", "model-b"]

    def test_get_returns_copy(self):
        mgr = _make_manager(None)
        chain = mgr.get_fallback_chain(ModelCapability.VISION)
        chain.append("mutated")
        assert "mutated" not in mgr.get_fallback_chain(ModelCapability.VISION)

    def test_get_empty_for_unknown(self):
        mgr = _make_manager(None)
        # AUDIO has an empty chain by default
        assert mgr.get_fallback_chain(ModelCapability.AUDIO) == []


# ---------------------------------------------------------------------------
# get_model_for_content
# ---------------------------------------------------------------------------


class TestGetModelForContent:
    def test_image_content(self):
        mgr = _make_manager(["llava:7b"])
        model, is_fb = mgr.get_model_for_content("image")
        assert model == "llava:7b"

    def test_vision_content(self):
        mgr = _make_manager(["llava:7b"])
        model, _ = mgr.get_model_for_content("vision")
        assert model == "llava:7b"

    def test_multimodal_content(self):
        mgr = _make_manager(["llava:7b"])
        model, _ = mgr.get_model_for_content("multimodal")
        assert model == "llava:7b"

    def test_audio_content(self):
        mgr = _make_manager(["llama3.1:8b"])
        with patch("infrastructure.models.multimodal.settings") as mock_settings:
            mock_settings.ollama_model = "llama3.1:8b"
            mock_settings.ollama_url = "http://localhost:11434"
            model, _ = mgr.get_model_for_content("audio")
        assert model == "llama3.1:8b"

    def test_text_content(self):
        mgr = _make_manager(["llama3.1:8b"])
        with patch("infrastructure.models.multimodal.settings") as mock_settings:
            mock_settings.ollama_model = "llama3.1:8b"
            mock_settings.ollama_url = "http://localhost:11434"
            model, _ = mgr.get_model_for_content("text")
        assert model == "llama3.1:8b"

    def test_preferred_model_respected(self):
        mgr = _make_manager(["llama3.2:3b", "llava:7b"])
        model, _ = mgr.get_model_for_content("image", preferred_model="llama3.2:3b")
        assert model == "llama3.2:3b"

    def test_case_insensitive(self):
        mgr = _make_manager(["llava:7b"])
        model, _ = mgr.get_model_for_content("IMAGE")
        assert model == "llava:7b"


# ---------------------------------------------------------------------------
# Module-level convenience functions
# ---------------------------------------------------------------------------


class TestConvenienceFunctions:
    def _patch_manager(self, mgr):
        return patch(
            "infrastructure.models.multimodal._multimodal_manager",
            mgr,
        )

    def test_get_model_for_capability(self):
        mgr = _make_manager(["llava:7b"])
        with self._patch_manager(mgr):
            result = get_model_for_capability(ModelCapability.VISION)
        assert result == "llava:7b"

    def test_pull_model_with_fallback_convenience(self):
        mgr = _make_manager(["llama3.1:8b"])
        with self._patch_manager(mgr):
            model, is_fb = pull_model_with_fallback("llama3.1:8b")
        assert model == "llama3.1:8b"
        assert is_fb is False

    def test_model_supports_vision_true(self):
        mgr = _make_manager(["llava:7b"])
        with self._patch_manager(mgr):
            assert model_supports_vision("llava:7b") is True

    def test_model_supports_vision_false(self):
        mgr = _make_manager(["llama3.1:8b"])
        with self._patch_manager(mgr):
            assert model_supports_vision("llama3.1:8b") is False

    def test_model_supports_tools_true(self):
        mgr = _make_manager(["llama3.1:8b"])
        with self._patch_manager(mgr):
            assert model_supports_tools("llama3.1:8b") is True

    def test_model_supports_tools_false(self):
        mgr = _make_manager(["deepseek-r1:7b"])
        with self._patch_manager(mgr):
            assert model_supports_tools("deepseek-r1:7b") is False


# ---------------------------------------------------------------------------
# ModelInfo in available_models — size_mb and description populated
# ---------------------------------------------------------------------------


class TestModelInfoPopulation:
    def test_size_and_description(self):
        mgr = _make_manager(["llama3.1:8b"])
        info = mgr._available_models["llama3.1:8b"]
        assert info.is_available is True
        assert info.is_pulled is True
        assert info.size_mb == 4 * 1024  # 4 GiB in MiB
        assert info.description == "test"
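Taken together, the tests above trace the intended call pattern: ask for a capability, walk the fallback chain, and pull a model only as a last step. A short usage sketch, assuming an Ollama instance at the default URL:

from infrastructure.models.multimodal import ModelCapability, MultiModalManager

mgr = MultiModalManager(ollama_url="http://localhost:11434")

# Prefer a specific model, but accept anything vision-capable from the chain.
model = mgr.get_best_model_for(ModelCapability.VISION, preferred_model="llava:7b")
if model is None:
    # Nothing vision-capable is pulled yet; pull with capability fallback.
    model, used_fallback = mgr.pull_model_with_fallback(
        "llava:7b", capability=ModelCapability.VISION
    )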
163 tests/loop/test_loop_guard_validate.py Normal file
@@ -0,0 +1,163 @@
"""Tests for cycle_result.json validation in loop_guard.

Covers validate_cycle_result(), _load_cycle_result(), and _is_issue_open().
"""

from __future__ import annotations

import json
import time
from pathlib import Path
from unittest.mock import patch

import pytest
import scripts.loop_guard as lg


@pytest.fixture(autouse=True)
def _isolate(tmp_path, monkeypatch):
    """Redirect loop_guard paths to tmp_path for isolation."""
    monkeypatch.setattr(lg, "CYCLE_RESULT_FILE", tmp_path / "cycle_result.json")
    monkeypatch.setattr(lg, "CYCLE_DURATION", 300)
    monkeypatch.setattr(lg, "GITEA_API", "http://test:3000/api/v1")
    monkeypatch.setattr(lg, "REPO_SLUG", "owner/repo")


def _write_cr(tmp_path, data: dict, age_seconds: float = 0) -> Path:
    """Write a cycle_result.json and optionally backdate it."""
    p = tmp_path / "cycle_result.json"
    p.write_text(json.dumps(data))
    if age_seconds:
        mtime = time.time() - age_seconds
        import os

        os.utime(p, (mtime, mtime))
    return p


# --- _load_cycle_result ---


def test_load_cycle_result_missing(tmp_path):
    assert lg._load_cycle_result() == {}


def test_load_cycle_result_valid(tmp_path):
    _write_cr(tmp_path, {"issue": 42, "type": "fix"})
    assert lg._load_cycle_result() == {"issue": 42, "type": "fix"}


def test_load_cycle_result_markdown_fenced(tmp_path):
    p = tmp_path / "cycle_result.json"
    p.write_text('```json\n{"issue": 99}\n```')
    assert lg._load_cycle_result() == {"issue": 99}


def test_load_cycle_result_malformed(tmp_path):
    p = tmp_path / "cycle_result.json"
    p.write_text("not json at all")
    assert lg._load_cycle_result() == {}


# --- _is_issue_open ---


def test_is_issue_open_true(monkeypatch):
    monkeypatch.setattr(lg, "_get_token", lambda: "tok")
    resp_data = json.dumps({"state": "open"}).encode()

    class FakeResp:
        def read(self):
            return resp_data

        def __enter__(self):
            return self

        def __exit__(self, *a):
            pass

    with patch("urllib.request.urlopen", return_value=FakeResp()):
        assert lg._is_issue_open(42) is True


def test_is_issue_open_closed(monkeypatch):
    monkeypatch.setattr(lg, "_get_token", lambda: "tok")
    resp_data = json.dumps({"state": "closed"}).encode()

    class FakeResp:
        def read(self):
            return resp_data

        def __enter__(self):
            return self

        def __exit__(self, *a):
            pass

    with patch("urllib.request.urlopen", return_value=FakeResp()):
        assert lg._is_issue_open(42) is False


def test_is_issue_open_no_token(monkeypatch):
    monkeypatch.setattr(lg, "_get_token", lambda: "")
    assert lg._is_issue_open(42) is None


def test_is_issue_open_api_error(monkeypatch):
    monkeypatch.setattr(lg, "_get_token", lambda: "tok")
    with patch("urllib.request.urlopen", side_effect=OSError("timeout")):
        assert lg._is_issue_open(42) is None


# --- validate_cycle_result ---


def test_validate_no_file(tmp_path):
    """No file → returns False, no crash."""
    assert lg.validate_cycle_result() is False


def test_validate_fresh_file_open_issue(tmp_path, monkeypatch):
    """Fresh file with open issue → kept."""
    _write_cr(tmp_path, {"issue": 10})
    monkeypatch.setattr(lg, "_is_issue_open", lambda n: True)
    assert lg.validate_cycle_result() is False
    assert (tmp_path / "cycle_result.json").exists()


def test_validate_stale_file_removed(tmp_path):
    """File older than 2× CYCLE_DURATION → removed."""
    _write_cr(tmp_path, {"issue": 10}, age_seconds=700)
    assert lg.validate_cycle_result() is True
    assert not (tmp_path / "cycle_result.json").exists()


def test_validate_fresh_file_closed_issue(tmp_path, monkeypatch):
    """Fresh file referencing closed issue → removed."""
    _write_cr(tmp_path, {"issue": 10})
    monkeypatch.setattr(lg, "_is_issue_open", lambda n: False)
    assert lg.validate_cycle_result() is True
    assert not (tmp_path / "cycle_result.json").exists()


def test_validate_api_failure_keeps_file(tmp_path, monkeypatch):
    """API failure → file kept (graceful degradation)."""
    _write_cr(tmp_path, {"issue": 10})
    monkeypatch.setattr(lg, "_is_issue_open", lambda n: None)
    assert lg.validate_cycle_result() is False
    assert (tmp_path / "cycle_result.json").exists()


def test_validate_no_issue_field(tmp_path):
    """File without issue field → kept (only age check applies)."""
    _write_cr(tmp_path, {"type": "fix"})
    assert lg.validate_cycle_result() is False
    assert (tmp_path / "cycle_result.json").exists()


def test_validate_stale_threshold_boundary(tmp_path, monkeypatch):
    """File just under threshold → kept (not stale yet)."""
    _write_cr(tmp_path, {"issue": 10}, age_seconds=599)
    monkeypatch.setattr(lg, "_is_issue_open", lambda n: True)
    assert lg.validate_cycle_result() is False
    assert (tmp_path / "cycle_result.json").exists()
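Read together, these cases fix the validator's contract: remove the file when it is stale or points at a closed issue, keep it otherwise, and degrade gracefully when the issue API is unreachable. A hedged reconstruction of that behavior (a sketch consistent with the tests, not the actual scripts/loop_guard.py; the fence-stripping that _load_cycle_result performs is elided here):

import json
import time
from pathlib import Path


def validate_cycle_result_sketch(path: Path, cycle_duration: int, is_issue_open) -> bool:
    """Return True only when a bad cycle_result.json was removed."""
    if not path.exists():
        return False  # nothing to validate
    if time.time() - path.stat().st_mtime > 2 * cycle_duration:
        path.unlink()  # stale: older than two full cycles
        return True
    try:
        issue = json.loads(path.read_text()).get("issue")
    except (ValueError, OSError):
        issue = None
    if issue is not None and is_issue_open(issue) is False:
        path.unlink()  # references an already-closed issue
        return True
    return False  # fresh, open, or API result unknown (None): keep the file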
327 tests/spark/test_advisor.py Normal file
@@ -0,0 +1,327 @@
"""Comprehensive tests for spark.advisor module.

Covers all advisory-generation helpers:
- _check_failure_patterns (grouped agent failures)
- _check_agent_performance (top / struggling agents)
- _check_bid_patterns (spread + high average)
- _check_prediction_accuracy (low / high accuracy)
- _check_system_activity (idle / tasks-posted-but-no-completions)
- generate_advisories (integration, sorting, min-events guard)
"""

import json

from spark.advisor import (
    _MIN_EVENTS,
    Advisory,
    _check_agent_performance,
    _check_bid_patterns,
    _check_failure_patterns,
    _check_prediction_accuracy,
    _check_system_activity,
    generate_advisories,
)
from spark.memory import record_event

# ── Advisory dataclass ─────────────────────────────────────────────────────


class TestAdvisoryDataclass:
    def test_defaults(self):
        a = Advisory(
            category="test",
            priority=0.5,
            title="T",
            detail="D",
            suggested_action="A",
        )
        assert a.subject is None
        assert a.evidence_count == 0

    def test_all_fields(self):
        a = Advisory(
            category="c",
            priority=0.9,
            title="T",
            detail="D",
            suggested_action="A",
            subject="agent-1",
            evidence_count=7,
        )
        assert a.subject == "agent-1"
        assert a.evidence_count == 7


# ── _check_failure_patterns ────────────────────────────────────────────────


class TestCheckFailurePatterns:
    def test_no_failures_returns_empty(self):
        assert _check_failure_patterns() == []

    def test_single_failure_not_enough(self):
        record_event("task_failed", "once", agent_id="a1", task_id="t1")
        assert _check_failure_patterns() == []

    def test_two_failures_triggers_advisory(self):
        for i in range(2):
            record_event("task_failed", f"fail {i}", agent_id="agent-abc", task_id=f"t{i}")
        results = _check_failure_patterns()
        assert len(results) == 1
        assert results[0].category == "failure_prevention"
        assert results[0].subject == "agent-abc"
        assert results[0].evidence_count == 2

    def test_priority_scales_with_count(self):
        for i in range(5):
            record_event("task_failed", f"fail {i}", agent_id="agent-x", task_id=f"f{i}")
        results = _check_failure_patterns()
        assert len(results) == 1
        assert results[0].priority > 0.5

    def test_priority_capped_at_one(self):
        for i in range(20):
            record_event("task_failed", f"fail {i}", agent_id="agent-y", task_id=f"ff{i}")
        results = _check_failure_patterns()
        assert results[0].priority <= 1.0

    def test_multiple_agents_separate_advisories(self):
        for i in range(3):
            record_event("task_failed", f"a fail {i}", agent_id="agent-a", task_id=f"a{i}")
            record_event("task_failed", f"b fail {i}", agent_id="agent-b", task_id=f"b{i}")
        results = _check_failure_patterns()
        assert len(results) == 2
        subjects = {r.subject for r in results}
        assert subjects == {"agent-a", "agent-b"}

    def test_events_without_agent_id_skipped(self):
        for i in range(3):
            record_event("task_failed", f"no-agent {i}", task_id=f"na{i}")
        assert _check_failure_patterns() == []


# ── _check_agent_performance ───────────────────────────────────────────────


class TestCheckAgentPerformance:
    def test_no_events_returns_empty(self):
        assert _check_agent_performance() == []

    def test_too_few_tasks_skipped(self):
        record_event("task_completed", "done", agent_id="agent-1", task_id="t1")
        assert _check_agent_performance() == []

    def test_high_performer_detected(self):
        for i in range(4):
            record_event("task_completed", f"done {i}", agent_id="agent-star", task_id=f"s{i}")
        results = _check_agent_performance()
        perf = [r for r in results if r.category == "agent_performance"]
        assert len(perf) == 1
        assert "excels" in perf[0].title
        assert perf[0].subject == "agent-star"

    def test_struggling_agent_detected(self):
        # 1 success, 4 failures = 20% rate
        record_event("task_completed", "ok", agent_id="agent-bad", task_id="ok1")
        for i in range(4):
            record_event("task_failed", f"nope {i}", agent_id="agent-bad", task_id=f"bad{i}")
        results = _check_agent_performance()
        struggling = [r for r in results if "struggling" in r.title]
        assert len(struggling) == 1
        assert struggling[0].priority > 0.5

    def test_middling_agent_no_advisory(self):
        # 50% success rate — neither excelling nor struggling
        for i in range(3):
            record_event("task_completed", f"ok {i}", agent_id="agent-mid", task_id=f"m{i}")
        for i in range(3):
            record_event("task_failed", f"nope {i}", agent_id="agent-mid", task_id=f"mf{i}")
        results = _check_agent_performance()
        mid_advisories = [r for r in results if r.subject == "agent-mid"]
        assert mid_advisories == []

    def test_events_without_agent_id_skipped(self):
        for i in range(5):
            record_event("task_completed", f"done {i}", task_id=f"no-agent-{i}")
        assert _check_agent_performance() == []


# ── _check_bid_patterns ────────────────────────────────────────────────────


class TestCheckBidPatterns:
    def _record_bids(self, amounts):
        for i, sats in enumerate(amounts):
            record_event(
                "bid_submitted",
                f"bid {i}",
                agent_id=f"a{i}",
                task_id=f"bt{i}",
                data=json.dumps({"bid_sats": sats}),
            )

    def test_too_few_bids_returns_empty(self):
        self._record_bids([10, 20, 30])
        assert _check_bid_patterns() == []

    def test_wide_spread_detected(self):
        # avg=50, spread=90 > 50*1.5=75
        self._record_bids([5, 10, 50, 90, 95])
        results = _check_bid_patterns()
        spread_advisories = [r for r in results if "spread" in r.title.lower()]
        assert len(spread_advisories) == 1

    def test_high_average_detected(self):
        self._record_bids([80, 85, 90, 95, 100])
        results = _check_bid_patterns()
        high_avg = [r for r in results if "High average" in r.title]
        assert len(high_avg) == 1

    def test_normal_bids_no_advisory(self):
        # Tight spread, low average
        self._record_bids([30, 32, 28, 31, 29])
        results = _check_bid_patterns()
        assert results == []

    def test_invalid_json_data_skipped(self):
        for i in range(6):
            record_event(
                "bid_submitted",
                f"bid {i}",
                agent_id=f"a{i}",
                task_id=f"inv{i}",
                data="not-json",
            )
        results = _check_bid_patterns()
        assert results == []

    def test_zero_bid_sats_skipped(self):
        for i in range(6):
            record_event(
                "bid_submitted",
                f"bid {i}",
                data=json.dumps({"bid_sats": 0}),
            )
        assert _check_bid_patterns() == []

    def test_both_spread_and_high_avg(self):
        # Wide spread AND high average: avg=86, spread=150 > 86*1.5=129
        self._record_bids([5, 80, 90, 100, 155])
        results = _check_bid_patterns()
        assert len(results) == 2


# ── _check_prediction_accuracy ─────────────────────────────────────────────


class TestCheckPredictionAccuracy:
    def test_too_few_evaluations(self):
        assert _check_prediction_accuracy() == []

    def test_low_accuracy_advisory(self):
        from spark.eidos import evaluate_prediction, predict_task_outcome

        for i in range(4):
            predict_task_outcome(f"pa-{i}", "task", ["agent-a"])
            evaluate_prediction(f"pa-{i}", "agent-wrong", task_succeeded=False, winning_bid=999)
        results = _check_prediction_accuracy()
        low = [r for r in results if "Low prediction" in r.title]
        assert len(low) == 1
        assert low[0].priority > 0.5

    def test_high_accuracy_advisory(self):
        from spark.eidos import evaluate_prediction, predict_task_outcome

        for i in range(4):
            predict_task_outcome(f"ph-{i}", "task", ["agent-a"])
            evaluate_prediction(f"ph-{i}", "agent-a", task_succeeded=True, winning_bid=30)
        results = _check_prediction_accuracy()
        high = [r for r in results if "Strong prediction" in r.title]
        assert len(high) == 1

    def test_middling_accuracy_no_advisory(self):
        from spark.eidos import evaluate_prediction, predict_task_outcome

        # Mix of correct and incorrect to get ~0.5 accuracy
        for i in range(3):
            predict_task_outcome(f"pm-{i}", "task", ["agent-a"])
            evaluate_prediction(f"pm-{i}", "agent-a", task_succeeded=True, winning_bid=30)
        for i in range(3):
            predict_task_outcome(f"pmx-{i}", "task", ["agent-a"])
            evaluate_prediction(f"pmx-{i}", "agent-wrong", task_succeeded=False, winning_bid=999)
        results = _check_prediction_accuracy()
        # avg should be middling — neither low nor high advisory
        low = [r for r in results if "Low" in r.title]
        high = [r for r in results if "Strong" in r.title]
        # At least one side should be empty (depends on exact accuracy)
        assert not (low and high)


# ── _check_system_activity ─────────────────────────────────────────────────


class TestCheckSystemActivity:
    def test_no_events_idle_advisory(self):
        results = _check_system_activity()
        assert len(results) == 1
        assert "No swarm activity" in results[0].title

    def test_has_events_no_idle_advisory(self):
        record_event("task_completed", "done", task_id="t1")
        results = _check_system_activity()
        idle = [r for r in results if "No swarm activity" in r.title]
        assert idle == []

    def test_tasks_posted_but_none_completing(self):
        for i in range(5):
            record_event("task_posted", f"posted {i}", task_id=f"tp{i}")
        results = _check_system_activity()
        stalled = [r for r in results if "none completing" in r.title.lower()]
        assert len(stalled) == 1
        assert stalled[0].evidence_count >= 4

    def test_posts_with_completions_no_stalled_advisory(self):
        for i in range(5):
            record_event("task_posted", f"posted {i}", task_id=f"tpx{i}")
        record_event("task_completed", "done", task_id="tpx0")
        results = _check_system_activity()
        stalled = [r for r in results if "none completing" in r.title.lower()]
        assert stalled == []


# ── generate_advisories (integration) ──────────────────────────────────────


class TestGenerateAdvisories:
    def test_below_min_events_returns_insufficient(self):
        advisories = generate_advisories()
        assert len(advisories) >= 1
        assert advisories[0].title == "Insufficient data"
        assert advisories[0].evidence_count == 0

    def test_exactly_at_min_events_proceeds(self):
        for i in range(_MIN_EVENTS):
            record_event("task_posted", f"ev {i}", task_id=f"min{i}")
        advisories = generate_advisories()
        insufficient = [a for a in advisories if a.title == "Insufficient data"]
        assert insufficient == []

    def test_results_sorted_by_priority_descending(self):
        for i in range(5):
            record_event("task_posted", f"posted {i}", task_id=f"sp{i}")
        for i in range(3):
            record_event("task_failed", f"fail {i}", agent_id="agent-fail", task_id=f"sf{i}")
        advisories = generate_advisories()
        if len(advisories) >= 2:
            for i in range(len(advisories) - 1):
                assert advisories[i].priority >= advisories[i + 1].priority

    def test_multiple_categories_produced(self):
        # Create failures + posted-no-completions
        for i in range(5):
            record_event("task_failed", f"fail {i}", agent_id="agent-bad", task_id=f"mf{i}")
        for i in range(5):
            record_event("task_posted", f"posted {i}", task_id=f"mp{i}")
        advisories = generate_advisories()
        categories = {a.category for a in advisories}
        assert len(categories) >= 2
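The integration tests imply a simple consumption pattern for the advisor. A short usage sketch, assuming events were already recorded via spark.memory.record_event:

from spark.advisor import generate_advisories

advisories = generate_advisories()  # sorted by priority, highest first
for a in advisories:
    print(f"[{a.priority:.2f}] {a.category}: {a.title} -> {a.suggested_action}")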
299 tests/spark/test_eidos.py Normal file
@@ -0,0 +1,299 @@
"""Comprehensive tests for spark.eidos module.

Covers:
- _get_conn (schema creation, WAL, busy timeout)
- predict_task_outcome (baseline, with history, edge cases)
- evaluate_prediction (correct, wrong, missing, double-eval)
- _compute_accuracy (all components, edge cases)
- get_predictions (filters: task_id, evaluated_only, limit)
- get_accuracy_stats (empty, after evaluations)
"""

import pytest

from spark.eidos import (
    Prediction,
    _compute_accuracy,
    evaluate_prediction,
    get_accuracy_stats,
    get_predictions,
    predict_task_outcome,
)

# ── Prediction dataclass ──────────────────────────────────────────────────


class TestPredictionDataclass:
    def test_defaults(self):
        p = Prediction(
            id="1",
            task_id="t1",
            prediction_type="outcome",
            predicted_value="{}",
            actual_value=None,
            accuracy=None,
            created_at="2026-01-01",
            evaluated_at=None,
        )
        assert p.actual_value is None
        assert p.accuracy is None


# ── predict_task_outcome ──────────────────────────────────────────────────


class TestPredictTaskOutcome:
    def test_baseline_no_history(self):
        result = predict_task_outcome("t-base", "Do stuff", ["a1", "a2"])
        assert result["likely_winner"] == "a1"
        assert result["success_probability"] == 0.7
        assert result["estimated_bid_range"] == [20, 80]
        assert "baseline" in result["reasoning"]
        assert "prediction_id" in result

    def test_empty_candidates(self):
        result = predict_task_outcome("t-empty", "Nothing", [])
        assert result["likely_winner"] is None

    def test_history_selects_best_agent(self):
        history = {
            "a1": {"success_rate": 0.3, "avg_winning_bid": 40},
            "a2": {"success_rate": 0.95, "avg_winning_bid": 50},
        }
        result = predict_task_outcome("t-hist", "Task", ["a1", "a2"], agent_history=history)
        assert result["likely_winner"] == "a2"
        assert result["success_probability"] > 0.7

    def test_history_agent_not_in_candidates_ignored(self):
        history = {
            "a-outside": {"success_rate": 0.99, "avg_winning_bid": 10},
        }
        result = predict_task_outcome("t-out", "Task", ["a1"], agent_history=history)
        # a-outside not in candidates, so falls back to baseline
        assert result["likely_winner"] == "a1"

    def test_history_adjusts_bid_range(self):
        history = {
            "a1": {"success_rate": 0.5, "avg_winning_bid": 100},
            "a2": {"success_rate": 0.8, "avg_winning_bid": 200},
        }
        result = predict_task_outcome("t-bid", "Task", ["a1", "a2"], agent_history=history)
        low, high = result["estimated_bid_range"]
        assert low == max(1, int(100 * 0.8))
        assert high == int(200 * 1.2)

    def test_history_with_zero_avg_bid_skipped(self):
        history = {
            "a1": {"success_rate": 0.8, "avg_winning_bid": 0},
        }
        result = predict_task_outcome("t-zero-bid", "Task", ["a1"], agent_history=history)
        # Zero avg_winning_bid should be skipped, keep default range
        assert result["estimated_bid_range"] == [20, 80]

    def test_prediction_stored_in_db(self):
        result = predict_task_outcome("t-db", "Store me", ["a1"])
        preds = get_predictions(task_id="t-db")
        assert len(preds) == 1
        assert preds[0].id == result["prediction_id"]
        assert preds[0].prediction_type == "outcome"

    def test_success_probability_clamped(self):
        history = {
            "a1": {"success_rate": 1.5, "avg_winning_bid": 50},
        }
        result = predict_task_outcome("t-clamp", "Task", ["a1"], agent_history=history)
        assert result["success_probability"] <= 1.0


# ── evaluate_prediction ───────────────────────────────────────────────────


class TestEvaluatePrediction:
    def test_correct_prediction(self):
        predict_task_outcome("t-eval-ok", "Task", ["a1"])
        result = evaluate_prediction("t-eval-ok", "a1", task_succeeded=True, winning_bid=30)
        assert result is not None
        assert 0.0 <= result["accuracy"] <= 1.0
        assert result["actual"]["winner"] == "a1"
        assert result["actual"]["succeeded"] is True

    def test_wrong_prediction(self):
        predict_task_outcome("t-eval-wrong", "Task", ["a1"])
        result = evaluate_prediction("t-eval-wrong", "a2", task_succeeded=False)
        assert result is not None
        assert result["accuracy"] < 1.0

    def test_no_prediction_returns_none(self):
        result = evaluate_prediction("nonexistent", "a1", task_succeeded=True)
        assert result is None

    def test_double_evaluation_returns_none(self):
        predict_task_outcome("t-double", "Task", ["a1"])
        evaluate_prediction("t-double", "a1", task_succeeded=True)
        result = evaluate_prediction("t-double", "a1", task_succeeded=True)
        assert result is None

    def test_evaluation_updates_db(self):
        predict_task_outcome("t-upd", "Task", ["a1"])
        evaluate_prediction("t-upd", "a1", task_succeeded=True, winning_bid=50)
        preds = get_predictions(task_id="t-upd", evaluated_only=True)
        assert len(preds) == 1
        assert preds[0].accuracy is not None
        assert preds[0].actual_value is not None
        assert preds[0].evaluated_at is not None

    def test_winning_bid_none(self):
        predict_task_outcome("t-nobid", "Task", ["a1"])
        result = evaluate_prediction("t-nobid", "a1", task_succeeded=True)
        assert result is not None
        assert result["actual"]["winning_bid"] is None


# ── _compute_accuracy ─────────────────────────────────────────────────────


class TestComputeAccuracy:
    def test_perfect_match(self):
        predicted = {
            "likely_winner": "a1",
            "success_probability": 1.0,
            "estimated_bid_range": [20, 40],
        }
        actual = {"winner": "a1", "succeeded": True, "winning_bid": 30}
        assert _compute_accuracy(predicted, actual) == pytest.approx(1.0, abs=0.01)

    def test_all_wrong(self):
        predicted = {
            "likely_winner": "a1",
            "success_probability": 1.0,
            "estimated_bid_range": [10, 20],
        }
        actual = {"winner": "a2", "succeeded": False, "winning_bid": 100}
        assert _compute_accuracy(predicted, actual) < 0.3

    def test_no_winner_in_predicted(self):
        predicted = {"success_probability": 0.5, "estimated_bid_range": [20, 40]}
        actual = {"winner": "a1", "succeeded": True, "winning_bid": 30}
        acc = _compute_accuracy(predicted, actual)
        # Winner component skipped, success + bid counted
        assert 0.0 <= acc <= 1.0

    def test_no_winner_in_actual(self):
        predicted = {"likely_winner": "a1", "success_probability": 0.5}
        actual = {"succeeded": True}
        acc = _compute_accuracy(predicted, actual)
        assert 0.0 <= acc <= 1.0

    def test_bid_outside_range_partial_credit(self):
        predicted = {
            "likely_winner": "a1",
            "success_probability": 1.0,
            "estimated_bid_range": [20, 40],
        }
        # Bid just outside range
        actual = {"winner": "a1", "succeeded": True, "winning_bid": 45}
        acc = _compute_accuracy(predicted, actual)
        assert 0.5 < acc < 1.0

    def test_bid_far_outside_range(self):
        predicted = {
            "likely_winner": "a1",
            "success_probability": 1.0,
            "estimated_bid_range": [20, 40],
        }
        actual = {"winner": "a1", "succeeded": True, "winning_bid": 500}
        acc = _compute_accuracy(predicted, actual)
        assert acc < 1.0

    def test_no_actual_bid(self):
        predicted = {
            "likely_winner": "a1",
            "success_probability": 0.7,
            "estimated_bid_range": [20, 40],
        }
        actual = {"winner": "a1", "succeeded": True, "winning_bid": None}
        acc = _compute_accuracy(predicted, actual)
        # Bid component skipped — only winner + success
        assert 0.0 <= acc <= 1.0

    def test_failed_prediction_low_probability(self):
        predicted = {"success_probability": 0.1}
        actual = {"succeeded": False}
        acc = _compute_accuracy(predicted, actual)
        # Predicted low success and task failed → high accuracy
        assert acc > 0.8


# ── get_predictions ───────────────────────────────────────────────────────


class TestGetPredictions:
    def test_empty_db(self):
        assert get_predictions() == []

    def test_filter_by_task_id(self):
        predict_task_outcome("t-filter1", "A", ["a1"])
        predict_task_outcome("t-filter2", "B", ["a2"])
        preds = get_predictions(task_id="t-filter1")
        assert len(preds) == 1
        assert preds[0].task_id == "t-filter1"

    def test_evaluated_only(self):
        predict_task_outcome("t-eo1", "A", ["a1"])
        predict_task_outcome("t-eo2", "B", ["a1"])
        evaluate_prediction("t-eo1", "a1", task_succeeded=True)
        preds = get_predictions(evaluated_only=True)
        assert len(preds) == 1
        assert preds[0].task_id == "t-eo1"

    def test_limit(self):
        for i in range(10):
            predict_task_outcome(f"t-lim{i}", "X", ["a1"])
        preds = get_predictions(limit=3)
        assert len(preds) == 3

    def test_combined_filters(self):
        predict_task_outcome("t-combo", "A", ["a1"])
        evaluate_prediction("t-combo", "a1", task_succeeded=True)
        predict_task_outcome("t-combo2", "B", ["a1"])
        preds = get_predictions(task_id="t-combo", evaluated_only=True)
        assert len(preds) == 1

    def test_order_by_created_desc(self):
        for i in range(3):
            predict_task_outcome(f"t-ord{i}", f"Task {i}", ["a1"])
        preds = get_predictions()
        # Most recent first
        assert preds[0].task_id == "t-ord2"


# ── get_accuracy_stats ────────────────────────────────────────────────────


class TestGetAccuracyStats:
    def test_empty(self):
        stats = get_accuracy_stats()
        assert stats["total_predictions"] == 0
        assert stats["evaluated"] == 0
        assert stats["pending"] == 0
        assert stats["avg_accuracy"] == 0.0
        assert stats["min_accuracy"] == 0.0
        assert stats["max_accuracy"] == 0.0

    def test_with_unevaluated(self):
        predict_task_outcome("t-uneval", "X", ["a1"])
        stats = get_accuracy_stats()
        assert stats["total_predictions"] == 1
        assert stats["evaluated"] == 0
        assert stats["pending"] == 1

    def test_with_evaluations(self):
        for i in range(3):
            predict_task_outcome(f"t-stats{i}", "X", ["a1"])
            evaluate_prediction(f"t-stats{i}", "a1", task_succeeded=True, winning_bid=30)
        stats = get_accuracy_stats()
        assert stats["total_predictions"] == 3
        assert stats["evaluated"] == 3
        assert stats["pending"] == 0
        assert stats["avg_accuracy"] > 0.0
        assert stats["min_accuracy"] <= stats["avg_accuracy"] <= stats["max_accuracy"]
389
tests/spark/test_memory.py
Normal file
@@ -0,0 +1,389 @@
"""Comprehensive tests for spark.memory module.

Covers:
- SparkEvent / SparkMemory dataclasses
- _get_conn (schema creation, WAL, busy timeout, idempotent indexes)
- score_importance (all event types, boosts, edge cases)
- record_event (auto-importance, explicit importance, invalid JSON, swarm bridge)
- get_events (all filters, ordering, limit)
- count_events (total, by type)
- store_memory (with/without expiry)
- get_memories (all filters)
- count_memories (total, by type)
"""

import json

import pytest

from spark.memory import (
    IMPORTANCE_HIGH,
    IMPORTANCE_LOW,
    IMPORTANCE_MEDIUM,
    SparkEvent,
    SparkMemory,
    _get_conn,
    count_events,
    count_memories,
    get_events,
    get_memories,
    record_event,
    score_importance,
    store_memory,
)

# ── Constants ─────────────────────────────────────────────────────────────


class TestConstants:
    def test_importance_ordering(self):
        assert IMPORTANCE_LOW < IMPORTANCE_MEDIUM < IMPORTANCE_HIGH


# ── Dataclasses ───────────────────────────────────────────────────────────


class TestSparkEventDataclass:
    def test_all_fields(self):
        ev = SparkEvent(
            id="1",
            event_type="task_posted",
            agent_id="a1",
            task_id="t1",
            description="Test",
            data="{}",
            importance=0.5,
            created_at="2026-01-01",
        )
        assert ev.event_type == "task_posted"
        assert ev.agent_id == "a1"

    def test_nullable_fields(self):
        ev = SparkEvent(
            id="2",
            event_type="task_posted",
            agent_id=None,
            task_id=None,
            description="",
            data="{}",
            importance=0.5,
            created_at="2026-01-01",
        )
        assert ev.agent_id is None
        assert ev.task_id is None


class TestSparkMemoryDataclass:
    def test_all_fields(self):
        mem = SparkMemory(
            id="1",
            memory_type="pattern",
            subject="system",
            content="Test insight",
            confidence=0.8,
            source_events=5,
            created_at="2026-01-01",
            expires_at="2026-12-31",
        )
        assert mem.memory_type == "pattern"
        assert mem.expires_at == "2026-12-31"

    def test_nullable_expires(self):
        mem = SparkMemory(
            id="2",
            memory_type="anomaly",
            subject="agent-1",
            content="Odd behavior",
            confidence=0.6,
            source_events=3,
            created_at="2026-01-01",
            expires_at=None,
        )
        assert mem.expires_at is None


# ── _get_conn ─────────────────────────────────────────────────────────────


class TestGetConn:
    def test_creates_tables(self):
        with _get_conn() as conn:
            tables = conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
            names = {r["name"] for r in tables}
            assert "spark_events" in names
            assert "spark_memories" in names

    def test_wal_mode(self):
        with _get_conn() as conn:
            mode = conn.execute("PRAGMA journal_mode").fetchone()[0]
            assert mode == "wal"

    def test_busy_timeout(self):
        with _get_conn() as conn:
            timeout = conn.execute("PRAGMA busy_timeout").fetchone()[0]
            assert timeout == 5000

    def test_idempotent(self):
        # Calling _get_conn twice should not raise
        with _get_conn():
            pass
        with _get_conn():
            pass


# ── score_importance ──────────────────────────────────────────────────────


class TestScoreImportance:
    @pytest.mark.parametrize(
        "event_type,expected_min,expected_max",
        [
            ("task_posted", 0.3, 0.5),
            ("bid_submitted", 0.1, 0.3),
            ("task_assigned", 0.4, 0.6),
            ("task_completed", 0.5, 0.7),
            ("task_failed", 0.9, 1.0),
            ("agent_joined", 0.4, 0.6),
            ("prediction_result", 0.6, 0.8),
        ],
    )
    def test_base_scores(self, event_type, expected_min, expected_max):
        score = score_importance(event_type, {})
        assert expected_min <= score <= expected_max

    def test_unknown_event_default(self):
        assert score_importance("never_heard_of_this", {}) == 0.5

    def test_failure_boost(self):
        score = score_importance("task_failed", {})
        assert score == 1.0

    def test_high_bid_boost(self):
        low = score_importance("bid_submitted", {"bid_sats": 10})
        high = score_importance("bid_submitted", {"bid_sats": 100})
        assert high > low
        assert high <= 1.0

    def test_high_bid_on_failure(self):
        score = score_importance("task_failed", {"bid_sats": 100})
        assert score == 1.0  # capped at 1.0

    def test_score_always_rounded(self):
        score = score_importance("bid_submitted", {"bid_sats": 100})
        assert score == round(score, 2)
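
# A minimal sketch of the scoring rules the tests above exercise. The base
# values are anchored by the parametrized ranges and by the invalid-JSON test
# below (task_posted base 0.4); the bid-boost formula and cap are assumptions,
# not the actual spark.memory implementation.
_BASE_IMPORTANCE_SKETCH = {
    "task_posted": 0.4,
    "bid_submitted": 0.2,
    "task_assigned": 0.5,
    "task_completed": 0.6,
    "task_failed": 1.0,
    "agent_joined": 0.5,
    "prediction_result": 0.7,
}


def score_importance_sketch(event_type: str, data: dict) -> float:
    # Unknown event types fall back to a neutral 0.5.
    score = _BASE_IMPORTANCE_SKETCH.get(event_type, 0.5)
    # Larger bids boost importance; the cap keeps the score in [0, 1].
    bid = data.get("bid_sats", 0)
    if bid:
        score = min(1.0, score + min(bid, 100) / 500)
    return round(score, 2)
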

# ── record_event ──────────────────────────────────────────────────────────


class TestRecordEvent:
    def test_basic_record(self):
        eid = record_event("task_posted", "New task", task_id="t1")
        assert isinstance(eid, str)
        assert len(eid) > 0

    def test_auto_importance(self):
        record_event("task_failed", "Failed", task_id="t-auto")
        events = get_events(task_id="t-auto")
        assert events[0].importance >= 0.9

    def test_explicit_importance(self):
        record_event("task_posted", "Custom", task_id="t-expl", importance=0.1)
        events = get_events(task_id="t-expl")
        assert events[0].importance == 0.1

    def test_with_agent_and_data(self):
        data = json.dumps({"bid_sats": 42})
        record_event("bid_submitted", "Bid", agent_id="a1", task_id="t-data", data=data)
        events = get_events(task_id="t-data")
        assert events[0].agent_id == "a1"
        parsed = json.loads(events[0].data)
        assert parsed["bid_sats"] == 42

    def test_invalid_json_data_uses_default_importance(self):
        record_event("task_posted", "Bad data", task_id="t-bad", data="not-json")
        events = get_events(task_id="t-bad")
        assert events[0].importance == 0.4  # base for task_posted

    def test_returns_unique_ids(self):
        id1 = record_event("task_posted", "A")
        id2 = record_event("task_posted", "B")
        assert id1 != id2


# ── get_events ────────────────────────────────────────────────────────────


class TestGetEvents:
    def test_empty_db(self):
        assert get_events() == []

    def test_filter_by_type(self):
        record_event("task_posted", "A")
        record_event("task_completed", "B")
        events = get_events(event_type="task_posted")
        assert len(events) == 1
        assert events[0].event_type == "task_posted"

    def test_filter_by_agent(self):
        record_event("task_posted", "A", agent_id="a1")
        record_event("task_posted", "B", agent_id="a2")
        events = get_events(agent_id="a1")
        assert len(events) == 1
        assert events[0].agent_id == "a1"

    def test_filter_by_task(self):
        record_event("task_posted", "A", task_id="t1")
        record_event("task_posted", "B", task_id="t2")
        events = get_events(task_id="t1")
        assert len(events) == 1

    def test_filter_by_min_importance(self):
        record_event("task_posted", "Low", importance=0.1)
        record_event("task_failed", "High", importance=0.9)
        events = get_events(min_importance=0.5)
        assert len(events) == 1
        assert events[0].importance >= 0.5

    def test_limit(self):
        for i in range(10):
            record_event("task_posted", f"ev{i}")
        events = get_events(limit=3)
        assert len(events) == 3

    def test_order_by_created_desc(self):
        record_event("task_posted", "first", task_id="ord1")
        record_event("task_posted", "second", task_id="ord2")
        events = get_events()
        # Most recent first
        assert events[0].task_id == "ord2"

    def test_combined_filters(self):
        record_event("task_failed", "A", agent_id="a1", task_id="t1", importance=0.9)
        record_event("task_posted", "B", agent_id="a1", task_id="t2", importance=0.4)
        record_event("task_failed", "C", agent_id="a2", task_id="t3", importance=0.9)
        events = get_events(event_type="task_failed", agent_id="a1", min_importance=0.5)
        assert len(events) == 1
        assert events[0].task_id == "t1"


# ── count_events ──────────────────────────────────────────────────────────


class TestCountEvents:
    def test_empty(self):
        assert count_events() == 0

    def test_total(self):
        record_event("task_posted", "A")
        record_event("task_failed", "B")
        assert count_events() == 2

    def test_by_type(self):
        record_event("task_posted", "A")
        record_event("task_posted", "B")
        record_event("task_failed", "C")
        assert count_events("task_posted") == 2
        assert count_events("task_failed") == 1
        assert count_events("task_completed") == 0


# ── store_memory ──────────────────────────────────────────────────────────


class TestStoreMemory:
    def test_basic_store(self):
        mid = store_memory("pattern", "system", "Test insight")
        assert isinstance(mid, str)
        assert len(mid) > 0

    def test_returns_unique_ids(self):
        id1 = store_memory("pattern", "a", "X")
        id2 = store_memory("pattern", "b", "Y")
        assert id1 != id2

    def test_with_all_params(self):
        store_memory(
            "anomaly",
            "agent-1",
            "Odd pattern",
            confidence=0.9,
            source_events=10,
            expires_at="2026-12-31",
        )
        mems = get_memories(subject="agent-1")
        assert len(mems) == 1
        assert mems[0].confidence == 0.9
        assert mems[0].source_events == 10
        assert mems[0].expires_at == "2026-12-31"

    def test_default_values(self):
        store_memory("insight", "sys", "Default test")
        mems = get_memories(subject="sys")
        assert mems[0].confidence == 0.5
        assert mems[0].source_events == 0
        assert mems[0].expires_at is None


# ── get_memories ──────────────────────────────────────────────────────────


class TestGetMemories:
    def test_empty(self):
        assert get_memories() == []

    def test_filter_by_type(self):
        store_memory("pattern", "a", "X")
        store_memory("anomaly", "a", "Y")
        mems = get_memories(memory_type="pattern")
        assert len(mems) == 1
        assert mems[0].memory_type == "pattern"

    def test_filter_by_subject(self):
        store_memory("pattern", "a", "X")
        store_memory("pattern", "b", "Y")
        mems = get_memories(subject="a")
        assert len(mems) == 1

    def test_filter_by_min_confidence(self):
        store_memory("pattern", "a", "Low", confidence=0.2)
        store_memory("pattern", "b", "High", confidence=0.9)
        mems = get_memories(min_confidence=0.5)
        assert len(mems) == 1
        assert mems[0].content == "High"

    def test_limit(self):
        for i in range(10):
            store_memory("pattern", "a", f"M{i}")
        mems = get_memories(limit=3)
        assert len(mems) == 3

    def test_combined_filters(self):
        store_memory("pattern", "a", "Target", confidence=0.9)
        store_memory("anomaly", "a", "Wrong type", confidence=0.9)
        store_memory("pattern", "b", "Wrong subject", confidence=0.9)
        store_memory("pattern", "a", "Low conf", confidence=0.1)
        mems = get_memories(memory_type="pattern", subject="a", min_confidence=0.5)
        assert len(mems) == 1
        assert mems[0].content == "Target"


# ── count_memories ────────────────────────────────────────────────────────


class TestCountMemories:
    def test_empty(self):
        assert count_memories() == 0

    def test_total(self):
        store_memory("pattern", "a", "X")
        store_memory("anomaly", "b", "Y")
        assert count_memories() == 2

    def test_by_type(self):
        store_memory("pattern", "a", "X")
        store_memory("pattern", "b", "Y")
        store_memory("anomaly", "c", "Z")
        assert count_memories("pattern") == 2
        assert count_memories("anomaly") == 1
        assert count_memories("insight") == 0
470
tests/test_config_module.py
Normal file
@@ -0,0 +1,470 @@
"""Tests for src/config.py — Settings, validation, and helper functions."""

import os
from unittest.mock import patch

import pytest


class TestNormalizeOllamaUrl:
    """normalize_ollama_url replaces localhost with 127.0.0.1."""

    def test_replaces_localhost(self):
        from config import normalize_ollama_url

        assert normalize_ollama_url("http://localhost:11434") == "http://127.0.0.1:11434"

    def test_preserves_ip(self):
        from config import normalize_ollama_url

        assert normalize_ollama_url("http://192.168.1.5:11434") == "http://192.168.1.5:11434"

    def test_preserves_non_localhost_hostname(self):
        from config import normalize_ollama_url

        assert normalize_ollama_url("http://ollama.local:11434") == "http://ollama.local:11434"

    def test_replaces_multiple_occurrences(self):
        from config import normalize_ollama_url

        result = normalize_ollama_url("http://localhost:11434/localhost")
        assert result == "http://127.0.0.1:11434/127.0.0.1"
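
# The last test above implies a plain substring replacement rather than URL
# parsing; a minimal sketch consistent with the assertions (assumed, not the
# actual src/config.py code):
def normalize_ollama_url_sketch(url: str) -> str:
    # Replaces every occurrence, including in the path, as the tests show.
    return url.replace("localhost", "127.0.0.1")
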
class TestSettingsDefaults:
    """Settings instantiation produces correct defaults."""

    def _make_settings(self, **env_overrides):
        """Create a fresh Settings instance with given env overrides."""
        from config import Settings

        clean_env = {
            k: v
            for k, v in os.environ.items()
            if not k.startswith(("OLLAMA_", "TIMMY_", "AGENT_", "DEBUG"))
        }
        clean_env.update(env_overrides)
        with patch.dict(os.environ, clean_env, clear=True):
            return Settings()

    def test_default_agent_name(self):
        s = self._make_settings()
        assert s.agent_name == "Agent"

    def test_default_ollama_url(self):
        s = self._make_settings()
        assert s.ollama_url == "http://localhost:11434"

    def test_default_ollama_model(self):
        s = self._make_settings()
        assert s.ollama_model == "qwen3:30b"

    def test_default_ollama_num_ctx(self):
        s = self._make_settings()
        assert s.ollama_num_ctx == 4096

    def test_default_debug_false(self):
        s = self._make_settings()
        assert s.debug is False

    def test_default_timmy_env(self):
        s = self._make_settings()
        assert s.timmy_env == "development"

    def test_default_timmy_test_mode(self):
        s = self._make_settings()
        assert s.timmy_test_mode is False

    def test_default_spark_enabled(self):
        s = self._make_settings()
        assert s.spark_enabled is True

    def test_default_lightning_backend(self):
        s = self._make_settings()
        assert s.lightning_backend == "mock"

    def test_default_max_agent_steps(self):
        s = self._make_settings()
        assert s.max_agent_steps == 10

    def test_default_memory_prune_days(self):
        s = self._make_settings()
        assert s.memory_prune_days == 90

    def test_default_fallback_models_is_list(self):
        s = self._make_settings()
        assert isinstance(s.fallback_models, list)
        assert len(s.fallback_models) > 0

    def test_default_cors_origins_is_list(self):
        s = self._make_settings()
        assert isinstance(s.cors_origins, list)

    def test_default_trusted_hosts_is_list(self):
        s = self._make_settings()
        assert isinstance(s.trusted_hosts, list)
        assert "localhost" in s.trusted_hosts

    def test_normalized_ollama_url_property(self):
        s = self._make_settings()
        assert "127.0.0.1" in s.normalized_ollama_url
        assert "localhost" not in s.normalized_ollama_url


class TestSettingsEnvOverrides:
    """Environment variables override default values."""

    def _make_settings(self, **env_overrides):
        from config import Settings

        clean_env = {
            k: v
            for k, v in os.environ.items()
            if not k.startswith(("OLLAMA_", "TIMMY_", "AGENT_", "DEBUG"))
        }
        clean_env.update(env_overrides)
        with patch.dict(os.environ, clean_env, clear=True):
            return Settings()

    def test_agent_name_override(self):
        s = self._make_settings(AGENT_NAME="Timmy")
        assert s.agent_name == "Timmy"

    def test_ollama_url_override(self):
        s = self._make_settings(OLLAMA_URL="http://10.0.0.1:11434")
        assert s.ollama_url == "http://10.0.0.1:11434"

    def test_ollama_model_override(self):
        s = self._make_settings(OLLAMA_MODEL="llama3.1")
        assert s.ollama_model == "llama3.1"

    def test_debug_true_from_string(self):
        s = self._make_settings(DEBUG="true")
        assert s.debug is True

    def test_debug_false_from_string(self):
        s = self._make_settings(DEBUG="false")
        assert s.debug is False

    def test_numeric_override(self):
        s = self._make_settings(OLLAMA_NUM_CTX="8192")
        assert s.ollama_num_ctx == 8192

    def test_max_agent_steps_override(self):
        s = self._make_settings(MAX_AGENT_STEPS="25")
        assert s.max_agent_steps == 25

    def test_timmy_env_production(self):
        s = self._make_settings(TIMMY_ENV="production")
        assert s.timmy_env == "production"

    def test_timmy_test_mode_true(self):
        s = self._make_settings(TIMMY_TEST_MODE="true")
        assert s.timmy_test_mode is True

    def test_grok_enabled_override(self):
        s = self._make_settings(GROK_ENABLED="true")
        assert s.grok_enabled is True

    def test_spark_enabled_override(self):
        s = self._make_settings(SPARK_ENABLED="false")
        assert s.spark_enabled is False

    def test_memory_prune_days_override(self):
        s = self._make_settings(MEMORY_PRUNE_DAYS="30")
        assert s.memory_prune_days == 30


class TestSettingsTypeValidation:
    """Pydantic correctly parses and validates types from string env vars."""

    def _make_settings(self, **env_overrides):
        from config import Settings

        clean_env = {
            k: v
            for k, v in os.environ.items()
            if not k.startswith(("OLLAMA_", "TIMMY_", "AGENT_", "DEBUG"))
        }
        clean_env.update(env_overrides)
        with patch.dict(os.environ, clean_env, clear=True):
            return Settings()

    def test_bool_from_1(self):
        s = self._make_settings(DEBUG="1")
        assert s.debug is True

    def test_bool_from_0(self):
        s = self._make_settings(DEBUG="0")
        assert s.debug is False

    def test_int_field_rejects_non_numeric(self):
        from pydantic import ValidationError

        with pytest.raises(ValidationError):
            self._make_settings(OLLAMA_NUM_CTX="not_a_number")

    def test_literal_field_rejects_invalid(self):
        from pydantic import ValidationError

        with pytest.raises(ValidationError):
            self._make_settings(TIMMY_ENV="staging")

    def test_literal_backend_rejects_invalid(self):
        from pydantic import ValidationError

        with pytest.raises(ValidationError):
            self._make_settings(TIMMY_MODEL_BACKEND="openai")

    def test_literal_backend_accepts_valid(self):
        for backend in ("ollama", "grok", "claude", "auto"):
            s = self._make_settings(TIMMY_MODEL_BACKEND=backend)
            assert s.timmy_model_backend == backend

    def test_extra_fields_ignored(self):
        # model_config has extra="ignore"
        s = self._make_settings(TOTALLY_UNKNOWN_FIELD="hello")
        assert not hasattr(s, "totally_unknown_field")


class TestSettingsEdgeCases:
    """Edge cases: empty strings, missing vars, boundary values."""

    def _make_settings(self, **env_overrides):
        from config import Settings

        clean_env = {
            k: v
            for k, v in os.environ.items()
            if not k.startswith(("OLLAMA_", "TIMMY_", "AGENT_", "DEBUG"))
        }
        clean_env.update(env_overrides)
        with patch.dict(os.environ, clean_env, clear=True):
            return Settings()

    def test_empty_string_tokens_stay_empty(self):
        s = self._make_settings(TELEGRAM_TOKEN="", DISCORD_TOKEN="")
        assert s.telegram_token == ""
        assert s.discord_token == ""

    def test_zero_int_fields(self):
        s = self._make_settings(OLLAMA_NUM_CTX="0", MEMORY_PRUNE_DAYS="0")
        assert s.ollama_num_ctx == 0
        assert s.memory_prune_days == 0

    def test_large_int_value(self):
        s = self._make_settings(CHAT_API_MAX_BODY_BYTES="104857600")
        assert s.chat_api_max_body_bytes == 104857600

    def test_negative_int_accepted(self):
        # Pydantic doesn't constrain these to positive
        s = self._make_settings(MAX_AGENT_STEPS="-1")
        assert s.max_agent_steps == -1


class TestComputeRepoRoot:
    """_compute_repo_root auto-detects .git directory."""

    def test_returns_string(self):
        from config import Settings

        s = Settings()
        result = s._compute_repo_root()
        assert isinstance(result, str)
        assert len(result) > 0

    def test_explicit_repo_root_used(self):
        from config import Settings

        with patch.dict(os.environ, {"REPO_ROOT": "/tmp/myrepo"}, clear=False):
            s = Settings()
            s.repo_root = "/tmp/myrepo"
            assert s._compute_repo_root() == "/tmp/myrepo"


class TestModelPostInit:
    """model_post_init resolves gitea_token from file fallback."""

    def test_gitea_token_from_env(self):
        from config import Settings

        with patch.dict(os.environ, {"GITEA_TOKEN": "test-token-123"}, clear=False):
            s = Settings()
            assert s.gitea_token == "test-token-123"

    def test_gitea_token_stays_empty_when_no_file(self):
        from config import Settings

        env = {k: v for k, v in os.environ.items() if k != "GITEA_TOKEN"}
        with patch.dict(os.environ, env, clear=True):
            with patch("os.path.isfile", return_value=False):
                s = Settings()
                assert s.gitea_token == ""


class TestCheckOllamaModelAvailable:
    """check_ollama_model_available handles network responses and errors."""

    def test_returns_false_on_network_error(self):
        from config import check_ollama_model_available

        with patch("urllib.request.urlopen", side_effect=OSError("Connection refused")):
            assert check_ollama_model_available("llama3.1") is False

    def test_returns_true_when_model_found(self):
        import json
        from unittest.mock import MagicMock

        from config import check_ollama_model_available

        response_data = json.dumps({"models": [{"name": "llama3.1:8b-instruct"}]}).encode()
        mock_response = MagicMock()
        mock_response.read.return_value = response_data
        mock_response.__enter__ = lambda s: s
        mock_response.__exit__ = MagicMock(return_value=False)

        with patch("urllib.request.urlopen", return_value=mock_response):
            assert check_ollama_model_available("llama3.1") is True

    def test_returns_false_when_model_not_found(self):
        import json
        from unittest.mock import MagicMock

        from config import check_ollama_model_available

        response_data = json.dumps({"models": [{"name": "qwen2.5:7b"}]}).encode()
        mock_response = MagicMock()
        mock_response.read.return_value = response_data
        mock_response.__enter__ = lambda s: s
        mock_response.__exit__ = MagicMock(return_value=False)

        with patch("urllib.request.urlopen", return_value=mock_response):
            assert check_ollama_model_available("llama3.1") is False


class TestGetEffectiveOllamaModel:
    """get_effective_ollama_model walks fallback chain."""

    def test_returns_primary_when_available(self):
        from config import get_effective_ollama_model

        with patch("config.check_ollama_model_available", return_value=True):
            result = get_effective_ollama_model()
            assert result == "qwen3:30b"

    def test_falls_back_when_primary_unavailable(self):
        from config import get_effective_ollama_model

        def side_effect(model):
            return model == "llama3.1:8b-instruct"

        with patch("config.check_ollama_model_available", side_effect=side_effect):
            result = get_effective_ollama_model()
            assert result == "llama3.1:8b-instruct"

    def test_returns_user_model_when_nothing_available(self):
        from config import get_effective_ollama_model

        with patch("config.check_ollama_model_available", return_value=False):
            result = get_effective_ollama_model()
            assert result == "qwen3:30b"
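
# A minimal sketch of the fallback walk pinned down above: try the configured
# model, then each fallback model, and keep the user's choice if nothing is
# reachable. The names and the fallback_models source are assumptions, not the
# actual config.py code.
def get_effective_ollama_model_sketch(settings) -> str:
    from config import check_ollama_model_available

    for model in [settings.ollama_model, *settings.fallback_models]:
        if check_ollama_model_available(model):
            return model
    # Nothing available: return the user's model so the failure surfaces later.
    return settings.ollama_model
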
class TestValidateStartup:
    """validate_startup enforces security in production, warns in dev."""

    def setup_method(self):
        import config

        config._startup_validated = False

    def test_skips_in_test_mode(self):
        import config

        with patch.dict(os.environ, {"TIMMY_TEST_MODE": "1"}):
            config.validate_startup()
            assert config._startup_validated is True

    def test_dev_mode_warns_but_does_not_exit(self, caplog):
        import logging

        import config

        config._startup_validated = False
        env = {k: v for k, v in os.environ.items() if k != "TIMMY_TEST_MODE"}
        env["TIMMY_ENV"] = "development"
        with patch.dict(os.environ, env, clear=True):
            with caplog.at_level(logging.WARNING, logger="config"):
                config.validate_startup()
        assert config._startup_validated is True

    def test_production_exits_without_secrets(self):
        import config

        config._startup_validated = False
        env = {k: v for k, v in os.environ.items() if k != "TIMMY_TEST_MODE"}
        env["TIMMY_ENV"] = "production"
        env.pop("L402_HMAC_SECRET", None)
        env.pop("L402_MACAROON_SECRET", None)
        with patch.dict(os.environ, env, clear=True):
            with patch.object(config.settings, "timmy_env", "production"):
                with patch.object(config.settings, "l402_hmac_secret", ""):
                    with patch.object(config.settings, "l402_macaroon_secret", ""):
                        with pytest.raises(SystemExit):
                            config.validate_startup(force=True)

    def test_production_exits_with_cors_wildcard(self):
        import config

        config._startup_validated = False
        env = {k: v for k, v in os.environ.items() if k != "TIMMY_TEST_MODE"}
        env["TIMMY_ENV"] = "production"
        with patch.dict(os.environ, env, clear=True):
            with patch.object(config.settings, "timmy_env", "production"):
                with patch.object(config.settings, "l402_hmac_secret", "secret1"):
                    with patch.object(config.settings, "l402_macaroon_secret", "secret2"):
                        with patch.object(config.settings, "cors_origins", ["*"]):
                            with pytest.raises(SystemExit):
                                config.validate_startup(force=True)

    def test_production_passes_with_all_secrets(self):
        import config

        config._startup_validated = False
        env = {k: v for k, v in os.environ.items() if k != "TIMMY_TEST_MODE"}
        env["TIMMY_ENV"] = "production"
        with patch.dict(os.environ, env, clear=True):
            with patch.object(config.settings, "timmy_env", "production"):
                with patch.object(config.settings, "l402_hmac_secret", "secret1"):
                    with patch.object(config.settings, "l402_macaroon_secret", "secret2"):
                        with patch.object(
                            config.settings,
                            "cors_origins",
                            ["http://localhost:3000"],
                        ):
                            config.validate_startup(force=True)
                            assert config._startup_validated is True

    def test_idempotent_without_force(self):
        import config

        config._startup_validated = True
        # Should return immediately without doing anything
        config.validate_startup()
        assert config._startup_validated is True


class TestAppStartTime:
    """APP_START_TIME is set at module load."""

    def test_app_start_time_is_datetime(self):
        from datetime import datetime

        from config import APP_START_TIME

        assert isinstance(APP_START_TIME, datetime)

    def test_app_start_time_has_timezone(self):
        from config import APP_START_TIME

        assert APP_START_TIME.tzinfo is not None
536
tests/timmy/test_golden_path.py
Normal file
@@ -0,0 +1,536 @@
"""Tests for the Golden Path generator."""

import json
from datetime import UTC, datetime
from unittest.mock import MagicMock, patch

from timmy_automations.daily_run.golden_path import (
    TIME_ESTIMATES,
    TYPE_PATTERNS,
    GiteaClient,
    GoldenPath,
    PathItem,
    build_golden_path,
    classify_issue_type,
    estimate_time,
    extract_size,
    generate_golden_path,
    get_token,
    group_issues_by_type,
    load_config,
    score_issue_for_path,
)


class TestLoadConfig:
    """Tests for configuration loading."""

    def test_load_config_defaults(self):
        """Config should have sensible defaults."""
        config = load_config()
        assert "gitea_api" in config
        assert "repo_slug" in config
        assert "size_labels" in config

    def test_load_config_env_override(self, monkeypatch):
        """Environment variables should override defaults."""
        monkeypatch.setenv("TIMMY_GITEA_API", "http://custom:3000/api/v1")
        monkeypatch.setenv("TIMMY_REPO_SLUG", "custom/repo")
        monkeypatch.setenv("TIMMY_GITEA_TOKEN", "test-token")

        config = load_config()
        assert config["gitea_api"] == "http://custom:3000/api/v1"
        assert config["repo_slug"] == "custom/repo"
        assert config["token"] == "test-token"


class TestGetToken:
    """Tests for token retrieval."""

    def test_get_token_from_config(self):
        """Token from config takes precedence."""
        config = {"token": "config-token", "token_file": "~/.test"}
        assert get_token(config) == "config-token"

    @patch("pathlib.Path.exists")
    @patch("pathlib.Path.read_text")
    def test_get_token_from_file(self, mock_read, mock_exists):
        """Token can be read from file."""
        mock_exists.return_value = True
        mock_read.return_value = "file-token\n"

        config = {"token_file": "~/.hermes/test_token"}
        assert get_token(config) == "file-token"

    def test_get_token_none(self):
        """Returns None if no token is available."""
        config = {"token_file": "/nonexistent/path"}
        assert get_token(config) is None
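
# A minimal sketch of the precedence the tests above establish: an explicit
# token in config wins, then a token file (stripped of its trailing newline),
# then None. Assumed behavior, not the actual golden_path implementation.
def get_token_sketch(config: dict) -> str | None:
    from pathlib import Path

    if config.get("token"):
        return config["token"]
    token_file = Path(config.get("token_file", "")).expanduser()
    if token_file.exists():
        # Token files usually end with a newline; strip it.
        return token_file.read_text().strip()
    return None
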
class TestExtractSize:
    """Tests for size label extraction."""

    def test_extract_size_xs(self):
        """Should extract XS size."""
        labels = [{"name": "size:XS"}, {"name": "bug"}]
        assert extract_size(labels) == "XS"

    def test_extract_size_s(self):
        """Should extract S size."""
        labels = [{"name": "bug"}, {"name": "size:S"}]
        assert extract_size(labels) == "S"

    def test_extract_size_m(self):
        """Should extract M size."""
        labels = [{"name": "size:M"}]
        assert extract_size(labels) == "M"

    def test_extract_size_unknown(self):
        """Should return ? for unknown size."""
        labels = [{"name": "bug"}, {"name": "feature"}]
        assert extract_size(labels) == "?"

    def test_extract_size_empty(self):
        """Should return ? for empty labels."""
        assert extract_size([]) == "?"
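
# A minimal sketch consistent with the cases above (assumed, not the actual
# implementation): return the suffix of the first "size:*" label, else "?".
def extract_size_sketch(labels: list[dict]) -> str:
    for label in labels:
        name = label.get("name", "")
        if name.startswith("size:"):
            return name.split(":", 1)[1]
    return "?"  # No size label present
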
class TestClassifyIssueType:
    """Tests for issue type classification."""

    def test_classify_triage(self):
        """Should classify triage issues."""
        issue = {
            "title": "Triage new issues",
            "labels": [{"name": "triage"}],
        }
        assert classify_issue_type(issue) == "triage"

    def test_classify_test(self):
        """Should classify test issues."""
        issue = {
            "title": "Add unit tests for parser",
            "labels": [{"name": "test"}],
        }
        assert classify_issue_type(issue) == "test"

    def test_classify_fix(self):
        """Should classify fix issues."""
        issue = {
            "title": "Fix login bug",
            "labels": [{"name": "bug"}],
        }
        assert classify_issue_type(issue) == "fix"

    def test_classify_docs(self):
        """Should classify docs issues."""
        issue = {
            "title": "Update README",
            "labels": [{"name": "docs"}],
        }
        assert classify_issue_type(issue) == "docs"

    def test_classify_refactor(self):
        """Should classify refactor issues."""
        issue = {
            "title": "Refactor validation logic",
            "labels": [{"name": "refactor"}],
        }
        assert classify_issue_type(issue) == "refactor"

    def test_classify_default_to_fix(self):
        """Should default to fix for uncategorized."""
        issue = {
            "title": "Something vague",
            "labels": [{"name": "question"}],
        }
        assert classify_issue_type(issue) == "fix"

    def test_classify_title_priority(self):
        """Title patterns should contribute to classification."""
        issue = {
            "title": "Fix the broken parser",
            "labels": [],
        }
        assert classify_issue_type(issue) == "fix"
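
# A minimal sketch of the classification the tests above pin down: label names
# are checked first, title keywords second (via TYPE_PATTERNS), and "fix" is
# the default bucket. The two-pass matching order is an assumption.
def classify_issue_type_sketch(issue: dict) -> str:
    label_names = {label.get("name", "") for label in issue.get("labels", [])}
    title = issue.get("title", "").lower()
    for issue_type, patterns in TYPE_PATTERNS.items():
        if label_names.intersection(patterns["labels"]):
            return issue_type
    for issue_type, patterns in TYPE_PATTERNS.items():
        if any(word in title for word in patterns["title"]):
            return issue_type
    return "fix"  # Uncategorized issues default to fix
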
class TestEstimateTime:
    """Tests for time estimation."""

    def test_estimate_xs_fix(self):
        """XS fix should be 10 minutes."""
        issue = {
            "title": "Fix typo",
            "labels": [{"name": "size:XS"}, {"name": "bug"}],
        }
        assert estimate_time(issue) == 10

    def test_estimate_s_test(self):
        """S test should be 15 minutes."""
        issue = {
            "title": "Add test coverage",
            "labels": [{"name": "size:S"}, {"name": "test"}],
        }
        assert estimate_time(issue) == 15

    def test_estimate_m_fix(self):
        """M fix should be 25 minutes."""
        issue = {
            "title": "Fix complex bug",
            "labels": [{"name": "size:M"}, {"name": "bug"}],
        }
        assert estimate_time(issue) == 25

    def test_estimate_unknown_size(self):
        """Unknown size should fall back to S."""
        issue = {
            "title": "Some fix",
            "labels": [{"name": "bug"}],
        }
        # Falls back to S/fix = 15
        assert estimate_time(issue) == 15
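
# A minimal sketch of the lookup the tests above describe: size and type index
# into TIME_ESTIMATES, with unknown sizes treated as "S". Assumed behavior.
def estimate_time_sketch(issue: dict) -> int:
    size = extract_size(issue.get("labels", []))
    if size not in TIME_ESTIMATES:
        size = "S"  # Unknown sizes fall back to S
    return TIME_ESTIMATES[size][classify_issue_type(issue)]
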
class TestScoreIssueForPath:
    """Tests for issue scoring."""

    def test_score_prefers_xs(self):
        """XS issues should score higher."""
        xs = {"title": "Fix", "labels": [{"name": "size:XS"}]}
        s = {"title": "Fix", "labels": [{"name": "size:S"}]}
        m = {"title": "Fix", "labels": [{"name": "size:M"}]}

        assert score_issue_for_path(xs) > score_issue_for_path(s)
        assert score_issue_for_path(s) > score_issue_for_path(m)

    def test_score_prefers_clear_types(self):
        """Issues with clear type labels score higher."""
        # Bug label adds score, so with bug should be >= without bug
        with_type = {
            "title": "Fix bug",
            "labels": [{"name": "size:S"}, {"name": "bug"}],
        }
        without_type = {
            "title": "Something",
            "labels": [{"name": "size:S"}],
        }

        assert score_issue_for_path(with_type) >= score_issue_for_path(without_type)

    def test_score_accepts_criteria(self):
        """Issues with acceptance criteria score higher."""
        with_criteria = {
            "title": "Fix",
            "labels": [{"name": "size:S"}],
            "body": "## Acceptance Criteria\n- [ ] Fix it",
        }
        without_criteria = {
            "title": "Fix",
            "labels": [{"name": "size:S"}],
            "body": "Just fix it",
        }

        assert score_issue_for_path(with_criteria) > score_issue_for_path(without_criteria)


class TestGroupIssuesByType:
    """Tests for issue grouping."""

    def test_groups_by_type(self):
        """Issues should be grouped by their type."""
        issues = [
            {"title": "Fix bug", "labels": [{"name": "bug"}], "number": 1},
            {"title": "Add test", "labels": [{"name": "test"}], "number": 2},
            {"title": "Another fix", "labels": [{"name": "bug"}], "number": 3},
        ]

        grouped = group_issues_by_type(issues)

        assert len(grouped["fix"]) == 2
        assert len(grouped["test"]) == 1
        assert len(grouped["triage"]) == 0

    def test_sorts_by_score(self):
        """Issues within groups should be sorted by score."""
        issues = [
            {"title": "Fix", "labels": [{"name": "size:M"}], "number": 1},
            {"title": "Fix", "labels": [{"name": "size:XS"}], "number": 2},
            {"title": "Fix", "labels": [{"name": "size:S"}], "number": 3},
        ]

        grouped = group_issues_by_type(issues)

        # XS should be first (highest score)
        assert grouped["fix"][0]["number"] == 2
        # M should be last (lowest score)
        assert grouped["fix"][2]["number"] == 1
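
# A minimal sketch of the grouping the tests above describe: bucket issues by
# classified type, then sort each bucket best-first by score. Assumed shape.
def group_issues_by_type_sketch(issues: list[dict]) -> dict[str, list[dict]]:
    grouped: dict[str, list[dict]] = {
        t: [] for t in ("triage", "fix", "test", "docs", "refactor")
    }
    for issue in issues:
        grouped[classify_issue_type(issue)].append(issue)
    for bucket in grouped.values():
        bucket.sort(key=score_issue_for_path, reverse=True)
    return grouped
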
class TestBuildGoldenPath:
    """Tests for Golden Path building."""

    def test_builds_path_with_all_types(self):
        """Path should include items from different types."""
        grouped = {
            "triage": [
                {"title": "Triage", "labels": [{"name": "size:XS"}], "number": 1, "html_url": ""},
            ],
            "fix": [
                {"title": "Fix 1", "labels": [{"name": "size:S"}], "number": 2, "html_url": ""},
                {"title": "Fix 2", "labels": [{"name": "size:XS"}], "number": 3, "html_url": ""},
            ],
            "test": [
                {"title": "Test", "labels": [{"name": "size:S"}], "number": 4, "html_url": ""},
            ],
            "docs": [],
            "refactor": [],
        }

        path = build_golden_path(grouped, target_minutes=45)

        assert path.item_count >= 3
        assert path.items[0].issue_type == "triage"  # Warm-up
        assert any(item.issue_type == "test" for item in path.items)

    def test_respects_time_budget(self):
        """Path should stay within reasonable time budget."""
        grouped = {
            "triage": [
                {"title": "Triage", "labels": [{"name": "size:S"}], "number": 1, "html_url": ""},
            ],
            "fix": [
                {"title": "Fix 1", "labels": [{"name": "size:S"}], "number": 2, "html_url": ""},
                {"title": "Fix 2", "labels": [{"name": "size:S"}], "number": 3, "html_url": ""},
            ],
            "test": [
                {"title": "Test", "labels": [{"name": "size:S"}], "number": 4, "html_url": ""},
            ],
            "docs": [],
            "refactor": [],
        }

        path = build_golden_path(grouped, target_minutes=45)

        # Should be in 30-60 minute range
        assert 20 <= path.total_estimated_minutes <= 70

    def test_no_duplicate_issues(self):
        """Path should not include the same issue twice."""
        grouped = {
            "triage": [],
            "fix": [
                {"title": "Fix", "labels": [{"name": "size:S"}], "number": 1, "html_url": ""},
            ],
            "test": [],
            "docs": [],
            "refactor": [],
        }

        path = build_golden_path(grouped, target_minutes=45)

        numbers = [item.number for item in path.items]
        assert len(numbers) == len(set(numbers))  # No duplicates

    def test_fallback_when_triage_missing(self):
        """Should use fallback when no triage issues available."""
        grouped = {
            "triage": [],
            "fix": [
                {"title": "Fix", "labels": [{"name": "size:XS"}], "number": 1, "html_url": ""},
            ],
            "test": [
                {"title": "Test", "labels": [{"name": "size:XS"}], "number": 2, "html_url": ""},
            ],
            "docs": [],
            "refactor": [],
        }

        path = build_golden_path(grouped, target_minutes=45)

        assert path.item_count > 0


class TestGoldenPathDataclass:
    """Tests for the GoldenPath dataclass."""

    def test_total_time_calculation(self):
        """Should sum item times correctly."""
        path = GoldenPath(
            generated_at=datetime.now(UTC).isoformat(),
            target_minutes=45,
            items=[
                PathItem(1, "Test 1", "XS", "fix", 10, ""),
                PathItem(2, "Test 2", "S", "test", 15, ""),
            ],
        )

        assert path.total_estimated_minutes == 25

    def test_to_dict(self):
        """Should convert to dict correctly."""
        path = GoldenPath(
            generated_at="2024-01-01T00:00:00+00:00",
            target_minutes=45,
            items=[PathItem(1, "Test", "XS", "fix", 10, "http://test")],
        )

        data = path.to_dict()

        assert data["target_minutes"] == 45
        assert data["total_estimated_minutes"] == 10
        assert data["item_count"] == 1
        assert len(data["items"]) == 1

    def test_to_json(self):
        """Should convert to JSON correctly."""
        path = GoldenPath(
            generated_at="2024-01-01T00:00:00+00:00",
            target_minutes=45,
            items=[],
        )

        json_str = path.to_json()
        data = json.loads(json_str)

        assert data["target_minutes"] == 45


class TestGiteaClient:
    """Tests for the GiteaClient."""

    def test_client_initialization(self):
        """Client should initialize with config."""
        config = {
            "gitea_api": "http://test:3000/api/v1",
            "repo_slug": "test/repo",
        }
        client = GiteaClient(config, "token123")

        assert client.api_base == "http://test:3000/api/v1"
        assert client.repo_slug == "test/repo"
        assert client.token == "token123"

    def test_headers_with_token(self):
        """Headers should include auth token."""
        config = {"gitea_api": "http://test", "repo_slug": "test/repo"}
        client = GiteaClient(config, "mytoken")

        headers = client._headers()

        assert headers["Authorization"] == "token mytoken"
        assert headers["Accept"] == "application/json"

    def test_headers_without_token(self):
        """Headers should work without token."""
        config = {"gitea_api": "http://test", "repo_slug": "test/repo"}
        client = GiteaClient(config, None)

        headers = client._headers()

        assert "Authorization" not in headers
        assert headers["Accept"] == "application/json"

    @patch("timmy_automations.daily_run.golden_path.urlopen")
    def test_is_available_success(self, mock_urlopen):
        """Should detect API availability."""
        mock_response = MagicMock()
        mock_response.status = 200
        mock_context = MagicMock()
        mock_context.__enter__ = MagicMock(return_value=mock_response)
        mock_context.__exit__ = MagicMock(return_value=False)
        mock_urlopen.return_value = mock_context

        config = {"gitea_api": "http://test", "repo_slug": "test/repo"}
        client = GiteaClient(config, None)

        assert client.is_available() is True

    @patch("timmy_automations.daily_run.golden_path.urlopen")
    def test_is_available_failure(self, mock_urlopen):
        """Should handle API unavailability."""
        from urllib.error import URLError

        mock_urlopen.side_effect = URLError("Connection refused")

        config = {"gitea_api": "http://test", "repo_slug": "test/repo"}
        client = GiteaClient(config, None)

        assert client.is_available() is False


class TestIntegration:
    """Integration-style tests."""

    @patch("timmy_automations.daily_run.golden_path.GiteaClient")
    def test_generate_golden_path_integration(self, mock_client_class):
        """End-to-end test with mocked Gitea."""
        # Setup mock
        mock_client = MagicMock()
        mock_client.is_available.return_value = True
        mock_client.get_paginated.return_value = [
            {
                "number": 1,
                "title": "Triage issues",
                "labels": [{"name": "size:XS"}, {"name": "triage"}],
                "html_url": "http://test/1",
            },
            {
                "number": 2,
                "title": "Fix bug",
                "labels": [{"name": "size:S"}, {"name": "bug"}],
                "html_url": "http://test/2",
            },
            {
                "number": 3,
                "title": "Add tests",
                "labels": [{"name": "size:S"}, {"name": "test"}],
                "html_url": "http://test/3",
            },
            {
                "number": 4,
                "title": "Another fix",
                "labels": [{"name": "size:XS"}, {"name": "bug"}],
                "html_url": "http://test/4",
            },
        ]
        mock_client_class.return_value = mock_client

        path = generate_golden_path(target_minutes=45)

        assert path.item_count >= 3
        assert all(item.url.startswith("http://test/") for item in path.items)

    @patch("timmy_automations.daily_run.golden_path.GiteaClient")
    def test_generate_when_unavailable(self, mock_client_class):
        """Should return empty path when Gitea unavailable."""
        mock_client = MagicMock()
        mock_client.is_available.return_value = False
        mock_client_class.return_value = mock_client

        path = generate_golden_path(target_minutes=45)

        assert path.item_count == 0
        assert path.items == []


class TestTypePatterns:
    """Tests for type pattern definitions."""

    def test_type_patterns_structure(self):
        """Type patterns should have required keys."""
        for _issue_type, patterns in TYPE_PATTERNS.items():
            assert "labels" in patterns
            assert "title" in patterns
            assert isinstance(patterns["labels"], list)
            assert isinstance(patterns["title"], list)

    def test_time_estimates_structure(self):
        """Time estimates should have all sizes."""
        for size in ["XS", "S", "M"]:
            assert size in TIME_ESTIMATES
            for issue_type in ["triage", "fix", "test", "docs", "refactor"]:
                assert issue_type in TIME_ESTIMATES[size]
                assert isinstance(TIME_ESTIMATES[size][issue_type], int)
                assert TIME_ESTIMATES[size][issue_type] > 0
331
tests/unit/test_matrix_config.py
Normal file
@@ -0,0 +1,331 @@
"""Tests for the matrix configuration loader utility."""

from pathlib import Path

import pytest
import yaml

from infrastructure.matrix_config import (
    AgentConfig,
    AgentsConfig,
    EnvironmentConfig,
    FeaturesConfig,
    LightingConfig,
    MatrixConfig,
    PointLight,
    load_from_yaml,
)


class TestPointLight:
    """Tests for PointLight dataclass."""

    def test_default_values(self):
        """PointLight has correct defaults."""
        pl = PointLight()
        assert pl.color == "#FFFFFF"
        assert pl.intensity == 1.0
        assert pl.position == {"x": 0, "y": 0, "z": 0}

    def test_from_dict_full(self):
        """PointLight.from_dict loads all fields."""
        data = {
            "color": "#FF0000",
            "intensity": 2.5,
            "position": {"x": 1, "y": 2, "z": 3},
        }
        pl = PointLight.from_dict(data)
        assert pl.color == "#FF0000"
        assert pl.intensity == 2.5
        assert pl.position == {"x": 1, "y": 2, "z": 3}

    def test_from_dict_partial(self):
        """PointLight.from_dict fills missing fields with defaults."""
        data = {"color": "#00FF00"}
        pl = PointLight.from_dict(data)
        assert pl.color == "#00FF00"
        assert pl.intensity == 1.0
        assert pl.position == {"x": 0, "y": 0, "z": 0}
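
# A minimal sketch of the from_dict pattern these config dataclasses share:
# read each field with a default so partial (or None) input degrades
# gracefully. The field handling is assumed from the tests, not copied from
# infrastructure.matrix_config.
from dataclasses import dataclass, field


@dataclass
class PointLightSketch:
    color: str = "#FFFFFF"
    intensity: float = 1.0
    position: dict = field(default_factory=lambda: {"x": 0, "y": 0, "z": 0})

    @classmethod
    def from_dict(cls, data: dict | None) -> "PointLightSketch":
        data = data or {}
        return cls(
            color=data.get("color", "#FFFFFF"),
            intensity=data.get("intensity", 1.0),
            position=data.get("position", {"x": 0, "y": 0, "z": 0}),
        )
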
class TestLightingConfig:
    """Tests for LightingConfig dataclass."""

    def test_default_values(self):
        """LightingConfig has correct Workshop+Matrix blend defaults."""
        cfg = LightingConfig()
        assert cfg.ambient_color == "#FFAA55"  # Warm amber (Workshop)
        assert cfg.ambient_intensity == 0.5
        assert len(cfg.point_lights) == 3
        # First light is warm amber center
        assert cfg.point_lights[0].color == "#FFAA55"
        # Second light is cool blue (Matrix)
        assert cfg.point_lights[1].color == "#3B82F6"

    def test_from_dict_full(self):
        """LightingConfig.from_dict loads all fields."""
        data = {
            "ambient_color": "#123456",
            "ambient_intensity": 0.8,
            "point_lights": [
                {"color": "#ABCDEF", "intensity": 1.5, "position": {"x": 1, "y": 1, "z": 1}}
            ],
        }
        cfg = LightingConfig.from_dict(data)
        assert cfg.ambient_color == "#123456"
        assert cfg.ambient_intensity == 0.8
        assert len(cfg.point_lights) == 1
        assert cfg.point_lights[0].color == "#ABCDEF"

    def test_from_dict_empty_list_uses_defaults(self):
        """Empty point_lights list triggers default lights."""
        data = {"ambient_color": "#000000", "point_lights": []}
        cfg = LightingConfig.from_dict(data)
        assert cfg.ambient_color == "#000000"
        assert len(cfg.point_lights) == 3  # Default lights

    def test_from_dict_none(self):
        """LightingConfig.from_dict handles None."""
        cfg = LightingConfig.from_dict(None)
        assert cfg.ambient_color == "#FFAA55"
        assert len(cfg.point_lights) == 3


class TestEnvironmentConfig:
    """Tests for EnvironmentConfig dataclass."""

    def test_default_values(self):
        """EnvironmentConfig has correct defaults."""
        cfg = EnvironmentConfig()
        assert cfg.rain_enabled is False
        assert cfg.starfield_enabled is True  # Matrix starfield
        assert cfg.fog_color == "#0f0f23"
        assert cfg.fog_density == 0.02

    def test_from_dict_full(self):
        """EnvironmentConfig.from_dict loads all fields."""
        data = {
            "rain_enabled": True,
            "starfield_enabled": False,
            "fog_color": "#FFFFFF",
            "fog_density": 0.1,
        }
        cfg = EnvironmentConfig.from_dict(data)
        assert cfg.rain_enabled is True
        assert cfg.starfield_enabled is False
        assert cfg.fog_color == "#FFFFFF"
        assert cfg.fog_density == 0.1

    def test_from_dict_partial(self):
        """EnvironmentConfig.from_dict fills missing fields."""
        data = {"rain_enabled": True}
        cfg = EnvironmentConfig.from_dict(data)
        assert cfg.rain_enabled is True
        assert cfg.starfield_enabled is True  # Default
        assert cfg.fog_color == "#0f0f23"


class TestFeaturesConfig:
    """Tests for FeaturesConfig dataclass."""

    def test_default_values_all_enabled(self):
        """FeaturesConfig defaults to all features enabled."""
        cfg = FeaturesConfig()
        assert cfg.chat_enabled is True
        assert cfg.visitor_avatars is True
        assert cfg.pip_familiar is True
        assert cfg.workshop_portal is True

    def test_from_dict_full(self):
        """FeaturesConfig.from_dict loads all fields."""
        data = {
            "chat_enabled": False,
            "visitor_avatars": False,
            "pip_familiar": False,
            "workshop_portal": False,
        }
        cfg = FeaturesConfig.from_dict(data)
        assert cfg.chat_enabled is False
        assert cfg.visitor_avatars is False
        assert cfg.pip_familiar is False
        assert cfg.workshop_portal is False

    def test_from_dict_partial(self):
        """FeaturesConfig.from_dict fills missing fields."""
        data = {"chat_enabled": False}
        cfg = FeaturesConfig.from_dict(data)
        assert cfg.chat_enabled is False
        assert cfg.visitor_avatars is True  # Default
        assert cfg.pip_familiar is True
        assert cfg.workshop_portal is True


class TestAgentConfig:
    """Tests for AgentConfig dataclass."""

    def test_default_values(self):
        """AgentConfig has correct defaults."""
        cfg = AgentConfig()
        assert cfg.name == ""
        assert cfg.role == ""
        assert cfg.enabled is True

    def test_from_dict_full(self):
        """AgentConfig.from_dict loads all fields."""
        data = {"name": "Timmy", "role": "guide", "enabled": False}
        cfg = AgentConfig.from_dict(data)
        assert cfg.name == "Timmy"
        assert cfg.role == "guide"
        assert cfg.enabled is False


class TestAgentsConfig:
    """Tests for AgentsConfig dataclass."""

    def test_default_values(self):
        """AgentsConfig has correct defaults."""
        cfg = AgentsConfig()
        assert cfg.default_count == 5
        assert cfg.max_count == 20
        assert cfg.agents == []

    def test_from_dict_with_agents(self):
        """AgentsConfig.from_dict loads agent list."""
        data = {
            "default_count": 10,
            "max_count": 50,
            "agents": [
                {"name": "Timmy", "role": "guide", "enabled": True},
                {"name": "Helper", "role": "assistant"},
            ],
        }
        cfg = AgentsConfig.from_dict(data)
        assert cfg.default_count == 10
        assert cfg.max_count == 50
        assert len(cfg.agents) == 2
        assert cfg.agents[0].name == "Timmy"
        assert cfg.agents[1].enabled is True  # Default


class TestMatrixConfig:
    """Tests for MatrixConfig dataclass."""

    def test_default_values(self):
        """MatrixConfig has correct composite defaults."""
        cfg = MatrixConfig()
        assert isinstance(cfg.lighting, LightingConfig)
        assert isinstance(cfg.environment, EnvironmentConfig)
        assert isinstance(cfg.features, FeaturesConfig)
        assert isinstance(cfg.agents, AgentsConfig)
        # Check the blend
        assert cfg.lighting.ambient_color == "#FFAA55"
        assert cfg.environment.starfield_enabled is True
        assert cfg.features.chat_enabled is True

    def test_from_dict_full(self):
        """MatrixConfig.from_dict loads all sections."""
        data = {
            "lighting": {"ambient_color": "#000000"},
            "environment": {"rain_enabled": True},
            "features": {"chat_enabled": False},
            "agents": {"default_count": 3},
        }
        cfg = MatrixConfig.from_dict(data)
        assert cfg.lighting.ambient_color == "#000000"
        assert cfg.environment.rain_enabled is True
        assert cfg.features.chat_enabled is False
        assert cfg.agents.default_count == 3

    def test_from_dict_partial(self):
        """MatrixConfig.from_dict fills missing sections with defaults."""
        data = {"lighting": {"ambient_color": "#111111"}}
        cfg = MatrixConfig.from_dict(data)
        assert cfg.lighting.ambient_color == "#111111"
        assert cfg.environment.starfield_enabled is True  # Default
        assert cfg.features.pip_familiar is True  # Default

    def test_from_dict_none(self):
        """MatrixConfig.from_dict handles None."""
        cfg = MatrixConfig.from_dict(None)
        assert cfg.lighting.ambient_color == "#FFAA55"
        assert cfg.features.chat_enabled is True

    def test_to_dict_roundtrip(self):
        """MatrixConfig.to_dict produces serializable output."""
        cfg = MatrixConfig()
        data = cfg.to_dict()
        assert isinstance(data, dict)
        assert "lighting" in data
        assert "environment" in data
        assert "features" in data
        assert "agents" in data
        # Verify point lights are included
        assert len(data["lighting"]["point_lights"]) == 3
class TestLoadFromYaml:
|
||||
"""Tests for load_from_yaml function."""
|
||||
|
||||
def test_loads_valid_yaml(self, tmp_path: Path):
|
||||
"""load_from_yaml reads a valid YAML file."""
|
||||
config_path = tmp_path / "matrix.yaml"
|
||||
data = {
|
||||
"lighting": {"ambient_color": "#TEST11"},
|
||||
"features": {"chat_enabled": False},
|
||||
}
|
||||
config_path.write_text(yaml.safe_dump(data))
|
||||
|
||||
cfg = load_from_yaml(config_path)
|
||||
assert cfg.lighting.ambient_color == "#TEST11"
|
||||
assert cfg.features.chat_enabled is False
|
||||
|
||||
def test_missing_file_returns_defaults(self, tmp_path: Path):
|
||||
"""load_from_yaml returns defaults when file doesn't exist."""
|
||||
config_path = tmp_path / "nonexistent.yaml"
|
||||
cfg = load_from_yaml(config_path)
|
||||
assert cfg.lighting.ambient_color == "#FFAA55"
|
||||
assert cfg.features.chat_enabled is True
|
||||
|
||||
def test_empty_file_returns_defaults(self, tmp_path: Path):
|
||||
"""load_from_yaml returns defaults for empty file."""
|
||||
config_path = tmp_path / "empty.yaml"
|
||||
config_path.write_text("")
|
||||
cfg = load_from_yaml(config_path)
|
||||
assert cfg.lighting.ambient_color == "#FFAA55"
|
||||
|
||||
def test_invalid_yaml_returns_defaults(self, tmp_path: Path):
|
||||
"""load_from_yaml returns defaults for invalid YAML."""
|
||||
config_path = tmp_path / "invalid.yaml"
|
||||
config_path.write_text("not: valid: yaml: [")
|
||||
cfg = load_from_yaml(config_path)
|
||||
assert cfg.lighting.ambient_color == "#FFAA55"
|
||||
assert cfg.features.chat_enabled is True
|
||||
|
||||
def test_non_dict_yaml_returns_defaults(self, tmp_path: Path):
|
||||
"""load_from_yaml returns defaults when YAML is not a dict."""
|
||||
config_path = tmp_path / "list.yaml"
|
||||
config_path.write_text("- item1\n- item2")
|
||||
cfg = load_from_yaml(config_path)
|
||||
assert cfg.lighting.ambient_color == "#FFAA55"
|
||||
|
||||
def test_loads_actual_config_file(self):
|
||||
"""load_from_yaml can load the project's config/matrix.yaml."""
|
||||
repo_root = Path(__file__).parent.parent.parent
|
||||
config_path = repo_root / "config" / "matrix.yaml"
|
||||
if not config_path.exists():
|
||||
pytest.skip("config/matrix.yaml not found")
|
||||
|
||||
cfg = load_from_yaml(config_path)
|
||||
# Verify it loaded with expected values
|
||||
assert cfg.lighting.ambient_color == "#FFAA55"
|
||||
assert len(cfg.lighting.point_lights) == 3
|
||||
assert cfg.environment.starfield_enabled is True
|
||||
assert cfg.features.workshop_portal is True
|
||||
|
||||
def test_str_path_accepted(self, tmp_path: Path):
|
||||
"""load_from_yaml accepts string path."""
|
||||
config_path = tmp_path / "matrix.yaml"
|
||||
config_path.write_text(yaml.safe_dump({"lighting": {"ambient_intensity": 0.9}}))
|
||||
|
||||
cfg = load_from_yaml(str(config_path))
|
||||
assert cfg.lighting.ambient_intensity == 0.9
|
||||
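
Note: the TestLoadFromYaml cases above pin down a fully defensive loading contract: a missing file, an empty file, malformed YAML, and non-dict YAML all degrade to defaults rather than raising. A minimal sketch of a loader that would satisfy them, assuming only the MatrixConfig dataclass from the module under test (the real implementation may differ in detail):

    from pathlib import Path

    import yaml

    def load_from_yaml(path):
        """Load MatrixConfig from YAML, falling back to defaults on any failure."""
        config_path = Path(path)  # accepts str or Path (test_str_path_accepted)
        if not config_path.exists():
            return MatrixConfig()  # missing file -> defaults
        try:
            data = yaml.safe_load(config_path.read_text())
        except yaml.YAMLError:
            return MatrixConfig()  # malformed YAML -> defaults
        if not isinstance(data, dict):
            return MatrixConfig()  # empty file, list, or scalar -> defaults
        return MatrixConfig.from_dict(data)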
502 tests/unit/test_presence.py Normal file
@@ -0,0 +1,502 @@
"""Tests for infrastructure.presence — presence state serializer."""

from unittest.mock import patch

import pytest

from infrastructure.presence import (
    DEFAULT_PIP_STATE,
    _get_familiar_state,
    produce_agent_state,
    produce_bark,
    produce_system_status,
    produce_thought,
    serialize_presence,
)


class TestSerializePresence:
    """Round-trip and edge-case tests for serialize_presence()."""

    @pytest.fixture()
    def full_presence(self):
        """A complete ADR-023 presence dict."""
        return {
            "version": 1,
            "liveness": "2026-03-21T12:00:00Z",
            "current_focus": "writing tests",
            "mood": "focused",
            "energy": 0.9,
            "confidence": 0.85,
            "active_threads": [
                {"type": "thinking", "ref": "refactor presence", "status": "active"}
            ],
            "recent_events": ["committed code"],
            "concerns": ["test coverage"],
            "familiar": {"name": "Pip", "state": "alert"},
        }

    def test_full_round_trip(self, full_presence):
        """All ADR-023 fields map to the expected camelCase keys."""
        result = serialize_presence(full_presence)

        assert result["timmyState"]["mood"] == "focused"
        assert result["timmyState"]["activity"] == "writing tests"
        assert result["timmyState"]["energy"] == 0.9
        assert result["timmyState"]["confidence"] == 0.85
        assert result["familiar"] == {"name": "Pip", "state": "alert"}
        assert result["activeThreads"] == full_presence["active_threads"]
        assert result["recentEvents"] == ["committed code"]
        assert result["concerns"] == ["test coverage"]
        assert result["visitorPresent"] is False
        assert result["updatedAt"] == "2026-03-21T12:00:00Z"
        assert result["version"] == 1

    def test_defaults_on_empty_dict(self):
        """Missing fields fall back to safe defaults."""
        result = serialize_presence({})

        assert result["timmyState"]["mood"] == "calm"
        assert result["timmyState"]["activity"] == "idle"
        assert result["timmyState"]["energy"] == 0.5
        assert result["timmyState"]["confidence"] == 0.7
        assert result["familiar"] is None
        assert result["activeThreads"] == []
        assert result["recentEvents"] == []
        assert result["concerns"] == []
        assert result["visitorPresent"] is False
        assert result["version"] == 1
        # updatedAt should be an ISO timestamp string
        assert "T" in result["updatedAt"]

    def test_partial_presence(self):
        """Only some fields provided — others get defaults."""
        result = serialize_presence({"mood": "excited", "energy": 0.3})

        assert result["timmyState"]["mood"] == "excited"
        assert result["timmyState"]["energy"] == 0.3
        assert result["timmyState"]["confidence"] == 0.7  # default
        assert result["activeThreads"] == []  # default

    def test_return_type_is_dict(self, full_presence):
        """serialize_presence always returns a plain dict."""
        result = serialize_presence(full_presence)
        assert isinstance(result, dict)
        assert isinstance(result["timmyState"], dict)

    def test_visitor_present_always_false(self, full_presence):
        """visitorPresent is always False — set by the WS layer, not here."""
        assert serialize_presence(full_presence)["visitorPresent"] is False
        assert serialize_presence({})["visitorPresent"] is False


class TestProduceAgentState:
    """Tests for produce_agent_state() — Matrix agent_state message producer."""

    @pytest.fixture()
    def full_presence(self):
        """A presence dict with all agent_state-relevant fields."""
        return {
            "display_name": "Timmy",
            "role": "companion",
            "current_focus": "thinking about tests",
            "mood": "focused",
            "energy": 0.9,
            "bark": "Running test suite...",
        }

    @patch("infrastructure.presence.time")
    def test_full_message_structure(self, mock_time, full_presence):
        """Returns dict with type, agent_id, data, and ts keys."""
        mock_time.time.return_value = 1742529600
        result = produce_agent_state("timmy", full_presence)

        assert result["type"] == "agent_state"
        assert result["agent_id"] == "timmy"
        assert result["ts"] == 1742529600
        assert isinstance(result["data"], dict)

    def test_data_fields(self, full_presence):
        """data dict contains all required presence fields."""
        data = produce_agent_state("timmy", full_presence)["data"]

        assert data["display_name"] == "Timmy"
        assert data["role"] == "companion"
        assert data["status"] == "thinking"
        assert data["mood"] == "focused"
        assert data["energy"] == 0.9
        assert data["bark"] == "Running test suite..."

    def test_defaults_on_empty_presence(self):
        """Missing fields get sensible defaults."""
        result = produce_agent_state("timmy", {})
        data = result["data"]

        assert data["display_name"] == "Timmy"  # agent_id.title()
        assert data["role"] == "assistant"
        assert data["status"] == "idle"
        assert data["mood"] == "calm"
        assert data["energy"] == 0.5
        assert data["bark"] == ""

    def test_ts_is_unix_timestamp(self):
        """ts should be an integer Unix timestamp."""
        result = produce_agent_state("timmy", {})
        assert isinstance(result["ts"], int)
        assert result["ts"] > 0

    @pytest.mark.parametrize(
        ("focus", "expected_status"),
        [
            ("thinking about code", "thinking"),
            ("speaking to user", "speaking"),
            ("talking with agent", "speaking"),
            ("idle", "idle"),
            ("", "idle"),
            ("writing tests", "online"),
            ("reviewing PR", "online"),
        ],
    )
    def test_status_derivation(self, focus, expected_status):
        """current_focus maps to the correct Matrix status."""
        data = produce_agent_state("t", {"current_focus": focus})["data"]
        assert data["status"] == expected_status

    def test_agent_id_passed_through(self):
        """agent_id appears in the top-level message."""
        result = produce_agent_state("spark", {})
        assert result["agent_id"] == "spark"

    def test_display_name_from_agent_id(self):
        """When display_name is missing, it's derived from agent_id.title()."""
        data = produce_agent_state("spark", {})["data"]
        assert data["display_name"] == "Spark"

    def test_familiar_in_data(self):
        """agent_state.data includes familiar field with required keys."""
        data = produce_agent_state("timmy", {})["data"]

        assert "familiar" in data
        familiar = data["familiar"]
        assert familiar["name"] == "Pip"
        assert "mood" in familiar
        assert "energy" in familiar
        assert familiar["color"] == "0x00b450"
        assert familiar["trail_color"] == "0xdaa520"

    def test_familiar_has_all_required_fields(self):
        """familiar dict contains all required fields per acceptance criteria."""
        data = produce_agent_state("timmy", {})["data"]
        familiar = data["familiar"]

        required_fields = {"name", "mood", "energy", "color", "trail_color"}
        assert set(familiar.keys()) >= required_fields


class TestFamiliarState:
    """Tests for _get_familiar_state() — Pip familiar state retrieval."""

    def test_get_familiar_state_returns_dict(self):
        """_get_familiar_state returns a dict."""
        result = _get_familiar_state()
        assert isinstance(result, dict)

    def test_get_familiar_state_has_required_fields(self):
        """Result contains name, mood, energy, color, trail_color."""
        result = _get_familiar_state()

        assert result["name"] == "Pip"
        assert "mood" in result
        assert isinstance(result["energy"], (int, float))
        assert result["color"] == "0x00b450"
        assert result["trail_color"] == "0xdaa520"

    def test_default_pip_state_constant(self):
        """DEFAULT_PIP_STATE has expected values."""
        assert DEFAULT_PIP_STATE["name"] == "Pip"
        assert DEFAULT_PIP_STATE["mood"] == "sleepy"
        assert DEFAULT_PIP_STATE["energy"] == 0.5
        assert DEFAULT_PIP_STATE["color"] == "0x00b450"
        assert DEFAULT_PIP_STATE["trail_color"] == "0xdaa520"

    @patch("infrastructure.presence.logger")
    def test_get_familiar_state_fallback_on_exception(self, mock_logger):
        """When familiar module raises, falls back to default and logs warning."""
        # Patch inside the function where pip_familiar is imported
        with patch("timmy.familiar.pip_familiar.snapshot") as mock_snapshot:
            mock_snapshot.side_effect = RuntimeError("Pip is napping")
            result = _get_familiar_state()

        assert result["name"] == "Pip"
        assert result["mood"] == "sleepy"
        mock_logger.warning.assert_called_once()
        assert "Pip is napping" in str(mock_logger.warning.call_args)


class TestProduceBark:
    """Tests for produce_bark() — Matrix bark message producer."""

    @patch("infrastructure.presence.time")
    def test_full_message_structure(self, mock_time):
        """Returns dict with type, agent_id, data, and ts keys."""
        mock_time.time.return_value = 1742529600
        result = produce_bark("timmy", "Hello world!")

        assert result["type"] == "bark"
        assert result["agent_id"] == "timmy"
        assert result["ts"] == 1742529600
        assert isinstance(result["data"], dict)

    def test_data_fields(self):
        """data dict contains text, reply_to, and style."""
        result = produce_bark("timmy", "Hello world!", reply_to="msg-123", style="shout")
        data = result["data"]

        assert data["text"] == "Hello world!"
        assert data["reply_to"] == "msg-123"
        assert data["style"] == "shout"

    def test_default_style_is_speech(self):
        """When style is not provided, defaults to 'speech'."""
        result = produce_bark("timmy", "Hello!")
        assert result["data"]["style"] == "speech"

    def test_default_reply_to_is_none(self):
        """When reply_to is not provided, defaults to None."""
        result = produce_bark("timmy", "Hello!")
        assert result["data"]["reply_to"] is None

    def test_text_truncated_to_280_chars(self):
        """Text longer than 280 chars is truncated."""
        long_text = "A" * 500
        result = produce_bark("timmy", long_text)
        assert len(result["data"]["text"]) == 280
        assert result["data"]["text"] == "A" * 280

    def test_text_exactly_280_chars_not_truncated(self):
        """Text exactly 280 chars is not truncated."""
        text = "B" * 280
        result = produce_bark("timmy", text)
        assert result["data"]["text"] == text

    def test_text_shorter_than_280_not_padded(self):
        """Text shorter than 280 chars is not padded."""
        result = produce_bark("timmy", "Short")
        assert result["data"]["text"] == "Short"

    @pytest.mark.parametrize(
        ("style", "expected_style"),
        [
            ("speech", "speech"),
            ("thought", "thought"),
            ("whisper", "whisper"),
            ("shout", "shout"),
        ],
    )
    def test_valid_styles_preserved(self, style, expected_style):
        """Valid style values are preserved."""
        result = produce_bark("timmy", "Hello!", style=style)
        assert result["data"]["style"] == expected_style

    @pytest.mark.parametrize(
        "invalid_style",
        ["yell", "scream", "", "SPEECH", "Speech", None, 123],
    )
    def test_invalid_style_defaults_to_speech(self, invalid_style):
        """Invalid style values fall back to 'speech'."""
        result = produce_bark("timmy", "Hello!", style=invalid_style)
        assert result["data"]["style"] == "speech"

    def test_empty_text_handled(self):
        """Empty text is handled gracefully."""
        result = produce_bark("timmy", "")
        assert result["data"]["text"] == ""

    def test_ts_is_unix_timestamp(self):
        """ts should be an integer Unix timestamp."""
        result = produce_bark("timmy", "Hello!")
        assert isinstance(result["ts"], int)
        assert result["ts"] > 0

    def test_agent_id_passed_through(self):
        """agent_id appears in the top-level message."""
        result = produce_bark("spark", "Hello!")
        assert result["agent_id"] == "spark"

    def test_with_all_parameters(self):
        """Full parameter set produces expected output."""
        result = produce_bark(
            agent_id="timmy",
            text="Running test suite...",
            reply_to="parent-msg-456",
            style="thought",
        )

        assert result["type"] == "bark"
        assert result["agent_id"] == "timmy"
        assert result["data"]["text"] == "Running test suite..."
        assert result["data"]["reply_to"] == "parent-msg-456"
        assert result["data"]["style"] == "thought"


class TestProduceThought:
    """Tests for produce_thought() — Matrix thought message producer."""

    @patch("infrastructure.presence.time")
    def test_full_message_structure(self, mock_time):
        """Returns dict with type, agent_id, data, and ts keys."""
        mock_time.time.return_value = 1742529600
        result = produce_thought("timmy", "Considering the options...", 42)

        assert result["type"] == "thought"
        assert result["agent_id"] == "timmy"
        assert result["ts"] == 1742529600
        assert isinstance(result["data"], dict)

    def test_data_fields(self):
        """data dict contains text, thought_id, and chain_id."""
        result = produce_thought("timmy", "Considering...", 42, chain_id="chain-123")
        data = result["data"]

        assert data["text"] == "Considering..."
        assert data["thought_id"] == 42
        assert data["chain_id"] == "chain-123"

    def test_default_chain_id_is_none(self):
        """When chain_id is not provided, defaults to None."""
        result = produce_thought("timmy", "Thinking...", 1)
        assert result["data"]["chain_id"] is None

    def test_text_truncated_to_500_chars(self):
        """Text longer than 500 chars is truncated."""
        long_text = "A" * 600
        result = produce_thought("timmy", long_text, 1)
        assert len(result["data"]["text"]) == 500
        assert result["data"]["text"] == "A" * 500

    def test_text_exactly_500_chars_not_truncated(self):
        """Text exactly 500 chars is not truncated."""
        text = "B" * 500
        result = produce_thought("timmy", text, 1)
        assert result["data"]["text"] == text

    def test_text_shorter_than_500_not_padded(self):
        """Text shorter than 500 chars is not padded."""
        result = produce_thought("timmy", "Short thought", 1)
        assert result["data"]["text"] == "Short thought"

    def test_empty_text_handled(self):
        """Empty text is handled gracefully."""
        result = produce_thought("timmy", "", 1)
        assert result["data"]["text"] == ""

    def test_ts_is_unix_timestamp(self):
        """ts should be an integer Unix timestamp."""
        result = produce_thought("timmy", "Hello!", 1)
        assert isinstance(result["ts"], int)
        assert result["ts"] > 0

    def test_agent_id_passed_through(self):
        """agent_id appears in the top-level message."""
        result = produce_thought("spark", "Hello!", 1)
        assert result["agent_id"] == "spark"

    def test_thought_id_passed_through(self):
        """thought_id appears in the data."""
        result = produce_thought("timmy", "Hello!", 999)
        assert result["data"]["thought_id"] == 999

    def test_with_all_parameters(self):
        """Full parameter set produces expected output."""
        result = produce_thought(
            agent_id="timmy",
            thought_text="Analyzing the situation...",
            thought_id=42,
            chain_id="chain-abc",
        )

        assert result["type"] == "thought"
        assert result["agent_id"] == "timmy"
        assert result["data"]["text"] == "Analyzing the situation..."
        assert result["data"]["thought_id"] == 42
        assert result["data"]["chain_id"] == "chain-abc"


class TestProduceSystemStatus:
    """Tests for produce_system_status() — Matrix system_status message producer."""

    @patch("infrastructure.presence.time")
    def test_full_message_structure(self, mock_time):
        """Returns dict with type, data, and ts keys."""
        mock_time.time.return_value = 1742529600
        result = produce_system_status()

        assert result["type"] == "system_status"
        assert result["ts"] == 1742529600
        assert isinstance(result["data"], dict)

    def test_data_has_required_fields(self):
        """data dict contains all required system status fields."""
        result = produce_system_status()
        data = result["data"]

        assert "agents_online" in data
        assert "visitors" in data
        assert "uptime_seconds" in data
        assert "thinking_active" in data
        assert "memory_count" in data

    def test_data_field_types(self):
        """All data fields have correct types."""
        result = produce_system_status()
        data = result["data"]

        assert isinstance(data["agents_online"], int)
        assert isinstance(data["visitors"], int)
        assert isinstance(data["uptime_seconds"], int)
        assert isinstance(data["thinking_active"], bool)
        assert isinstance(data["memory_count"], int)

    def test_agents_online_is_non_negative(self):
        """agents_online is never negative."""
        result = produce_system_status()
        assert result["data"]["agents_online"] >= 0

    def test_visitors_is_non_negative(self):
        """visitors is never negative."""
        result = produce_system_status()
        assert result["data"]["visitors"] >= 0

    def test_uptime_seconds_is_non_negative(self):
        """uptime_seconds is never negative."""
        result = produce_system_status()
        assert result["data"]["uptime_seconds"] >= 0

    def test_memory_count_is_non_negative(self):
        """memory_count is never negative."""
        result = produce_system_status()
        assert result["data"]["memory_count"] >= 0

    @patch("infrastructure.presence.time")
    def test_ts_is_unix_timestamp(self, mock_time):
        """ts should be an integer Unix timestamp."""
        mock_time.time.return_value = 1742529600
        result = produce_system_status()
        assert isinstance(result["ts"], int)
        assert result["ts"] == 1742529600

    @patch("infrastructure.presence.logger")
    def test_graceful_degradation_on_import_errors(self, mock_logger):
        """Function returns valid dict even when imports fail."""
        # This test verifies the function handles failures gracefully
        # by checking it always returns the expected structure
        result = produce_system_status()

        assert result["type"] == "system_status"
        assert isinstance(result["data"], dict)
        assert isinstance(result["ts"], int)

    def test_returns_dict(self):
        """produce_system_status always returns a plain dict."""
        result = produce_system_status()
        assert isinstance(result, dict)
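
Note: the TestProduceBark cases collectively specify the producer's contract: 280-character truncation, a closed set of styles with fallback to "speech", and an integer Unix timestamp. A minimal sketch consistent with those tests; the VALID_BARK_STYLES name is hypothetical, only the observable behavior is taken from the tests:

    import time

    VALID_BARK_STYLES = {"speech", "thought", "whisper", "shout"}  # hypothetical name

    def produce_bark(agent_id, text, reply_to=None, style="speech"):
        """Build a Matrix bark message; invalid styles fall back to 'speech'."""
        if style not in VALID_BARK_STYLES:  # also catches None, ints, wrong case
            style = "speech"
        return {
            "type": "bark",
            "agent_id": agent_id,
            "data": {"text": text[:280], "reply_to": reply_to, "style": style},
            "ts": int(time.time()),
        }

produce_thought follows the same shape, with a 500-character cap and thought_id/chain_id in the payload instead of reply_to/style.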
173 tests/unit/test_protocol.py Normal file
@@ -0,0 +1,173 @@
"""Tests for infrastructure.protocol — WebSocket message types."""

import json

import pytest

from infrastructure.protocol import (
    AgentStateMessage,
    BarkMessage,
    ConnectionAckMessage,
    ErrorMessage,
    MemoryFlashMessage,
    MessageType,
    SystemStatusMessage,
    TaskUpdateMessage,
    ThoughtMessage,
    VisitorStateMessage,
    WSMessage,
)

# ---------------------------------------------------------------------------
# MessageType enum
# ---------------------------------------------------------------------------


class TestMessageType:
    """MessageType enum covers all 9 Matrix PROTOCOL.md types."""

    def test_has_all_nine_types(self):
        assert len(MessageType) == 9

    @pytest.mark.parametrize(
        "member,value",
        [
            (MessageType.AGENT_STATE, "agent_state"),
            (MessageType.VISITOR_STATE, "visitor_state"),
            (MessageType.BARK, "bark"),
            (MessageType.THOUGHT, "thought"),
            (MessageType.SYSTEM_STATUS, "system_status"),
            (MessageType.CONNECTION_ACK, "connection_ack"),
            (MessageType.ERROR, "error"),
            (MessageType.TASK_UPDATE, "task_update"),
            (MessageType.MEMORY_FLASH, "memory_flash"),
        ],
    )
    def test_enum_values(self, member, value):
        assert member.value == value

    def test_str_comparison(self):
        """MessageType is a str enum so it can be compared to plain strings."""
        assert MessageType.BARK == "bark"


# ---------------------------------------------------------------------------
# to_json / from_json round-trip
# ---------------------------------------------------------------------------


class TestAgentStateMessage:
    def test_defaults(self):
        msg = AgentStateMessage()
        assert msg.type == "agent_state"
        assert msg.agent_id == ""
        assert msg.data == {}

    def test_round_trip(self):
        msg = AgentStateMessage(agent_id="timmy", data={"mood": "happy"}, ts=1000.0)
        raw = msg.to_json()
        restored = AgentStateMessage.from_json(raw)
        assert restored.agent_id == "timmy"
        assert restored.data == {"mood": "happy"}
        assert restored.ts == 1000.0

    def test_to_json_structure(self):
        msg = AgentStateMessage(agent_id="timmy", data={"x": 1}, ts=123.0)
        parsed = json.loads(msg.to_json())
        assert parsed["type"] == "agent_state"
        assert parsed["agent_id"] == "timmy"
        assert parsed["data"] == {"x": 1}
        assert parsed["ts"] == 123.0


class TestVisitorStateMessage:
    def test_round_trip(self):
        msg = VisitorStateMessage(visitor_id="v1", data={"page": "/"}, ts=1.0)
        restored = VisitorStateMessage.from_json(msg.to_json())
        assert restored.visitor_id == "v1"
        assert restored.data == {"page": "/"}


class TestBarkMessage:
    def test_round_trip(self):
        msg = BarkMessage(agent_id="timmy", content="woof!", ts=1.0)
        restored = BarkMessage.from_json(msg.to_json())
        assert restored.agent_id == "timmy"
        assert restored.content == "woof!"


class TestThoughtMessage:
    def test_round_trip(self):
        msg = ThoughtMessage(agent_id="timmy", content="hmm...", ts=1.0)
        restored = ThoughtMessage.from_json(msg.to_json())
        assert restored.content == "hmm..."


class TestSystemStatusMessage:
    def test_round_trip(self):
        msg = SystemStatusMessage(status="healthy", data={"uptime": 3600}, ts=1.0)
        restored = SystemStatusMessage.from_json(msg.to_json())
        assert restored.status == "healthy"
        assert restored.data == {"uptime": 3600}


class TestConnectionAckMessage:
    def test_round_trip(self):
        msg = ConnectionAckMessage(client_id="abc-123", ts=1.0)
        restored = ConnectionAckMessage.from_json(msg.to_json())
        assert restored.client_id == "abc-123"


class TestErrorMessage:
    def test_round_trip(self):
        msg = ErrorMessage(code="INVALID", message="bad request", ts=1.0)
        restored = ErrorMessage.from_json(msg.to_json())
        assert restored.code == "INVALID"
        assert restored.message == "bad request"


class TestTaskUpdateMessage:
    def test_round_trip(self):
        msg = TaskUpdateMessage(task_id="t1", status="completed", data={"result": "ok"}, ts=1.0)
        restored = TaskUpdateMessage.from_json(msg.to_json())
        assert restored.task_id == "t1"
        assert restored.status == "completed"
        assert restored.data == {"result": "ok"}


class TestMemoryFlashMessage:
    def test_round_trip(self):
        msg = MemoryFlashMessage(agent_id="timmy", memory_key="fav_food", content="kibble", ts=1.0)
        restored = MemoryFlashMessage.from_json(msg.to_json())
        assert restored.memory_key == "fav_food"
        assert restored.content == "kibble"


# ---------------------------------------------------------------------------
# WSMessage.from_json dispatch
# ---------------------------------------------------------------------------


class TestWSMessageDispatch:
    """WSMessage.from_json dispatches to the correct subclass."""

    def test_dispatch_to_bark(self):
        raw = json.dumps({"type": "bark", "agent_id": "t", "content": "woof", "ts": 1.0})
        msg = WSMessage.from_json(raw)
        assert isinstance(msg, BarkMessage)
        assert msg.content == "woof"

    def test_dispatch_to_error(self):
        raw = json.dumps({"type": "error", "code": "E1", "message": "oops", "ts": 1.0})
        msg = WSMessage.from_json(raw)
        assert isinstance(msg, ErrorMessage)

    def test_unknown_type_returns_base(self):
        raw = json.dumps({"type": "unknown_future_type", "ts": 1.0})
        msg = WSMessage.from_json(raw)
        assert type(msg) is WSMessage
        assert msg.type == "unknown_future_type"

    def test_invalid_json_raises(self):
        with pytest.raises(json.JSONDecodeError):
            WSMessage.from_json("not json")
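
Note: the dispatch tests imply a type-keyed registry on the base class: known "type" values deserialize to the matching subclass, unknown values fall back to WSMessage itself, and malformed input propagates json.JSONDecodeError. A self-contained sketch of that pattern; the registry attribute and plain-attribute storage are illustrative, since the module under test appears to use dataclasses:

    import json

    class WSMessage:
        """Base message; unknown "type" values deserialize to this class."""

        registry = {}  # hypothetical: maps a "type" string to its subclass

        def __init__(self, **fields):
            self.type = ""
            self.ts = 0.0
            self.__dict__.update(fields)

        @classmethod
        def from_json(cls, raw):
            payload = json.loads(raw)  # invalid JSON raises json.JSONDecodeError
            target = WSMessage.registry.get(payload.get("type"), WSMessage)
            return target(**payload)

    class BarkMessage(WSMessage):
        pass

    WSMessage.registry["bark"] = BarkMessage  # one entry per PROTOCOL.md type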
489 tests/unit/test_quest_system.py Normal file
@@ -0,0 +1,489 @@
"""Unit tests for the quest system.

Tests quest definitions, progress tracking, completion detection,
and token rewards.
"""

from __future__ import annotations

import pytest

from timmy.quest_system import (
    QuestDefinition,
    QuestProgress,
    QuestStatus,
    QuestType,
    _is_on_cooldown,
    claim_quest_reward,
    evaluate_quest_progress,
    get_or_create_progress,
    get_quest_definition,
    get_quest_leaderboard,
    load_quest_config,
    reset_quest_progress,
    update_quest_progress,
)


@pytest.fixture(autouse=True)
def clean_quest_state():
    """Reset quest progress between tests."""
    reset_quest_progress()
    yield
    reset_quest_progress()


@pytest.fixture
def sample_issue_count_quest():
    """Create a sample issue_count quest definition."""
    return QuestDefinition(
        id="test_close_issues",
        name="Test Issue Closer",
        description="Close 3 test issues",
        reward_tokens=100,
        quest_type=QuestType.ISSUE_COUNT,
        enabled=True,
        repeatable=False,
        cooldown_hours=0,
        criteria={"target_count": 3, "issue_labels": ["test"]},
        notification_message="Test quest complete! Earned {tokens} tokens.",
    )


@pytest.fixture
def sample_daily_run_quest():
    """Create a sample daily_run quest definition."""
    return QuestDefinition(
        id="test_daily_run",
        name="Test Daily Runner",
        description="Complete 5 sessions",
        reward_tokens=250,
        quest_type=QuestType.DAILY_RUN,
        enabled=True,
        repeatable=True,
        cooldown_hours=24,
        criteria={"min_sessions": 5},
        notification_message="Daily run quest complete! Earned {tokens} tokens.",
    )


# ── Quest Definition Tests ───────────────────────────────────────────────


class TestQuestDefinition:
    def test_from_dict_minimal(self):
        data = {"id": "test_quest", "name": "Test Quest"}
        quest = QuestDefinition.from_dict(data)
        assert quest.id == "test_quest"
        assert quest.name == "Test Quest"
        assert quest.quest_type == QuestType.CUSTOM
        assert quest.enabled is True

    def test_from_dict_full(self):
        data = {
            "id": "full_quest",
            "name": "Full Quest",
            "description": "A test quest",
            "reward_tokens": 500,
            "type": "issue_count",
            "enabled": False,
            "repeatable": True,
            "cooldown_hours": 12,
            "criteria": {"target_count": 5},
            "notification_message": "Done!",
        }
        quest = QuestDefinition.from_dict(data)
        assert quest.id == "full_quest"
        assert quest.reward_tokens == 500
        assert quest.quest_type == QuestType.ISSUE_COUNT
        assert quest.enabled is False
        assert quest.repeatable is True
        assert quest.cooldown_hours == 12


# ── Quest Progress Tests ─────────────────────────────────────────────────


class TestQuestProgress:
    def test_progress_creation(self):
        progress = QuestProgress(
            quest_id="test_quest",
            agent_id="test_agent",
            status=QuestStatus.NOT_STARTED,
        )
        assert progress.quest_id == "test_quest"
        assert progress.agent_id == "test_agent"
        assert progress.current_value == 0

    def test_progress_to_dict(self):
        progress = QuestProgress(
            quest_id="test_quest",
            agent_id="test_agent",
            status=QuestStatus.IN_PROGRESS,
            current_value=2,
            target_value=5,
        )
        data = progress.to_dict()
        assert data["quest_id"] == "test_quest"
        assert data["status"] == "in_progress"
        assert data["current_value"] == 2


# ── Quest Loading Tests ──────────────────────────────────────────────────


class TestQuestLoading:
    def test_load_quest_config(self):
        definitions, settings = load_quest_config()
        assert isinstance(definitions, dict)
        assert isinstance(settings, dict)

    def test_get_quest_definition_exists(self):
        # Should return None for non-existent quest in fresh state
        quest = get_quest_definition("nonexistent")
        # The function returns from loaded config, which may have quests
        # or be empty if config doesn't exist
        assert quest is None or isinstance(quest, QuestDefinition)

    def test_get_quest_definition_not_found(self):
        quest = get_quest_definition("definitely_not_a_real_quest_12345")
        assert quest is None


# ── Quest Progress Management Tests ─────────────────────────────────────


class TestQuestProgressManagement:
    def test_get_or_create_progress_new(self):
        # First create a quest definition
        quest = QuestDefinition(
            id="progress_test",
            name="Progress Test",
            description="Test quest",
            reward_tokens=100,
            quest_type=QuestType.ISSUE_COUNT,
            enabled=True,
            repeatable=False,
            cooldown_hours=0,
            criteria={"target_count": 3},
            notification_message="Done!",
        )

        # Need to inject into the definitions dict
        from timmy.quest_system import _quest_definitions

        _quest_definitions["progress_test"] = quest

        progress = get_or_create_progress("progress_test", "agent1")
        assert progress.quest_id == "progress_test"
        assert progress.agent_id == "agent1"
        assert progress.status == QuestStatus.NOT_STARTED
        assert progress.target_value == 3

        del _quest_definitions["progress_test"]

    def test_update_quest_progress(self):
        quest = QuestDefinition(
            id="update_test",
            name="Update Test",
            description="Test quest",
            reward_tokens=100,
            quest_type=QuestType.ISSUE_COUNT,
            enabled=True,
            repeatable=False,
            cooldown_hours=0,
            criteria={"target_count": 3},
            notification_message="Done!",
        )

        from timmy.quest_system import _quest_definitions

        _quest_definitions["update_test"] = quest

        # Create initial progress
        progress = get_or_create_progress("update_test", "agent1")
        assert progress.current_value == 0

        # Update progress
        updated = update_quest_progress("update_test", "agent1", 2)
        assert updated.current_value == 2
        assert updated.status == QuestStatus.NOT_STARTED

        # Complete the quest
        completed = update_quest_progress("update_test", "agent1", 3)
        assert completed.current_value == 3
        assert completed.status == QuestStatus.COMPLETED
        assert completed.completed_at != ""

        del _quest_definitions["update_test"]


# ── Quest Evaluation Tests ───────────────────────────────────────────────


class TestQuestEvaluation:
    def test_evaluate_issue_count_quest(self):
        quest = QuestDefinition(
            id="eval_test",
            name="Eval Test",
            description="Test quest",
            reward_tokens=100,
            quest_type=QuestType.ISSUE_COUNT,
            enabled=True,
            repeatable=False,
            cooldown_hours=0,
            criteria={"target_count": 2, "issue_labels": ["test"]},
            notification_message="Done!",
        )

        from timmy.quest_system import _quest_definitions

        _quest_definitions["eval_test"] = quest

        # Simulate closed issues
        closed_issues = [
            {"id": 1, "labels": [{"name": "test"}]},
            {"id": 2, "labels": [{"name": "test"}, {"name": "bug"}]},
            {"id": 3, "labels": [{"name": "other"}]},
        ]

        context = {"closed_issues": closed_issues}
        progress = evaluate_quest_progress("eval_test", "agent1", context)

        assert progress is not None
        assert progress.current_value == 2  # Two issues with 'test' label

        del _quest_definitions["eval_test"]

    def test_evaluate_issue_reduce_quest(self):
        quest = QuestDefinition(
            id="reduce_test",
            name="Reduce Test",
            description="Test quest",
            reward_tokens=200,
            quest_type=QuestType.ISSUE_REDUCE,
            enabled=True,
            repeatable=False,
            cooldown_hours=0,
            criteria={"target_reduction": 2},
            notification_message="Done!",
        )

        from timmy.quest_system import _quest_definitions

        _quest_definitions["reduce_test"] = quest

        context = {"previous_issue_count": 10, "current_issue_count": 7}
        progress = evaluate_quest_progress("reduce_test", "agent1", context)

        assert progress is not None
        assert progress.current_value == 3  # Reduced by 3

        del _quest_definitions["reduce_test"]

    def test_evaluate_daily_run_quest(self):
        quest = QuestDefinition(
            id="daily_test",
            name="Daily Test",
            description="Test quest",
            reward_tokens=250,
            quest_type=QuestType.DAILY_RUN,
            enabled=True,
            repeatable=True,
            cooldown_hours=24,
            criteria={"min_sessions": 5},
            notification_message="Done!",
        )

        from timmy.quest_system import _quest_definitions

        _quest_definitions["daily_test"] = quest

        context = {"sessions_completed": 5}
        progress = evaluate_quest_progress("daily_test", "agent1", context)

        assert progress is not None
        assert progress.current_value == 5
        assert progress.status == QuestStatus.COMPLETED

        del _quest_definitions["daily_test"]


# ── Quest Cooldown Tests ─────────────────────────────────────────────────


class TestQuestCooldown:
    def test_is_on_cooldown_no_cooldown(self):
        quest = QuestDefinition(
            id="cooldown_test",
            name="Cooldown Test",
            description="Test quest",
            reward_tokens=100,
            quest_type=QuestType.ISSUE_COUNT,
            enabled=True,
            repeatable=True,
            cooldown_hours=24,
            criteria={},
            notification_message="Done!",
        )

        progress = QuestProgress(
            quest_id="cooldown_test",
            agent_id="agent1",
            status=QuestStatus.CLAIMED,
        )

        # No last_completed_at means no cooldown
        assert _is_on_cooldown(progress, quest) is False


# ── Quest Reward Tests ───────────────────────────────────────────────────


class TestQuestReward:
    def test_claim_quest_reward_not_completed(self):
        quest = QuestDefinition(
            id="reward_test",
            name="Reward Test",
            description="Test quest",
            reward_tokens=100,
            quest_type=QuestType.ISSUE_COUNT,
            enabled=True,
            repeatable=False,
            cooldown_hours=0,
            criteria={"target_count": 3},
            notification_message="Done!",
        )

        from timmy.quest_system import _quest_definitions, _quest_progress

        _quest_definitions["reward_test"] = quest

        # Create progress but don't complete
        progress = get_or_create_progress("reward_test", "agent1")
        _quest_progress["agent1:reward_test"] = progress

        # Try to claim - should fail
        reward = claim_quest_reward("reward_test", "agent1")
        assert reward is None

        del _quest_definitions["reward_test"]


# ── Leaderboard Tests ────────────────────────────────────────────────────


class TestQuestLeaderboard:
    def test_get_quest_leaderboard_empty(self):
        reset_quest_progress()
        leaderboard = get_quest_leaderboard()
        assert leaderboard == []

    def test_get_quest_leaderboard_with_data(self):
        # Create and complete a quest for two agents
        quest = QuestDefinition(
            id="leaderboard_test",
            name="Leaderboard Test",
            description="Test quest",
            reward_tokens=100,
            quest_type=QuestType.ISSUE_COUNT,
            enabled=True,
            repeatable=True,
            cooldown_hours=0,
            criteria={"target_count": 1},
            notification_message="Done!",
        )

        from timmy.quest_system import _quest_definitions, _quest_progress

        _quest_definitions["leaderboard_test"] = quest

        # Create progress for agent1 with 2 completions
        progress1 = QuestProgress(
            quest_id="leaderboard_test",
            agent_id="agent1",
            status=QuestStatus.NOT_STARTED,
            completion_count=2,
        )
        _quest_progress["agent1:leaderboard_test"] = progress1

        # Create progress for agent2 with 1 completion
        progress2 = QuestProgress(
            quest_id="leaderboard_test",
            agent_id="agent2",
            status=QuestStatus.NOT_STARTED,
            completion_count=1,
        )
        _quest_progress["agent2:leaderboard_test"] = progress2

        leaderboard = get_quest_leaderboard()

        assert len(leaderboard) == 2
        # agent1 should be first (more tokens)
        assert leaderboard[0]["agent_id"] == "agent1"
        assert leaderboard[0]["total_tokens"] == 200
        assert leaderboard[1]["agent_id"] == "agent2"
        assert leaderboard[1]["total_tokens"] == 100

        del _quest_definitions["leaderboard_test"]


# ── Quest Reset Tests ─────────────────────────────────────────────────────


class TestQuestReset:
    def test_reset_quest_progress_all(self):
        # Create some progress entries
        progress1 = QuestProgress(
            quest_id="quest1", agent_id="agent1", status=QuestStatus.NOT_STARTED
        )
        progress2 = QuestProgress(
            quest_id="quest2", agent_id="agent2", status=QuestStatus.NOT_STARTED
        )

        from timmy.quest_system import _quest_progress

        _quest_progress["agent1:quest1"] = progress1
        _quest_progress["agent2:quest2"] = progress2

        assert len(_quest_progress) == 2

        count = reset_quest_progress()
        assert count == 2
        assert len(_quest_progress) == 0

    def test_reset_quest_progress_specific_quest(self):
        progress1 = QuestProgress(
            quest_id="quest1", agent_id="agent1", status=QuestStatus.NOT_STARTED
        )
        progress2 = QuestProgress(
            quest_id="quest2", agent_id="agent1", status=QuestStatus.NOT_STARTED
        )

        from timmy.quest_system import _quest_progress

        _quest_progress["agent1:quest1"] = progress1
        _quest_progress["agent1:quest2"] = progress2

        count = reset_quest_progress(quest_id="quest1")
        assert count == 1
        assert "agent1:quest1" not in _quest_progress
        assert "agent1:quest2" in _quest_progress

    def test_reset_quest_progress_specific_agent(self):
        progress1 = QuestProgress(
            quest_id="quest1", agent_id="agent1", status=QuestStatus.NOT_STARTED
        )
        progress2 = QuestProgress(
            quest_id="quest1", agent_id="agent2", status=QuestStatus.NOT_STARTED
        )

        from timmy.quest_system import _quest_progress

        _quest_progress["agent1:quest1"] = progress1
        _quest_progress["agent2:quest1"] = progress2

        count = reset_quest_progress(agent_id="agent1")
        assert count == 1
        assert "agent1:quest1" not in _quest_progress
        assert "agent2:quest1" in _quest_progress
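
Note: the reset tests encode the storage convention used throughout this file: progress lives in a module-level dict keyed "agent_id:quest_id", and reset_quest_progress filters on either half of the key and reports how many entries it removed. A sketch under that assumption (the real module may parse keys more strictly):

    _quest_progress = {}  # "agent_id:quest_id" -> QuestProgress

    def reset_quest_progress(quest_id=None, agent_id=None):
        """Remove matching progress entries; return the number removed."""
        doomed = [
            key
            for key in _quest_progress
            if (quest_id is None or key.endswith(f":{quest_id}"))
            and (agent_id is None or key.startswith(f"{agent_id}:"))
        ]
        for key in doomed:
            del _quest_progress[key]
        return len(doomed)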
446 tests/unit/test_rate_limit.py Normal file
@@ -0,0 +1,446 @@
"""Tests for rate limiting middleware.

Tests the RateLimiter class and RateLimitMiddleware for correct
rate limiting behavior, cleanup, and edge cases.
"""

import time
from unittest.mock import Mock

import pytest
from starlette.requests import Request
from starlette.responses import JSONResponse

from dashboard.middleware.rate_limit import RateLimiter, RateLimitMiddleware


class TestRateLimiter:
    """Tests for the RateLimiter class."""

    def test_init_defaults(self):
        """RateLimiter initializes with default values."""
        limiter = RateLimiter()
        assert limiter.requests_per_minute == 30
        assert limiter.cleanup_interval_seconds == 60
        assert limiter._storage == {}

    def test_init_custom_values(self):
        """RateLimiter accepts custom configuration."""
        limiter = RateLimiter(requests_per_minute=60, cleanup_interval_seconds=120)
        assert limiter.requests_per_minute == 60
        assert limiter.cleanup_interval_seconds == 120

    def test_is_allowed_first_request(self):
        """First request from an IP is always allowed."""
        limiter = RateLimiter(requests_per_minute=5)
        allowed, retry_after = limiter.is_allowed("192.168.1.1")
        assert allowed is True
        assert retry_after == 0.0
        assert "192.168.1.1" in limiter._storage
        assert len(limiter._storage["192.168.1.1"]) == 1

    def test_is_allowed_under_limit(self):
        """Requests under the limit are allowed."""
        limiter = RateLimiter(requests_per_minute=5)

        # Make 4 requests (under limit of 5)
        for _ in range(4):
            allowed, _ = limiter.is_allowed("192.168.1.1")
            assert allowed is True

        assert len(limiter._storage["192.168.1.1"]) == 4

    def test_is_allowed_at_limit(self):
        """Request at the limit is allowed."""
        limiter = RateLimiter(requests_per_minute=5)

        # Make exactly 5 requests
        for _ in range(5):
            allowed, _ = limiter.is_allowed("192.168.1.1")
            assert allowed is True

        assert len(limiter._storage["192.168.1.1"]) == 5

    def test_is_allowed_over_limit(self):
        """Request over the limit is denied."""
        limiter = RateLimiter(requests_per_minute=5)

        # Make 5 requests to hit the limit
        for _ in range(5):
            limiter.is_allowed("192.168.1.1")

        # 6th request should be denied
        allowed, retry_after = limiter.is_allowed("192.168.1.1")
        assert allowed is False
        assert retry_after > 0

    def test_is_allowed_different_ips(self):
        """Rate limiting is per-IP, not global."""
        limiter = RateLimiter(requests_per_minute=5)

        # Hit limit for IP 1
        for _ in range(5):
            limiter.is_allowed("192.168.1.1")

        # IP 1 is now rate limited
        allowed, _ = limiter.is_allowed("192.168.1.1")
        assert allowed is False

        # IP 2 should still be allowed
        allowed, _ = limiter.is_allowed("192.168.1.2")
        assert allowed is True

    def test_window_expiration_allows_new_requests(self):
        """After window expires, new requests are allowed."""
        limiter = RateLimiter(requests_per_minute=5)

        # Hit the limit
        for _ in range(5):
            limiter.is_allowed("192.168.1.1")

        # Should be rate limited
        allowed, _ = limiter.is_allowed("192.168.1.1")
        assert allowed is False

        # Simulate time passing by clearing timestamps manually
        # (we can't wait 60 seconds in a test)
        limiter._storage["192.168.1.1"].clear()

        # Should now be allowed again
        allowed, _ = limiter.is_allowed("192.168.1.1")
        assert allowed is True

    def test_cleanup_removes_stale_entries(self):
        """Cleanup removes IPs with no recent requests."""
        limiter = RateLimiter(
            requests_per_minute=5,
            cleanup_interval_seconds=1,  # Short interval for testing
        )

        # Add some requests
        limiter.is_allowed("192.168.1.1")
        limiter.is_allowed("192.168.1.2")

        # Both IPs should be in storage
        assert "192.168.1.1" in limiter._storage
        assert "192.168.1.2" in limiter._storage

        # Manually clear timestamps to simulate stale data
        limiter._storage["192.168.1.1"].clear()
        limiter._last_cleanup = time.time() - 2  # Force cleanup

        # Trigger cleanup via check_request with a mock
        mock_request = Mock()
        mock_request.headers = {}
        mock_request.client = Mock()
        mock_request.client.host = "192.168.1.3"
        mock_request.url.path = "/api/matrix/test"

        limiter.check_request(mock_request)

        # The stale IP (no timestamps left) should be removed
        assert "192.168.1.1" not in limiter._storage
        # The IP with a recent timestamp is kept
        assert "192.168.1.2" in limiter._storage

    def test_get_client_ip_direct(self):
        """Extract client IP from direct connection."""
        limiter = RateLimiter()

        mock_request = Mock()
        mock_request.headers = {}
        mock_request.client = Mock()
        mock_request.client.host = "192.168.1.100"

        ip = limiter._get_client_ip(mock_request)
        assert ip == "192.168.1.100"

    def test_get_client_ip_x_forwarded_for(self):
        """Extract client IP from X-Forwarded-For header."""
        limiter = RateLimiter()

        mock_request = Mock()
        mock_request.headers = {"x-forwarded-for": "10.0.0.1, 192.168.1.1"}
        mock_request.client = Mock()
        mock_request.client.host = "192.168.1.100"

        ip = limiter._get_client_ip(mock_request)
        assert ip == "10.0.0.1"

    def test_get_client_ip_x_real_ip(self):
        """Extract client IP from X-Real-IP header."""
        limiter = RateLimiter()

        mock_request = Mock()
        mock_request.headers = {"x-real-ip": "10.0.0.5"}
        mock_request.client = Mock()
        mock_request.client.host = "192.168.1.100"

        ip = limiter._get_client_ip(mock_request)
        assert ip == "10.0.0.5"

    def test_get_client_ip_no_client(self):
        """Return 'unknown' when no client info available."""
        limiter = RateLimiter()

        mock_request = Mock()
        mock_request.headers = {}
        mock_request.client = None

        ip = limiter._get_client_ip(mock_request)
        assert ip == "unknown"


class TestRateLimitMiddleware:
    """Tests for the RateLimitMiddleware class."""

    @pytest.fixture
    def mock_app(self):
        """Create a mock ASGI app."""

        async def app(scope, receive, send):
            response = JSONResponse({"status": "ok"})
            await response(scope, receive, send)

        return app

    @pytest.fixture
    def mock_request(self):
        """Create a mock Request object."""
        request = Mock(spec=Request)
        request.url.path = "/api/matrix/test"
        request.headers = {}
        request.client = Mock()
        request.client.host = "192.168.1.1"
        return request

    def test_init_defaults(self, mock_app):
        """Middleware initializes with default values."""
        middleware = RateLimitMiddleware(mock_app)
        assert middleware.path_prefixes == []
        assert middleware.limiter.requests_per_minute == 30

    def test_init_custom_values(self, mock_app):
        """Middleware accepts custom configuration."""
        middleware = RateLimitMiddleware(
            mock_app,
            path_prefixes=["/api/matrix/"],
            requests_per_minute=60,
        )
        assert middleware.path_prefixes == ["/api/matrix/"]
        assert middleware.limiter.requests_per_minute == 60

    def test_should_rate_limit_no_prefixes(self, mock_app):
        """With no prefixes, all paths are rate limited."""
        middleware = RateLimitMiddleware(mock_app)
        assert middleware._should_rate_limit("/api/matrix/test") is True
        assert middleware._should_rate_limit("/api/other/test") is True
        assert middleware._should_rate_limit("/health") is True

    def test_should_rate_limit_with_prefixes(self, mock_app):
        """With prefixes, only matching paths are rate limited."""
        middleware = RateLimitMiddleware(
            mock_app,
            path_prefixes=["/api/matrix/", "/api/public/"],
        )
        assert middleware._should_rate_limit("/api/matrix/test") is True
        assert middleware._should_rate_limit("/api/matrix/") is True
        assert middleware._should_rate_limit("/api/public/data") is True
        assert middleware._should_rate_limit("/api/other/test") is False
        assert middleware._should_rate_limit("/health") is False

    @pytest.mark.asyncio
    async def test_dispatch_allows_matching_path_under_limit(self, mock_app):
        """Request to matching path under limit is allowed."""
        middleware = RateLimitMiddleware(
            mock_app,
            path_prefixes=["/api/matrix/"],
            requests_per_minute=5,
        )

        # Create a proper ASGI scope
        scope = {
            "type": "http",
            "method": "GET",
            "path": "/api/matrix/test",
            "headers": [],
        }

        async def receive():
            return {"type": "http.request", "body": b""}

        response_body = []

        async def send(message):
            response_body.append(message)

        await middleware(scope, receive, send)

        # Should have sent response messages
        assert len(response_body) > 0
        # Check for 200 status in the response start message
        start_message = next(
            (m for m in response_body if m.get("type") == "http.response.start"), None
        )
        assert start_message is not None
        assert start_message["status"] == 200

    @pytest.mark.asyncio
    async def test_dispatch_skips_non_matching_path(self, mock_app):
        """Request to non-matching path bypasses rate limiting."""
        middleware = RateLimitMiddleware(
            mock_app,
            path_prefixes=["/api/matrix/"],
            requests_per_minute=5,
        )

        scope = {
            "type": "http",
            "method": "GET",
            "path": "/api/other/test",  # Doesn't match /api/matrix/
            "headers": [],
        }

        async def receive():
            return {"type": "http.request", "body": b""}

        response_body = []

        async def send(message):
            response_body.append(message)

        await middleware(scope, receive, send)

        # Should have sent response messages
        assert len(response_body) > 0
        start_message = next(
            (m for m in response_body if m.get("type") == "http.response.start"), None
        )
        assert start_message is not None
        assert start_message["status"] == 200

    @pytest.mark.asyncio
    async def test_dispatch_returns_429_when_rate_limited(self, mock_app):
        """Request over limit returns 429 status."""
        middleware = RateLimitMiddleware(
            mock_app,
            path_prefixes=["/api/matrix/"],
            requests_per_minute=2,  # Low limit for testing
        )

        # Shared ASGI scope, reused for every request below
        test_scope = {
            "type": "http",
            "method": "GET",
            "path": "/api/matrix/test",
            "headers": [],
        }

        async def receive():
            return {"type": "http.request", "body": b""}

        # Helper to capture response
        def make_send(captured):
            async def send(message):
                captured.append(message)

            return send

        # Make requests to hit the limit
        for _ in range(2):
            response_body = []
            await middleware(test_scope, receive, make_send(response_body))

            start_message = next(
                (m for m in response_body if m.get("type") == "http.response.start"),
                None,
            )
            assert start_message["status"] == 200

        # 3rd request should be rate limited
        response_body = []
        await middleware(test_scope, receive, make_send(response_body))

        start_message = next(
            (m for m in response_body if m.get("type") == "http.response.start"), None
        )
        assert start_message["status"] == 429

        # Check for Retry-After header
        headers = dict(start_message.get("headers", []))
        assert b"retry-after" in headers or b"Retry-After" in headers


class TestRateLimiterIntegration:
    """Integration-style tests for rate limiter behavior."""

    def test_multiple_ips_independent_limits(self):
        """Each IP has its own independent rate limit."""
        limiter = RateLimiter(requests_per_minute=3)

        # Use up limit for IP 1
        for _ in range(3):
            limiter.is_allowed("10.0.0.1")

        # Use up limit for IP 2
        for _ in range(3):
            limiter.is_allowed("10.0.0.2")

        # Both should now be rate limited
        assert limiter.is_allowed("10.0.0.1")[0] is False
        assert limiter.is_allowed("10.0.0.2")[0] is False

        # IP 3 should still be allowed
        assert limiter.is_allowed("10.0.0.3")[0] is True

    def test_timestamp_window_sliding(self):
        """Rate limit window slides correctly as time passes."""
        from collections import deque

        limiter = RateLimiter(requests_per_minute=3)

        # Add 3 timestamps manually (simulating old requests)
        now = time.time()
        limiter._storage["test-ip"] = deque(
            [
||||
now - 100, # 100 seconds ago (outside 60s window)
|
||||
now - 50, # 50 seconds ago (inside window)
|
||||
now - 10, # 10 seconds ago (inside window)
|
||||
]
|
||||
)
|
||||
|
||||
# Currently have 2 requests in window, so 1 more allowed
|
||||
allowed, _ = limiter.is_allowed("test-ip")
|
||||
assert allowed is True
|
||||
|
||||
# Now 3 in window, should be rate limited
|
||||
allowed, _ = limiter.is_allowed("test-ip")
|
||||
assert allowed is False
|
||||
|
||||
def test_cleanup_preserves_active_ips(self):
|
||||
"""Cleanup only removes IPs with no recent requests."""
|
||||
from collections import deque
|
||||
|
||||
limiter = RateLimiter(
|
||||
requests_per_minute=3,
|
||||
cleanup_interval_seconds=1,
|
||||
)
|
||||
|
||||
now = time.time()
|
||||
# IP 1: active recently
|
||||
limiter._storage["active-ip"] = deque([now - 10])
|
||||
# IP 2: no timestamps (stale)
|
||||
limiter._storage["stale-ip"] = deque()
|
||||
# IP 3: old timestamps only
|
||||
limiter._storage["old-ip"] = deque([now - 100])
|
||||
|
||||
limiter._last_cleanup = now - 2 # Force cleanup
|
||||
|
||||
# Run cleanup
|
||||
limiter._cleanup_if_needed()
|
||||
|
||||
# Active IP should remain
|
||||
assert "active-ip" in limiter._storage
|
||||
# Stale IPs should be removed
|
||||
assert "stale-ip" not in limiter._storage
|
||||
assert "old-ip" not in limiter._storage
|
||||
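For reference, here is a minimal sliding-window limiter consistent with the behavior these tests pin down. The attribute names mirror the test expectations (`_storage`, `_last_cleanup`, `is_allowed` returning an (allowed, retry_after) tuple), but this is an illustrative sketch, not the application's actual `RateLimiter`:

```python
import time
from collections import deque


class SlidingWindowLimiter:
    """Illustrative per-IP sliding-window limiter (assumed behavior)."""

    def __init__(self, requests_per_minute: int = 30, cleanup_interval_seconds: int = 60):
        self.requests_per_minute = requests_per_minute
        self.cleanup_interval_seconds = cleanup_interval_seconds
        self._storage: dict[str, deque[float]] = {}
        self._last_cleanup = time.time()

    def is_allowed(self, client_ip: str) -> tuple[bool, float]:
        """Return (allowed, retry_after_seconds) for one request from client_ip."""
        now = time.time()
        self._cleanup_if_needed()
        timestamps = self._storage.setdefault(client_ip, deque())
        # Drop timestamps that fell out of the 60-second window
        while timestamps and timestamps[0] <= now - 60:
            timestamps.popleft()
        if len(timestamps) >= self.requests_per_minute:
            retry_after = 60 - (now - timestamps[0])
            return False, retry_after
        timestamps.append(now)
        return True, 0.0

    def _cleanup_if_needed(self) -> None:
        """Periodically drop IPs whose requests all fall outside the window."""
        now = time.time()
        if now - self._last_cleanup < self.cleanup_interval_seconds:
            return
        self._last_cleanup = now
        for ip in list(self._storage):
            ts = self._storage[ip]
            if not ts or ts[-1] <= now - 60:
                del self._storage[ip]
```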
294
tests/unit/test_stress_detector.py
Normal file
@@ -0,0 +1,294 @@
"""Unit tests for the stress detector module.

Tests stress signal calculation, mode detection, multipliers,
and integration with the quest system.
"""

from __future__ import annotations

import pytest

from timmy.stress_detector import (
    StressMode,
    StressSignal,
    StressSnapshot,
    StressThresholds,
    _calculate_stress_score,
    _get_multipliers_for_mode,
    apply_multiplier,
    get_default_config,
    reset_stress_state,
)


@pytest.fixture(autouse=True)
def clean_stress_state():
    """Reset stress state between tests."""
    reset_stress_state()
    yield
    reset_stress_state()


# ── Stress Mode Tests ──────────────────────────────────────────────────────


class TestStressMode:
    def test_stress_mode_values(self):
        """StressMode enum has expected values."""
        assert StressMode.CALM.value == "calm"
        assert StressMode.ELEVATED.value == "elevated"
        assert StressMode.HIGH.value == "high"


# ── Stress Signal Tests ────────────────────────────────────────────────────


class TestStressSignal:
    def test_signal_not_triggered(self):
        """Signal with value below threshold is not triggered."""
        signal = StressSignal(
            name="test_signal",
            value=5.0,
            threshold=10.0,
            weight=0.5,
        )
        assert not signal.is_triggered
        assert signal.contribution == 0.0

    def test_signal_triggered(self):
        """Signal with value at threshold is triggered."""
        signal = StressSignal(
            name="test_signal",
            value=10.0,
            threshold=10.0,
            weight=0.5,
        )
        assert signal.is_triggered
        assert signal.contribution == 0.5  # weight * min(1, value/threshold)

    def test_signal_contribution_capped(self):
        """Signal contribution is capped at weight when value >> threshold."""
        signal = StressSignal(
            name="test_signal",
            value=100.0,
            threshold=10.0,
            weight=0.5,
        )
        assert signal.is_triggered
        assert signal.contribution == 0.5  # Capped at weight

    def test_signal_partial_contribution(self):
        """Signal contribution scales with value/threshold ratio."""
        signal = StressSignal(
            name="test_signal",
            value=15.0,
            threshold=10.0,
            weight=0.5,
        )
        assert signal.is_triggered
        # contribution = min(1, 15/10) * 0.5 = 0.5 (capped)
        assert signal.contribution == 0.5


# ── Stress Thresholds Tests ────────────────────────────────────────────────


class TestStressThresholds:
    def test_calm_mode(self):
        """Score below elevated_min returns CALM mode."""
        thresholds = StressThresholds(elevated_min=0.3, high_min=0.6)
        assert thresholds.get_mode_for_score(0.0) == StressMode.CALM
        assert thresholds.get_mode_for_score(0.1) == StressMode.CALM
        assert thresholds.get_mode_for_score(0.29) == StressMode.CALM

    def test_elevated_mode(self):
        """Score between elevated_min and high_min returns ELEVATED mode."""
        thresholds = StressThresholds(elevated_min=0.3, high_min=0.6)
        assert thresholds.get_mode_for_score(0.3) == StressMode.ELEVATED
        assert thresholds.get_mode_for_score(0.5) == StressMode.ELEVATED
        assert thresholds.get_mode_for_score(0.59) == StressMode.ELEVATED

    def test_high_mode(self):
        """Score at or above high_min returns HIGH mode."""
        thresholds = StressThresholds(elevated_min=0.3, high_min=0.6)
        assert thresholds.get_mode_for_score(0.6) == StressMode.HIGH
        assert thresholds.get_mode_for_score(0.8) == StressMode.HIGH
        assert thresholds.get_mode_for_score(1.0) == StressMode.HIGH


# ── Stress Score Calculation Tests ─────────────────────────────────────────


class TestStressScoreCalculation:
    def test_empty_signals(self):
        """Empty signal list returns zero stress score."""
        score = _calculate_stress_score([])
        assert score == 0.0

    def test_no_triggered_signals(self):
        """No triggered signals means zero stress score."""
        signals = [
            StressSignal(name="s1", value=1.0, threshold=10.0, weight=0.5),
            StressSignal(name="s2", value=2.0, threshold=10.0, weight=0.5),
        ]
        score = _calculate_stress_score(signals)
        assert score == 0.0

    def test_single_triggered_signal(self):
        """Single triggered signal contributes its weight."""
        signals = [
            StressSignal(name="s1", value=10.0, threshold=10.0, weight=0.5),
        ]
        score = _calculate_stress_score(signals)
        # contribution = 0.5, total_weight = 0.5, score = 0.5/0.5 = 1.0
        assert score == 1.0

    def test_mixed_signals(self):
        """Mix of triggered and non-triggered signals."""
        signals = [
            StressSignal(name="s1", value=10.0, threshold=10.0, weight=0.3),
            StressSignal(name="s2", value=1.0, threshold=10.0, weight=0.3),
            StressSignal(name="s3", value=10.0, threshold=10.0, weight=0.4),
        ]
        score = _calculate_stress_score(signals)
        # triggered contributions: 0.3 + 0.4 = 0.7
        # total_weight: 0.3 + 0.3 + 0.4 = 1.0
        # score = 0.7 / 1.0 = 0.7
        assert score == 0.7

    def test_score_capped_at_one(self):
        """Stress score is capped at 1.0."""
        signals = [
            StressSignal(name="s1", value=100.0, threshold=10.0, weight=1.0),
            StressSignal(name="s2", value=100.0, threshold=10.0, weight=1.0),
        ]
        score = _calculate_stress_score(signals)
        assert score == 1.0  # Capped


# ── Multiplier Tests ───────────────────────────────────────────────────────


class TestMultipliers:
    def test_default_config_structure(self):
        """Default config has expected structure."""
        config = get_default_config()
        assert "thresholds" in config
        assert "signals" in config
        assert "multipliers" in config

    def test_calm_mode_multipliers(self):
        """Calm mode has expected multipliers."""
        multipliers = _get_multipliers_for_mode(StressMode.CALM)
        assert multipliers["test_improve"] == 1.0
        assert multipliers["docs_update"] == 1.2
        assert multipliers["exploration"] == 1.3
        assert multipliers["refactor"] == 1.2

    def test_elevated_mode_multipliers(self):
        """Elevated mode has expected multipliers."""
        multipliers = _get_multipliers_for_mode(StressMode.ELEVATED)
        assert multipliers["test_improve"] == 1.2
        assert multipliers["issue_reduce"] == 1.1
        assert multipliers["refactor"] == 0.9

    def test_high_mode_multipliers(self):
        """High stress mode has expected multipliers."""
        multipliers = _get_multipliers_for_mode(StressMode.HIGH)
        assert multipliers["test_improve"] == 1.5
        assert multipliers["issue_reduce"] == 1.4
        assert multipliers["exploration"] == 0.7
        assert multipliers["refactor"] == 0.6

    def test_multiplier_fallback_for_unknown_type(self):
        """Unknown quest types return default multiplier of 1.0."""
        multipliers = _get_multipliers_for_mode(StressMode.CALM)
        assert multipliers.get("unknown_type", 1.0) == 1.0


# ── Apply Multiplier Tests ─────────────────────────────────────────────────


class TestApplyMultiplier:
    def test_apply_multiplier_calm(self):
        """Multiplier applies correctly in calm mode."""
        # apply_multiplier reads the current stress mode internally; since the
        # mode is not easily mocked here, this exercises the fallback path.
        base = 100
        # In calm mode with test_improve = 1.0
        result = apply_multiplier(base, "unknown_type")
        assert result >= 1  # At least 1 token

    def test_apply_multiplier_minimum_one(self):
        """Applied reward is at least 1 token."""
        # Even with a very low multiplier, the result should be >= 1
        result = apply_multiplier(1, "any_type")
        assert result >= 1


# ── Stress Snapshot Tests ──────────────────────────────────────────────────


class TestStressSnapshot:
    def test_snapshot_to_dict(self):
        """Snapshot can be converted to dictionary."""
        signals = [
            StressSignal(name="test", value=10.0, threshold=5.0, weight=0.5),
        ]
        snapshot = StressSnapshot(
            mode=StressMode.ELEVATED,
            score=0.5,
            signals=signals,
            multipliers={"test_improve": 1.2},
        )

        data = snapshot.to_dict()
        assert data["mode"] == "elevated"
        assert data["score"] == 0.5
        assert len(data["signals"]) == 1
        assert data["multipliers"]["test_improve"] == 1.2


# ── Integration Tests ──────────────────────────────────────────────────────


class TestStressDetectorIntegration:
    def test_reset_stress_state(self):
        """Reset clears internal state."""
        # Just verify reset doesn't error
        reset_stress_state()

    def test_default_config_contains_all_signals(self):
        """Default config defines all expected signals."""
        config = get_default_config()
        signals = config["signals"]

        expected_signals = [
            "flaky_test_rate",
            "p1_backlog_growth",
            "ci_failure_rate",
            "open_bug_count",
        ]

        for signal in expected_signals:
            assert signal in signals
            assert "threshold" in signals[signal]
            assert "weight" in signals[signal]

    def test_default_config_contains_all_modes(self):
        """Default config defines all stress modes."""
        config = get_default_config()
        multipliers = config["multipliers"]

        assert "calm" in multipliers
        assert "elevated" in multipliers
        assert "high" in multipliers

    def test_signal_weights_sum_approximately_one(self):
        """Signal weights should approximately sum to 1.0."""
        config = get_default_config()
        signals = config["signals"]

        total_weight = sum(s["weight"] for s in signals.values())
        # Allow some flexibility but should be close to 1.0
        assert 0.9 <= total_weight <= 1.1
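Taken together, these tests pin down a simple scoring rule. A minimal sketch of that rule follows; it is an assumption drawn from the assertions above, not the actual `timmy.stress_detector` internals:

```python
# Illustrative scoring rule: weighted average of triggered contributions.
def calculate_stress_score(signals) -> float:
    """Return a 0.0-1.0 stress score from a list of StressSignal-like objects."""
    if not signals:
        return 0.0
    total_weight = sum(s.weight for s in signals)
    if total_weight == 0:
        return 0.0
    contribution = sum(
        s.weight * min(1.0, s.value / s.threshold)
        for s in signals
        if s.value >= s.threshold  # only triggered signals contribute
    )
    return min(1.0, contribution / total_weight)
```

For the mixed-signals case above this yields (0.3 + 0.4) / 1.0 = 0.7, matching the test.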
367
tests/unit/test_visitor.py
Normal file
@@ -0,0 +1,367 @@
"""Tests for infrastructure.visitor — visitor state tracking."""

from unittest.mock import patch

from infrastructure.visitor import VisitorRegistry, VisitorState

# ---------------------------------------------------------------------------
# VisitorState dataclass tests
# ---------------------------------------------------------------------------


class TestVisitorState:
    """Tests for the VisitorState dataclass."""

    def test_defaults(self):
        """VisitorState has correct defaults when only visitor_id is provided."""
        v = VisitorState(visitor_id="v1")

        assert v.visitor_id == "v1"
        assert v.display_name == "v1"  # Defaults to visitor_id
        assert v.position == {"x": 0.0, "y": 0.0, "z": 0.0}
        assert v.rotation == 0.0
        assert "T" in v.connected_at  # ISO format check

    def test_custom_values(self):
        """VisitorState accepts custom values for all fields."""
        v = VisitorState(
            visitor_id="v2",
            display_name="Alice",
            position={"x": 1.0, "y": 2.0, "z": 3.0},
            rotation=90.0,
            connected_at="2026-03-21T12:00:00Z",
        )

        assert v.visitor_id == "v2"
        assert v.display_name == "Alice"
        assert v.position == {"x": 1.0, "y": 2.0, "z": 3.0}
        assert v.rotation == 90.0
        assert v.connected_at == "2026-03-21T12:00:00Z"

    def test_display_name_defaults_to_visitor_id(self):
        """Empty display_name falls back to visitor_id."""
        v = VisitorState(visitor_id="charlie", display_name="")
        assert v.display_name == "charlie"

    def test_position_is_copied_not_shared(self):
        """Each VisitorState has its own position dict."""
        pos = {"x": 1.0, "y": 2.0, "z": 3.0}
        v1 = VisitorState(visitor_id="v1", position=pos)
        v2 = VisitorState(visitor_id="v2", position=pos)

        v1.position["x"] = 99.0
        assert v2.position["x"] == 1.0  # v2 unchanged


# ---------------------------------------------------------------------------
# VisitorRegistry singleton tests
# ---------------------------------------------------------------------------


class TestVisitorRegistrySingleton:
    """Tests for the VisitorRegistry singleton behavior."""

    def setup_method(self):
        """Clear registry before each test."""
        VisitorRegistry._instance = None

    def teardown_method(self):
        """Clean up after each test."""
        VisitorRegistry._instance = None

    def test_singleton_returns_same_instance(self):
        """Multiple calls return the same registry object."""
        r1 = VisitorRegistry()
        r2 = VisitorRegistry()
        assert r1 is r2

    def test_singleton_shares_state(self):
        """State is shared across all references to the singleton."""
        r1 = VisitorRegistry()
        r1.add("v1")

        r2 = VisitorRegistry()
        assert len(r2) == 1
        assert r2.get("v1") is not None


# ---------------------------------------------------------------------------
# VisitorRegistry.add tests
# ---------------------------------------------------------------------------


class TestVisitorRegistryAdd:
    """Tests for VisitorRegistry.add()."""

    def setup_method(self):
        """Clear registry before each test."""
        VisitorRegistry._instance = None
        self.registry = VisitorRegistry()

    def teardown_method(self):
        """Clean up after each test."""
        VisitorRegistry._instance = None

    def test_add_returns_visitor_state(self):
        """add() returns the created VisitorState."""
        result = self.registry.add("v1")
        assert isinstance(result, VisitorState)
        assert result.visitor_id == "v1"

    def test_add_with_display_name(self):
        """add() accepts a custom display name."""
        result = self.registry.add("v1", display_name="Alice")
        assert result.display_name == "Alice"

    def test_add_with_position(self):
        """add() accepts an initial position."""
        pos = {"x": 10.0, "y": 20.0, "z": 30.0}
        result = self.registry.add("v1", position=pos)
        assert result.position == pos

    def test_add_increases_count(self):
        """Each add increases the registry size."""
        assert len(self.registry) == 0
        self.registry.add("v1")
        assert len(self.registry) == 1
        self.registry.add("v2")
        assert len(self.registry) == 2


# ---------------------------------------------------------------------------
# VisitorRegistry.remove tests
# ---------------------------------------------------------------------------


class TestVisitorRegistryRemove:
    """Tests for VisitorRegistry.remove()."""

    def setup_method(self):
        """Clear registry and add test visitors."""
        VisitorRegistry._instance = None
        self.registry = VisitorRegistry()
        self.registry.add("v1")
        self.registry.add("v2")

    def teardown_method(self):
        """Clean up after each test."""
        VisitorRegistry._instance = None

    def test_remove_existing_returns_true(self):
        """Removing an existing visitor returns True."""
        result = self.registry.remove("v1")
        assert result is True
        assert len(self.registry) == 1

    def test_remove_nonexistent_returns_false(self):
        """Removing a non-existent visitor returns False."""
        result = self.registry.remove("unknown")
        assert result is False
        assert len(self.registry) == 2

    def test_removes_correct_visitor(self):
        """remove() only removes the specified visitor."""
        self.registry.remove("v1")
        assert self.registry.get("v1") is None
        assert self.registry.get("v2") is not None


# ---------------------------------------------------------------------------
# VisitorRegistry.update_position tests
# ---------------------------------------------------------------------------


class TestVisitorRegistryUpdatePosition:
    """Tests for VisitorRegistry.update_position()."""

    def setup_method(self):
        """Clear registry and add test visitor."""
        VisitorRegistry._instance = None
        self.registry = VisitorRegistry()
        self.registry.add("v1", position={"x": 0.0, "y": 0.0, "z": 0.0})

    def teardown_method(self):
        """Clean up after each test."""
        VisitorRegistry._instance = None

    def test_update_position_returns_true(self):
        """update_position returns True for existing visitor."""
        result = self.registry.update_position("v1", {"x": 1.0, "y": 2.0, "z": 3.0})
        assert result is True

    def test_update_position_returns_false_for_unknown(self):
        """update_position returns False for non-existent visitor."""
        result = self.registry.update_position("unknown", {"x": 1.0, "y": 2.0, "z": 3.0})
        assert result is False

    def test_update_position_changes_values(self):
        """update_position updates the stored position."""
        new_pos = {"x": 10.0, "y": 20.0, "z": 30.0}
        self.registry.update_position("v1", new_pos)

        visitor = self.registry.get("v1")
        assert visitor.position == new_pos

    def test_update_position_with_rotation(self):
        """update_position can also update rotation."""
        self.registry.update_position("v1", {"x": 1.0, "y": 0.0, "z": 0.0}, rotation=180.0)

        visitor = self.registry.get("v1")
        assert visitor.rotation == 180.0

    def test_update_position_without_rotation_preserves_it(self):
        """Calling update_position without rotation preserves existing rotation."""
        self.registry.update_position("v1", {"x": 1.0, "y": 0.0, "z": 0.0}, rotation=90.0)
        self.registry.update_position("v1", {"x": 2.0, "y": 0.0, "z": 0.0})

        visitor = self.registry.get("v1")
        assert visitor.rotation == 90.0


# ---------------------------------------------------------------------------
# VisitorRegistry.get tests
# ---------------------------------------------------------------------------


class TestVisitorRegistryGet:
    """Tests for VisitorRegistry.get()."""

    def setup_method(self):
        """Clear registry and add test visitor."""
        VisitorRegistry._instance = None
        self.registry = VisitorRegistry()
        self.registry.add("v1", display_name="Alice")

    def teardown_method(self):
        """Clean up after each test."""
        VisitorRegistry._instance = None

    def test_get_existing_returns_visitor(self):
        """get() returns VisitorState for existing visitor."""
        result = self.registry.get("v1")
        assert isinstance(result, VisitorState)
        assert result.visitor_id == "v1"
        assert result.display_name == "Alice"

    def test_get_nonexistent_returns_none(self):
        """get() returns None for non-existent visitor."""
        result = self.registry.get("unknown")
        assert result is None


# ---------------------------------------------------------------------------
# VisitorRegistry.get_all tests
# ---------------------------------------------------------------------------


class TestVisitorRegistryGetAll:
    """Tests for VisitorRegistry.get_all() — Matrix protocol format."""

    def setup_method(self):
        """Clear registry and add test visitors."""
        VisitorRegistry._instance = None
        self.registry = VisitorRegistry()
        self.registry.add("v1", display_name="Alice", position={"x": 1.0, "y": 2.0, "z": 3.0})
        self.registry.add("v2", display_name="Bob", position={"x": 4.0, "y": 5.0, "z": 6.0})

    def teardown_method(self):
        """Clean up after each test."""
        VisitorRegistry._instance = None

    def test_get_all_returns_list(self):
        """get_all() returns a list."""
        result = self.registry.get_all()
        assert isinstance(result, list)
        assert len(result) == 2

    def test_get_all_format_has_required_fields(self):
        """Each entry has type, visitor_id, data, and ts."""
        result = self.registry.get_all()

        for entry in result:
            assert "type" in entry
            assert "visitor_id" in entry
            assert "data" in entry
            assert "ts" in entry

    def test_get_all_type_is_visitor_state(self):
        """The type field is 'visitor_state'."""
        result = self.registry.get_all()
        assert all(entry["type"] == "visitor_state" for entry in result)

    def test_get_all_data_has_required_fields(self):
        """data dict contains display_name, position, rotation, connected_at."""
        result = self.registry.get_all()

        for entry in result:
            data = entry["data"]
            assert "display_name" in data
            assert "position" in data
            assert "rotation" in data
            assert "connected_at" in data

    def test_get_all_position_is_dict(self):
        """position within data is a dict with x, y, z."""
        result = self.registry.get_all()

        for entry in result:
            pos = entry["data"]["position"]
            assert isinstance(pos, dict)
            assert "x" in pos
            assert "y" in pos
            assert "z" in pos

    def test_get_all_ts_is_unix_timestamp(self):
        """ts is an integer Unix timestamp."""
        result = self.registry.get_all()

        for entry in result:
            assert isinstance(entry["ts"], int)
            assert entry["ts"] > 0

    @patch("infrastructure.visitor.time")
    def test_get_all_uses_current_time(self, mock_time):
        """ts is set from time.time()."""
        mock_time.time.return_value = 1742529600

        result = self.registry.get_all()
        assert all(entry["ts"] == 1742529600 for entry in result)

    def test_get_all_empty_registry(self):
        """get_all() returns empty list when no visitors."""
        self.registry.clear()
        result = self.registry.get_all()
        assert result == []


# ---------------------------------------------------------------------------
# VisitorRegistry.clear tests
# ---------------------------------------------------------------------------


class TestVisitorRegistryClear:
    """Tests for VisitorRegistry.clear()."""

    def setup_method(self):
        """Clear registry and add test visitors."""
        VisitorRegistry._instance = None
        self.registry = VisitorRegistry()
        self.registry.add("v1")
        self.registry.add("v2")
        self.registry.add("v3")

    def teardown_method(self):
        """Clean up after each test."""
        VisitorRegistry._instance = None

    def test_clear_removes_all_visitors(self):
        """clear() removes all visitors from the registry."""
        assert len(self.registry) == 3
        self.registry.clear()
        assert len(self.registry) == 0

    def test_clear_allows_readding(self):
        """Visitors can be re-added after clear()."""
        self.registry.clear()
        self.registry.add("v1")
        assert len(self.registry) == 1
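The `get_all()` contract these tests describe amounts to one envelope per visitor. An illustrative entry, with example values only (the actual serialization lives in `infrastructure.visitor`):

```python
import time

# One get_all() entry, as pinned down by the tests above:
entry = {
    "type": "visitor_state",
    "visitor_id": "v1",
    "data": {
        "display_name": "Alice",
        "position": {"x": 1.0, "y": 2.0, "z": 3.0},
        "rotation": 0.0,
        "connected_at": "2026-03-21T12:00:00Z",  # ISO 8601
    },
    "ts": int(time.time()),  # integer Unix timestamp, taken at serialization
}
```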
283
timmy_automations/README.md
Normal file
@@ -0,0 +1,283 @@
# Timmy Automations

Central home for all automated processes that keep the Timmy development loop running smoothly.

## Purpose

This directory consolidates scripts, configurations, and manifests for automations that operate on behalf of Timmy — the autonomous development agent. These automations handle everything from daily issue triage to cycle retrospectives, workspace management, and metrics collection.

**Design principle:** Automations should be discoverable, configurable, and observable. Every automation in this folder can be found by Timmy, enabled or disabled via configuration, and reports its status for dashboard integration.

---

## Directory Structure

| Directory | Purpose |
|-----------|---------|
| `daily_run/` | Scripts that run periodically (cycle retros, idle detection, daily triage) |
| `triage/` | Deep triage helpers — intelligent issue refinement and prioritization |
| `metrics/` | Dashboard and metrics integration — data collection for loop health |
| `workspace/` | Agent workspace management — isolated environments per agent |
| `config/` | Automation manifests and configuration files |

---

## Types of Automations

### 1. Daily Run Automations

These run continuously or on a schedule to keep the dev loop operational:

- **Cycle Retrospective** (`cycle_retro.py`) — Logs structured data after each development cycle
- **Loop Guard** (`loop_guard.py`) — Idle detection with exponential backoff (prevents burning cycles on empty queues)
- **Triage Scoring** (`triage_score.py`) — Mechanical issue scoring based on scope/acceptance/alignment
- **Daily Run Orchestrator** (`orchestrator.py`) — The 10-minute ritual; fetches candidate issues and produces a concise agenda plus day summary

### 2. Deep Triage Automations

Intelligent, LLM-assisted workflows that run less frequently (~every 20 cycles):

- **Deep Triage** (`deep_triage.sh` + `deep_triage_prompt.md`) — Hermes-driven issue refinement, breaking down large issues, adding acceptance criteria
- **Loop Introspection** (`loop_introspect.py`) — Self-improvement engine that analyzes retro data and produces recommendations

### 3. Workspace Automations

Environment management for multi-agent operation:

- **Agent Workspace** (`agent_workspace.sh`) — Creates isolated git clones, port ranges, and data directories per agent
- **Bootstrap** (`bootstrap.sh`) — One-time setup for new Kimi workspaces
- **Resume** (`resume.sh`) — Quick status check and resume prompt

### 4. Metrics & Integration

Data collection for dashboard visibility:

- **Backfill Retro** (`backfill_retro.py`) — Seeds retrospective data from Gitea PR history
- **Pre-commit Checks** (`pre_commit_checks.py`) — CI hygiene validation before commits

---

## How Timmy Discovers Automations

Automations are discovered via manifest files in `config/`:

```
config/
├── automations.json    # Master manifest of all automations
├── daily_run.json      # Daily run schedule configuration
└── triage_rules.yaml   # Triage scoring weights and thresholds
```

### Discovery Protocol

1. **Scan** — Timmy scans `config/automations.json` on startup
2. **Validate** — Each automation entry is validated (script exists, is executable)
3. **Enable** — Automations marked `enabled: true` are registered
4. **Schedule** — Daily runs are scheduled via the loop's internal scheduler
5. **Report** — Status is written to `.loop/automation_state.json` (see the sketch after this list)

|
||||
|
||||
```json
|
||||
{
|
||||
"automations": [
|
||||
{
|
||||
"id": "cycle_retro",
|
||||
"name": "Cycle Retrospective",
|
||||
"description": "Logs structured data after each dev cycle",
|
||||
"script": "daily_run/cycle_retro.py",
|
||||
"enabled": true,
|
||||
"trigger": "post_cycle",
|
||||
"config": {
|
||||
"retro_file": ".loop/retro/cycles.jsonl",
|
||||
"summary_window": 50
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "loop_guard",
|
||||
"name": "Loop Guard",
|
||||
"description": "Idle detection with exponential backoff",
|
||||
"script": "daily_run/loop_guard.py",
|
||||
"enabled": true,
|
||||
"trigger": "pre_cycle",
|
||||
"config": {
|
||||
"backoff_base": 60,
|
||||
"backoff_max": 600
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## How Timmy Enables/Disables Automations
|
||||
|
||||
### Method 1: Edit Manifest
|
||||
|
||||
Modify `config/automations.json` and set `enabled: true/false`:
|
||||
|
||||
```bash
|
||||
# Disable the loop guard
|
||||
jq '.automations[] | select(.id == "loop_guard").enabled = false' \
|
||||
config/automations.json > tmp.json && mv tmp.json config/automations.json
|
||||
```
|
||||
|
||||
### Method 2: CLI (Future)

```bash
timmy automation enable loop_guard
timmy automation disable cycle_retro
timmy automation list
```

### Method 3: Dashboard (Future)

The Mission Control panel will have toggles for each automation with real-time status.

---

## How Timmy Configures Automations

Each automation reads configuration from the manifest plus environment variables:

| Priority | Source | Override |
|----------|--------|----------|
| 1 | Environment variables | `TIMMY_AUTOMATION_*` prefix |
| 2 | Manifest `config` object | Per-automation settings |
| 3 | Code defaults | Fallback values |

Example environment overrides:

```bash
export TIMMY_CYCLE_RETRO_WINDOW=100      # Override summary_window
export TIMMY_LOOP_GUARD_MAX_BACKOFF=300  # Override backoff_max
```
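A sketch of how that precedence can be resolved in code; `resolve_setting` is an illustrative helper, with the variable and key names taken from the examples above:

```python
import os


def resolve_setting(env_var: str, manifest_config: dict, key: str, default):
    """Env var wins, then the manifest `config` object, then the code default."""
    raw = os.environ.get(env_var)
    if raw is not None:
        return type(default)(raw)  # coerce the string to the default's type
    return manifest_config.get(key, default)


# e.g., resolving cycle_retro's summary window:
manifest_config = {"summary_window": 50}
window = resolve_setting("TIMMY_CYCLE_RETRO_WINDOW", manifest_config, "summary_window", 50)
```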

---

## Script References

The following scripts live in their original locations but are conceptually part of Timmy Automations:

| Script | Location | Category | Purpose |
|--------|----------|----------|---------|
| `cycle_retro.py` | `../scripts/` | Daily Run | Log cycle retrospective data |
| `loop_guard.py` | `../scripts/` | Daily Run | Idle detection & backoff |
| `triage_score.py` | `../scripts/` | Daily Run | Mechanical issue scoring |
| `orchestrator.py` | `daily_run/` | Daily Run | The 10-minute ritual — agenda + review |
| `deep_triage.sh` | `../scripts/` | Triage | LLM-driven issue refinement |
| `deep_triage_prompt.md` | `../scripts/` | Triage | Prompt template for deep triage |
| `loop_introspect.py` | `../scripts/` | Triage | Self-improvement analysis |
| `agent_workspace.sh` | `../scripts/` | Workspace | Agent environment management |
| `backfill_retro.py` | `../scripts/` | Metrics | Seed retro data from history |
| `pre_commit_checks.py` | `../scripts/` | Metrics | CI hygiene validation |

### Why scripts aren't moved here (yet)

These scripts are referenced rather than moved to maintain backward compatibility with:
- Existing CI/CD pipelines
- Agent workspace setups
- Shell aliases and documentation

Future work may migrate scripts here with symlink redirects from original locations.

---

## Integration with Dashboard

Automations report status for dashboard visibility:

```
.loop/
├── automation_state.json    # Current state of all automations
├── queue.json               # Current work queue (produced by triage)
├── retro/
│   ├── cycles.jsonl         # Cycle retrospective log
│   ├── deep-triage.jsonl    # Deep triage history
│   ├── triage.jsonl         # Mechanical triage log
│   └── insights.json        # Loop introspection output
└── quarantine.json          # Quarantined issues (repeat failures)
```

The Mission Control dashboard (`/mission-control`) displays:
- Last run time for each automation
- Success/failure counts
- Queue depth and triage statistics
- Repeat failure alerts

---

## Adding New Automations

1. **Create the script** in the appropriate subdirectory
2. **Add manifest entry** to `config/automations.json`
3. **Document in this README** — add to the relevant table
4. **Add tests** in `tests/timmy_automations/`
5. **Update dashboard** if the automation produces visible output

### Automation Script Template

```python
#!/usr/bin/env python3
"""Brief description of what this automation does.

Run: python3 timmy_automations/daily_run/my_automation.py
Env: See config/automations.json for configuration
"""

from __future__ import annotations

import json
import sys
from pathlib import Path

# Load automation config from manifest
CONFIG_PATH = Path(__file__).parent.parent / "config" / "automations.json"


def load_config() -> dict:
    """Load configuration for this automation."""
    manifest = json.loads(CONFIG_PATH.read_text())
    for auto in manifest["automations"]:
        if auto["id"] == "my_automation_id":
            return auto.get("config", {})
    return {}


def main() -> int:
    config = load_config()
    # Your automation logic here
    return 0


if __name__ == "__main__":
    sys.exit(main())
```

---

## Token Economy Integration

Automations participate in the token economy:

| Action | Token Cost/Reward | Reason |
|--------|-------------------|--------|
| Run daily automation | 1 token | Resource usage |
| Successful cycle retro | +5 tokens | Data contribution |
| Find quarantine candidate | +10 tokens | Quality improvement |
| Deep triage refinement | +20 tokens | High-value work |
| Automation failure | -2 tokens | Penalty |

See `src/lightning/` for token economy implementation.
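For example, a cycle that runs one daily automation (-1 token), records a successful retro (+5), and lands a deep triage refinement (+20) nets +24 tokens.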

---

## See Also

- `CLAUDE.md` — Architecture patterns and conventions
- `AGENTS.md` — Agent roster and development standards
- `.kimi/README.md` — Kimi agent workspace guide
- `.loop/` — Runtime data directory (created on first run)

---

_Maintained by: Timmy Automations Subsystem_
_Updated: 2026-03-21_
271
timmy_automations/__init__.py
Normal file
@@ -0,0 +1,271 @@
"""Timmy Automations — Central automation discovery and control module.

This module provides:
- Discovery of all configured automations
- Enable/disable control
- Status reporting
- Configuration management

Usage:
    from timmy_automations import AutomationRegistry

    registry = AutomationRegistry()
    for auto in registry.list_automations():
        print(f"{auto.id}: {auto.name} ({'enabled' if auto.enabled else 'disabled'})")
"""

from __future__ import annotations

import json
import os
from dataclasses import dataclass
from datetime import date
from pathlib import Path
from typing import Any


@dataclass
class Automation:
    """Represents a single automation configuration."""

    id: str
    name: str
    description: str
    script: str
    category: str
    enabled: bool
    trigger: str
    executable: str
    config: dict[str, Any]
    outputs: list[str]
    depends_on: list[str]
    schedule: str | None = None

    @property
    def full_script_path(self) -> Path:
        """Resolve the script path relative to repo root."""
        repo_root = Path(__file__).parent.parent
        return repo_root / self.script

    @property
    def is_executable(self) -> bool:
        """Check if the script file exists and is executable."""
        path = self.full_script_path
        return path.exists() and os.access(path, os.X_OK)

    @property
    def is_runnable(self) -> bool:
        """Check if automation can be run (enabled + executable)."""
        return self.enabled and self.is_executable


class AutomationRegistry:
    """Registry for discovering and managing Timmy automations."""

    MANIFEST_PATH = Path(__file__).parent / "config" / "automations.json"
    STATE_PATH = Path(__file__).parent.parent / ".loop" / "automation_state.json"

    def __init__(self, manifest_path: Path | None = None) -> None:
        """Initialize the registry, loading the manifest.

        Args:
            manifest_path: Optional override for manifest file location.
        """
        self._manifest_path = manifest_path or self.MANIFEST_PATH
        self._automations: dict[str, Automation] = {}
        self._load_manifest()

    def _load_manifest(self) -> None:
        """Load automations from the manifest file."""
        if not self._manifest_path.exists():
            self._automations = {}
            return

        try:
            data = json.loads(self._manifest_path.read_text())
            for auto_data in data.get("automations", []):
                auto = Automation(
                    id=auto_data["id"],
                    name=auto_data["name"],
                    description=auto_data["description"],
                    script=auto_data["script"],
                    category=auto_data["category"],
                    enabled=auto_data.get("enabled", True),
                    trigger=auto_data["trigger"],
                    executable=auto_data.get("executable", "python3"),
                    config=auto_data.get("config", {}),
                    outputs=auto_data.get("outputs", []),
                    depends_on=auto_data.get("depends_on", []),
                    schedule=auto_data.get("schedule"),
                )
                self._automations[auto.id] = auto
        except (json.JSONDecodeError, KeyError) as e:
            raise AutomationError(f"Failed to load manifest: {e}") from e

    def _save_manifest(self) -> None:
        """Save current automation states back to manifest."""
        data = {
            "version": "1.0.0",
            "description": "Master manifest of all Timmy automations",
            # Stamp with the save date rather than a hardcoded string
            "last_updated": date.today().isoformat(),
            "automations": [],
        }

        for auto in self._automations.values():
            auto_dict = {
                "id": auto.id,
                "name": auto.name,
                "description": auto.description,
                "script": auto.script,
                "category": auto.category,
                "enabled": auto.enabled,
                "trigger": auto.trigger,
                "executable": auto.executable,
                "config": auto.config,
                "outputs": auto.outputs,
                "depends_on": auto.depends_on,
            }
            if auto.schedule:
                auto_dict["schedule"] = auto.schedule
            data["automations"].append(auto_dict)

        self._manifest_path.write_text(json.dumps(data, indent=2) + "\n")

    def list_automations(
        self,
        category: str | None = None,
        enabled_only: bool = False,
        trigger: str | None = None,
    ) -> list[Automation]:
        """List automations with optional filtering.

        Args:
            category: Filter by category (daily_run, triage, metrics, workspace)
            enabled_only: Only return enabled automations
            trigger: Filter by trigger type (pre_cycle, post_cycle, scheduled, manual)

        Returns:
            List of matching Automation objects.
        """
        results = []
        for auto in self._automations.values():
            if category and auto.category != category:
                continue
            if enabled_only and not auto.enabled:
                continue
            if trigger and auto.trigger != trigger:
                continue
            results.append(auto)
        return sorted(results, key=lambda a: (a.category, a.name))

    def get_automation(self, automation_id: str) -> Automation | None:
        """Get a specific automation by ID."""
        return self._automations.get(automation_id)

    def enable(self, automation_id: str) -> bool:
        """Enable an automation.

        Returns:
            True if automation was found and enabled, False otherwise.
        """
        if automation_id not in self._automations:
            return False
        self._automations[automation_id].enabled = True
        self._save_manifest()
        return True

    def disable(self, automation_id: str) -> bool:
        """Disable an automation.

        Returns:
            True if automation was found and disabled, False otherwise.
        """
        if automation_id not in self._automations:
            return False
        self._automations[automation_id].enabled = False
        self._save_manifest()
        return True

    def get_by_trigger(self, trigger: str) -> list[Automation]:
        """Get all automations for a specific trigger."""
        return [a for a in self._automations.values() if a.trigger == trigger]

    def get_by_schedule(self, schedule: str) -> list[Automation]:
        """Get all automations for a specific schedule."""
        return [a for a in self._automations.values() if a.schedule == schedule]

    def validate_all(self) -> list[tuple[str, str]]:
        """Validate all automations and return any issues.

        Returns:
            List of (automation_id, error_message) tuples.
        """
        issues = []
        for auto in self._automations.values():
            if not auto.full_script_path.exists():
                issues.append((auto.id, f"Script not found: {auto.script}"))
            elif auto.enabled and not auto.is_executable:
                # Check if file is readable even if not executable
                if not os.access(auto.full_script_path, os.R_OK):
                    issues.append((auto.id, f"Script not readable: {auto.script}"))
        return issues

    def get_status(self) -> dict[str, Any]:
        """Get overall registry status."""
        total = len(self._automations)
        enabled = sum(1 for a in self._automations.values() if a.enabled)
        runnable = sum(1 for a in self._automations.values() if a.is_runnable)
        issues = self.validate_all()

        return {
            "total_automations": total,
            "enabled": enabled,
            "disabled": total - enabled,
            "runnable": runnable,
            "validation_issues": len(issues),
            "issues": [{"id": i[0], "error": i[1]} for i in issues],
            "categories": sorted({a.category for a in self._automations.values()}),
        }

    def save_state(self) -> None:
        """Save current automation state to .loop directory."""
        state = {
            "automations": {
                auto_id: {
                    "enabled": auto.enabled,
                    "runnable": auto.is_runnable,
                    "script_exists": auto.full_script_path.exists(),
                }
                for auto_id, auto in self._automations.items()
            }
        }
        self.STATE_PATH.parent.mkdir(parents=True, exist_ok=True)
        self.STATE_PATH.write_text(json.dumps(state, indent=2) + "\n")


class AutomationError(Exception):
    """Raised when automation operations fail."""


# Convenience functions for CLI usage
def list_automations(category: str | None = None, enabled_only: bool = False) -> list[Automation]:
    """List automations (convenience function)."""
    return AutomationRegistry().list_automations(category, enabled_only)


def enable_automation(automation_id: str) -> bool:
    """Enable an automation (convenience function)."""
    return AutomationRegistry().enable(automation_id)


def disable_automation(automation_id: str) -> bool:
    """Disable an automation (convenience function)."""
    return AutomationRegistry().disable(automation_id)


def get_status() -> dict[str, Any]:
    """Get registry status (convenience function)."""
    return AutomationRegistry().get_status()
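A short usage sketch built only from the functions defined above (the automation id `loop_guard` comes from the manifest below):

```python
from timmy_automations import AutomationRegistry, get_status

registry = AutomationRegistry()
for auto_id, error in registry.validate_all():
    print(f"{auto_id}: {error}")

registry.disable("loop_guard")   # persists enabled=false back to the manifest
print(get_status()["disabled"])  # a fresh registry reflects the saved change
registry.enable("loop_guard")
```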
233
timmy_automations/config/automations.json
Normal file
@@ -0,0 +1,233 @@
|
||||
{
|
||||
"version": "1.0.0",
|
||||
"description": "Master manifest of all Timmy automations",
|
||||
"last_updated": "2026-03-21",
|
||||
"automations": [
|
||||
{
|
||||
"id": "cycle_retro",
|
||||
"name": "Cycle Retrospective",
|
||||
"description": "Logs structured retrospective data after each development cycle",
|
||||
"script": "scripts/cycle_retro.py",
|
||||
"category": "daily_run",
|
||||
"enabled": true,
|
||||
"trigger": "post_cycle",
|
||||
"executable": "python3",
|
||||
"config": {
|
||||
"retro_file": ".loop/retro/cycles.jsonl",
|
||||
"summary_file": ".loop/retro/summary.json",
|
||||
"summary_window": 50,
|
||||
"epoch_enabled": true
|
||||
},
|
||||
"outputs": [
|
||||
".loop/retro/cycles.jsonl",
|
||||
".loop/retro/summary.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "loop_guard",
|
||||
"name": "Loop Guard",
|
||||
"description": "Idle detection with exponential backoff to prevent burning cycles on empty queues",
|
||||
"script": "scripts/loop_guard.py",
|
||||
"category": "daily_run",
|
||||
"enabled": true,
|
||||
"trigger": "pre_cycle",
|
||||
"executable": "python3",
|
||||
"config": {
|
||||
"queue_file": ".loop/queue.json",
|
||||
"idle_state_file": ".loop/idle_state.json",
|
||||
"backoff_base_seconds": 60,
|
||||
"backoff_max_seconds": 600,
|
||||
"backoff_multiplier": 2,
|
||||
"cycle_duration_seconds": 300
|
||||
},
|
||||
"outputs": [
|
||||
".loop/idle_state.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "triage_score",
|
||||
"name": "Mechanical Triage Scoring",
|
||||
"description": "Pure heuristic scoring of open issues based on scope, acceptance criteria, and alignment",
|
||||
"script": "scripts/triage_score.py",
|
||||
"category": "daily_run",
|
||||
"enabled": true,
|
||||
"trigger": "scheduled",
|
||||
"schedule": "every_10_cycles",
|
||||
"executable": "python3",
|
||||
"config": {
|
||||
"ready_threshold": 5,
|
||||
"quarantine_lookback": 20,
|
||||
"queue_file": ".loop/queue.json",
|
||||
"retro_file": ".loop/retro/triage.jsonl",
|
||||
"quarantine_file": ".loop/quarantine.json"
|
||||
},
|
||||
"outputs": [
|
||||
".loop/queue.json",
|
||||
".loop/retro/triage.jsonl",
|
||||
".loop/quarantine.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "deep_triage",
|
||||
"name": "Deep Triage",
|
||||
"description": "LLM-driven intelligent issue refinement, breaking down large issues, adding acceptance criteria",
|
||||
"script": "scripts/deep_triage.sh",
|
||||
"category": "triage",
|
||||
"enabled": true,
|
||||
"trigger": "scheduled",
|
||||
"schedule": "every_20_cycles",
|
||||
"executable": "bash",
|
||||
"depends_on": ["loop_introspect"],
|
||||
"config": {
|
||||
"queue_file": ".loop/queue.json",
|
||||
"retro_file": ".loop/retro/deep-triage.jsonl",
|
||||
"prompt_file": "scripts/deep_triage_prompt.md",
|
||||
"timmy_consultation": true,
|
||||
"timmy_timeout_seconds": 60
|
||||
},
|
||||
"outputs": [
|
||||
".loop/queue.json",
|
||||
".loop/retro/deep-triage.jsonl"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "loop_introspect",
|
||||
"name": "Loop Introspection",
|
||||
"description": "Self-improvement engine that analyzes retro data and produces structured recommendations",
|
||||
"script": "scripts/loop_introspect.py",
|
||||
"category": "triage",
|
||||
"enabled": true,
|
||||
"trigger": "scheduled",
|
||||
"schedule": "every_20_cycles",
|
||||
"executable": "python3",
|
||||
"config": {
|
||||
"cycles_file": ".loop/retro/cycles.jsonl",
|
||||
"deep_triage_file": ".loop/retro/deep-triage.jsonl",
|
||||
"triage_file": ".loop/retro/triage.jsonl",
|
||||
"quarantine_file": ".loop/quarantine.json",
|
||||
"insights_file": ".loop/retro/insights.json",
|
||||
"trend_window_days": 7
|
||||
},
|
||||
"outputs": [
|
||||
".loop/retro/insights.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "agent_workspace",
|
||||
"name": "Agent Workspace Manager",
|
||||
"description": "Creates and maintains isolated git clones, port ranges, and data directories per agent",
|
||||
"script": "scripts/agent_workspace.sh",
|
||||
"category": "workspace",
|
||||
"enabled": true,
|
||||
"trigger": "manual",
|
||||
"executable": "bash",
|
||||
"config": {
|
||||
"agents_dir": "/tmp/timmy-agents",
|
||||
"canonical_repo": "~/Timmy-Time-dashboard",
|
||||
"gitea_remote": "http://localhost:3000/rockachopa/Timmy-time-dashboard.git",
|
||||
"agents": ["hermes", "kimi-0", "kimi-1", "kimi-2", "kimi-3", "smoke"],
|
||||
"port_base_dashboard": 8100,
|
||||
"port_base_serve": 8200
|
||||
},
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"id": "kimi_bootstrap",
|
||||
"name": "Kimi Workspace Bootstrap",
|
||||
"description": "One-time setup script for new Kimi agent workspaces",
|
||||
"script": ".kimi/scripts/bootstrap.sh",
|
||||
"category": "workspace",
|
||||
"enabled": true,
|
||||
"trigger": "manual",
|
||||
"executable": "bash",
|
||||
"config": {},
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"id": "kimi_resume",
|
||||
"name": "Kimi Resume",
|
||||
"description": "Quick status check and resume prompt for Kimi workspaces",
|
||||
"script": ".kimi/scripts/resume.sh",
|
||||
"category": "workspace",
|
||||
"enabled": true,
|
||||
"trigger": "manual",
|
||||
"executable": "bash",
|
||||
"config": {},
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"id": "backfill_retro",
|
||||
"name": "Backfill Retrospective",
|
||||
"description": "One-time script to seed retrospective data from Gitea PR history",
|
||||
"script": "scripts/backfill_retro.py",
|
||||
"category": "metrics",
|
||||
"enabled": true,
|
||||
"trigger": "manual",
|
||||
"executable": "python3",
|
||||
"config": {
|
||||
"retro_file": ".loop/retro/cycles.jsonl",
|
||||
"summary_file": ".loop/retro/summary.json",
|
||||
"gitea_api": "http://localhost:3000/api/v1"
|
||||
},
|
||||
"outputs": [
|
||||
".loop/retro/cycles.jsonl",
|
||||
".loop/retro/summary.json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "pre_commit_checks",
|
||||
"name": "Pre-commit Checks",
|
||||
"description": "CI hygiene validation before commits — import checks, model config, syntax, formatting",
|
||||
"script": "scripts/pre_commit_checks.py",
|
||||
"category": "metrics",
|
||||
"enabled": true,
|
||||
"trigger": "pre_commit",
|
||||
"executable": "python3",
|
||||
"config": {
|
||||
"check_imports": true,
|
||||
"check_model_config": true,
|
||||
"check_test_syntax": true,
|
||||
"check_platform_paths": true,
|
||||
"check_docker_tests": true,
|
||||
"check_black_formatting": true
|
||||
},
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"id": "daily_run_orchestrator",
|
||||
"name": "Daily Run Orchestrator",
|
||||
"description": "The 10-minute ritual — fetches candidate issues and produces a concise Daily Run agenda plus day summary",
|
||||
"script": "timmy_automations/daily_run/orchestrator.py",
|
||||
"category": "daily_run",
|
||||
"enabled": true,
|
||||
"trigger": "manual",
|
||||
"executable": "python3",
|
||||
"config": {
|
||||
"config_file": "timmy_automations/config/daily_run.json",
|
||||
"candidate_labels": ["daily-run"],
|
||||
"size_labels": ["size:XS", "size:S"],
|
||||
"max_agenda_items": 3,
|
||||
"lookback_hours": 24,
|
||||
"agenda_time_minutes": 10
|
||||
},
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"id": "golden_path",
|
||||
"name": "Golden Path Generator",
|
||||
"description": "Generates coherent 30-60 minute mini-sessions from real Gitea issues — triage, fixes, and tests",
|
||||
"script": "timmy_automations/daily_run/golden_path.py",
|
||||
"category": "daily_run",
|
||||
"enabled": true,
|
||||
"trigger": "manual",
|
||||
"executable": "python3",
|
||||
"config": {
|
||||
"target_minutes": 45,
|
||||
"size_labels": ["size:XS", "size:S", "size:M"],
|
||||
"min_items": 3,
|
||||
"max_items": 5
|
||||
},
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
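Each manifest entry above pairs a `script` with an `executable` and a `trigger`. A minimal dispatcher sketch for this shape — assuming the entries sit under a top-level `automations` key, which is not visible in this excerpt:

```python
import json
import subprocess
from pathlib import Path

manifest = json.loads(Path("timmy_automations/config/automations.json").read_text())

def run_for_trigger(trigger: str) -> None:
    # "automations" as the top-level key is an assumption, not confirmed here.
    for auto in manifest.get("automations", []):
        if auto.get("enabled") and auto.get("trigger") == trigger:
            # e.g. ["python3", "scripts/pre_commit_checks.py"]
            subprocess.run([auto["executable"], auto["script"]], check=False)

run_for_trigger("pre_commit")
```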
48
timmy_automations/config/daily_run.json
Normal file
@@ -0,0 +1,48 @@
{
  "version": "1.0.0",
  "description": "Daily run schedule configuration",
  "schedules": {
    "every_cycle": {
      "description": "Run before/after every dev cycle",
      "automations": ["loop_guard", "cycle_retro"]
    },
    "every_10_cycles": {
      "description": "Run approximately every 10 cycles",
      "automations": ["triage_score"]
    },
    "every_20_cycles": {
      "description": "Run approximately every 20 cycles",
      "automations": ["loop_introspect", "deep_triage"]
    },
    "manual": {
      "description": "Run on-demand only",
      "automations": ["agent_workspace", "kimi_bootstrap", "kimi_resume", "backfill_retro"]
    }
  },
  "triggers": {
    "pre_cycle": {
      "description": "Run before each dev cycle begins",
      "automations": ["loop_guard"]
    },
    "post_cycle": {
      "description": "Run after each dev cycle completes",
      "automations": ["cycle_retro"]
    },
    "pre_commit": {
      "description": "Run before git commit",
      "automations": ["pre_commit_checks"]
    }
  },
  "orchestrator": {
    "description": "Daily Run orchestration script configuration",
    "gitea_api": "http://localhost:3000/api/v1",
    "repo_slug": "rockachopa/Timmy-time-dashboard",
    "token_file": "~/.hermes/gitea_token",
    "candidate_labels": ["daily-run"],
    "size_labels": ["size:XS", "size:S"],
    "layer_labels_prefix": "layer:",
    "max_agenda_items": 3,
    "lookback_hours": 24,
    "agenda_time_minutes": 10
  }
}
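A sketch of how a runner could resolve the `schedules` block for a given cycle number. Treating "approximately every N cycles" as a modulo check is an assumption here, not something this config specifies:

```python
import json
from pathlib import Path

config = json.loads(Path("timmy_automations/config/daily_run.json").read_text())
schedules = config["schedules"]

def automations_for_cycle(cycle: int) -> list[str]:
    # every_cycle always fires; the modulo interpretation below is assumed.
    due = list(schedules["every_cycle"]["automations"])
    if cycle % 10 == 0:
        due += schedules["every_10_cycles"]["automations"]
    if cycle % 20 == 0:
        due += schedules["every_20_cycles"]["automations"]
    return due

print(automations_for_cycle(40))
# ['loop_guard', 'cycle_retro', 'triage_score', 'loop_introspect', 'deep_triage']
```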
99
timmy_automations/config/triage_rules.yaml
Normal file
@@ -0,0 +1,99 @@
# Triage scoring weights and thresholds
# Used by: triage_score.py, deep_triage.sh

version: "1.0.0"

# Scoring thresholds
thresholds:
  ready: 5       # Minimum score to be considered "ready" for work
  excellent: 8   # Score indicating well-scoped, actionable issue

# Scope scoring (0-3)
scope:
  file_patterns:
    - pattern: '(?:src/|tests/|scripts/|\.py|\.html|\.js|\.yaml|\.toml|\.sh)'
      weight: 1
      description: "Mentions specific files"
  function_patterns:
    - pattern: '(?:def |class |function |method |`\w+\(\)`)'
      weight: 1
      description: "Mentions specific functions/classes"
  title_length:
    max_chars: 80
    weight: 1
    description: "Short, focused title"
  meta_penalty: -2  # Penalty for philosophy/meta issues

# Acceptance criteria scoring (0-3)
acceptance:
  language_patterns:
    - pattern: '(?:should|must|expect|verify|assert|test.?case|acceptance|criteria|pass(?:es|ing)|fail(?:s|ing)|return(?:s)?|raise(?:s)?)'
      weight: 2
      min_matches: 3
      description: "Has acceptance-related language"
  test_patterns:
    - pattern: '(?:tox|pytest|test_\w+|\.test\.|assert\s)'
      weight: 1
      description: "Mentions specific tests"
  structure_patterns:
    - pattern: '##\s*(problem|solution|expected|actual|steps)'
      weight: 1
      description: "Has structured sections"

# Alignment scoring (0-3)
alignment:
  bug_tags:
    - bug
    - broken
    - crash
    - error
    - fix
    - regression
    - hotfix
  bug_score: 3  # Bugs on main = highest priority

  refactor_tags:
    - refactor
    - cleanup
    - tech-debt
    - optimization
    - perf
  refactor_score: 2

  feature_tags:
    - feature
    - feat
    - enhancement
    - capability
    - timmy-capability
  feature_score: 2

  loop_generated_bonus: 1  # Boost for loop-generated issues

  meta_tags:
    - philosophy
    - soul-gap
    - discussion
    - question
    - rfc
  meta_score: 0  # Philosophy issues are valid but lowest priority

# Quarantine rules
quarantine:
  failure_threshold: 2  # Failures before quarantine
  lookback_cycles: 20   # How many cycles to look back

# Issue type classification
types:
  bug:
    tags: [bug, broken, crash, error, fix, regression, hotfix]
    priority_bonus: 0  # Handled by alignment scoring
  feature:
    tags: [feature, feat, enhancement, capability, timmy-capability]
  refactor:
    tags: [refactor, cleanup, tech-debt, optimization, perf]
  philosophy:
    tags: [philosophy, soul-gap, discussion, question, rfc]
    dev_actionable: false
  unknown:
    default: true
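As a concrete reading of the scope rules, here is a sketch that applies the file/function patterns and the title-length rule to a single issue. Names are illustrative; this is not the actual `triage_score.py` implementation, and the meta penalty is omitted for brevity:

```python
import re
import yaml  # assumes PyYAML is installed

rules = yaml.safe_load(open("timmy_automations/config/triage_rules.yaml"))

def scope_score(title: str, body: str) -> int:
    scope = rules["scope"]
    score = 0
    # Each matching pattern group contributes its configured weight.
    for rule in scope["file_patterns"] + scope["function_patterns"]:
        if re.search(rule["pattern"], body):
            score += rule["weight"]
    if len(title) <= scope["title_length"]["max_chars"]:
        score += scope["title_length"]["weight"]
    return min(score, 3)  # the section header caps scope at 0-3

print(scope_score("Fix crash in loop_guard", "Error in `check_queue()` in scripts/loop_guard.py"))  # 3
```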
103
timmy_automations/daily_run/README.md
Normal file
@@ -0,0 +1,103 @@
# Daily Run Automations

Scripts that run periodically to keep the development loop operational.

## Scripts

| Script | Source | Purpose | Trigger |
|--------|--------|---------|---------|
| `cycle_retro.py` | `../../scripts/cycle_retro.py` | Log structured retrospective data | Post-cycle |
| `loop_guard.py` | `../../scripts/loop_guard.py` | Idle detection with exponential backoff | Pre-cycle |
| `triage_score.py` | `../../scripts/triage_score.py` | Mechanical issue scoring | Every 10 cycles |
| `orchestrator.py` | `orchestrator.py` | The 10-minute ritual — Daily Run agenda + review | Manual |

## Running

These scripts are invoked by the dev loop orchestrator (Hermes). Manual execution:

```bash
# After a successful cycle
python3 scripts/cycle_retro.py --cycle 42 --success --issue 123 --type bug

# Check if queue has work (exits 0 if ready, 1 if idle)
python3 scripts/loop_guard.py

# Score open issues
python3 scripts/triage_score.py

# Generate Daily Run agenda (10-minute ritual)
python3 timmy_automations/daily_run/orchestrator.py

# Generate agenda with day summary (review mode)
python3 timmy_automations/daily_run/orchestrator.py --review

# Output as JSON
python3 timmy_automations/daily_run/orchestrator.py --review --json
```

## Daily Run Orchestrator

The orchestrator script connects to local Gitea and:

1. **Fetches candidate issues** matching configured labels (default: `daily-run` + `size:XS`/`size:S`)
2. **Generates a concise agenda** with up to 3 items for approximately 10 minutes of work
3. **Review mode** (`--review`): Summarizes the last 24 hours — issues/PRs touched, items closed/merged, test failures

### Configuration

Edit `timmy_automations/config/daily_run.json` under the `orchestrator` section:

```json
{
  "orchestrator": {
    "candidate_labels": ["daily-run"],
    "size_labels": ["size:XS", "size:S"],
    "max_agenda_items": 3,
    "lookback_hours": 24,
    "agenda_time_minutes": 10
  }
}
```

| Option | Description | Default |
|--------|-------------|---------|
| `candidate_labels` | Labels to identify Daily Run candidates | `["daily-run"]` |
| `size_labels` | Size labels to filter by | `["size:XS", "size:S"]` |
| `max_agenda_items` | Maximum items in agenda | `3` |
| `lookback_hours` | Hours to look back in review mode | `24` |
| `agenda_time_minutes` | Target time budget for agenda | `10` |

### Environment Variables

Override config via environment:

```bash
export TIMMY_GITEA_API="http://localhost:3000/api/v1"
export TIMMY_REPO_SLUG="rockachopa/Timmy-time-dashboard"
export TIMMY_GITEA_TOKEN="your-token-here"  # Alternative to token file
```

### Output Format

**Standard mode:**
```
============================================================
📋 DAILY RUN AGENDA
============================================================
Generated: 2026-03-21T15:16:02+00:00
Time budget: 10 minutes
Candidates considered: 5

1. #123 [XS] [infra]
   Title: Fix config loading bug
   Action: FIX
   URL: http://localhost:3000/rockachopa/Timmy-time-dashboard/issues/123
...
```

**Review mode (`--review`):**
Adds a day summary section showing issues touched, closed, PRs merged, and any test failures.

## Configuration

See `../config/automations.json` for automation manifests and `../config/daily_run.json` for scheduling and orchestrator settings.
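For tooling built on top of the orchestrator, the `--review --json` output can be consumed directly; a sketch using the key names shown above:

```python
import json
import subprocess

proc = subprocess.run(
    ["python3", "timmy_automations/daily_run/orchestrator.py", "--review", "--json"],
    capture_output=True, text=True, check=True,
)
data = json.loads(proc.stdout)
for item in data["agenda"]["items"]:
    print(f"#{item['number']} [{item['size']}] {item['action']}: {item['title']}")
summary = data.get("day_summary", {})
print(f"Closed: {summary.get('issues_closed', 0)}, merged: {summary.get('prs_merged', 0)}")
```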
583
timmy_automations/daily_run/golden_path.py
Normal file
@@ -0,0 +1,583 @@
"""Golden Path generator — coherent 30-60 minute mini-sessions from real issues.

Fetches issues from Gitea and assembles them into ordered sequences forming
a coherent mini-session. Each Golden Path includes:
- One small triage cleanup
- Two micro-fixes (XS/S sized)
- One test-improvement task

All tasks are real issues from the Gitea repository, never synthetic.

Usage:
    from timmy_automations.daily_run.golden_path import generate_golden_path
    path = generate_golden_path(target_minutes=45)
    print(path.to_json())
"""

from __future__ import annotations

import argparse
import json
import os
import sys
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError

# ── Configuration ─────────────────────────────────────────────────────────

REPO_ROOT = Path(__file__).resolve().parent.parent.parent
CONFIG_PATH = Path(__file__).parent.parent / "config" / "daily_run.json"

DEFAULT_CONFIG = {
    "gitea_api": "http://localhost:3000/api/v1",
    "repo_slug": "rockachopa/Timmy-time-dashboard",
    "token_file": "~/.hermes/gitea_token",
    "size_labels": ["size:XS", "size:S", "size:M"],
}

# Time estimates (in minutes) by size and type
TIME_ESTIMATES: dict[str, dict[str, int]] = {
    "XS": {"triage": 5, "fix": 10, "test": 10, "docs": 8, "refactor": 8},
    "S": {"triage": 10, "fix": 15, "test": 15, "docs": 12, "refactor": 12},
    "M": {"triage": 15, "fix": 25, "test": 25, "docs": 20, "refactor": 20},
}

# Issue type detection patterns
TYPE_PATTERNS: dict[str, dict[str, list[str]]] = {
    "triage": {
        "labels": ["triage", "cleanup", "organize", "sort", "categorize"],
        "title": ["triage", "cleanup", "organize", "sort", "categorize", "clean up"],
    },
    "fix": {
        "labels": ["bug", "fix", "error", "broken"],
        "title": ["fix", "bug", "error", "broken", "repair", "correct"],
    },
    "test": {
        "labels": ["test", "testing", "coverage", "pytest"],
        "title": ["test", "coverage", "pytest", "unit test", "integration test"],
    },
    "docs": {
        "labels": ["docs", "documentation", "readme", "docstring"],
        "title": ["doc", "readme", "comment", "guide", "tutorial"],
    },
    "refactor": {
        "labels": ["refactor", "cleanup", "debt", "maintainability"],
        "title": ["refactor", "cleanup", "simplify", "extract", "reorganize"],
    },
}


def load_config() -> dict:
    """Load configuration from config file with fallback to defaults."""
    config = DEFAULT_CONFIG.copy()
    if CONFIG_PATH.exists():
        try:
            file_config = json.loads(CONFIG_PATH.read_text())
            if "orchestrator" in file_config:
                config.update(file_config["orchestrator"])
        except (json.JSONDecodeError, OSError) as exc:
            print(f"[golden_path] Warning: Could not load config: {exc}", file=sys.stderr)

    # Environment variable overrides
    if os.environ.get("TIMMY_GITEA_API"):
        config["gitea_api"] = os.environ.get("TIMMY_GITEA_API")
    if os.environ.get("TIMMY_REPO_SLUG"):
        config["repo_slug"] = os.environ.get("TIMMY_REPO_SLUG")
    if os.environ.get("TIMMY_GITEA_TOKEN"):
        config["token"] = os.environ.get("TIMMY_GITEA_TOKEN")

    return config


def get_token(config: dict) -> str | None:
    """Get Gitea token from environment or file."""
    if "token" in config:
        return config["token"]

    token_file = Path(config["token_file"]).expanduser()
    if token_file.exists():
        return token_file.read_text().strip()

    return None


# ── Gitea API Client ──────────────────────────────────────────────────────


class GiteaClient:
    """Simple Gitea API client with graceful degradation."""

    def __init__(self, config: dict, token: str | None):
        self.api_base = config["gitea_api"].rstrip("/")
        self.repo_slug = config["repo_slug"]
        self.token = token
        self._available: bool | None = None

    def _headers(self) -> dict:
        headers = {"Accept": "application/json"}
        if self.token:
            headers["Authorization"] = f"token {self.token}"
        return headers

    def _api_url(self, path: str) -> str:
        return f"{self.api_base}/repos/{self.repo_slug}/{path}"

    def is_available(self) -> bool:
        """Check if Gitea API is reachable."""
        if self._available is not None:
            return self._available

        try:
            req = Request(
                f"{self.api_base}/version",
                headers=self._headers(),
                method="GET",
            )
            with urlopen(req, timeout=5) as resp:
                self._available = resp.status == 200
                return self._available
        except (HTTPError, URLError, TimeoutError):
            self._available = False
            return False

    def get(self, path: str, params: dict | None = None) -> list | dict:
        """Make a GET request to the Gitea API."""
        url = self._api_url(path)
        if params:
            query = "&".join(f"{k}={v}" for k, v in params.items())
            url = f"{url}?{query}"

        req = Request(url, headers=self._headers(), method="GET")
        with urlopen(req, timeout=15) as resp:
            return json.loads(resp.read())

    def get_paginated(self, path: str, params: dict | None = None) -> list:
        """Fetch all pages of a paginated endpoint."""
        all_items = []
        page = 1
        limit = 50

        while True:
            page_params = {"limit": limit, "page": page}
            if params:
                page_params.update(params)

            batch = self.get(path, page_params)
            if not batch:
                break

            all_items.extend(batch)
            if len(batch) < limit:
                break
            page += 1

        return all_items
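

# A quick usage sketch of the client above (illustrative only, not called by
# the module itself): list open bug issues when Gitea is reachable.
#
#     config = load_config()
#     client = GiteaClient(config, get_token(config))
#     if client.is_available():
#         bugs = client.get_paginated("issues", {"state": "open", "labels": "bug"})
#         print(f"{len(bugs)} open bug(s)")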


# ── Issue Classification ──────────────────────────────────────────────────


def extract_size(labels: list[dict]) -> str:
    """Extract size label from issue labels."""
    for label in labels:
        name = label.get("name", "")
        if name.startswith("size:"):
            return name.replace("size:", "").upper()
    return "?"


def classify_issue_type(issue: dict) -> str:
    """Classify an issue into a type based on labels and title."""
    labels = [l.get("name", "").lower() for l in issue.get("labels", [])]
    title = issue.get("title", "").lower()

    scores: dict[str, int] = {}

    for issue_type, patterns in TYPE_PATTERNS.items():
        score = 0
        # Check labels
        for pattern in patterns["labels"]:
            if any(pattern in label for label in labels):
                score += 2
        # Check title
        for pattern in patterns["title"]:
            if pattern in title:
                score += 1
        scores[issue_type] = score

    # Return the type with highest score, or "fix" as default
    if scores:
        best_type = max(scores, key=lambda k: scores[k])
        if scores[best_type] > 0:
            return best_type

    return "fix"  # Default to fix for uncategorized issues


def estimate_time(issue: dict) -> int:
    """Estimate time in minutes for an issue based on size and type."""
    size = extract_size(issue.get("labels", []))
    issue_type = classify_issue_type(issue)

    # Default to fix time estimates if type not found
    type_map = issue_type if issue_type in TIME_ESTIMATES.get(size, {}) else "fix"

    return TIME_ESTIMATES.get(size, TIME_ESTIMATES["S"]).get(type_map, 15)


def score_issue_for_path(issue: dict) -> int:
    """Score an issue for Golden Path suitability (higher = better fit)."""
    score = 0
    labels = [l.get("name", "").lower() for l in issue.get("labels", [])]
    issue_type = classify_issue_type(issue)

    # Prefer smaller sizes for predictability
    if "size:xs" in labels:
        score += 10
    elif "size:s" in labels:
        score += 7
    elif "size:m" in labels:
        score += 3

    # Prefer issues with clear type labels
    if issue_type in ["triage", "test", "fix"]:
        score += 3

    # Prefer issues with acceptance criteria or good description
    body = issue.get("body", "")
    if body:
        if "acceptance criteria" in body.lower():
            score += 3
        if len(body) > 200:
            score += 1

    # Prefer issues with recent activity
    updated_at = issue.get("updated_at", "")
    if updated_at:
        try:
            updated = datetime.fromisoformat(updated_at.replace("Z", "+00:00"))
            days_old = (datetime.now(timezone.utc) - updated).days
            if days_old < 7:
                score += 2
            elif days_old < 30:
                score += 1
        except (ValueError, TypeError):
            pass

    return score
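

# Worked example (hypothetical issue): a size:S issue titled "Fix flaky test
# in test_loop_guard" with labels ["bug", "size:S"] classifies as "fix"
# (label "bug" scores 2, title word "fix" scores 1), and estimate_time()
# returns TIME_ESTIMATES["S"]["fix"] == 15 minutes.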


# ── Golden Path Generation ────────────────────────────────────────────────


@dataclass
class PathItem:
    """A single item in a Golden Path."""

    number: int
    title: str
    size: str
    issue_type: str
    estimated_minutes: int
    url: str

    def to_dict(self) -> dict:
        return {
            "number": self.number,
            "title": self.title,
            "size": self.size,
            "type": self.issue_type,
            "estimated_minutes": self.estimated_minutes,
            "url": self.url,
        }


@dataclass
class GoldenPath:
    """A complete Golden Path sequence."""

    generated_at: str
    target_minutes: int
    items: list[PathItem] = field(default_factory=list)

    @property
    def total_estimated_minutes(self) -> int:
        return sum(item.estimated_minutes for item in self.items)

    @property
    def item_count(self) -> int:
        return len(self.items)

    def to_dict(self) -> dict:
        return {
            "generated_at": self.generated_at,
            "target_minutes": self.target_minutes,
            "total_estimated_minutes": self.total_estimated_minutes,
            "item_count": self.item_count,
            "items": [item.to_dict() for item in self.items],
        }

    def to_json(self, indent: int = 2) -> str:
        return json.dumps(self.to_dict(), indent=indent)


def fetch_eligible_issues(client: GiteaClient, config: dict) -> list[dict]:
    """Fetch open issues eligible for Golden Paths."""
    size_labels = config.get("size_labels", ["size:XS", "size:S", "size:M"])

    try:
        # Fetch all open issues
        issues = client.get_paginated("issues", {"state": "open", "sort": "updated"})
    except (HTTPError, URLError) as exc:
        print(f"[golden_path] Warning: Failed to fetch issues: {exc}", file=sys.stderr)
        return []

    # Filter by size labels if specified
    if size_labels:
        filtered = []
        size_names = {s.lower() for s in size_labels}
        for issue in issues:
            issue_labels = {l.get("name", "").lower() for l in issue.get("labels", [])}
            if issue_labels & size_names:
                filtered.append(issue)
        issues = filtered

    return issues


def group_issues_by_type(issues: list[dict]) -> dict[str, list[dict]]:
    """Group issues by their classified type, sorted by score."""
    groups: dict[str, list[dict]] = {
        "triage": [],
        "fix": [],
        "test": [],
        "docs": [],
        "refactor": [],
    }

    for issue in issues:
        issue_type = classify_issue_type(issue)
        if issue_type in groups:
            groups[issue_type].append(issue)

    # Sort each group by score (highest first)
    for issue_type in groups:
        groups[issue_type] = sorted(
            groups[issue_type],
            key=lambda i: score_issue_for_path(i),
            reverse=True,
        )

    return groups


def build_golden_path(
    grouped_issues: dict[str, list[dict]],
    target_minutes: int = 45,
) -> GoldenPath:
    """Build a Golden Path from grouped issues.

    The path follows a coherent sequence:
    1. One small triage cleanup (warm-up)
    2. One micro-fix (momentum building)
    3. One test-improvement (quality focus)
    4. One more micro-fix or docs (closure)
    """
    path = GoldenPath(
        generated_at=datetime.now(timezone.utc).isoformat(),
        target_minutes=target_minutes,
    )

    used_issue_numbers: set[int] = set()

    def add_best_item(issues: list[dict], max_minutes: int | None = None) -> bool:
        """Add the best available issue of a type to the path."""
        for issue in issues:
            number = issue.get("number", 0)
            if number in used_issue_numbers:
                continue

            est_time = estimate_time(issue)
            if max_minutes and est_time > max_minutes:
                continue

            used_issue_numbers.add(number)
            path.items.append(
                PathItem(
                    number=number,
                    title=issue.get("title", "Untitled"),
                    size=extract_size(issue.get("labels", [])),
                    issue_type=classify_issue_type(issue),
                    estimated_minutes=est_time,
                    url=issue.get("html_url", ""),
                )
            )
            return True
        return False

    # Phase 1: Warm-up with triage (5-10 min)
    if grouped_issues["triage"]:
        add_best_item(grouped_issues["triage"], max_minutes=15)
    else:
        # Fallback: use smallest available issue
        all_issues = (
            grouped_issues["fix"]
            + grouped_issues["docs"]
            + grouped_issues["refactor"]
        )
        all_issues.sort(key=lambda i: score_issue_for_path(i), reverse=True)
        add_best_item(all_issues, max_minutes=10)

    # Phase 2: First micro-fix (10-15 min)
    if grouped_issues["fix"]:
        add_best_item(grouped_issues["fix"], max_minutes=20)
    else:
        # Fallback to refactor
        add_best_item(grouped_issues["refactor"], max_minutes=15)

    # Phase 3: Test improvement (10-15 min)
    if grouped_issues["test"]:
        add_best_item(grouped_issues["test"], max_minutes=20)
    else:
        # If no test issues, add another fix
        add_best_item(grouped_issues["fix"], max_minutes=15)

    # Phase 4: Closure fix or docs (10-15 min)
    # Try to fill remaining time
    remaining_budget = target_minutes - path.total_estimated_minutes
    if remaining_budget >= 10:
        # Prefer fix, then docs
        if not add_best_item(grouped_issues["fix"], max_minutes=remaining_budget):
            if not add_best_item(grouped_issues["docs"], max_minutes=remaining_budget):
                add_best_item(grouped_issues["refactor"], max_minutes=remaining_budget)

    return path
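

# Illustrative trace (hypothetical issue numbers): with one triage issue
# (#10, size:XS -> 5m), one fix (#11, size:S -> 15m), and one test issue
# (#12, size:S -> 15m), the phases above yield [#10, #11, #12] at 35
# estimated minutes; with >= 10 minutes left of a 45-minute target, phase 4
# then tries a second fix before falling back to docs or refactor.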


def generate_golden_path(
    target_minutes: int = 45,
    config: dict | None = None,
) -> GoldenPath:
    """Generate a Golden Path for the specified time budget.

    Args:
        target_minutes: Target session length (30-60 recommended)
        config: Optional config override

    Returns:
        A GoldenPath with ordered items from real Gitea issues
    """
    cfg = config or load_config()
    token = get_token(cfg)
    client = GiteaClient(cfg, token)

    if not client.is_available():
        # Return empty path with error indication
        return GoldenPath(
            generated_at=datetime.now(timezone.utc).isoformat(),
            target_minutes=target_minutes,
            items=[],
        )

    issues = fetch_eligible_issues(client, cfg)
    grouped = group_issues_by_type(issues)
    return build_golden_path(grouped, target_minutes)


# ── Output Formatting ─────────────────────────────────────────────────────


def print_golden_path(path: GoldenPath) -> None:
    """Print a formatted Golden Path to stdout."""
    print("=" * 60)
    print("🌟 GOLDEN PATH")
    print("=" * 60)
    print(f"Generated: {path.generated_at}")
    print(f"Target: {path.target_minutes} minutes")
    print(f"Estimated: {path.total_estimated_minutes} minutes")
    print()

    if not path.items:
        print("No eligible issues found for a Golden Path.")
        print()
        print("To create Golden Paths, ensure issues have:")
        print("  - Size labels: size:XS, size:S, or size:M")
        print("  - Type labels: bug, test, triage, docs, refactor")
        print()
        return

    for i, item in enumerate(path.items, 1):
        type_emoji = {
            "triage": "🧹",
            "fix": "🔧",
            "test": "🧪",
            "docs": "📚",
            "refactor": "♻️",
        }.get(item.issue_type, "📋")

        print(f"{i}. {type_emoji} #{item.number} [{item.size}] ({item.estimated_minutes}m)")
        print(f"   Title: {item.title}")
        print(f"   Type: {item.issue_type.upper()}")
        if item.url:
            print(f"   URL: {item.url}")
        print()

    print("-" * 60)
    print("Instructions:")
    print("  1. Start with the triage item to warm up")
    print("  2. Progress through fixes to build momentum")
    print("  3. Use the test item for quality focus")
    print("  4. Check off items as you complete them")
    print()


# ── CLI ───────────────────────────────────────────────────────────────────


def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(
        description="Golden Path generator — coherent 30-60 minute mini-sessions",
    )
    p.add_argument(
        "--minutes",
        "-m",
        type=int,
        default=45,
        help="Target session length in minutes (default: 45)",
    )
    p.add_argument(
        "--json",
        "-j",
        action="store_true",
        help="Output as JSON instead of formatted text",
    )
    return p.parse_args()


def main() -> int:
    args = parse_args()

    # Validate target minutes
    target = max(30, min(60, args.minutes))
    if target != args.minutes:
        print(
            f"[golden_path] Warning: Clamped {args.minutes}m to {target}m range",
            file=sys.stderr,
        )

    path = generate_golden_path(target_minutes=target)

    if args.json:
        print(path.to_json())
    else:
        print_golden_path(path)

    return 0 if path.items else 1


if __name__ == "__main__":
    sys.exit(main())
539
timmy_automations/daily_run/orchestrator.py
Executable file
@@ -0,0 +1,539 @@
#!/usr/bin/env python3
"""Daily Run orchestration script — the 10-minute ritual.

Connects to local Gitea, fetches candidate issues, and produces a concise agenda
plus a day summary (review mode).

Run: python3 timmy_automations/daily_run/orchestrator.py [--review]
Env: See timmy_automations/config/daily_run.json for configuration

Refs: #703
"""

from __future__ import annotations

import argparse
import json
import os
import sys
from datetime import datetime, timedelta, timezone
from pathlib import Path
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError

# ── Configuration ─────────────────────────────────────────────────────────

REPO_ROOT = Path(__file__).resolve().parent.parent.parent
CONFIG_PATH = Path(__file__).parent.parent / "config" / "daily_run.json"

DEFAULT_CONFIG = {
    "gitea_api": "http://localhost:3000/api/v1",
    "repo_slug": "rockachopa/Timmy-time-dashboard",
    "token_file": "~/.hermes/gitea_token",
    "candidate_labels": ["daily-run"],
    "size_labels": ["size:XS", "size:S"],
    "layer_labels_prefix": "layer:",
    "max_agenda_items": 3,
    "lookback_hours": 24,
    "agenda_time_minutes": 10,
}


def load_config() -> dict:
    """Load configuration from config file with fallback to defaults."""
    config = DEFAULT_CONFIG.copy()
    if CONFIG_PATH.exists():
        try:
            file_config = json.loads(CONFIG_PATH.read_text())
            if "orchestrator" in file_config:
                config.update(file_config["orchestrator"])
        except (json.JSONDecodeError, OSError) as exc:
            print(f"[orchestrator] Warning: Could not load config: {exc}", file=sys.stderr)

    # Environment variable overrides
    if os.environ.get("TIMMY_GITEA_API"):
        config["gitea_api"] = os.environ.get("TIMMY_GITEA_API")
    if os.environ.get("TIMMY_REPO_SLUG"):
        config["repo_slug"] = os.environ.get("TIMMY_REPO_SLUG")
    if os.environ.get("TIMMY_GITEA_TOKEN"):
        config["token"] = os.environ.get("TIMMY_GITEA_TOKEN")

    return config


def get_token(config: dict) -> str | None:
    """Get Gitea token from environment or file."""
    if "token" in config:
        return config["token"]

    token_file = Path(config["token_file"]).expanduser()
    if token_file.exists():
        return token_file.read_text().strip()

    return None


# ── Gitea API Client ──────────────────────────────────────────────────────

class GiteaClient:
    """Simple Gitea API client with graceful degradation."""

    def __init__(self, config: dict, token: str | None):
        self.api_base = config["gitea_api"].rstrip("/")
        self.repo_slug = config["repo_slug"]
        self.token = token
        self._available: bool | None = None

    def _headers(self) -> dict:
        headers = {"Accept": "application/json"}
        if self.token:
            headers["Authorization"] = f"token {self.token}"
        return headers

    def _api_url(self, path: str) -> str:
        return f"{self.api_base}/repos/{self.repo_slug}/{path}"

    def is_available(self) -> bool:
        """Check if Gitea API is reachable."""
        if self._available is not None:
            return self._available

        try:
            req = Request(
                f"{self.api_base}/version",
                headers=self._headers(),
                method="GET",
            )
            with urlopen(req, timeout=5) as resp:
                self._available = resp.status == 200
                return self._available
        except (HTTPError, URLError, TimeoutError):
            self._available = False
            return False

    def get(self, path: str, params: dict | None = None) -> list | dict:
        """Make a GET request to the Gitea API."""
        url = self._api_url(path)
        if params:
            query = "&".join(f"{k}={v}" for k, v in params.items())
            url = f"{url}?{query}"

        req = Request(url, headers=self._headers(), method="GET")
        with urlopen(req, timeout=15) as resp:
            return json.loads(resp.read())

    def get_paginated(self, path: str, params: dict | None = None) -> list:
        """Fetch all pages of a paginated endpoint."""
        all_items = []
        page = 1
        limit = 50

        while True:
            page_params = {"limit": limit, "page": page}
            if params:
                page_params.update(params)

            batch = self.get(path, page_params)
            if not batch:
                break

            all_items.extend(batch)
            if len(batch) < limit:
                break
            page += 1

        return all_items


# ── Issue Processing ──────────────────────────────────────────────────────

def extract_size(labels: list[dict]) -> str:
    """Extract size label from issue labels."""
    for label in labels:
        name = label.get("name", "")
        if name.startswith("size:"):
            return name.replace("size:", "")
    return "?"


def extract_layer(labels: list[dict]) -> str | None:
    """Extract layer label from issue labels."""
    for label in labels:
        name = label.get("name", "")
        if name.startswith("layer:"):
            return name.replace("layer:", "")
    return None


def suggest_action_type(issue: dict) -> str:
    """Suggest an action type based on issue labels and content."""
    labels = [l.get("name", "").lower() for l in issue.get("labels", [])]
    title = issue.get("title", "").lower()

    if "bug" in labels or "fix" in title:
        return "fix"
    if "feature" in labels or "feat" in title:
        return "implement"
    if "refactor" in labels or "chore" in title:
        return "refactor"
    if "test" in labels or "test" in title:
        return "test"
    if "docs" in labels or "doc" in title:
        return "document"

    return "review"


def score_issue(issue: dict) -> int:
    """Score an issue for prioritization (higher = more suitable for daily run)."""
    score = 0
    labels = [l.get("name", "").lower() for l in issue.get("labels", [])]

    # Prefer smaller sizes
    if "size:xs" in labels:
        score += 10
    elif "size:s" in labels:
        score += 5
    elif "size:m" in labels:
        score += 2

    # Prefer daily-run labeled issues
    if "daily-run" in labels:
        score += 3

    # Prefer issues with clear type labels
    if any(l in labels for l in ["bug", "feature", "refactor"]):
        score += 2

    # Slight preference for issues with body content (more context)
    if issue.get("body") and len(issue.get("body", "")) > 100:
        score += 1

    return score
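

# Worked example (hypothetical issue): labels ["daily-run", "bug", "size:XS"]
# with a 150-char body score 10 (XS) + 3 (daily-run) + 2 (bug) + 1 (body)
# = 16, and suggest_action_type() returns "fix".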


# ── Agenda Generation ─────────────────────────────────────────────────────

def fetch_candidates(client: GiteaClient, config: dict) -> list[dict]:
    """Fetch issues matching candidate criteria."""
    candidate_labels = config["candidate_labels"]
    size_labels = config.get("size_labels", [])

    # Build label filter (OR logic via multiple label queries doesn't work well,
    # so we fetch by candidate label and filter sizes client-side)
    params = {"state": "open", "sort": "created", "labels": ",".join(candidate_labels)}

    try:
        issues = client.get_paginated("issues", params)
    except (HTTPError, URLError) as exc:
        print(f"[orchestrator] Warning: Failed to fetch issues: {exc}", file=sys.stderr)
        return []

    # Filter by size labels if specified
    if size_labels:
        filtered = []
        size_names = {s.lower() for s in size_labels}
        for issue in issues:
            issue_labels = {l.get("name", "").lower() for l in issue.get("labels", [])}
            if issue_labels & size_names:
                filtered.append(issue)
        issues = filtered

    return issues


def generate_agenda(issues: list[dict], config: dict) -> dict:
    """Generate a Daily Run agenda from candidate issues."""
    max_items = config.get("max_agenda_items", 3)
    agenda_time = config.get("agenda_time_minutes", 10)

    # Score and sort issues
    scored = [(score_issue(issue), issue) for issue in issues]
    scored.sort(key=lambda x: (-x[0], x[1].get("number", 0)))

    selected = scored[:max_items]

    items = []
    for score, issue in selected:
        item = {
            "number": issue.get("number"),
            "title": issue.get("title", "Untitled"),
            "size": extract_size(issue.get("labels", [])),
            "layer": extract_layer(issue.get("labels", [])),
            "action": suggest_action_type(issue),
            "url": issue.get("html_url", ""),
        }
        items.append(item)

    return {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "time_budget_minutes": agenda_time,
        "item_count": len(items),
        "items": items,
        "candidates_considered": len(issues),
    }


def print_agenda(agenda: dict) -> None:
    """Print a formatted agenda to stdout."""
    print("=" * 60)
    print("📋 DAILY RUN AGENDA")
    print("=" * 60)
    print(f"Generated: {agenda['generated_at']}")
    print(f"Time budget: {agenda['time_budget_minutes']} minutes")
    print(f"Candidates considered: {agenda['candidates_considered']}")
    print()

    if not agenda["items"]:
        print("No items matched the criteria for today's Daily Run.")
        print()
        return

    for i, item in enumerate(agenda["items"], 1):
        layer_str = f"[{item['layer']}]" if item["layer"] else ""
        print(f"{i}. #{item['number']} [{item['size']}] {layer_str}")
        print(f"   Title: {item['title']}")
        print(f"   Action: {item['action'].upper()}")
        if item['url']:
            print(f"   URL: {item['url']}")
        print()


# ── Review Mode (Day Summary) ─────────────────────────────────────────────

def _parse_ts(value: str) -> datetime | None:
    """Parse an RFC 3339 timestamp; Gitea may use 'Z' or a numeric offset."""
    try:
        return datetime.fromisoformat(value.replace("Z", "+00:00"))
    except (ValueError, TypeError):
        return None


def fetch_recent_activity(client: GiteaClient, config: dict) -> dict:
    """Fetch recent issues and PRs from the lookback window."""
    lookback_hours = config.get("lookback_hours", 24)
    since = datetime.now(timezone.utc) - timedelta(hours=lookback_hours)
    since_str = since.isoformat()

    activity = {
        "issues_touched": [],
        "issues_closed": [],
        "prs_merged": [],
        "prs_opened": [],
        "lookback_since": since_str,
    }

    try:
        # Fetch all open and closed issues updated recently
        for state in ["open", "closed"]:
            params = {"state": state, "sort": "updated", "limit": 100}
            issues = client.get_paginated("issues", params)

            for issue in issues:
                updated_at = issue.get("updated_at", "")
                # Compare parsed datetimes; comparing raw strings breaks when
                # Gitea returns a non-UTC offset.
                updated = _parse_ts(updated_at)
                if updated and updated >= since:
                    activity["issues_touched"].append({
                        "number": issue.get("number"),
                        "title": issue.get("title", "Untitled"),
                        "state": issue.get("state"),
                        "updated_at": updated_at,
                        "url": issue.get("html_url", ""),
                    })

                    if state == "closed":
                        activity["issues_closed"].append({
                            "number": issue.get("number"),
                            "title": issue.get("title", "Untitled"),
                            "closed_at": issue.get("closed_at", ""),
                        })

        # Fetch PRs
        prs = client.get_paginated("pulls", {"state": "all", "sort": "updated", "limit": 100})
        for pr in prs:
            updated = _parse_ts(pr.get("updated_at", ""))
            if updated and updated >= since:
                pr_info = {
                    "number": pr.get("number"),
                    "title": pr.get("title", "Untitled"),
                    "state": pr.get("state"),
                    "merged": pr.get("merged", False),
                    "updated_at": pr.get("updated_at", ""),
                    "url": pr.get("html_url", ""),
                }

                if pr.get("merged"):
                    merged = _parse_ts(pr.get("merged_at", ""))
                    if merged and merged >= since:
                        activity["prs_merged"].append(pr_info)
                else:
                    created = _parse_ts(pr.get("created_at", ""))
                    if created and created >= since:
                        activity["prs_opened"].append(pr_info)

    except (HTTPError, URLError) as exc:
        print(f"[orchestrator] Warning: Failed to fetch activity: {exc}", file=sys.stderr)

    return activity


def load_cycle_data() -> dict:
    """Load cycle retrospective data if available."""
    retro_file = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
    if not retro_file.exists():
        return {}

    try:
        entries = []
        for line in retro_file.read_text().strip().splitlines():
            try:
                entries.append(json.loads(line))
            except json.JSONDecodeError:
                continue

        # Get entries from last 24 hours
        since = datetime.now(timezone.utc) - timedelta(hours=24)
        recent = [
            e for e in entries
            if e.get("timestamp") and datetime.fromisoformat(e["timestamp"].replace("Z", "+00:00")) >= since
        ]

        failures = [e for e in recent if not e.get("success", True)]

        return {
            "cycles_count": len(recent),
            "failures_count": len(failures),
            "failures": [
                {
                    "cycle": e.get("cycle"),
                    "issue": e.get("issue"),
                    "reason": e.get("reason", "Unknown"),
                }
                for e in failures[-5:]  # Last 5 failures
            ],
        }
    except (OSError, ValueError):
        return {}


def generate_day_summary(activity: dict, cycles: dict) -> dict:
    """Generate a day summary from activity data."""
    return {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "lookback_hours": 24,
        "issues_touched": len(activity.get("issues_touched", [])),
        "issues_closed": len(activity.get("issues_closed", [])),
        "prs_merged": len(activity.get("prs_merged", [])),
        "prs_opened": len(activity.get("prs_opened", [])),
        "cycles": cycles.get("cycles_count", 0),
        "test_failures": cycles.get("failures_count", 0),
        "recent_failures": cycles.get("failures", []),
    }


def print_day_summary(summary: dict, activity: dict) -> None:
    """Print a formatted day summary to stdout."""
    print("=" * 60)
    print("📊 DAY SUMMARY (Review Mode)")
    print("=" * 60)
    print(f"Period: Last {summary['lookback_hours']} hours")
    print()

    print(f"📝 Issues touched: {summary['issues_touched']}")
    print(f"✅ Issues closed: {summary['issues_closed']}")
    print(f"🔀 PRs opened: {summary['prs_opened']}")
    print(f"🎉 PRs merged: {summary['prs_merged']}")
    print(f"🔄 Dev cycles: {summary['cycles']}")

    if summary["test_failures"] > 0:
        print(f"⚠️  Test/Build failures: {summary['test_failures']}")
    else:
        print("✅ No test/build failures")
    print()

    # Show recent failures if any
    if summary["recent_failures"]:
        print("Recent failures:")
        for f in summary["recent_failures"]:
            issue_str = f" (Issue #{f['issue']})" if f["issue"] else ""
            print(f"  - Cycle {f['cycle']}{issue_str}: {f['reason']}")
        print()

    # Show closed issues
    if activity.get("issues_closed"):
        print("Closed issues:")
        for issue in activity["issues_closed"][-5:]:  # Last 5
            print(f"  - #{issue['number']}: {issue['title'][:50]}")
        print()

    # Show merged PRs
    if activity.get("prs_merged"):
        print("Merged PRs:")
        for pr in activity["prs_merged"][-5:]:  # Last 5
            print(f"  - #{pr['number']}: {pr['title'][:50]}")
        print()


# ── Main ─────────────────────────────────────────────────────────────────

def parse_args() -> argparse.Namespace:
    p = argparse.ArgumentParser(
        description="Daily Run orchestration script — the 10-minute ritual",
    )
    p.add_argument(
        "--review", "-r",
        action="store_true",
        help="Include day summary (review mode)",
    )
    p.add_argument(
        "--json", "-j",
        action="store_true",
        help="Output as JSON instead of formatted text",
    )
    p.add_argument(
        "--max-items",
        type=int,
        default=None,
        help="Override max agenda items",
    )
    return p.parse_args()


def main() -> int:
    args = parse_args()
    config = load_config()

    if args.max_items is not None:
        config["max_agenda_items"] = args.max_items

    token = get_token(config)
    client = GiteaClient(config, token)

    # Check Gitea availability
    if not client.is_available():
        error_msg = "[orchestrator] Error: Gitea API is not available"
        if args.json:
            print(json.dumps({"error": error_msg}))
        else:
            print(error_msg, file=sys.stderr)
        return 1

    # Fetch candidates and generate agenda
    candidates = fetch_candidates(client, config)
    agenda = generate_agenda(candidates, config)

    # Review mode: fetch day summary
    day_summary = None
    activity = None
    if args.review:
        activity = fetch_recent_activity(client, config)
        cycles = load_cycle_data()
        day_summary = generate_day_summary(activity, cycles)

    # Output
    if args.json:
        output = {"agenda": agenda}
        if day_summary:
            output["day_summary"] = day_summary
        print(json.dumps(output, indent=2))
    else:
        print_agenda(agenda)
        if day_summary and activity:
            print_day_summary(day_summary, activity)

    return 0


if __name__ == "__main__":
    sys.exit(main())
51
timmy_automations/metrics/README.md
Normal file
@@ -0,0 +1,51 @@
# Metrics & Integration Automations

Data collection, validation, and dashboard integration.

## Scripts

| Script | Source | Purpose |
|--------|--------|---------|
| `backfill_retro.py` | `../../scripts/backfill_retro.py` | Seed retrospective data from Gitea history |
| `pre_commit_checks.py` | `../../scripts/pre_commit_checks.py` | CI hygiene validation |

## Backfill Retrospective

One-time script to populate `.loop/retro/` from Gitea merged PRs:

```bash
python3 scripts/backfill_retro.py
```

This seeds the cycle retrospective log so the LOOPSTAT panel isn't empty on new setups.

## Pre-commit Checks

Runs automatically before commits to catch common issues:

- ImportError regressions
- Model name assertions
- Platform-specific path issues
- Syntax errors in test files
- Black formatting

```bash
# Run manually
python3 scripts/pre_commit_checks.py

# Or via pre-commit hook
bash scripts/pre-commit-hook.sh
```

## Dashboard Integration

Metrics automations write to:

| File | Purpose |
|------|---------|
| `.loop/retro/cycles.jsonl` | Cycle retrospective log |
| `.loop/retro/summary.json` | Rolling statistics |
| `.loop/retro/insights.json` | Introspection recommendations |
| `.loop/automation_state.json` | Current automation states |

These feed the Mission Control dashboard at `/mission-control`.
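A sketch of reading `.loop/retro/cycles.jsonl` the way a dashboard panel might, using the `success` field that the orchestrator's `load_cycle_data()` also reads; the summary line printed here is illustrative:

```python
import json
from pathlib import Path

entries = [
    json.loads(line)
    for line in Path(".loop/retro/cycles.jsonl").read_text().splitlines()
    if line.strip()
]
if entries:
    ok = sum(1 for e in entries if e.get("success", True))
    print(f"{ok}/{len(entries)} cycles succeeded ({ok / len(entries):.0%})")
```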
34
timmy_automations/triage/README.md
Normal file
@@ -0,0 +1,34 @@
# Deep Triage Automations

Intelligent, LLM-assisted workflows for issue refinement and prioritization.

## Scripts

| Script | Source | Purpose | Frequency |
|--------|--------|---------|-----------|
| `deep_triage.sh` | `../../scripts/deep_triage.sh` | LLM-driven issue refinement | Every 20 cycles |
| `deep_triage_prompt.md` | `../../scripts/deep_triage_prompt.md` | Prompt template for deep triage | — |
| `loop_introspect.py` | `../../scripts/loop_introspect.py` | Self-improvement analysis | Every 20 cycles |

## Deep Triage Protocol

1. **Mechanical scoring** runs first (`triage_score.py`)
2. **Introspection** analyzes trends (`loop_introspect.py`)
3. **Deep triage** consults Hermes + Timmy for refinement
4. **Queue updated** with refined, prioritized issues

## Running

```bash
# Full deep triage (includes introspection)
bash scripts/deep_triage.sh

# Introspection only
python3 scripts/loop_introspect.py
```

## Output

- `.loop/queue.json` — Updated work queue
- `.loop/retro/deep-triage.jsonl` — Deep triage history
- `.loop/retro/insights.json` — Introspection recommendations
60
timmy_automations/workspace/README.md
Normal file
@@ -0,0 +1,60 @@
# Workspace Automations

Agent workspace management for multi-agent operation.

## Scripts

| Script | Source | Purpose |
|--------|--------|---------|
| `agent_workspace.sh` | `../../scripts/agent_workspace.sh` | Manage isolated agent environments |
| `bootstrap.sh` | `../../.kimi/scripts/bootstrap.sh` | One-time Kimi workspace setup |
| `dev.sh` | `../../.kimi/scripts/dev.sh` | Development helper commands |
| `resume.sh` | `../../.kimi/scripts/resume.sh` | Quick status check |

## Agent Workspace Layout

```
/tmp/timmy-agents/
├── hermes/    # Loop orchestrator
├── kimi-0/    # Kimi pane 0
├── kimi-1/    # Kimi pane 1
├── kimi-2/    # Kimi pane 2
├── kimi-3/    # Kimi pane 3
└── smoke/     # Smoke testing environment
```

Each workspace gets:
- An isolated git clone (from Gitea, not the local repo)
- A unique port range (8100+, 8200+)
- A separate data directory
- Its own TIMMY_HOME

## Usage

```bash
# Initialize all workspaces
bash scripts/agent_workspace.sh init-all

# Reset a specific workspace
bash scripts/agent_workspace.sh reset kimi-0

# Create branch in workspace
bash scripts/agent_workspace.sh branch kimi-0 feature/my-branch

# Bootstrap Kimi workspace
bash .kimi/scripts/bootstrap.sh

# Check status
bash .kimi/scripts/resume.sh
```

## Port Allocation

| Agent | Dashboard | Serve |
|-------|-----------|-------|
| hermes | 8100 | 8200 |
| kimi-0 | 8101 | 8201 |
| kimi-1 | 8102 | 8202 |
| kimi-2 | 8103 | 8203 |
| kimi-3 | 8104 | 8204 |
| smoke | 8109 | 8209 |