Compare commits

..

9 Commits

Author SHA1 Message Date
c0f6ca9fc2 [claude] Add web_fetch tool (trafilatura) for full-page content extraction (#973) (#1004)
Some checks failed
Tests / lint (push) Has been cancelled
Tests / test (push) Has been cancelled
Tests / lint (pull_request) Successful in 19s
Tests / test (pull_request) Failing after 25m3s
2026-03-22 23:03:38 +00:00
9656a5e0d0 [claude] Add connection leak and pragma unit tests for db_pool.py (#944) (#1001)
Some checks failed
Tests / lint (push) Has been cancelled
Tests / test (push) Has been cancelled
2026-03-22 22:56:58 +00:00
Alexander Whitestone
e35a23cefa [claude] Add research prompt template library (#974) (#999)
Some checks failed
Tests / lint (push) Has been cancelled
Tests / test (push) Has been cancelled
Co-authored-by: Alexander Whitestone <alexpaynex@gmail.com>
Co-committed-by: Alexander Whitestone <alexpaynex@gmail.com>
2026-03-22 22:44:02 +00:00
Alexander Whitestone
3ab180b8a7 [claude] Add Gitea backup script (#990) (#996)
Some checks failed
Tests / lint (push) Has been cancelled
Tests / test (push) Has been cancelled
Co-authored-by: Alexander Whitestone <alexpaynex@gmail.com>
Co-committed-by: Alexander Whitestone <alexpaynex@gmail.com>
2026-03-22 22:36:51 +00:00
e24f49e58d [kimi] Add JSON validation guard to queue.json writes (#952) (#995)
Some checks failed
Tests / lint (push) Has been cancelled
Tests / test (push) Has been cancelled
2026-03-22 22:33:40 +00:00
1fa5cff5dc [kimi] Fix GITEA_API configuration in triage scripts (#951) (#994)
Some checks failed
Tests / lint (push) Has been cancelled
Tests / test (push) Has been cancelled
2026-03-22 22:28:23 +00:00
e255e7eb2a [kimi] Add docstrings to system.py route handlers (#940) (#992)
Some checks failed
Tests / lint (push) Has been cancelled
Tests / test (push) Has been cancelled
2026-03-22 22:12:36 +00:00
c3b6eb71c0 [kimi] Add docstrings to src/dashboard/routes/tasks.py (#939) (#991)
Some checks failed
Tests / lint (push) Has been cancelled
Tests / test (push) Has been cancelled
2026-03-22 22:08:28 +00:00
bebbe442b4 feat: WorldInterface + Heartbeat v2 (#871, #872) (#900)
Some checks failed
Tests / lint (push) Has been cancelled
Tests / test (push) Has been cancelled
Co-authored-by: Perplexity Computer <perplexity@tower.local>
Co-committed-by: Perplexity Computer <perplexity@tower.local>
2026-03-22 13:44:49 +00:00
43 changed files with 2461 additions and 320 deletions

View File

@@ -50,6 +50,7 @@ sounddevice = { version = ">=0.4.6", optional = true }
sentence-transformers = { version = ">=2.0.0", optional = true }
numpy = { version = ">=1.24.0", optional = true }
requests = { version = ">=2.31.0", optional = true }
trafilatura = { version = ">=1.6.0", optional = true }
GitPython = { version = ">=3.1.40", optional = true }
pytest = { version = ">=8.0.0", optional = true }
pytest-asyncio = { version = ">=0.24.0", optional = true }
@@ -67,6 +68,7 @@ voice = ["pyttsx3", "openai-whisper", "piper-tts", "sounddevice"]
celery = ["celery"]
embeddings = ["sentence-transformers", "numpy"]
git = ["GitPython"]
research = ["requests", "trafilatura"]
dev = ["pytest", "pytest-asyncio", "pytest-cov", "pytest-timeout", "pytest-randomly", "pytest-xdist", "selenium"]
[tool.poetry.group.dev.dependencies]

View File

@@ -17,8 +17,23 @@ REPO_ROOT = Path(__file__).resolve().parent.parent
RETRO_FILE = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
SUMMARY_FILE = REPO_ROOT / ".loop" / "retro" / "summary.json"
GITEA_API = "http://localhost:3000/api/v1"
REPO_SLUG = "rockachopa/Timmy-time-dashboard"
def _get_gitea_api() -> str:
"""Read Gitea API URL from env var, then ~/.hermes/gitea_api file, then default."""
# Check env vars first (TIMMY_GITEA_API is preferred, GITEA_API for compatibility)
api_url = os.environ.get("TIMMY_GITEA_API") or os.environ.get("GITEA_API")
if api_url:
return api_url
# Check ~/.hermes/gitea_api file
api_file = Path.home() / ".hermes" / "gitea_api"
if api_file.exists():
return api_file.read_text().strip()
# Default fallback
return "http://localhost:3000/api/v1"
GITEA_API = _get_gitea_api()
REPO_SLUG = os.environ.get("REPO_SLUG", "rockachopa/Timmy-time-dashboard")
TOKEN_FILE = Path.home() / ".hermes" / "gitea_token"
TAG_RE = re.compile(r"\[([^\]]+)\]")

83
scripts/gitea_backup.sh Executable file
View File

@@ -0,0 +1,83 @@
#!/bin/bash
# Gitea backup script — run on the VPS before any hardening changes.
# Usage: sudo bash scripts/gitea_backup.sh [off-site-dest]
#
# off-site-dest: optional rsync/scp destination for off-site copy
#                e.g. user@backup-host:/backups/gitea/
#
# Refs: #971, #990

set -euo pipefail

BACKUP_DIR="/opt/gitea/backups"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
GITEA_CONF="/etc/gitea/app.ini"
# NOTE(review): GITEA_WORK_DIR is currently unused — confirm whether the dump
# needs `-w "$GITEA_WORK_DIR"` on this host, or drop the variable.
GITEA_WORK_DIR="/var/lib/gitea"
OFFSITE_DEST="${1:-}"

echo "=== Gitea Backup — $TIMESTAMP ==="

# Ensure backup directory exists
mkdir -p "$BACKUP_DIR"
cd "$BACKUP_DIR"

# Run the dump
echo "[1/4] Running gitea dump..."
gitea dump -c "$GITEA_CONF"

# Find the newest zip (gitea dump names it gitea-dump-*.zip).
# The trailing `|| true` is required: under `set -e` + `pipefail`, a failing
# `ls` (no matching zip) would abort the script inside the command
# substitution, making the explicit error branch below unreachable.
BACKUP_FILE=$(ls -t "$BACKUP_DIR"/gitea-dump-*.zip 2>/dev/null | head -1 || true)
if [ -z "$BACKUP_FILE" ]; then
    echo "ERROR: No backup zip found in $BACKUP_DIR"
    exit 1
fi

# GNU stat first (-c%s), BSD stat (-f%z) as fallback.
BACKUP_SIZE=$(stat -c%s "$BACKUP_FILE" 2>/dev/null || stat -f%z "$BACKUP_FILE")
echo "[2/4] Backup created: $BACKUP_FILE ($BACKUP_SIZE bytes)"
if [ "$BACKUP_SIZE" -eq 0 ]; then
    echo "ERROR: Backup file is 0 bytes"
    exit 1
fi

# Lock down permissions
chmod 600 "$BACKUP_FILE"

# Verify contents
echo "[3/4] Verifying backup contents..."
CONTENTS=$(unzip -l "$BACKUP_FILE" 2>/dev/null || true)

# check_component <needle> <label>: warn (do not fail) when the zip listing
# does not mention an expected backup component.
check_component() {
    if echo "$CONTENTS" | grep -q "$1"; then
        echo " OK: $2"
    else
        echo " WARN: $2 not found in backup"
    fi
}
check_component "gitea-db.sql" "Database dump"
check_component "gitea-repo" "Repositories"
check_component "custom" "Custom config"
check_component "app.ini" "app.ini"

# Off-site copy
if [ -n "$OFFSITE_DEST" ]; then
    echo "[4/4] Copying to off-site: $OFFSITE_DEST"
    rsync -avz "$BACKUP_FILE" "$OFFSITE_DEST"
    echo " Off-site copy complete."
else
    echo "[4/4] No off-site destination provided. Skipping."
    echo " To copy later: scp $BACKUP_FILE user@backup-host:/backups/gitea/"
fi

echo ""
echo "=== Backup complete ==="
echo "File: $BACKUP_FILE"
echo "Size: $BACKUP_SIZE bytes"
echo ""
echo "To verify restore on a clean instance:"
echo " 1. Copy zip to test machine"
echo " 2. unzip $BACKUP_FILE"
echo " 3. gitea restore --from <extracted-dir> -c /etc/gitea/app.ini"
echo " 4. Verify repos and DB are intact"

View File

@@ -30,7 +30,22 @@ IDLE_STATE_FILE = REPO_ROOT / ".loop" / "idle_state.json"
CYCLE_RESULT_FILE = REPO_ROOT / ".loop" / "cycle_result.json"
TOKEN_FILE = Path.home() / ".hermes" / "gitea_token"
GITEA_API = os.environ.get("GITEA_API", "http://localhost:3000/api/v1")
def _get_gitea_api() -> str:
"""Read Gitea API URL from env var, then ~/.hermes/gitea_api file, then default."""
# Check env vars first (TIMMY_GITEA_API is preferred, GITEA_API for compatibility)
api_url = os.environ.get("TIMMY_GITEA_API") or os.environ.get("GITEA_API")
if api_url:
return api_url
# Check ~/.hermes/gitea_api file
api_file = Path.home() / ".hermes" / "gitea_api"
if api_file.exists():
return api_file.read_text().strip()
# Default fallback
return "http://localhost:3000/api/v1"
GITEA_API = _get_gitea_api()
REPO_SLUG = os.environ.get("REPO_SLUG", "rockachopa/Timmy-time-dashboard")
# Default cycle duration in seconds (5 min); stale threshold = 2× this
@@ -187,7 +202,11 @@ def load_queue() -> list[dict]:
# Persist the cleaned queue so stale entries don't recur
_save_cleaned_queue(data, open_numbers)
return ready
except (json.JSONDecodeError, OSError):
except json.JSONDecodeError as exc:
print(f"[loop-guard] WARNING: Corrupt queue.json ({exc}) — returning empty queue")
return []
except OSError as exc:
print(f"[loop-guard] WARNING: Cannot read queue.json ({exc}) — returning empty queue")
return []

View File

@@ -1,56 +0,0 @@
import json
import urllib.request
import urllib.error
import os
# Gitea API root for the target instance (hardcoded VPS address).
BASE_URL = "http://143.198.27.163:3000/api/v1"
# "LHF" (low-hanging fruit) issues to file in bulk. Each dict is a Gitea
# "create issue" request payload: a title plus a markdown body.
issues = [
{"title": "LHF: Fix 4 broken tests in test_setup_script.py", "body": "Add @pytest.mark.skip_ci or mock fixtures to stop these environment-specific script tests from failing CI."},
{"title": "LHF: Fix xdist and coverage conflict in pyproject.toml", "body": "The -n auto --dist worksteal arguments conflict with --cov flags during make test-cov. Reposition these flags so xdist and coverage play nicely."},
{"title": "LHF: Separate tox unit and integration environments", "body": "They currently alias the same command. Ensure `tox -e unit` uses `-m unit` and `tox -e integration` uses `-m integration`."},
{"title": "LHF: Add duration and coverage threshold strictness to pytest", "body": "Add `--durations=10` and `--cov-fail-under=60` directly to the tool.pytest.ini_options addopts or CI pipeline."},
{"title": "LHF: Enforce coverage threshold in CI workflow", "body": "Update .github/workflows/tests.yml to fail if coverage drops below the 60% floor threshold."},
{"title": "LHF: Extract hardcoded PRAGMA busy_timeout=5000", "body": "Move the SQLite busy_timeout hardcode to pydantic-settings config.py for better environment control."},
{"title": "LHF: Extract hardcoded sats limit in consult_grok()", "body": "The hardcoded sats limit for the grok L402 proxy should be controlled via config.py environment variables."},
{"title": "LHF: Remove bare pass clauses in src/timmy/tools.py", "body": "Logged exceptions should not be followed by bare `pass` clauses if they silently swallow critical tool errors. Refactor to return an error string or raise gracefully."},
{"title": "LHF: Add docstrings to src/dashboard/routes/tasks.py", "body": "Add proper module-level and function-level docstrings to all public methods."},
{"title": "LHF: Add docstrings to src/dashboard/routes/system.py", "body": "Add proper module-level and function-level docstrings to the system configuration endpoints."},
{"title": "LHF: Add docstrings to VoiceTTS setter methods", "body": "Document `set_rate()`, `set_volume()`, and `set_voice()` parameters and bounds."},
{"title": "LHF: DRY up tasks_pending/active/completed in tasks.py", "body": "Refactor and extract the shared filtering logic for these three similar list-filtering functions."},
{"title": "LHF: Add error handling for missing DB in tasks.py", "body": "If swarm.db is locked or missing, tasks.py endpoints currently crash. Add a try/except pattern matching the graceful degradation specs."},
{"title": "LHF: Write unit tests for db_pool.py", "body": "The SQLite connection pool infrastructure needs dedicated unit tests ensuring that connections do not leak and pragmas are applied."},
{"title": "LHF: Write unit tests for health.py", "body": "The health check route needs tests to ensure it correctly aggregates subsystem states (Ollama, Redis, DB) without blocking the event loop."}
]
def main():
    """Create the LHF issues on the Gitea repo via its REST API.

    Reads an API token from ``.antigravity_gitea_token`` in the current
    working directory; aborts with a message if it is missing. Prints a
    per-issue status line and a final created/total summary.
    """
    token_path = os.path.join(os.getcwd(), ".antigravity_gitea_token")
    if not os.path.exists(token_path):
        print("Missing token.")
        return
    with open(token_path, "r") as f:
        token = f.read().strip()
    repo_owner = "rockachopa"
    repo_name = "Timmy-time-dashboard"
    count = 0
    for i, issue in enumerate(issues):
        print(f"Creating LHF issue {i+1}: {issue['title']}")
        url = f"{BASE_URL}/repos/{repo_owner}/{repo_name}/issues"
        payload = json.dumps(issue).encode("utf-8")
        req = urllib.request.Request(url, data=payload, method="POST")
        req.add_header("Authorization", f"token {token}")
        req.add_header("Content-Type", "application/json")
        try:
            with urllib.request.urlopen(req) as resp:
                if resp.status == 201:
                    count += 1
                    print(" -> Success")
                else:
                    # Non-201 success codes were previously silent; report them.
                    print(f" -> Unexpected status: {resp.status}")
        except urllib.error.HTTPError as e:
            print(f" -> Failed: {e.code} {e.read().decode('utf-8')}")
        except urllib.error.URLError as e:
            # Network-level failure (DNS, connection refused, timeout) was
            # previously uncaught and crashed the whole batch mid-run.
            print(f" -> Failed: {e.reason}")
    print(f"Created {count}/{len(issues)} LHF issues.")


if __name__ == '__main__':
    main()

View File

@@ -20,11 +20,28 @@ from datetime import datetime, timezone
from pathlib import Path
# ── Config ──────────────────────────────────────────────────────────────
GITEA_API = os.environ.get("GITEA_API", "http://localhost:3000/api/v1")
def _get_gitea_api() -> str:
"""Read Gitea API URL from env var, then ~/.hermes/gitea_api file, then default."""
# Check env vars first (TIMMY_GITEA_API is preferred, GITEA_API for compatibility)
api_url = os.environ.get("TIMMY_GITEA_API") or os.environ.get("GITEA_API")
if api_url:
return api_url
# Check ~/.hermes/gitea_api file
api_file = Path.home() / ".hermes" / "gitea_api"
if api_file.exists():
return api_file.read_text().strip()
# Default fallback
return "http://localhost:3000/api/v1"
GITEA_API = _get_gitea_api()
REPO_SLUG = os.environ.get("REPO_SLUG", "rockachopa/Timmy-time-dashboard")
TOKEN_FILE = Path.home() / ".hermes" / "gitea_token"
REPO_ROOT = Path(__file__).resolve().parent.parent
QUEUE_FILE = REPO_ROOT / ".loop" / "queue.json"
QUEUE_BACKUP_FILE = REPO_ROOT / ".loop" / "queue.json.bak"
RETRO_FILE = REPO_ROOT / ".loop" / "retro" / "triage.jsonl"
QUARANTINE_FILE = REPO_ROOT / ".loop" / "quarantine.json"
CYCLE_RETRO_FILE = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
@@ -326,9 +343,38 @@ def run_triage() -> list[dict]:
ready = [s for s in scored if s["ready"]]
not_ready = [s for s in scored if not s["ready"]]
# Save backup before writing (if current file exists and is valid)
if QUEUE_FILE.exists():
try:
json.loads(QUEUE_FILE.read_text()) # Validate current file
QUEUE_BACKUP_FILE.write_text(QUEUE_FILE.read_text())
except (json.JSONDecodeError, OSError):
pass # Current file is corrupt, don't overwrite backup
# Write new queue file
QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
QUEUE_FILE.write_text(json.dumps(ready, indent=2) + "\n")
# Validate the write by re-reading and parsing
try:
json.loads(QUEUE_FILE.read_text())
except (json.JSONDecodeError, OSError) as exc:
print(f"[triage] ERROR: queue.json validation failed: {exc}", file=sys.stderr)
# Restore from backup if available
if QUEUE_BACKUP_FILE.exists():
try:
backup_data = QUEUE_BACKUP_FILE.read_text()
json.loads(backup_data) # Validate backup
QUEUE_FILE.write_text(backup_data)
print(f"[triage] Restored queue.json from backup")
except (json.JSONDecodeError, OSError) as restore_exc:
print(f"[triage] ERROR: Backup restore failed: {restore_exc}", file=sys.stderr)
# Write empty list as last resort
QUEUE_FILE.write_text("[]\n")
else:
# No backup, write empty list
QUEUE_FILE.write_text("[]\n")
# Write retro entry
retro_entry = {
"timestamp": datetime.now(timezone.utc).isoformat(),

View File

@@ -0,0 +1,67 @@
---
name: Architecture Spike
type: research
typical_query_count: 2-4
expected_output_length: 600-1200 words
cascade_tier: groq_preferred
description: >
Investigate how to connect two systems or components. Produces an integration
architecture with sequence diagram, key decisions, and a proof-of-concept outline.
---
# Architecture Spike: Connect {system_a} to {system_b}
## Context
We need to integrate **{system_a}** with **{system_b}** in the context of
**{project_context}**. This spike answers: what is the best way to wire them
together, and what are the trade-offs?
## Constraints
- Prefer approaches that avoid adding new infrastructure dependencies.
- The integration should be **{sync_or_async}** (synchronous / asynchronous).
- Must work within: {environment_constraints}.
## Research Steps
1. Identify the APIs / protocols exposed by both systems.
2. List all known integration patterns (direct API, message queue, webhook, SDK, etc.).
3. Evaluate each pattern for complexity, reliability, and latency.
4. Select the recommended approach and outline a proof-of-concept.
## Output Format
### Integration Options
| Pattern | Complexity | Reliability | Latency | Notes |
|---------|-----------|-------------|---------|-------|
| ... | ... | ... | ... | ... |
### Recommended Approach
**Pattern:** {pattern_name}
**Why:** One paragraph explaining the choice.
### Sequence Diagram
```
{system_a} -> {middleware} -> {system_b}
```
Describe the data flow step by step:
1. {system_a} does X...
2. {middleware} transforms / routes...
3. {system_b} receives Y...
### Proof-of-Concept Outline
- Files to create or modify
- Key libraries / dependencies needed
- Estimated effort: {effort_estimate}
### Open Questions
Bullet list of decisions that need human input before proceeding.

View File

@@ -0,0 +1,74 @@
---
name: Competitive Scan
type: research
typical_query_count: 3-5
expected_output_length: 800-1500 words
cascade_tier: groq_preferred
description: >
Compare a project against its alternatives. Produces a feature matrix,
strengths/weaknesses analysis, and positioning summary.
---
# Competitive Scan: {project} vs Alternatives
## Context
Compare **{project}** against **{alternatives}** (comma-separated list of
competitors). The goal is to understand where {project} stands and identify
differentiation opportunities.
## Constraints
- Comparison date: {date}.
- Focus areas: {focus_areas} (e.g., features, pricing, community, performance).
- Perspective: {perspective} (user, developer, business).
## Research Steps
1. Gather key facts about {project} (features, pricing, community size, release cadence).
2. Gather the same data for each alternative in {alternatives}.
3. Build a feature comparison matrix.
4. Identify strengths and weaknesses for each entry.
5. Summarize positioning and recommend next steps.
## Output Format
### Overview
One paragraph: what space does {project} compete in, and who are the main players?
### Feature Matrix
| Feature / Attribute | {project} | {alt_1} | {alt_2} | {alt_3} |
|--------------------|-----------|---------|---------|---------|
| {feature_1} | ... | ... | ... | ... |
| {feature_2} | ... | ... | ... | ... |
| Pricing | ... | ... | ... | ... |
| License | ... | ... | ... | ... |
| Community Size | ... | ... | ... | ... |
| Last Major Release | ... | ... | ... | ... |
### Strengths & Weaknesses
#### {project}
- **Strengths:** ...
- **Weaknesses:** ...
#### {alt_1}
- **Strengths:** ...
- **Weaknesses:** ...
_(Repeat for each alternative)_
### Positioning Map
Describe where each project sits along the key dimensions (e.g., simplicity
vs power, free vs paid, niche vs general).
### Recommendations
Bullet list of actions based on the competitive landscape:
- **Differentiate on:** {differentiator}
- **Watch out for:** {threat}
- **Consider adopting from {alt}:** {feature_or_approach}

View File

@@ -0,0 +1,68 @@
---
name: Game Analysis
type: research
typical_query_count: 2-3
expected_output_length: 600-1000 words
cascade_tier: local_ok
description: >
Evaluate a game for AI agent playability. Assesses API availability,
observation/action spaces, and existing bot ecosystems.
---
# Game Analysis: {game}
## Context
Evaluate **{game}** to determine whether an AI agent can play it effectively.
Focus on programmatic access, observation space, action space, and existing
bot/AI ecosystems.
## Constraints
- Platform: {platform} (PC, console, mobile, browser).
- Agent type: {agent_type} (reinforcement learning, rule-based, LLM-driven, hybrid).
- Budget for API/licenses: {budget}.
## Research Steps
1. Identify official APIs, modding support, or programmatic access methods for {game}.
2. Characterize the observation space (screen pixels, game state JSON, memory reading, etc.).
3. Characterize the action space (keyboard/mouse, API calls, controller inputs).
4. Survey existing bots, AI projects, or research papers for {game}.
5. Assess feasibility and difficulty for the target agent type.
## Output Format
### Game Profile
| Property | Value |
|-------------------|------------------------|
| Game | {game} |
| Genre | {genre} |
| Platform | {platform} |
| API Available | Yes / No / Partial |
| Mod Support | Yes / No / Limited |
| Existing AI Work | Extensive / Some / None|
### Observation Space
Describe what data the agent can access and how (API, screen capture, memory hooks, etc.).
### Action Space
Describe how the agent can interact with the game (input methods, timing constraints, etc.).
### Existing Ecosystem
List known bots, frameworks, research papers, or communities working on AI for {game}.
### Feasibility Assessment
- **Difficulty:** Easy / Medium / Hard / Impractical
- **Best approach:** {recommended_agent_type}
- **Key challenges:** Bullet list
- **Estimated time to MVP:** {time_estimate}
### Recommendation
One paragraph: should we proceed, and if so, what is the first step?

View File

@@ -0,0 +1,79 @@
---
name: Integration Guide
type: research
typical_query_count: 3-5
expected_output_length: 1000-2000 words
cascade_tier: groq_preferred
description: >
Step-by-step guide to wire a specific tool into an existing stack,
complete with code samples, configuration, and testing steps.
---
# Integration Guide: Wire {tool} into {stack}
## Context
Integrate **{tool}** into our **{stack}** stack. The goal is to
**{integration_goal}** (e.g., "add vector search to the dashboard",
"send notifications via Telegram").
## Constraints
- Must follow existing project conventions (see CLAUDE.md).
- No new cloud AI dependencies unless explicitly approved.
- Environment config via `pydantic-settings` / `config.py`.
## Research Steps
1. Review {tool}'s official documentation for installation and setup.
2. Identify the minimal dependency set required.
3. Map {tool}'s API to our existing patterns (singletons, graceful degradation).
4. Write integration code with proper error handling.
5. Define configuration variables and their defaults.
## Output Format
### Prerequisites
- Dependencies to install (with versions)
- External services or accounts required
- Environment variables to configure
### Configuration
```python
# In config.py — add these fields to Settings:
{config_fields}
```
### Implementation
```python
# {file_path}
{implementation_code}
```
### Graceful Degradation
Describe how the integration behaves when {tool} is unavailable:
| Scenario | Behavior | Log Level |
|-----------------------|--------------------|-----------|
| {tool} not installed | {fallback} | WARNING |
| {tool} unreachable | {fallback} | WARNING |
| Invalid credentials | {fallback} | ERROR |
### Testing
```python
# tests/unit/test_{tool_snake}.py
{test_code}
```
### Verification Checklist
- [ ] Dependency added to pyproject.toml
- [ ] Config fields added with sensible defaults
- [ ] Graceful degradation tested (service down)
- [ ] Unit tests pass (`tox -e unit`)
- [ ] No new linting errors (`tox -e lint`)

View File

@@ -0,0 +1,67 @@
---
name: State of the Art
type: research
typical_query_count: 4-6
expected_output_length: 1000-2000 words
cascade_tier: groq_preferred
description: >
Comprehensive survey of what currently exists in a given field or domain.
Produces a structured landscape overview with key players, trends, and gaps.
---
# State of the Art: {field} (as of {date})
## Context
Survey the current landscape of **{field}**. Identify key players, recent
developments, dominant approaches, and notable gaps. This is a point-in-time
snapshot intended to inform decision-making.
## Constraints
- Focus on developments from the last {timeframe} (e.g., 12 months, 2 years).
- Prioritize {priority} (open-source, commercial, academic, or all).
- Target audience: {audience} (technical team, leadership, general).
## Research Steps
1. Identify the major categories or sub-domains within {field}.
2. For each category, list the leading projects, companies, or research groups.
3. Note recent milestones, releases, or breakthroughs.
4. Identify emerging trends and directions.
5. Highlight gaps — things that don't exist yet but should.
## Output Format
### Executive Summary
Two to three sentences: what is the state of {field} right now?
### Landscape Map
| Category | Key Players | Maturity | Trend |
|---------------|--------------------------|-------------|-------------|
| {category_1} | {player_a}, {player_b} | Early / GA | Growing / Stable / Declining |
| {category_2} | {player_c}, {player_d} | Early / GA | Growing / Stable / Declining |
### Recent Milestones
Chronological list of notable events in the last {timeframe}:
- **{date_1}:** {event_description}
- **{date_2}:** {event_description}
### Trends
Numbered list of the top 3-5 trends shaping {field}:
1. **{trend_name}** — {one_line_description}
2. **{trend_name}** — {one_line_description}
### Gaps & Opportunities
Bullet list of things that are missing, underdeveloped, or ripe for innovation.
### Implications for Us
One paragraph: what does this mean for our project? What should we do next?

View File

@@ -0,0 +1,52 @@
---
name: Tool Evaluation
type: research
typical_query_count: 3-5
expected_output_length: 800-1500 words
cascade_tier: groq_preferred
description: >
Discover and evaluate all shipping tools/libraries/services in a given domain.
Produces a ranked comparison table with pros, cons, and recommendation.
---
# Tool Evaluation: {domain}
## Context
You are researching tools, libraries, and services for **{domain}**.
The goal is to find everything that is currently shipping (not vaporware)
and produce a structured comparison.
## Constraints
- Only include tools that have public releases or hosted services available today.
- If a tool is in beta/preview, note that clearly.
- Focus on {focus_criteria} when evaluating (e.g., cost, ease of integration, community size).
## Research Steps
1. Identify all actively-maintained tools in the **{domain}** space.
2. For each tool, gather: name, URL, license/pricing, last release date, language/platform.
3. Evaluate each tool against the focus criteria.
4. Rank by overall fit for the use case: **{use_case}**.
## Output Format
### Summary
One paragraph: what the landscape looks like and the top recommendation.
### Comparison Table
| Tool | License / Price | Last Release | Language | {focus_criteria} Score | Notes |
|------|----------------|--------------|----------|----------------------|-------|
| ... | ... | ... | ... | ... | ... |
### Top Pick
- **Recommended:** {tool_name} — {one-line reason}
- **Runner-up:** {tool_name} — {one-line reason}
### Risks & Gaps
Bullet list of things to watch out for (missing features, vendor lock-in, etc.).

View File

@@ -56,13 +56,13 @@ async def self_modify_queue(request: Request):
@router.get("/swarm/mission-control", response_class=HTMLResponse)
async def mission_control(request: Request):
"""Render the primary swarm mission control terminal."""
"""Render the swarm mission control dashboard page."""
return templates.TemplateResponse(request, "mission_control.html", {})
@router.get("/bugs", response_class=HTMLResponse)
async def bugs_page(request: Request):
"""Render the systemic bugs and issue tracking page."""
"""Render the bug tracking page."""
return templates.TemplateResponse(
request,
"bugs.html",
@@ -77,19 +77,19 @@ async def bugs_page(request: Request):
@router.get("/self-coding", response_class=HTMLResponse)
async def self_coding(request: Request):
"""Render the self-coding and modifications statistics page."""
"""Render the self-coding automation status page."""
return templates.TemplateResponse(request, "self_coding.html", {"stats": {}})
@router.get("/hands", response_class=HTMLResponse)
async def hands_page(request: Request):
"""Render the physical 'hands' tracking page for environment-interacting agents."""
"""Render the hands (automation executions) page."""
return templates.TemplateResponse(request, "hands.html", {"executions": []})
@router.get("/creative/ui", response_class=HTMLResponse)
async def creative_ui(request: Request):
"""Render the creative/producer studio UI for image and media generation."""
"""Render the creative UI playground page."""
return templates.TemplateResponse(request, "creative.html", {})

View File

@@ -104,29 +104,25 @@ class _TaskView:
@router.get("/tasks", response_class=HTMLResponse)
async def tasks_page(request: Request):
"""Render the main task queue page with 3-column layout."""
pending, active, completed = [], [], []
try:
with _get_db() as db:
pending = [
_TaskView(_row_to_dict(r))
for r in db.execute(
"SELECT * FROM tasks WHERE status IN ('pending_approval') ORDER BY created_at DESC"
).fetchall()
]
active = [
_TaskView(_row_to_dict(r))
for r in db.execute(
"SELECT * FROM tasks WHERE status IN ('approved','running','paused') ORDER BY created_at DESC"
).fetchall()
]
completed = [
_TaskView(_row_to_dict(r))
for r in db.execute(
"SELECT * FROM tasks WHERE status IN ('completed','vetoed','failed') ORDER BY completed_at DESC LIMIT 50"
).fetchall()
]
except sqlite3.Error as e:
logger.error("Database error rendering tasks_page: %s", e)
with _get_db() as db:
pending = [
_TaskView(_row_to_dict(r))
for r in db.execute(
"SELECT * FROM tasks WHERE status IN ('pending_approval') ORDER BY created_at DESC"
).fetchall()
]
active = [
_TaskView(_row_to_dict(r))
for r in db.execute(
"SELECT * FROM tasks WHERE status IN ('approved','running','paused') ORDER BY created_at DESC"
).fetchall()
]
completed = [
_TaskView(_row_to_dict(r))
for r in db.execute(
"SELECT * FROM tasks WHERE status IN ('completed','vetoed','failed') ORDER BY completed_at DESC LIMIT 50"
).fetchall()
]
return templates.TemplateResponse(
request,
@@ -147,45 +143,64 @@ async def tasks_page(request: Request):
# ---------------------------------------------------------------------------
def _render_task_list(request: Request, query: str, empty_message: str) -> HTMLResponse:
"""Helper to fetch tasks from DB and render HTML partials safely, handling DB errors."""
try:
with _get_db() as db:
rows = db.execute(query).fetchall()
tasks = [_TaskView(_row_to_dict(r)) for r in rows]
if not tasks:
return HTMLResponse(f'<div class="empty-column">{empty_message}</div>')
parts = [
@router.get("/tasks/pending", response_class=HTMLResponse)
async def tasks_pending(request: Request):
"""Return HTMX partial for pending approval tasks."""
with _get_db() as db:
rows = db.execute(
"SELECT * FROM tasks WHERE status='pending_approval' ORDER BY created_at DESC"
).fetchall()
tasks = [_TaskView(_row_to_dict(r)) for r in rows]
parts = []
for task in tasks:
parts.append(
templates.TemplateResponse(
request, "partials/task_card.html", {"task": task}
).body.decode()
for task in tasks
]
return HTMLResponse("".join(parts))
except sqlite3.Error as e:
logger.error("Database error fetching tasks: %s", e)
return HTMLResponse('<div class="empty-column error">Database unavailable</div>')
@router.get("/tasks/pending", response_class=HTMLResponse)
async def tasks_pending(request: Request):
"""HTMX partial rendering the list of pending tasks."""
query = "SELECT * FROM tasks WHERE status='pending_approval' ORDER BY created_at DESC"
return _render_task_list(request, query, "No pending tasks")
)
if not parts:
return HTMLResponse('<div class="empty-column">No pending tasks</div>')
return HTMLResponse("".join(parts))
@router.get("/tasks/active", response_class=HTMLResponse)
async def tasks_active(request: Request):
"""HTMX partial rendering the list of active tasks."""
query = "SELECT * FROM tasks WHERE status IN ('approved','running','paused') ORDER BY created_at DESC"
return _render_task_list(request, query, "No active tasks")
"""Return HTMX partial for active (approved/running/paused) tasks."""
with _get_db() as db:
rows = db.execute(
"SELECT * FROM tasks WHERE status IN ('approved','running','paused') ORDER BY created_at DESC"
).fetchall()
tasks = [_TaskView(_row_to_dict(r)) for r in rows]
parts = []
for task in tasks:
parts.append(
templates.TemplateResponse(
request, "partials/task_card.html", {"task": task}
).body.decode()
)
if not parts:
return HTMLResponse('<div class="empty-column">No active tasks</div>')
return HTMLResponse("".join(parts))
@router.get("/tasks/completed", response_class=HTMLResponse)
async def tasks_completed(request: Request):
    """HTMX partial rendering the list of completed tasks.

    Covers terminal states (completed, vetoed, failed), most recently
    completed first, capped at 50 rows. Rendering is delegated to the
    shared _render_task_list helper.
    """
    # Bug fix: removed the unreachable pre-refactor body that remained
    # after the return statement (dead code from an incomplete refactor).
    query = "SELECT * FROM tasks WHERE status IN ('completed','vetoed','failed') ORDER BY completed_at DESC LIMIT 50"
    return _render_task_list(request, query, "No completed tasks yet")
# ---------------------------------------------------------------------------
@@ -229,31 +244,31 @@ async def create_task_form(
@router.post("/tasks/{task_id}/approve", response_class=HTMLResponse)
async def approve_task(request: Request, task_id: str):
    """Approve a pending task and move it to active queue."""
    # Bug fix: the function carried two consecutive string literals; only
    # the first is a docstring, the second was a no-op expression. Merged
    # into the single (newer) docstring.
    return await _set_status(request, task_id, "approved")
@router.post("/tasks/{task_id}/veto", response_class=HTMLResponse)
async def veto_task(request: Request, task_id: str):
    """Veto a task, marking it as rejected."""
    # Bug fix: dropped the duplicate docstring left as a no-op expression.
    return await _set_status(request, task_id, "vetoed")
@router.post("/tasks/{task_id}/pause", response_class=HTMLResponse)
async def pause_task(request: Request, task_id: str):
    """Pause a running or approved task."""
    # Bug fix: dropped the duplicate docstring left as a no-op expression.
    return await _set_status(request, task_id, "paused")
@router.post("/tasks/{task_id}/cancel", response_class=HTMLResponse)
async def cancel_task(request: Request, task_id: str):
    """Cancel a task (marks as vetoed)."""
    # Bug fix: dropped the duplicate docstring left as a no-op expression.
    # Cancel shares the "vetoed" terminal state with veto_task by design.
    return await _set_status(request, task_id, "vetoed")
@router.post("/tasks/{task_id}/retry", response_class=HTMLResponse)
async def retry_task(request: Request, task_id: str):
    """Retry a failed/vetoed task by moving it back to approved."""
    # Bug fix: dropped the duplicate docstring left as a no-op expression.
    return await _set_status(request, task_id, "approved")
@@ -264,7 +279,7 @@ async def modify_task(
title: str = Form(...),
description: str = Form(""),
):
"""Modify the title and/or description of a specific task."""
"""Update task title and description."""
with _get_db() as db:
db.execute(
"UPDATE tasks SET title=?, description=? WHERE id=?",

View File

@@ -16,8 +16,6 @@ from datetime import UTC, datetime
from pathlib import Path
from typing import Any
from config import settings
logger = logging.getLogger(__name__)
@@ -104,7 +102,7 @@ class EventBus:
self._persistence_db_path.parent.mkdir(parents=True, exist_ok=True)
with closing(sqlite3.connect(str(self._persistence_db_path))) as conn:
conn.execute("PRAGMA journal_mode=WAL")
conn.execute(f"PRAGMA busy_timeout={settings.db_busy_timeout_ms}")
conn.execute("PRAGMA busy_timeout=5000")
conn.executescript(_EVENTS_SCHEMA)
conn.commit()
@@ -116,7 +114,7 @@ class EventBus:
return
with closing(sqlite3.connect(str(self._persistence_db_path))) as conn:
conn.row_factory = sqlite3.Row
conn.execute(f"PRAGMA busy_timeout={settings.db_busy_timeout_ms}")
conn.execute("PRAGMA busy_timeout=5000")
yield conn
def _persist_event(self, event: Event) -> None:

View File

@@ -18,8 +18,6 @@ from datetime import UTC, datetime
from enum import StrEnum
from pathlib import Path
from config import settings
logger = logging.getLogger(__name__)
DB_PATH = Path("data/swarm.db")
@@ -70,7 +68,7 @@ def _get_conn() -> Generator[sqlite3.Connection, None, None]:
with closing(sqlite3.connect(str(DB_PATH))) as conn:
conn.row_factory = sqlite3.Row
conn.execute("PRAGMA journal_mode=WAL")
conn.execute(f"PRAGMA busy_timeout={settings.db_busy_timeout_ms}")
conn.execute("PRAGMA busy_timeout=5000")
conn.execute("""
CREATE TABLE IF NOT EXISTS custom_models (
name TEXT PRIMARY KEY,

View File

@@ -0,0 +1,29 @@
"""World interface — engine-agnostic adapter pattern for embodied agents.
Provides the ``WorldInterface`` ABC and an adapter registry so Timmy can
observe, act, and speak in any game world (Morrowind, Luanti, Godot, …)
through a single contract.
Quick start::
from infrastructure.world import get_adapter, register_adapter
from infrastructure.world.interface import WorldInterface
register_adapter("mock", MockWorldAdapter)
world = get_adapter("mock")
perception = world.observe()
"""
from infrastructure.world.registry import AdapterRegistry
_registry = AdapterRegistry()
register_adapter = _registry.register
get_adapter = _registry.get
list_adapters = _registry.list_adapters
__all__ = [
"register_adapter",
"get_adapter",
"list_adapters",
]

View File

@@ -0,0 +1 @@
"""Built-in world adapters."""

View File

@@ -0,0 +1,99 @@
"""Mock world adapter — returns canned perception and logs commands.
Useful for testing the heartbeat loop and WorldInterface contract
without a running game server.
"""
from __future__ import annotations
import logging
from dataclasses import dataclass
from datetime import UTC, datetime
from infrastructure.world.interface import WorldInterface
from infrastructure.world.types import (
ActionResult,
ActionStatus,
CommandInput,
PerceptionOutput,
)
logger = logging.getLogger(__name__)
@dataclass
class _ActionLog:
    """Record of an action dispatched to the mock world."""

    # The command that was passed to MockWorldAdapter.act().
    command: CommandInput
    # When the command was logged (timezone-aware, UTC).
    timestamp: datetime
class MockWorldAdapter(WorldInterface):
    """In-memory mock adapter for testing.

    ``observe()`` hands back canned perception built from the constructor
    arguments, ``act()`` records the command and reports success, and
    ``speak()`` records the message. Tests inspect ``action_log`` and
    ``speech_log`` to verify what the agent did — no game server needed.
    """

    def __init__(
        self,
        *,
        location: str = "Test Chamber",
        entities: list[str] | None = None,
        events: list[str] | None = None,
    ) -> None:
        self._location = location
        # Falsy arguments (None or empty list) fall back to the defaults,
        # preserving the original truthiness-based behaviour.
        self._entities = entities if entities else ["TestNPC"]
        self._events = events if events else []
        self._connected = False
        self.action_log: list[_ActionLog] = []
        self.speech_log: list[dict] = []

    # -- lifecycle ---------------------------------------------------------

    def connect(self) -> None:
        self._connected = True
        logger.info("MockWorldAdapter connected")

    def disconnect(self) -> None:
        self._connected = False
        logger.info("MockWorldAdapter disconnected")

    @property
    def is_connected(self) -> bool:
        return self._connected

    # -- core contract -----------------------------------------------------

    def observe(self) -> PerceptionOutput:
        """Return the canned perception configured at construction."""
        logger.debug("MockWorldAdapter.observe()")
        # Copies guard the internal lists against mutation by the caller.
        snapshot = PerceptionOutput(
            timestamp=datetime.now(UTC),
            location=self._location,
            entities=list(self._entities),
            events=list(self._events),
            raw={"adapter": "mock"},
        )
        return snapshot

    def act(self, command: CommandInput) -> ActionResult:
        """Record *command* in ``action_log`` and report success."""
        logger.debug("MockWorldAdapter.act(%s)", command.action)
        entry = _ActionLog(command=command, timestamp=datetime.now(UTC))
        self.action_log.append(entry)
        result = ActionResult(
            status=ActionStatus.SUCCESS,
            message=f"Mock executed: {command.action}",
            data={"adapter": "mock"},
        )
        return result

    def speak(self, message: str, target: str | None = None) -> None:
        """Record *message* (and optional *target*) in ``speech_log``."""
        logger.debug("MockWorldAdapter.speak(%r, target=%r)", message, target)
        entry = {
            "message": message,
            "target": target,
            "timestamp": datetime.now(UTC).isoformat(),
        }
        self.speech_log.append(entry)

View File

@@ -0,0 +1,58 @@
"""TES3MP world adapter — stub for Morrowind multiplayer via TES3MP.
This adapter will eventually connect to a TES3MP server and translate
the WorldInterface contract into TES3MP commands. For now every method
raises ``NotImplementedError`` with guidance on what needs wiring up.
Once PR #864 merges, import PerceptionOutput and CommandInput directly
from ``infrastructure.morrowind.schemas`` if their shapes differ from
the canonical types in ``infrastructure.world.types``.
"""
from __future__ import annotations
import logging
from infrastructure.world.interface import WorldInterface
from infrastructure.world.types import ActionResult, CommandInput, PerceptionOutput
logger = logging.getLogger(__name__)
class TES3MPWorldAdapter(WorldInterface):
    """Stub adapter for TES3MP (Morrowind multiplayer).

    All core methods raise ``NotImplementedError``.
    Implement ``connect()`` first — it should open a socket to the
    TES3MP server and authenticate.
    """

    def __init__(self, *, host: str = "localhost", port: int = 25565) -> None:
        # Connection target for the eventual TES3MP server socket.
        # NOTE(review): 25565 as default port — confirm against the
        # TES3MP server's configured port before wiring up connect().
        self._host = host
        self._port = port
        # Remains False until connect() is actually implemented.
        self._connected = False

    # -- lifecycle ---------------------------------------------------------

    def connect(self) -> None:
        raise NotImplementedError("TES3MPWorldAdapter.connect() — wire up TES3MP server socket")

    def disconnect(self) -> None:
        raise NotImplementedError("TES3MPWorldAdapter.disconnect() — close TES3MP server socket")

    @property
    def is_connected(self) -> bool:
        # Honest today: always False, since connect() cannot succeed yet.
        return self._connected

    # -- core contract (stubs) ---------------------------------------------

    def observe(self) -> PerceptionOutput:
        raise NotImplementedError("TES3MPWorldAdapter.observe() — poll TES3MP for player/NPC state")

    def act(self, command: CommandInput) -> ActionResult:
        raise NotImplementedError(
            "TES3MPWorldAdapter.act() — translate CommandInput to TES3MP packet"
        )

    def speak(self, message: str, target: str | None = None) -> None:
        raise NotImplementedError("TES3MPWorldAdapter.speak() — send chat message via TES3MP")

View File

@@ -0,0 +1,64 @@
"""Abstract WorldInterface — the contract every game-world adapter must fulfil.
Follows a Gymnasium-inspired pattern: observe → act → speak, with each
method returning strongly-typed data structures.
Any future engine (TES3MP, Luanti, Godot, …) plugs in by subclassing
``WorldInterface`` and implementing the three methods.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from infrastructure.world.types import ActionResult, CommandInput, PerceptionOutput
class WorldInterface(ABC):
    """Engine-agnostic base class for world adapters.

    Subclasses must implement:
    - ``observe()`` — gather structured perception from the world
    - ``act()`` — dispatch a command and return the outcome
    - ``speak()`` — send a message to an NPC / player / broadcast

    Lifecycle hooks ``connect()`` and ``disconnect()`` are optional.
    """

    # -- lifecycle (optional overrides) ------------------------------------

    # B027 suppressed deliberately: an empty non-abstract method on an ABC
    # is the intended "optional hook" pattern here.
    def connect(self) -> None:  # noqa: B027
        """Establish connection to the game world.

        Default implementation is a no-op. Override to open sockets,
        authenticate, etc.
        """

    def disconnect(self) -> None:  # noqa: B027
        """Tear down the connection.

        Default implementation is a no-op.
        """

    @property
    def is_connected(self) -> bool:
        """Return ``True`` if the adapter has an active connection.

        Default returns ``True``. Override for adapters that maintain
        persistent connections.
        """
        return True

    # -- core contract (must implement) ------------------------------------

    @abstractmethod
    def observe(self) -> PerceptionOutput:
        """Return a structured snapshot of the current world state."""

    @abstractmethod
    def act(self, command: CommandInput) -> ActionResult:
        """Execute *command* in the world and return the result."""

    @abstractmethod
    def speak(self, message: str, target: str | None = None) -> None:
        """Send *message* in the world, optionally directed at *target*."""

View File

@@ -0,0 +1,54 @@
"""Adapter registry — register and instantiate world adapters by name.
Usage::
registry = AdapterRegistry()
registry.register("mock", MockWorldAdapter)
adapter = registry.get("mock", some_kwarg="value")
"""
from __future__ import annotations
import logging
from typing import Any
from infrastructure.world.interface import WorldInterface
logger = logging.getLogger(__name__)
class AdapterRegistry:
    """Name → WorldInterface class registry with instantiation."""

    def __init__(self) -> None:
        # Mapping of adapter name -> WorldInterface subclass (not instance).
        self._adapters: dict[str, type[WorldInterface]] = {}

    def register(self, name: str, cls: type[WorldInterface]) -> None:
        """Register an adapter class under *name*.

        Raises ``TypeError`` if *cls* is not a ``WorldInterface`` subclass.
        Re-registering an existing name replaces it (logged as a warning).
        """
        if not (isinstance(cls, type) and issubclass(cls, WorldInterface)):
            raise TypeError(f"{cls!r} is not a WorldInterface subclass")
        if name in self._adapters:
            logger.warning("Overwriting adapter %r (was %r)", name, self._adapters[name])
        self._adapters[name] = cls
        # Bug fix: the format string was "%s%s" with no separator, which
        # logged e.g. "Registered world adapter: mockMockWorldAdapter".
        logger.info("Registered world adapter: %s (%s)", name, cls.__name__)

    def get(self, name: str, **kwargs: Any) -> WorldInterface:
        """Instantiate and return the adapter registered as *name*.

        Raises ``KeyError`` if *name* is not registered. Extra keyword
        arguments are forwarded to the adapter's constructor.
        """
        try:
            cls = self._adapters[name]
        except KeyError:
            # Same exception type as before, but with an actionable message
            # listing what IS registered.
            raise KeyError(
                f"No world adapter registered under {name!r}; "
                f"known adapters: {sorted(self._adapters)}"
            ) from None
        return cls(**kwargs)

    def list_adapters(self) -> list[str]:
        """Return sorted list of registered adapter names."""
        return sorted(self._adapters)

    def __contains__(self, name: str) -> bool:
        return name in self._adapters

    def __len__(self) -> int:
        return len(self._adapters)

View File

@@ -0,0 +1,71 @@
"""Canonical data types for world interaction.
These mirror the PerceptionOutput / CommandInput types from PR #864's
``morrowind/schemas.py``. When that PR merges, these can be replaced
with re-exports — but until then they serve as the stable contract for
every WorldInterface adapter.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import UTC, datetime
from enum import StrEnum
class ActionStatus(StrEnum):
    """Outcome of an action dispatched to the world."""

    SUCCESS = "success"  # action completed as requested
    FAILURE = "failure"  # action attempted but did not succeed
    PENDING = "pending"  # action accepted; outcome not yet known
    NOOP = "noop"        # nothing was done (e.g. idle cycle)
@dataclass
class PerceptionOutput:
    """Structured world state returned by ``WorldInterface.observe()``.

    Attributes:
        timestamp: When the observation was captured.
        location: Free-form location descriptor (e.g. "Balmora, Fighters Guild").
        entities: List of nearby entity descriptions.
        events: Recent game events since last observation.
        raw: Optional raw / engine-specific payload for advanced consumers.
    """

    # default_factory captures the construction moment, timezone-aware UTC.
    timestamp: datetime = field(default_factory=lambda: datetime.now(UTC))
    location: str = ""
    entities: list[str] = field(default_factory=list)
    events: list[str] = field(default_factory=list)
    # Engine-specific payload; keys and values are adapter-defined.
    raw: dict = field(default_factory=dict)
@dataclass
class CommandInput:
    """Action command sent via ``WorldInterface.act()``.

    Attributes:
        action: Verb / action name (e.g. "move", "attack", "use_item").
        target: Optional target identifier.
        parameters: Arbitrary key-value payload for engine-specific params.
    """

    # Only `action` is required; target and parameters are optional extras.
    action: str
    target: str | None = None
    parameters: dict = field(default_factory=dict)
@dataclass
class ActionResult:
    """Outcome returned by ``WorldInterface.act()``.

    Attributes:
        status: Whether the action succeeded, failed, etc.
        message: Human-readable description of the outcome.
        data: Arbitrary engine-specific result payload.
    """

    # Defaults describe a successful action with no extra detail.
    status: ActionStatus = ActionStatus.SUCCESS
    message: str = ""
    data: dict = field(default_factory=dict)

286
src/loop/heartbeat.py Normal file
View File

@@ -0,0 +1,286 @@
"""Heartbeat v2 — WorldInterface-driven cognitive loop.
Drives real observe → reason → act → reflect cycles through whatever
``WorldInterface`` adapter is connected. When no adapter is present,
gracefully falls back to the existing ``run_cycle()`` behaviour.
Usage::
heartbeat = Heartbeat(world=adapter, interval=30.0)
await heartbeat.run_once() # single cycle
await heartbeat.start() # background loop
heartbeat.stop() # graceful shutdown
"""
from __future__ import annotations
import asyncio
import logging
import time
from dataclasses import dataclass, field
from datetime import UTC, datetime
from loop.phase1_gather import gather
from loop.phase2_reason import reason
from loop.phase3_act import act
from loop.schema import ContextPayload
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Cycle log entry
# ---------------------------------------------------------------------------
@dataclass
class CycleRecord:
    """One observe → reason → act → reflect cycle."""

    # Monotonically increasing cycle number (1-based, per Heartbeat).
    cycle_id: int
    # ISO-8601 timestamp captured at cycle start.
    timestamp: str
    # Snapshot of the world observation (location/entities/events), empty
    # when running passively without an adapter.
    observation: dict = field(default_factory=dict)
    reasoning_summary: str = ""
    action_taken: str = ""
    action_status: str = ""
    reflect_notes: str = ""
    # Wall-clock duration of the whole cycle in milliseconds.
    duration_ms: int = 0
# ---------------------------------------------------------------------------
# Heartbeat
# ---------------------------------------------------------------------------
class Heartbeat:
"""Manages the recurring cognitive loop with optional world adapter.
Parameters
----------
world:
A ``WorldInterface`` instance (or ``None`` for passive mode).
interval:
Seconds between heartbeat ticks. 30 s for embodied mode,
300 s (5 min) for passive thinking.
on_cycle:
Optional async callback invoked after each cycle with the
``CycleRecord``.
"""
def __init__(
self,
*,
world=None, # WorldInterface | None
interval: float = 30.0,
on_cycle=None, # Callable[[CycleRecord], Awaitable[None]] | None
) -> None:
self._world = world
self._interval = interval
self._on_cycle = on_cycle
self._cycle_count: int = 0
self._running = False
self._task: asyncio.Task | None = None
self.history: list[CycleRecord] = []
# -- properties --------------------------------------------------------
@property
def world(self):
return self._world
@world.setter
def world(self, adapter) -> None:
self._world = adapter
@property
def interval(self) -> float:
return self._interval
@interval.setter
def interval(self, value: float) -> None:
self._interval = max(1.0, value)
@property
def is_running(self) -> bool:
return self._running
@property
def cycle_count(self) -> int:
return self._cycle_count
# -- single cycle ------------------------------------------------------
async def run_once(self) -> CycleRecord:
"""Execute one full heartbeat cycle.
If a world adapter is present:
1. Observe — ``world.observe()``
2. Gather + Reason + Act via the three-phase loop, with the
observation injected into the payload
3. Dispatch the decided action back to ``world.act()``
4. Reflect — log the cycle
Without an adapter the existing loop runs on a timer-sourced
payload (passive thinking).
"""
self._cycle_count += 1
start = time.monotonic()
record = CycleRecord(
cycle_id=self._cycle_count,
timestamp=datetime.now(UTC).isoformat(),
)
if self._world is not None:
record = await self._embodied_cycle(record)
else:
record = await self._passive_cycle(record)
record.duration_ms = int((time.monotonic() - start) * 1000)
self.history.append(record)
# Broadcast via WebSocket (best-effort)
await self._broadcast(record)
if self._on_cycle:
await self._on_cycle(record)
logger.info(
"Heartbeat cycle #%d complete (%d ms) — action=%s status=%s",
record.cycle_id,
record.duration_ms,
record.action_taken or "(passive)",
record.action_status or "n/a",
)
return record
# -- background loop ---------------------------------------------------
async def start(self) -> None:
"""Start the recurring heartbeat loop as a background task."""
if self._running:
logger.warning("Heartbeat already running")
return
self._running = True
self._task = asyncio.current_task() or asyncio.ensure_future(self._loop())
if self._task is not asyncio.current_task():
return
await self._loop()
async def _loop(self) -> None:
logger.info(
"Heartbeat loop started (interval=%.1fs, adapter=%s)",
self._interval,
type(self._world).__name__ if self._world else "None",
)
while self._running:
try:
await self.run_once()
except Exception:
logger.exception("Heartbeat cycle failed")
await asyncio.sleep(self._interval)
def stop(self) -> None:
"""Signal the heartbeat loop to stop after the current cycle."""
self._running = False
logger.info("Heartbeat stop requested")
# -- internal: embodied cycle ------------------------------------------
async def _embodied_cycle(self, record: CycleRecord) -> CycleRecord:
"""Cycle with a live world adapter: observe → reason → act → reflect."""
from infrastructure.world.types import ActionStatus, CommandInput
# 1. Observe
perception = self._world.observe()
record.observation = {
"location": perception.location,
"entities": perception.entities,
"events": perception.events,
}
# 2. Feed observation into the three-phase loop
obs_content = (
f"Location: {perception.location}\n"
f"Entities: {', '.join(perception.entities)}\n"
f"Events: {', '.join(perception.events)}"
)
payload = ContextPayload(
source="world",
content=obs_content,
metadata={"perception": record.observation},
)
gathered = gather(payload)
reasoned = reason(gathered)
acted = act(reasoned)
# Extract action decision from the acted payload
action_name = acted.metadata.get("action", "idle")
action_target = acted.metadata.get("action_target")
action_params = acted.metadata.get("action_params", {})
record.reasoning_summary = acted.metadata.get("reasoning", acted.content[:200])
# 3. Dispatch action to world
if action_name != "idle":
cmd = CommandInput(
action=action_name,
target=action_target,
parameters=action_params,
)
result = self._world.act(cmd)
record.action_taken = action_name
record.action_status = result.status.value
else:
record.action_taken = "idle"
record.action_status = ActionStatus.NOOP.value
# 4. Reflect
record.reflect_notes = (
f"Observed {len(perception.entities)} entities at {perception.location}. "
f"Action: {record.action_taken}{record.action_status}."
)
return record
# -- internal: passive cycle -------------------------------------------
async def _passive_cycle(self, record: CycleRecord) -> CycleRecord:
"""Cycle without a world adapter — existing think_once() behaviour."""
payload = ContextPayload(
source="timer",
content="heartbeat",
metadata={"mode": "passive"},
)
gathered = gather(payload)
reasoned = reason(gathered)
acted = act(reasoned)
record.reasoning_summary = acted.content[:200]
record.action_taken = "think"
record.action_status = "noop"
record.reflect_notes = "Passive thinking cycle — no world adapter connected."
return record
# -- broadcast ---------------------------------------------------------
async def _broadcast(self, record: CycleRecord) -> None:
"""Emit heartbeat cycle data via WebSocket (best-effort)."""
try:
from infrastructure.ws_manager.handler import ws_manager
await ws_manager.broadcast(
"heartbeat.cycle",
{
"cycle_id": record.cycle_id,
"timestamp": record.timestamp,
"action": record.action_taken,
"action_status": record.action_status,
"reasoning_summary": record.reasoning_summary[:300],
"observation": record.observation,
"duration_ms": record.duration_ms,
},
)
except (ImportError, AttributeError, ConnectionError, RuntimeError) as exc:
logger.debug("Heartbeat broadcast skipped: %s", exc)

View File

@@ -17,9 +17,9 @@ logger = logging.getLogger(__name__)
def gather(payload: ContextPayload) -> ContextPayload:
"""Accept raw input and return structured context for reasoning.
Stub: tags the payload with phase=gather and logs transit.
Timmy will flesh this out with context selection, memory lookup,
adapter polling, and attention-residual weighting.
When the payload carries a ``perception`` dict in metadata (injected by
the heartbeat loop from a WorldInterface adapter), that observation is
folded into the gathered context. Otherwise behaves as before.
"""
logger.info(
"Phase 1 (Gather) received: source=%s content_len=%d tokens=%d",
@@ -28,7 +28,20 @@ def gather(payload: ContextPayload) -> ContextPayload:
payload.token_count,
)
result = payload.with_metadata(phase="gather", gathered=True)
extra: dict = {"phase": "gather", "gathered": True}
# Enrich with world observation when present
perception = payload.metadata.get("perception")
if perception:
extra["world_observation"] = perception
logger.info(
"Phase 1 (Gather) world observation: location=%s entities=%d events=%d",
perception.get("location", "?"),
len(perception.get("entities", [])),
len(perception.get("events", [])),
)
result = payload.with_metadata(**extra)
logger.info(
"Phase 1 (Gather) produced: metadata_keys=%s",

View File

@@ -22,8 +22,6 @@ from dataclasses import dataclass
from datetime import UTC, datetime
from pathlib import Path
from config import settings
logger = logging.getLogger(__name__)
DB_PATH = Path("data/spark.db")
@@ -49,7 +47,7 @@ def _get_conn() -> Generator[sqlite3.Connection, None, None]:
with closing(sqlite3.connect(str(DB_PATH))) as conn:
conn.row_factory = sqlite3.Row
conn.execute("PRAGMA journal_mode=WAL")
conn.execute(f"PRAGMA busy_timeout={settings.db_busy_timeout_ms}")
conn.execute("PRAGMA busy_timeout=5000")
conn.execute("""
CREATE TABLE IF NOT EXISTS spark_predictions (
id TEXT PRIMARY KEY,

View File

@@ -19,8 +19,6 @@ from dataclasses import dataclass
from datetime import UTC, datetime
from pathlib import Path
from config import settings
logger = logging.getLogger(__name__)
DB_PATH = Path("data/spark.db")
@@ -65,7 +63,7 @@ def _get_conn() -> Generator[sqlite3.Connection, None, None]:
with closing(sqlite3.connect(str(DB_PATH))) as conn:
conn.row_factory = sqlite3.Row
conn.execute("PRAGMA journal_mode=WAL")
conn.execute(f"PRAGMA busy_timeout={settings.db_busy_timeout_ms}")
conn.execute("PRAGMA busy_timeout=5000")
conn.execute("""
CREATE TABLE IF NOT EXISTS spark_events (
id TEXT PRIMARY KEY,

View File

@@ -473,6 +473,69 @@ def consult_grok(query: str) -> str:
return response
def web_fetch(url: str, max_tokens: int = 4000) -> str:
    """Fetch a web page and return its main text content.

    Downloads the URL, extracts readable text using trafilatura, and
    truncates to a token budget. Use this to read full articles, docs,
    or blog posts that web_search only returns snippets for.

    Args:
        url: The URL to fetch (must start with http:// or https://).
        max_tokens: Maximum approximate token budget (default 4000).
            Text is truncated to max_tokens * 4 characters.

    Returns:
        Extracted text content, or an error message on failure.
    """
    if not url or not url.startswith(("http://", "https://")):
        return f"Error: invalid URL — must start with http:// or https://: {url!r}"

    # Both dependencies are optional; report a pip hint instead of raising
    # so the tool degrades gracefully inside the agent loop.
    try:
        import requests as _requests
    except ImportError:
        return "Error: 'requests' package is not installed. Install with: pip install requests"
    try:
        import trafilatura
    except ImportError:
        return (
            "Error: 'trafilatura' package is not installed. Install with: pip install trafilatura"
        )

    try:
        resp = _requests.get(
            url,
            timeout=15,
            headers={"User-Agent": "TimmyResearchBot/1.0"},
        )
        resp.raise_for_status()
    except _requests.exceptions.Timeout:
        return f"Error: request timed out after 15 seconds for {url}"
    except _requests.exceptions.HTTPError as exc:
        return f"Error: HTTP {exc.response.status_code} for {url}"
    except _requests.exceptions.RequestException as exc:
        # Bug fix: URL and exception text ran together with no separator
        # in the original message ("...{url}{exc}").
        return f"Error: failed to fetch {url}: {exc}"

    text = trafilatura.extract(resp.text, include_tables=True, include_links=True)
    if not text:
        return f"Error: could not extract readable content from {url}"

    # Rough chars-per-token heuristic: ~4 characters per token.
    char_budget = max_tokens * 4
    if len(text) > char_budget:
        text = text[:char_budget] + f"\n\n[…truncated to ~{max_tokens} tokens]"
    return text
def _register_web_fetch_tool(toolkit: Toolkit) -> None:
    """Register the web_fetch tool for full-page content extraction.

    Failures are logged and swallowed so a broken optional tool never
    prevents the rest of the toolkit from loading.
    """
    try:
        toolkit.register(web_fetch, name="web_fetch")
    except Exception as exc:
        # Bug fix: the original message said "Tool execution failed", but
        # this branch runs when *registration* fails, not execution.
        logger.warning("Tool registration failed (web_fetch): %s", exc)
def _register_core_tools(toolkit: Toolkit, base_path: Path) -> None:
"""Register core execution and file tools."""
# Python execution
@@ -672,6 +735,7 @@ def create_full_toolkit(base_dir: str | Path | None = None):
base_path = Path(base_dir) if base_dir else Path(settings.repo_root)
_register_core_tools(toolkit, base_path)
_register_web_fetch_tool(toolkit)
_register_grok_tool(toolkit)
_register_memory_tools(toolkit)
_register_agentic_loop_tool(toolkit)
@@ -829,6 +893,11 @@ def _analysis_tool_catalog() -> dict:
"description": "Evaluate mathematical expressions with exact results",
"available_in": ["orchestrator"],
},
"web_fetch": {
"name": "Web Fetch",
"description": "Fetch a web page and extract clean readable text (trafilatura)",
"available_in": ["orchestrator"],
},
}

View File

@@ -68,13 +68,11 @@ class VoiceTTS:
logger.error("VoiceTTS: speech failed — %s", exc)
def set_rate(self, rate: int) -> None:
"""Set the speech rate in words per minute."""
self._rate = rate
if self._engine:
self._engine.setProperty("rate", rate)
def set_volume(self, volume: float) -> None:
"""Set the speech volume (0.0 to 1.0)."""
self._volume = max(0.0, min(1.0, volume))
if self._engine:
self._engine.setProperty("volume", self._volume)
@@ -94,7 +92,6 @@ class VoiceTTS:
return []
def set_voice(self, voice_id: str) -> None:
"""Set the active voice by its system ID."""
if self._engine:
self._engine.setProperty("voice", voice_id)

View File

@@ -20,9 +20,6 @@ pytestmark = pytest.mark.skipif(
@pytest.fixture(scope="module", autouse=True)
def setup_prod_env():
"""Ensure a clean environment and run the full installation."""
if not SETUP_SCRIPT_PATH.exists():
pytest.skip(f"Setup script not found at {SETUP_SCRIPT_PATH}")
if PROD_PROJECT_DIR.exists():
shutil.rmtree(PROD_PROJECT_DIR)

View File

@@ -242,6 +242,145 @@ class TestCloseAll:
conn.execute("SELECT 1")
class TestConnectionLeaks:
    """Test that connections do not leak."""

    def test_get_connection_after_close_returns_fresh_connection(self, tmp_path):
        """After close, get_connection() returns a new working connection."""
        pool = ConnectionPool(tmp_path / "test.db")
        conn1 = pool.get_connection()
        pool.close_connection()
        # A closed connection must not be handed out again.
        conn2 = pool.get_connection()
        assert conn2 is not conn1
        # New connection must be usable
        cursor = conn2.execute("SELECT 1")
        assert cursor.fetchone()[0] == 1
        pool.close_connection()

    def test_context_manager_does_not_leak_connection(self, tmp_path):
        """After context manager exit, thread-local conn is cleared."""
        pool = ConnectionPool(tmp_path / "test.db")
        with pool.connection():
            pass
        # Thread-local should be cleaned up
        # NOTE(review): white-box assertion on the pool's private _local
        # attribute — intentional, pins the cleanup contract.
        assert pool._local.conn is None

    def test_context_manager_exception_does_not_leak_connection(self, tmp_path):
        """Connection is cleaned up even when an exception occurs."""
        pool = ConnectionPool(tmp_path / "test.db")
        try:
            with pool.connection():
                raise RuntimeError("boom")
        except RuntimeError:
            pass
        # Cleanup must also run on the error path.
        assert pool._local.conn is None

    def test_threads_do_not_leak_into_each_other(self, tmp_path):
        """A connection opened in one thread is invisible to another."""
        pool = ConnectionPool(tmp_path / "test.db")
        # Open a connection on main thread
        pool.get_connection()
        visible_from_other_thread = []

        def check():
            # Inside the worker thread, the thread-local must expose no
            # connection (either attribute absent or explicitly None).
            has_conn = hasattr(pool._local, "conn") and pool._local.conn is not None
            visible_from_other_thread.append(has_conn)

        t = threading.Thread(target=check)
        t.start()
        t.join()
        assert visible_from_other_thread == [False]
        pool.close_connection()

    def test_repeated_open_close_cycles(self, tmp_path):
        """Repeated open/close cycles do not accumulate leaked connections."""
        pool = ConnectionPool(tmp_path / "test.db")
        for _ in range(50):
            with pool.connection() as conn:
                conn.execute("SELECT 1")
            # After each cycle, connection should be cleaned up
            assert pool._local.conn is None
class TestPragmaApplication:
    """Test that SQLite pragmas can be applied and persist on pooled connections.

    The codebase uses WAL journal mode and busy_timeout pragmas on connections
    obtained from the pool. These tests verify that pattern works correctly.
    """

    def test_wal_journal_mode_persists(self, tmp_path):
        """WAL journal mode set on a pooled connection persists for its lifetime."""
        pool = ConnectionPool(tmp_path / "test.db")
        conn = pool.get_connection()
        conn.execute("PRAGMA journal_mode=WAL")
        # SQLite reports the active mode in lowercase.
        mode = conn.execute("PRAGMA journal_mode").fetchone()[0]
        assert mode == "wal"
        # Same connection should retain the pragma
        same_conn = pool.get_connection()
        mode2 = same_conn.execute("PRAGMA journal_mode").fetchone()[0]
        assert mode2 == "wal"
        pool.close_connection()

    def test_busy_timeout_persists(self, tmp_path):
        """busy_timeout pragma set on a pooled connection persists."""
        pool = ConnectionPool(tmp_path / "test.db")
        conn = pool.get_connection()
        conn.execute("PRAGMA busy_timeout=5000")
        # busy_timeout reads back as milliseconds.
        timeout = conn.execute("PRAGMA busy_timeout").fetchone()[0]
        assert timeout == 5000
        pool.close_connection()

    def test_pragmas_apply_per_connection(self, tmp_path):
        """Pragmas set on one thread's connection are independent of another's."""
        pool = ConnectionPool(tmp_path / "test.db")
        conn_main = pool.get_connection()
        # cache_size is a session-level pragma, so it is a good probe for
        # per-connection isolation.
        conn_main.execute("PRAGMA cache_size=9999")
        other_cache = []

        def check_pragma():
            conn = pool.get_connection()
            # Don't set cache_size — should get the default, not 9999
            val = conn.execute("PRAGMA cache_size").fetchone()[0]
            other_cache.append(val)
            pool.close_connection()

        t = threading.Thread(target=check_pragma)
        t.start()
        t.join()
        # Other thread's connection should NOT have our custom cache_size
        assert other_cache[0] != 9999
        pool.close_connection()

    def test_session_pragma_resets_on_new_connection(self, tmp_path):
        """Session-level pragmas (cache_size) reset on a new connection."""
        pool = ConnectionPool(tmp_path / "test.db")
        conn1 = pool.get_connection()
        conn1.execute("PRAGMA cache_size=9999")
        assert conn1.execute("PRAGMA cache_size").fetchone()[0] == 9999
        pool.close_connection()
        conn2 = pool.get_connection()
        cache = conn2.execute("PRAGMA cache_size").fetchone()[0]
        # New connection gets default cache_size, not the previous value
        assert cache != 9999
        pool.close_connection()

    def test_wal_mode_via_context_manager(self, tmp_path):
        """WAL mode can be set within a context manager block."""
        pool = ConnectionPool(tmp_path / "test.db")
        with pool.connection() as conn:
            conn.execute("PRAGMA journal_mode=WAL")
            mode = conn.execute("PRAGMA journal_mode").fetchone()[0]
            assert mode == "wal"
class TestIntegration:
"""Integration tests for real-world usage patterns."""

View File

View File

@@ -0,0 +1,129 @@
"""Tests for the WorldInterface contract and type system."""
import pytest
from infrastructure.world.interface import WorldInterface
from infrastructure.world.types import (
ActionResult,
ActionStatus,
CommandInput,
PerceptionOutput,
)
# ---------------------------------------------------------------------------
# Type construction
# ---------------------------------------------------------------------------
class TestPerceptionOutput:
    """Construction behaviour of the PerceptionOutput dataclass."""

    def test_defaults(self):
        """A bare PerceptionOutput starts with empty fields and a timestamp."""
        perception = PerceptionOutput()
        assert perception.location == ""
        assert perception.entities == []
        assert perception.events == []
        assert perception.raw == {}
        assert perception.timestamp is not None

    def test_custom_values(self):
        """Explicit constructor arguments are stored as given."""
        perception = PerceptionOutput(
            location="Balmora",
            entities=["Guard", "Merchant"],
            events=["door_opened"],
        )
        assert perception.location == "Balmora"
        assert len(perception.entities) == 2
        assert "door_opened" in perception.events
class TestCommandInput:
    """Construction behaviour of the CommandInput dataclass."""

    def test_minimal(self):
        """Only the action is required; target and parameters default to empty."""
        command = CommandInput(action="move")
        assert command.action == "move"
        assert command.target is None
        assert command.parameters == {}

    def test_with_target_and_params(self):
        """Target and parameters round-trip through the constructor."""
        command = CommandInput(
            action="attack", target="Rat", parameters={"weapon": "sword"}
        )
        assert command.target == "Rat"
        assert command.parameters["weapon"] == "sword"
class TestActionResult:
    """Construction behaviour of the ActionResult dataclass."""

    def test_defaults(self):
        """A bare ActionResult defaults to SUCCESS with an empty message."""
        result = ActionResult()
        assert result.status == ActionStatus.SUCCESS
        assert result.message == ""

    def test_failure(self):
        """Failure status and message are stored as given."""
        result = ActionResult(status=ActionStatus.FAILURE, message="blocked")
        assert result.status == ActionStatus.FAILURE
class TestActionStatus:
    """The ActionStatus enum exposes the expected string values."""

    def test_values(self):
        """Each enum member maps to its lowercase string value."""
        expected = {
            ActionStatus.SUCCESS: "success",
            ActionStatus.FAILURE: "failure",
            ActionStatus.PENDING: "pending",
            ActionStatus.NOOP: "noop",
        }
        for member, value in expected.items():
            assert member.value == value
# ---------------------------------------------------------------------------
# Abstract contract
# ---------------------------------------------------------------------------
class TestWorldInterfaceContract:
    """Verify the ABC cannot be instantiated directly.

    Each test defines a subclass missing exactly one abstract method
    (observe/act/speak) and checks Python's ABC machinery rejects it with
    TypeError; the final test confirms a complete subclass instantiates.
    """

    def test_cannot_instantiate(self):
        # Instantiating an ABC with unimplemented abstract methods raises TypeError.
        with pytest.raises(TypeError):
            WorldInterface()

    def test_subclass_must_implement_observe(self):
        # Missing observe() — still abstract, so instantiation must fail.
        class Incomplete(WorldInterface):
            def act(self, command):
                pass

            def speak(self, message, target=None):
                pass

        with pytest.raises(TypeError):
            Incomplete()

    def test_subclass_must_implement_act(self):
        # Missing act() — still abstract, so instantiation must fail.
        class Incomplete(WorldInterface):
            def observe(self):
                return PerceptionOutput()

            def speak(self, message, target=None):
                pass

        with pytest.raises(TypeError):
            Incomplete()

    def test_subclass_must_implement_speak(self):
        # Missing speak() — still abstract, so instantiation must fail.
        class Incomplete(WorldInterface):
            def observe(self):
                return PerceptionOutput()

            def act(self, command):
                return ActionResult()

        with pytest.raises(TypeError):
            Incomplete()

    def test_complete_subclass_instantiates(self):
        # All three abstract methods implemented — instantiation succeeds and
        # the default is_connected property reports True.
        class Complete(WorldInterface):
            def observe(self):
                return PerceptionOutput()

            def act(self, command):
                return ActionResult()

            def speak(self, message, target=None):
                pass

        adapter = Complete()
        assert adapter.is_connected is True  # default
        assert isinstance(adapter.observe(), PerceptionOutput)
        assert isinstance(adapter.act(CommandInput(action="test")), ActionResult)

View File

@@ -0,0 +1,80 @@
"""Tests for the MockWorldAdapter — full observe/act/speak cycle."""
from infrastructure.world.adapters.mock import MockWorldAdapter
from infrastructure.world.types import ActionStatus, CommandInput, PerceptionOutput
class TestMockWorldAdapter:
    """Exercise the MockWorldAdapter: observe/act/speak, logging, and lifecycle."""

    def test_observe_returns_perception(self):
        """observe() wraps the configured location in a PerceptionOutput."""
        adapter = MockWorldAdapter(location="Vivec")
        perception = adapter.observe()
        assert isinstance(perception, PerceptionOutput)
        assert perception.location == "Vivec"
        # The mock adapter tags its raw payload so callers can identify it.
        assert perception.raw == {"adapter": "mock"}

    def test_observe_entities(self):
        """Configured entities are echoed back verbatim by observe()."""
        adapter = MockWorldAdapter(entities=["Jiub", "Silt Strider"])
        perception = adapter.observe()
        assert perception.entities == ["Jiub", "Silt Strider"]

    def test_act_logs_command(self):
        """act() succeeds, mentions the action in its message, and logs the command."""
        adapter = MockWorldAdapter()
        cmd = CommandInput(action="move", target="north")
        result = adapter.act(cmd)
        assert result.status == ActionStatus.SUCCESS
        assert "move" in result.message
        assert len(adapter.action_log) == 1
        assert adapter.action_log[0].command.action == "move"

    def test_act_multiple_commands(self):
        """Each act() call appends one entry to the action log."""
        adapter = MockWorldAdapter()
        adapter.act(CommandInput(action="attack"))
        adapter.act(CommandInput(action="defend"))
        adapter.act(CommandInput(action="retreat"))
        assert len(adapter.action_log) == 3

    def test_speak_logs_message(self):
        """speak() records the message with a None target by default."""
        adapter = MockWorldAdapter()
        adapter.speak("Hello, traveler!")
        assert len(adapter.speech_log) == 1
        assert adapter.speech_log[0]["message"] == "Hello, traveler!"
        assert adapter.speech_log[0]["target"] is None

    def test_speak_with_target(self):
        """An explicit speak() target is recorded in the speech log."""
        adapter = MockWorldAdapter()
        adapter.speak("Die, scum!", target="Cliff Racer")
        assert adapter.speech_log[0]["target"] == "Cliff Racer"

    def test_lifecycle(self):
        """connect()/disconnect() toggle is_connected."""
        adapter = MockWorldAdapter()
        assert adapter.is_connected is False
        adapter.connect()
        assert adapter.is_connected is True
        adapter.disconnect()
        assert adapter.is_connected is False

    def test_full_observe_act_speak_cycle(self):
        """Acceptance criterion: full observe/act/speak cycle passes."""
        adapter = MockWorldAdapter(
            location="Seyda Neen",
            entities=["Fargoth", "Hrisskar"],
            events=["quest_started"],
        )
        adapter.connect()
        # Observe
        perception = adapter.observe()
        assert perception.location == "Seyda Neen"
        assert len(perception.entities) == 2
        assert "quest_started" in perception.events
        # Act
        result = adapter.act(CommandInput(action="talk", target="Fargoth"))
        assert result.status == ActionStatus.SUCCESS
        # Speak
        adapter.speak("Where is your ring, Fargoth?", target="Fargoth")
        assert len(adapter.speech_log) == 1
        adapter.disconnect()
        assert adapter.is_connected is False

View File

@@ -0,0 +1,68 @@
"""Tests for the adapter registry."""
import pytest
from infrastructure.world.adapters.mock import MockWorldAdapter
from infrastructure.world.registry import AdapterRegistry
class TestAdapterRegistry:
    """Behaviour of AdapterRegistry: registration, lookup, and introspection."""

    def test_register_and_get(self):
        """get() instantiates the registered adapter class."""
        reg = AdapterRegistry()
        reg.register("mock", MockWorldAdapter)
        adapter = reg.get("mock")
        assert isinstance(adapter, MockWorldAdapter)

    def test_register_with_kwargs(self):
        """Keyword arguments passed to get() are forwarded to the constructor."""
        reg = AdapterRegistry()
        reg.register("mock", MockWorldAdapter)
        adapter = reg.get("mock", location="Custom Room")
        assert adapter._location == "Custom Room"

    def test_get_unknown_raises(self):
        """Looking up an unregistered name raises KeyError."""
        reg = AdapterRegistry()
        with pytest.raises(KeyError):
            reg.get("nonexistent")

    def test_register_non_subclass_raises(self):
        """Classes that are not WorldInterface subclasses are rejected."""
        reg = AdapterRegistry()
        with pytest.raises(TypeError):
            reg.register("bad", dict)

    def test_list_adapters(self):
        """list_adapters() returns registered names sorted alphabetically."""
        reg = AdapterRegistry()
        reg.register("beta", MockWorldAdapter)
        reg.register("alpha", MockWorldAdapter)
        assert reg.list_adapters() == ["alpha", "beta"]

    def test_contains(self):
        """`in` works for registered and unregistered names."""
        reg = AdapterRegistry()
        reg.register("mock", MockWorldAdapter)
        assert "mock" in reg
        assert "other" not in reg

    def test_len(self):
        """len() reflects the number of registered adapters."""
        reg = AdapterRegistry()
        assert len(reg) == 0
        reg.register("mock", MockWorldAdapter)
        assert len(reg) == 1

    def test_overwrite_warns(self, caplog):
        """Re-registering an existing name logs an 'Overwriting' warning."""
        import logging

        reg = AdapterRegistry()
        reg.register("mock", MockWorldAdapter)
        with caplog.at_level(logging.WARNING):
            reg.register("mock", MockWorldAdapter)
        assert "Overwriting" in caplog.text
class TestModuleLevelRegistry:
    """Test the convenience functions in infrastructure.world.__init__."""

    def test_register_and_get(self):
        """register_adapter/get_adapter round-trip through the module-level registry."""
        from infrastructure.world import get_adapter, register_adapter

        # NOTE(review): this registers into the shared module-level registry and
        # never cleans up, so "test_mock" leaks into other tests in the same
        # session — confirm whether an unregister/reset API exists.
        register_adapter("test_mock", MockWorldAdapter)
        adapter = get_adapter("test_mock")
        assert isinstance(adapter, MockWorldAdapter)

View File

@@ -0,0 +1,44 @@
"""Tests for the TES3MP stub adapter."""
import pytest
from infrastructure.world.adapters.tes3mp import TES3MPWorldAdapter
from infrastructure.world.types import CommandInput
class TestTES3MPStub:
    """Acceptance criterion: stub imports cleanly and raises NotImplementedError.

    The five stubbed methods share identical behaviour (raise
    NotImplementedError mentioning their own name), so they are covered by a
    single parametrized test instead of five copy-pasted ones.
    """

    def test_instantiates(self):
        """Constructor stores host/port without attempting any connection."""
        adapter = TES3MPWorldAdapter(host="127.0.0.1", port=25565)
        assert adapter._host == "127.0.0.1"
        assert adapter._port == 25565

    def test_is_connected_default_false(self):
        """A fresh stub adapter reports not-connected."""
        adapter = TES3MPWorldAdapter()
        assert adapter.is_connected is False

    @pytest.mark.parametrize(
        ("method", "args"),
        [
            ("connect", ()),
            ("disconnect", ()),
            ("observe", ()),
            ("act", (CommandInput(action="move"),)),
            ("speak", ("Hello",)),
        ],
    )
    def test_stub_methods_raise(self, method, args):
        """Every stubbed method raises NotImplementedError naming itself."""
        adapter = TES3MPWorldAdapter()
        # match=method keeps the original per-method message assertions intact.
        with pytest.raises(NotImplementedError, match=method):
            getattr(adapter, method)(*args)

View File

@@ -0,0 +1,176 @@
"""Tests for Heartbeat v2 — WorldInterface-driven cognitive loop.
Acceptance criteria:
- With MockWorldAdapter: heartbeat runs, logs show observe→reason→act→reflect
- Without adapter: existing think_once() behaviour unchanged
- WebSocket broadcasts include current action and reasoning summary
"""
from unittest.mock import AsyncMock, patch
import pytest
from infrastructure.world.adapters.mock import MockWorldAdapter
from infrastructure.world.types import ActionStatus
from loop.heartbeat import CycleRecord, Heartbeat
@pytest.fixture
def mock_adapter():
    """Provide a connected MockWorldAdapter with a small, fixed world state."""
    adapter = MockWorldAdapter(
        location="Balmora",
        entities=["Guard", "Merchant"],
        events=["player_entered"],
    )
    # Tests expect an already-connected adapter.
    adapter.connect()
    return adapter
class TestHeartbeatWithAdapter:
    """With MockWorldAdapter: heartbeat runs full embodied cycle."""

    @pytest.mark.asyncio
    async def test_run_once_returns_cycle_record(self, mock_adapter):
        """run_once() yields a CycleRecord numbered from 1."""
        hb = Heartbeat(world=mock_adapter)
        record = await hb.run_once()
        assert isinstance(record, CycleRecord)
        assert record.cycle_id == 1

    @pytest.mark.asyncio
    async def test_observation_populated(self, mock_adapter):
        """The record's observation mirrors the adapter's world state."""
        hb = Heartbeat(world=mock_adapter)
        record = await hb.run_once()
        assert record.observation["location"] == "Balmora"
        assert "Guard" in record.observation["entities"]
        assert "player_entered" in record.observation["events"]

    @pytest.mark.asyncio
    async def test_action_dispatched_to_world(self, mock_adapter):
        """Act phase should dispatch to world.act() for non-idle actions."""
        hb = Heartbeat(world=mock_adapter)
        record = await hb.run_once()
        # The default loop phases don't set an explicit action, so it
        # falls through to "idle" → NOOP. That's correct behaviour —
        # the real LLM-powered reason phase will set action metadata.
        assert record.action_status in (
            ActionStatus.NOOP.value,
            ActionStatus.SUCCESS.value,
        )

    @pytest.mark.asyncio
    async def test_reflect_notes_present(self, mock_adapter):
        """Reflect phase notes mention the observed location."""
        hb = Heartbeat(world=mock_adapter)
        record = await hb.run_once()
        assert "Balmora" in record.reflect_notes

    @pytest.mark.asyncio
    async def test_cycle_count_increments(self, mock_adapter):
        """Each run_once() bumps cycle_count and appends to history."""
        hb = Heartbeat(world=mock_adapter)
        await hb.run_once()
        await hb.run_once()
        assert hb.cycle_count == 2
        assert len(hb.history) == 2

    @pytest.mark.asyncio
    async def test_duration_recorded(self, mock_adapter):
        """Cycle duration is recorded (non-negative milliseconds)."""
        hb = Heartbeat(world=mock_adapter)
        record = await hb.run_once()
        assert record.duration_ms >= 0

    @pytest.mark.asyncio
    async def test_on_cycle_callback(self, mock_adapter):
        """The async on_cycle callback receives each completed CycleRecord."""
        received = []

        async def callback(record):
            received.append(record)

        hb = Heartbeat(world=mock_adapter, on_cycle=callback)
        await hb.run_once()
        assert len(received) == 1
        assert received[0].cycle_id == 1
class TestHeartbeatWithoutAdapter:
    """Without adapter: existing think_once() behaviour unchanged."""

    @pytest.mark.asyncio
    async def test_passive_cycle(self):
        """A world-less heartbeat runs a passive 'think' cycle with NOOP status."""
        hb = Heartbeat(world=None)
        record = await hb.run_once()
        assert record.action_taken == "think"
        assert record.action_status == "noop"
        assert "Passive" in record.reflect_notes

    @pytest.mark.asyncio
    async def test_passive_no_observation(self):
        """Without a world there is nothing to observe — observation stays empty."""
        hb = Heartbeat(world=None)
        record = await hb.run_once()
        assert record.observation == {}
class TestHeartbeatLifecycle:
    """Synchronous property/lifecycle behaviour of Heartbeat."""

    def test_interval_property(self):
        """The interval setter/getter round-trips values above the minimum."""
        heartbeat = Heartbeat(interval=60.0)
        assert heartbeat.interval == 60.0
        heartbeat.interval = 10.0
        assert heartbeat.interval == 10.0

    def test_interval_minimum(self):
        """Intervals below the floor are clamped up to 1.0 second."""
        heartbeat = Heartbeat()
        heartbeat.interval = 0.1
        assert heartbeat.interval == 1.0

    def test_world_property(self):
        """The world adapter can be attached after construction."""
        heartbeat = Heartbeat()
        assert heartbeat.world is None
        mock_world = MockWorldAdapter()
        heartbeat.world = mock_world
        assert heartbeat.world is mock_world

    def test_stop_sets_flag(self):
        """stop() on a never-started heartbeat leaves it not running."""
        heartbeat = Heartbeat()
        assert not heartbeat.is_running
        heartbeat.stop()
        assert not heartbeat.is_running
class TestHeartbeatBroadcast:
    """WebSocket broadcasts include action and reasoning summary."""

    @pytest.mark.asyncio
    async def test_broadcast_called(self, mock_adapter):
        # NOTE(review): the outer patch of "loop.heartbeat.ws_manager"
        # (create=True) is never asserted against — only the inner patch of
        # infrastructure.ws_manager.handler.ws_manager is checked. Confirm
        # whether the outer patch is still needed.
        with patch(
            "loop.heartbeat.ws_manager",
            create=True,
        ) as mock_ws:
            mock_ws.broadcast = AsyncMock()
            # Patch the import inside heartbeat
            with patch("infrastructure.ws_manager.handler.ws_manager") as ws_mod:
                ws_mod.broadcast = AsyncMock()
                hb = Heartbeat(world=mock_adapter)
                await hb.run_once()
                # One broadcast per cycle, on the "heartbeat.cycle" channel.
                ws_mod.broadcast.assert_called_once()
                call_args = ws_mod.broadcast.call_args
                assert call_args[0][0] == "heartbeat.cycle"
                # Payload must carry the fields the dashboard renders.
                data = call_args[0][1]
                assert "action" in data
                assert "reasoning_summary" in data
                assert "observation" in data
class TestHeartbeatLog:
    """Verify logging of observe→reason→act→reflect cycle."""

    @pytest.mark.asyncio
    async def test_embodied_cycle_logs(self, mock_adapter, caplog):
        """One embodied cycle logs each phase marker and a completion line."""
        import logging

        with caplog.at_level(logging.INFO):
            hb = Heartbeat(world=mock_adapter)
            await hb.run_once()
        messages = caplog.text
        # Phase markers come from the heartbeat's own INFO-level logging.
        assert "Phase 1 (Gather)" in messages
        assert "Phase 2 (Reason)" in messages
        assert "Phase 3 (Act)" in messages
        assert "Heartbeat cycle #1 complete" in messages

View File

@@ -0,0 +1,97 @@
"""Tests for load_queue corrupt JSON handling in loop_guard.py."""
from __future__ import annotations
import json
from pathlib import Path
import pytest
import scripts.loop_guard as lg
@pytest.fixture(autouse=True)
def _isolate(tmp_path, monkeypatch):
    """Redirect loop_guard paths to tmp_path for isolation."""
    # Autouse: every test in this module operates on throwaway files and a
    # fake Gitea endpoint, never the real repo state.
    monkeypatch.setattr(lg, "QUEUE_FILE", tmp_path / "queue.json")
    monkeypatch.setattr(lg, "IDLE_STATE_FILE", tmp_path / "idle_state.json")
    monkeypatch.setattr(lg, "CYCLE_RESULT_FILE", tmp_path / "cycle_result.json")
    monkeypatch.setattr(lg, "GITEA_API", "http://test:3000/api/v1")
    monkeypatch.setattr(lg, "REPO_SLUG", "owner/repo")
def test_load_queue_missing_file(tmp_path):
    """Missing queue file returns empty list."""
    # The _isolate fixture points QUEUE_FILE at tmp_path, where nothing exists.
    assert lg.load_queue() == []
def test_load_queue_valid_data(tmp_path):
    """Valid queue.json returns ready items."""
    items = [
        {"issue": 1, "title": "Ready issue", "ready": True},
        {"issue": 2, "title": "Not ready", "ready": False},
    ]
    lg.QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    lg.QUEUE_FILE.write_text(json.dumps(items, indent=2))
    ready = lg.load_queue()
    # Only the ready=True entry survives filtering.
    assert [entry["issue"] for entry in ready] == [1]
def test_load_queue_corrupt_json_logs_warning(tmp_path, capsys):
    """Corrupt queue.json returns empty list and logs warning."""
    lg.QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    lg.QUEUE_FILE.write_text("not valid json {{{")
    assert lg.load_queue() == []
    # load_queue reports corruption on stdout rather than raising.
    output = capsys.readouterr().out
    assert "WARNING" in output
    assert "Corrupt queue.json" in output
def test_load_queue_not_a_list(tmp_path):
    """Queue.json that is not a list returns empty list."""
    lg.QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    # Valid JSON, wrong top-level type — must be treated like an empty queue.
    lg.QUEUE_FILE.write_text(json.dumps({"not": "a list"}))
    assert lg.load_queue() == []
def test_load_queue_no_ready_items(tmp_path):
    """Queue with no ready items returns empty list."""
    items = [
        {"issue": 1, "title": "Not ready 1", "ready": False},
        {"issue": 2, "title": "Not ready 2", "ready": False},
    ]
    lg.QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    lg.QUEUE_FILE.write_text(json.dumps(items, indent=2))
    # Every entry is ready=False, so filtering leaves nothing.
    assert lg.load_queue() == []
def test_load_queue_oserror_logs_warning(tmp_path, monkeypatch, capsys):
    """OSError when reading queue.json returns empty list and logs warning."""
    lg.QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    lg.QUEUE_FILE.write_text("[]")
    # Mock Path.read_text to raise OSError
    # NOTE: this patches Path.read_text globally for the test's duration;
    # the name check keeps every other read_text call working normally.
    original_read_text = Path.read_text

    def mock_read_text(self, *args, **kwargs):
        if self.name == "queue.json":
            raise OSError("Permission denied")
        return original_read_text(self, *args, **kwargs)

    monkeypatch.setattr(Path, "read_text", mock_read_text)
    result = lg.load_queue()
    assert result == []
    captured = capsys.readouterr()
    assert "WARNING" in captured.out
    assert "Cannot read queue.json" in captured.out

View File

@@ -0,0 +1,159 @@
"""Tests for queue.json validation and backup in triage_score.py."""
from __future__ import annotations
import json
import pytest
import scripts.triage_score as ts
@pytest.fixture(autouse=True)
def _isolate(tmp_path, monkeypatch):
    """Redirect triage_score paths to tmp_path for isolation."""
    # Autouse: keeps every test's queue/backup/retro files inside tmp_path.
    monkeypatch.setattr(ts, "QUEUE_FILE", tmp_path / "queue.json")
    monkeypatch.setattr(ts, "QUEUE_BACKUP_FILE", tmp_path / "queue.json.bak")
    monkeypatch.setattr(ts, "RETRO_FILE", tmp_path / "retro" / "triage.jsonl")
    monkeypatch.setattr(ts, "QUARANTINE_FILE", tmp_path / "quarantine.json")
    monkeypatch.setattr(ts, "CYCLE_RETRO_FILE", tmp_path / "retro" / "cycles.jsonl")
def test_backup_created_on_write(tmp_path):
    """A backup of the current, valid queue.json is created by the backup step.

    Note: the backup snapshot is taken from whatever queue.json currently
    holds (here the freshly written new data), mirroring run_triage's logic.
    """
    # Create initial valid queue file
    initial_data = [{"issue": 1, "title": "Test", "ready": True}]
    ts.QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    ts.QUEUE_FILE.write_text(json.dumps(initial_data))
    # Write new data
    new_data = [{"issue": 2, "title": "New", "ready": True}]
    ts.QUEUE_FILE.write_text(json.dumps(new_data, indent=2) + "\n")
    # Manually run the backup logic as run_triage would
    # NOTE(review): this duplicates production logic inline rather than calling
    # it — consider extracting a helper in triage_score and invoking it here.
    if ts.QUEUE_FILE.exists():
        try:
            # Only back up when the current file parses as JSON.
            json.loads(ts.QUEUE_FILE.read_text())
            ts.QUEUE_BACKUP_FILE.write_text(ts.QUEUE_FILE.read_text())
        except (json.JSONDecodeError, OSError):
            pass
    # Both files should exist with same content
    assert ts.QUEUE_BACKUP_FILE.exists()
    assert json.loads(ts.QUEUE_BACKUP_FILE.read_text()) == new_data
def test_corrupt_queue_restored_from_backup(tmp_path, capsys):
    """If queue.json is corrupt, it should be restored from backup."""
    # Create a valid backup
    valid_data = [{"issue": 1, "title": "Backup", "ready": True}]
    ts.QUEUE_BACKUP_FILE.parent.mkdir(parents=True, exist_ok=True)
    ts.QUEUE_BACKUP_FILE.write_text(json.dumps(valid_data, indent=2) + "\n")
    # Create a corrupt queue file
    ts.QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    ts.QUEUE_FILE.write_text("not valid json {{{")
    # Run validation and restore logic
    # NOTE(review): production restore logic is duplicated inline here (and in
    # the sibling tests) — a shared helper in triage_score would keep them in sync.
    try:
        json.loads(ts.QUEUE_FILE.read_text())
    except (json.JSONDecodeError, OSError):
        if ts.QUEUE_BACKUP_FILE.exists():
            try:
                backup_data = ts.QUEUE_BACKUP_FILE.read_text()
                json.loads(backup_data)  # Validate backup
                ts.QUEUE_FILE.write_text(backup_data)
                print("[triage] Restored queue.json from backup")
            except (json.JSONDecodeError, OSError):
                ts.QUEUE_FILE.write_text("[]\n")
        else:
            ts.QUEUE_FILE.write_text("[]\n")
    # Queue should be restored from backup
    assert json.loads(ts.QUEUE_FILE.read_text()) == valid_data
    captured = capsys.readouterr()
    assert "Restored queue.json from backup" in captured.out
def test_corrupt_queue_no_backup_writes_empty_list(tmp_path):
    """If queue.json is corrupt and no backup exists, write empty list."""
    # Ensure no backup exists
    assert not ts.QUEUE_BACKUP_FILE.exists()
    # Create a corrupt queue file
    ts.QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    ts.QUEUE_FILE.write_text("not valid json {{{")
    # Run validation and restore logic (inline copy of run_triage's behaviour)
    try:
        json.loads(ts.QUEUE_FILE.read_text())
    except (json.JSONDecodeError, OSError):
        if ts.QUEUE_BACKUP_FILE.exists():
            try:
                backup_data = ts.QUEUE_BACKUP_FILE.read_text()
                json.loads(backup_data)
                ts.QUEUE_FILE.write_text(backup_data)
            except (json.JSONDecodeError, OSError):
                ts.QUEUE_FILE.write_text("[]\n")
        else:
            # No backup to fall back on — reset to a valid empty queue.
            ts.QUEUE_FILE.write_text("[]\n")
    # Should have empty list
    assert json.loads(ts.QUEUE_FILE.read_text()) == []
def test_corrupt_backup_writes_empty_list(tmp_path):
    """If both queue.json and backup are corrupt, write empty list."""
    # Create a corrupt backup
    ts.QUEUE_BACKUP_FILE.parent.mkdir(parents=True, exist_ok=True)
    ts.QUEUE_BACKUP_FILE.write_text("also corrupt backup")
    # Create a corrupt queue file
    ts.QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    ts.QUEUE_FILE.write_text("not valid json {{{")
    # Run validation and restore logic (inline copy of run_triage's behaviour)
    try:
        json.loads(ts.QUEUE_FILE.read_text())
    except (json.JSONDecodeError, OSError):
        if ts.QUEUE_BACKUP_FILE.exists():
            try:
                backup_data = ts.QUEUE_BACKUP_FILE.read_text()
                # Backup is also unparseable, so this raises too...
                json.loads(backup_data)
                ts.QUEUE_FILE.write_text(backup_data)
            except (json.JSONDecodeError, OSError):
                # ...and the last resort is a valid empty queue.
                ts.QUEUE_FILE.write_text("[]\n")
        else:
            ts.QUEUE_FILE.write_text("[]\n")
    # Should have empty list
    assert json.loads(ts.QUEUE_FILE.read_text()) == []
def test_valid_queue_not_corrupt_no_backup_overwrite(tmp_path):
    """Don't overwrite backup if current queue.json is corrupt."""
    # Create a valid backup
    valid_backup = [{"issue": 99, "title": "Old Backup", "ready": True}]
    ts.QUEUE_BACKUP_FILE.parent.mkdir(parents=True, exist_ok=True)
    ts.QUEUE_BACKUP_FILE.write_text(json.dumps(valid_backup, indent=2) + "\n")
    # Create a corrupt queue file
    ts.QUEUE_FILE.parent.mkdir(parents=True, exist_ok=True)
    ts.QUEUE_FILE.write_text("corrupt data")
    # Try to save backup (should skip because current is corrupt)
    if ts.QUEUE_FILE.exists():
        try:
            json.loads(ts.QUEUE_FILE.read_text())  # This will fail
            ts.QUEUE_BACKUP_FILE.write_text(ts.QUEUE_FILE.read_text())
        except (json.JSONDecodeError, OSError):
            pass  # Should hit this branch
    # Backup should still have original valid data
    assert json.loads(ts.QUEUE_BACKUP_FILE.read_text()) == valid_backup
def test_backup_path_configuration():
    """Ensure backup file path is properly configured relative to queue file."""
    # Backup must live beside the queue file, with the expected names.
    assert ts.QUEUE_FILE.parent == ts.QUEUE_BACKUP_FILE.parent
    assert ts.QUEUE_FILE.name == "queue.json"
    assert ts.QUEUE_BACKUP_FILE.name == "queue.json.bak"

View File

@@ -0,0 +1,158 @@
"""Unit tests for the web_fetch tool in timmy.tools."""
from __future__ import annotations
from unittest.mock import MagicMock, patch
from timmy.tools import web_fetch
class TestWebFetch:
    """Tests for web_fetch function.

    Because web_fetch imports requests/trafilatura lazily inside the
    function, most tests substitute those modules via patch.dict on
    sys.modules rather than patching attributes on timmy.tools.
    """

    def test_invalid_url_no_scheme(self):
        """URLs without http(s) scheme are rejected."""
        result = web_fetch("example.com")
        assert "Error: invalid URL" in result

    def test_invalid_url_empty(self):
        """Empty URL is rejected."""
        result = web_fetch("")
        assert "Error: invalid URL" in result

    def test_invalid_url_ftp(self):
        """Non-HTTP schemes are rejected."""
        result = web_fetch("ftp://example.com")
        assert "Error: invalid URL" in result

    @patch("timmy.tools.trafilatura", create=True)
    @patch("timmy.tools._requests", create=True)
    def test_successful_fetch(self, mock_requests, mock_trafilatura):
        """Happy path: fetch + extract returns text."""
        # We need to patch at import level inside the function
        mock_resp = MagicMock()
        mock_resp.text = "<html><body><p>Hello world</p></body></html>"
        with patch.dict(
            "sys.modules", {"requests": mock_requests, "trafilatura": mock_trafilatura}
        ):
            mock_requests.get.return_value = mock_resp
            # Real exception classes so web_fetch's except clauses can match.
            mock_requests.exceptions = _make_exceptions()
            mock_trafilatura.extract.return_value = "Hello world"
            result = web_fetch("https://example.com")
            assert result == "Hello world"

    @patch.dict("sys.modules", {"requests": MagicMock(), "trafilatura": MagicMock()})
    def test_truncation(self):
        """Long text is truncated to max_tokens * 4 chars."""
        import sys

        # Grab the mocks that patch.dict installed into sys.modules.
        mock_trafilatura = sys.modules["trafilatura"]
        mock_requests = sys.modules["requests"]
        long_text = "a" * 20000
        mock_resp = MagicMock()
        mock_resp.text = "<html><body>" + long_text + "</body></html>"
        mock_requests.get.return_value = mock_resp
        mock_requests.exceptions = _make_exceptions()
        mock_trafilatura.extract.return_value = long_text
        result = web_fetch("https://example.com", max_tokens=100)
        # 100 tokens * 4 chars = 400 chars max
        assert len(result) < 500
        assert "[…truncated" in result

    @patch.dict("sys.modules", {"requests": MagicMock(), "trafilatura": MagicMock()})
    def test_extraction_failure(self):
        """Returns error when trafilatura can't extract text."""
        import sys

        mock_trafilatura = sys.modules["trafilatura"]
        mock_requests = sys.modules["requests"]
        mock_resp = MagicMock()
        mock_resp.text = "<html></html>"
        mock_requests.get.return_value = mock_resp
        mock_requests.exceptions = _make_exceptions()
        # trafilatura.extract returns None when it finds no main content.
        mock_trafilatura.extract.return_value = None
        result = web_fetch("https://example.com")
        assert "Error: could not extract" in result

    @patch.dict("sys.modules", {"trafilatura": MagicMock()})
    def test_timeout(self):
        """Timeout errors are handled gracefully."""
        mock_requests = MagicMock()
        exc_mod = _make_exceptions()
        mock_requests.exceptions = exc_mod
        mock_requests.get.side_effect = exc_mod.Timeout("timed out")
        with patch.dict("sys.modules", {"requests": mock_requests}):
            result = web_fetch("https://example.com")
            assert "timed out" in result

    @patch.dict("sys.modules", {"trafilatura": MagicMock()})
    def test_http_error(self):
        """HTTP errors (404, 500, etc.) are handled gracefully."""
        mock_requests = MagicMock()
        exc_mod = _make_exceptions()
        mock_requests.exceptions = exc_mod
        mock_response = MagicMock()
        mock_response.status_code = 404
        # raise_for_status() raising HTTPError mimics a 404 response.
        mock_requests.get.return_value.raise_for_status.side_effect = exc_mod.HTTPError(
            response=mock_response
        )
        with patch.dict("sys.modules", {"requests": mock_requests}):
            result = web_fetch("https://example.com/nope")
            assert "404" in result

    def test_missing_requests(self):
        """Graceful error when requests not installed."""
        # Mapping a module name to None makes the import raise ImportError.
        with patch.dict("sys.modules", {"requests": None}):
            result = web_fetch("https://example.com")
            assert "requests" in result and "not installed" in result

    def test_missing_trafilatura(self):
        """Graceful error when trafilatura not installed."""
        mock_requests = MagicMock()
        with patch.dict("sys.modules", {"requests": mock_requests, "trafilatura": None}):
            result = web_fetch("https://example.com")
            assert "trafilatura" in result and "not installed" in result

    def test_catalog_entry_exists(self):
        """web_fetch should appear in the tool catalog."""
        from timmy.tools import get_all_available_tools

        catalog = get_all_available_tools()
        assert "web_fetch" in catalog
        assert "orchestrator" in catalog["web_fetch"]["available_in"]
def _make_exceptions():
"""Create a mock exceptions module with real exception classes."""
class Timeout(Exception):
pass
class HTTPError(Exception):
def __init__(self, *args, response=None, **kwargs):
super().__init__(*args, **kwargs)
self.response = response
class RequestException(Exception):
pass
mod = MagicMock()
mod.Timeout = Timeout
mod.HTTPError = HTTPError
mod.RequestException = RequestException
return mod

View File

@@ -1,80 +0,0 @@
"""Tests for the thread-local SQLite ConnectionPool."""
import sqlite3
import threading
from pathlib import Path
import pytest
from infrastructure.db_pool import ConnectionPool
pytestmark = pytest.mark.unit
def test_pool_creates_connection(tmp_path: Path):
    """The pool hands out a working sqlite3.Connection."""
    pool = ConnectionPool(tmp_path / "test.db")
    conn = pool.get_connection()
    assert isinstance(conn, sqlite3.Connection)
    # A trivial query proves the connection is actually open.
    assert conn.execute("SELECT 1").fetchone()[0] == 1
def test_pool_reuses_connection_same_thread(tmp_path: Path):
    """Repeated requests from the same thread yield the identical connection."""
    pool = ConnectionPool(tmp_path / "test.db")
    # Identity check: the pool caches per thread, not per call.
    assert pool.get_connection() is pool.get_connection()
def test_pool_different_connections_different_threads(tmp_path: Path):
    """Each thread owns a distinct connection (thread-local storage)."""
    pool = ConnectionPool(tmp_path / "test.db")
    main_conn = pool.get_connection()
    seen = []

    def _grab():
        # Runs on the worker thread, so the pool creates a fresh connection.
        seen.append(pool.get_connection())

    worker = threading.Thread(target=_grab)
    worker.start()
    worker.join()
    assert len(seen) == 1
    assert seen[0] is not main_conn
def test_pool_close_connection(tmp_path: Path):
    """close_connection drops the cached connection, forcing a fresh one next time."""
    pool = ConnectionPool(tmp_path / "test.db")
    first = pool.get_connection()
    pool.close_connection()
    # The thread-local slot was cleared, so a new object is created.
    assert pool.get_connection() is not first
def test_pool_context_manager(tmp_path: Path):
    """The context manager yields a live connection and closes it on exit."""
    pool = ConnectionPool(tmp_path / "test.db")
    with pool.connection() as managed:
        assert isinstance(managed, sqlite3.Connection)
    # Exiting the block closed the connection implicitly, so the next
    # request produces a different connection object.
    assert pool.get_connection() is not managed

View File

@@ -1,91 +0,0 @@
"""Tests for the health and sovereignty endpoints."""
from unittest.mock import patch
import pytest
from fastapi.testclient import TestClient
from dashboard.app import app
from dashboard.routes.health import DependencyStatus
pytestmark = pytest.mark.unit
client = TestClient(app)
@pytest.fixture
def mock_ollama_healthy():
    """Make the health route's check_ollama report the service as up."""
    with patch("dashboard.routes.health.check_ollama", return_value=True):
        yield
@pytest.fixture
def mock_ollama_unavailable():
    """Make the health route's check_ollama report the service as down."""
    with patch("dashboard.routes.health.check_ollama", return_value=False):
        yield
@pytest.fixture
def mock_check_ollama_sovereignty():
    """Stub the sovereignty-audit Ollama check with a healthy, max-score result."""
    dep = DependencyStatus(
        name="Ollama AI",
        status="healthy",
        sovereignty_score=10,
        details={"url": "http://localhost:11434"},
    )
    with patch("dashboard.routes.health._check_ollama", return_value=dep):
        yield
def test_health_check_healthy(mock_ollama_healthy):
    """Test legacy health check endpoint when Ollama is up."""
    response = client.get("/health")
    assert response.status_code == 200
    data = response.json()
    # With Ollama up the overall status is "ok" and the agent is idle.
    assert data["status"] == "ok"
    assert data["services"]["ollama"] == "up"
    assert data["agents"]["agent"]["status"] == "idle"
def test_health_check_degraded(mock_ollama_unavailable):
    """Test legacy health check endpoint when Ollama is down."""
    response = client.get("/health")
    # Endpoint still returns 200 — degradation is reported in the body.
    assert response.status_code == 200
    data = response.json()
    assert data["status"] == "degraded"
    assert data["services"]["ollama"] == "down"
    assert data["agents"]["agent"]["status"] == "offline"
def test_health_status_panel_healthy(mock_ollama_healthy):
    """Test HTML status panel rendering."""
    response = client.get("/health/status")
    assert response.status_code == 200
    assert "text/html" in response.headers["content-type"]
    # "UP" badge rendered in the panel's green (#10b981) when healthy.
    assert "UP" in response.text
    assert "#10b981" in response.text
def test_sovereignty_check(mock_check_ollama_sovereignty):
    """Test comprehensive sovereignty audit report."""
    with (
        patch("dashboard.routes.health._check_lightning") as mock_lightning,
        patch("dashboard.routes.health._check_sqlite") as mock_sqlite,
    ):
        mock_lightning.return_value = DependencyStatus(
            name="Lightning", status="unavailable", sovereignty_score=8, details={}
        )
        mock_sqlite.return_value = DependencyStatus(
            name="SQLite", status="healthy", sovereignty_score=10, details={}
        )
        response = client.get("/health/sovereignty")
        assert response.status_code == 200
        data = response.json()
        # (10 + 8 + 10) / 3 = 9.3  (overall score rounded to one decimal)
        assert data["overall_score"] == 9.3
        assert len(data["dependencies"]) == 3
        # Ensure recommendations contain note about unavailable dependency
        recommendations = " ".join(data["recommendations"])
        assert "unavailable" in recommendations.lower()

View File

@@ -50,17 +50,18 @@ commands =
description = Fast tests — excludes e2e, functional, and external services
commands =
pytest tests/ -q --tb=short \
-m "unit" \
--ignore=tests/e2e \
--ignore=tests/functional \
-m "not ollama and not docker and not selenium and not external_api and not skip_ci and not slow" \
-n auto --dist worksteal
[testenv:integration]
description = Integration tests (marked with @pytest.mark.integration)
commands =
pytest tests/ -q --tb=short \
-m "integration" \
-m "integration and not ollama and not docker and not selenium and not external_api and not slow" \
-n auto --dist worksteal
[testenv:functional]
description = Functional tests — real HTTP, no mocking (excl slow + selenium)
commands =