2026-03-08 16:07:02 -04:00
|
|
|
import logging as _logging
|
|
|
|
|
import os
|
|
|
|
|
import sys
|
2026-03-15 13:33:24 -04:00
|
|
|
from datetime import UTC
|
|
|
|
|
from datetime import datetime as _datetime
|
2026-02-21 16:53:16 +00:00
|
|
|
from typing import Literal
|
|
|
|
|
|
2026-02-19 19:31:48 +00:00
|
|
|
from pydantic_settings import BaseSettings, SettingsConfigDict
|
|
|
|
|
|
2026-03-15 12:52:18 -04:00
|
|
|
APP_START_TIME: _datetime = _datetime.now(UTC)
|
|
|
|
|
|
2026-02-19 19:31:48 +00:00
|
|
|
|
2026-03-19 19:18:22 -04:00
|
|
|
def normalize_ollama_url(url: str) -> str:
    """Return *url* with a ``localhost`` host rewritten to ``127.0.0.1``.

    "localhost" can resolve to ``::1`` first, stalling clients with IPv6
    resolution delays when Ollama only listens on IPv4.

    The previous implementation used a blanket ``str.replace``, which also
    corrupted hosts merely containing the substring (e.g. ``mylocalhost``)
    and any occurrence of "localhost" in the path or query. Only the actual
    host component of the URL is rewritten now.

    Args:
        url: An Ollama base URL, e.g. ``"http://localhost:11434"``.

    Returns:
        The URL with its host swapped to ``127.0.0.1`` when (and only when)
        the host is exactly ``localhost``; otherwise the URL unchanged.
    """
    from urllib.parse import urlsplit, urlunsplit

    if "//" not in url:
        # Scheme-less value (e.g. "localhost:11434") — urlsplit would treat
        # "localhost" as the scheme, so keep the historical plain swap.
        return url.replace("localhost", "127.0.0.1")
    try:
        parts = urlsplit(url)
        port = parts.port  # May raise ValueError for a malformed port.
    except ValueError:
        return url.replace("localhost", "127.0.0.1")
    if parts.hostname != "localhost":
        return url
    netloc = "127.0.0.1" if port is None else f"127.0.0.1:{port}"
    if parts.username:
        # Preserve any userinfo component verbatim.
        creds = parts.username if parts.password is None else f"{parts.username}:{parts.password}"
        netloc = f"{creds}@{netloc}"
    return urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))
|
|
|
|
|
|
|
|
|
|
|
2026-02-19 19:31:48 +00:00
|
|
|
class Settings(BaseSettings):
|
2026-03-12 21:40:32 -04:00
|
|
|
"""Central configuration — all env-var access goes through this class."""
|
|
|
|
|
|
2026-03-05 19:45:38 -05:00
|
|
|
# Display name for the primary agent — override with AGENT_NAME env var
|
|
|
|
|
agent_name: str = "Agent"
|
|
|
|
|
|
2026-02-19 19:31:48 +00:00
|
|
|
# Ollama host — override with OLLAMA_URL env var or .env file
|
|
|
|
|
ollama_url: str = "http://localhost:11434"
|
|
|
|
|
|
2026-03-19 19:18:22 -04:00
|
|
|
@property
|
|
|
|
|
def normalized_ollama_url(self) -> str:
|
|
|
|
|
"""Return ollama_url with localhost replaced by 127.0.0.1."""
|
|
|
|
|
return normalize_ollama_url(self.ollama_url)
|
|
|
|
|
|
2026-02-19 19:31:48 +00:00
|
|
|
# LLM model passed to Agno/Ollama — override with OLLAMA_MODEL
|
2026-03-23 18:35:57 +00:00
|
|
|
# qwen3:14b (Q5_K_M) is the primary model: tool calling F1 0.971, ~17.5 GB
|
|
|
|
|
# at 32K context — optimal for M3 Max 36 GB (Issue #1063).
|
|
|
|
|
# qwen3:30b exceeded memory budget at 32K+ context on 36 GB hardware.
|
|
|
|
|
ollama_model: str = "qwen3:14b"
|
|
|
|
|
|
|
|
|
|
# Fast routing model — override with OLLAMA_FAST_MODEL
|
|
|
|
|
# qwen3:8b (Q6_K): tool calling F1 0.933 at ~45-55 tok/s (2x speed of 14B).
|
|
|
|
|
# Use for routine tasks: simple tool calls, file reads, status checks.
|
|
|
|
|
# Combined memory with qwen3:14b: ~17 GB — both can stay loaded simultaneously.
|
|
|
|
|
ollama_fast_model: str = "qwen3:8b"
|
|
|
|
|
|
|
|
|
|
# Maximum concurrently loaded Ollama models — override with OLLAMA_MAX_LOADED_MODELS
|
|
|
|
|
# Set to 2 to keep qwen3:8b (fast) + qwen3:14b (primary) both hot.
|
|
|
|
|
# Requires setting OLLAMA_MAX_LOADED_MODELS=2 in the Ollama server environment.
|
|
|
|
|
ollama_max_loaded_models: int = 2
|
2026-02-19 19:31:48 +00:00
|
|
|
|
2026-03-14 18:54:43 -04:00
|
|
|
# Context window size for Ollama inference — override with OLLAMA_NUM_CTX
|
2026-03-23 18:35:57 +00:00
|
|
|
# qwen3:14b at 32K: ~17.5 GB total (weights + KV cache) on M3 Max 36 GB.
|
|
|
|
|
# Set to 0 to use model defaults.
|
|
|
|
|
ollama_num_ctx: int = 32768
|
2026-03-14 18:54:43 -04:00
|
|
|
|
2026-03-14 17:26:47 -04:00
|
|
|
# Fallback model chains — override with FALLBACK_MODELS / VISION_FALLBACK_MODELS
|
2026-03-23 18:35:57 +00:00
|
|
|
# as comma-separated strings, e.g. FALLBACK_MODELS="qwen3:8b,qwen2.5:14b"
|
2026-03-14 17:26:47 -04:00
|
|
|
# Or edit config/providers.yaml → fallback_chains for the canonical source.
|
|
|
|
|
fallback_models: list[str] = [
|
2026-03-23 18:35:57 +00:00
|
|
|
"qwen3:8b",
|
2026-03-14 17:26:47 -04:00
|
|
|
"qwen2.5:14b",
|
|
|
|
|
"qwen2.5:7b",
|
2026-03-23 18:35:57 +00:00
|
|
|
"llama3.1:8b-instruct",
|
|
|
|
|
"llama3.1",
|
2026-03-14 17:26:47 -04:00
|
|
|
"llama3.2:3b",
|
|
|
|
|
]
|
|
|
|
|
vision_fallback_models: list[str] = [
|
|
|
|
|
"llama3.2:3b",
|
|
|
|
|
"llava:7b",
|
|
|
|
|
"qwen2.5-vl:3b",
|
|
|
|
|
"moondream:1.8b",
|
|
|
|
|
]
|
|
|
|
|
|
2026-02-19 19:31:48 +00:00
|
|
|
# Set DEBUG=true to enable /docs and /redoc (disabled by default)
|
|
|
|
|
debug: bool = False
|
|
|
|
|
|
2026-02-22 17:16:12 +00:00
|
|
|
# Telegram bot token — set via TELEGRAM_TOKEN env var or the /telegram/setup endpoint
|
|
|
|
|
telegram_token: str = ""
|
|
|
|
|
|
2026-02-25 01:11:14 +00:00
|
|
|
# Discord bot token — set via DISCORD_TOKEN env var or the /discord/setup endpoint
|
|
|
|
|
discord_token: str = ""
|
|
|
|
|
|
2026-03-09 21:54:04 -04:00
|
|
|
# ── Discord action confirmation ──────────────────────────────────────────
|
|
|
|
|
# When True, dangerous tools (shell, write_file, python) require user
|
|
|
|
|
# confirmation via Discord button before executing.
|
|
|
|
|
discord_confirm_actions: bool = True
|
|
|
|
|
# Seconds to wait for user confirmation before auto-rejecting.
|
|
|
|
|
discord_confirm_timeout: int = 120
|
|
|
|
|
|
2026-03-19 15:24:43 -04:00
|
|
|
# ── Backend selection ────────────────────────────────────────────────────
|
2026-02-21 16:53:16 +00:00
|
|
|
# "ollama" — always use Ollama (default, safe everywhere)
|
2026-03-19 15:24:43 -04:00
|
|
|
# "auto" — pick best available local backend, fall back to Ollama
|
|
|
|
|
timmy_model_backend: Literal["ollama", "grok", "claude", "auto"] = "ollama"
|
2026-02-21 16:53:16 +00:00
|
|
|
|
feat: add Grok (xAI) as opt-in premium backend with monetization
- Add GrokBackend class in src/timmy/backends.py with full sync/async
support, health checks, usage stats, and cost estimation in sats
- Add consult_grok tool to Timmy's toolkit for proactive Grok queries
- Extend cascade router with Grok provider type for failover chain
- Add Grok Mode toggle card to Mission Control dashboard (HTMX live)
- Add "Ask Grok" button on chat input for direct Grok queries
- Add /grok/* routes: status, toggle, chat, stats endpoints
- Integrate Lightning invoice generation for Grok usage monetization
- Add GROK_ENABLED, XAI_API_KEY, GROK_DEFAULT_MODEL, GROK_MAX_SATS_PER_QUERY,
GROK_FREE config settings via pydantic-settings
- Update .env.example and docker-compose.yml with Grok env vars
- Add 21 tests covering backend, tools, and route endpoints (all green)
Local-first ethos preserved: Grok is premium augmentation only,
disabled by default, and Lightning-payable when enabled.
https://claude.ai/code/session_01FygwN8wS8J6WGZ8FPb7XGV
2026-02-27 01:12:51 +00:00
|
|
|
# ── Grok (xAI) — opt-in premium cloud backend ────────────────────────
|
|
|
|
|
# Grok is a premium augmentation layer — local-first ethos preserved.
|
|
|
|
|
# Only used when explicitly enabled and query complexity warrants it.
|
|
|
|
|
grok_enabled: bool = False
|
|
|
|
|
xai_api_key: str = ""
|
2026-03-20 22:47:05 +00:00
|
|
|
xai_base_url: str = "https://api.x.ai/v1"
|
feat: add Grok (xAI) as opt-in premium backend with monetization
- Add GrokBackend class in src/timmy/backends.py with full sync/async
support, health checks, usage stats, and cost estimation in sats
- Add consult_grok tool to Timmy's toolkit for proactive Grok queries
- Extend cascade router with Grok provider type for failover chain
- Add Grok Mode toggle card to Mission Control dashboard (HTMX live)
- Add "Ask Grok" button on chat input for direct Grok queries
- Add /grok/* routes: status, toggle, chat, stats endpoints
- Integrate Lightning invoice generation for Grok usage monetization
- Add GROK_ENABLED, XAI_API_KEY, GROK_DEFAULT_MODEL, GROK_MAX_SATS_PER_QUERY,
GROK_FREE config settings via pydantic-settings
- Update .env.example and docker-compose.yml with Grok env vars
- Add 21 tests covering backend, tools, and route endpoints (all green)
Local-first ethos preserved: Grok is premium augmentation only,
disabled by default, and Lightning-payable when enabled.
https://claude.ai/code/session_01FygwN8wS8J6WGZ8FPb7XGV
2026-02-27 01:12:51 +00:00
|
|
|
grok_default_model: str = "grok-3-fast"
|
|
|
|
|
grok_max_sats_per_query: int = 200
|
2026-03-22 01:46:15 +00:00
|
|
|
grok_sats_hard_cap: int = 100 # Absolute ceiling on sats per Grok query
|
feat: add Grok (xAI) as opt-in premium backend with monetization
- Add GrokBackend class in src/timmy/backends.py with full sync/async
support, health checks, usage stats, and cost estimation in sats
- Add consult_grok tool to Timmy's toolkit for proactive Grok queries
- Extend cascade router with Grok provider type for failover chain
- Add Grok Mode toggle card to Mission Control dashboard (HTMX live)
- Add "Ask Grok" button on chat input for direct Grok queries
- Add /grok/* routes: status, toggle, chat, stats endpoints
- Integrate Lightning invoice generation for Grok usage monetization
- Add GROK_ENABLED, XAI_API_KEY, GROK_DEFAULT_MODEL, GROK_MAX_SATS_PER_QUERY,
GROK_FREE config settings via pydantic-settings
- Update .env.example and docker-compose.yml with Grok env vars
- Add 21 tests covering backend, tools, and route endpoints (all green)
Local-first ethos preserved: Grok is premium augmentation only,
disabled by default, and Lightning-payable when enabled.
https://claude.ai/code/session_01FygwN8wS8J6WGZ8FPb7XGV
2026-02-27 01:12:51 +00:00
|
|
|
grok_free: bool = False # Skip Lightning invoice when user has own API key
|
|
|
|
|
|
2026-03-22 01:46:15 +00:00
|
|
|
# ── Database ──────────────────────────────────────────────────────────
|
|
|
|
|
db_busy_timeout_ms: int = 5000 # SQLite PRAGMA busy_timeout (ms)
|
|
|
|
|
|
2026-02-28 09:35:44 -05:00
|
|
|
# ── Claude (Anthropic) — cloud fallback backend ────────────────────────
|
|
|
|
|
# Used when Ollama is offline and local inference isn't available.
|
|
|
|
|
# Set ANTHROPIC_API_KEY to enable. Default model is Haiku (fast + cheap).
|
|
|
|
|
anthropic_api_key: str = ""
|
|
|
|
|
claude_model: str = "haiku"
|
|
|
|
|
|
2026-03-23 02:14:42 +00:00
|
|
|
# ── Content Moderation ──────────────────────────────────────────────
|
|
|
|
|
# Three-layer moderation pipeline for AI narrator output.
|
|
|
|
|
# Uses Llama Guard via Ollama with regex fallback.
|
|
|
|
|
moderation_enabled: bool = True
|
|
|
|
|
moderation_guard_model: str = "llama-guard3:1b"
|
|
|
|
|
# Default confidence threshold — per-game profiles can override.
|
|
|
|
|
moderation_threshold: float = 0.8
|
|
|
|
|
|
feat: integrate Spark Intelligence into Timmy swarm system
Adds a self-evolving cognitive layer inspired by vibeship-spark-intelligence,
adapted for Timmy's agent architecture. Spark captures swarm events, runs
EIDOS prediction-evaluation loops, consolidates memories, and generates
advisory recommendations — all backed by SQLite consistent with existing
patterns.
New modules:
- spark/memory.py — event capture with importance scoring + memory consolidation
- spark/eidos.py — EIDOS cognitive loop (predict → observe → evaluate → learn)
- spark/advisor.py — ranked advisory generation from accumulated intelligence
- spark/engine.py — top-level API wiring all subsystems together
Dashboard:
- /spark/ui — full Spark Intelligence dashboard (3-column: status/advisories,
predictions/memories, event timeline) with HTMX auto-refresh
- /spark — JSON API for programmatic access
- SPARK link added to navigation header
Integration:
- Coordinator hooks emit Spark events on task post, bid, assign, complete, fail
- EIDOS predictions generated when tasks are posted, evaluated on completion
- Memory consolidation triggers when agents accumulate enough outcomes
- SPARK_ENABLED config toggle (default: true)
Tests: 47 new tests covering all Spark subsystems + dashboard routes.
Full suite: 538 tests passing.
https://claude.ai/code/session_01KJm6jQkNi3aA3yoQJn636c
2026-02-24 15:51:15 +00:00
|
|
|
# ── Spark Intelligence ────────────────────────────────────────────────
|
|
|
|
|
# Enable/disable the Spark cognitive layer.
|
|
|
|
|
# When enabled, Spark captures swarm events, runs EIDOS predictions,
|
|
|
|
|
# consolidates memories, and generates advisory recommendations.
|
|
|
|
|
spark_enabled: bool = True
|
|
|
|
|
|
feat: add full creative studio + DevOps tools (Pixel, Lyra, Reel personas)
Adds 3 new personas (Pixel, Lyra, Reel) and 5 new tool modules:
- Git/DevOps tools (GitPython): clone, status, diff, log, blame, branch,
add, commit, push, pull, stash — wired to Forge and Helm personas
- Image generation (FLUX via diffusers): text-to-image, storyboards,
variations — Pixel persona
- Music generation (ACE-Step 1.5): full songs with vocals+instrumentals,
instrumental tracks, vocal-only tracks — Lyra persona
- Video generation (Wan 2.1 via diffusers): text-to-video, image-to-video
clips — Reel persona
- Creative Director pipeline: multi-step orchestration that chains
storyboard → music → video → assembly into 3+ minute final videos
- Video assembler (MoviePy + FFmpeg): stitch clips, overlay audio,
title cards, subtitles, final export
Also includes:
- Spark Intelligence tool-level + creative pipeline event capture
- Creative Studio dashboard page (/creative/ui) with 4 tabs
- Config settings for all new models and output directories
- pyproject.toml creative optional extra for GPU dependencies
- 107 new tests covering all modules (624 total, all passing)
https://claude.ai/code/session_01KJm6jQkNi3aA3yoQJn636c
2026-02-24 16:31:47 +00:00
|
|
|
# ── Git / DevOps ──────────────────────────────────────────────────────
|
|
|
|
|
git_default_repo_dir: str = "~/repos"
|
|
|
|
|
|
2026-02-26 23:39:13 -05:00
|
|
|
# Repository root - auto-detected but can be overridden
|
|
|
|
|
# This is the main project directory where .git lives
|
|
|
|
|
repo_root: str = ""
|
|
|
|
|
|
feat: add full creative studio + DevOps tools (Pixel, Lyra, Reel personas)
Adds 3 new personas (Pixel, Lyra, Reel) and 5 new tool modules:
- Git/DevOps tools (GitPython): clone, status, diff, log, blame, branch,
add, commit, push, pull, stash — wired to Forge and Helm personas
- Image generation (FLUX via diffusers): text-to-image, storyboards,
variations — Pixel persona
- Music generation (ACE-Step 1.5): full songs with vocals+instrumentals,
instrumental tracks, vocal-only tracks — Lyra persona
- Video generation (Wan 2.1 via diffusers): text-to-video, image-to-video
clips — Reel persona
- Creative Director pipeline: multi-step orchestration that chains
storyboard → music → video → assembly into 3+ minute final videos
- Video assembler (MoviePy + FFmpeg): stitch clips, overlay audio,
title cards, subtitles, final export
Also includes:
- Spark Intelligence tool-level + creative pipeline event capture
- Creative Studio dashboard page (/creative/ui) with 4 tabs
- Config settings for all new models and output directories
- pyproject.toml creative optional extra for GPU dependencies
- 107 new tests covering all modules (624 total, all passing)
https://claude.ai/code/session_01KJm6jQkNi3aA3yoQJn636c
2026-02-24 16:31:47 +00:00
|
|
|
# ── Creative — Image Generation (Pixel) ───────────────────────────────
|
|
|
|
|
flux_model_id: str = "black-forest-labs/FLUX.1-schnell"
|
|
|
|
|
image_output_dir: str = "data/images"
|
|
|
|
|
image_default_steps: int = 4
|
|
|
|
|
|
|
|
|
|
# ── Creative — Music Generation (Lyra) ────────────────────────────────
|
|
|
|
|
music_output_dir: str = "data/music"
|
|
|
|
|
ace_step_model: str = "ace-step/ACE-Step-v1.5"
|
|
|
|
|
|
|
|
|
|
# ── Creative — Video Generation (Reel) ────────────────────────────────
|
|
|
|
|
video_output_dir: str = "data/video"
|
|
|
|
|
wan_model_id: str = "Wan-AI/Wan2.1-T2V-1.3B"
|
|
|
|
|
video_default_resolution: str = "480p"
|
|
|
|
|
|
|
|
|
|
# ── Creative — Pipeline / Assembly ────────────────────────────────────
|
|
|
|
|
creative_output_dir: str = "data/creative"
|
|
|
|
|
video_transition_duration: float = 1.0
|
|
|
|
|
default_video_codec: str = "libx264"
|
|
|
|
|
|
2026-02-25 18:19:22 +00:00
|
|
|
# ── L402 Lightning ───────────────────────────────────────────────────
|
|
|
|
|
# HMAC secrets for macaroon signing and invoice verification.
|
|
|
|
|
# Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"
|
2026-02-25 15:32:19 -05:00
|
|
|
# In production (TIMMY_ENV=production), these MUST be set or the app will refuse to start.
|
|
|
|
|
l402_hmac_secret: str = ""
|
|
|
|
|
l402_macaroon_secret: str = ""
|
2026-02-25 18:19:22 +00:00
|
|
|
lightning_backend: Literal["mock", "lnd"] = "mock"
|
|
|
|
|
|
2026-02-25 15:32:19 -05:00
|
|
|
# ── Privacy / Sovereignty ────────────────────────────────────────────
|
|
|
|
|
# Disable Agno telemetry for air-gapped/sovereign deployments.
|
|
|
|
|
# Default is False (telemetry disabled) to align with sovereign AI vision.
|
|
|
|
|
telemetry_enabled: bool = False
|
|
|
|
|
|
2026-03-23 14:09:03 +00:00
|
|
|
# ── Sovereignty Metrics ──────────────────────────────────────────────
|
|
|
|
|
# Alert when API cost per research task exceeds this threshold (USD).
|
|
|
|
|
sovereignty_api_cost_alert_threshold: float = 1.00
|
|
|
|
|
|
2026-03-11 18:36:42 -04:00
|
|
|
# CORS allowed origins for the web chat interface (Gitea Pages, etc.)
|
feat: replace GitHub page with embedded Timmy chat interface
Replaces the marketing landing page with a minimal, full-screen chat
interface that connects to a running Timmy instance. Mobile-first design
with single vertical scroll direction, looping scroll, no zoom, no
buttons — just type and press Enter to talk to Timmy.
- docs/index.html: full rewrite as a clean chat UI with dark terminal
theme, looping infinite scroll, markdown rendering, connection status,
and /connect, /clear, /help slash commands
- src/dashboard/app.py: add CORS middleware so the GitHub Pages site can
reach a local Timmy server cross-origin
- src/config.py: add cors_origins setting (defaults to ["*"])
https://claude.ai/code/session_01AWLxg6KDWsfCATiuvsRMGr
2026-02-27 00:35:33 +00:00
|
|
|
# Set CORS_ORIGINS as a comma-separated list, e.g. "http://localhost:3000,https://example.com"
|
2026-03-19 14:57:36 -04:00
|
|
|
cors_origins: list[str] = [
|
|
|
|
|
"http://localhost:3000",
|
|
|
|
|
"http://localhost:8000",
|
|
|
|
|
"http://127.0.0.1:3000",
|
|
|
|
|
"http://127.0.0.1:8000",
|
|
|
|
|
]
|
feat: replace GitHub page with embedded Timmy chat interface
Replaces the marketing landing page with a minimal, full-screen chat
interface that connects to a running Timmy instance. Mobile-first design
with single vertical scroll direction, looping scroll, no zoom, no
buttons — just type and press Enter to talk to Timmy.
- docs/index.html: full rewrite as a clean chat UI with dark terminal
theme, looping infinite scroll, markdown rendering, connection status,
and /connect, /clear, /help slash commands
- src/dashboard/app.py: add CORS middleware so the GitHub Pages site can
reach a local Timmy server cross-origin
- src/config.py: add cors_origins setting (defaults to ["*"])
https://claude.ai/code/session_01AWLxg6KDWsfCATiuvsRMGr
2026-02-27 00:35:33 +00:00
|
|
|
|
2026-03-21 14:56:43 +00:00
|
|
|
# ── Matrix Frontend Integration ────────────────────────────────────────
|
|
|
|
|
# URL of the Matrix frontend (Replit/Tailscale) for CORS.
|
|
|
|
|
# When set, this origin is added to CORS allowed_origins.
|
|
|
|
|
# Example: "http://100.124.176.28:8080" or "https://alexanderwhitestone.com"
|
|
|
|
|
matrix_frontend_url: str = "" # Empty = disabled
|
|
|
|
|
|
2026-03-21 16:14:05 +00:00
|
|
|
# WebSocket authentication token for Matrix connections.
|
|
|
|
|
# When set, clients must provide this token via ?token= query param
|
|
|
|
|
# or in the first message as {"type": "auth", "token": "..."}.
|
|
|
|
|
# Empty/unset = auth disabled (dev mode).
|
|
|
|
|
matrix_ws_token: str = ""
|
|
|
|
|
|
2026-03-11 10:37:20 -04:00
|
|
|
# Trusted hosts for the Host header check (TrustedHostMiddleware).
|
|
|
|
|
# Set TRUSTED_HOSTS as a comma-separated list. Wildcards supported (e.g. "*.ts.net").
|
|
|
|
|
# Defaults include localhost + Tailscale MagicDNS. Add your Tailscale IP if needed.
|
|
|
|
|
trusted_hosts: list[str] = [
|
|
|
|
|
"localhost",
|
|
|
|
|
"127.0.0.1",
|
|
|
|
|
"*.local",
|
|
|
|
|
"*.ts.net",
|
|
|
|
|
"testserver",
|
|
|
|
|
]
|
|
|
|
|
|
2026-02-25 15:32:19 -05:00
|
|
|
# Environment mode: development | production
|
|
|
|
|
# In production, security settings are strictly enforced.
|
|
|
|
|
timmy_env: Literal["development", "production"] = "development"
|
|
|
|
|
|
2026-03-07 22:34:30 -05:00
|
|
|
# ── Memory Management ──────────────────────────────────────────────
|
|
|
|
|
# Auto-prune vector store memories older than this many days on startup.
|
|
|
|
|
# Set to 0 to disable auto-pruning.
|
|
|
|
|
memory_prune_days: int = 90
|
|
|
|
|
# When True, fact-type memories are kept even when older than the TTL.
|
|
|
|
|
memory_prune_keep_facts: bool = True
|
|
|
|
|
# Maximum size in MB for the memory/notes/ vault directory.
|
|
|
|
|
# When exceeded, a warning is logged. Set to 0 to disable.
|
|
|
|
|
memory_vault_max_mb: int = 100
|
2026-03-12 11:23:18 -04:00
|
|
|
# Auto-prune thoughts older than this many days. 0 = disabled.
|
|
|
|
|
thoughts_prune_days: int = 90
|
|
|
|
|
# Minimum thoughts to keep regardless of age.
|
|
|
|
|
thoughts_prune_keep_min: int = 200
|
|
|
|
|
# Auto-prune system events older than this many days. 0 = disabled.
|
|
|
|
|
events_prune_days: int = 90
|
|
|
|
|
# Minimum events to keep regardless of age.
|
|
|
|
|
events_prune_keep_min: int = 200
|
2026-03-07 22:34:30 -05:00
|
|
|
|
feat: agentic loop for multi-step tasks + regression fixes (#148)
* fix: name extraction blocklist, memory preview escaping, and gitignore cleanup
- Add _NAME_BLOCKLIST to extract_user_name() to reject gerunds and UI-state
words like "Sending" that were incorrectly captured as user names
- Collapse whitespace in get_memory_status() preview so newlines survive
JSON serialization without showing raw \n escape sequences
- Broaden .gitignore from specific memory/self/user_profile.md to memory/self/
and untrack memory/self/methodology.md (runtime-edited file)
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix: catch Ollama connection errors in session.py + add 71 smoke tests
- Wrap agent.run() in session.py with try/except so Ollama connection
failures return a graceful fallback message instead of dumping raw
tracebacks to Docker logs
- Add tests/test_smoke.py with 71 tests covering every GET route:
core pages, feature pages, JSON APIs, and a parametrized no-500 sweep
— catches import errors, template failures, and schema mismatches
that unit tests miss
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* feat: agentic loop for multi-step tasks + Round 10 regression fixes
Agentic loop (Parts 1-4):
- Add multi-step chaining instructions to system prompt
- New agentic_loop.py with plan→execute→adapt→summarize flow
- Register plan_and_execute tool for background task execution
- Add max_agent_steps config setting (default: 10)
- Discord fix: 300s timeout, typing indicator, send error handling
- 16 new unit + e2e tests for agentic loop
Round 10 regressions (R1-R5, P1):
- R1: Fix literal \n escape sequences in tool responses
- R2: Chat timeout/error feedback in agent panel
- R3: /hands infinite spinner → static empty states
- R4: /self-coding infinite spinner → static stats + journal
- R5: /grok/status raw JSON → HTML dashboard template
- P1: VETO confirmation dialog on task cards
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix: briefing route 500 in CI when agno is MagicMock stub
_call_agent() returned a MagicMock instead of a string when agno is
stubbed in tests, causing SQLite "Error binding parameter 4" on save.
Ensure the return value is always an actual string.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix: briefing route 500 in CI — graceful degradation at route level
When agno is stubbed with MagicMock in CI, agent.run() returns a
MagicMock instead of raising — so the exception handler never fires
and a MagicMock propagates as the summary to SQLite, which can't
bind it.
Fix: catch at the route level and return a fallback Briefing object.
This follows the project's graceful degradation pattern — the briefing
page always renders, even when the backend is completely unavailable.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
---------
Co-authored-by: Trip T <trip@local>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-08 01:46:29 -05:00
|
|
|
# ── Agentic Loop ──────────────────────────────────────────────────
|
|
|
|
|
# Maximum steps the agentic loop will execute before stopping.
|
|
|
|
|
max_agent_steps: int = 10
|
|
|
|
|
|
2026-03-07 18:49:37 -05:00
|
|
|
# ── Test / Diagnostics ─────────────────────────────────────────────
|
|
|
|
|
# Skip loading heavy embedding models (for tests / low-memory envs).
|
|
|
|
|
timmy_skip_embeddings: bool = False
|
|
|
|
|
# Disable CSRF middleware entirely (for tests).
|
|
|
|
|
timmy_disable_csrf: bool = False
|
|
|
|
|
# Mark the process as running in test mode.
|
|
|
|
|
timmy_test_mode: bool = False
|
|
|
|
|
|
|
|
|
|
# ── Brain / rqlite ─────────────────────────────────────────────────
|
|
|
|
|
# URL of the local rqlite node for distributed memory.
|
|
|
|
|
# Empty string means rqlite is not configured.
|
|
|
|
|
rqlite_url: str = ""
|
|
|
|
|
# Source identifier for brain memory entries.
|
|
|
|
|
brain_source: str = "default"
|
|
|
|
|
# Path override for the local brain SQLite database.
|
|
|
|
|
brain_db_path: str = ""
|
|
|
|
|
|
|
|
|
|
# ── Security Tuning ───────────────────────────────────────────────
|
|
|
|
|
# Set to True in production to mark CSRF cookies as Secure (HTTPS only).
|
|
|
|
|
csrf_cookie_secure: bool = False
|
|
|
|
|
# Maximum size in bytes for chat API request bodies.
|
|
|
|
|
chat_api_max_body_bytes: int = 1_048_576 # 1 MB
|
|
|
|
|
|
2026-02-25 17:18:58 -05:00
|
|
|
# ── Self-Modification ──────────────────────────────────────────────
|
Claude/remove persona system f vgt m (#126)
* Remove persona system, identity, and all Timmy references
Strip the codebase to pure orchestration logic:
- Delete TIMMY_IDENTITY.md and memory/self/identity.md
- Gut brain/identity.py to no-op stubs (empty returns)
- Remove all system prompts reinforcing Timmy's character, faith,
sovereignty, sign-off ("Sir, affirmative"), and agent roster
- Replace identity-laden prompts with generic local-AI-assistant prompts
- Remove "You work for Timmy" from all sub-agent system prompts
- Rename PersonaTools → AgentTools, PERSONA_TOOLKITS → AGENT_TOOLKITS
- Replace "timmy" agent ID with "orchestrator" across routes, marketplace,
tools catalog, and orchestrator class
- Strip Timmy references from config comments, templates, telegram bot,
chat API, and dashboard UI
- Delete tests/brain/test_identity.py entirely
- Fix all test assertions that checked for persona identity content
729 tests pass (2 pre-existing failures in test_calm.py unrelated).
https://claude.ai/code/session_01LjQGUE6nk9W9674zaxrYxy
* Add Taskosaur (PM + AI task execution) to docker-compose
Spins up Taskosaur alongside the dashboard on `docker compose up`:
- postgres:16-alpine (port 5432, Taskosaur DB)
- redis:7-alpine (Bull queue backend)
- taskosaur (ports 3000 API / 3001 UI)
- dashboard now depends_on taskosaur healthy
- TASKOSAUR_API_URL injected into dashboard environment
Dashboard can reach Taskosaur at http://taskosaur:3000/api on the
internal network. Frontend UI accessible at http://localhost:3001.
https://claude.ai/code/session_01LjQGUE6nk9W9674zaxrYxy
---------
Co-authored-by: Claude <noreply@anthropic.com>
2026-03-04 12:00:49 -05:00
|
|
|
# Enable self-modification capabilities. When enabled, the agent can
|
2026-02-25 17:18:58 -05:00
|
|
|
# edit its own source code, run tests, and commit changes.
|
|
|
|
|
self_modify_enabled: bool = False
|
|
|
|
|
self_modify_max_retries: int = 2
|
|
|
|
|
self_modify_allowed_dirs: str = "src,tests"
|
|
|
|
|
self_modify_backend: str = "auto" # "ollama", "anthropic", or "auto"
|
|
|
|
|
|
2026-02-26 10:27:08 -05:00
|
|
|
# ── Work Orders ──────────────────────────────────────────────────
|
|
|
|
|
# External users and agents can submit work orders for improvements.
|
|
|
|
|
work_orders_enabled: bool = True
|
|
|
|
|
work_orders_auto_execute: bool = False # Master switch for auto-execution
|
2026-02-26 23:39:13 -05:00
|
|
|
work_orders_auto_threshold: str = (
|
|
|
|
|
"low" # Max priority that auto-executes: "low" | "medium" | "high" | "none"
|
|
|
|
|
)
|
2026-02-26 10:27:08 -05:00
|
|
|
|
feat: add custom weights, model registry, per-agent models, and reward scoring
Inspired by OpenClaw-RL's multi-model orchestration, this adds four
features for custom model management:
1. Custom model registry (infrastructure/models/registry.py) — SQLite-backed
registry for GGUF, safetensors, HF checkpoint, and Ollama models with
role-based lookups (general, reward, teacher, judge).
2. Per-agent model assignment — each swarm persona can use a different model
instead of sharing the global default. Resolved via registry assignment >
persona default > global default.
3. Runtime model management API (/api/v1/models) — REST endpoints to register,
list, assign, enable/disable, and remove custom models without restart.
Includes a dashboard page at /models.
4. Reward model scoring (PRM-style) — majority-vote quality evaluation of
agent outputs using a configurable reward model. Scores persist in SQLite
and feed into the swarm learner.
New config settings: custom_weights_dir, reward_model_enabled,
reward_model_name, reward_model_votes.
54 new tests covering registry CRUD, API endpoints, agent assignments,
role lookups, and reward scoring.
https://claude.ai/code/session_01V4iTozMwcE2gjfnCJdCugC
2026-02-27 01:08:03 +00:00
|
|
|
# ── Custom Weights & Models ──────────────────────────────────────
|
|
|
|
|
# Directory for custom model weights (GGUF, safetensors, HF checkpoints).
|
|
|
|
|
# Models placed here can be registered at runtime and assigned to agents.
|
|
|
|
|
custom_weights_dir: str = "data/models"
|
|
|
|
|
# Enable the reward model for scoring agent outputs (PRM-style).
|
|
|
|
|
reward_model_enabled: bool = False
|
|
|
|
|
# Reward model name (must be available via Ollama or a custom weight path).
|
|
|
|
|
reward_model_name: str = ""
|
|
|
|
|
# Minimum votes for majority-vote reward scoring (odd number recommended).
|
|
|
|
|
reward_model_votes: int = 3
|
|
|
|
|
|
feat: add in-browser local model support for iPhone via WebLLM
Enable Timmy to run directly on iPhone by loading a small LLM into
the browser via WebGPU (Safari 26+ / iOS 26+). No server connection
required — fully sovereign, fully offline.
New files:
- static/local_llm.js: WebLLM wrapper with model catalogue, WebGPU
detection, streaming chat, and progress callbacks
- templates/mobile_local.html: Mobile-optimized UI with model
selector, download progress, LOCAL/SERVER badge, and chat
- tests/dashboard/test_local_models.py: 31 tests covering routes,
config, template UX, JS asset, and XSS prevention
Changes:
- config.py: browser_model_enabled, browser_model_id,
browser_model_fallback settings
- routes/mobile.py: /mobile/local page, /mobile/local-models API
- base.html: LOCAL AI nav link
Supported models: SmolLM2-360M (~200MB), Qwen2.5-0.5B (~350MB),
SmolLM2-1.7B (~1GB), Llama-3.2-1B (~700MB). Falls back to
server-side Ollama when local model is unavailable.
https://claude.ai/code/session_01Cqkvr4sZbED7T3iDu1rwSD
2026-02-27 00:03:05 +00:00
|
|
|
# ── Browser Local Models (iPhone / WebGPU) ───────────────────────
|
|
|
|
|
# Enable in-browser LLM inference via WebLLM for offline iPhone use.
|
|
|
|
|
# When enabled, the mobile dashboard loads a small model directly
|
|
|
|
|
# in the browser — no server or Ollama required.
|
|
|
|
|
browser_model_enabled: bool = True
|
|
|
|
|
# WebLLM model ID — must be a pre-compiled MLC model.
|
|
|
|
|
# Recommended for iPhone: SmolLM2-360M (fast) or Qwen3-0.6B (smart).
|
|
|
|
|
browser_model_id: str = "SmolLM2-360M-Instruct-q4f16_1-MLC"
|
|
|
|
|
# Fallback to server when browser model is unavailable or too slow.
|
|
|
|
|
browser_model_fallback: bool = True
|
|
|
|
|
|
2026-03-19 02:54:19 -04:00
|
|
|
# ── Deep Focus Mode ─────────────────────────────────────────────
|
|
|
|
|
# "deep" = single-problem context; "broad" = default multi-task.
|
|
|
|
|
focus_mode: Literal["deep", "broad"] = "broad"
|
|
|
|
|
|
2026-02-27 01:00:11 -05:00
|
|
|
    # ── Default Thinking ──────────────────────────────────────────────
    # When enabled, the agent starts an internal thought loop on server start.
    thinking_enabled: bool = True
    thinking_interval_seconds: int = 300  # 5 minutes between thoughts
    thinking_timeout_seconds: int = 120  # max wall-clock time per thinking cycle
    thinking_distill_every: int = 10  # distill facts from thoughts every Nth thought
    thinking_issue_every: int = 20  # file Gitea issues from thoughts every Nth thought
    thinking_memory_check_every: int = 50  # check memory status every Nth thought
    thinking_idle_timeout_minutes: int = 60  # pause thoughts after N minutes without user input
|
feat: add Gitea issue creation — Timmy's self-improvement channel
Give Timmy the ability to file Gitea issues when he notices bugs,
stale state, or improvement opportunities in his own codebase.
Components:
- GiteaHand async API client (infrastructure/hands/gitea.py)
- Token auth with ~/.config/gitea/token fallback
- Create/list/close issues, dedup by title similarity
- Graceful degradation when Gitea unreachable
- Tool functions (timmy/tools_gitea.py)
- create_gitea_issue: file issues with dedup + work order bridge
- list_gitea_issues: check existing backlog
- Classified as SAFE (no confirmation needed)
- Thinking post-hook (_maybe_file_issues in thinking.py)
- Every 20 thoughts, LLM classifies recent thoughts for actionable items
- Auto-files bugs/improvements to Gitea with dedup
- Bridges to local work order system for dashboard tracking
- Config: gitea_url, gitea_token, gitea_repo, gitea_enabled,
gitea_timeout, thinking_issue_every
All 1426 tests pass, 74.17% coverage.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-12 18:36:06 -04:00
|
|
|
|
|
|
|
|
    # ── Gitea Integration ─────────────────────────────────────────────
    # Local Gitea instance for issue tracking and self-improvement.
    # These values are passed as env vars to the gitea-mcp server process.
    gitea_url: str = "http://localhost:3000"
    gitea_token: str = ""  # GITEA_TOKEN env var; falls back to .timmy_gitea_token
    gitea_repo: str = "rockachopa/Timmy-time-dashboard"  # owner/repo
    gitea_enabled: bool = True

    # ── MCP Servers ────────────────────────────────────────────────────
    # External tool servers connected via Model Context Protocol (stdio).
    mcp_gitea_command: str = "gitea-mcp-server -t stdio"
    mcp_filesystem_command: str = "npx -y @modelcontextprotocol/server-filesystem"
    mcp_timeout: int = 15
    mcp_bridge_timeout: int = 60  # HTTP timeout for MCP bridge Ollama calls (seconds)

    # ── Backlog Triage Loop ────────────────────────────────────────────
    # Autonomous loop: fetch open issues, score, assign to agents.
    backlog_triage_enabled: bool = False
    # Seconds between triage cycles (default: 15 minutes).
    backlog_triage_interval_seconds: int = 900
    # When True, score and summarize but don't write to Gitea.
    backlog_triage_dry_run: bool = False
    # Create a daily triage summary issue/comment.
    backlog_triage_daily_summary: bool = True
|
|
|
|
|
|
feat: add Loop QA self-testing framework
Structured self-test framework that probes 6 capabilities (tool use,
multistep planning, memory read/write, self-coding, lightning econ) in
round-robin. Reuses existing infra: event_log for persistence,
create_task() for upgrade proposals, capture_error() for crash handling,
and in-memory circuit breaker for failure tracking.
- src/timmy/loop_qa.py: Capability enum, 6 async probes, orchestrator
- src/dashboard/routes/loop_qa.py: JSON + HTMX health endpoints
- HTMX partial polls every 30s on the health panel
- Background scheduler in app.py lifespan
- 25 tests covering probes, orchestrator, health snapshot, routes
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-11 22:33:16 -04:00
|
|
|
    # ── Loop QA (Self-Testing) ─────────────────────────────────────────
    # Self-test orchestrator that probes capabilities alongside the thinking loop.
    loop_qa_enabled: bool = True
    loop_qa_interval_ticks: int = 5  # run 1 self-test every Nth thinking tick (~25 min)
    loop_qa_upgrade_threshold: int = 3  # consecutive failures → file task
    loop_qa_max_per_hour: int = 12  # safety throttle

    # ── Vassal Protocol (Autonomous Orchestrator) ─────────────────────
    # Timmy as lead decision-maker: triage backlog, dispatch agents, monitor health.
    # See timmy/vassal/ for implementation.
    vassal_enabled: bool = False  # off by default — enable when Qwen3-14B is loaded
    vassal_cycle_interval: int = 300  # seconds between orchestration cycles (5 min)
    vassal_max_dispatch_per_cycle: int = 10  # cap on new dispatches per cycle
    vassal_stuck_threshold_minutes: int = 120  # minutes before agent issue is "stuck"
    vassal_idle_threshold_minutes: int = 30  # minutes before agent is "idle"

    # ── Paperclip AI — orchestration bridge ────────────────────────────
    # URL where the Paperclip server listens.
    # For VPS deployment behind nginx, use the public domain.
    paperclip_url: str = "http://localhost:3100"
    # Enable/disable the Paperclip integration.
    paperclip_enabled: bool = False
    # API key or auth-gate cookie for authenticating with Paperclip.
    paperclip_api_key: str = ""
    # Timmy's agent ID in the Paperclip org chart.
    paperclip_agent_id: str = ""
    # Company ID in Paperclip — required for most API calls.
    paperclip_company_id: str = ""
    # Timeout in seconds for Paperclip HTTP calls.
    paperclip_timeout: int = 30
    # How often (seconds) Timmy polls Paperclip for work (0 = disabled).
    paperclip_poll_interval: int = 0
|
|
|
|
|
|
2026-02-28 19:27:48 -05:00
|
|
|
    # ── OpenFang — vendored agent runtime ─────────────────────────────
    # URL where the OpenFang sidecar listens. Set to the Docker service
    # name when running in compose, or localhost for bare-metal dev.
    openfang_url: str = "http://localhost:8080"
    # Enable/disable OpenFang integration. When disabled, the tool
    # executor falls back to Timmy's native (simulated) execution.
    openfang_enabled: bool = False
    # Timeout in seconds for OpenFang hand execution (some hands are slow).
    openfang_timeout: int = 120

    # ── Autoresearch — autonomous ML experiment loops ──────────────────
    # Integrates Karpathy's autoresearch pattern: agents modify training
    # code, run time-boxed experiments, evaluate metrics, and iterate.
    autoresearch_enabled: bool = False
    autoresearch_workspace: str = "data/experiments"
    autoresearch_time_budget: int = 300  # seconds per experiment run
    autoresearch_max_iterations: int = 100
    autoresearch_metric: str = "val_bpb"  # metric to optimise (lower = better)

    # ── Weekly Narrative Summary ───────────────────────────────────────
    # Generates a human-readable weekly summary of development activity.
    # Disabling this will stop the weekly narrative generation.
    weekly_narrative_enabled: bool = True
    weekly_narrative_lookback_days: int = 7
    weekly_narrative_output_dir: str = ".loop"
|
|
|
|
|
|
2026-03-06 09:01:24 -05:00
|
|
|
    # ── Local Hands (Shell + Git) ──────────────────────────────────────
    # Enable local shell/git execution hands.
    hands_shell_enabled: bool = True
    # Default timeout in seconds for shell commands.
    hands_shell_timeout: int = 60
    # Comma-separated additional command prefixes to allow.
    hands_shell_extra_allowed: str = ""
    # Enable the git hand for version-control operations.
    hands_git_enabled: bool = True
    # Default timeout for git operations.
    hands_git_timeout: int = 60

    # ── Hermes Health Monitor ─────────────────────────────────────────
    # Enable the Hermes system health monitor (memory, disk, Ollama, processes, network).
    hermes_enabled: bool = True
    # How often Hermes runs a full health cycle (seconds). Default: 5 minutes.
    hermes_interval_seconds: int = 300
    # Alert threshold: free memory below this triggers model unloading / alert (GB).
    hermes_memory_free_min_gb: float = 4.0
    # Alert threshold: free disk below this triggers cleanup / alert (GB).
    hermes_disk_free_min_gb: float = 10.0

    # ── Error Logging ─────────────────────────────────────────────────
    error_log_enabled: bool = True
    error_log_dir: str = "logs"
    error_log_max_bytes: int = 5_242_880  # 5 MB
    error_log_backup_count: int = 5
    error_feedback_enabled: bool = True  # Auto-create bug report tasks
    error_dedup_window_seconds: int = 300  # 5-min dedup window
|
|
|
|
|
|
2026-03-23 18:23:52 +00:00
|
|
|
    # ── Bannerlord / GABS ────────────────────────────────────────────
    # GABS (Game Action Bridge Server) TCP JSON-RPC endpoint.
    # The GABS mod runs inside the Windows VM and exposes a JSON-RPC server
    # on port 4825 that Timmy uses to read and act on Bannerlord game state.
    # Set GABS_HOST to the VM's LAN IP (e.g. "10.0.0.50") to enable.
    gabs_enabled: bool = False
    gabs_host: str = "127.0.0.1"
    gabs_port: int = 4825
    gabs_timeout: float = 5.0  # socket timeout in seconds
    # How often (seconds) the observer polls GABS for fresh game state.
    gabs_poll_interval: int = 60
    # Path to the Bannerlord journal inside the memory vault.
    # Relative to repo root. Written by the GABS observer loop.
    gabs_journal_path: str = "memory/bannerlord/journal.md"

    # ── Scripture / Biblical Integration ──────────────────────────────
    # Enable the biblical text module.
    scripture_enabled: bool = True
    # Primary translation for retrieval and citation.
    scripture_translation: str = "ESV"
    # Meditation mode: sequential | thematic | lectionary
    scripture_meditation_mode: str = "sequential"
    # Background meditation interval in seconds (0 = disabled).
    scripture_meditation_interval: int = 0
|
|
|
|
|
|
2026-02-26 23:39:13 -05:00
|
|
|
def _compute_repo_root(self) -> str:
|
|
|
|
|
"""Auto-detect repo root if not set."""
|
|
|
|
|
if self.repo_root:
|
|
|
|
|
return self.repo_root
|
|
|
|
|
# Walk up from this file to find .git
|
|
|
|
|
import os
|
|
|
|
|
|
|
|
|
|
path = os.path.dirname(os.path.abspath(__file__))
|
|
|
|
|
path = os.path.dirname(os.path.dirname(path)) # src/ -> project root
|
|
|
|
|
while path != os.path.dirname(path):
|
|
|
|
|
if os.path.exists(os.path.join(path, ".git")):
|
|
|
|
|
return path
|
|
|
|
|
path = os.path.dirname(path)
|
|
|
|
|
return os.getcwd()
|
|
|
|
|
|
2026-03-12 21:40:32 -04:00
|
|
|
def model_post_init(self, __context) -> None:
|
|
|
|
|
"""Post-init: resolve gitea_token from file if not set via env."""
|
|
|
|
|
if not self.gitea_token:
|
2026-03-14 21:45:54 -04:00
|
|
|
# Priority: Timmy's own token → legacy admin token
|
|
|
|
|
repo_root = self._compute_repo_root()
|
|
|
|
|
timmy_token_path = os.path.join(repo_root, ".timmy_gitea_token")
|
|
|
|
|
legacy_token_path = os.path.expanduser("~/.config/gitea/token")
|
|
|
|
|
for token_path in (timmy_token_path, legacy_token_path):
|
|
|
|
|
try:
|
|
|
|
|
if os.path.isfile(token_path):
|
|
|
|
|
token = open(token_path).read().strip() # noqa: SIM115
|
|
|
|
|
if token:
|
|
|
|
|
self.gitea_token = token
|
|
|
|
|
break
|
|
|
|
|
except OSError:
|
|
|
|
|
pass
|
2026-03-12 21:40:32 -04:00
|
|
|
|
2026-02-19 19:31:48 +00:00
|
|
|
    # Pydantic settings loader config: read overrides from a local .env
    # file (UTF-8) and ignore unknown keys so unrelated env entries in
    # .env don't break startup.
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Module-level singleton — import `settings`; don't instantiate Settings again.
settings = Settings()

# Ensure repo_root is computed if not set via env/.env.
if not settings.repo_root:
    settings.repo_root = settings._compute_repo_root()
|
2026-02-25 18:19:22 +00:00
|
|
|
|
2026-02-26 13:42:36 -05:00
|
|
|
# ── Model fallback configuration ────────────────────────────────────────────
|
2026-03-14 17:26:47 -04:00
|
|
|
# Fallback chains are now in settings.fallback_models / settings.vision_fallback_models.
|
|
|
|
|
# Override via env vars (FALLBACK_MODELS, VISION_FALLBACK_MODELS) or
|
|
|
|
|
# edit config/providers.yaml → fallback_chains.
|
2026-02-26 13:42:36 -05:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def check_ollama_model_available(model_name: str) -> bool:
    """Return True when *model_name* is installed on the local Ollama host.

    Queries Ollama's ``/api/tags`` endpoint and matches the requested name
    against installed tags: exact match, match on the base name (the part
    before ``:tag``), or prefix match. Any network or parse failure is
    logged at debug level and reported as "not available".
    """
    try:
        # Lazy imports keep module import time down.
        import json
        import urllib.request

        base_url = settings.normalized_ollama_url
        request = urllib.request.Request(
            f"{base_url}/api/tags",
            method="GET",
            headers={"Accept": "application/json"},
        )
        with urllib.request.urlopen(request, timeout=5) as resp:
            payload = json.loads(resp.read().decode())
        installed = [entry.get("name", "") for entry in payload.get("models", [])]
        for tag in installed:
            if model_name == tag or model_name == tag.split(":")[0]:
                return True
            if tag.startswith(model_name):
                return True
        return False
    except (OSError, ValueError) as exc:
        _startup_logger.debug("Ollama model check failed: %s", exc)
        return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_effective_ollama_model() -> str:
    """Return the Ollama model to actually use, applying fallback logic.

    Prefers the user-configured ``settings.ollama_model``. When that model
    is not installed locally, walks the configurable
    ``settings.fallback_models`` chain and returns the first available
    entry (logging a warning). If nothing in the chain is available,
    returns the user's setting unchanged.
    """
    preferred = settings.ollama_model

    if check_ollama_model_available(preferred):
        return preferred

    # Walk the configurable fallback chain
    for candidate in settings.fallback_models:
        if not check_ollama_model_available(candidate):
            continue
        _startup_logger.warning(
            "Requested model '%s' not available. Using fallback: %s",
            preferred,
            candidate,
        )
        return candidate

    # Last resort - return user's setting and hope for the best
    return preferred
|
|
|
|
|
|
|
|
|
|
|
2026-02-25 18:19:22 +00:00
|
|
|
# ── Startup validation ───────────────────────────────────────────────────────
_startup_logger = _logging.getLogger("config")

# Module-level guard so validation runs at most once per process.
_startup_validated = False


def validate_startup(*, force: bool = False) -> None:
    """Enforce security requirements — call from app entry points, not import.

    Skipped in test mode (TIMMY_TEST_MODE=1) unless force=True.
    In production: sys.exit(1) if required secrets are missing.
    In development: log warnings only.
    """
    global _startup_validated

    # Idempotent unless explicitly forced.
    if _startup_validated and not force:
        return

    # Test runs skip validation entirely (but still mark it done).
    if os.environ.get("TIMMY_TEST_MODE") == "1" and not force:
        _startup_validated = True
        return

    if settings.timmy_env == "production":
        # Hard requirements: both L402 secrets must be present.
        missing = [
            name
            for name, value in (
                ("L402_HMAC_SECRET", settings.l402_hmac_secret),
                ("L402_MACAROON_SECRET", settings.l402_macaroon_secret),
            )
            if not value
        ]
        if missing:
            _startup_logger.error(
                "PRODUCTION SECURITY ERROR: The following secrets must be set: %s\n"
                'Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"\n'
                "Set in .env file or environment variables.",
                ", ".join(missing),
            )
            sys.exit(1)
        if "*" in settings.cors_origins:
            _startup_logger.error(
                "PRODUCTION SECURITY ERROR: CORS wildcard '*' is not allowed "
                "in production. Set CORS_ORIGINS to explicit origins."
            )
            sys.exit(1)
        _startup_logger.info("Production mode: security secrets validated ✓")
    else:
        # Development: warn about anything that would block a prod deploy.
        if "*" in settings.cors_origins:
            _startup_logger.warning(
                "SEC: CORS_ORIGINS contains wildcard '*' — "
                "restrict to explicit origins before deploying to production."
            )
        if not settings.l402_hmac_secret:
            _startup_logger.warning(
                "SEC: L402_HMAC_SECRET is not set — "
                "set a unique secret in .env before deploying to production."
            )
        if not settings.l402_macaroon_secret:
            _startup_logger.warning(
                "SEC: L402_MACAROON_SECRET is not set — "
                "set a unique secret in .env before deploying to production."
            )

    _startup_validated = True
|