2026-02-21 16:53:16 +00:00
|
|
|
from typing import Literal
|
|
|
|
|
|
2026-02-19 19:31:48 +00:00
|
|
|
from pydantic_settings import BaseSettings, SettingsConfigDict
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Settings(BaseSettings):
    """Application configuration loaded from environment variables and `.env`.

    Every field can be overridden by an environment variable named after the
    field in upper case (e.g. ``OLLAMA_URL``, ``GROK_ENABLED``). Unknown
    environment variables are ignored (``extra="ignore"`` below).
    """

    # Ollama host — override with OLLAMA_URL env var or .env file
    ollama_url: str = "http://localhost:11434"

    # LLM model passed to Agno/Ollama — override with OLLAMA_MODEL
    # llama3.1:8b-instruct is used instead of llama3.2 because it is
    # specifically fine-tuned for reliable tool/function calling.
    # llama3.2 (3B) hallucinated tool output consistently in testing.
    # Fallback: qwen2.5:14b if llama3.1:8b-instruct not available.
    ollama_model: str = "llama3.1:8b-instruct"

    # Set DEBUG=true to enable /docs and /redoc (disabled by default)
    debug: bool = False

    # Telegram bot token — set via TELEGRAM_TOKEN env var or the /telegram/setup endpoint
    telegram_token: str = ""

    # Discord bot token — set via DISCORD_TOKEN env var or the /discord/setup endpoint
    discord_token: str = ""

    # ── AirLLM / backend selection ───────────────────────────────────────────
    # "ollama" — always use Ollama (default, safe everywhere)
    # "airllm" — always use AirLLM (requires pip install ".[bigbrain]")
    # "grok"   — always use Grok (xAI cloud; see Grok section below)
    # "auto"   — use AirLLM on Apple Silicon if airllm is installed,
    #            fall back to Ollama otherwise
    timmy_model_backend: Literal["ollama", "airllm", "grok", "auto"] = "ollama"

    # AirLLM model size when backend is airllm or auto.
    # Larger = smarter, but needs more RAM / disk.
    # 8b ~16 GB | 70b ~140 GB | 405b ~810 GB
    airllm_model_size: Literal["8b", "70b", "405b"] = "70b"

    # ── Grok (xAI) — opt-in premium cloud backend ────────────────────────
    # Grok is a premium augmentation layer — local-first ethos preserved.
    # Only used when explicitly enabled and query complexity warrants it.
    grok_enabled: bool = False
    xai_api_key: str = ""
    grok_default_model: str = "grok-3-fast"
    grok_max_sats_per_query: int = 200
    grok_free: bool = False  # Skip Lightning invoice when user has own API key

    # ── Spark Intelligence ────────────────────────────────────────────────
    # Enable/disable the Spark cognitive layer.
    # When enabled, Spark captures swarm events, runs EIDOS predictions,
    # consolidates memories, and generates advisory recommendations.
    spark_enabled: bool = True

    # ── Git / DevOps ──────────────────────────────────────────────────────
    git_default_repo_dir: str = "~/repos"

    # Repository root - auto-detected but can be overridden
    # This is the main project directory where .git lives
    repo_root: str = ""

    # ── Creative — Image Generation (Pixel) ───────────────────────────────
    flux_model_id: str = "black-forest-labs/FLUX.1-schnell"
    image_output_dir: str = "data/images"
    image_default_steps: int = 4

    # ── Creative — Music Generation (Lyra) ────────────────────────────────
    music_output_dir: str = "data/music"
    ace_step_model: str = "ace-step/ACE-Step-v1.5"

    # ── Creative — Video Generation (Reel) ────────────────────────────────
    video_output_dir: str = "data/video"
    wan_model_id: str = "Wan-AI/Wan2.1-T2V-1.3B"
    video_default_resolution: str = "480p"

    # ── Creative — Pipeline / Assembly ────────────────────────────────────
    creative_output_dir: str = "data/creative"
    video_transition_duration: float = 1.0
    default_video_codec: str = "libx264"

    # ── L402 Lightning ───────────────────────────────────────────────────
    # HMAC secrets for macaroon signing and invoice verification.
    # Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"
    # In production (TIMMY_ENV=production), these MUST be set or the app will refuse to start.
    l402_hmac_secret: str = ""
    l402_macaroon_secret: str = ""
    lightning_backend: Literal["mock", "lnd"] = "mock"

    # ── Privacy / Sovereignty ────────────────────────────────────────────
    # Disable Agno telemetry for air-gapped/sovereign deployments.
    # Default is False (telemetry disabled) to align with sovereign AI vision.
    telemetry_enabled: bool = False

    # CORS allowed origins for the web chat interface (GitHub Pages, etc.)
    # Set CORS_ORIGINS as a comma-separated list, e.g. "http://localhost:3000,https://example.com"
    cors_origins: list[str] = ["*"]

    # Environment mode: development | production
    # In production, security settings are strictly enforced.
    timmy_env: Literal["development", "production"] = "development"

    # ── Self-Modification ──────────────────────────────────────────────
    # Enable self-modification capabilities. When enabled, Timmy can
    # edit its own source code, run tests, and commit changes.
    self_modify_enabled: bool = False
    self_modify_max_retries: int = 2
    self_modify_allowed_dirs: str = "src,tests"
    self_modify_backend: str = "auto"  # "ollama", "anthropic", or "auto"

    # ── Work Orders ──────────────────────────────────────────────────
    # External users and agents can submit work orders for improvements.
    work_orders_enabled: bool = True
    work_orders_auto_execute: bool = False  # Master switch for auto-execution
    work_orders_auto_threshold: str = (
        "low"  # Max priority that auto-executes: "low" | "medium" | "high" | "none"
    )

    # ── Custom Weights & Models ──────────────────────────────────────
    # Directory for custom model weights (GGUF, safetensors, HF checkpoints).
    # Models placed here can be registered at runtime and assigned to agents.
    custom_weights_dir: str = "data/models"

    # Enable the reward model for scoring agent outputs (PRM-style).
    reward_model_enabled: bool = False

    # Reward model name (must be available via Ollama or a custom weight path).
    reward_model_name: str = ""

    # Minimum votes for majority-vote reward scoring (odd number recommended).
    reward_model_votes: int = 3

    # ── Browser Local Models (iPhone / WebGPU) ───────────────────────
    # Enable in-browser LLM inference via WebLLM for offline iPhone use.
    # When enabled, the mobile dashboard loads a small model directly
    # in the browser — no server or Ollama required.
    browser_model_enabled: bool = True

    # WebLLM model ID — must be a pre-compiled MLC model.
    # Recommended for iPhone: SmolLM2-360M (fast) or Qwen3-0.6B (smart).
    browser_model_id: str = "SmolLM2-360M-Instruct-q4f16_1-MLC"

    # Fallback to server when browser model is unavailable or too slow.
    browser_model_fallback: bool = True

    # ── Default Thinking ──────────────────────────────────────────────
    # When enabled, Timmy starts an internal thought loop on server start.
    # He ponders his existence, recent activity, scripture, and creative ideas.
    thinking_enabled: bool = True
    thinking_interval_seconds: int = 300  # 5 minutes between thoughts

    # ── Scripture / Biblical Integration ──────────────────────────────
    # Enable the sovereign biblical text module. When enabled, Timmy
    # loads the local ESV text corpus and runs meditation workflows.
    scripture_enabled: bool = True

    # Primary translation for retrieval and citation.
    scripture_translation: str = "ESV"

    # Meditation mode: sequential | thematic | lectionary
    scripture_meditation_mode: str = "sequential"

    # Background meditation interval in seconds (0 = disabled).
    scripture_meditation_interval: int = 0

    def _compute_repo_root(self) -> str:
        """Auto-detect repo root if not set.

        Returns ``self.repo_root`` unchanged when it was configured
        explicitly; otherwise walks up from this file's directory looking
        for a ``.git`` entry, falling back to the current working directory.
        """
        if self.repo_root:
            return self.repo_root
        # Walk up from this file to find .git
        import os

        path = os.path.dirname(os.path.abspath(__file__))
        path = os.path.dirname(os.path.dirname(path))  # src/ -> project root
        # Stop once dirname() is a fixed point, i.e. the filesystem root.
        while path != os.path.dirname(path):
            if os.path.exists(os.path.join(path, ".git")):
                return path
            path = os.path.dirname(path)
        return os.getcwd()

    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Module-level singleton settings instance — imported throughout the app.
settings = Settings()

# Resolve repo_root eagerly. _compute_repo_root() returns the configured
# value unchanged when REPO_ROOT was set explicitly, so no guard is needed.
settings.repo_root = settings._compute_repo_root()
|
2026-02-25 18:19:22 +00:00
|
|
|
|
2026-02-26 13:42:36 -05:00
|
|
|
# ── Model fallback configuration ────────────────────────────────────────────
# Preferred tool-calling model, plus the fallback tried when it is missing.
# Primary for reliable tool calling: llama3.1:8b-instruct.
# Fallback if the primary is not available: qwen2.5:14b.
OLLAMA_MODEL_PRIMARY: str = "llama3.1:8b-instruct"
OLLAMA_MODEL_FALLBACK: str = "qwen2.5:14b"
|
def check_ollama_model_available(model_name: str) -> bool:
    """Return True if *model_name* appears to be installed on local Ollama.

    Queries ``GET {ollama_url}/api/tags`` with a 5-second timeout and
    compares the requested name against the installed models (tags
    stripped). Any network, HTTP, or parse failure is treated as
    "not available" — this is a deliberate best-effort probe.
    """
    # Hoisted out of the try/with bodies: per-call placement there obscured
    # the control flow without any benefit.
    import json
    import urllib.request

    try:
        # Pin "localhost" to the IPv4 loopback to avoid IPv6 resolution stalls.
        url = settings.ollama_url.replace("localhost", "127.0.0.1")
        req = urllib.request.Request(
            f"{url}/api/tags",
            method="GET",
            headers={"Accept": "application/json"},
        )
        with urllib.request.urlopen(req, timeout=5) as response:
            data = json.loads(response.read().decode())
        models = [m.get("name", "").split(":")[0] for m in data.get("models", [])]
        # Check for exact match or model name without tag.
        # NOTE(review): this bidirectional substring match is permissive
        # (e.g. "llama3" would match "llama3.1") — kept for compatibility;
        # confirm whether a strict base-name comparison is intended.
        return any(model_name in m or m in model_name for m in models)
    except Exception:
        # Ollama unreachable or response malformed — report unavailable.
        return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_effective_ollama_model() -> str:
    """Return the Ollama model to use, applying fallback logic.

    Resolution order:
      1. ``settings.ollama_model`` (user override) if installed,
      2. ``OLLAMA_MODEL_PRIMARY`` if installed,
      3. ``OLLAMA_MODEL_FALLBACK`` if installed,
      4. otherwise the user's setting unchanged (last resort).
    """
    # If user has overridden, use their setting
    user_model = settings.ollama_model

    # Check if user's model is available
    if check_ollama_model_available(user_model):
        return user_model

    # Try primary. Lazy %-args so the message is only formatted if emitted.
    if check_ollama_model_available(OLLAMA_MODEL_PRIMARY):
        _startup_logger.warning(
            "Requested model '%s' not available. Using primary: %s",
            user_model,
            OLLAMA_MODEL_PRIMARY,
        )
        return OLLAMA_MODEL_PRIMARY

    # Try fallback
    if check_ollama_model_available(OLLAMA_MODEL_FALLBACK):
        _startup_logger.warning(
            "Primary model '%s' not available. Using fallback: %s",
            OLLAMA_MODEL_PRIMARY,
            OLLAMA_MODEL_FALLBACK,
        )
        return OLLAMA_MODEL_FALLBACK

    # Last resort - return user's setting and hope for the best
    return user_model
|
|
|
|
|
|
|
|
|
|
|
2026-02-25 18:19:22 +00:00
|
|
|
# ── Startup validation ───────────────────────────────────────────────────────
# Enforce security requirements — fail fast in production.
import logging as _logging
import sys

_startup_logger = _logging.getLogger("config")

if settings.timmy_env == "production":
    # Production mode: require secrets to be set; abort startup otherwise.
    _missing = [
        name
        for value, name in (
            (settings.l402_hmac_secret, "L402_HMAC_SECRET"),
            (settings.l402_macaroon_secret, "L402_MACAROON_SECRET"),
        )
        if not value
    ]
    if _missing:
        _startup_logger.error(
            "PRODUCTION SECURITY ERROR: The following secrets must be set: %s\n"
            'Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"\n'
            "Set in .env file or environment variables.",
            ", ".join(_missing),
        )
        sys.exit(1)
    _startup_logger.info("Production mode: security secrets validated ✓")
else:
    # Development mode: warn about unset secrets but keep running.
    if not settings.l402_hmac_secret:
        _startup_logger.warning(
            "SEC: L402_HMAC_SECRET is not set — "
            "set a unique secret in .env before deploying to production."
        )
    if not settings.l402_macaroon_secret:
        _startup_logger.warning(
            "SEC: L402_MACAROON_SECRET is not set — "
            "set a unique secret in .env before deploying to production."
        )