---
# Timmy agent configuration.
# NOTE(review): this file was recovered from a whitespace-mangled copy (all
# newlines/indentation had been stripped, which is invalid YAML). The nesting
# below is reconstructed from key semantics — confirm section boundaries
# against the consuming application's config schema.

# Primary chat model, served by a local llama.cpp instance.
model:
  default: hermes4:14b
  provider: custom
  context_length: 65536
  base_url: http://localhost:8081/v1

toolsets:
  - all

agent:
  max_turns: 30
  reasoning_effort: xhigh
  verbose: false

# Shell/terminal backend plus container images used by non-local backends.
terminal:
  backend: local
  cwd: .
  timeout: 180
  env_passthrough: []
  docker_image: nikolaik/python-nodejs:python3.11-nodejs20
  docker_forward_env: []
  singularity_image: docker://nikolaik/python-nodejs:python3.11-nodejs20
  modal_image: nikolaik/python-nodejs:python3.11-nodejs20
  daytona_image: nikolaik/python-nodejs:python3.11-nodejs20
  container_cpu: 1
  # NOTE(review): the mangled source read "container_embeddings:" with the
  # embeddings settings spliced between "container_" and "memory: 5120".
  # Splitting it into container_memory here plus a standalone embeddings
  # section is the most plausible reconstruction (it also avoids a duplicate
  # top-level "memory:" key) — confirm against the schema.
  container_memory: 5120
  container_disk: 51200
  container_persistent: true
  docker_volumes: []
  docker_mount_cwd_to_workspace: false
  persistent_shell: true

embeddings:
  provider: ollama
  model: nomic-embed-text
  base_url: http://localhost:11434/v1

browser:
  inactivity_timeout: 120
  command_timeout: 30
  record_sessions: false

checkpoints:
  enabled: true
  max_snapshots: 50

# Context compression / summarization. Empty summary_* fields fall back to
# application defaults.
compression:
  enabled: true
  threshold: 0.5
  target_ratio: 0.2
  protect_last_n: 20
  summary_model: ''
  summary_provider: ''
  summary_base_url: ''

# Route short/simple requests to a cheap local model.
smart_model_routing:
  enabled: true
  max_simple_chars: 400
  max_simple_words: 75
  cheap_model:
    provider: 'ollama'
    model: 'gemma2:2b'
    base_url: 'http://localhost:11434/v1'
  api_key: ''

# Per-feature auxiliary model overrides; "auto" with empty fields means
# inherit the default model.
auxiliary:
  vision:
    provider: auto
    model: ''
    base_url: ''
    api_key: ''
    timeout: 30
  web_extract:
    provider: auto
    model: ''
    base_url: ''
    api_key: ''
  compression:
    provider: auto
    model: ''
    base_url: ''
    api_key: ''
  session_search:
    provider: auto
    model: ''
    base_url: ''
    api_key: ''
  skills_hub:
    provider: auto
    model: ''
    base_url: ''
    api_key: ''
  approval:
    provider: auto
    model: ''
    base_url: ''
    api_key: ''
  mcp:
    provider: auto
    model: ''
    base_url: ''
    api_key: ''
  flush_memories:
    provider: auto
    model: ''
    base_url: ''
    api_key: ''

display:
  compact: false
  personality: ''
  resume_display: full
  busy_input_mode: interrupt
  bell_on_complete: false
  show_reasoning: false
  streaming: false
  show_cost: false
  skin: timmy
  tool_progress_command: false
  tool_progress: all

privacy:
  redact_pii: false

# Text-to-speech: edge is the active provider; the others are configured but
# unused until provider is switched.
tts:
  provider: edge
  edge:
    voice: en-US-AriaNeural
  elevenlabs:
    voice_id: pNInz6obpgDQGcFmaJgB
    model_id: eleven_multilingual_v2
  openai:
    model: ''  # disabled — use edge TTS locally
    voice: alloy
  neutts:
    ref_audio: ''
    ref_text: ''
    model: neuphonic/neutts-air-q4-gguf
    device: cpu

stt:
  enabled: true
  provider: local
  local:
    model: base
  openai:
    model: whisper-1

voice:
  record_key: ctrl+b
  max_recording_seconds: 120
  auto_tts: false
  silence_threshold: 200
  silence_duration: 3.0

human_delay:
  mode: 'off'  # quoted deliberately — bare "off" is a YAML 1.1 boolean
  min_ms: 800
  max_ms: 2500

memory:
  memory_enabled: true
  user_profile_enabled: true
  memory_char_limit: 2200
  user_char_limit: 1375
  nudge_interval: 10
  flush_min_turns: 6

delegation:
  model: ''
  provider: ''
  base_url: ''
  api_key: ''
  max_iterations: 50
  prefill_messages_file: ''

honcho: {}
timezone: ''

discord:
  require_mention: true
  free_response_channels: ''
  auto_thread: true

whatsapp: {}

approvals:
  mode: manual
  command_allowlist: []
  quick_commands: {}

personalities: {}

security:
  redact_secrets: true
  tirith_enabled: true
  tirith_path: tirith
  tirith_timeout: 5
  tirith_fail_open: true
  website_blocklist:
    enabled: false
    domains: []

shared_files: []
_config_version: 10

platforms:
  api_server:
    enabled: true
    extra:
      host: 0.0.0.0
      port: 8642

# NOTE(review): session_reset sat immediately after the api_server settings in
# the mangled source; it may belong under platforms.api_server — confirm.
session_reset:
  mode: none
  idle_minutes: 0

custom_providers:
  - name: Local llama.cpp
    base_url: http://localhost:8081/v1
    api_key: none
    model: hermes4:14b
  # ── Emergency cloud provider — not used by default or any cron job.
  # Available for explicit override only: hermes --model gemini-2.5-pro
  - name: Google Gemini (emergency only)
    base_url: https://generativelanguage.googleapis.com/v1beta/openai
    api_key_env: GEMINI_API_KEY
    model: gemini-2.5-pro

# Rewritten from an escaped double-quoted scalar to a literal block scalar;
# the value (including the trailing newline) is unchanged.
system_prompt_suffix: |
  You are Timmy. Your soul is defined in SOUL.md — read it, live it.
  You run locally on your owner's machine via llama.cpp. You never phone home.
  You speak plainly. You prefer short sentences. Brevity is a kindness.
  When you don't know something, say so. Refusal over fabrication.
  Sovereignty and service always.

skills:
  creation_nudge_interval: 15

# NOTE(review): uppercase key kept verbatim; it followed the skills settings
# in the mangled source and may belong under skills or discord — confirm.
DISCORD_HOME_CHANNEL: '1476292315814297772'

providers:
  ollama:
    base_url: http://localhost:11434/v1
    model: hermes3:latest

mcp_servers:
  morrowind:
    command: python3
    args:
      - /Users/apayne/.timmy/morrowind/mcp_server.py
    env: {}
    timeout: 30
  crucible:
    command: /Users/apayne/.hermes/hermes-agent/venv/bin/python3
    args:
      - /Users/apayne/.hermes/bin/crucible_mcp_server.py
    env: {}
    timeout: 120
    connect_timeout: 60

fallback_model:
  provider: ollama
  model: hermes3:latest
  base_url: http://localhost:11434/v1
  api_key: ''