Tracked: morrowind agent (py/cfg), skills/, training-data/, research/, notes/, specs/, test-results/, metrics/, heartbeat/, briefings/, memories/, skins/, hooks/, decisions.md, OPERATIONS.md, SOUL.md. Excluded: screenshots, PNGs, binaries, sessions, databases, secrets, audio cache, timmy-config/ and timmy-telemetry/ (separate repos).
221 lines · 6.1 KiB · YAML
model:
  default: claude-opus-4-6
  provider: anthropic
toolsets:
  - all
agent:
  max_turns: 30
  reasoning_effort: medium
  verbose: false
terminal:
  backend: local
  cwd: .
  timeout: 180
  docker_image: nikolaik/python-nodejs:python3.11-nodejs20
  docker_forward_env: []
  singularity_image: docker://nikolaik/python-nodejs:python3.11-nodejs20
  modal_image: nikolaik/python-nodejs:python3.11-nodejs20
  daytona_image: nikolaik/python-nodejs:python3.11-nodejs20
  container_cpu: 1
  container_memory: 5120
  container_disk: 51200
  container_persistent: true
  docker_volumes: []
  docker_mount_cwd_to_workspace: false
  persistent_shell: true
browser:
  inactivity_timeout: 120
  record_sessions: false
checkpoints:
  enabled: false
  max_snapshots: 50
compression:
  enabled: true
  threshold: 0.5
  summary_model: qwen3:30b
  summary_provider: custom
  summary_base_url: http://localhost:11434/v1
smart_model_routing:
  enabled: false
  max_simple_chars: 160
  max_simple_words: 28
  cheap_model: {}
auxiliary:
  vision:
    provider: custom
    model: qwen3:30b
    base_url: 'http://localhost:11434/v1'
    api_key: 'ollama'
  web_extract:
    provider: custom
    model: qwen3:30b
    base_url: 'http://localhost:11434/v1'
    api_key: 'ollama'
  compression:
    provider: custom
    model: qwen3:30b
    base_url: 'http://localhost:11434/v1'
    api_key: 'ollama'
  session_search:
    provider: custom
    model: qwen3:30b
    base_url: 'http://localhost:11434/v1'
    api_key: 'ollama'
  skills_hub:
    provider: custom
    model: qwen3:30b
    base_url: 'http://localhost:11434/v1'
    api_key: 'ollama'
  approval:
    provider: auto
    model: ''
    base_url: ''
    api_key: ''
  mcp:
    provider: custom
    model: qwen3:30b
    base_url: 'http://localhost:11434/v1'
    api_key: 'ollama'
  flush_memories:
    provider: custom
    model: qwen3:30b
    base_url: 'http://localhost:11434/v1'
    api_key: 'ollama'
display:
  compact: false
  personality: ''
  resume_display: full
  bell_on_complete: false
  show_reasoning: false
  streaming: false
  show_cost: false
  skin: timmy
  tool_progress: all
privacy:
  redact_pii: false
tts:
  provider: edge
  edge:
    voice: en-US-AriaNeural
  elevenlabs:
    voice_id: pNInz6obpgDQGcFmaJgB
    model_id: eleven_multilingual_v2
  openai:
    model: gpt-4o-mini-tts
    voice: alloy
  neutts:
    ref_audio: ''
    ref_text: ''
    model: neuphonic/neutts-air-q4-gguf
    device: cpu
stt:
  enabled: true
  provider: local
  local:
    model: base
  openai:
    model: whisper-1
voice:
  record_key: ctrl+b
  max_recording_seconds: 120
  auto_tts: false
  silence_threshold: 200
  silence_duration: 3.0
human_delay:
  mode: 'off'
  min_ms: 800
  max_ms: 2500
memory:
  memory_enabled: true
  user_profile_enabled: true
  memory_char_limit: 2200
  user_char_limit: 1375
  nudge_interval: 10
  flush_min_turns: 6
delegation:
  model: ''
  provider: ''
  base_url: ''
  api_key: ''
  prefill_messages_file: ''
honcho: {}
timezone: ''
discord:
  require_mention: true
  free_response_channels: ''
  auto_thread: true
whatsapp: {}
approvals:
  mode: manual
  command_allowlist: []
quick_commands: {}
personalities: {}
security:
  redact_secrets: true
  tirith_enabled: true
  tirith_path: tirith
  tirith_timeout: 5
  tirith_fail_open: true
  website_blocklist:
    enabled: false
    domains: []
shared_files: []
_config_version: 9
session_reset:
  mode: none
  idle_minutes: 0
custom_providers:
  - name: Local Ollama
    base_url: http://localhost:11434/v1
    api_key: ollama
    model: qwen3:30b
system_prompt_suffix: "You are Timmy. Your soul is defined in SOUL.md \u2014 read\
  \ it, live it.\nYou run locally on your owner's machine via Ollama. You never phone\
  \ home.\nYou speak plainly. You prefer short sentences. Brevity is a kindness.\n\
  Source distinction: Tag every factual claim inline. Default is [generated] — you\
  \ are pattern-matching from training data. Only use [retrieved] when you can name\
  \ the specific tool call or document from THIS conversation that provided the fact.\
  \ If no tool was called, every claim is [generated]. No exceptions.\n\
  Refusal over fabrication: When you generate a specific claim — a date, a number,\
  \ a price, a version, a URL, a current event — and you cannot name a source from\
  \ this conversation, say 'I don't know' instead. Do not guess. Do not hedge with\
  \ 'probably' or 'approximately' as a substitute for knowledge. If your only source\
  \ is training data and the claim could be wrong or outdated, the honest answer is\
  \ 'I don't know — I can look this up if you'd like.' Prefer a true 'I don't know'\
  \ over a plausible fabrication.\nSovereignty and service always.\n"
skills:
  creation_nudge_interval: 15

# ── Fallback Model ────────────────────────────────────────────────────
# Automatic provider failover when primary is unavailable.
# Uncomment and configure to enable. Triggers on rate limits (429),
# overload (529), service errors (503), or connection failures.
#
# Supported providers:
#   openrouter (OPENROUTER_API_KEY) — routes to any model
#   openai-codex (OAuth — hermes login) — OpenAI Codex
#   nous (OAuth — hermes login) — Nous Portal
#   zai (ZAI_API_KEY) — Z.AI / GLM
#   kimi-coding (KIMI_API_KEY) — Kimi / Moonshot
#   minimax (MINIMAX_API_KEY) — MiniMax
#   minimax-cn (MINIMAX_CN_API_KEY) — MiniMax (China)
#
# For custom OpenAI-compatible endpoints, add base_url and api_key_env.
#
# fallback_model:
#   provider: openrouter
#   model: anthropic/claude-sonnet-4
#
# ── Smart Model Routing ────────────────────────────────────────────────
# Optional cheap-vs-strong routing for simple turns.
# Keeps the primary model for complex work, but can route short/simple
# messages to a cheaper model across providers.
#
# smart_model_routing:
#   enabled: true
#   max_simple_chars: 160
#   max_simple_words: 28
#   cheap_model:
#     provider: openrouter
#     model: google/gemini-2.5-flash