# Timmy Time — Mission Control
# Copy this file to .env and uncomment lines you want to override.
# .env is gitignored and never committed.

# Ollama host (default: http://localhost:11434)
# Override if Ollama is running on another machine or port.
# OLLAMA_URL=http://localhost:11434

# LLM model to use via Ollama (default: llama3.2)
# OLLAMA_MODEL=llama3.2

# Enable FastAPI interactive docs at /docs and /redoc (default: false)
# DEBUG=true

# ── AirLLM / big-brain backend ───────────────────────────────────────────────
# Inference backend: "ollama" (default) | "airllm" | "auto"
# "auto" → uses AirLLM on Apple Silicon if installed, otherwise Ollama.
# Requires: pip install ".[bigbrain]"
# TIMMY_MODEL_BACKEND=ollama

# AirLLM model size (default: 70b).
# 8b ~16 GB RAM | 70b ~140 GB RAM | 405b ~810 GB RAM
# AIRLLM_MODEL_SIZE=70b

# ── L402 Lightning secrets ───────────────────────────────────────────────────
# HMAC secret for invoice verification. MUST be changed in production.
# Generate with: python3 -c "import secrets; print(secrets.token_hex(32))"
# L402_HMAC_SECRET=<your-secret-here>

# HMAC secret for macaroon signing. MUST be changed in production.
# L402_MACAROON_SECRET=<your-secret-here>

# Lightning backend: "mock" (default) | "lnd"
# LIGHTNING_BACKEND=mock

# ── Telegram bot ──────────────────────────────────────────────────────────────
# Bot token from @BotFather on Telegram.
# Alternatively, configure via the /telegram/setup dashboard endpoint at runtime.
# Requires: pip install ".[telegram]"
# TELEGRAM_TOKEN=