# Timmy Time — Mission Control
# Copy this file to .env and uncomment lines you want to override.
# .env is gitignored and never committed.

# Ollama host (default: http://localhost:11434)
# Override if Ollama is running on another machine or port.
# OLLAMA_URL=http://localhost:11434

# LLM model to use via Ollama (default: llama3.2)
# OLLAMA_MODEL=llama3.2

# Enable FastAPI interactive docs at /docs and /redoc (default: false)
# DEBUG=true

# ── AirLLM / big-brain backend ───────────────────────────────────────────────

# Inference backend: "ollama" (default) | "airllm" | "auto"
# "auto" → uses AirLLM on Apple Silicon if installed, otherwise Ollama.
# Requires: pip install ".[bigbrain]"
# TIMMY_MODEL_BACKEND=ollama

# AirLLM model size (default: 70b).
# 8b ~16 GB RAM | 70b ~140 GB RAM | 405b ~810 GB RAM
# AIRLLM_MODEL_SIZE=70b