fix: restore ollama as default backend to fix broken build (#161)

This commit is contained in:
Alexander Whitestone
2026-03-10 18:17:47 -04:00
committed by GitHub
parent 755b7e7658
commit 1de97619e8
2 changed files with 4 additions and 9 deletions

View File

@@ -41,13 +41,12 @@ class Settings(BaseSettings):
# "airllm" — always use AirLLM (requires pip install ".[bigbrain]")
# "auto" — use AirLLM on Apple Silicon if airllm is installed,
# fall back to Ollama otherwise
-timmy_model_backend: Literal["ollama", "airllm", "grok", "claude", "auto"] = "airllm"
+timmy_model_backend: Literal["ollama", "airllm", "grok", "claude", "auto"] = "ollama"
# AirLLM model size when backend is airllm or auto.
# Larger = smarter, but needs more RAM / disk.
# 8b ~16 GB | 70b ~140 GB | 405b ~810 GB
# Optimization: Llama-3.3-70B-Instruct-MLX is recommended for Mac M3 36GB.
-airllm_model_size: str = "70b"
+airllm_model_size: Literal["8b", "70b", "405b"] = "70b"
# ── Grok (xAI) — opt-in premium cloud backend ────────────────────────
# Grok is a premium augmentation layer — local-first ethos preserved.