# forked from Rockachopa/Timmy-time-dashboard
#
# Bridges Telegram messages to Timmy via python-telegram-bot (optional
# dependency). The bot token can be supplied through the TELEGRAM_TOKEN env
# var or at runtime via the POST /telegram/setup dashboard endpoint, which
# (re)starts the bot without an app restart.
#
# Changes:
#   - src/telegram_bot/bot.py — TelegramBot singleton: token persistence
#     (telegram_state.json), lifecycle (start/stop), /start command and
#     message handler that forwards to Timmy
#   - src/dashboard/routes/telegram.py — /telegram/setup and /telegram/status
#     FastAPI routes
#   - src/dashboard/app.py — register telegram router; auto-start/stop bot in
#     lifespan hook
#   - src/config.py — TELEGRAM_TOKEN setting (pydantic-settings)
#   - pyproject.toml — [telegram] optional extra (python-telegram-bot>=21),
#     telegram_bot wheel include
#   - .env.example — TELEGRAM_TOKEN section
#   - .gitignore — exclude telegram_state.json (contains token)
#   - tests/conftest.py — stub telegram/telegram.ext for offline test runs
#   - tests/test_telegram_bot.py — 16 tests covering token helpers, lifecycle,
#     and all dashboard routes (370 total, all passing)
from typing import Literal
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
    """Runtime configuration, sourced from environment variables and ``.env``.

    Each field maps to its upper-cased env var (``OLLAMA_URL``, ``DEBUG``,
    ``TELEGRAM_TOKEN``, ...). Unrecognized environment entries are ignored
    rather than rejected, so the app coexists with unrelated env vars.
    """

    # Base URL of the Ollama server (env: OLLAMA_URL).
    ollama_url: str = "http://localhost:11434"

    # Model name handed to Agno/Ollama (env: OLLAMA_MODEL).
    ollama_model: str = "llama3.2"

    # When true, FastAPI exposes /docs and /redoc (env: DEBUG). Off by default.
    debug: bool = False

    # Telegram bot token (env: TELEGRAM_TOKEN). May also be supplied later at
    # runtime through the /telegram/setup endpoint; empty string means unset.
    telegram_token: str = ""

    # Which LLM backend drives the model (env: TIMMY_MODEL_BACKEND):
    #   "ollama" — always Ollama (default, safe everywhere)
    #   "airllm" — always AirLLM (requires pip install ".[bigbrain]")
    #   "auto"   — AirLLM on Apple Silicon when airllm is installed,
    #              falling back to Ollama otherwise
    timmy_model_backend: Literal["ollama", "airllm", "auto"] = "ollama"

    # AirLLM checkpoint size, used when the backend is airllm or auto.
    # Bigger = smarter, but hungrier for RAM / disk:
    #   8b ~16 GB | 70b ~140 GB | 405b ~810 GB
    airllm_model_size: Literal["8b", "70b", "405b"] = "70b"

    # pydantic-settings behavior: read UTF-8 ".env" files and silently skip
    # env entries that do not correspond to a declared field.
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )
# Module-level singleton imported by the rest of the app; values are resolved
# from the environment / .env once, at import time.
settings = Settings()