From c19be679f0a9ad79e5a814e137f56e702f0ab2fb Mon Sep 17 00:00:00 2001
From: Timmy
Date: Thu, 19 Mar 2026 19:47:33 -0400
Subject: [PATCH] refactor: remove airllm references from docs, docstrings, and README (#486)

---
 README.md                                  | 2 --
 TEST_COVERAGE_ANALYSIS.md                  | 2 +-
 docs/AUDIT_REPORT.md                       | 4 ++--
 docs/REFACTORING_PLAN.md                   | 2 +-
 docs/SECURITY.md                           | 2 +-
 docs/SOVEREIGN_AGI_RESEARCH.md             | 4 ++--
 docs/adr/020-cascade-router-integration.md | 6 +-----
 src/timmy/__init__.py                      | 2 +-
 tests/dashboard/test_mobile_scenarios.py   | 4 ++--
 tests/timmy/test_backends.py               | 2 +-
 10 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/README.md b/README.md
index 909ef86c..10da148c 100644
--- a/README.md
+++ b/README.md
@@ -82,7 +82,6 @@ cp .env.example .env
 | `OLLAMA_MODEL` | `qwen3:30b` | Primary model for reasoning and tool calling. Fallback: `llama3.1:8b-instruct` |
 | `DEBUG` | `false` | Enable `/docs` and `/redoc` |
 | `TIMMY_MODEL_BACKEND` | `ollama` | `ollama` \| `airllm` \| `auto` |
-| `AIRLLM_MODEL_SIZE` | `70b` | `8b` \| `70b` \| `405b` |
 | `L402_HMAC_SECRET` | *(default — change in prod)* | HMAC signing key for macaroons |
 | `L402_MACAROON_SECRET` | *(default — change in prod)* | Macaroon secret |
 | `LIGHTNING_BACKEND` | `mock` | `mock` (production-ready) \| `lnd` (scaffolded, not yet functional) |
@@ -177,7 +176,6 @@ timmy chat "Explain self-custody" --backend airllm --model-size 70b
 Or set once in `.env`:
 ```bash
 TIMMY_MODEL_BACKEND=auto
-AIRLLM_MODEL_SIZE=70b
 ```
 
 | Flag | Parameters | RAM needed |
diff --git a/TEST_COVERAGE_ANALYSIS.md b/TEST_COVERAGE_ANALYSIS.md
index 2baa6522..15ea6d79 100644
--- a/TEST_COVERAGE_ANALYSIS.md
+++ b/TEST_COVERAGE_ANALYSIS.md
@@ -111,7 +111,7 @@ pytest: error: unrecognized arguments: -n --dist worksteal
 ### 4a. Missing Error-Path Testing
 
 Many modules have happy-path tests but lack coverage for:
-- **Graceful degradation paths**: The architecture mandates graceful degradation when Ollama/Redis/AirLLM are unavailable, but most fallback paths are untested (e.g., `cascade.py` lines 563–655)
+- **Graceful degradation paths**: The architecture mandates graceful degradation when Ollama/Redis are unavailable, but most fallback paths are untested (e.g., `cascade.py` lines 563–655)
 - **`brain/client.py`**: Only 14.8% covered — connection failures, retries, and error handling are untested
 - **`infrastructure/error_capture.py`**: 0% — the error capture system itself has no tests
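The untested fallback paths flagged in this hunk are cheap to pin down with plain pytest. Below is a minimal sketch, assuming a hypothetical `generate()` helper that degrades to a canned reply when Ollama is unreachable; the names are illustrative, not the repo's actual API:

```python
def generate(prompt: str, *, ollama_call) -> str:
    """Illustrative helper: try Ollama first, degrade gracefully on failure."""
    try:
        return ollama_call(prompt)
    except ConnectionError:
        return "[degraded] Ollama unavailable; request queued for retry"


def test_falls_back_when_ollama_is_down():
    def dead_backend(prompt: str) -> str:
        raise ConnectionError("connection refused")

    assert generate("hello", ollama_call=dead_backend).startswith("[degraded]")


def test_uses_ollama_when_healthy():
    assert generate("hello", ollama_call=lambda p: "pong") == "pong"
```

The same shape applies to the Redis paths: inject the failing dependency, then assert on the degraded behaviour rather than on the exception.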
diff --git a/docs/AUDIT_REPORT.md b/docs/AUDIT_REPORT.md
index 0f7761a4..1c58b551 100644
--- a/docs/AUDIT_REPORT.md
+++ b/docs/AUDIT_REPORT.md
@@ -63,11 +63,11 @@ $ python -m pytest -q
 
 ## 2. Feature-by-Feature Audit
 
 ### 2.1 Timmy Agent
-**Claimed**: Agno-powered conversational agent backed by Ollama, AirLLM for 70B-405B models, SQLite memory
+**Claimed**: Agno-powered conversational agent backed by Ollama, SQLite memory
 **Verdict: REAL & FUNCTIONAL**
 - `src/timmy/agent.py` (79 lines): Creates a genuine `agno.Agent` with Ollama model, SQLite persistence, tools, and system prompt
-- Backend selection (`backends.py`) implements real Ollama/AirLLM switching with Apple Silicon detection
+- Backend selection (`backends.py`) implements real Ollama switching with Apple Silicon detection
 - CLI (`cli.py`) provides working `timmy chat`, `timmy think`, `timmy status` commands
 - Approval workflow (`approvals.py`) implements real human-in-the-loop with SQLite-backed state
 - Briefing system (`briefing.py`) generates real scheduled briefings
diff --git a/docs/REFACTORING_PLAN.md b/docs/REFACTORING_PLAN.md
index 8b0a5010..523dd94a 100644
--- a/docs/REFACTORING_PLAN.md
+++ b/docs/REFACTORING_PLAN.md
@@ -100,7 +100,7 @@ Bitcoin Lightning economics. No cloud AI.
 make install && make dev → http://localhost:8000
 
 ## What's Here
- - Timmy Agent (Ollama/AirLLM)
+ - Timmy Agent (Ollama)
  - Mission Control Dashboard (FastAPI + HTMX)
  - Swarm Coordinator (multi-agent auctions)
  - Lightning Payments (L402 gating)
diff --git a/docs/SECURITY.md b/docs/SECURITY.md
index f31beb5b..965dd8db 100644
--- a/docs/SECURITY.md
+++ b/docs/SECURITY.md
@@ -6,7 +6,7 @@ This document outlines the security architecture, threat model, and recent audit
 Timmy Time is built on the principle of **AI Sovereignty**. Security is not just about preventing unauthorized access, but about ensuring the user maintains full control over their data and AI models.
 
-1. **Local-First Execution:** All primary AI inference (Ollama/AirLLM) runs on localhost. No data is sent to third-party cloud providers unless explicitly configured (e.g., Grok).
+1. **Local-First Execution:** All primary AI inference (Ollama) runs on localhost. No data is sent to third-party cloud providers unless explicitly configured (e.g., Grok).
 2. **Air-Gapped Ready:** The system is designed to run without an internet connection once dependencies and models are cached.
 3. **Secret Management:** Secrets are never hard-coded. They are managed via Pydantic-settings from `.env` or environment variables.
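The Pydantic-settings pattern named in point 3 is worth a concrete illustration. A minimal sketch, assuming a hypothetical `Settings` class modeled on the README's variable table (the repo's real `config.py` will differ); `validate_default=True` makes the check fire even when a default is left in place:

```python
import warnings

from pydantic import field_validator
from pydantic_settings import BaseSettings

_DEV_DEFAULT = "dev-only-change-me"


class Settings(BaseSettings):
    """Secrets come from the environment or .env, never from source code."""

    model_config = {"env_file": ".env", "validate_default": True}

    l402_hmac_secret: str = _DEV_DEFAULT
    l402_macaroon_secret: str = _DEV_DEFAULT
    debug: bool = False

    @field_validator("l402_hmac_secret", "l402_macaroon_secret")
    @classmethod
    def _flag_dev_default(cls, value: str) -> str:
        if value == _DEV_DEFAULT:
            # A hardened deployment would raise here instead of warning.
            warnings.warn("default secret in use; change it in prod")
        return value
```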
diff --git a/docs/SOVEREIGN_AGI_RESEARCH.md b/docs/SOVEREIGN_AGI_RESEARCH.md
index 939a4b6c..781a504e 100644
--- a/docs/SOVEREIGN_AGI_RESEARCH.md
+++ b/docs/SOVEREIGN_AGI_RESEARCH.md
@@ -59,7 +59,7 @@ already works.
 | LLM routing | CascadeRouter with circuit breakers | Good |
 | Memory tiers | Hot (MEMORY.md) → Vault (markdown) → Semantic (SQLite+vectors) | Good foundation |
 | Module boundaries | 8 packages with clear responsibilities | Good |
-| Multi-backend LLM | Ollama/AirLLM/Grok/Claude with auto-detection | Good |
+| Multi-backend LLM | Ollama/Grok/Claude with auto-detection | Good |
 | Security posture | CSRF, security headers, secret validation, telemetry off | Good |
 
 ### Architecture Diagram (Current State)
@@ -473,7 +473,7 @@ The proposal enforces a strict 2,000-line limit for `src/timmy/`:
 | `workflow_engine.py` | ~200 | YAML loader, step executor, state machine |
 | `tool_registry.py` | ~200 | Dynamic tool discovery, spawn, health check |
 | `memory_system.py` | ~300 | Hot/Vault/Semantic memory interface (existing) |
-| `backends.py` | ~200 | Ollama/AirLLM/Claude/Grok adapters |
+| `backends.py` | ~200 | Ollama/Claude/Grok adapters |
 | `config.py` | ~150 | Pydantic-settings (existing) |
 | `lightning_wallet.py` | ~200 | L402 handling, invoice generation, balance |
 | `utils/` | ~300 | Shared helpers, logging, serialization |
diff --git a/docs/adr/020-cascade-router-integration.md b/docs/adr/020-cascade-router-integration.md
index fdb8e00e..2215e4f3 100644
--- a/docs/adr/020-cascade-router-integration.md
+++ b/docs/adr/020-cascade-router-integration.md
@@ -4,7 +4,6 @@ Proposed
 
 ## Context
 
-Currently, the Timmy agent (`src/timmy/agent.py`) uses `src/timmy/backends.py` which provides a simple abstraction over Ollama and AirLLM. However, this lacks:
 - Automatic failover between multiple LLM providers
 - Circuit breaker pattern for failing providers
 - Cost and latency tracking per provider
@@ -19,14 +18,13 @@ Integrate the Cascade Router as the primary LLM routing layer for Timmy, replaci
 ### Current Flow
 
 ```
-User Request → Timmy Agent → backends.py → Ollama/AirLLM
+User Request → Timmy Agent → backends.py → Ollama
 ```
 
 ### Proposed Flow
 
 ```
 User Request → Timmy Agent → Cascade Router → Provider 1 (Ollama)
                                                     ↓ (if fail)
-                                              Provider 2 (Local AirLLM)
                                                     ↓ (if fail)
                                               Provider 3 (API - optional)
                                                     ↓
@@ -41,7 +39,6 @@ User Request → Timmy Agent → Cascade Router → Provider 1 (Ollama)
 - Expose provider status in agent responses
 
 2. **Cascade Router** (`src/router/cascade.py`)
-   - Already supports: Ollama, OpenAI, Anthropic, AirLLM
    - Already has: Circuit breakers, metrics, failover logic
    - Add: Integration with existing `src/timmy/prompts.py`
@@ -57,7 +54,6 @@ User Request → Timmy Agent → Cascade Router → Provider 1 (Ollama)
 ### Provider Priority Order
 
 1. **Ollama (local)** - Priority 1, always try first
-2. **AirLLM (local)** - Priority 2, if Ollama unavailable
 3. **API providers** - Priority 3+, only if configured
 
 ### Data Flow
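For readers new to the pattern, the priority order above plus the ADR's circuit breakers reduce to a short routing loop. A hedged sketch with invented `Provider` and `route` names; the real implementation in `src/router/cascade.py` also tracks cost, latency, and metrics:

```python
from dataclasses import dataclass
from typing import Callable


@dataclass
class Provider:
    """One rung of the cascade; its breaker trips after `threshold` failures."""

    name: str
    call: Callable[[str], str]
    failures: int = 0
    threshold: int = 3

    @property
    def tripped(self) -> bool:
        return self.failures >= self.threshold


def route(prompt: str, providers: list[Provider]) -> str:
    """Try providers in priority order, skipping any with an open breaker."""
    for provider in providers:
        if provider.tripped:
            continue
        try:
            reply = provider.call(prompt)
            provider.failures = 0  # a healthy call closes the breaker again
            return reply
        except Exception:
            provider.failures += 1
    raise RuntimeError("all providers failed or circuit-broken")
```

A production breaker would also half-open after a cooldown instead of staying tripped; the sketch omits that for brevity.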
diff --git a/src/timmy/__init__.py b/src/timmy/__init__.py
index 09f8e7fb..f614a4fc 100644
--- a/src/timmy/__init__.py
+++ b/src/timmy/__init__.py
@@ -1 +1 @@
-"""Timmy — Core AI agent (Ollama/AirLLM backends, CLI, prompts)."""
+"""Timmy — Core AI agent (Ollama backend, CLI, prompts)."""
diff --git a/tests/dashboard/test_mobile_scenarios.py b/tests/dashboard/test_mobile_scenarios.py
index 7a72b4bb..93fb34b2 100644
--- a/tests/dashboard/test_mobile_scenarios.py
+++ b/tests/dashboard/test_mobile_scenarios.py
@@ -10,7 +10,7 @@ Categories:
     M3xx iOS keyboard & zoom prevention
     M4xx HTMX robustness (double-submit, sync)
     M5xx Safe-area / notch support
-    M6xx AirLLM backend interface contract
+    M6xx Backend interface contract
 """
 
 import re
@@ -208,7 +208,7 @@ def test_M505_dvh_units_used():
     assert "dvh" in css
 
 
-# ── M6xx — AirLLM backend interface contract ──────────────────────────────────
+# ── M6xx — Backend interface contract ──────────────────────────────────
 
 
 def test_M601_airllm_agent_has_run_method():
diff --git a/tests/timmy/test_backends.py b/tests/timmy/test_backends.py
index 52935f30..d93bb95b 100644
--- a/tests/timmy/test_backends.py
+++ b/tests/timmy/test_backends.py
@@ -1,4 +1,4 @@
-"""Tests for src/timmy/backends.py — AirLLM wrapper and helpers."""
+"""Tests for src/timmy/backends.py — backend helpers."""
 
 import sys
 from unittest.mock import MagicMock, patch
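The M6xx contract renamed in the tests above is the kind of check a `typing.Protocol` expresses naturally. A sketch with invented class names, shown only to make the contract concrete (the real checks live in `tests/dashboard/test_mobile_scenarios.py`):

```python
from typing import Protocol, runtime_checkable


@runtime_checkable
class BackendAgent(Protocol):
    """Minimal surface a model backend must expose to the dashboard."""

    def run(self, prompt: str) -> str: ...


class EchoAgent:
    """Stand-in backend used only to demonstrate the contract check."""

    def run(self, prompt: str) -> str:
        return f"echo: {prompt}"


def test_backend_satisfies_contract():
    # isinstance against a runtime_checkable Protocol verifies method
    # presence (not signatures), which suits an interface-contract test.
    assert isinstance(EchoAgent(), BackendAgent)
```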