refactor: break up create_timmy() into testable helpers (#520)

Co-authored-by: Kimi Agent <kimi@timmy.local>
Co-committed-by: Kimi Agent <kimi@timmy.local>
2026-03-19 19:51:59 -04:00
committed by Timmy Time
parent 7c823ab59c
commit b45b543f2d
2 changed files with 236 additions and 72 deletions


@@ -197,6 +197,90 @@ def _resolve_backend(requested: str | None) -> str:
return "ollama"
def _build_tools_list(use_tools: bool, skip_mcp: bool, model_name: str) -> list:
"""Assemble the tools list based on model capability and MCP flags.
Returns a list of Toolkit / MCPTools objects, or an empty list.
"""
if not use_tools:
logger.info("Tools disabled for model %s (too small for reliable tool calling)", model_name)
return []
tools_list: list = [create_full_toolkit()]
# Add MCP tool servers (lazy-connected on first arun()).
# Skipped when skip_mcp=True — MCP's stdio transport uses anyio cancel
# scopes that conflict with asyncio background task cancellation (#72).
if not skip_mcp:
try:
from timmy.mcp_tools import create_filesystem_mcp_tools, create_gitea_mcp_tools
gitea_mcp = create_gitea_mcp_tools()
if gitea_mcp:
tools_list.append(gitea_mcp)
fs_mcp = create_filesystem_mcp_tools()
if fs_mcp:
tools_list.append(fs_mcp)
except Exception as exc:
logger.debug("MCP tools unavailable: %s", exc)
return tools_list
def _build_prompt(use_tools: bool, session_id: str) -> str:
"""Build the full system prompt with optional memory context."""
base_prompt = get_system_prompt(tools_enabled=use_tools, session_id=session_id)
try:
from timmy.memory_system import memory_system
memory_context = memory_system.get_system_context()
if memory_context:
# Smaller budget for small models — expanded prompt uses more tokens
max_context = 2000 if not use_tools else 8000
if len(memory_context) > max_context:
memory_context = memory_context[:max_context] + "\n... [truncated]"
return (
f"{base_prompt}\n\n"
f"## GROUNDED CONTEXT (verified sources — cite when using)\n\n"
f"{memory_context}"
)
except Exception as exc:
logger.warning("Failed to load memory context: %s", exc)
return base_prompt
def _create_ollama_agent(
*,
db_file: str,
model_name: str,
tools_list: list,
full_prompt: str,
use_tools: bool,
) -> Agent:
"""Construct the Agno Agent with Ollama backend and warm up the model."""
model_kwargs = {}
if settings.ollama_num_ctx > 0:
model_kwargs["options"] = {"num_ctx": settings.ollama_num_ctx}
agent = Agent(
name="Agent",
model=Ollama(id=model_name, host=settings.ollama_url, timeout=300, **model_kwargs),
db=SqliteDb(db_file=db_file),
description=full_prompt,
add_history_to_context=True,
num_history_runs=20,
markdown=False,
tools=tools_list if tools_list else None,
tool_call_limit=settings.max_agent_steps if use_tools else None,
telemetry=settings.telemetry_enabled,
)
_warmup_model(model_name)
return agent
def create_timmy(
db_file: str = "timmy.db",
backend: str | None = None,
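
With _build_tools_list extracted, the tool-gating logic can be unit-tested without constructing a full agent. A minimal pytest sketch; the module path (timmy.agent) and the create_full_toolkit patch target are assumptions, not taken from this diff:

    from timmy import agent

    def test_no_tools_for_small_models():
        # Small models get an empty list regardless of the MCP flag.
        assert agent._build_tools_list(use_tools=False, skip_mcp=False, model_name="tiny") == []

    def test_skip_mcp_returns_base_toolkit_only(monkeypatch):
        # With skip_mcp=True only the base toolkit is returned (see #72).
        monkeypatch.setattr(agent, "create_full_toolkit", lambda: "toolkit-sentinel")
        tools = agent._build_tools_list(use_tools=True, skip_mcp=True, model_name="big-model")
        assert tools == ["toolkit-sentinel"]
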
@@ -238,16 +322,12 @@ def create_timmy(
        return TimmyAirLLMAgent(model_size=size)

    # Default: Ollama via Agno.
    # Resolve model with automatic pulling and fallback
    model_name, is_fallback = _resolve_model_with_fallback(
        requested_model=None,
        require_vision=False,
        auto_pull=True,
    )

    # If Ollama is completely unreachable, fail loudly.
    # Sovereignty: never silently send data to a cloud API.
    # Use --backend claude explicitly if you want cloud inference.
    if not _check_model_available(model_name):
        logger.error(
            "Ollama unreachable and no local models available. "
@@ -258,76 +338,16 @@ def create_timmy(
logger.info("Using fallback model %s (requested was unavailable)", model_name)
use_tools = _model_supports_tools(model_name)
tools_list = _build_tools_list(use_tools, skip_mcp, model_name)
full_prompt = _build_prompt(use_tools, session_id)
# Conditionally include tools — small models get none
toolkit = create_full_toolkit() if use_tools else None
if not use_tools:
logger.info("Tools disabled for model %s (too small for reliable tool calling)", model_name)
# Build the tools list — Agno accepts a list of Toolkit / MCPTools
tools_list: list = []
if toolkit:
tools_list.append(toolkit)
# Add MCP tool servers (lazy-connected on first arun()).
# Skipped when skip_mcp=True — MCP's stdio transport uses anyio cancel
# scopes that conflict with asyncio background task cancellation (#72).
if use_tools and not skip_mcp:
try:
from timmy.mcp_tools import create_filesystem_mcp_tools, create_gitea_mcp_tools
gitea_mcp = create_gitea_mcp_tools()
if gitea_mcp:
tools_list.append(gitea_mcp)
fs_mcp = create_filesystem_mcp_tools()
if fs_mcp:
tools_list.append(fs_mcp)
except Exception as exc:
logger.debug("MCP tools unavailable: %s", exc)
# Select prompt tier based on tool capability
base_prompt = get_system_prompt(tools_enabled=use_tools, session_id=session_id)
# Try to load memory context
try:
from timmy.memory_system import memory_system
memory_context = memory_system.get_system_context()
if memory_context:
# Truncate if too long — smaller budget for small models
# since the expanded prompt (roster, guardrails) uses more tokens
max_context = 2000 if not use_tools else 8000
if len(memory_context) > max_context:
memory_context = memory_context[:max_context] + "\n... [truncated]"
full_prompt = (
f"{base_prompt}\n\n"
f"## GROUNDED CONTEXT (verified sources — cite when using)\n\n"
f"{memory_context}"
)
else:
full_prompt = base_prompt
except Exception as exc:
logger.warning("Failed to load memory context: %s", exc)
full_prompt = base_prompt
model_kwargs = {}
if settings.ollama_num_ctx > 0:
model_kwargs["options"] = {"num_ctx": settings.ollama_num_ctx}
agent = Agent(
name="Agent",
model=Ollama(id=model_name, host=settings.ollama_url, timeout=300, **model_kwargs),
db=SqliteDb(db_file=db_file),
description=full_prompt,
add_history_to_context=True,
num_history_runs=20,
markdown=False,
tools=tools_list if tools_list else None,
tool_call_limit=settings.max_agent_steps if use_tools else None,
telemetry=settings.telemetry_enabled,
return _create_ollama_agent(
db_file=db_file,
model_name=model_name,
tools_list=tools_list,
full_prompt=full_prompt,
use_tools=use_tools,
)
_warmup_model(model_name)
return agent
class TimmyWithMemory:
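
The payoff of routing construction through _create_ollama_agent is that create_timmy's wiring can be exercised without a live Ollama daemon. A sketch of that style of test; every patched name below is an assumption inferred from this diff, and the real create_timmy signature may differ:

    from timmy import agent

    def test_create_timmy_wiring(monkeypatch):
        captured = {}

        def fake_create(**kwargs):
            # Record what create_timmy hands to the constructor helper.
            captured.update(kwargs)
            return "agent-sentinel"

        monkeypatch.setattr(agent, "_resolve_model_with_fallback", lambda **_: ("some-model", False))
        monkeypatch.setattr(agent, "_check_model_available", lambda _name: True)
        monkeypatch.setattr(agent, "_model_supports_tools", lambda _name: False)
        monkeypatch.setattr(agent, "_build_prompt", lambda use_tools, session_id: "PROMPT")
        monkeypatch.setattr(agent, "_create_ollama_agent", fake_create)

        assert agent.create_timmy(db_file="test.db") == "agent-sentinel"
        assert captured["tools_list"] == []   # use_tools=False means no tools at all
        assert captured["full_prompt"] == "PROMPT"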