This repository has been archived on 2026-03-24. You can view files and clone it. You cannot open issues or pull requests or push a commit.
Files
Timmy-time-dashboard/tests/timmy/test_introspection.py
Kimi Agent 7132b42ff3 fix: model introspection uses exact match, queries /api/ps first
_get_ollama_model() used prefix match (startswith) on /api/tags,
causing qwen3:30b to match qwen3.5:latest. Now:
1. Queries /api/ps (loaded models) first — most accurate
2. Falls back to /api/tags with exact name match
3. Reports actual running model, not just configured one

Updated test_get_system_info_contains_model to not assume model==config.

Fixes #77. 5 regression tests added.
2026-03-14 18:03:59 -04:00

161 lines
5.5 KiB
Python

"""Tests for system introspection tools."""
from unittest.mock import MagicMock, patch
import httpx
def test_get_system_info_returns_dict():
    """get_system_info must produce a dict exposing all core fields."""
    from timmy.tools_intro import get_system_info

    info = get_system_info()
    assert isinstance(info, dict)
    # Every top-level field the rest of the suite relies on must be present.
    for field in ("python_version", "platform", "model", "repo_root"):
        assert field in info
def test_get_system_info_contains_model():
    """System info should include a model name (may differ from config if
    the actual running model is different — see issue #77)."""
    from timmy.tools_intro import get_system_info

    info = get_system_info()
    assert "model" in info
    # The exact value depends on what Ollama currently has loaded; that
    # behavior is pinned by TestGetOllamaModelExactMatch.  Here we only
    # require a non-empty string.
    model = info["model"]
    assert isinstance(model, str)
    assert len(model) > 0
def test_get_system_info_contains_repo_root():
    """System info must expose the repo_root configured in settings."""
    from config import settings
    from timmy.tools_intro import get_system_info

    info = get_system_info()
    assert "repo_root" in info
    # Must echo the configured value exactly (in Docker the CWD is /app,
    # so we compare against settings rather than os.getcwd()).
    assert info["repo_root"] == settings.repo_root
    # And it must be a non-empty path string.
    assert len(info["repo_root"]) > 0
def test_check_ollama_health_returns_dict():
    """check_ollama_health must return a dict with the expected keys."""
    from timmy.tools_intro import check_ollama_health

    health = check_ollama_health()
    assert isinstance(health, dict)
    for key in ("accessible", "model"):
        assert key in health
def test_get_memory_status_returns_dict():
    """get_memory_status must report both memory tiers in a dict."""
    from timmy.tools_intro import get_memory_status

    report = get_memory_status()
    assert isinstance(report, dict)
    # Both tiers must be represented.
    assert "tier1_hot_memory" in report
    assert "tier2_vault" in report
# --- _get_ollama_model exact-match tests (issue #77) ---
def _mock_response(json_data, status_code=200):
    """Build a MagicMock standing in for an httpx.Response.

    The mock's .json() returns *json_data* and .status_code is
    *status_code* (HTTP 200 by default).
    """
    mock = MagicMock(spec=httpx.Response)
    mock.status_code = status_code
    mock.json.return_value = json_data
    return mock
class TestGetOllamaModelExactMatch:
    """Ensure _get_ollama_model uses exact match, not prefix match.

    Regression suite for issue #77: the old implementation used
    startswith() against /api/tags and could pick the wrong model.
    """

    @patch("timmy.tools_intro.httpx.get")
    def test_exact_match_from_ps(self, mock_get):
        """Should return exact model from /api/ps."""
        from timmy.tools_intro import _get_ollama_model
        # /api/ps reports exactly the configured model as loaded.
        ps_resp = _mock_response({"models": [{"name": "qwen3:30b"}]})
        mock_get.return_value = ps_resp
        with patch("config.settings") as mock_settings:
            mock_settings.ollama_model = "qwen3:30b"
            mock_settings.ollama_url = "http://localhost:11434"
            result = _get_ollama_model()
        assert result == "qwen3:30b"

    @patch("timmy.tools_intro.httpx.get")
    def test_prefix_collision_returns_correct_model(self, mock_get):
        """qwen3:30b configured — must NOT match qwen3.5:latest (prefix bug)."""
        from timmy.tools_intro import _get_ollama_model
        # /api/ps has both models loaded; configured is qwen3:30b.
        # The similarly-named model is listed FIRST so a prefix/first-hit
        # implementation would return the wrong one.
        ps_resp = _mock_response({"models": [{"name": "qwen3.5:latest"}, {"name": "qwen3:30b"}]})
        mock_get.return_value = ps_resp
        with patch("config.settings") as mock_settings:
            mock_settings.ollama_model = "qwen3:30b"
            mock_settings.ollama_url = "http://localhost:11434"
            result = _get_ollama_model()
        assert result == "qwen3:30b", f"Got '{result}' — prefix collision bug!"

    @patch("timmy.tools_intro.httpx.get")
    def test_configured_model_not_running_returns_actual(self, mock_get):
        """If configured model isn't loaded, report what IS running."""
        from timmy.tools_intro import _get_ollama_model
        # Only a different model is loaded; the configured one is absent.
        ps_resp = _mock_response({"models": [{"name": "qwen3.5:latest"}]})
        mock_get.return_value = ps_resp
        with patch("config.settings") as mock_settings:
            mock_settings.ollama_model = "qwen3:30b"
            mock_settings.ollama_url = "http://localhost:11434"
            result = _get_ollama_model()
        # Should report actual running model, not configured one.
        assert result == "qwen3.5:latest"

    @patch("timmy.tools_intro.httpx.get")
    def test_latest_suffix_match(self, mock_get):
        """No exact match anywhere ('qwen3:30b:latest' != 'qwen3:30b'):
        the function falls back to reporting the configured model name."""
        from timmy.tools_intro import _get_ollama_model
        # Nothing loaded; /api/tags lists only a ':latest'-suffixed name,
        # which is NOT an exact match for the configured 'qwen3:30b'.
        ps_resp = _mock_response({"models": []})
        tags_resp = _mock_response({"models": [{"name": "qwen3:30b:latest"}]})
        # First httpx.get call → /api/ps, second → /api/tags.
        mock_get.side_effect = [ps_resp, tags_resp]
        with patch("config.settings") as mock_settings:
            mock_settings.ollama_model = "qwen3:30b"
            mock_settings.ollama_url = "http://localhost:11434"
            result = _get_ollama_model()
        # Falls back to configured since no exact match.
        assert result == "qwen3:30b"

    @patch("timmy.tools_intro.httpx.get")
    def test_ollama_down_returns_configured(self, mock_get):
        """If Ollama is unreachable, return configured model."""
        from timmy.tools_intro import _get_ollama_model
        # Every httpx.get call raises — simulates Ollama being down.
        mock_get.side_effect = httpx.ConnectError("connection refused")
        with patch("config.settings") as mock_settings:
            mock_settings.ollama_model = "qwen3:30b"
            mock_settings.ollama_url = "http://localhost:11434"
            result = _get_ollama_model()
        assert result == "qwen3:30b"