"""Tests for system introspection tools."""
|
|
|
|
from unittest.mock import MagicMock, patch
|
|
|
|
import httpx
|
|
|
|
|
|
def test_get_system_info_returns_dict():
    """System info should return a dictionary."""
    from timmy.tools_intro import get_system_info

    info = get_system_info()

    assert isinstance(info, dict)
    # Every top-level key the UI relies on must be present.
    for key in ("python_version", "platform", "model", "repo_root"):
        assert key in info
|
|
|
|
|
|
def test_get_system_info_contains_model():
    """System info should include a model name (may differ from config if
    the actual running model is different — see issue #77)."""
    from timmy.tools_intro import get_system_info

    info = get_system_info()

    assert "model" in info
    model = info["model"]
    # Model should be a non-empty string — exact value depends on what
    # Ollama has loaded (verified by TestGetOllamaModelExactMatch tests)
    assert isinstance(model, str)
    assert model != ""
|
|
|
|
|
|
def test_get_system_info_contains_repo_root():
    """System info should include repo_root."""
    from config import settings
    from timmy.tools_intro import get_system_info

    info = get_system_info()

    assert "repo_root" in info
    repo_root = info["repo_root"]
    assert repo_root == settings.repo_root
    # In Docker the CWD is /app, so just verify it's a non-empty path
    assert len(repo_root) > 0
|
|
|
|
|
|
def test_check_ollama_health_returns_dict():
    """Ollama health check should return a dictionary."""
    from timmy.tools_intro import check_ollama_health

    health = check_ollama_health()

    assert isinstance(health, dict)
    # Both reporting keys must exist regardless of whether Ollama is up.
    for key in ("accessible", "model"):
        assert key in health
|
|
|
|
|
|
def test_get_memory_status_returns_dict():
    """Memory status should return a dictionary with tier info."""
    from timmy.tools_intro import get_memory_status

    status = get_memory_status()

    assert isinstance(status, dict)
    # Both memory tiers must be reported.
    assert {"tier1_hot_memory", "tier2_vault"} <= set(status)
|
|
|
|
|
|
# --- _get_ollama_model exact-match tests (issue #77) ---
|
|
|
|
|
|
def _mock_response(json_data, status_code=200):
    """Build a MagicMock standing in for an httpx.Response.

    ``json_data`` is returned by ``.json()``; ``status_code`` defaults to 200.
    """
    mock_resp = MagicMock(spec=httpx.Response)
    mock_resp.configure_mock(
        status_code=status_code,
        **{"json.return_value": json_data},
    )
    return mock_resp
|
|
|
|
|
|
class TestGetOllamaModelExactMatch:
    """Ensure _get_ollama_model uses exact match, not prefix match."""

    @staticmethod
    def _settings_patch(model="qwen3:30b"):
        """Return a patcher replacing config.settings with a mock configured
        for the given model and a local Ollama URL (dedupes the boilerplate
        previously repeated in every test)."""
        return patch(
            "config.settings",
            ollama_model=model,
            ollama_url="http://localhost:11434",
        )

    @patch("timmy.tools_intro.httpx.get")
    def test_exact_match_from_ps(self, mock_get):
        """Should return exact model from /api/ps."""
        from timmy.tools_intro import _get_ollama_model

        mock_get.return_value = _mock_response({"models": [{"name": "qwen3:30b"}]})

        with self._settings_patch():
            result = _get_ollama_model()

        assert result == "qwen3:30b"

    @patch("timmy.tools_intro.httpx.get")
    def test_prefix_collision_returns_correct_model(self, mock_get):
        """qwen3:30b configured — must NOT match qwen3.5:latest (prefix bug)."""
        from timmy.tools_intro import _get_ollama_model

        # /api/ps has both models loaded; configured is qwen3:30b
        mock_get.return_value = _mock_response(
            {"models": [{"name": "qwen3.5:latest"}, {"name": "qwen3:30b"}]}
        )

        with self._settings_patch():
            result = _get_ollama_model()

        assert result == "qwen3:30b", f"Got '{result}' — prefix collision bug!"

    @patch("timmy.tools_intro.httpx.get")
    def test_configured_model_not_running_returns_actual(self, mock_get):
        """If configured model isn't loaded, report what IS running."""
        from timmy.tools_intro import _get_ollama_model

        mock_get.return_value = _mock_response({"models": [{"name": "qwen3.5:latest"}]})

        with self._settings_patch():
            result = _get_ollama_model()

        # Should report actual running model, not configured one
        assert result == "qwen3.5:latest"

    @patch("timmy.tools_intro.httpx.get")
    def test_latest_suffix_match(self, mock_get):
        """With nothing loaded and only 'qwen3:30b:latest' in /api/tags,
        the configured 'qwen3:30b' name is still reported (no exact match,
        so the function falls back to the configured model)."""
        from timmy.tools_intro import _get_ollama_model

        ps_resp = _mock_response({"models": []})
        tags_resp = _mock_response({"models": [{"name": "qwen3:30b:latest"}]})
        # First call hits /api/ps, second /api/tags.
        mock_get.side_effect = [ps_resp, tags_resp]

        with self._settings_patch():
            result = _get_ollama_model()

        # Falls back to configured since no exact match
        assert result == "qwen3:30b"

    @patch("timmy.tools_intro.httpx.get")
    def test_ollama_down_returns_configured(self, mock_get):
        """If Ollama is unreachable, return configured model."""
        from timmy.tools_intro import _get_ollama_model

        mock_get.side_effect = httpx.ConnectError("connection refused")

        with self._settings_patch():
            result = _get_ollama_model()

        assert result == "qwen3:30b"
|
|
|
|
|
|
class TestRunSelfTests:
    """Tests for run_self_tests() — Timmy's self-verification tool."""

    @staticmethod
    def _create_fake_venv(tmp_path):
        """Create a fake .venv python executable so the venv check passes
        (dedupes the setup previously copy-pasted into each test)."""
        venv_python = tmp_path / ".venv" / "bin" / "python"
        venv_python.parent.mkdir(parents=True)
        venv_python.write_text("#!/bin/sh\necho mock")

    @staticmethod
    def _completed(stdout):
        """Build a successful CompletedProcess carrying the given pytest output."""
        import subprocess

        return subprocess.CompletedProcess(args=[], returncode=0, stdout=stdout, stderr="")

    def test_returns_dict_with_expected_keys(self, monkeypatch, tmp_path):
        """run_self_tests should return structured test results."""
        import subprocess

        monkeypatch.setattr(
            subprocess, "run", lambda *args, **kwargs: self._completed("5 passed in 0.5s")
        )
        self._create_fake_venv(tmp_path)

        from timmy.tools_intro import run_self_tests

        result = run_self_tests(scope="tests/timmy/test_introspection.py", _repo_root=str(tmp_path))
        assert isinstance(result, dict)
        # All structured-result keys must be present.
        for key in ("success", "passed", "failed", "total"):
            assert key in result
        assert result["success"] is True
        assert result["passed"] == 5
        assert result["total"] == 5

    def test_fast_scope_skips_integration(self, monkeypatch, tmp_path):
        """Fast scope should exclude functional/e2e/integration dirs."""
        import subprocess

        calls = []

        def capture_run(*args, **kwargs):
            # Record the pytest command line for inspection below.
            calls.append(args[0] if args else kwargs.get("cmd"))
            return self._completed("1 passed in 0.5s")

        monkeypatch.setattr(subprocess, "run", capture_run)
        self._create_fake_venv(tmp_path)

        from timmy.tools_intro import run_self_tests

        run_self_tests(scope="fast", _repo_root=str(tmp_path))
        assert len(calls) == 1
        cmd = calls[0]
        assert "--ignore=tests/functional" in cmd
        assert "--ignore=tests/e2e" in cmd

    def test_specific_path_scope(self, monkeypatch, tmp_path):
        """Specific path scope passes path directly to pytest."""
        import subprocess

        calls = []

        def capture_run(*args, **kwargs):
            calls.append(args[0] if args else kwargs.get("cmd"))
            return self._completed("5 passed in 1.0s")

        monkeypatch.setattr(subprocess, "run", capture_run)
        self._create_fake_venv(tmp_path)

        from timmy.tools_intro import run_self_tests

        run_self_tests(scope="tests/timmy/", _repo_root=str(tmp_path))
        assert len(calls) == 1
        assert "tests/timmy/" in calls[0]

    def test_missing_venv_returns_error(self, monkeypatch, tmp_path):
        """Should handle missing venv gracefully."""
        from timmy.tools_intro import run_self_tests

        # No fake venv is created here, so the venv check must fail.
        result = run_self_tests(_repo_root=str(tmp_path))
        assert result["success"] is False
        assert "venv" in result.get("error", "").lower()

    def test_timeout_returns_error(self, monkeypatch, tmp_path):
        """Should handle subprocess timeout gracefully."""
        import subprocess

        def timeout_run(*args, **kwargs):
            raise subprocess.TimeoutExpired(cmd="pytest", timeout=120)

        monkeypatch.setattr(subprocess, "run", timeout_run)
        self._create_fake_venv(tmp_path)

        from timmy.tools_intro import run_self_tests

        result = run_self_tests(_repo_root=str(tmp_path))
        assert result["success"] is False
        assert "timed out" in result.get("error", "").lower()
|