# Source: Timmy-time-dashboard/tests/conftest.py
"""Pytest configuration and fixtures for the test suite."""
import os
import sqlite3
import sys
from pathlib import Path
from unittest.mock import MagicMock
import pytest
# Import pytest marker configuration
try:
from . import conftest_markers # noqa: F401
except ImportError:
import conftest_markers # noqa: F401
# ── Stub heavy optional dependencies so tests run without them installed ──────
# setdefault keeps a genuinely-installed module; otherwise a MagicMock stands
# in, so importing project code never fails on a missing optional package.
_OPTIONAL_MODULES = (
    "agno",
    "agno.agent",
    "agno.models",
    "agno.models.ollama",
    "agno.db",
    "agno.db.sqlite",
    "airllm",
    "telegram",
    "telegram.ext",
    "discord",
    "discord.ext",
    "discord.ext.commands",
    "pyzbar",
    "pyzbar.pyzbar",
    "requests",
    "celery",
    "celery.app",
    "celery.result",
)
for _name in _OPTIONAL_MODULES:
    sys.modules.setdefault(_name, MagicMock())
# ── Test mode setup ──────────────────────────────────────────────────────────
os.environ["TIMMY_TEST_MODE"] = "1"
os.environ["TIMMY_DISABLE_CSRF"] = "1"
os.environ["TIMMY_SKIP_EMBEDDINGS"] = "1"
@pytest.fixture(autouse=True)
def reset_message_log():
    """Start and finish every test with an empty in-memory chat log."""
    from dashboard.store import message_log

    message_log.clear()  # drop anything a previous test left behind
    yield
    message_log.clear()  # leave a clean log for the next test
@pytest.fixture(autouse=True)
def clean_database(tmp_path):
    """Isolate tests by redirecting module-level DB paths into ``tmp_path``.

    Each known module's DB-path attribute is swapped for a per-test temp
    file and restored on teardown. Modules that are not importable in the
    current environment are skipped silently (best-effort patching).
    """
    # (module name, attribute name, replacement path) — one row per redirect.
    redirects = [
        ("timmy.memory.vector_store", "DB_PATH", tmp_path / "swarm.db"),
        ("infrastructure.models.registry", "DB_PATH", tmp_path / "swarm.db"),
        ("spark.memory", "DB_PATH", tmp_path / "spark.db"),
        ("spark.eidos", "DB_PATH", tmp_path / "spark.db"),
    ]
    saved = {}
    for mod_name, attr, replacement in redirects:
        try:
            mod = __import__(mod_name, fromlist=[attr])
            saved[(mod_name, attr)] = getattr(mod, attr)
            setattr(mod, attr, replacement)
        except Exception:
            pass
    yield
    # Teardown: put every patched attribute back, again best-effort.
    for (mod_name, attr), previous in saved.items():
        try:
            mod = __import__(mod_name, fromlist=[attr])
            setattr(mod, attr, previous)
        except Exception:
            pass
@pytest.fixture(autouse=True)
def cleanup_event_loops():
    """Close any leftover (non-running) event loop after each test."""
    import asyncio
    import warnings

    yield
    try:
        running = True
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            running = False
        if running:
            # A loop is actively executing (async context) — leave it alone.
            return
        with warnings.catch_warnings():
            # get_event_loop() warns when it has to create a loop; ignore.
            warnings.simplefilter("ignore", DeprecationWarning)
            leftover = asyncio.get_event_loop_policy().get_event_loop()
            if leftover and not leftover.is_closed():
                leftover.close()
    except RuntimeError:
        pass
@pytest.fixture
def client():
    """FastAPI test client bound to the dashboard application."""
    from fastapi.testclient import TestClient

    from dashboard.app import app

    # Context manager so startup/shutdown events fire around each test.
    with TestClient(app) as http:
        yield http
@pytest.fixture
def db_connection():
    """Provide a fresh in-memory SQLite connection for tests.

    Yields:
        sqlite3.Connection: connection with ``sqlite3.Row`` row factory
        (columns addressable by name) and minimal ``agents``/``tasks``
        tables pre-created.

    The connection is always closed on teardown — including when an
    exception is thrown into the generator — via try/finally (the original
    version leaked the handle in that case).
    """
    conn = sqlite3.connect(":memory:")
    conn.row_factory = sqlite3.Row
    conn.executescript("""
        CREATE TABLE IF NOT EXISTS agents (
            id TEXT PRIMARY KEY,
            name TEXT NOT NULL,
            status TEXT NOT NULL DEFAULT 'idle',
            capabilities TEXT DEFAULT '',
            registered_at TEXT NOT NULL,
            last_seen TEXT NOT NULL
        );
        CREATE TABLE IF NOT EXISTS tasks (
            id TEXT PRIMARY KEY,
            description TEXT NOT NULL,
            status TEXT NOT NULL DEFAULT 'pending',
            assigned_agent TEXT,
            result TEXT,
            created_at TEXT NOT NULL,
            completed_at TEXT
        );
    """)
    conn.commit()
    try:
        yield conn
    finally:
        # Guarantee the handle is released even if teardown is interrupted.
        conn.close()
# ── Mock object and sample data fixtures ─────────────────────────────────────
@pytest.fixture
def mock_ollama_client():
    """Stub Ollama client whose API methods return canned payloads."""
    return MagicMock(
        generate=MagicMock(return_value={"response": "Test response"}),
        chat=MagicMock(return_value={"message": {"content": "Test chat response"}}),
        list=MagicMock(return_value={"models": [{"name": "llama3.2"}]}),
    )
@pytest.fixture
def mock_timmy_agent():
    """Canned Timmy agent double with fixed run/chat responses."""
    stub = MagicMock()
    # `name` is reserved in the MagicMock constructor, so set it afterwards.
    stub.name = "Timmy"
    stub.run = MagicMock(return_value="Test response from Timmy")
    stub.chat = MagicMock(return_value="Test chat response")
    return stub
@pytest.fixture
def mock_memory_system():
    """Memory-system double: fixed context, recording add, empty search."""
    return MagicMock(
        get_system_context=MagicMock(return_value="Test memory context"),
        add_memory=MagicMock(),
        search=MagicMock(return_value=[]),
    )
@pytest.fixture
def mock_event_log():
    """Event-logger double: records log_event calls, returns no events."""
    return MagicMock(
        log_event=MagicMock(),
        get_events=MagicMock(return_value=[]),
    )
@pytest.fixture
def mock_ws_manager():
    """WebSocket-manager double that records broadcast/send calls."""
    return MagicMock(
        broadcast=MagicMock(),
        broadcast_json=MagicMock(),
        send=MagicMock(),
    )
@pytest.fixture
def mock_settings():
    """Settings double carrying the default test configuration values."""
    # repo_root: two levels up from this file (tests/ -> project root).
    return MagicMock(
        ollama_url="http://localhost:11434",
        ollama_model="llama3.2",
        thinking_enabled=True,
        thinking_interval_seconds=300,
        error_log_enabled=False,
        repo_root=str(Path(__file__).parent.parent),
    )
@pytest.fixture
def sample_interview_data():
    """Provide sample interview data for testing.

    Returns:
        dict: interview ``questions`` (category, question text, expected
        keywords) plus the ``expected_response_format`` for answers.

    Fix: a commit-message blob pasted inside the second question's dict
    (scrape corruption) made this a SyntaxError; it is removed here.
    """
    return {
        "questions": [
            {
                "category": "Identity",
                "question": "Who are you?",
                "expected_keywords": ["Timmy", "agent"],
            },
            {
                "category": "Capabilities",
                "question": "What can you do?",
                "expected_keywords": ["agent", "brain"],
            },
        ],
        "expected_response_format": "string",
    }
@pytest.fixture
def sample_task_data():
    """Sample task payload used by task-handling tests."""
    return dict(
        id="task-1",
        title="Test Task",
        description="This is a test task",
        assigned_to="timmy",
        status="pending",
        priority="normal",
    )
@pytest.fixture
def sample_agent_data():
    """Sample agent payload used by agent-registry tests."""
    return dict(
        id="agent-1",
        name="Test Agent",
        capabilities=["chat", "reasoning"],
        status="active",
    )