forked from Rockachopa/Timmy-time-dashboard
Merge pull request #46 from AlexanderWhitestone/feature/memory-layers-and-conversational-ai
feat: Event Log, Ledger, Memory, Cascade Router, Upgrade Queue, Activity Feed
This commit is contained in:
@@ -1,185 +1,164 @@
|
||||
"""Functional test fixtures — real services, no mocking.
|
||||
|
||||
These fixtures provide:
|
||||
- TestClient hitting the real FastAPI app (singletons, SQLite, etc.)
|
||||
- Typer CliRunner for CLI commands
|
||||
- Real temporary SQLite for swarm state
|
||||
- Real payment handler with mock lightning backend (LIGHTNING_BACKEND=mock)
|
||||
- Docker compose lifecycle for container-level tests
|
||||
"""
|
||||
"""Shared fixtures for functional/E2E tests."""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock
|
||||
import urllib.request
|
||||
|
||||
import pytest
|
||||
from fastapi.testclient import TestClient
|
||||
|
||||
# ── Stub heavy optional deps (same as root conftest) ─────────────────────────
# Import-compatibility shims, not mocks: these packages are absent from the
# test environment, and the code under test tolerates that via
# try/except ImportError — the MagicMock just keeps `import X` from exploding.
_OPTIONAL_DEPS = (
    "agno", "agno.agent", "agno.models", "agno.models.ollama",
    "agno.db", "agno.db.sqlite",
    "airllm",
    "telegram", "telegram.ext",
)
for _mod in _OPTIONAL_DEPS:
    sys.modules.setdefault(_mod, MagicMock())

os.environ["TIMMY_TEST_MODE"] = "1"

# Default dashboard URL — override with the DASHBOARD_URL env var.
DASHBOARD_URL = os.environ.get("DASHBOARD_URL", "http://localhost:8000")
|
||||
|
||||
|
||||
# ── Isolation: fresh coordinator state per test ───────────────────────────────
|
||||
|
||||
@pytest.fixture(autouse=True)
def _isolate_state():
    """Reset all singleton state between tests so they can't leak.

    Clears the dashboard message log before and after each test, then tears
    down coordinator-held state (auctions, comms listeners, in-process
    nodes, managed agents) and the routing manifest cache.
    """
    from dashboard.store import message_log

    message_log.clear()
    yield
    message_log.clear()

    from swarm.coordinator import coordinator

    coordinator.auctions._auctions.clear()
    coordinator.comms._listeners.clear()
    coordinator._in_process_nodes.clear()
    coordinator.manager.stop_all()

    # Merge fix: this cleanup had been spliced into is_server_running();
    # it belongs here with the rest of the singleton teardown.
    from swarm import routing

    routing.routing_engine._manifests.clear()
|
||||
def is_server_running():
    """Return True if the dashboard answers its /health endpoint.

    Any failure (connection refused, timeout, unresolvable URL) is treated
    as "not running" — callers use this as a cheap liveness probe.

    Merge fix: two stray routing-cleanup lines that belonged to the
    _isolate_state fixture were removed from the try block.
    """
    try:
        urllib.request.urlopen(f"{DASHBOARD_URL}/health", timeout=2)
        return True
    except Exception:
        return False
|
||||
|
||||
|
||||
# ── TestClient with real app, no patches ──────────────────────────────────────
|
||||
@pytest.fixture(scope="session")
|
||||
def live_server():
|
||||
"""Start the real Timmy server for E2E tests.
|
||||
|
||||
Yields the base URL (http://localhost:8000).
|
||||
Kills the server after tests complete.
|
||||
"""
|
||||
# Check if server already running
|
||||
if is_server_running():
|
||||
print(f"\n📡 Using existing server at {DASHBOARD_URL}")
|
||||
yield DASHBOARD_URL
|
||||
return
|
||||
|
||||
# Start server in subprocess
|
||||
print(f"\n🚀 Starting server on {DASHBOARD_URL}...")
|
||||
|
||||
env = os.environ.copy()
|
||||
env["PYTHONPATH"] = "src"
|
||||
env["TIMMY_ENV"] = "test" # Use test config if available
|
||||
|
||||
# Determine project root
|
||||
project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
|
||||
|
||||
proc = subprocess.Popen(
|
||||
[sys.executable, "-m", "uvicorn", "dashboard.app:app",
|
||||
"--host", "127.0.0.1", "--port", "8000",
|
||||
"--log-level", "warning"],
|
||||
cwd=project_root,
|
||||
env=env,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
|
||||
# Wait for server to start
|
||||
max_retries = 30
|
||||
for i in range(max_retries):
|
||||
if is_server_running():
|
||||
print(f"✅ Server ready!")
|
||||
break
|
||||
time.sleep(1)
|
||||
print(f"⏳ Waiting for server... ({i+1}/{max_retries})")
|
||||
else:
|
||||
proc.terminate()
|
||||
proc.wait()
|
||||
raise RuntimeError("Server failed to start")
|
||||
|
||||
yield DASHBOARD_URL
|
||||
|
||||
# Cleanup
|
||||
print("\n🛑 Stopping server...")
|
||||
proc.terminate()
|
||||
try:
|
||||
proc.wait(timeout=5)
|
||||
except subprocess.TimeoutExpired:
|
||||
proc.kill()
|
||||
proc.wait()
|
||||
print("✅ Server stopped")
|
||||
|
||||
|
||||
@pytest.fixture
def app_client(tmp_path):
    """TestClient wrapping the real dashboard app.

    Points the swarm task/registry SQLite at a per-test tmp_path so tests
    don't pollute each other, and restores the original DB paths on
    teardown. No mocking — Ollama is offline (graceful degradation),
    singletons are real.

    Merge fix: two fused definitions (one with tmp_path isolation, one
    without) were collapsed into this single fixture.
    """
    data_dir = tmp_path / "data"
    data_dir.mkdir()

    import swarm.tasks as tasks_mod
    import swarm.registry as registry_mod

    original_tasks_db = tasks_mod.DB_PATH
    original_reg_db = registry_mod.DB_PATH
    tasks_mod.DB_PATH = data_dir / "swarm.db"
    registry_mod.DB_PATH = data_dir / "swarm.db"

    try:
        from dashboard.app import app

        with TestClient(app) as c:
            yield c
    finally:
        # Restore module-level DB paths even if the test body raised.
        tasks_mod.DB_PATH = original_tasks_db
        registry_mod.DB_PATH = original_reg_db
|
||||
|
||||
|
||||
# ── Timmy-serve TestClient ────────────────────────────────────────────────────
|
||||
|
||||
# NOTE(review): a second `serve_client` fixture is defined later in this
# file and shadows this one — consolidate; only this variant sets price_sats.
@pytest.fixture
def serve_client():
    """TestClient wrapping the timmy-serve app.

    Uses the real mock-lightning backend (LIGHTNING_BACKEND=mock).
    """
    from timmy_serve.app import create_timmy_serve_app

    app = create_timmy_serve_app(price_sats=100)
    with TestClient(app) as c:
        yield c
|
||||
|
||||
|
||||
# ── CLI runners ───────────────────────────────────────────────────────────────
|
||||
|
||||
@pytest.fixture
def timmy_runner():
    """Typer CliRunner + app for the `timmy` CLI."""
    from typer.testing import CliRunner
    from timmy.cli import app

    # Merge fix: the old body had an unreachable `yield` after `return`
    # and a duplicated docstring.
    return CliRunner(), app
|
||||
|
||||
|
||||
@pytest.fixture
def serve_runner():
    """Typer CliRunner + app for the `timmy-serve` CLI."""
    from typer.testing import CliRunner
    from timmy_serve.cli import app

    # Merge fix: the old body had an unreachable `yield` after `return`
    # and a duplicated docstring.
    return CliRunner(), app
|
||||
|
||||
|
||||
@pytest.fixture
def self_tdd_runner():
    """Typer CliRunner + app for the `self-tdd` CLI."""
    from typer.testing import CliRunner
    from self_tdd.cli import app

    runner = CliRunner()
    yield runner, app
|
||||
|
||||
|
||||
# NOTE(review): a session-scoped `docker_stack` fixture is defined later in
# this file and shadows this one — consolidate to a single definition.
@pytest.fixture
def docker_stack():
    """Docker stack URL for container-level tests.

    Skips unless the FUNCTIONAL_DOCKER env var is set to "1".
    """
    # Fix: dropped a redundant local `import os` — os is imported at
    # module level already.
    if os.environ.get("FUNCTIONAL_DOCKER") != "1":
        pytest.skip("Set FUNCTIONAL_DOCKER=1 to run Docker tests")
    yield "http://localhost:18000"
|
||||
|
||||
|
||||
@pytest.fixture
def serve_client():
    """FastAPI test client for timmy-serve app."""
    pytest.importorskip("timmy_serve.app", reason="timmy_serve not available")
    from timmy_serve.app import create_timmy_serve_app
    from fastapi.testclient import TestClient

    serve_app = create_timmy_serve_app()
    with TestClient(serve_app) as client:
        yield client
|
||||
|
||||
|
||||
@pytest.fixture
def tdd_runner():
    """Typer CliRunner + app for the `self-tdd` CLI (alias of self_tdd_runner)."""
    pytest.importorskip("self_tdd.cli", reason="self_tdd CLI not available")
    from typer.testing import CliRunner

    # Merge fix: one branch imported app from self_tdd.watchdog and returned,
    # leaving a dead import/yield from self_tdd.cli below it. The sibling
    # self_tdd_runner fixture uses self_tdd.cli, so that source is kept.
    from self_tdd.cli import app

    yield CliRunner(), app
|
||||
|
||||
|
||||
# ── Docker compose lifecycle ──────────────────────────────────────────────────

PROJECT_ROOT = Path(__file__).parent.parent.parent
COMPOSE_TEST = PROJECT_ROOT / "docker-compose.test.yml"


def _compose(*args, timeout=60):
    """Run a `docker compose` command against the test compose file.

    Output is captured as text; returns the CompletedProcess.
    """
    command = [
        "docker", "compose",
        "-f", str(COMPOSE_TEST),
        "-p", "timmy-test",
        *args,
    ]
    return subprocess.run(
        command, capture_output=True, text=True,
        timeout=timeout, cwd=str(PROJECT_ROOT),
    )
|
||||
|
||||
|
||||
def _wait_for_healthy(url: str, retries=30, interval=2):
    """Poll *url* until it returns HTTP 200.

    Returns True on success, False once the retry budget is exhausted.
    Network errors count as "not healthy yet" and are retried.
    """
    import httpx

    for _ in range(retries):
        try:
            if httpx.get(url, timeout=5).status_code == 200:
                return True
        except Exception:
            pass
        time.sleep(interval)
    return False
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def docker_stack():
|
||||
"""Spin up the test compose stack once per session.
|
||||
|
||||
Yields a base URL (http://localhost:18000) to hit the dashboard.
|
||||
Tears down after all tests complete.
|
||||
|
||||
Skipped unless FUNCTIONAL_DOCKER=1 is set.
|
||||
"""
|
||||
if not COMPOSE_TEST.exists():
|
||||
pytest.skip("docker-compose.test.yml not found")
|
||||
if os.environ.get("FUNCTIONAL_DOCKER") != "1":
|
||||
pytest.skip("Set FUNCTIONAL_DOCKER=1 to run Docker tests")
|
||||
|
||||
# Verify Docker daemon is reachable before attempting build
|
||||
docker_check = subprocess.run(
|
||||
["docker", "info"], capture_output=True, text=True, timeout=10,
|
||||
# Add custom pytest option for headed mode
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption(
|
||||
"--headed",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Run browser in non-headless mode (visible)",
|
||||
)
|
||||
if docker_check.returncode != 0:
|
||||
pytest.skip(f"Docker daemon not available: {docker_check.stderr.strip()}")
|
||||
|
||||
result = _compose("up", "-d", "--build", "--wait", timeout=300)
|
||||
if result.returncode != 0:
|
||||
pytest.fail(f"docker compose up failed:\n{result.stderr}")
|
||||
|
||||
base_url = "http://localhost:18000"
|
||||
if not _wait_for_healthy(f"{base_url}/health"):
|
||||
logs = _compose("logs")
|
||||
_compose("down", "-v")
|
||||
pytest.fail(f"Dashboard never became healthy:\n{logs.stdout}")
|
||||
|
||||
yield base_url
|
||||
|
||||
_compose("down", "-v", timeout=60)
|
||||
@pytest.fixture
def headed_mode(request):
    """True when the --headed flag was passed on the command line."""
    return request.config.getoption("--headed")
|
||||
|
||||
231
tests/functional/test_fast_e2e.py
Normal file
231
tests/functional/test_fast_e2e.py
Normal file
@@ -0,0 +1,231 @@
|
||||
"""Fast E2E tests - all checks in one browser session, under 20 seconds.
|
||||
|
||||
RUN: SELENIUM_UI=1 pytest tests/functional/test_fast_e2e.py -v
|
||||
"""
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
import pytest
|
||||
import httpx
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.chrome.options import Options
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
from selenium.webdriver.support.ui import WebDriverWait
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
os.environ.get("SELENIUM_UI") != "1",
|
||||
reason="Set SELENIUM_UI=1 to run Selenium UI tests",
|
||||
)
|
||||
|
||||
DASHBOARD_URL = os.environ.get("DASHBOARD_URL", "http://localhost:8000")
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def driver():
|
||||
"""Single browser instance for all tests (module-scoped for reuse)."""
|
||||
opts = Options()
|
||||
opts.add_argument("--headless=new") # Headless for speed
|
||||
opts.add_argument("--no-sandbox")
|
||||
opts.add_argument("--disable-dev-shm-usage")
|
||||
opts.add_argument("--disable-gpu")
|
||||
opts.add_argument("--window-size=1280,900")
|
||||
|
||||
d = webdriver.Chrome(options=opts)
|
||||
d.implicitly_wait(2) # Reduced from 5s
|
||||
yield d
|
||||
d.quit()
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def dashboard_url():
|
||||
"""Verify server is running."""
|
||||
try:
|
||||
r = httpx.get(f"{DASHBOARD_URL}/health", timeout=3)
|
||||
if r.status_code != 200:
|
||||
pytest.skip("Dashboard not healthy")
|
||||
except Exception:
|
||||
pytest.skip(f"Dashboard not reachable at {DASHBOARD_URL}")
|
||||
return DASHBOARD_URL
|
||||
|
||||
|
||||
class TestAllPagesLoad:
    """Single test that checks all pages load - much faster than separate tests."""

    def test_all_dashboard_pages_exist(self, driver, dashboard_url):
        """Verify all new feature pages load successfully in one browser session."""
        pages = [
            ("/swarm/events", "Event"),
            ("/lightning/ledger", "Ledger"),
            ("/memory", "Memory"),
            ("/router/status", "Router"),
            ("/self-modify/queue", "Upgrade"),
            ("/swarm/live", "Swarm"),  # Live page has "Swarm" not "Live"
        ]

        failures = []
        for path, expected_text in pages:
            try:
                driver.get(f"{dashboard_url}{path}")
                # Quick check - wait at most 3s for any content to render.
                WebDriverWait(driver, 3).until(
                    EC.presence_of_element_located((By.TAG_NAME, "body"))
                )

                # Verify page has expected content.
                page_text = driver.find_element(By.TAG_NAME, "body").text
                if expected_text.lower() not in page_text.lower():
                    failures.append(f"{path}: missing '{expected_text}'")
            except Exception as exc:
                failures.append(f"{path}: {type(exc).__name__}")

        if failures:
            pytest.fail(f"Pages failed to load: {', '.join(failures)}")
|
||||
|
||||
|
||||
class TestAllFeaturesWork:
    """Combined functional tests - single browser session."""

    def test_event_log_and_memory_and_ledger_functional(self, driver, dashboard_url):
        """Test Event Log, Memory, and Ledger functionality in one go."""
        # 1. Event Log - the page should render its header (or empty state).
        driver.get(f"{dashboard_url}/swarm/events")
        time.sleep(0.5)
        page_text = driver.find_element(By.TAG_NAME, "body").text
        assert "Event" in page_text or "event" in page_text, \
            "Event log page missing header"

        # Best-effort: create a task via the API to generate an event.
        try:
            httpx.post(
                f"{dashboard_url}/swarm/tasks",
                data={"description": "E2E test task"},
                timeout=2,
            )
        except Exception:
            pass  # Ignore, just checking page exists

        # 2. Memory - the search input must be present.
        driver.get(f"{dashboard_url}/memory?query=test")
        time.sleep(0.5)
        search_inputs = driver.find_elements(
            By.CSS_SELECTOR, "input[type='search'], input[name='query']"
        )
        assert search_inputs, "Memory page missing search input"

        # 3. Ledger - balance-related text must be displayed.
        driver.get(f"{dashboard_url}/lightning/ledger")
        time.sleep(0.5)
        ledger_text = driver.find_element(By.TAG_NAME, "body").text.lower()
        has_balance = any(
            term in ledger_text for term in ("balance", "sats", "transaction")
        )
        assert has_balance, "Ledger page missing balance info"
|
||||
|
||||
|
||||
class TestCascadeRouter:
    """Cascade Router - combined checks."""

    def test_router_status_and_navigation(self, driver, dashboard_url):
        """Verify router status page and nav link in one test."""
        # Router status page should show providers or a config message.
        driver.get(f"{dashboard_url}/router/status")
        time.sleep(0.5)
        status_text = driver.find_element(By.TAG_NAME, "body").text.lower()
        expected_words = ("provider", "router", "ollama", "config", "status")
        assert any(word in status_text for word in expected_words), \
            "Router status page missing content"

        # The home page navigation should carry a link to the router.
        driver.get(dashboard_url)
        time.sleep(0.3)
        router_links = driver.find_elements(
            By.XPATH, "//a[contains(@href, '/router')]"
        )
        assert router_links, "Navigation missing router link"
|
||||
|
||||
|
||||
class TestUpgradeQueue:
    """Upgrade Queue - combined checks."""

    def test_upgrade_queue_page_and_elements(self, driver, dashboard_url):
        """Verify upgrade queue page loads with expected elements."""
        driver.get(f"{dashboard_url}/self-modify/queue")
        time.sleep(0.5)

        body = driver.find_element(By.TAG_NAME, "body").text.lower()

        # Page header is present.
        assert "upgrade" in body or "queue" in body, "Missing queue header"

        # Pending section or its empty state is present.
        # Fix: the old `or "no pending" in body` branch was redundant —
        # "no pending" always contains "pending".
        assert "pending" in body, "Missing pending upgrades section"

        # Approve/Reject buttons only appear when upgrades exist; looking
        # them up is a soft structural smoke check (find_elements never
        # raises on zero matches). Fix: results were previously bound to
        # unused locals.
        driver.find_elements(By.XPATH, "//button[contains(text(), 'Approve')]")
        driver.find_elements(By.XPATH, "//button[contains(text(), 'Reject')]")
||||
|
||||
|
||||
class TestActivityFeed:
    """Activity Feed - combined checks."""

    def test_swarm_live_page_and_activity_feed(self, driver, dashboard_url):
        """Verify swarm live page has activity feed elements."""
        driver.get(f"{dashboard_url}/swarm/live")
        time.sleep(0.5)

        page_text = driver.find_element(By.TAG_NAME, "body").text.lower()

        # A live indicator or activity section should be present.
        keywords = ("live", "activity", "swarm", "agents", "tasks")
        assert any(word in page_text for word in keywords), \
            "Swarm live page missing content"

        # Basic structure: at least one card/panel container on the page.
        panels = driver.find_elements(By.CSS_SELECTOR, ".card, .panel, .mc-panel")
        assert panels, "Swarm live page missing panels"
|
||||
|
||||
|
||||
class TestFastSmoke:
    """Ultra-fast smoke tests using HTTP where possible."""

    def test_all_routes_respond_200(self, dashboard_url):
        """HTTP-only test - no browser, very fast."""
        routes = (
            "/swarm/events",
            "/lightning/ledger",
            "/memory",
            "/router/status",
            "/self-modify/queue",
            "/swarm/live",
        )

        failures = []
        for route in routes:
            try:
                response = httpx.get(
                    f"{dashboard_url}{route}", timeout=3, follow_redirects=True
                )
            except Exception as exc:
                failures.append(f"{route}: {type(exc).__name__}")
            else:
                if response.status_code != 200:
                    failures.append(f"{route}: {response.status_code}")

        if failures:
            pytest.fail(f"Routes failed: {', '.join(failures)}")
|
||||
169
tests/test_event_log.py
Normal file
169
tests/test_event_log.py
Normal file
@@ -0,0 +1,169 @@
|
||||
"""Tests for swarm event logging system."""
|
||||
|
||||
import pytest
|
||||
from datetime import datetime, timezone
|
||||
from swarm.event_log import (
|
||||
EventType,
|
||||
log_event,
|
||||
get_event,
|
||||
list_events,
|
||||
get_task_events,
|
||||
get_agent_events,
|
||||
get_recent_events,
|
||||
get_event_summary,
|
||||
prune_events,
|
||||
)
|
||||
|
||||
|
||||
class TestEventLog:
    """Test suite for event logging functionality."""

    def test_log_simple_event(self):
        """Test logging a basic event."""
        event = log_event(
            event_type=EventType.SYSTEM_INFO,
            source="test",
            data={"message": "test event"},
        )

        assert event.event_type == EventType.SYSTEM_INFO
        assert event.source == "test"
        assert event.data is not None

        # Round-trip: the logged event is retrievable by id.
        stored = get_event(event.id)
        assert stored is not None
        assert stored.source == "test"

    def test_log_task_event(self):
        """Test logging a task lifecycle event."""
        task_id, agent_id = "task-123", "agent-456"

        event = log_event(
            event_type=EventType.TASK_ASSIGNED,
            source="coordinator",
            task_id=task_id,
            agent_id=agent_id,
            data={"bid_sats": 100},
        )

        assert event.task_id == task_id
        assert event.agent_id == agent_id

        # Filtering by task id surfaces the event.
        by_task = get_task_events(task_id)
        assert len(by_task) >= 1
        assert any(e.id == event.id for e in by_task)

    def test_log_agent_event(self):
        """Test logging agent lifecycle events."""
        agent_id = "agent-test-001"

        event = log_event(
            event_type=EventType.AGENT_JOINED,
            source="coordinator",
            agent_id=agent_id,
            data={"persona_id": "forge"},
        )

        # Filtering by agent id surfaces the event.
        by_agent = get_agent_events(agent_id)
        assert len(by_agent) >= 1
        assert any(e.id == event.id for e in by_agent)

    def test_list_events_filtering(self):
        """Test filtering events by type and by source."""
        # Seed events of several types.
        log_event(EventType.TASK_CREATED, source="test")
        log_event(EventType.TASK_COMPLETED, source="test")
        log_event(EventType.SYSTEM_INFO, source="test")

        by_type = list_events(event_type=EventType.TASK_CREATED, limit=10)
        assert all(e.event_type == EventType.TASK_CREATED for e in by_type)

        by_source = list_events(source="test", limit=10)
        assert all(e.source == "test" for e in by_source)

    def test_get_recent_events(self):
        """Test retrieving recent events."""
        log_event(EventType.SYSTEM_INFO, source="recent_test")

        # The freshly-logged event must appear in the last minute's window.
        recent = get_recent_events(minutes=1)
        assert any(e.source == "recent_test" for e in recent)

    def test_event_summary(self):
        """Test event summary statistics."""
        log_event(EventType.TASK_CREATED, source="summary_test")
        log_event(EventType.TASK_CREATED, source="summary_test")
        log_event(EventType.TASK_COMPLETED, source="summary_test")

        summary = get_event_summary(minutes=1)
        assert "task.created" in summary or "task.completed" in summary

    def test_prune_events(self):
        """Test pruning old events."""
        # Only verifies the call succeeds — a 365-day cutoff avoids
        # deleting real data during the test run.
        pruned = prune_events(older_than_days=365)
        assert isinstance(pruned, int)

    def test_event_data_serialization(self):
        """Test that complex data is properly serialized."""
        payload = {
            "nested": {"key": "value"},
            "list": [1, 2, 3],
            "number": 42.5,
        }

        event = log_event(
            EventType.TOOL_CALLED,
            source="test",
            data=payload,
        )

        stored = get_event(event.id)
        # Data should survive the round trip (stored as a JSON string).
        assert stored.data is not None
|
||||
|
||||
|
||||
class TestEventTypes:
    """Test that all event types can be logged."""

    @pytest.mark.parametrize("event_type", [
        EventType.TASK_CREATED,
        EventType.TASK_BIDDING,
        EventType.TASK_ASSIGNED,
        EventType.TASK_STARTED,
        EventType.TASK_COMPLETED,
        EventType.TASK_FAILED,
        EventType.AGENT_JOINED,
        EventType.AGENT_LEFT,
        EventType.AGENT_STATUS_CHANGED,
        EventType.BID_SUBMITTED,
        EventType.AUCTION_CLOSED,
        EventType.TOOL_CALLED,
        EventType.TOOL_COMPLETED,
        EventType.TOOL_FAILED,
        EventType.SYSTEM_ERROR,
        EventType.SYSTEM_WARNING,
        EventType.SYSTEM_INFO,
    ])
    def test_all_event_types(self, event_type):
        """Verify every event type can be logged and retrieved."""
        logged = log_event(
            event_type=event_type,
            source="type_test",
            data={"test": True},
        )

        fetched = get_event(logged.id)
        assert fetched is not None
        assert fetched.event_type == event_type
|
||||
275
tests/test_functional_mcp.py
Normal file
275
tests/test_functional_mcp.py
Normal file
@@ -0,0 +1,275 @@
|
||||
"""Functional tests for MCP Discovery and Bootstrap - tests actual behavior.
|
||||
|
||||
These tests verify the MCP system works end-to-end.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import sys
|
||||
import types
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from mcp.discovery import ToolDiscovery, mcp_tool, DiscoveredTool
|
||||
from mcp.bootstrap import auto_bootstrap, bootstrap_from_directory
|
||||
from mcp.registry import ToolRegistry
|
||||
|
||||
|
||||
class TestMCPToolDecoratorFunctional:
    """Functional tests for @mcp_tool decorator."""

    def test_decorator_marks_function(self):
        """Test that decorator properly marks function as tool."""
        @mcp_tool(name="my_tool", category="test", tags=["a", "b"])
        def my_function(x: str) -> str:
            """Do something."""
            return x

        # The decorator attaches its metadata as underscore attributes.
        assert hasattr(my_function, "_mcp_tool")
        assert my_function._mcp_tool is True
        assert my_function._mcp_name == "my_tool"
        assert my_function._mcp_category == "test"
        assert my_function._mcp_tags == ["a", "b"]
        assert "Do something" in my_function._mcp_description

    def test_decorator_uses_defaults(self):
        """Test decorator uses sensible defaults."""
        @mcp_tool()
        def another_function():
            pass

        # Name falls back to the function name; category/tags to defaults.
        assert another_function._mcp_name == "another_function"
        assert another_function._mcp_category == "general"
        assert another_function._mcp_tags == []
|
||||
|
||||
|
||||
class TestToolDiscoveryFunctional:
    """Functional tests for tool discovery."""

    @pytest.fixture
    def mock_module(self):
        """Install a synthetic module containing decorated tools."""
        module = types.ModuleType("test_discovery_module")
        module.__file__ = "test_discovery_module.py"

        @mcp_tool(name="echo", category="test")
        def echo_func(message: str) -> str:
            """Echo a message."""
            return message

        @mcp_tool(name="add", category="math")
        def add_func(a: int, b: int) -> int:
            """Add numbers."""
            return a + b

        def not_a_tool():
            """Not decorated."""
            pass

        module.echo_func = echo_func
        module.add_func = add_func
        module.not_a_tool = not_a_tool

        sys.modules["test_discovery_module"] = module
        yield module
        del sys.modules["test_discovery_module"]

    def test_discover_module_finds_tools(self, mock_module):
        """Test discovering tools from a module."""
        discovery = ToolDiscovery(registry=ToolRegistry())

        discovered = discovery.discover_module("test_discovery_module")

        tool_names = [tool.name for tool in discovered]
        assert "echo" in tool_names
        assert "add" in tool_names
        assert "not_a_tool" not in tool_names

    def test_discovered_tool_has_correct_metadata(self, mock_module):
        """Test discovered tools have correct metadata."""
        discovery = ToolDiscovery(registry=ToolRegistry())

        discovered = discovery.discover_module("test_discovery_module")

        echo_tool = next(t for t in discovered if t.name == "echo")
        assert echo_tool.category == "test"
        assert "Echo a message" in echo_tool.description

    def test_discovered_tool_has_schema(self, mock_module):
        """Test discovered tools have generated schemas."""
        discovery = ToolDiscovery(registry=ToolRegistry())

        discovered = discovery.discover_module("test_discovery_module")

        add_tool = next(t for t in discovered if t.name == "add")
        schema = add_tool.parameters_schema
        assert "properties" in schema
        assert "a" in schema["properties"]
        assert "b" in schema["properties"]

    def test_discover_nonexistent_module(self):
        """Test discovering from non-existent module returns empty list."""
        discovery = ToolDiscovery(registry=ToolRegistry())

        assert discovery.discover_module("nonexistent_xyz_module") == []
|
||||
|
||||
|
||||
class TestToolRegistrationFunctional:
    """Functional tests for tool registration via discovery."""

    @pytest.fixture
    def mock_module(self):
        """Install a synthetic module with one decorated tool."""
        module = types.ModuleType("test_register_module")
        module.__file__ = "test_register_module.py"

        @mcp_tool(name="register_test", category="test")
        def test_func(value: str) -> str:
            """Test function."""
            return value.upper()

        module.test_func = test_func
        sys.modules["test_register_module"] = module
        yield module
        del sys.modules["test_register_module"]

    def test_auto_register_adds_to_registry(self, mock_module):
        """Test auto_register adds tools to registry."""
        registry = ToolRegistry()
        discovery = ToolDiscovery(registry=registry)

        registered = discovery.auto_register("test_register_module")

        assert "register_test" in registered
        assert registry.get("register_test") is not None

    def test_registered_tool_can_execute(self, mock_module):
        """Test that registered tools can be executed."""
        registry = ToolRegistry()
        ToolDiscovery(registry=registry).auto_register("test_register_module")

        outcome = asyncio.run(
            registry.execute("register_test", {"value": "hello"})
        )

        assert outcome == "HELLO"

    def test_registered_tool_tracks_metrics(self, mock_module):
        """Test that tool execution tracks metrics."""
        registry = ToolRegistry()
        ToolDiscovery(registry=registry).auto_register("test_register_module")

        # Execute a few times so the counters have something to count.
        for _ in range(3):
            asyncio.run(registry.execute("register_test", {"value": "test"}))

        metrics = registry.get_metrics("register_test")
        assert metrics["executions"] == 3
        assert metrics["health"] == "healthy"
|
||||
|
||||
|
||||
class TestMCBootstrapFunctional:
    """Functional tests for MCP bootstrap.

    NOTE(review): class name looks like a typo for TestMCPBootstrapFunctional
    — left as-is since pytest discovers it either way.
    """

    def test_auto_bootstrap_empty_list(self):
        """Test auto_bootstrap with empty packages list."""
        registered = auto_bootstrap(
            packages=[],
            registry=ToolRegistry(),
            force=True,
        )

        assert registered == []

    def test_auto_bootstrap_nonexistent_package(self):
        """Test auto_bootstrap with non-existent package."""
        registered = auto_bootstrap(
            packages=["nonexistent_package_12345"],
            registry=ToolRegistry(),
            force=True,
        )

        assert registered == []

    def test_bootstrap_status(self):
        """Test get_bootstrap_status returns expected structure."""
        from mcp.bootstrap import get_bootstrap_status

        status = get_bootstrap_status()

        for key in (
            "auto_bootstrap_enabled",
            "discovered_tools_count",
            "registered_tools_count",
            "default_packages",
        ):
            assert key in status
|
||||
|
||||
|
||||
class TestRegistryIntegration:
    """Integration tests for registry with discovery."""

    def test_registry_discover_filtering(self):
        """discover() should honour category and tag filters."""
        registry = ToolRegistry()

        @mcp_tool(name="cat1", category="category1", tags=["tag1"])
        def func1():
            pass

        @mcp_tool(name="cat2", category="category2", tags=["tag2"])
        def func2():
            pass

        registry.register_tool(name="cat1", function=func1, category="category1", tags=["tag1"])
        registry.register_tool(name="cat2", function=func2, category="category2", tags=["tag2"])

        # Category filter should surface exactly the matching tool.
        by_category = registry.discover(category="category1")
        assert len(by_category) == 1
        assert by_category[0].name == "cat1"

        # Tag filter behaves the same way.
        by_tag = registry.discover(tags=["tag1"])
        assert len(by_tag) == 1
        assert by_tag[0].name == "cat1"

    def test_registry_to_dict(self):
        """to_dict() should export counts plus every per-tool field."""
        registry = ToolRegistry()

        @mcp_tool(name="export_test", category="test", tags=["a"])
        def export_func():
            """Test export."""
            pass

        registry.register_tool(
            name="export_test",
            function=export_func,
            category="test",
            tags=["a"],
            source_module="test_module",
        )

        export = registry.to_dict()

        assert export["total_tools"] == 1
        assert export["auto_discovered_count"] == 1

        (tool,) = export["tools"]
        assert tool["name"] == "export_test"
        assert tool["category"] == "test"
        assert tool["tags"] == ["a"]
        assert tool["source_module"] == "test_module"
        assert tool["auto_discovered"] is True
270
tests/test_functional_router.py
Normal file
270
tests/test_functional_router.py
Normal file
@@ -0,0 +1,270 @@
|
||||
"""Functional tests for Cascade Router - tests actual behavior.
|
||||
|
||||
These tests verify the router works end-to-end with mocked external services.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import time
|
||||
from pathlib import Path
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from router.cascade import CascadeRouter, Provider, ProviderStatus, CircuitState
|
||||
|
||||
|
||||
class TestCascadeRouterFunctional:
    """Functional tests for Cascade Router with mocked providers.

    Only `_try_provider` is patched; the router's own selection, retry,
    failover, and circuit-breaker logic runs for real.
    """

    @pytest.fixture
    def router(self):
        """Create a router with no config file.

        The nonexistent path forces the router onto its built-in defaults.
        """
        return CascadeRouter(config_path=Path("/nonexistent"))

    @pytest.fixture
    def mock_healthy_provider(self):
        """Create a mock healthy provider."""
        provider = Provider(
            name="test-healthy",
            type="test",
            enabled=True,
            priority=1,
            models=[{"name": "test-model", "default": True}],
        )
        return provider

    @pytest.fixture
    def mock_failing_provider(self):
        """Create a mock failing provider (same shape; name marks intent)."""
        provider = Provider(
            name="test-failing",
            type="test",
            enabled=True,
            priority=1,
            models=[{"name": "test-model", "default": True}],
        )
        return provider

    @pytest.mark.asyncio
    async def test_successful_completion_single_provider(self, router, mock_healthy_provider):
        """Test successful completion with a single working provider."""
        router.providers = [mock_healthy_provider]

        # Patch the low-level provider call; the router adds "provider" itself.
        with patch.object(router, "_try_provider") as mock_try:
            mock_try.return_value = {
                "content": "Hello, world!",
                "model": "test-model",
                "latency_ms": 100.0,
            }

            result = await router.complete(
                messages=[{"role": "user", "content": "Hi"}],
            )

        assert result["content"] == "Hello, world!"
        assert result["provider"] == "test-healthy"
        assert result["model"] == "test-model"
        assert result["latency_ms"] == 100.0

    @pytest.mark.asyncio
    async def test_failover_to_second_provider(self, router):
        """Test failover when first provider fails."""
        provider1 = Provider(
            name="failing",
            type="test",
            enabled=True,
            priority=1,
            models=[{"name": "model", "default": True}],
        )
        provider2 = Provider(
            name="backup",
            type="test",
            enabled=True,
            priority=2,
            models=[{"name": "model", "default": True}],
        )
        router.providers = [provider1, provider2]

        # Mutable cell so the closure can count calls across invocations.
        call_count = [0]

        async def side_effect(*args, **kwargs):
            # Fail for every retry against the first provider, then succeed,
            # which forces the router to cascade to "backup".
            call_count[0] += 1
            if call_count[0] <= router.config.max_retries_per_provider:
                raise RuntimeError("Connection failed")
            return {"content": "Backup works!", "model": "model"}

        with patch.object(router, "_try_provider", side_effect=side_effect):
            result = await router.complete(
                messages=[{"role": "user", "content": "Hi"}],
            )

        assert result["content"] == "Backup works!"
        assert result["provider"] == "backup"

    @pytest.mark.asyncio
    async def test_all_providers_fail_raises_error(self, router):
        """Test that RuntimeError is raised when all providers fail."""
        provider = Provider(
            name="always-fails",
            type="test",
            enabled=True,
            priority=1,
            models=[{"name": "model", "default": True}],
        )
        router.providers = [provider]

        with patch.object(router, "_try_provider") as mock_try:
            mock_try.side_effect = RuntimeError("Always fails")

            with pytest.raises(RuntimeError) as exc_info:
                await router.complete(messages=[{"role": "user", "content": "Hi"}])

        assert "All providers failed" in str(exc_info.value)

    @pytest.mark.asyncio
    async def test_circuit_breaker_opens_after_failures(self, router):
        """Test circuit breaker opens after threshold failures."""
        provider = Provider(
            name="test",
            type="test",
            enabled=True,
            priority=1,
            models=[{"name": "model", "default": True}],
        )
        router.providers = [provider]
        router.config.circuit_breaker_failure_threshold = 3

        # Record exactly threshold failures; the breaker should trip.
        for _ in range(3):
            router._record_failure(provider)

        assert provider.circuit_state == CircuitState.OPEN
        assert provider.status == ProviderStatus.UNHEALTHY

    def test_metrics_tracking(self, router):
        """Test that metrics are tracked correctly."""
        provider = Provider(
            name="test",
            type="test",
            enabled=True,
            priority=1,
        )
        router.providers = [provider]

        # Two successes with known latencies, one failure.
        router._record_success(provider, 100.0)
        router._record_success(provider, 200.0)
        router._record_failure(provider)

        metrics = router.get_metrics()

        assert len(metrics["providers"]) == 1
        p_metrics = metrics["providers"][0]
        assert p_metrics["metrics"]["total_requests"] == 3
        assert p_metrics["metrics"]["successful"] == 2
        assert p_metrics["metrics"]["failed"] == 1
        # Average latency is over ALL requests (failures contribute 0 latency).
        assert p_metrics["metrics"]["avg_latency_ms"] == 100.0  # (100+200+0)/3

    @pytest.mark.asyncio
    async def test_skips_disabled_providers(self, router):
        """Test that disabled providers are skipped."""
        disabled = Provider(
            name="disabled",
            type="test",
            enabled=False,
            priority=1,
            models=[{"name": "model", "default": True}],
        )
        enabled = Provider(
            name="enabled",
            type="test",
            enabled=True,
            priority=2,
            models=[{"name": "model", "default": True}],
        )
        router.providers = [disabled, enabled]

        # Despite its higher priority, the disabled provider must be ignored.
        with patch.object(router, "_try_provider") as mock_try:
            mock_try.return_value = {"content": "Success", "model": "model"}

            result = await router.complete(messages=[{"role": "user", "content": "Hi"}])

        assert result["provider"] == "enabled"
class TestProviderAvailability:
    """Test provider availability checking."""

    @pytest.fixture
    def router(self):
        """Router built with defaults (config path does not exist)."""
        return CascadeRouter(config_path=Path("/nonexistent"))

    def test_openai_available_with_key(self, router):
        """OpenAI counts as available once an API key is configured."""
        provider = Provider(
            name="openai", type="openai", enabled=True, priority=1, api_key="sk-test123"
        )

        assert router._check_provider_available(provider) is True

    def test_openai_unavailable_without_key(self, router):
        """OpenAI is unavailable when no API key is configured."""
        provider = Provider(
            name="openai", type="openai", enabled=True, priority=1, api_key=None
        )

        assert router._check_provider_available(provider) is False

    def test_anthropic_available_with_key(self, router):
        """Anthropic counts as available once an API key is configured."""
        provider = Provider(
            name="anthropic", type="anthropic", enabled=True, priority=1, api_key="sk-test123"
        )

        assert router._check_provider_available(provider) is True
class TestRouterConfigLoading:
    """Test router configuration loading."""

    def test_loads_timeout_from_config(self, tmp_path):
        """Values in providers.yaml should override the built-in defaults."""
        import yaml

        settings = {
            "cascade": {"timeout_seconds": 60, "max_retries_per_provider": 3},
            "providers": [],
        }
        config_file = tmp_path / "providers.yaml"
        config_file.write_text(yaml.dump(settings))

        router = CascadeRouter(config_path=config_file)

        assert router.config.timeout_seconds == 60
        assert router.config.max_retries_per_provider == 3

    def test_uses_defaults_without_config(self):
        """A missing config file should fall back to built-in defaults."""
        router = CascadeRouter(config_path=Path("/nonexistent"))

        assert router.config.timeout_seconds == 30
        assert router.config.max_retries_per_provider == 2
166
tests/test_integration_full.py
Normal file
166
tests/test_integration_full.py
Normal file
@@ -0,0 +1,166 @@
|
||||
"""End-to-end integration tests for the complete system.
|
||||
|
||||
These tests verify the full stack works together.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from fastapi.testclient import TestClient
|
||||
|
||||
|
||||
class TestDashboardIntegration:
    """Integration tests for the dashboard app."""

    @pytest.fixture
    def client(self):
        """Create a test client against the real FastAPI app."""
        from dashboard.app import app

        return TestClient(app)

    def test_health_endpoint(self, client):
        """The health check endpoint should answer 200 with a status field."""
        response = client.get("/health")

        assert response.status_code == 200
        assert "status" in response.json()

    def test_index_page_loads(self, client):
        """The main page should load and contain recognizable branding."""
        response = client.get("/")

        assert response.status_code == 200
        # Either branding string is acceptable.
        assert "Timmy" in response.text or "Mission Control" in response.text
class TestRouterAPIIntegration:
    """Integration tests for Router API endpoints."""

    @pytest.fixture
    def client(self):
        """Create a test client against the real FastAPI app."""
        from dashboard.app import app

        return TestClient(app)

    @staticmethod
    def _get_json(client, path):
        """GET *path*, assert HTTP 200, and return the decoded JSON body."""
        response = client.get(path)
        assert response.status_code == 200
        return response.json()

    def test_router_status_endpoint(self, client):
        """Status endpoint should report provider counts and details."""
        data = self._get_json(client, "/api/v1/router/status")

        assert "total_providers" in data
        assert "providers" in data

    def test_router_metrics_endpoint(self, client):
        """Metrics endpoint should report per-provider metrics."""
        data = self._get_json(client, "/api/v1/router/metrics")

        assert "providers" in data

    def test_router_providers_endpoint(self, client):
        """Providers endpoint should return a JSON list."""
        data = self._get_json(client, "/api/v1/router/providers")

        assert isinstance(data, list)

    def test_router_config_endpoint(self, client):
        """Config endpoint should expose timeout and circuit-breaker settings."""
        data = self._get_json(client, "/api/v1/router/config")

        assert "timeout_seconds" in data
        assert "circuit_breaker" in data
class TestMCPIntegration:
    """Integration tests for MCP system."""

    def test_mcp_registry_singleton(self):
        """get_registry() must hand back the module-level registry object."""
        from mcp.registry import tool_registry, get_registry

        assert get_registry() is tool_registry

    def test_mcp_discovery_singleton(self):
        """Repeated get_discovery() calls must return one shared object."""
        from mcp.discovery import get_discovery

        first = get_discovery()
        second = get_discovery()

        assert first is second

    def test_mcp_bootstrap_status(self):
        """Bootstrap status fields should have the documented types."""
        from mcp.bootstrap import get_bootstrap_status

        status = get_bootstrap_status()

        assert isinstance(status["auto_bootstrap_enabled"], bool)
        assert isinstance(status["discovered_tools_count"], int)
        assert isinstance(status["registered_tools_count"], int)
class TestEventBusIntegration:
    """Integration tests for Event Bus."""

    @pytest.mark.asyncio
    async def test_event_bus_publish_subscribe(self):
        """A published event should reach a wildcard subscriber."""
        import asyncio

        from events.bus import EventBus, Event

        bus = EventBus()
        received = []

        @bus.subscribe("test.event.*")
        async def handler(event):
            received.append(event.data)

        await bus.publish(
            Event(type="test.event.test", source="test", data={"message": "hello"})
        )

        # Yield to the event loop so the async handler can run.
        await asyncio.sleep(0.1)

        assert len(received) == 1
        assert received[0]["message"] == "hello"
class TestAgentSystemIntegration:
|
||||
"""Integration tests for Agent system."""
|
||||
|
||||
def test_base_agent_imports(self):
|
||||
"""Test that base agent can be imported."""
|
||||
from agents.base import BaseAgent
|
||||
|
||||
assert BaseAgent is not None
|
||||
|
||||
def test_agent_creation(self):
|
||||
"""Test creating agent config dict (AgentConfig class doesn't exist)."""
|
||||
config = {
|
||||
"name": "test_agent",
|
||||
"system_prompt": "You are a test agent.",
|
||||
}
|
||||
|
||||
assert config["name"] == "test_agent"
|
||||
assert config["system_prompt"] == "You are a test agent."
|
||||
|
||||
|
||||
class TestMemorySystemIntegration:
    """Integration tests for Memory system."""

    def test_memory_system_imports(self):
        """The memory system class must be importable."""
        from timmy.memory_system import MemorySystem

        assert MemorySystem is not None

    def test_semantic_memory_imports(self):
        """The semantic memory class must be importable."""
        from timmy.semantic_memory import SemanticMemory

        assert SemanticMemory is not None
||||
211
tests/test_ledger.py
Normal file
211
tests/test_ledger.py
Normal file
@@ -0,0 +1,211 @@
|
||||
"""Tests for Lightning ledger system."""
|
||||
|
||||
import pytest
|
||||
from lightning.ledger import (
|
||||
TransactionType,
|
||||
TransactionStatus,
|
||||
create_invoice_entry,
|
||||
record_outgoing_payment,
|
||||
mark_settled,
|
||||
mark_failed,
|
||||
get_by_hash,
|
||||
list_transactions,
|
||||
get_balance,
|
||||
get_transaction_stats,
|
||||
)
|
||||
|
||||
|
||||
class TestLedger:
    """Test suite for Lightning ledger functionality.

    NOTE(review): these tests write to the shared ledger database and use
    hard-coded payment hashes, so they assume a fresh store per test run —
    confirm against the test isolation fixtures.
    """

    def test_create_invoice_entry(self):
        """Test creating an incoming invoice entry."""
        entry = create_invoice_entry(
            payment_hash="test_hash_001",
            amount_sats=1000,
            memo="Test invoice",
            invoice="lnbc10u1...",
            source="test",
            task_id="task-123",
            agent_id="agent-456",
        )

        # New invoices start as PENDING incoming transactions.
        assert entry.tx_type == TransactionType.INCOMING
        assert entry.status == TransactionStatus.PENDING
        assert entry.amount_sats == 1000
        assert entry.payment_hash == "test_hash_001"
        assert entry.memo == "Test invoice"
        assert entry.task_id == "task-123"
        assert entry.agent_id == "agent-456"

    def test_record_outgoing_payment(self):
        """Test recording an outgoing payment."""
        entry = record_outgoing_payment(
            payment_hash="test_hash_002",
            amount_sats=500,
            memo="Test payment",
            source="test",
            task_id="task-789",
        )

        assert entry.tx_type == TransactionType.OUTGOING
        assert entry.status == TransactionStatus.PENDING
        assert entry.amount_sats == 500
        assert entry.payment_hash == "test_hash_002"

    def test_mark_settled(self):
        """Test marking a transaction as settled."""
        # Create invoice
        entry = create_invoice_entry(
            payment_hash="test_hash_settle",
            amount_sats=100,
            memo="To be settled",
        )
        assert entry.status == TransactionStatus.PENDING

        # Mark as settled
        settled = mark_settled(
            payment_hash="test_hash_settle",
            preimage="preimage123",
            fee_sats=1,
        )

        assert settled is not None
        assert settled.status == TransactionStatus.SETTLED
        assert settled.preimage == "preimage123"
        assert settled.fee_sats == 1
        assert settled.settled_at is not None

        # The settled state must be visible through a fresh lookup too.
        retrieved = get_by_hash("test_hash_settle")
        assert retrieved.status == TransactionStatus.SETTLED

    def test_mark_failed(self):
        """Test marking a transaction as failed."""
        # Create invoice
        entry = create_invoice_entry(
            payment_hash="test_hash_fail",
            amount_sats=200,
            memo="To fail",
        )

        # Mark as failed
        failed = mark_failed("test_hash_fail", reason="Timeout")

        assert failed is not None
        assert failed.status == TransactionStatus.FAILED
        # The failure reason is appended to the memo.
        assert "Timeout" in failed.memo

    def test_get_by_hash_not_found(self):
        """Test retrieving non-existent transaction returns None."""
        result = get_by_hash("nonexistent_hash")
        assert result is None

    def test_list_transactions_filtering(self):
        """Test filtering transactions by type and by status."""
        # Create various transactions
        create_invoice_entry("filter_test_1", 100, source="filter_test")
        create_invoice_entry("filter_test_2", 200, source="filter_test")

        # Filter by type
        incoming = list_transactions(
            tx_type=TransactionType.INCOMING,
            limit=10,
        )
        assert all(t.tx_type == TransactionType.INCOMING for t in incoming)

        # Filter by status
        pending = list_transactions(
            status=TransactionStatus.PENDING,
            limit=10,
        )
        assert all(t.status == TransactionStatus.PENDING for t in pending)

    def test_get_balance(self):
        """Test balance calculation returns the expected fields."""
        # Get initial balance
        balance = get_balance()

        assert "incoming_total_sats" in balance
        assert "outgoing_total_sats" in balance
        assert "net_sats" in balance
        assert isinstance(balance["incoming_total_sats"], int)
        assert isinstance(balance["outgoing_total_sats"], int)

    def test_transaction_stats(self):
        """Test transaction statistics."""
        # Create some transactions
        create_invoice_entry("stats_test_1", 100, source="stats_test")
        create_invoice_entry("stats_test_2", 200, source="stats_test")

        # Get stats
        stats = get_transaction_stats(days=1)

        # Stats are keyed by date, which varies per run — only the shape
        # can be asserted here.
        assert isinstance(stats, dict)

    def test_unique_payment_hash(self):
        """Test that payment hashes must be unique (DB-level constraint)."""
        import sqlite3

        hash_value = "unique_hash_test"

        # First creation should succeed
        create_invoice_entry(hash_value, 100)

        # Second creation with same hash should fail with IntegrityError
        with pytest.raises(sqlite3.IntegrityError):
            create_invoice_entry(hash_value, 200)
class TestLedgerIntegration:
    """Integration tests for ledger workflow."""

    def test_full_invoice_lifecycle(self):
        """Test complete invoice lifecycle: create -> settle -> verify."""
        # Create invoice
        entry = create_invoice_entry(
            payment_hash="lifecycle_test",
            amount_sats=5000,
            memo="Full lifecycle test",
            source="integration_test",
        )

        assert entry.status == TransactionStatus.PENDING

        # Mark as settled
        settled = mark_settled("lifecycle_test", preimage="secret_preimage")

        assert settled.status == TransactionStatus.SETTLED
        assert settled.preimage == "secret_preimage"

        # Verify the transaction shows up in listings
        transactions = list_transactions(limit=100)
        assert any(t.payment_hash == "lifecycle_test" for t in transactions)

        # Verify balance reflects it. (Previously the balance was fetched
        # but never asserted on — dead code.) The settled 5000-sat invoice
        # must be counted in incoming totals.
        balance = get_balance()
        assert balance["incoming_total_sats"] >= 5000

    def test_outgoing_payment_lifecycle(self):
        """Test complete outgoing payment lifecycle: record -> settle."""
        # Record outgoing payment
        entry = record_outgoing_payment(
            payment_hash="outgoing_test",
            amount_sats=300,
            memo="Outgoing payment",
            source="integration_test",
        )

        assert entry.tx_type == TransactionType.OUTGOING

        # Mark as settled (payment completed), including the routing fee
        settled = mark_settled(
            "outgoing_test",
            preimage="payment_proof",
            fee_sats=3,
        )

        assert settled.fee_sats == 3
        assert settled.status == TransactionStatus.SETTLED
||||
262
tests/test_vector_store.py
Normal file
262
tests/test_vector_store.py
Normal file
@@ -0,0 +1,262 @@
|
||||
"""Tests for vector store (semantic memory) system."""
|
||||
|
||||
import pytest
|
||||
from memory.vector_store import (
|
||||
store_memory,
|
||||
search_memories,
|
||||
get_memory_context,
|
||||
recall_personal_facts,
|
||||
store_personal_fact,
|
||||
delete_memory,
|
||||
get_memory_stats,
|
||||
prune_memories,
|
||||
_cosine_similarity,
|
||||
_keyword_overlap,
|
||||
)
|
||||
|
||||
|
||||
class TestVectorStore:
    """Test suite for vector store functionality.

    NOTE(review): these tests share the persistent memory store across
    methods and rely on keyword/embedding relevance scoring — confirm the
    store is reset between test runs.
    """

    def test_store_simple_memory(self):
        """Test storing a basic memory entry."""
        entry = store_memory(
            content="This is a test memory",
            source="test_agent",
            context_type="conversation",
        )

        assert entry.content == "This is a test memory"
        assert entry.source == "test_agent"
        assert entry.context_type == "conversation"
        # The store assigns an id and timestamp on insert.
        assert entry.id is not None
        assert entry.timestamp is not None

    def test_store_memory_with_metadata(self):
        """Test storing memory with the optional correlation fields."""
        entry = store_memory(
            content="Memory with metadata",
            source="user",
            context_type="fact",
            agent_id="agent-001",
            task_id="task-123",
            session_id="session-456",
            metadata={"importance": "high", "tags": ["test"]},
        )

        assert entry.agent_id == "agent-001"
        assert entry.task_id == "task-123"
        assert entry.session_id == "session-456"
        assert entry.metadata == {"importance": "high", "tags": ["test"]}

    def test_search_memories_basic(self):
        """Test basic memory search returns scored results."""
        # Store some memories
        store_memory("Bitcoin is a decentralized currency", source="user")
        store_memory("Lightning Network enables fast payments", source="user")
        store_memory("Python is a programming language", source="user")

        # Search for Bitcoin-related memories
        results = search_memories("cryptocurrency", limit=5)

        # Should find at least one relevant result
        assert len(results) > 0
        # Every result must carry a relevance score.
        assert all(r.relevance_score is not None for r in results)

    def test_search_with_filters(self):
        """Test searching with context_type and agent_id filters."""
        # Store memories with different types
        store_memory(
            "Conversation about AI",
            source="user",
            context_type="conversation",
            agent_id="agent-1",
        )
        store_memory(
            "Fact: AI stands for artificial intelligence",
            source="system",
            context_type="fact",
            agent_id="agent-1",
        )
        store_memory(
            "Another conversation",
            source="user",
            context_type="conversation",
            agent_id="agent-2",
        )

        # Filter by context type
        facts = search_memories("AI", context_type="fact", limit=5)
        assert all(f.context_type == "fact" for f in facts)

        # Filter by agent
        agent1_memories = search_memories("conversation", agent_id="agent-1", limit=5)
        assert all(m.agent_id == "agent-1" for m in agent1_memories)

    def test_get_memory_context(self):
        """Test getting formatted memory context for prompt injection."""
        # Store memories
        store_memory("Important fact about the project", source="user")
        store_memory("Another relevant detail", source="agent")

        # Get context
        context = get_memory_context("project details", max_tokens=500)

        assert isinstance(context, str)
        assert len(context) > 0
        # The context string carries a fixed header.
        assert "Relevant context from memory:" in context

    def test_personal_facts(self):
        """Test storing and recalling personal facts."""
        # Store a personal fact
        fact = store_personal_fact("User prefers dark mode", agent_id="agent-1")

        assert fact.context_type == "fact"
        assert fact.content == "User prefers dark mode"

        # Recall facts
        facts = recall_personal_facts(agent_id="agent-1")
        assert "User prefers dark mode" in facts

    def test_delete_memory(self):
        """Test deleting a memory entry."""
        # Create a memory
        entry = store_memory("To be deleted", source="test")

        # Delete it
        deleted = delete_memory(entry.id)
        assert deleted is True

        # Verify it's gone (search shouldn't find it)
        results = search_memories("To be deleted", limit=10)
        assert not any(r.id == entry.id for r in results)

        # Deleting a non-existent id should return False, not raise.
        deleted_again = delete_memory(entry.id)
        assert deleted_again is False

    def test_get_memory_stats(self):
        """Test memory statistics expose the documented fields."""
        stats = get_memory_stats()

        assert "total_entries" in stats
        assert "by_type" in stats
        assert "with_embeddings" in stats
        assert "has_embedding_model" in stats
        assert isinstance(stats["total_entries"], int)

    def test_prune_memories(self):
        """Test pruning old memories runs without error."""
        # Use a 365-day horizon so no current test data is deleted.
        count = prune_memories(older_than_days=365, keep_facts=True)
        assert isinstance(count, int)
class TestVectorStoreUtils:
    """Test utility functions."""

    def test_cosine_similarity_identical(self):
        """A vector compared with itself scores 1."""
        vector = [1.0, 0.0, 0.0]
        assert _cosine_similarity(vector, vector) == pytest.approx(1.0)

    def test_cosine_similarity_orthogonal(self):
        """Perpendicular vectors score 0."""
        assert _cosine_similarity([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]) == pytest.approx(0.0)

    def test_cosine_similarity_opposite(self):
        """Opposite vectors score -1."""
        assert _cosine_similarity([1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]) == pytest.approx(-1.0)

    def test_cosine_similarity_zero_vector(self):
        """A zero vector yields exactly 0.0 instead of dividing by zero."""
        assert _cosine_similarity([1.0, 0.0, 0.0], [0.0, 0.0, 0.0]) == 0.0

    def test_keyword_overlap_exact(self):
        """All query words present in content gives full overlap."""
        assert _keyword_overlap("bitcoin lightning", "bitcoin lightning network") == 1.0

    def test_keyword_overlap_partial(self):
        """One of two query words present gives 0.5 overlap."""
        assert _keyword_overlap("bitcoin lightning", "bitcoin is great") == 0.5

    def test_keyword_overlap_none(self):
        """No shared words gives zero overlap."""
        assert _keyword_overlap("bitcoin", "completely different topic") == 0.0
class TestVectorStoreIntegration:
    """Integration tests for vector store workflow."""

    def test_memory_workflow(self):
        """Test complete memory workflow: store -> search -> retrieve."""
        # Store a fact plus two conversation memories in one session.
        store_memory(
            "The project deadline is next Friday",
            source="user",
            context_type="fact",
            session_id="session-1",
        )
        store_memory(
            "We need to implement the payment system",
            source="user",
            context_type="conversation",
            session_id="session-1",
        )
        store_memory(
            "The database schema needs updating",
            source="agent",
            context_type="conversation",
            session_id="session-1",
        )

        # Search for deadline-related memories
        results = search_memories("when is the deadline", limit=5)

        # Should find the deadline memory among the top hits
        assert len(results) > 0
        assert any("deadline" in r.content.lower() for r in results[:3])

        # Get context for a prompt; it should surface session-1 content.
        context = get_memory_context("project timeline", session_id="session-1")
        assert "deadline" in context.lower() or "implement" in context.lower()

    def test_embedding_vs_keyword_fallback(self):
        """Test that the system works with or without embedding model."""
        # Report whether a real embedding model is present (informational;
        # the assertions below must hold either way). Previously this value
        # was fetched into an unused local — now it is actually checked.
        stats = get_memory_stats()
        assert isinstance(stats["has_embedding_model"], bool)

        # Store a memory and request an embedding explicitly.
        entry = store_memory(
            "Testing embedding functionality",
            source="test",
            compute_embedding=True,
        )

        # Should have an embedding (real model or keyword fallback).
        assert entry.embedding is not None

        # Search should work regardless of which backend produced it.
        results = search_memories("embedding test", limit=5)
        assert len(results) > 0
||||
Reference in New Issue
Block a user