refactor: Phase 3 — reorganize tests into module-mirroring subdirectories
Move 97 test files from flat tests/ into 13 subdirectories:

- tests/dashboard/ (8 files — routes, mobile, mission control)
- tests/swarm/ (17 files — coordinator, docker, routing, tasks)
- tests/timmy/ (12 files — agent, backends, CLI, tools)
- tests/self_coding/ (14 files — git safety, indexer, self-modify)
- tests/lightning/ (3 files — L402, LND, interface)
- tests/creative/ (8 files — assembler, director, image/music/video)
- tests/integrations/ (10 files — chat bridge, telegram, voice, websocket)
- tests/mcp/ (4 files — bootstrap, discovery, executor)
- tests/spark/ (3 files — engine, tools, events)
- tests/hands/ (3 files — registry, oracle, phase5)
- tests/scripture/ (1 file)
- tests/infrastructure/ (3 files — router cascade, API)
- tests/security/ (3 files — XSS, regression)

Fix the Path(__file__) reference in test_mobile_scenarios.py for the new directory depth. Add __init__.py to all test subdirectories.

Tests: 1503 passed, 9 failed (pre-existing), 53 errors (pre-existing)

https://claude.ai/code/session_019oMFNvD8uSGSSmBMGkBfQN
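The Path(__file__) change itself is not part of the hunks below; here is a minimal sketch of the kind of one-line fix the extra directory level forces (the parents index and the REPO_ROOT name are illustrative assumptions, not the actual contents of test_mobile_scenarios.py):

    from pathlib import Path

    # Before the move, tests/test_mobile_scenarios.py sat directly under
    # tests/, so the repo root was Path(__file__).parents[1].
    # After the move to tests/dashboard/, one more directory separates the
    # file from the repo root, so the index is bumped by one:
    REPO_ROOT = Path(__file__).parents[2]

With the new layout, each module's tests can also be run in isolation, e.g. pytest tests/spark/ -q.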
tests/spark/__init__.py (new file, empty)
tests/spark/test_event_log.py (new file, 169 lines)
@@ -0,0 +1,169 @@
"""Tests for swarm event logging system."""

import pytest
from datetime import datetime, timezone
from swarm.event_log import (
    EventType,
    log_event,
    get_event,
    list_events,
    get_task_events,
    get_agent_events,
    get_recent_events,
    get_event_summary,
    prune_events,
)


class TestEventLog:
    """Test suite for event logging functionality."""

    def test_log_simple_event(self):
        """Test logging a basic event."""
        event = log_event(
            event_type=EventType.SYSTEM_INFO,
            source="test",
            data={"message": "test event"},
        )

        assert event.event_type == EventType.SYSTEM_INFO
        assert event.source == "test"
        assert event.data is not None

        # Verify we can retrieve it
        retrieved = get_event(event.id)
        assert retrieved is not None
        assert retrieved.source == "test"

    def test_log_task_event(self):
        """Test logging a task lifecycle event."""
        task_id = "task-123"
        agent_id = "agent-456"

        event = log_event(
            event_type=EventType.TASK_ASSIGNED,
            source="coordinator",
            task_id=task_id,
            agent_id=agent_id,
            data={"bid_sats": 100},
        )

        assert event.task_id == task_id
        assert event.agent_id == agent_id

        # Verify filtering by task works
        task_events = get_task_events(task_id)
        assert len(task_events) >= 1
        assert any(e.id == event.id for e in task_events)

    def test_log_agent_event(self):
        """Test logging agent lifecycle events."""
        agent_id = "agent-test-001"

        event = log_event(
            event_type=EventType.AGENT_JOINED,
            source="coordinator",
            agent_id=agent_id,
            data={"persona_id": "forge"},
        )

        # Verify filtering by agent works
        agent_events = get_agent_events(agent_id)
        assert len(agent_events) >= 1
        assert any(e.id == event.id for e in agent_events)

    def test_list_events_filtering(self):
        """Test filtering events by type."""
        # Create events of different types
        log_event(EventType.TASK_CREATED, source="test")
        log_event(EventType.TASK_COMPLETED, source="test")
        log_event(EventType.SYSTEM_INFO, source="test")

        # Filter by type
        task_events = list_events(event_type=EventType.TASK_CREATED, limit=10)
        assert all(e.event_type == EventType.TASK_CREATED for e in task_events)

        # Filter by source
        source_events = list_events(source="test", limit=10)
        assert all(e.source == "test" for e in source_events)

    def test_get_recent_events(self):
        """Test retrieving recent events."""
        # Log an event
        log_event(EventType.SYSTEM_INFO, source="recent_test")

        # Get events from last minute
        recent = get_recent_events(minutes=1)
        assert any(e.source == "recent_test" for e in recent)

    def test_event_summary(self):
        """Test event summary statistics."""
        # Create some events
        log_event(EventType.TASK_CREATED, source="summary_test")
        log_event(EventType.TASK_CREATED, source="summary_test")
        log_event(EventType.TASK_COMPLETED, source="summary_test")

        # Get summary
        summary = get_event_summary(minutes=1)
        assert "task.created" in summary or "task.completed" in summary

    def test_prune_events(self):
        """Test pruning old events."""
        # This test just verifies the function doesn't error
        # (we don't want to delete real data in tests)
        count = prune_events(older_than_days=365)
        # Result depends on database state, just verify no exception
        assert isinstance(count, int)

    def test_event_data_serialization(self):
        """Test that complex data is properly serialized."""
        complex_data = {
            "nested": {"key": "value"},
            "list": [1, 2, 3],
            "number": 42.5,
        }

        event = log_event(
            EventType.TOOL_CALLED,
            source="test",
            data=complex_data,
        )

        retrieved = get_event(event.id)
        # Data should be stored as JSON string
        assert retrieved.data is not None


class TestEventTypes:
    """Test that all event types can be logged."""

    @pytest.mark.parametrize("event_type", [
        EventType.TASK_CREATED,
        EventType.TASK_BIDDING,
        EventType.TASK_ASSIGNED,
        EventType.TASK_STARTED,
        EventType.TASK_COMPLETED,
        EventType.TASK_FAILED,
        EventType.AGENT_JOINED,
        EventType.AGENT_LEFT,
        EventType.AGENT_STATUS_CHANGED,
        EventType.BID_SUBMITTED,
        EventType.AUCTION_CLOSED,
        EventType.TOOL_CALLED,
        EventType.TOOL_COMPLETED,
        EventType.TOOL_FAILED,
        EventType.SYSTEM_ERROR,
        EventType.SYSTEM_WARNING,
        EventType.SYSTEM_INFO,
    ])
    def test_all_event_types(self, event_type):
        """Verify all event types can be logged and retrieved."""
        event = log_event(
            event_type=event_type,
            source="type_test",
            data={"test": True},
        )

        retrieved = get_event(event.id)
        assert retrieved is not None
        assert retrieved.event_type == event_type
tests/spark/test_spark.py (new file, 431 lines)
@@ -0,0 +1,431 @@
"""Tests for the Spark Intelligence integration.

Covers:
- spark.memory: event capture, memory consolidation, importance scoring
- spark.eidos: predictions, evaluations, accuracy stats
- spark.advisor: advisory generation from patterns
- spark.engine: top-level engine wiring all subsystems
- dashboard.routes.spark: HTTP endpoints
"""

import json
from pathlib import Path

import pytest


# ── Fixtures ────────────────────────────────────────────────────────────────

@pytest.fixture(autouse=True)
def tmp_spark_db(tmp_path, monkeypatch):
    """Redirect all Spark SQLite writes to a temp directory."""
    db_path = tmp_path / "spark.db"
    monkeypatch.setattr("spark.memory.DB_PATH", db_path)
    monkeypatch.setattr("spark.eidos.DB_PATH", db_path)
    yield db_path


# ── spark.memory ────────────────────────────────────────────────────────────


class TestImportanceScoring:
    def test_failure_scores_high(self):
        from spark.memory import score_importance
        score = score_importance("task_failed", {})
        assert score >= 0.9

    def test_bid_scores_low(self):
        from spark.memory import score_importance
        score = score_importance("bid_submitted", {})
        assert score <= 0.3

    def test_high_bid_boosts_score(self):
        from spark.memory import score_importance
        low = score_importance("bid_submitted", {"bid_sats": 10})
        high = score_importance("bid_submitted", {"bid_sats": 100})
        assert high > low

    def test_unknown_event_default(self):
        from spark.memory import score_importance
        score = score_importance("unknown_type", {})
        assert score == 0.5


class TestEventRecording:
    def test_record_and_query(self):
        from spark.memory import record_event, get_events
        eid = record_event("task_posted", "Test task", task_id="t1")
        assert eid
        events = get_events(task_id="t1")
        assert len(events) == 1
        assert events[0].event_type == "task_posted"
        assert events[0].description == "Test task"

    def test_record_with_agent(self):
        from spark.memory import record_event, get_events
        record_event("bid_submitted", "Agent bid", agent_id="a1", task_id="t2",
                     data='{"bid_sats": 50}')
        events = get_events(agent_id="a1")
        assert len(events) == 1
        assert events[0].agent_id == "a1"

    def test_filter_by_event_type(self):
        from spark.memory import record_event, get_events
        record_event("task_posted", "posted", task_id="t3")
        record_event("task_completed", "completed", task_id="t3")
        posted = get_events(event_type="task_posted")
        assert len(posted) == 1

    def test_filter_by_min_importance(self):
        from spark.memory import record_event, get_events
        record_event("bid_submitted", "low", importance=0.1)
        record_event("task_failed", "high", importance=0.9)
        high_events = get_events(min_importance=0.5)
        assert len(high_events) == 1
        assert high_events[0].event_type == "task_failed"

    def test_count_events(self):
        from spark.memory import record_event, count_events
        record_event("task_posted", "a")
        record_event("task_posted", "b")
        record_event("task_completed", "c")
        assert count_events() == 3
        assert count_events("task_posted") == 2

    def test_limit_results(self):
        from spark.memory import record_event, get_events
        for i in range(10):
            record_event("bid_submitted", f"bid {i}")
        events = get_events(limit=3)
        assert len(events) == 3


class TestMemoryConsolidation:
    def test_store_and_query_memory(self):
        from spark.memory import store_memory, get_memories
        mid = store_memory("pattern", "agent-x", "Strong performer", confidence=0.8)
        assert mid
        memories = get_memories(subject="agent-x")
        assert len(memories) == 1
        assert memories[0].content == "Strong performer"

    def test_filter_by_type(self):
        from spark.memory import store_memory, get_memories
        store_memory("pattern", "system", "Good pattern")
        store_memory("anomaly", "system", "Bad anomaly")
        patterns = get_memories(memory_type="pattern")
        assert len(patterns) == 1
        assert patterns[0].memory_type == "pattern"

    def test_filter_by_confidence(self):
        from spark.memory import store_memory, get_memories
        store_memory("pattern", "a", "Low conf", confidence=0.2)
        store_memory("pattern", "b", "High conf", confidence=0.9)
        high = get_memories(min_confidence=0.5)
        assert len(high) == 1
        assert high[0].content == "High conf"

    def test_count_memories(self):
        from spark.memory import store_memory, count_memories
        store_memory("pattern", "a", "X")
        store_memory("anomaly", "b", "Y")
        assert count_memories() == 2
        assert count_memories("pattern") == 1


# ── spark.eidos ─────────────────────────────────────────────────────────────


class TestPredictions:
    def test_predict_stores_prediction(self):
        from spark.eidos import predict_task_outcome, get_predictions
        result = predict_task_outcome("t1", "Fix the bug", ["agent-a", "agent-b"])
        assert "prediction_id" in result
        assert result["likely_winner"] == "agent-a"
        preds = get_predictions(task_id="t1")
        assert len(preds) == 1

    def test_predict_with_history(self):
        from spark.eidos import predict_task_outcome
        history = {
            "agent-a": {"success_rate": 0.3, "avg_winning_bid": 40},
            "agent-b": {"success_rate": 0.9, "avg_winning_bid": 30},
        }
        result = predict_task_outcome(
            "t2", "Research topic", ["agent-a", "agent-b"],
            agent_history=history,
        )
        assert result["likely_winner"] == "agent-b"
        assert result["success_probability"] > 0.5

    def test_predict_empty_candidates(self):
        from spark.eidos import predict_task_outcome
        result = predict_task_outcome("t3", "No agents", [])
        assert result["likely_winner"] is None


class TestEvaluation:
    def test_evaluate_correct_prediction(self):
        from spark.eidos import predict_task_outcome, evaluate_prediction
        predict_task_outcome("t4", "Task", ["agent-a"])
        result = evaluate_prediction("t4", "agent-a", task_succeeded=True, winning_bid=30)
        assert result is not None
        assert result["accuracy"] > 0.0

    def test_evaluate_wrong_prediction(self):
        from spark.eidos import predict_task_outcome, evaluate_prediction
        predict_task_outcome("t5", "Task", ["agent-a"])
        result = evaluate_prediction("t5", "agent-b", task_succeeded=False)
        assert result is not None
        # Wrong winner + failed = lower accuracy
        assert result["accuracy"] < 1.0

    def test_evaluate_no_prediction_returns_none(self):
        from spark.eidos import evaluate_prediction
        result = evaluate_prediction("no-task", "agent-a", task_succeeded=True)
        assert result is None

    def test_double_evaluation_returns_none(self):
        from spark.eidos import predict_task_outcome, evaluate_prediction
        predict_task_outcome("t6", "Task", ["agent-a"])
        evaluate_prediction("t6", "agent-a", task_succeeded=True)
        # Second evaluation should return None (already evaluated)
        result = evaluate_prediction("t6", "agent-a", task_succeeded=True)
        assert result is None


class TestAccuracyStats:
    def test_empty_stats(self):
        from spark.eidos import get_accuracy_stats
        stats = get_accuracy_stats()
        assert stats["total_predictions"] == 0
        assert stats["evaluated"] == 0
        assert stats["avg_accuracy"] == 0.0

    def test_stats_after_evaluations(self):
        from spark.eidos import predict_task_outcome, evaluate_prediction, get_accuracy_stats
        for i in range(3):
            predict_task_outcome(f"task-{i}", "Description", ["agent-a"])
            evaluate_prediction(f"task-{i}", "agent-a", task_succeeded=True, winning_bid=30)
        stats = get_accuracy_stats()
        assert stats["total_predictions"] == 3
        assert stats["evaluated"] == 3
        assert stats["pending"] == 0
        assert stats["avg_accuracy"] > 0.0


class TestComputeAccuracy:
    def test_perfect_prediction(self):
        from spark.eidos import _compute_accuracy
        predicted = {
            "likely_winner": "agent-a",
            "success_probability": 1.0,
            "estimated_bid_range": [20, 40],
        }
        actual = {"winner": "agent-a", "succeeded": True, "winning_bid": 30}
        acc = _compute_accuracy(predicted, actual)
        assert acc == pytest.approx(1.0, abs=0.01)

    def test_all_wrong(self):
        from spark.eidos import _compute_accuracy
        predicted = {
            "likely_winner": "agent-a",
            "success_probability": 1.0,
            "estimated_bid_range": [10, 20],
        }
        actual = {"winner": "agent-b", "succeeded": False, "winning_bid": 100}
        acc = _compute_accuracy(predicted, actual)
        assert acc < 0.5

    def test_partial_credit(self):
        from spark.eidos import _compute_accuracy
        predicted = {
            "likely_winner": "agent-a",
            "success_probability": 0.5,
            "estimated_bid_range": [20, 40],
        }
        actual = {"winner": "agent-b", "succeeded": True, "winning_bid": 30}
        acc = _compute_accuracy(predicted, actual)
        # Wrong winner but right success and in bid range → partial
        assert 0.2 < acc < 0.8


# ── spark.advisor ───────────────────────────────────────────────────────────


class TestAdvisor:
    def test_insufficient_data(self):
        from spark.advisor import generate_advisories
        advisories = generate_advisories()
        assert len(advisories) >= 1
        assert advisories[0].category == "system_health"
        assert "Insufficient" in advisories[0].title

    def test_failure_detection(self):
        from spark.memory import record_event
        from spark.advisor import generate_advisories
        # Record enough events to pass the minimum threshold
        for i in range(5):
            record_event("task_failed", f"Failed task {i}",
                         agent_id="agent-bad", task_id=f"t-{i}")
        advisories = generate_advisories()
        failure_advisories = [a for a in advisories if a.category == "failure_prevention"]
        assert len(failure_advisories) >= 1
        assert "agent-ba" in failure_advisories[0].title

    def test_advisories_sorted_by_priority(self):
        from spark.memory import record_event
        from spark.advisor import generate_advisories
        for i in range(4):
            record_event("task_posted", f"posted {i}", task_id=f"p-{i}")
            record_event("task_completed", f"done {i}",
                         agent_id="agent-good", task_id=f"p-{i}")
        advisories = generate_advisories()
        if len(advisories) >= 2:
            assert advisories[0].priority >= advisories[-1].priority

    def test_no_activity_advisory(self):
        from spark.advisor import _check_system_activity
        advisories = _check_system_activity()
        assert len(advisories) >= 1
        assert "No swarm activity" in advisories[0].title


# ── spark.engine ────────────────────────────────────────────────────────────


class TestSparkEngine:
    def test_engine_enabled(self):
        from spark.engine import SparkEngine
        engine = SparkEngine(enabled=True)
        assert engine.enabled

    def test_engine_disabled(self):
        from spark.engine import SparkEngine
        engine = SparkEngine(enabled=False)
        result = engine.on_task_posted("t1", "Ignored task")
        assert result is None

    def test_on_task_posted(self):
        from spark.engine import SparkEngine
        from spark.memory import get_events
        engine = SparkEngine(enabled=True)
        eid = engine.on_task_posted("t1", "Test task", ["agent-a"])
        assert eid is not None
        events = get_events(task_id="t1")
        assert len(events) == 1

    def test_on_bid_submitted(self):
        from spark.engine import SparkEngine
        from spark.memory import get_events
        engine = SparkEngine(enabled=True)
        eid = engine.on_bid_submitted("t1", "agent-a", 50)
        assert eid is not None
        events = get_events(event_type="bid_submitted")
        assert len(events) == 1

    def test_on_task_assigned(self):
        from spark.engine import SparkEngine
        from spark.memory import get_events
        engine = SparkEngine(enabled=True)
        eid = engine.on_task_assigned("t1", "agent-a")
        assert eid is not None
        events = get_events(event_type="task_assigned")
        assert len(events) == 1

    def test_on_task_completed_evaluates_prediction(self):
        from spark.engine import SparkEngine
        from spark.eidos import get_predictions
        engine = SparkEngine(enabled=True)
        engine.on_task_posted("t1", "Fix bug", ["agent-a"])
        eid = engine.on_task_completed("t1", "agent-a", "Fixed it")
        assert eid is not None
        preds = get_predictions(task_id="t1")
        # Should have prediction(s) evaluated
        assert len(preds) >= 1

    def test_on_task_failed(self):
        from spark.engine import SparkEngine
        from spark.memory import get_events
        engine = SparkEngine(enabled=True)
        engine.on_task_posted("t1", "Deploy server", ["agent-a"])
        eid = engine.on_task_failed("t1", "agent-a", "Connection timeout")
        assert eid is not None
        events = get_events(event_type="task_failed")
        assert len(events) == 1

    def test_on_agent_joined(self):
        from spark.engine import SparkEngine
        from spark.memory import get_events
        engine = SparkEngine(enabled=True)
        eid = engine.on_agent_joined("agent-a", "Echo")
        assert eid is not None
        events = get_events(event_type="agent_joined")
        assert len(events) == 1

    def test_status(self):
        from spark.engine import SparkEngine
        engine = SparkEngine(enabled=True)
        engine.on_task_posted("t1", "Test", ["agent-a"])
        engine.on_bid_submitted("t1", "agent-a", 30)
        status = engine.status()
        assert status["enabled"] is True
        assert status["events_captured"] >= 2
        assert "predictions" in status
        assert "event_types" in status

    def test_get_advisories(self):
        from spark.engine import SparkEngine
        engine = SparkEngine(enabled=True)
        advisories = engine.get_advisories()
        assert isinstance(advisories, list)

    def test_get_advisories_disabled(self):
        from spark.engine import SparkEngine
        engine = SparkEngine(enabled=False)
        advisories = engine.get_advisories()
        assert advisories == []

    def test_get_timeline(self):
        from spark.engine import SparkEngine
        engine = SparkEngine(enabled=True)
        engine.on_task_posted("t1", "Task 1")
        engine.on_task_posted("t2", "Task 2")
        timeline = engine.get_timeline(limit=10)
        assert len(timeline) == 2

    def test_memory_consolidation(self):
        from spark.engine import SparkEngine
        from spark.memory import get_memories
        engine = SparkEngine(enabled=True)
        # Generate enough completions to trigger consolidation (>=5 events, >=3 outcomes)
        for i in range(6):
            engine.on_task_completed(f"t-{i}", "agent-star", f"Result {i}")
        memories = get_memories(subject="agent-star")
        # Should have at least one consolidated memory about strong performance
        assert len(memories) >= 1


# ── Dashboard routes ────────────────────────────────────────────────────────


class TestSparkRoutes:
    def test_spark_json(self, client):
        resp = client.get("/spark")
        assert resp.status_code == 200
        data = resp.json()
        assert "status" in data
        assert "advisories" in data

    def test_spark_ui(self, client):
        resp = client.get("/spark/ui")
        assert resp.status_code == 200
        assert "SPARK INTELLIGENCE" in resp.text

    def test_spark_timeline(self, client):
        resp = client.get("/spark/timeline")
        assert resp.status_code == 200

    def test_spark_insights(self, client):
        resp = client.get("/spark/insights")
        assert resp.status_code == 200
tests/spark/test_spark_tools_creative.py (new file, 110 lines)
@@ -0,0 +1,110 @@
"""Tests for Spark engine tool-level and creative pipeline event capture.

Covers the new on_tool_executed() and on_creative_step() methods added
in Phase 6.
"""

import pytest

from spark.engine import SparkEngine
from spark.memory import get_events, count_events


@pytest.fixture(autouse=True)
def tmp_spark_db(tmp_path, monkeypatch):
    db_path = tmp_path / "spark.db"
    monkeypatch.setattr("spark.memory.DB_PATH", db_path)
    monkeypatch.setattr("spark.eidos.DB_PATH", db_path)
    yield db_path


class TestOnToolExecuted:
    def test_captures_tool_event(self):
        engine = SparkEngine(enabled=True)
        eid = engine.on_tool_executed("agent-a", "git_commit", task_id="t1")
        assert eid is not None
        events = get_events(event_type="tool_executed")
        assert len(events) == 1
        assert "git_commit" in events[0].description

    def test_captures_tool_failure(self):
        engine = SparkEngine(enabled=True)
        eid = engine.on_tool_executed("agent-a", "generate_image", success=False)
        assert eid is not None
        events = get_events(event_type="tool_executed")
        assert len(events) == 1
        assert "FAIL" in events[0].description

    def test_captures_duration(self):
        engine = SparkEngine(enabled=True)
        engine.on_tool_executed("agent-a", "generate_song", duration_ms=5000)
        events = get_events(event_type="tool_executed")
        assert len(events) == 1

    def test_disabled_returns_none(self):
        engine = SparkEngine(enabled=False)
        result = engine.on_tool_executed("agent-a", "git_push")
        assert result is None

    def test_multiple_tool_events(self):
        engine = SparkEngine(enabled=True)
        engine.on_tool_executed("agent-a", "git_add")
        engine.on_tool_executed("agent-a", "git_commit")
        engine.on_tool_executed("agent-a", "git_push")
        assert count_events("tool_executed") == 3


class TestOnCreativeStep:
    def test_captures_creative_step(self):
        engine = SparkEngine(enabled=True)
        eid = engine.on_creative_step(
            project_id="proj-1",
            step_name="storyboard",
            agent_id="pixel-001",
            output_path="/data/images/frame.png",
        )
        assert eid is not None
        events = get_events(event_type="creative_step")
        assert len(events) == 1
        assert "storyboard" in events[0].description

    def test_captures_failed_step(self):
        engine = SparkEngine(enabled=True)
        engine.on_creative_step(
            project_id="proj-1",
            step_name="music",
            agent_id="lyra-001",
            success=False,
        )
        events = get_events(event_type="creative_step")
        assert len(events) == 1
        assert "FAIL" in events[0].description

    def test_disabled_returns_none(self):
        engine = SparkEngine(enabled=False)
        result = engine.on_creative_step("p1", "storyboard", "pixel-001")
        assert result is None

    def test_full_pipeline_events(self):
        engine = SparkEngine(enabled=True)
        steps = ["storyboard", "music", "video", "assembly"]
        agents = ["pixel-001", "lyra-001", "reel-001", "reel-001"]
        for step, agent in zip(steps, agents):
            engine.on_creative_step("proj-1", step, agent)
        assert count_events("creative_step") == 4


class TestSparkStatusIncludesNewTypes:
    def test_status_includes_tool_executed(self):
        engine = SparkEngine(enabled=True)
        engine.on_tool_executed("a", "git_commit")
        status = engine.status()
        assert "tool_executed" in status["event_types"]
        assert status["event_types"]["tool_executed"] == 1

    def test_status_includes_creative_step(self):
        engine = SparkEngine(enabled=True)
        engine.on_creative_step("p1", "storyboard", "pixel-001")
        status = engine.status()
        assert "creative_step" in status["event_types"]
        assert status["event_types"]["creative_step"] == 1