diff --git a/src/dashboard/app.py b/src/dashboard/app.py
index 43c980f..7e1ccba 100644
--- a/src/dashboard/app.py
+++ b/src/dashboard/app.py
@@ -44,6 +44,7 @@ from dashboard.routes.mobile import router as mobile_router
from dashboard.routes.models import api_router as models_api_router
from dashboard.routes.models import router as models_router
from dashboard.routes.quests import router as quests_router
+from dashboard.routes.scorecards import router as scorecards_router
from dashboard.routes.spark import router as spark_router
from dashboard.routes.system import router as system_router
from dashboard.routes.tasks import router as tasks_router
@@ -629,6 +630,7 @@ app.include_router(matrix_router)
app.include_router(tower_router)
app.include_router(daily_run_router)
app.include_router(quests_router)
+app.include_router(scorecards_router)
@app.websocket("/ws")
diff --git a/src/dashboard/routes/scorecards.py b/src/dashboard/routes/scorecards.py
new file mode 100644
index 0000000..c94f17d
--- /dev/null
+++ b/src/dashboard/routes/scorecards.py
@@ -0,0 +1,353 @@
+"""Agent scorecard routes — API endpoints for generating and viewing scorecards."""
+
+from __future__ import annotations
+
+import logging
+from datetime import UTC, datetime
+
+from fastapi import APIRouter, Query, Request
+from fastapi.responses import HTMLResponse, JSONResponse
+
+from dashboard.services.scorecard_service import (
+ PeriodType,
+ generate_all_scorecards,
+ generate_scorecard,
+ get_tracked_agents,
+)
+from dashboard.templating import templates
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/scorecards", tags=["scorecards"])
+
+
+def _format_period_label(period_type: PeriodType) -> str:
+ """Format a period type for display."""
+ return "Daily" if period_type == PeriodType.daily else "Weekly"
+
+
+@router.get("/api/agents")
+async def list_tracked_agents() -> dict[str, list[str]]:
+ """Return the list of tracked agent IDs.
+
+ Returns:
+ Dict with "agents" key containing list of agent IDs
+ """
+ return {"agents": get_tracked_agents()}
+
+
+@router.get("/api/{agent_id}")
+async def get_agent_scorecard(
+ agent_id: str,
+ period: str = Query(default="daily", description="Period type: 'daily' or 'weekly'"),
+) -> JSONResponse:
+ """Generate a scorecard for a specific agent.
+
+ Args:
+ agent_id: The agent ID (e.g., 'kimi', 'claude')
+ period: 'daily' or 'weekly' (default: daily)
+
+ Returns:
+ JSON response with scorecard data
+ """
+ try:
+ period_type = PeriodType(period.lower())
+ except ValueError:
+ return JSONResponse(
+ status_code=400,
+ content={"error": f"Invalid period '{period}'. Use 'daily' or 'weekly'."},
+ )
+
+ try:
+ scorecard = generate_scorecard(agent_id, period_type)
+
+ if scorecard is None:
+ return JSONResponse(
+ status_code=404,
+ content={"error": f"No scorecard found for agent '{agent_id}'"},
+ )
+
+ return JSONResponse(content=scorecard.to_dict())
+
+ except Exception as exc:
+ logger.exception("Failed to generate scorecard for %s", agent_id)
+ return JSONResponse(
+ status_code=500,
+ content={"error": f"Failed to generate scorecard: {str(exc)}"},
+ )
+
+
+@router.get("/api")
+async def get_all_scorecards(
+ period: str = Query(default="daily", description="Period type: 'daily' or 'weekly'"),
+) -> JSONResponse:
+ """Generate scorecards for all tracked agents.
+
+ Args:
+ period: 'daily' or 'weekly' (default: daily)
+
+ Returns:
+ JSON response with list of scorecard data
+ """
+ try:
+ period_type = PeriodType(period.lower())
+ except ValueError:
+ return JSONResponse(
+ status_code=400,
+ content={"error": f"Invalid period '{period}'. Use 'daily' or 'weekly'."},
+ )
+
+ try:
+ scorecards = generate_all_scorecards(period_type)
+ return JSONResponse(
+ content={
+ "period": period_type.value,
+ "scorecards": [s.to_dict() for s in scorecards],
+ "count": len(scorecards),
+ }
+ )
+
+ except Exception as exc:
+ logger.exception("Failed to generate scorecards")
+ return JSONResponse(
+ status_code=500,
+ content={"error": f"Failed to generate scorecards: {str(exc)}"},
+ )
+
+
+@router.get("", response_class=HTMLResponse)
+async def scorecards_page(request: Request) -> HTMLResponse:
+ """Render the scorecards dashboard page.
+
+ Returns:
+ HTML page with scorecard interface
+ """
+ agents = get_tracked_agents()
+ return templates.TemplateResponse(
+ request,
+ "scorecards.html",
+ {
+ "agents": agents,
+ "periods": ["daily", "weekly"],
+ },
+ )
+
+
+@router.get("/panel/{agent_id}", response_class=HTMLResponse)
+async def agent_scorecard_panel(
+ request: Request,
+ agent_id: str,
+ period: str = Query(default="daily"),
+) -> HTMLResponse:
+ """Render an individual agent scorecard panel (for HTMX).
+
+ Args:
+ request: The request object
+ agent_id: The agent ID
+ period: 'daily' or 'weekly'
+
+ Returns:
+ HTML panel with scorecard content
+ """
+ try:
+ period_type = PeriodType(period.lower())
+ except ValueError:
+ period_type = PeriodType.daily
+
+ try:
+ scorecard = generate_scorecard(agent_id, period_type)
+
+ if scorecard is None:
+ return HTMLResponse(
+ content=f"""
+
+
{agent_id.title()}
+
No activity recorded for this period.
+
+ """,
+ status_code=200,
+ )
+
+ data = scorecard.to_dict()
+
+ # Build patterns HTML
+ patterns_html = ""
+ if data["patterns"]:
+ patterns_list = "".join([f"{p}" for p in data["patterns"]])
+ patterns_html = f"""
+
+ """
+
+ # Build bullets HTML
+ bullets_html = "".join([f"{b}" for b in data["narrative_bullets"]])
+
+ # Build metrics summary
+ metrics = data["metrics"]
+
+ html_content = f"""
+
+
+
+
+
+
+
+
PRs
+
{metrics["prs_opened"]}/{metrics["prs_merged"]}
+
+ {int(metrics["pr_merge_rate"] * 100)}% merged
+
+
+
+
Issues
+
{metrics["issues_touched"]}
+
+
+
Tests
+
{metrics["tests_affected"]}
+
+
+
Tokens
+
= 0 else "text-danger"}">
+ {"+" if metrics["token_net"] > 0 else ""}{metrics["token_net"]}
+
+
+
+
+ {patterns_html}
+
+
+ """
+
+ return HTMLResponse(content=html_content)
+
+ except Exception as exc:
+ logger.exception("Failed to render scorecard panel for %s", agent_id)
+ return HTMLResponse(
+ content=f"""
+
+
{agent_id.title()}
+
Error loading scorecard: {str(exc)}
+
+ """,
+ status_code=200,
+ )
+
+
+@router.get("/all/panels", response_class=HTMLResponse)
+async def all_scorecard_panels(
+ request: Request,
+ period: str = Query(default="daily"),
+) -> HTMLResponse:
+ """Render all agent scorecard panels (for HTMX).
+
+ Args:
+ request: The request object
+ period: 'daily' or 'weekly'
+
+ Returns:
+ HTML with all scorecard panels
+ """
+ try:
+ period_type = PeriodType(period.lower())
+ except ValueError:
+ period_type = PeriodType.daily
+
+ try:
+ scorecards = generate_all_scorecards(period_type)
+
+ panels: list[str] = []
+ for scorecard in scorecards:
+ data = scorecard.to_dict()
+
+ # Build patterns HTML
+ patterns_html = ""
+ if data["patterns"]:
+ patterns_list = "".join([f"{p}" for p in data["patterns"]])
+ patterns_html = f"""
+
+ """
+
+ # Build bullets HTML
+ bullets_html = "".join([f"{b}" for b in data["narrative_bullets"]])
+ metrics = data["metrics"]
+
+ panel_html = f"""
+
+
+
+
+
+
+
+
+
PRs
+
{metrics["prs_opened"]}/{metrics["prs_merged"]}
+
+ {int(metrics["pr_merge_rate"] * 100)}% merged
+
+
+
+
Issues
+
{metrics["issues_touched"]}
+
+
+
Tests
+
{metrics["tests_affected"]}
+
+
+
Tokens
+
= 0 else "text-danger"}">
+ {"+" if metrics["token_net"] > 0 else ""}{metrics["token_net"]}
+
+
+
+
+ {patterns_html}
+
+
+
+ """
+ panels.append(panel_html)
+
+ html_content = f"""
+
+ {"".join(panels)}
+
+
+ Generated: {datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S UTC")}
+
+ """
+
+ return HTMLResponse(content=html_content)
+
+ except Exception as exc:
+ logger.exception("Failed to render all scorecard panels")
+ return HTMLResponse(
+ content=f"""
+
+ Error loading scorecards: {str(exc)}
+
+ """,
+ status_code=200,
+ )
diff --git a/src/dashboard/services/__init__.py b/src/dashboard/services/__init__.py
new file mode 100644
index 0000000..fcee553
--- /dev/null
+++ b/src/dashboard/services/__init__.py
@@ -0,0 +1,17 @@
+"""Dashboard services for business logic."""
+
+from dashboard.services.scorecard_service import (
+ PeriodType,
+ ScorecardSummary,
+ generate_all_scorecards,
+ generate_scorecard,
+ get_tracked_agents,
+)
+
+__all__ = [
+ "PeriodType",
+ "ScorecardSummary",
+ "generate_all_scorecards",
+ "generate_scorecard",
+ "get_tracked_agents",
+]
diff --git a/src/dashboard/services/scorecard_service.py b/src/dashboard/services/scorecard_service.py
new file mode 100644
index 0000000..60a9da2
--- /dev/null
+++ b/src/dashboard/services/scorecard_service.py
@@ -0,0 +1,515 @@
+"""Agent scorecard service — track and summarize agent performance.
+
+Generates daily/weekly scorecards showing:
+- Issues touched, PRs opened/merged
+- Tests affected, tokens earned/spent
+- Pattern highlights (merge rate, activity quality)
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass, field
+from datetime import UTC, datetime, timedelta
+from enum import StrEnum
+from typing import Any
+
+from infrastructure.events.bus import Event, get_event_bus
+
+logger = logging.getLogger(__name__)
+
+# Bot/agent usernames to track
+TRACKED_AGENTS = frozenset({"hermes", "kimi", "manus", "claude", "gemini"})
+
+
+class PeriodType(StrEnum):
+ daily = "daily"
+ weekly = "weekly"
+
+
+@dataclass
+class AgentMetrics:
+ """Raw metrics collected for an agent over a period."""
+
+ agent_id: str
+ issues_touched: set[int] = field(default_factory=set)
+ prs_opened: set[int] = field(default_factory=set)
+ prs_merged: set[int] = field(default_factory=set)
+ tests_affected: set[str] = field(default_factory=set)
+ tokens_earned: int = 0
+ tokens_spent: int = 0
+ commits: int = 0
+ comments: int = 0
+
+ @property
+ def pr_merge_rate(self) -> float:
+ """Calculate PR merge rate (0.0 - 1.0)."""
+ opened = len(self.prs_opened)
+ if opened == 0:
+ return 0.0
+ return len(self.prs_merged) / opened
+
+
+@dataclass
+class ScorecardSummary:
+ """A generated scorecard with narrative summary."""
+
+ agent_id: str
+ period_type: PeriodType
+ period_start: datetime
+ period_end: datetime
+ metrics: AgentMetrics
+ narrative_bullets: list[str] = field(default_factory=list)
+ patterns: list[str] = field(default_factory=list)
+
+ def to_dict(self) -> dict[str, Any]:
+ """Convert scorecard to dictionary for JSON serialization."""
+ return {
+ "agent_id": self.agent_id,
+ "period_type": self.period_type.value,
+ "period_start": self.period_start.isoformat(),
+ "period_end": self.period_end.isoformat(),
+ "metrics": {
+ "issues_touched": len(self.metrics.issues_touched),
+ "prs_opened": len(self.metrics.prs_opened),
+ "prs_merged": len(self.metrics.prs_merged),
+ "pr_merge_rate": round(self.metrics.pr_merge_rate, 2),
+ "tests_affected": len(self.metrics.tests_affected),
+ "commits": self.metrics.commits,
+ "comments": self.metrics.comments,
+ "tokens_earned": self.metrics.tokens_earned,
+ "tokens_spent": self.metrics.tokens_spent,
+ "token_net": self.metrics.tokens_earned - self.metrics.tokens_spent,
+ },
+ "narrative_bullets": self.narrative_bullets,
+ "patterns": self.patterns,
+ }
+
+ @property
+ def tests_affected(self) -> set[str]:
+ """Alias for metrics.tests_affected."""
+ return self.metrics.tests_affected
+
+
+def _get_period_bounds(
+ period_type: PeriodType, reference_date: datetime | None = None
+) -> tuple[datetime, datetime]:
+ """Calculate start and end timestamps for a period.
+
+ Args:
+ period_type: daily or weekly
+ reference_date: The date to calculate from (defaults to now)
+
+ Returns:
+ Tuple of (period_start, period_end) in UTC
+ """
+ if reference_date is None:
+ reference_date = datetime.now(UTC)
+
+ # Normalize to start of day
+ end = reference_date.replace(hour=0, minute=0, second=0, microsecond=0)
+
+ if period_type == PeriodType.daily:
+ start = end - timedelta(days=1)
+ else: # weekly
+ start = end - timedelta(days=7)
+
+ return start, end
+
+
+def _collect_events_for_period(
+ start: datetime, end: datetime, agent_id: str | None = None
+) -> list[Event]:
+ """Collect events from the event bus for a time period.
+
+ Args:
+ start: Period start time
+ end: Period end time
+ agent_id: Optional agent filter
+
+ Returns:
+ List of matching events
+ """
+ bus = get_event_bus()
+ events: list[Event] = []
+
+ # Query persisted events for relevant types
+ event_types = [
+ "gitea.push",
+ "gitea.issue.opened",
+ "gitea.issue.comment",
+ "gitea.pull_request",
+ "agent.task.completed",
+ "test.execution",
+ ]
+
+ for event_type in event_types:
+ try:
+ type_events = bus.replay(
+ event_type=event_type,
+ source=agent_id,
+ limit=1000,
+ )
+ events.extend(type_events)
+ except Exception as exc:
+ logger.debug("Failed to replay events for %s: %s", event_type, exc)
+
+ # Filter by timestamp
+ filtered = []
+ for event in events:
+ try:
+ event_time = datetime.fromisoformat(event.timestamp.replace("Z", "+00:00"))
+ if start <= event_time < end:
+ filtered.append(event)
+ except (ValueError, AttributeError):
+ continue
+
+ return filtered
+
+
+def _extract_actor_from_event(event: Event) -> str:
+ """Extract the actor/agent from an event."""
+ # Try data fields first
+ if "actor" in event.data:
+ return event.data["actor"]
+ if "agent_id" in event.data:
+ return event.data["agent_id"]
+ # Fall back to source
+ return event.source
+
+
+def _is_tracked_agent(actor: str) -> bool:
+ """Check if an actor is a tracked agent."""
+ return actor.lower() in TRACKED_AGENTS
+
+
+def _aggregate_metrics(events: list[Event]) -> dict[str, AgentMetrics]:
+ """Aggregate metrics from events grouped by agent.
+
+ Args:
+ events: List of events to process
+
+ Returns:
+ Dict mapping agent_id -> AgentMetrics
+ """
+ metrics_by_agent: dict[str, AgentMetrics] = {}
+
+ for event in events:
+ actor = _extract_actor_from_event(event)
+
+ # Skip non-agent events unless they explicitly have an agent_id
+ if not _is_tracked_agent(actor) and "agent_id" not in event.data:
+ continue
+
+ if actor not in metrics_by_agent:
+ metrics_by_agent[actor] = AgentMetrics(agent_id=actor)
+
+ metrics = metrics_by_agent[actor]
+
+ # Process based on event type
+ event_type = event.type
+
+ if event_type == "gitea.push":
+ metrics.commits += event.data.get("num_commits", 1)
+
+ elif event_type == "gitea.issue.opened":
+ issue_num = event.data.get("issue_number", 0)
+ if issue_num:
+ metrics.issues_touched.add(issue_num)
+
+ elif event_type == "gitea.issue.comment":
+ metrics.comments += 1
+ issue_num = event.data.get("issue_number", 0)
+ if issue_num:
+ metrics.issues_touched.add(issue_num)
+
+ elif event_type == "gitea.pull_request":
+ pr_num = event.data.get("pr_number", 0)
+ action = event.data.get("action", "")
+ merged = event.data.get("merged", False)
+
+ if pr_num:
+ if action == "opened":
+ metrics.prs_opened.add(pr_num)
+ elif action == "closed" and merged:
+ metrics.prs_merged.add(pr_num)
+ # Also count as touched issue for tracking
+ metrics.issues_touched.add(pr_num)
+
+ elif event_type == "agent.task.completed":
+ # Extract test files from task data
+ affected = event.data.get("tests_affected", [])
+ for test in affected:
+ metrics.tests_affected.add(test)
+
+ # Token rewards from task completion
+ reward = event.data.get("token_reward", 0)
+ if reward:
+ metrics.tokens_earned += reward
+
+ elif event_type == "test.execution":
+ # Track test files that were executed
+ test_files = event.data.get("test_files", [])
+ for test in test_files:
+ metrics.tests_affected.add(test)
+
+ return metrics_by_agent
+
+
+def _query_token_transactions(agent_id: str, start: datetime, end: datetime) -> tuple[int, int]:
+ """Query the lightning ledger for token transactions.
+
+ Args:
+ agent_id: The agent to query for
+ start: Period start
+ end: Period end
+
+ Returns:
+ Tuple of (tokens_earned, tokens_spent)
+ """
+ try:
+ from lightning.ledger import get_transactions
+
+ transactions = get_transactions(limit=1000)
+
+ earned = 0
+ spent = 0
+
+ for tx in transactions:
+ # Filter by agent if specified
+ if tx.agent_id and tx.agent_id != agent_id:
+ continue
+
+ # Filter by timestamp
+ try:
+ tx_time = datetime.fromisoformat(tx.created_at.replace("Z", "+00:00"))
+ if not (start <= tx_time < end):
+ continue
+ except (ValueError, AttributeError):
+ continue
+
+ if tx.tx_type.value == "incoming":
+ earned += tx.amount_sats
+ else:
+ spent += tx.amount_sats
+
+ return earned, spent
+
+ except Exception as exc:
+ logger.debug("Failed to query token transactions: %s", exc)
+ return 0, 0
+
+
+def _generate_narrative_bullets(metrics: AgentMetrics, period_type: PeriodType) -> list[str]:
+ """Generate narrative summary bullets for a scorecard.
+
+ Args:
+ metrics: The agent's metrics
+ period_type: daily or weekly
+
+ Returns:
+ List of narrative bullet points
+ """
+ bullets: list[str] = []
+ period_label = "day" if period_type == PeriodType.daily else "week"
+
+ # Activity summary
+ activities = []
+ if metrics.commits:
+ activities.append(f"{metrics.commits} commit{'s' if metrics.commits != 1 else ''}")
+ if len(metrics.prs_opened):
+ activities.append(
+ f"{len(metrics.prs_opened)} PR{'s' if len(metrics.prs_opened) != 1 else ''} opened"
+ )
+ if len(metrics.prs_merged):
+ activities.append(
+ f"{len(metrics.prs_merged)} PR{'s' if len(metrics.prs_merged) != 1 else ''} merged"
+ )
+ if len(metrics.issues_touched):
+ activities.append(
+ f"{len(metrics.issues_touched)} issue{'s' if len(metrics.issues_touched) != 1 else ''} touched"
+ )
+ if metrics.comments:
+ activities.append(f"{metrics.comments} comment{'s' if metrics.comments != 1 else ''}")
+
+ if activities:
+ bullets.append(f"Active across {', '.join(activities)} this {period_label}.")
+
+ # Test activity
+ if len(metrics.tests_affected):
+ bullets.append(
+ f"Affected {len(metrics.tests_affected)} test file{'s' if len(metrics.tests_affected) != 1 else ''}."
+ )
+
+ # Token summary
+ net_tokens = metrics.tokens_earned - metrics.tokens_spent
+ if metrics.tokens_earned or metrics.tokens_spent:
+ if net_tokens > 0:
+ bullets.append(
+ f"Net earned {net_tokens} tokens ({metrics.tokens_earned} earned, {metrics.tokens_spent} spent)."
+ )
+ elif net_tokens < 0:
+ bullets.append(
+ f"Net spent {abs(net_tokens)} tokens ({metrics.tokens_earned} earned, {metrics.tokens_spent} spent)."
+ )
+ else:
+ bullets.append(
+ f"Balanced token flow ({metrics.tokens_earned} earned, {metrics.tokens_spent} spent)."
+ )
+
+ # Handle empty case
+ if not bullets:
+ bullets.append(f"No recorded activity this {period_label}.")
+
+ return bullets
+
+
+def _detect_patterns(metrics: AgentMetrics) -> list[str]:
+ """Detect interesting patterns in agent behavior.
+
+ Args:
+ metrics: The agent's metrics
+
+ Returns:
+ List of pattern descriptions
+ """
+ patterns: list[str] = []
+
+ pr_opened = len(metrics.prs_opened)
+ merge_rate = metrics.pr_merge_rate
+
+ # Merge rate patterns
+ if pr_opened >= 3:
+ if merge_rate >= 0.8:
+ patterns.append("High merge rate with few failures — code quality focus.")
+ elif merge_rate <= 0.3:
+ patterns.append("Lots of noisy PRs, low merge rate — may need review support.")
+
+ # Activity patterns
+ if metrics.commits > 10 and pr_opened == 0:
+ patterns.append("High commit volume without PRs — working directly on main?")
+
+ if len(metrics.issues_touched) > 5 and metrics.comments == 0:
+ patterns.append("Touching many issues but low comment volume — silent worker.")
+
+ if metrics.comments > len(metrics.issues_touched) * 2:
+ patterns.append("Highly communicative — lots of discussion relative to work items.")
+
+ # Token patterns
+ net_tokens = metrics.tokens_earned - metrics.tokens_spent
+ if net_tokens > 100:
+ patterns.append("Strong token accumulation — high value delivery.")
+ elif net_tokens < -50:
+ patterns.append("High token spend — may be in experimentation phase.")
+
+ return patterns
+
+
+def generate_scorecard(
+ agent_id: str,
+ period_type: PeriodType = PeriodType.daily,
+ reference_date: datetime | None = None,
+) -> ScorecardSummary | None:
+ """Generate a scorecard for a single agent.
+
+ Args:
+ agent_id: The agent to generate scorecard for
+ period_type: daily or weekly
+ reference_date: The date to calculate from (defaults to now)
+
+ Returns:
+ ScorecardSummary or None if agent has no activity
+ """
+ start, end = _get_period_bounds(period_type, reference_date)
+
+ # Collect events
+ events = _collect_events_for_period(start, end, agent_id)
+
+ # Aggregate metrics
+ all_metrics = _aggregate_metrics(events)
+
+ # Get metrics for this specific agent
+ if agent_id not in all_metrics:
+ # Create empty metrics - still generate a scorecard
+ metrics = AgentMetrics(agent_id=agent_id)
+ else:
+ metrics = all_metrics[agent_id]
+
+ # Augment with token data from ledger
+ tokens_earned, tokens_spent = _query_token_transactions(agent_id, start, end)
+ metrics.tokens_earned = max(metrics.tokens_earned, tokens_earned)
+ metrics.tokens_spent = max(metrics.tokens_spent, tokens_spent)
+
+ # Generate narrative and patterns
+ narrative = _generate_narrative_bullets(metrics, period_type)
+ patterns = _detect_patterns(metrics)
+
+ return ScorecardSummary(
+ agent_id=agent_id,
+ period_type=period_type,
+ period_start=start,
+ period_end=end,
+ metrics=metrics,
+ narrative_bullets=narrative,
+ patterns=patterns,
+ )
+
+
+def generate_all_scorecards(
+ period_type: PeriodType = PeriodType.daily,
+ reference_date: datetime | None = None,
+) -> list[ScorecardSummary]:
+ """Generate scorecards for all tracked agents.
+
+ Args:
+ period_type: daily or weekly
+ reference_date: The date to calculate from (defaults to now)
+
+ Returns:
+ List of ScorecardSummary for all agents with activity
+ """
+ start, end = _get_period_bounds(period_type, reference_date)
+
+ # Collect all events
+ events = _collect_events_for_period(start, end)
+
+ # Aggregate metrics for all agents
+ all_metrics = _aggregate_metrics(events)
+
+ # Include tracked agents even if no activity
+ for agent_id in TRACKED_AGENTS:
+ if agent_id not in all_metrics:
+ all_metrics[agent_id] = AgentMetrics(agent_id=agent_id)
+
+ # Generate scorecards
+ scorecards: list[ScorecardSummary] = []
+
+ for agent_id, metrics in all_metrics.items():
+ # Augment with token data
+ tokens_earned, tokens_spent = _query_token_transactions(agent_id, start, end)
+ metrics.tokens_earned = max(metrics.tokens_earned, tokens_earned)
+ metrics.tokens_spent = max(metrics.tokens_spent, tokens_spent)
+
+ narrative = _generate_narrative_bullets(metrics, period_type)
+ patterns = _detect_patterns(metrics)
+
+ scorecard = ScorecardSummary(
+ agent_id=agent_id,
+ period_type=period_type,
+ period_start=start,
+ period_end=end,
+ metrics=metrics,
+ narrative_bullets=narrative,
+ patterns=patterns,
+ )
+ scorecards.append(scorecard)
+
+ # Sort by agent_id for consistent ordering
+ scorecards.sort(key=lambda s: s.agent_id)
+
+ return scorecards
+
+
+def get_tracked_agents() -> list[str]:
+ """Return the list of tracked agent IDs."""
+ return sorted(TRACKED_AGENTS)
diff --git a/src/dashboard/templates/base.html b/src/dashboard/templates/base.html
index 70800be..7a1b234 100644
--- a/src/dashboard/templates/base.html
+++ b/src/dashboard/templates/base.html
@@ -51,6 +51,7 @@
THINKING
MISSION CTRL
SWARM
+ SCORECARDS
BUGS
@@ -123,6 +124,7 @@
THINKING
MISSION CONTROL
SWARM
+ SCORECARDS
BUGS
INTELLIGENCE
SPARK
diff --git a/src/dashboard/templates/scorecards.html b/src/dashboard/templates/scorecards.html
new file mode 100644
index 0000000..019512e
--- /dev/null
+++ b/src/dashboard/templates/scorecards.html
@@ -0,0 +1,113 @@
+{% extends "base.html" %}
+
+{% block title %}Agent Scorecards - Timmy Time{% endblock %}
+
+{% block extra_styles %}{% endblock %}
+
+{% block content %}
+
+
+
+
+
AGENT SCORECARDS
+
Track agent performance across issues, PRs, tests, and tokens
+
+
+
+
+
+
+
+
+
+
+
+ Loading...
+
+
Loading scorecards...
+
+
+
+
+
+
API Reference
+
+
+
+
+
List Tracked Agents
+
GET /scorecards/api/agents
+
Returns all tracked agent IDs
+
+
+
+
+
+
+
Get All Scorecards
+
GET /scorecards/api?period=daily|weekly
+
Returns scorecards for all agents
+
+
+
+
+
+
+
Get Agent Scorecard
+
GET /scorecards/api/{agent_id}?period=daily|weekly
+
Returns scorecard for a specific agent
+
+
+
+
+
+
+
HTML Panel (HTMX)
+
GET /scorecards/panel/{agent_id}?period=daily|weekly
+
Returns HTML panel for embedding
+
+
+
+
+
+
+
+
+{% endblock %}
diff --git a/tests/dashboard/test_scorecards.py b/tests/dashboard/test_scorecards.py
new file mode 100644
index 0000000..ce1c874
--- /dev/null
+++ b/tests/dashboard/test_scorecards.py
@@ -0,0 +1,680 @@
+"""Tests for agent scorecard functionality."""
+
+from datetime import UTC, datetime, timedelta
+from unittest.mock import MagicMock, patch
+
+from dashboard.services.scorecard_service import (
+ AgentMetrics,
+ PeriodType,
+ ScorecardSummary,
+ _aggregate_metrics,
+ _detect_patterns,
+ _extract_actor_from_event,
+ _generate_narrative_bullets,
+ _get_period_bounds,
+ _is_tracked_agent,
+ _query_token_transactions,
+ generate_all_scorecards,
+ generate_scorecard,
+ get_tracked_agents,
+)
+from infrastructure.events.bus import Event
+
+
class TestPeriodBounds:
    """Test period boundary calculations."""

    def test_daily_period_bounds(self):
        """Test daily period returns correct 24-hour window."""
        reference = datetime(2026, 3, 21, 12, 30, 45, tzinfo=UTC)
        start, end = _get_period_bounds(PeriodType.daily, reference)

        assert end == datetime(2026, 3, 21, 0, 0, 0, tzinfo=UTC)
        assert start == datetime(2026, 3, 20, 0, 0, 0, tzinfo=UTC)
        assert (end - start) == timedelta(days=1)

    def test_weekly_period_bounds(self):
        """Test weekly period returns correct 7-day window."""
        reference = datetime(2026, 3, 21, 12, 30, 45, tzinfo=UTC)
        start, end = _get_period_bounds(PeriodType.weekly, reference)

        assert end == datetime(2026, 3, 21, 0, 0, 0, tzinfo=UTC)
        assert start == datetime(2026, 3, 14, 0, 0, 0, tzinfo=UTC)
        assert (end - start) == timedelta(days=7)

    def test_default_reference_date(self):
        """Test default reference date uses current time.

        Samples "today at midnight" both before and after the call so the
        test cannot flake if the date rolls over between the call and the
        expectation being computed.
        """
        before = datetime.now(UTC).replace(hour=0, minute=0, second=0, microsecond=0)
        start, end = _get_period_bounds(PeriodType.daily)
        after = datetime.now(UTC).replace(hour=0, minute=0, second=0, microsecond=0)

        # End should be the start of the current day (midnight); accept either
        # snapshot in case midnight passed mid-test.
        assert before <= end <= after
        # Start should be exactly 24 hours before end
        assert (end - start) == timedelta(days=1)
+
+
class TestTrackedAgents:
    """Test agent tracking functions."""

    def test_get_tracked_agents(self):
        """get_tracked_agents returns a sorted list containing all known agents."""
        agents = get_tracked_agents()

        assert isinstance(agents, list)
        for expected in ("kimi", "claude", "gemini", "hermes", "manus"):
            assert expected in agents
        assert agents == sorted(agents)

    def test_is_tracked_agent_true(self):
        """_is_tracked_agent accepts tracked agents, regardless of case."""
        for name in ("kimi", "KIMI", "claude", "hermes"):
            assert _is_tracked_agent(name) is True

    def test_is_tracked_agent_false(self):
        """_is_tracked_agent rejects unknown or empty agent names."""
        for name in ("unknown", "rockachopa", ""):
            assert _is_tracked_agent(name) is False
+
+
class TestExtractActor:
    """Test actor extraction from events."""

    def test_extract_from_actor_field(self):
        """data.actor is used when present."""
        event = Event(type="test", source="system", data={"actor": "kimi"})
        assert _extract_actor_from_event(event) == "kimi"

    def test_extract_from_agent_id_field(self):
        """data.agent_id is used when no actor field is present."""
        event = Event(type="test", source="system", data={"agent_id": "claude"})
        assert _extract_actor_from_event(event) == "claude"

    def test_extract_from_source_fallback(self):
        """event.source is the fallback when data carries no identity fields."""
        event = Event(type="test", source="gemini", data={})
        assert _extract_actor_from_event(event) == "gemini"

    def test_actor_priority_over_agent_id(self):
        """actor wins over agent_id when both fields are present."""
        payload = {"actor": "kimi", "agent_id": "claude"}
        event = Event(type="test", source="system", data=payload)
        assert _extract_actor_from_event(event) == "kimi"
+
+
class TestAggregateMetrics:
    """Test metrics aggregation from events."""

    @staticmethod
    def _gitea(event_type: str, **payload) -> Event:
        """Build a gitea-sourced event carrying the given data payload."""
        return Event(type=event_type, source="gitea", data=payload)

    def test_empty_events(self):
        """No events yields an empty aggregate."""
        assert _aggregate_metrics([]) == {}

    def test_push_event_aggregation(self):
        """Commit counts from push events are summed per agent."""
        result = _aggregate_metrics(
            [
                self._gitea("gitea.push", actor="kimi", num_commits=3),
                self._gitea("gitea.push", actor="kimi", num_commits=2),
            ]
        )

        assert "kimi" in result
        assert result["kimi"].commits == 5

    def test_issue_opened_aggregation(self):
        """Opened issues are collected per agent."""
        result = _aggregate_metrics(
            [
                self._gitea("gitea.issue.opened", actor="claude", issue_number=100),
                self._gitea("gitea.issue.opened", actor="claude", issue_number=101),
            ]
        )

        assert "claude" in result
        touched = result["claude"].issues_touched
        assert len(touched) == 2
        assert 100 in touched
        assert 101 in touched

    def test_comment_aggregation(self):
        """Comments are counted and also mark the commented issue as touched."""
        result = _aggregate_metrics(
            [
                self._gitea("gitea.issue.comment", actor="gemini", issue_number=100),
                self._gitea("gitea.issue.comment", actor="gemini", issue_number=101),
            ]
        )

        assert "gemini" in result
        assert result["gemini"].comments == 2
        # Commenting on an issue also counts as touching it.
        assert len(result["gemini"].issues_touched) == 2

    def test_pr_events_aggregation(self):
        """PR opens and merges are tracked as separate sets."""
        result = _aggregate_metrics(
            [
                self._gitea("gitea.pull_request", actor="kimi", pr_number=50, action="opened"),
                self._gitea(
                    "gitea.pull_request", actor="kimi", pr_number=50, action="closed", merged=True
                ),
                self._gitea("gitea.pull_request", actor="kimi", pr_number=51, action="opened"),
            ]
        )

        assert "kimi" in result
        metrics = result["kimi"]
        assert len(metrics.prs_opened) == 2
        assert len(metrics.prs_merged) == 1
        assert 50 in metrics.prs_merged

    def test_untracked_agent_filtered(self):
        """Events from unknown actors are dropped from the aggregate."""
        result = _aggregate_metrics(
            [self._gitea("gitea.push", actor="rockachopa", num_commits=5)]
        )

        assert "rockachopa" not in result

    def test_task_completion_aggregation(self):
        """Task completions record affected test files and token rewards."""
        result = _aggregate_metrics(
            [
                self._gitea(
                    "agent.task.completed",
                    agent_id="kimi",
                    tests_affected=["test_foo.py", "test_bar.py"],
                    token_reward=10,
                )
            ]
        )

        assert "kimi" in result
        metrics = result["kimi"]
        assert len(metrics.tests_affected) == 2
        assert "test_foo.py" in metrics.tests_affected
        assert metrics.tokens_earned == 10
+
+
class TestAgentMetrics:
    """Test AgentMetrics class."""

    def test_merge_rate_zero_prs(self):
        """Merge rate defaults to 0.0 when no PRs were opened."""
        assert AgentMetrics(agent_id="kimi").pr_merge_rate == 0.0

    def test_merge_rate_perfect(self):
        """Every opened PR merged gives a rate of exactly 1.0."""
        metrics = AgentMetrics(agent_id="kimi", prs_opened={1, 2, 3}, prs_merged={1, 2, 3})
        assert metrics.pr_merge_rate == 1.0

    def test_merge_rate_partial(self):
        """Two of four PRs merged gives a rate of 0.5."""
        metrics = AgentMetrics(agent_id="kimi", prs_opened={1, 2, 3, 4}, prs_merged={1, 2})
        assert metrics.pr_merge_rate == 0.5
+
+
class TestDetectPatterns:
    """Test pattern detection logic."""

    @staticmethod
    def _has_pattern(metrics: AgentMetrics, fragment: str) -> bool:
        """Return True if any detected pattern string contains *fragment*."""
        return any(fragment in pattern for pattern in _detect_patterns(metrics))

    def test_high_merge_rate_pattern(self):
        """An 80% merge rate is flagged as a high merge rate."""
        metrics = AgentMetrics(
            agent_id="kimi",
            prs_opened={1, 2, 3, 4, 5},
            prs_merged={1, 2, 3, 4},
        )
        assert self._has_pattern(metrics, "High merge rate")

    def test_low_merge_rate_pattern(self):
        """A 20% merge rate is flagged as a low merge rate."""
        metrics = AgentMetrics(
            agent_id="kimi",
            prs_opened={1, 2, 3, 4, 5},
            prs_merged={1},
        )
        assert self._has_pattern(metrics, "low merge rate")

    def test_high_commits_no_prs_pattern(self):
        """Many commits with zero PRs suggests direct-to-main pushes."""
        metrics = AgentMetrics(agent_id="kimi", commits=15, prs_opened=set())
        assert self._has_pattern(metrics, "High commit volume without PRs")

    def test_silent_worker_pattern(self):
        """Many issues touched with no comments is flagged as a silent worker."""
        metrics = AgentMetrics(
            agent_id="kimi",
            issues_touched={1, 2, 3, 4, 5, 6},
            comments=0,
        )
        assert self._has_pattern(metrics, "silent worker")

    def test_communicative_pattern(self):
        """Five comments per touched issue is flagged as highly communicative."""
        metrics = AgentMetrics(agent_id="kimi", issues_touched={1, 2}, comments=10)
        assert self._has_pattern(metrics, "Highly communicative")

    def test_token_accumulation_pattern(self):
        """Large net earnings are flagged as strong token accumulation."""
        metrics = AgentMetrics(agent_id="kimi", tokens_earned=150, tokens_spent=10)
        assert self._has_pattern(metrics, "Strong token accumulation")

    def test_token_spend_pattern(self):
        """Spending far more than earned is flagged as high token spend."""
        metrics = AgentMetrics(agent_id="kimi", tokens_earned=10, tokens_spent=100)
        assert self._has_pattern(metrics, "High token spend")
+
+
class TestGenerateNarrative:
    """Test narrative bullet generation."""

    def test_empty_metrics_narrative(self):
        """Empty metrics yield a single no-activity bullet."""
        bullets = _generate_narrative_bullets(AgentMetrics(agent_id="kimi"), PeriodType.daily)

        assert len(bullets) == 1
        assert "No recorded activity" in bullets[0]

    def test_activity_summary_narrative(self):
        """Commit and PR counts appear in the activity summary bullet."""
        metrics = AgentMetrics(
            agent_id="kimi",
            commits=5,
            prs_opened={1, 2},
            prs_merged={1},
        )
        bullets = _generate_narrative_bullets(metrics, PeriodType.daily)

        matches = [b for b in bullets if "Active across" in b]
        assert matches
        summary = matches[0]
        for fragment in ("5 commits", "2 PRs opened", "1 PR merged"):
            assert fragment in summary

    def test_tests_affected_narrative(self):
        """Affected test file counts are mentioned in the narrative."""
        metrics = AgentMetrics(agent_id="kimi", tests_affected={"test_a.py", "test_b.py"})
        bullets = _generate_narrative_bullets(metrics, PeriodType.daily)

        assert any("2 test files" in b for b in bullets)

    def test_tokens_earned_narrative(self):
        """A positive token balance is reported as net earned."""
        metrics = AgentMetrics(agent_id="kimi", tokens_earned=100, tokens_spent=20)
        bullets = _generate_narrative_bullets(metrics, PeriodType.daily)

        assert any("Net earned 80 tokens" in b for b in bullets)

    def test_tokens_spent_narrative(self):
        """A negative token balance is reported as net spent."""
        metrics = AgentMetrics(agent_id="kimi", tokens_earned=20, tokens_spent=100)
        bullets = _generate_narrative_bullets(metrics, PeriodType.daily)

        assert any("Net spent 80 tokens" in b for b in bullets)

    def test_balanced_tokens_narrative(self):
        """Equal earn and spend is reported as balanced token flow."""
        metrics = AgentMetrics(agent_id="kimi", tokens_earned=100, tokens_spent=100)
        bullets = _generate_narrative_bullets(metrics, PeriodType.daily)

        assert any("Balanced token flow" in b for b in bullets)
+
+
class TestScorecardSummary:
    """Test ScorecardSummary dataclass."""

    def test_to_dict_structure(self):
        """to_dict serializes identity, metrics, narrative, and patterns."""
        metrics = AgentMetrics(
            agent_id="kimi",
            issues_touched={1, 2},
            prs_opened={10, 11},
            prs_merged={10},
            tokens_earned=100,
            tokens_spent=20,
        )
        now = datetime.now(UTC)
        summary = ScorecardSummary(
            agent_id="kimi",
            period_type=PeriodType.daily,
            period_start=now,
            period_end=now,
            metrics=metrics,
            narrative_bullets=["Test bullet"],
            patterns=["Test pattern"],
        )

        data = summary.to_dict()

        assert data["agent_id"] == "kimi"
        assert data["period_type"] == "daily"
        assert "metrics" in data
        # Collection metrics are serialized as counts; token_net is earned - spent.
        expected_metrics = {
            "issues_touched": 2,
            "prs_opened": 2,
            "prs_merged": 1,
            "pr_merge_rate": 0.5,
            "tokens_earned": 100,
            "token_net": 80,
        }
        for key, value in expected_metrics.items():
            assert data["metrics"][key] == value
        assert data["narrative_bullets"] == ["Test bullet"]
        assert data["patterns"] == ["Test pattern"]
+
+
class TestQueryTokenTransactions:
    """Test token transaction querying."""

    @staticmethod
    def _tx(agent_id: str, direction: str, amount: int, created_at: str) -> MagicMock:
        """Build a mock ledger transaction record.

        Args:
            agent_id: Agent the transaction belongs to
            direction: tx_type value, "incoming" or "outgoing"
            amount: Amount in sats
            created_at: ISO-8601 timestamp string
        """
        return MagicMock(
            agent_id=agent_id,
            tx_type=MagicMock(value=direction),
            amount_sats=amount,
            created_at=created_at,
        )

    def test_empty_ledger(self):
        """An empty ledger yields zero earned and zero spent."""
        with patch("lightning.ledger.get_transactions", return_value=[]):
            earned, spent = _query_token_transactions("kimi", datetime.now(UTC), datetime.now(UTC))
        assert earned == 0
        assert spent == 0

    def test_ledger_with_transactions(self):
        """Incoming and outgoing transactions aggregate into earned/spent."""
        now = datetime.now(UTC)
        mock_tx = [
            self._tx("kimi", "incoming", 100, now.isoformat()),
            self._tx("kimi", "outgoing", 30, now.isoformat()),
        ]
        with patch("lightning.ledger.get_transactions", return_value=mock_tx):
            earned, spent = _query_token_transactions(
                "kimi", now - timedelta(hours=1), now + timedelta(hours=1)
            )
        assert earned == 100
        assert spent == 30

    def test_ledger_filters_by_agent(self):
        """Transactions belonging to other agents are excluded entirely."""
        now = datetime.now(UTC)
        mock_tx = [self._tx("claude", "incoming", 100, now.isoformat())]
        with patch("lightning.ledger.get_transactions", return_value=mock_tx):
            earned, spent = _query_token_transactions(
                "kimi", now - timedelta(hours=1), now + timedelta(hours=1)
            )
        # Transaction was for claude, not kimi — nothing counted either way.
        assert earned == 0
        assert spent == 0

    def test_ledger_filters_by_time(self):
        """Transactions outside the query window are excluded entirely."""
        now = datetime.now(UTC)
        old_time = now - timedelta(days=2)
        mock_tx = [self._tx("kimi", "incoming", 100, old_time.isoformat())]
        with patch("lightning.ledger.get_transactions", return_value=mock_tx):
            # Query for today only; the transaction is two days old.
            earned, spent = _query_token_transactions(
                "kimi", now - timedelta(hours=1), now + timedelta(hours=1)
            )
        assert earned == 0
        assert spent == 0
+
+
class TestGenerateScorecard:
    """Test scorecard generation."""

    def test_generate_scorecard_no_activity(self):
        """An agent with no events still gets a scorecard with a no-activity bullet."""
        with (
            patch(
                "dashboard.services.scorecard_service._collect_events_for_period",
                return_value=[],
            ),
            patch(
                "dashboard.services.scorecard_service._query_token_transactions",
                return_value=(0, 0),
            ),
        ):
            scorecard = generate_scorecard("kimi", PeriodType.daily)

        assert scorecard is not None
        assert scorecard.agent_id == "kimi"
        assert scorecard.period_type == PeriodType.daily
        assert len(scorecard.narrative_bullets) == 1
        assert "No recorded activity" in scorecard.narrative_bullets[0]

    def test_generate_scorecard_with_activity(self):
        """Collected events and ledger totals flow into the scorecard metrics."""
        events = [
            Event(type="gitea.push", source="gitea", data={"actor": "kimi", "num_commits": 5}),
        ]
        with (
            patch(
                "dashboard.services.scorecard_service._collect_events_for_period",
                return_value=events,
            ),
            patch(
                "dashboard.services.scorecard_service._query_token_transactions",
                return_value=(100, 20),
            ),
        ):
            scorecard = generate_scorecard("kimi", PeriodType.daily)

        assert scorecard is not None
        assert scorecard.metrics.commits == 5
        assert scorecard.metrics.tokens_earned == 100
        assert scorecard.metrics.tokens_spent == 20
+
+
class TestGenerateAllScorecards:
    """Test generating scorecards for all agents."""

    @staticmethod
    def _all_scorecards_without_activity():
        """Generate daily scorecards with event and ledger queries stubbed out."""
        with (
            patch(
                "dashboard.services.scorecard_service._collect_events_for_period",
                return_value=[],
            ),
            patch(
                "dashboard.services.scorecard_service._query_token_transactions",
                return_value=(0, 0),
            ),
        ):
            return generate_all_scorecards(PeriodType.daily)

    def test_generates_for_all_tracked_agents(self):
        """Every tracked agent gets a scorecard even with no activity."""
        scorecards = self._all_scorecards_without_activity()

        agent_ids = {s.agent_id for s in scorecards}
        assert {"kimi", "claude", "gemini", "hermes", "manus"} <= agent_ids

    def test_scorecards_sorted(self):
        """Scorecards come back ordered by agent_id."""
        scorecards = self._all_scorecards_without_activity()

        agent_ids = [s.agent_id for s in scorecards]
        assert agent_ids == sorted(agent_ids)
+
+
+class TestScorecardRoutes:
+ """Test scorecard API routes."""
+
+ def test_list_agents_endpoint(self, client):
+ """Test GET /scorecards/api/agents returns tracked agents."""
+ response = client.get("/scorecards/api/agents")
+ assert response.status_code == 200
+ data = response.json()
+ assert "agents" in data
+ assert "kimi" in data["agents"]
+ assert "claude" in data["agents"]
+
+ def test_get_scorecard_endpoint(self, client):
+ """Test GET /scorecards/api/{agent_id} returns scorecard."""
+ with patch("dashboard.routes.scorecards.generate_scorecard") as mock_generate:
+ mock_generate.return_value = ScorecardSummary(
+ agent_id="kimi",
+ period_type=PeriodType.daily,
+ period_start=datetime.now(UTC),
+ period_end=datetime.now(UTC),
+ metrics=AgentMetrics(agent_id="kimi"),
+ narrative_bullets=["Test bullet"],
+ patterns=[],
+ )
+ response = client.get("/scorecards/api/kimi?period=daily")
+
+ assert response.status_code == 200
+ data = response.json()
+ assert data["agent_id"] == "kimi"
+ assert data["period_type"] == "daily"
+
+ def test_get_scorecard_invalid_period(self, client):
+ """Test GET with invalid period returns 400."""
+ response = client.get("/scorecards/api/kimi?period=invalid")
+ assert response.status_code == 400
+ assert "error" in response.json()
+
+ def test_get_all_scorecards_endpoint(self, client):
+ """Test GET /scorecards/api returns all scorecards."""
+ with patch("dashboard.routes.scorecards.generate_all_scorecards") as mock_generate:
+ mock_generate.return_value = [
+ ScorecardSummary(
+ agent_id="kimi",
+ period_type=PeriodType.daily,
+ period_start=datetime.now(UTC),
+ period_end=datetime.now(UTC),
+ metrics=AgentMetrics(agent_id="kimi"),
+ narrative_bullets=[],
+ patterns=[],
+ ),
+ ]
+ response = client.get("/scorecards/api?period=daily")
+
+ assert response.status_code == 200
+ data = response.json()
+ assert data["period"] == "daily"
+ assert "scorecards" in data
+ assert len(data["scorecards"]) == 1
+
+ def test_scorecards_page_renders(self, client):
+ """Test GET /scorecards returns HTML page."""
+ response = client.get("/scorecards")
+ assert response.status_code == 200
+ assert "text/html" in response.headers.get("content-type", "")
+ assert "AGENT SCORECARDS" in response.text
+
+ def test_scorecard_panel_renders(self, client):
+ """Test GET /scorecards/panel/{agent_id} returns HTML."""
+ with patch("dashboard.routes.scorecards.generate_scorecard") as mock_generate:
+ mock_generate.return_value = ScorecardSummary(
+ agent_id="kimi",
+ period_type=PeriodType.daily,
+ period_start=datetime.now(UTC),
+ period_end=datetime.now(UTC),
+ metrics=AgentMetrics(agent_id="kimi", commits=5),
+ narrative_bullets=["Active across 5 commits this day."],
+ patterns=["High activity"],
+ )
+ response = client.get("/scorecards/panel/kimi?period=daily")
+
+ assert response.status_code == 200
+ assert "text/html" in response.headers.get("content-type", "")
+ assert "Kimi" in response.text
+
+ def test_all_panels_renders(self, client):
+ """Test GET /scorecards/all/panels returns HTML with all panels."""
+ with patch("dashboard.routes.scorecards.generate_all_scorecards") as mock_generate:
+ mock_generate.return_value = [
+ ScorecardSummary(
+ agent_id="kimi",
+ period_type=PeriodType.daily,
+ period_start=datetime.now(UTC),
+ period_end=datetime.now(UTC),
+ metrics=AgentMetrics(agent_id="kimi"),
+ narrative_bullets=[],
+ patterns=[],
+ ),
+ ]
+ response = client.get("/scorecards/all/panels?period=daily")
+
+ assert response.status_code == 200
+ assert "text/html" in response.headers.get("content-type", "")