forked from Rockachopa/Timmy-time-dashboard
Compare commits
1 Commits
claude/iss
...
claude/iss
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fb69f01b72 |
149
src/infrastructure/world/adapters/threejs.py
Normal file
149
src/infrastructure/world/adapters/threejs.py
Normal file
@@ -0,0 +1,149 @@
|
||||
"""Three.js world adapter — bridges Kimi's AI World Builder to WorldInterface.
|
||||
|
||||
Studied from Kimisworld.zip (issue #870). Kimi's world is a React +
|
||||
Three.js app ("AI World Builder v1.0") that exposes a JSON state API and
|
||||
accepts ``addObject`` / ``updateObject`` / ``removeObject`` commands.
|
||||
|
||||
This adapter is a stub: ``connect()`` and the core methods outline the
|
||||
HTTP / WebSocket wiring that would be needed to talk to a running instance.
|
||||
The ``observe()`` response maps Kimi's ``WorldObject`` schema to
|
||||
``PerceptionOutput`` entities so that any WorldInterface consumer can
|
||||
treat the Three.js canvas like any other game world.
|
||||
|
||||
Usage::
|
||||
|
||||
registry.register("threejs", ThreeJSWorldAdapter)
|
||||
adapter = registry.get("threejs", base_url="http://localhost:5173")
|
||||
adapter.connect()
|
||||
perception = adapter.observe()
|
||||
adapter.act(CommandInput(action="add_object", parameters={"geometry": "sphere", ...}))
|
||||
adapter.speak("Hello from Timmy", target="broadcast")
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
from infrastructure.world.interface import WorldInterface
|
||||
from infrastructure.world.types import ActionResult, ActionStatus, CommandInput, PerceptionOutput
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
# Kimi's WorldObject geometry / material vocabulary (from WorldObjects.tsx)
# ---------------------------------------------------------------------------

# Geometry names Kimi's addObject API accepts.
_VALID_GEOMETRIES = {"box", "sphere", "cylinder", "torus", "cone", "dodecahedron"}
# Material presets rendered by the Three.js front end.
_VALID_MATERIALS = {"standard", "wireframe", "glass", "glow"}
# Top-level object categories in the WorldObject schema.
# NOTE(review): these three sets are not referenced elsewhere in this module —
# presumably intended for parameter validation in act(); confirm when wiring it.
_VALID_TYPES = {"mesh", "light", "particle", "custom"}
||||
def _object_to_entity_description(obj: dict) -> str:
|
||||
"""Render a Kimi WorldObject dict as a human-readable entity string.
|
||||
|
||||
Example output: ``sphere/glow #ff006e at (2.1, 3.0, -1.5)``
|
||||
"""
|
||||
geometry = obj.get("geometry", "unknown")
|
||||
material = obj.get("material", "unknown")
|
||||
color = obj.get("color", "#ffffff")
|
||||
pos = obj.get("position", [0, 0, 0])
|
||||
obj_type = obj.get("type", "mesh")
|
||||
pos_str = "({:.1f}, {:.1f}, {:.1f})".format(*pos)
|
||||
return f"{obj_type}/{geometry}/{material} {color} at {pos_str}"
|
||||
|
||||
|
||||
class ThreeJSWorldAdapter(WorldInterface):
    """WorldInterface bridge to Kimi's Three.js "AI World Builder".

    The target is a running Three.js app that exposes three integration
    points:

    - ``GET /api/world/state`` — current list of WorldObjects
    - ``POST /api/world/execute`` — addObject / updateObject / removeObject
    - WebSocket ``/ws/world`` — stream of state-change events

    Every core method is still a stub: each raises ``NotImplementedError``
    whose message spells out the HTTP / WebSocket wiring it needs. Implement
    ``connect()`` first — it should confirm the app is reachable and may
    open the event WebSocket.

    Findings from studying Kimi's world (issue #870):

    - A WorldObject bundles geometry, material, color, position, rotation,
      scale, and an optional *animation* string that the front end runs
      through ``new Function()`` on every animation frame.
    - ``AIAgent.tsx`` drives its avatar with lerp() targeting, cycles
      moods, and pulses its core while "thinking" — a template for how
      Timmy could manifest presence in a 3D world.
    - World complexity is a plain per-object counter the AI consults when
      deciding whether to create, modify, or upgrade.
    """

    def __init__(self, *, base_url: str = "http://localhost:5173") -> None:
        # Strip a trailing slash once so endpoint paths can be appended safely.
        self._base_url = base_url.rstrip("/")
        self._connected = False

    # -- lifecycle ---------------------------------------------------------

    def connect(self) -> None:
        """Stub: verify the Three.js app answers, then mark as connected."""
        detail = (
            "ThreeJSWorldAdapter.connect() — verify Three.js app is running at "
            f"{self._base_url} and optionally open a WebSocket to /ws/world"
        )
        raise NotImplementedError(detail)

    def disconnect(self) -> None:
        """Clear the connected flag and record the transition."""
        self._connected = False
        logger.info("ThreeJSWorldAdapter disconnected")

    @property
    def is_connected(self) -> bool:
        """Whether connect() has completed (always False while it is a stub)."""
        return self._connected

    # -- core contract (stubs) ---------------------------------------------

    def observe(self) -> PerceptionOutput:
        """Stub: turn the current Three.js world state into perception.

        Planned wiring::

            GET {base_url}/api/world/state
            → {"objects": [...WorldObject], "worldComplexity": int, ...}

        Each WorldObject in the response becomes one entity description
        string.
        """
        raise NotImplementedError(
            "ThreeJSWorldAdapter.observe() — GET /api/world/state, "
            "map each WorldObject via _object_to_entity_description()"
        )

    def act(self, command: CommandInput) -> ActionResult:
        """Stub: forward *command* to the world's CodeExecutor API.

        Actions mirrored from Kimi's API:

        - ``add_object`` — parameters: WorldObject fields (geometry, material, …)
        - ``update_object`` — parameters: id + partial WorldObject fields
        - ``remove_object`` — parameters: id
        - ``clear_world`` — no parameters

        Planned wiring::

            POST {base_url}/api/world/execute
            Content-Type: application/json
            {"action": "add_object", "parameters": {...}}
        """
        detail = (
            f"ThreeJSWorldAdapter.act({command.action!r}) — "
            "POST /api/world/execute with serialised CommandInput"
        )
        raise NotImplementedError(detail)

    def speak(self, message: str, target: str | None = None) -> None:
        """Stub: surface *message* inside the Three.js scene.

        Kimi's world ships no chat layer, so the recommended route is a
        short-lived ``Text`` entity at a visible spot, or a broadcast over
        the world WebSocket::

            {"type": "timmy_speech", "text": message, "target": target}
        """
        raise NotImplementedError(
            "ThreeJSWorldAdapter.speak() — send timmy_speech frame over "
            "/ws/world WebSocket, or POST a temporary Text entity"
        )
|
||||
@@ -1,458 +0,0 @@
|
||||
"""Unit tests for dashboard/services/scorecard_service.py.
|
||||
|
||||
Focuses on edge cases and scenarios not covered in test_scorecards.py:
|
||||
- _aggregate_metrics: test.execution events, PR-closed-without-merge,
|
||||
push default commit count, untracked agent with agent_id passthrough
|
||||
- _detect_patterns: boundary conditions (< 3 PRs, exactly 3, exactly 80%)
|
||||
- _generate_narrative_bullets: singular/plural forms
|
||||
- generate_scorecard: token augmentation max() logic
|
||||
- ScorecardSummary.to_dict(): ISO timestamp format, tests_affected count
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import UTC, datetime
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
pytestmark = pytest.mark.unit
|
||||
|
||||
from dashboard.services.scorecard_service import (
|
||||
AgentMetrics,
|
||||
PeriodType,
|
||||
ScorecardSummary,
|
||||
_aggregate_metrics,
|
||||
_detect_patterns,
|
||||
_generate_narrative_bullets,
|
||||
generate_scorecard,
|
||||
)
|
||||
from infrastructure.events.bus import Event
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _aggregate_metrics — edge cases
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestAggregateMetricsEdgeCases:
    """Exercise _aggregate_metrics corners that test_scorecards.py skips."""

    def test_push_event_defaults_to_one_commit(self):
        """A push payload that lacks num_commits is counted as one commit."""
        batch = [Event(type="gitea.push", source="gitea", data={"actor": "claude"})]
        by_agent = _aggregate_metrics(batch)

        assert by_agent["claude"].commits == 1

    def test_pr_closed_without_merge_not_counted(self):
        """Closing a PR unmerged records nothing anywhere."""
        payload = {"actor": "kimi", "pr_number": 99, "action": "closed", "merged": False}
        batch = [Event(type="gitea.pull_request", source="gitea", data=payload)]
        by_agent = _aggregate_metrics(batch)

        assert "kimi" in by_agent
        # Not merged — excluded from prs_merged...
        assert 99 not in by_agent["kimi"].prs_merged
        # ...and from prs_opened, since the action was not "opened"...
        assert 99 not in by_agent["kimi"].prs_opened
        # ...and from issues_touched, which only merged PRs feed.
        assert 99 not in by_agent["kimi"].issues_touched

    def test_test_execution_event_aggregation(self):
        """test.execution events populate the tests_affected set."""
        payload = {"actor": "gemini", "test_files": ["tests/test_alpha.py", "tests/test_beta.py"]}
        batch = [Event(type="test.execution", source="ci", data=payload)]
        by_agent = _aggregate_metrics(batch)

        assert "gemini" in by_agent
        assert "tests/test_alpha.py" in by_agent["gemini"].tests_affected
        assert "tests/test_beta.py" in by_agent["gemini"].tests_affected

    def test_untracked_agent_with_agent_id_field_included(self):
        """An otherwise-untracked actor is kept when data carries agent_id."""
        batch = [
            Event(
                type="agent.task.completed",
                source="system",
                data={"agent_id": "kimi", "tests_affected": [], "token_reward": 5},
            ),
        ]
        by_agent = _aggregate_metrics(batch)

        # kimi is tracked because agent_id is present in the payload.
        assert "kimi" in by_agent
        assert by_agent["kimi"].tokens_earned == 5

    def test_untracked_actor_without_agent_id_excluded(self):
        """An unknown actor with no agent_id field is dropped entirely."""
        batch = [
            Event(type="gitea.push", source="gitea", data={"actor": "anon-bot", "num_commits": 10}),
        ]
        by_agent = _aggregate_metrics(batch)

        assert "anon-bot" not in by_agent

    def test_issue_opened_with_no_issue_number_ignored(self):
        """issue_number=0 on an opened issue adds nothing to issues_touched."""
        batch = [
            Event(
                type="gitea.issue.opened",
                source="gitea",
                data={"actor": "hermes", "issue_number": 0},
            ),
        ]
        by_agent = _aggregate_metrics(batch)

        assert "hermes" in by_agent
        assert len(by_agent["hermes"].issues_touched) == 0

    def test_comment_with_no_issue_number_still_increments_counter(self):
        """issue_number=0 on a comment bumps the counter, not issues_touched."""
        batch = [
            Event(
                type="gitea.issue.comment",
                source="gitea",
                data={"actor": "manus", "issue_number": 0},
            ),
        ]
        by_agent = _aggregate_metrics(batch)

        assert "manus" in by_agent
        assert by_agent["manus"].comments == 1
        assert len(by_agent["manus"].issues_touched) == 0

    def test_task_completion_no_tests_affected(self):
        """An empty tests_affected list on task completion is harmless."""
        batch = [
            Event(
                type="agent.task.completed",
                source="system",
                data={"agent_id": "claude", "tests_affected": [], "token_reward": 20},
            ),
        ]
        by_agent = _aggregate_metrics(batch)

        assert "claude" in by_agent
        assert len(by_agent["claude"].tests_affected) == 0
        assert by_agent["claude"].tokens_earned == 20

    def test_multiple_agents_independent_metrics(self):
        """Per-agent tallies never bleed into one another."""
        batch = [
            Event(type="gitea.push", source="gitea", data={"actor": "claude", "num_commits": 3}),
            Event(type="gitea.push", source="gitea", data={"actor": "gemini", "num_commits": 7}),
        ]
        by_agent = _aggregate_metrics(batch)

        assert by_agent["claude"].commits == 3
        assert by_agent["gemini"].commits == 7

    def test_pr_with_no_pr_number_not_recorded(self):
        """pr_number=0 on an opened PR leaves prs_opened empty."""
        batch = [
            Event(
                type="gitea.pull_request",
                source="gitea",
                data={"actor": "kimi", "pr_number": 0, "action": "opened"},
            ),
        ]
        by_agent = _aggregate_metrics(batch)

        assert "kimi" in by_agent
        assert len(by_agent["kimi"].prs_opened) == 0
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _detect_patterns — boundary conditions
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestDetectPatternsBoundaries:
    """Probe the exact numeric thresholds inside _detect_patterns."""

    def test_no_patterns_with_empty_metrics(self):
        """A blank AgentMetrics yields an empty pattern list."""
        found = _detect_patterns(AgentMetrics(agent_id="kimi"))

        assert found == []

    def test_merge_rate_requires_three_or_more_prs(self):
        """Fewer than 3 opened PRs disables merge-rate evaluation entirely."""
        m = AgentMetrics(
            agent_id="kimi",
            prs_opened={1, 2},
            prs_merged={1, 2},  # 100% rate but only 2 PRs
        )
        found = _detect_patterns(m)

        # Below the 3-PR floor, neither rate pattern may fire.
        assert not any("High merge rate" in p for p in found)
        assert not any("low merge rate" in p for p in found)

    def test_merge_rate_exactly_3_prs_triggers_pattern(self):
        """Exactly three opened PRs is enough to evaluate the merge rate."""
        m = AgentMetrics(
            agent_id="kimi",
            prs_opened={1, 2, 3},
            prs_merged={1, 2, 3},  # 100% rate, 3 PRs
        )
        found = _detect_patterns(m)

        assert any("High merge rate" in p for p in found)

    def test_merge_rate_80_percent_is_high(self):
        """A merge rate of exactly 80% counts as high (inclusive boundary)."""
        m = AgentMetrics(
            agent_id="kimi",
            prs_opened={1, 2, 3, 4, 5},
            prs_merged={1, 2, 3, 4},  # 80%
        )
        found = _detect_patterns(m)

        assert any("High merge rate" in p for p in found)

    def test_merge_rate_below_80_not_high(self):
        """A merge rate under 80% must not read as high."""
        m = AgentMetrics(
            agent_id="kimi",
            prs_opened={1, 2, 3, 4, 5, 6, 7},  # 7 PRs
            prs_merged={1, 2, 3, 4, 5},  # ~71.4% — below 80%
        )
        found = _detect_patterns(m)

        assert not any("High merge rate" in p for p in found)

    def test_commit_pattern_requires_over_10_commits(self):
        """Ten commits with no PRs sits just on the non-triggering side."""
        m = AgentMetrics(agent_id="kimi", commits=10, prs_opened=set())
        found = _detect_patterns(m)

        assert not any("High commit volume" in p for p in found)

    def test_commit_pattern_triggered_at_11_commits(self):
        """Eleven commits with zero PRs crosses the high-commit threshold."""
        m = AgentMetrics(agent_id="kimi", commits=11, prs_opened=set())
        found = _detect_patterns(m)

        assert any("High commit volume without PRs" in p for p in found)

    def test_token_accumulation_exact_boundary(self):
        """Net tokens of exactly 100 is not yet 'strong accumulation' (> 100 required)."""
        m = AgentMetrics(agent_id="kimi", tokens_earned=100, tokens_spent=0)
        found = _detect_patterns(m)

        assert not any("Strong token accumulation" in p for p in found)

    def test_token_spend_exact_boundary(self):
        """Net tokens of exactly -50 is not yet 'high spend' (< -50 required)."""
        m = AgentMetrics(agent_id="kimi", tokens_earned=0, tokens_spent=50)
        found = _detect_patterns(m)

        assert not any("High token spend" in p for p in found)
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _generate_narrative_bullets — singular/plural
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestGenerateNarrativeSingularPlural:
    """Verify singular vs. plural wording in narrative bullets."""

    @staticmethod
    def _activity_line(bullets):
        # The aggregate activity bullet always contains "Active across";
        # return that bullet, or None when no activity was reported.
        return next((b for b in bullets if "Active across" in b), None)

    def test_singular_commit(self):
        """commits=1 renders '1 commit', never '1 commits'."""
        bullets = _generate_narrative_bullets(
            AgentMetrics(agent_id="kimi", commits=1), PeriodType.daily
        )
        line = self._activity_line(bullets)

        assert line is not None
        assert "1 commit" in line
        assert "1 commits" not in line

    def test_singular_pr_opened(self):
        """A single opened PR renders '1 PR opened'."""
        bullets = _generate_narrative_bullets(
            AgentMetrics(agent_id="kimi", prs_opened={1}), PeriodType.daily
        )
        line = self._activity_line(bullets)

        assert line is not None
        assert "1 PR opened" in line

    def test_singular_pr_merged(self):
        """A single merged PR renders '1 PR merged'."""
        bullets = _generate_narrative_bullets(
            AgentMetrics(agent_id="kimi", prs_merged={1}), PeriodType.daily
        )
        line = self._activity_line(bullets)

        assert line is not None
        assert "1 PR merged" in line

    def test_singular_issue_touched(self):
        """A single touched issue renders '1 issue touched'."""
        bullets = _generate_narrative_bullets(
            AgentMetrics(agent_id="kimi", issues_touched={42}), PeriodType.daily
        )
        line = self._activity_line(bullets)

        assert line is not None
        assert "1 issue touched" in line

    def test_singular_comment(self):
        """comments=1 renders '1 comment'."""
        bullets = _generate_narrative_bullets(
            AgentMetrics(agent_id="kimi", comments=1), PeriodType.daily
        )
        line = self._activity_line(bullets)

        assert line is not None
        assert "1 comment" in line

    def test_singular_test_file(self):
        """A single affected test renders '1 test file.'."""
        bullets = _generate_narrative_bullets(
            AgentMetrics(agent_id="kimi", tests_affected={"test_foo.py"}), PeriodType.daily
        )

        assert any("1 test file." in b for b in bullets)

    def test_weekly_period_label(self):
        """Weekly periods say 'this week' in the no-activity bullet."""
        bullets = _generate_narrative_bullets(
            AgentMetrics(agent_id="kimi"), PeriodType.weekly
        )

        assert any("this week" in b for b in bullets)
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# generate_scorecard — token augmentation (max logic)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestGenerateScorecardTokenAugmentation:
    """Test the max() token augmentation logic in generate_scorecard.

    generate_scorecard reconciles the event-derived token counts with the
    token ledger by taking max(event_value, ledger_value) on each side.
    """

    def test_event_tokens_win_over_ledger_when_higher(self):
        """When event tokens > ledger tokens, event tokens are preserved."""
        events = [
            Event(
                type="agent.task.completed",
                source="system",
                data={"agent_id": "kimi", "tests_affected": [], "token_reward": 200},
            ),
        ]
        # One flat `with` instead of two nested blocks — same patches,
        # less indentation (both patches are active for the call).
        with patch(
            "dashboard.services.scorecard_service._collect_events_for_period",
            return_value=events,
        ), patch(
            "dashboard.services.scorecard_service._query_token_transactions",
            return_value=(50, 0),  # ledger says 50 earned
        ):
            scorecard = generate_scorecard("kimi", PeriodType.daily)

        # max(200, 50) = 200 should win
        assert scorecard.metrics.tokens_earned == 200

    def test_ledger_tokens_win_when_higher(self):
        """When ledger tokens > event tokens, ledger tokens are used."""
        events = [
            Event(
                type="agent.task.completed",
                source="system",
                data={"agent_id": "kimi", "tests_affected": [], "token_reward": 10},
            ),
        ]
        with patch(
            "dashboard.services.scorecard_service._collect_events_for_period",
            return_value=events,
        ), patch(
            "dashboard.services.scorecard_service._query_token_transactions",
            return_value=(500, 100),  # ledger says 500 earned, 100 spent
        ):
            scorecard = generate_scorecard("kimi", PeriodType.daily)

        # max(10, 500) = 500
        assert scorecard.metrics.tokens_earned == 500
        # max(0, 100) = 100
        assert scorecard.metrics.tokens_spent == 100
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ScorecardSummary.to_dict — timestamp format and tests_affected
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestScorecardSummaryToDict:
    """Extra serialization checks for ScorecardSummary.to_dict."""

    def test_timestamps_are_iso_strings(self):
        """period_start and period_end serialize as ISO-8601 strings."""
        begin = datetime(2026, 3, 20, 0, 0, 0, tzinfo=UTC)
        finish = datetime(2026, 3, 21, 0, 0, 0, tzinfo=UTC)
        payload = ScorecardSummary(
            agent_id="kimi",
            period_type=PeriodType.daily,
            period_start=begin,
            period_end=finish,
            metrics=AgentMetrics(agent_id="kimi"),
        ).to_dict()

        assert payload["period_start"] == begin.isoformat()
        assert payload["period_end"] == finish.isoformat()

    def test_tests_affected_count_in_dict(self):
        """metrics.tests_affected serializes as a count (int), not the set."""
        affected = {"test_a.py", "test_b.py", "test_c.py"}
        payload = ScorecardSummary(
            agent_id="kimi",
            period_type=PeriodType.daily,
            period_start=datetime.now(UTC),
            period_end=datetime.now(UTC),
            metrics=AgentMetrics(agent_id="kimi", tests_affected=affected),
        ).to_dict()

        assert payload["metrics"]["tests_affected"] == 3

    def test_empty_narrative_and_patterns(self):
        """Default empty narrative/pattern lists serialize as empty lists."""
        payload = ScorecardSummary(
            agent_id="claude",
            period_type=PeriodType.weekly,
            period_start=datetime.now(UTC),
            period_end=datetime.now(UTC),
            metrics=AgentMetrics(agent_id="claude"),
        ).to_dict()

        assert payload["narrative_bullets"] == []
        assert payload["patterns"] == []
        assert payload["period_type"] == "weekly"
|
||||
Reference in New Issue
Block a user