forked from Rockachopa/Timmy-time-dashboard
Compare commits
1 Commits
kimi/issue
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 6dd48685e7 |
@@ -330,6 +330,13 @@ class Settings(BaseSettings):
|
|||||||
autoresearch_max_iterations: int = 100
|
autoresearch_max_iterations: int = 100
|
||||||
autoresearch_metric: str = "val_bpb" # metric to optimise (lower = better)
|
autoresearch_metric: str = "val_bpb" # metric to optimise (lower = better)
|
||||||
|
|
||||||
|
# ── Weekly Narrative Summary ───────────────────────────────────────
|
||||||
|
# Generates a human-readable weekly summary of development activity.
|
||||||
|
# Disabling this will stop the weekly narrative generation.
|
||||||
|
weekly_narrative_enabled: bool = True
|
||||||
|
weekly_narrative_lookback_days: int = 7
|
||||||
|
weekly_narrative_output_dir: str = ".loop"
|
||||||
|
|
||||||
# ── Local Hands (Shell + Git) ──────────────────────────────────────
|
# ── Local Hands (Shell + Git) ──────────────────────────────────────
|
||||||
# Enable local shell/git execution hands.
|
# Enable local shell/git execution hands.
|
||||||
hands_shell_enabled: bool = True
|
hands_shell_enabled: bool = True
|
||||||
|
|||||||
343
tests/timmy_automations/test_weekly_narrative.py
Normal file
343
tests/timmy_automations/test_weekly_narrative.py
Normal file
@@ -0,0 +1,343 @@
|
|||||||
|
"""Tests for weekly_narrative.py script."""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
|
from datetime import UTC, datetime, timedelta
|
||||||
|
from pathlib import Path
|
||||||
|
from unittest.mock import MagicMock, patch
|
||||||
|
|
||||||
|
# Add timmy_automations to path for imports
|
||||||
|
sys.path.insert(
|
||||||
|
0, str(Path(__file__).resolve().parent.parent.parent / "timmy_automations" / "daily_run")
|
||||||
|
)
|
||||||
|
|
||||||
|
import weekly_narrative as wn
|
||||||
|
|
||||||
|
|
||||||
|
class TestParseTimestamp:
|
||||||
|
"""Test timestamp parsing."""
|
||||||
|
|
||||||
|
def test_parse_iso_with_z(self):
|
||||||
|
"""Parse ISO timestamp with Z suffix."""
|
||||||
|
result = wn.parse_ts("2026-03-21T12:00:00Z")
|
||||||
|
assert result is not None
|
||||||
|
assert result.year == 2026
|
||||||
|
assert result.month == 3
|
||||||
|
assert result.day == 21
|
||||||
|
|
||||||
|
def test_parse_iso_with_offset(self):
|
||||||
|
"""Parse ISO timestamp with timezone offset."""
|
||||||
|
result = wn.parse_ts("2026-03-21T12:00:00+00:00")
|
||||||
|
assert result is not None
|
||||||
|
assert result.year == 2026
|
||||||
|
|
||||||
|
def test_parse_empty_string(self):
|
||||||
|
"""Empty string returns None."""
|
||||||
|
result = wn.parse_ts("")
|
||||||
|
assert result is None
|
||||||
|
|
||||||
|
def test_parse_invalid_string(self):
|
||||||
|
"""Invalid string returns None."""
|
||||||
|
result = wn.parse_ts("not-a-timestamp")
|
||||||
|
assert result is None
|
||||||
|
|
||||||
|
|
||||||
|
class TestCollectCyclesData:
|
||||||
|
"""Test cycle data collection."""
|
||||||
|
|
||||||
|
def test_no_cycles_file(self, tmp_path):
|
||||||
|
"""Handle missing cycles file gracefully."""
|
||||||
|
with patch.object(wn, "REPO_ROOT", tmp_path):
|
||||||
|
since = datetime.now(UTC) - timedelta(days=7)
|
||||||
|
result = wn.collect_cycles_data(since)
|
||||||
|
assert result["total"] == 0
|
||||||
|
assert result["successes"] == 0
|
||||||
|
assert result["failures"] == 0
|
||||||
|
|
||||||
|
def test_collect_recent_cycles(self, tmp_path):
|
||||||
|
"""Collect cycles within lookback period."""
|
||||||
|
retro_dir = tmp_path / ".loop" / "retro"
|
||||||
|
retro_dir.mkdir(parents=True)
|
||||||
|
|
||||||
|
now = datetime.now(UTC)
|
||||||
|
cycles = [
|
||||||
|
{"timestamp": now.isoformat(), "success": True, "cycle": 1},
|
||||||
|
{"timestamp": now.isoformat(), "success": False, "cycle": 2},
|
||||||
|
{"timestamp": (now - timedelta(days=10)).isoformat(), "success": True, "cycle": 3},
|
||||||
|
]
|
||||||
|
|
||||||
|
with open(retro_dir / "cycles.jsonl", "w") as f:
|
||||||
|
for c in cycles:
|
||||||
|
f.write(json.dumps(c) + "\n")
|
||||||
|
|
||||||
|
with patch.object(wn, "REPO_ROOT", tmp_path):
|
||||||
|
since = now - timedelta(days=7)
|
||||||
|
result = wn.collect_cycles_data(since)
|
||||||
|
assert result["total"] == 2 # Only recent 2
|
||||||
|
assert result["successes"] == 1
|
||||||
|
assert result["failures"] == 1
|
||||||
|
|
||||||
|
|
||||||
|
class TestExtractThemes:
|
||||||
|
"""Test theme extraction from issues."""
|
||||||
|
|
||||||
|
def test_extract_layer_labels(self):
|
||||||
|
"""Extract layer labels from issues."""
|
||||||
|
issues = [
|
||||||
|
{"labels": [{"name": "layer:triage"}, {"name": "bug"}]},
|
||||||
|
{"labels": [{"name": "layer:tests"}, {"name": "bug"}]},
|
||||||
|
{"labels": [{"name": "layer:triage"}, {"name": "feature"}]},
|
||||||
|
]
|
||||||
|
|
||||||
|
result = wn.extract_themes(issues)
|
||||||
|
|
||||||
|
assert len(result["layers"]) == 2
|
||||||
|
layer_names = {layer["name"] for layer in result["layers"]}
|
||||||
|
assert "triage" in layer_names
|
||||||
|
assert "tests" in layer_names
|
||||||
|
|
||||||
|
def test_extract_type_labels(self):
|
||||||
|
"""Extract type labels (bug/feature/etc)."""
|
||||||
|
issues = [
|
||||||
|
{"labels": [{"name": "bug"}]},
|
||||||
|
{"labels": [{"name": "feature"}]},
|
||||||
|
{"labels": [{"name": "bug"}]},
|
||||||
|
]
|
||||||
|
|
||||||
|
result = wn.extract_themes(issues)
|
||||||
|
|
||||||
|
type_names = {t_type["name"] for t_type in result["types"]}
|
||||||
|
assert "bug" in type_names
|
||||||
|
assert "feature" in type_names
|
||||||
|
|
||||||
|
def test_empty_issues(self):
|
||||||
|
"""Handle empty issue list."""
|
||||||
|
result = wn.extract_themes([])
|
||||||
|
assert result["layers"] == []
|
||||||
|
assert result["types"] == []
|
||||||
|
assert result["top_labels"] == []
|
||||||
|
|
||||||
|
|
||||||
|
class TestExtractAgentContributions:
|
||||||
|
"""Test agent contribution extraction."""
|
||||||
|
|
||||||
|
def test_extract_assignees(self):
|
||||||
|
"""Extract assignee counts."""
|
||||||
|
issues = [
|
||||||
|
{"assignee": {"login": "kimi"}},
|
||||||
|
{"assignee": {"login": "hermes"}},
|
||||||
|
{"assignee": {"login": "kimi"}},
|
||||||
|
]
|
||||||
|
|
||||||
|
result = wn.extract_agent_contributions(issues, [], [])
|
||||||
|
|
||||||
|
assert len(result["active_assignees"]) == 2
|
||||||
|
assignee_logins = {a["login"] for a in result["active_assignees"]} # noqa: E741
|
||||||
|
assert "kimi" in assignee_logins
|
||||||
|
assert "hermes" in assignee_logins
|
||||||
|
|
||||||
|
def test_extract_pr_authors(self):
|
||||||
|
"""Extract PR author counts."""
|
||||||
|
prs = [
|
||||||
|
{"user": {"login": "kimi"}},
|
||||||
|
{"user": {"login": "claude"}},
|
||||||
|
{"user": {"login": "kimi"}},
|
||||||
|
]
|
||||||
|
|
||||||
|
result = wn.extract_agent_contributions([], prs, [])
|
||||||
|
|
||||||
|
assert len(result["pr_authors"]) == 2
|
||||||
|
|
||||||
|
def test_kimi_mentions_in_cycles(self):
|
||||||
|
"""Count Kimi mentions in cycle notes."""
|
||||||
|
cycles = [
|
||||||
|
{"notes": "Kimi did great work", "reason": ""},
|
||||||
|
{"notes": "", "reason": "Kimi timeout"},
|
||||||
|
{"notes": "All good", "reason": ""},
|
||||||
|
]
|
||||||
|
|
||||||
|
result = wn.extract_agent_contributions([], [], cycles)
|
||||||
|
assert result["kimi_mentioned_cycles"] == 2
|
||||||
|
|
||||||
|
|
||||||
|
class TestAnalyzeTestShifts:
|
||||||
|
"""Test test pattern analysis."""
|
||||||
|
|
||||||
|
def test_no_cycles(self):
|
||||||
|
"""Handle no cycle data."""
|
||||||
|
result = wn.analyze_test_shifts([])
|
||||||
|
assert "note" in result
|
||||||
|
|
||||||
|
def test_test_metrics(self):
|
||||||
|
"""Calculate test metrics from cycles."""
|
||||||
|
cycles = [
|
||||||
|
{"tests_passed": 100, "tests_added": 5},
|
||||||
|
{"tests_passed": 150, "tests_added": 3},
|
||||||
|
]
|
||||||
|
|
||||||
|
result = wn.analyze_test_shifts(cycles)
|
||||||
|
|
||||||
|
assert result["total_tests_passed"] == 250
|
||||||
|
assert result["total_tests_added"] == 8
|
||||||
|
|
||||||
|
|
||||||
|
class TestGenerateVibeSummary:
|
||||||
|
"""Test vibe summary generation."""
|
||||||
|
|
||||||
|
def test_productive_vibe(self):
|
||||||
|
"""High success rate and activity = productive vibe."""
|
||||||
|
cycles_data = {"success_rate": 0.95, "successes": 10, "failures": 1}
|
||||||
|
issues_data = {"closed_count": 5}
|
||||||
|
|
||||||
|
result = wn.generate_vibe_summary(cycles_data, issues_data, {}, {"layers": []}, {}, {}, {})
|
||||||
|
|
||||||
|
assert result["overall"] == "productive"
|
||||||
|
assert "strong week" in result["description"].lower()
|
||||||
|
|
||||||
|
def test_struggling_vibe(self):
|
||||||
|
"""More failures than successes = struggling vibe."""
|
||||||
|
cycles_data = {"success_rate": 0.3, "successes": 3, "failures": 7}
|
||||||
|
issues_data = {"closed_count": 0}
|
||||||
|
|
||||||
|
result = wn.generate_vibe_summary(cycles_data, issues_data, {}, {"layers": []}, {}, {}, {})
|
||||||
|
|
||||||
|
assert result["overall"] == "struggling"
|
||||||
|
|
||||||
|
def test_quiet_vibe(self):
|
||||||
|
"""Low activity = quiet vibe."""
|
||||||
|
cycles_data = {"success_rate": 0.0, "successes": 0, "failures": 0}
|
||||||
|
issues_data = {"closed_count": 0}
|
||||||
|
|
||||||
|
result = wn.generate_vibe_summary(cycles_data, issues_data, {}, {"layers": []}, {}, {}, {})
|
||||||
|
|
||||||
|
assert result["overall"] == "quiet"
|
||||||
|
|
||||||
|
|
||||||
|
class TestGenerateMarkdownSummary:
|
||||||
|
"""Test markdown summary generation."""
|
||||||
|
|
||||||
|
def test_includes_header(self):
|
||||||
|
"""Markdown includes header."""
|
||||||
|
narrative = {
|
||||||
|
"period": {"start": "2026-03-14T00:00:00", "end": "2026-03-21T00:00:00"},
|
||||||
|
"vibe": {"overall": "productive", "description": "Good week"},
|
||||||
|
"activity": {
|
||||||
|
"cycles": {"total": 10, "successes": 9, "failures": 1},
|
||||||
|
"issues": {"closed": 5, "opened": 3},
|
||||||
|
"pull_requests": {"merged": 4, "opened": 2},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result = wn.generate_markdown_summary(narrative)
|
||||||
|
|
||||||
|
assert "# Weekly Narrative Summary" in result
|
||||||
|
assert "productive" in result.lower()
|
||||||
|
assert "10 total" in result or "10" in result
|
||||||
|
|
||||||
|
def test_includes_focus_areas(self):
|
||||||
|
"""Markdown includes focus areas when present."""
|
||||||
|
narrative = {
|
||||||
|
"period": {"start": "2026-03-14", "end": "2026-03-21"},
|
||||||
|
"vibe": {
|
||||||
|
"overall": "productive",
|
||||||
|
"description": "Good week",
|
||||||
|
"focus_areas": ["triage (5 items)", "tests (3 items)"],
|
||||||
|
},
|
||||||
|
"activity": {
|
||||||
|
"cycles": {"total": 0, "successes": 0, "failures": 0},
|
||||||
|
"issues": {"closed": 0, "opened": 0},
|
||||||
|
"pull_requests": {"merged": 0, "opened": 0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result = wn.generate_markdown_summary(narrative)
|
||||||
|
|
||||||
|
assert "Focus Areas" in result
|
||||||
|
assert "triage" in result
|
||||||
|
|
||||||
|
|
||||||
|
class TestConfigLoading:
|
||||||
|
"""Test configuration loading."""
|
||||||
|
|
||||||
|
def test_default_config(self, tmp_path):
|
||||||
|
"""Default config when manifest missing."""
|
||||||
|
with patch.object(wn, "CONFIG_PATH", tmp_path / "nonexistent.json"):
|
||||||
|
config = wn.load_automation_config()
|
||||||
|
assert config["lookback_days"] == 7
|
||||||
|
assert config["enabled"] is True
|
||||||
|
|
||||||
|
def test_environment_override(self, tmp_path):
|
||||||
|
"""Environment variables override config."""
|
||||||
|
with patch.dict("os.environ", {"TIMMY_WEEKLY_NARRATIVE_ENABLED": "false"}):
|
||||||
|
with patch.object(wn, "CONFIG_PATH", tmp_path / "nonexistent.json"):
|
||||||
|
config = wn.load_automation_config()
|
||||||
|
assert config["enabled"] is False
|
||||||
|
|
||||||
|
|
||||||
|
class TestMain:
|
||||||
|
"""Test main function."""
|
||||||
|
|
||||||
|
def test_disabled_exits_cleanly(self, tmp_path):
|
||||||
|
"""When disabled and no --force, exits cleanly."""
|
||||||
|
with patch.object(wn, "REPO_ROOT", tmp_path):
|
||||||
|
with patch.object(wn, "load_automation_config", return_value={"enabled": False}):
|
||||||
|
with patch("sys.argv", ["weekly_narrative"]):
|
||||||
|
result = wn.main()
|
||||||
|
assert result == 0
|
||||||
|
|
||||||
|
def test_force_runs_when_disabled(self, tmp_path):
|
||||||
|
"""--force runs even when disabled."""
|
||||||
|
# Setup minimal structure
|
||||||
|
(tmp_path / ".loop" / "retro").mkdir(parents=True)
|
||||||
|
|
||||||
|
with patch.object(wn, "REPO_ROOT", tmp_path):
|
||||||
|
with patch.object(
|
||||||
|
wn,
|
||||||
|
"load_automation_config",
|
||||||
|
return_value={
|
||||||
|
"enabled": False,
|
||||||
|
"lookback_days": 7,
|
||||||
|
"gitea_api": "http://localhost:3000/api/v1",
|
||||||
|
"repo_slug": "test/repo",
|
||||||
|
"token_file": "~/.hermes/gitea_token",
|
||||||
|
},
|
||||||
|
):
|
||||||
|
with patch.object(wn, "GiteaClient") as mock_client:
|
||||||
|
mock_instance = MagicMock()
|
||||||
|
mock_instance.is_available.return_value = False
|
||||||
|
mock_client.return_value = mock_instance
|
||||||
|
|
||||||
|
with patch("sys.argv", ["weekly_narrative", "--force"]):
|
||||||
|
result = wn.main()
|
||||||
|
# Should complete without error even though Gitea unavailable
|
||||||
|
assert result == 0
|
||||||
|
|
||||||
|
|
||||||
|
class TestGiteaClient:
|
||||||
|
"""Test Gitea API client."""
|
||||||
|
|
||||||
|
def test_is_available_when_unavailable(self):
|
||||||
|
"""is_available returns False when server down."""
|
||||||
|
config = {"gitea_api": "http://localhost:99999", "repo_slug": "test/repo"}
|
||||||
|
client = wn.GiteaClient(config, None)
|
||||||
|
|
||||||
|
# Should return False without raising
|
||||||
|
assert client.is_available() is False
|
||||||
|
|
||||||
|
def test_headers_with_token(self):
|
||||||
|
"""Headers include Authorization when token provided."""
|
||||||
|
config = {"gitea_api": "http://localhost:3000", "repo_slug": "test/repo"}
|
||||||
|
client = wn.GiteaClient(config, "test-token")
|
||||||
|
|
||||||
|
headers = client._headers()
|
||||||
|
assert headers["Authorization"] == "token test-token"
|
||||||
|
|
||||||
|
def test_headers_without_token(self):
|
||||||
|
"""Headers don't include Authorization when no token."""
|
||||||
|
config = {"gitea_api": "http://localhost:3000", "repo_slug": "test/repo"}
|
||||||
|
client = wn.GiteaClient(config, None)
|
||||||
|
|
||||||
|
headers = client._headers()
|
||||||
|
assert "Authorization" not in headers
|
||||||
@@ -228,6 +228,27 @@
|
|||||||
"max_items": 5
|
"max_items": 5
|
||||||
},
|
},
|
||||||
"outputs": []
|
"outputs": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "weekly_narrative",
|
||||||
|
"name": "Weekly Narrative Summary",
|
||||||
|
"description": "Generates a human-readable weekly summary of work themes, agent contributions, and token economy shifts",
|
||||||
|
"script": "timmy_automations/daily_run/weekly_narrative.py",
|
||||||
|
"category": "daily_run",
|
||||||
|
"enabled": true,
|
||||||
|
"trigger": "scheduled",
|
||||||
|
"schedule": "weekly",
|
||||||
|
"executable": "python3",
|
||||||
|
"config": {
|
||||||
|
"lookback_days": 7,
|
||||||
|
"output_file": ".loop/weekly_narrative.json",
|
||||||
|
"gitea_api": "http://localhost:3000/api/v1",
|
||||||
|
"repo_slug": "rockachopa/Timmy-time-dashboard"
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
".loop/weekly_narrative.json",
|
||||||
|
".loop/weekly_narrative.md"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,6 +17,10 @@
|
|||||||
"manual": {
|
"manual": {
|
||||||
"description": "Run on-demand only",
|
"description": "Run on-demand only",
|
||||||
"automations": ["agent_workspace", "kimi_bootstrap", "kimi_resume", "backfill_retro"]
|
"automations": ["agent_workspace", "kimi_bootstrap", "kimi_resume", "backfill_retro"]
|
||||||
|
},
|
||||||
|
"weekly": {
|
||||||
|
"description": "Run once per week (Sundays)",
|
||||||
|
"automations": ["weekly_narrative"]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"triggers": {
|
"triggers": {
|
||||||
|
|||||||
745
timmy_automations/daily_run/weekly_narrative.py
Normal file
745
timmy_automations/daily_run/weekly_narrative.py
Normal file
@@ -0,0 +1,745 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""Weekly narrative summary generator — human-readable loop analysis.
|
||||||
|
|
||||||
|
Analyzes the past week's activity across the development loop to produce
|
||||||
|
a narrative summary of:
|
||||||
|
- What changed (themes, areas of focus)
|
||||||
|
- How agents and Timmy contributed
|
||||||
|
- Any shifts in tests, triage, or token economy
|
||||||
|
|
||||||
|
The output is designed to be skimmable — a quick read that gives context
|
||||||
|
on the week's progress without drowning in metrics.
|
||||||
|
|
||||||
|
Run: python3 timmy_automations/daily_run/weekly_narrative.py [--json]
|
||||||
|
Env: See timmy_automations/config/automations.json for configuration
|
||||||
|
|
||||||
|
Refs: #719
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from collections import Counter
|
||||||
|
from datetime import UTC, datetime, timedelta
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
from urllib.error import HTTPError, URLError
|
||||||
|
from urllib.request import Request, urlopen
|
||||||
|
|
||||||
|
# ── Configuration ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
|
||||||
|
CONFIG_PATH = Path(__file__).parent.parent / "config" / "automations.json"
|
||||||
|
|
||||||
|
DEFAULT_CONFIG = {
|
||||||
|
"gitea_api": "http://localhost:3000/api/v1",
|
||||||
|
"repo_slug": "rockachopa/Timmy-time-dashboard",
|
||||||
|
"token_file": "~/.hermes/gitea_token",
|
||||||
|
"lookback_days": 7,
|
||||||
|
"output_file": ".loop/weekly_narrative.json",
|
||||||
|
"enabled": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ── Data Loading ───────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
|
||||||
|
def load_automation_config() -> dict:
|
||||||
|
"""Load configuration for weekly_narrative from automations manifest."""
|
||||||
|
config = DEFAULT_CONFIG.copy()
|
||||||
|
if CONFIG_PATH.exists():
|
||||||
|
try:
|
||||||
|
manifest = json.loads(CONFIG_PATH.read_text())
|
||||||
|
for auto in manifest.get("automations", []):
|
||||||
|
if auto.get("id") == "weekly_narrative":
|
||||||
|
config.update(auto.get("config", {}))
|
||||||
|
config["enabled"] = auto.get("enabled", True)
|
||||||
|
break
|
||||||
|
except (json.JSONDecodeError, OSError) as exc:
|
||||||
|
print(f"[weekly_narrative] Warning: Could not load config: {exc}", file=sys.stderr)
|
||||||
|
|
||||||
|
# Environment variable overrides
|
||||||
|
if os.environ.get("TIMMY_GITEA_API"):
|
||||||
|
config["gitea_api"] = os.environ.get("TIMMY_GITEA_API")
|
||||||
|
if os.environ.get("TIMMY_REPO_SLUG"):
|
||||||
|
config["repo_slug"] = os.environ.get("TIMMY_REPO_SLUG")
|
||||||
|
if os.environ.get("TIMMY_GITEA_TOKEN"):
|
||||||
|
config["token"] = os.environ.get("TIMMY_GITEA_TOKEN")
|
||||||
|
if os.environ.get("TIMMY_WEEKLY_NARRATIVE_ENABLED"):
|
||||||
|
config["enabled"] = os.environ.get("TIMMY_WEEKLY_NARRATIVE_ENABLED", "true").lower() == "true"
|
||||||
|
|
||||||
|
return config
|
||||||
|
|
||||||
|
|
||||||
|
def get_token(config: dict) -> str | None:
|
||||||
|
"""Get Gitea token from environment or file."""
|
||||||
|
if "token" in config:
|
||||||
|
return config["token"]
|
||||||
|
|
||||||
|
token_file = Path(config["token_file"]).expanduser()
|
||||||
|
if token_file.exists():
|
||||||
|
return token_file.read_text().strip()
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def load_jsonl(path: Path) -> list[dict]:
|
||||||
|
"""Load a JSONL file, skipping bad lines."""
|
||||||
|
if not path.exists():
|
||||||
|
return []
|
||||||
|
entries = []
|
||||||
|
for line in path.read_text().strip().splitlines():
|
||||||
|
try:
|
||||||
|
entries.append(json.loads(line))
|
||||||
|
except (json.JSONDecodeError, ValueError):
|
||||||
|
continue
|
||||||
|
return entries
|
||||||
|
|
||||||
|
|
||||||
|
def parse_ts(ts_str: str) -> datetime | None:
|
||||||
|
"""Parse an ISO timestamp, tolerating missing tz."""
|
||||||
|
if not ts_str:
|
||||||
|
return None
|
||||||
|
try:
|
||||||
|
dt = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
|
||||||
|
if dt.tzinfo is None:
|
||||||
|
dt = dt.replace(tzinfo=UTC)
|
||||||
|
return dt
|
||||||
|
except (ValueError, TypeError):
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
# ── Gitea API Client ───────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
|
||||||
|
class GiteaClient:
|
||||||
|
"""Simple Gitea API client with graceful degradation."""
|
||||||
|
|
||||||
|
def __init__(self, config: dict, token: str | None):
|
||||||
|
self.api_base = config["gitea_api"].rstrip("/")
|
||||||
|
self.repo_slug = config["repo_slug"]
|
||||||
|
self.token = token
|
||||||
|
self._available: bool | None = None
|
||||||
|
|
||||||
|
def _headers(self) -> dict:
|
||||||
|
headers = {"Accept": "application/json"}
|
||||||
|
if self.token:
|
||||||
|
headers["Authorization"] = f"token {self.token}"
|
||||||
|
return headers
|
||||||
|
|
||||||
|
def _api_url(self, path: str) -> str:
|
||||||
|
return f"{self.api_base}/repos/{self.repo_slug}/{path}"
|
||||||
|
|
||||||
|
def is_available(self) -> bool:
|
||||||
|
"""Check if Gitea API is reachable."""
|
||||||
|
if self._available is not None:
|
||||||
|
return self._available
|
||||||
|
|
||||||
|
try:
|
||||||
|
req = Request(
|
||||||
|
f"{self.api_base}/version",
|
||||||
|
headers=self._headers(),
|
||||||
|
method="GET",
|
||||||
|
)
|
||||||
|
with urlopen(req, timeout=5) as resp:
|
||||||
|
self._available = resp.status == 200
|
||||||
|
return self._available
|
||||||
|
except (HTTPError, URLError, TimeoutError):
|
||||||
|
self._available = False
|
||||||
|
return False
|
||||||
|
|
||||||
|
def get_paginated(self, path: str, params: dict | None = None) -> list:
|
||||||
|
"""Fetch all pages of a paginated endpoint."""
|
||||||
|
all_items = []
|
||||||
|
page = 1
|
||||||
|
limit = 50
|
||||||
|
|
||||||
|
while True:
|
||||||
|
url = self._api_url(path)
|
||||||
|
query_parts = [f"limit={limit}", f"page={page}"]
|
||||||
|
if params:
|
||||||
|
for key, val in params.items():
|
||||||
|
query_parts.append(f"{key}={val}")
|
||||||
|
url = f"{url}?{'&'.join(query_parts)}"
|
||||||
|
|
||||||
|
req = Request(url, headers=self._headers(), method="GET")
|
||||||
|
with urlopen(req, timeout=15) as resp:
|
||||||
|
batch = json.loads(resp.read())
|
||||||
|
|
||||||
|
if not batch:
|
||||||
|
break
|
||||||
|
|
||||||
|
all_items.extend(batch)
|
||||||
|
if len(batch) < limit:
|
||||||
|
break
|
||||||
|
page += 1
|
||||||
|
|
||||||
|
return all_items
|
||||||
|
|
||||||
|
|
||||||
|
# ── Data Collection ────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
|
||||||
|
def collect_cycles_data(since: datetime) -> dict:
|
||||||
|
"""Load cycle retrospective data from the lookback period."""
|
||||||
|
cycles_file = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
|
||||||
|
if not cycles_file.exists():
|
||||||
|
return {"cycles": [], "total": 0, "successes": 0, "failures": 0}
|
||||||
|
|
||||||
|
entries = load_jsonl(cycles_file)
|
||||||
|
recent = []
|
||||||
|
for e in entries:
|
||||||
|
ts = parse_ts(e.get("timestamp", ""))
|
||||||
|
if ts and ts >= since:
|
||||||
|
recent.append(e)
|
||||||
|
|
||||||
|
successes = [e for e in recent if e.get("success")]
|
||||||
|
failures = [e for e in recent if not e.get("success")]
|
||||||
|
|
||||||
|
return {
|
||||||
|
"cycles": recent,
|
||||||
|
"total": len(recent),
|
||||||
|
"successes": len(successes),
|
||||||
|
"failures": len(failures),
|
||||||
|
"success_rate": round(len(successes) / len(recent), 2) if recent else 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def collect_issues_data(client: GiteaClient, since: datetime) -> dict:
|
||||||
|
"""Collect issue activity from Gitea."""
|
||||||
|
if not client.is_available():
|
||||||
|
return {"error": "Gitea unavailable", "issues": [], "closed": [], "opened": []}
|
||||||
|
|
||||||
|
try:
|
||||||
|
issues = client.get_paginated("issues", {"state": "all", "sort": "updated", "limit": 100})
|
||||||
|
except (HTTPError, URLError) as exc:
|
||||||
|
return {"error": str(exc), "issues": [], "closed": [], "opened": []}
|
||||||
|
|
||||||
|
touched = []
|
||||||
|
closed = []
|
||||||
|
opened = []
|
||||||
|
|
||||||
|
for issue in issues:
|
||||||
|
updated_at = issue.get("updated_at", "")
|
||||||
|
created_at = issue.get("created_at", "")
|
||||||
|
|
||||||
|
updated = parse_ts(updated_at)
|
||||||
|
created = parse_ts(created_at)
|
||||||
|
|
||||||
|
if updated and updated >= since:
|
||||||
|
touched.append(issue)
|
||||||
|
|
||||||
|
if issue.get("state") == "closed":
|
||||||
|
closed_at = issue.get("closed_at", "")
|
||||||
|
closed_dt = parse_ts(closed_at)
|
||||||
|
if closed_dt and closed_dt >= since:
|
||||||
|
closed.append(issue)
|
||||||
|
elif created and created >= since:
|
||||||
|
opened.append(issue)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"issues": touched,
|
||||||
|
"closed": closed,
|
||||||
|
"opened": opened,
|
||||||
|
"touched_count": len(touched),
|
||||||
|
"closed_count": len(closed),
|
||||||
|
"opened_count": len(opened),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def collect_prs_data(client: GiteaClient, since: datetime) -> dict:
|
||||||
|
"""Collect PR activity from Gitea."""
|
||||||
|
if not client.is_available():
|
||||||
|
return {"error": "Gitea unavailable", "prs": [], "merged": [], "opened": []}
|
||||||
|
|
||||||
|
try:
|
||||||
|
prs = client.get_paginated("pulls", {"state": "all", "sort": "updated", "limit": 100})
|
||||||
|
except (HTTPError, URLError) as exc:
|
||||||
|
return {"error": str(exc), "prs": [], "merged": [], "opened": []}
|
||||||
|
|
||||||
|
touched = []
|
||||||
|
merged = []
|
||||||
|
opened = []
|
||||||
|
|
||||||
|
for pr in prs:
|
||||||
|
updated_at = pr.get("updated_at", "")
|
||||||
|
created_at = pr.get("created_at", "")
|
||||||
|
merged_at = pr.get("merged_at", "")
|
||||||
|
|
||||||
|
updated = parse_ts(updated_at)
|
||||||
|
created = parse_ts(created_at)
|
||||||
|
merged_dt = parse_ts(merged_at) if merged_at else None
|
||||||
|
|
||||||
|
if updated and updated >= since:
|
||||||
|
touched.append(pr)
|
||||||
|
|
||||||
|
if pr.get("merged") and merged_dt and merged_dt >= since:
|
||||||
|
merged.append(pr)
|
||||||
|
elif created and created >= since:
|
||||||
|
opened.append(pr)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"prs": touched,
|
||||||
|
"merged": merged,
|
||||||
|
"opened": opened,
|
||||||
|
"touched_count": len(touched),
|
||||||
|
"merged_count": len(merged),
|
||||||
|
"opened_count": len(opened),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def collect_triage_data(since: datetime) -> dict:
|
||||||
|
"""Load triage and introspection data."""
|
||||||
|
triage_file = REPO_ROOT / ".loop" / "retro" / "triage.jsonl"
|
||||||
|
insights_file = REPO_ROOT / ".loop" / "retro" / "insights.json"
|
||||||
|
|
||||||
|
triage_entries = load_jsonl(triage_file)
|
||||||
|
recent_triage = [
|
||||||
|
e for e in triage_entries
|
||||||
|
if parse_ts(e.get("timestamp", "")) and parse_ts(e.get("timestamp", "")) >= since
|
||||||
|
]
|
||||||
|
|
||||||
|
insights = {}
|
||||||
|
if insights_file.exists():
|
||||||
|
try:
|
||||||
|
insights = json.loads(insights_file.read_text())
|
||||||
|
except (json.JSONDecodeError, OSError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
return {
|
||||||
|
"triage_runs": len(recent_triage),
|
||||||
|
"triage_entries": recent_triage,
|
||||||
|
"latest_insights": insights,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def collect_token_data(since: datetime) -> dict:
|
||||||
|
"""Load token economy data from the lightning ledger."""
|
||||||
|
# The ledger is in-memory but we can look for any persisted data
|
||||||
|
# For now, return placeholder that will be filled by the ledger module
|
||||||
|
return {
|
||||||
|
"note": "Token economy data is ephemeral — check dashboard for live metrics",
|
||||||
|
"balance_sats": 0, # Placeholder
|
||||||
|
"transactions_week": 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# ── Analysis Functions ─────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
|
||||||
|
def extract_themes(issues: list[dict]) -> list[dict]:
|
||||||
|
"""Extract themes from issue labels."""
|
||||||
|
label_counts = Counter()
|
||||||
|
layer_counts = Counter()
|
||||||
|
type_counts = Counter()
|
||||||
|
|
||||||
|
for issue in issues:
|
||||||
|
for label in issue.get("labels", []):
|
||||||
|
name = label.get("name", "")
|
||||||
|
label_counts[name] += 1
|
||||||
|
|
||||||
|
if name.startswith("layer:"):
|
||||||
|
layer_counts[name.replace("layer:", "")] += 1
|
||||||
|
if name in ("bug", "feature", "refactor", "docs", "test", "chore"):
|
||||||
|
type_counts[name] += 1
|
||||||
|
|
||||||
|
# Top themes (labels excluding layer prefixes)
|
||||||
|
themes = [
|
||||||
|
{"name": name, "count": count}
|
||||||
|
for name, count in label_counts.most_common(10)
|
||||||
|
if not name.startswith(("layer:", "size:"))
|
||||||
|
]
|
||||||
|
|
||||||
|
# Layers
|
||||||
|
layers = [
|
||||||
|
{"name": name, "count": count}
|
||||||
|
for name, count in layer_counts.most_common()
|
||||||
|
]
|
||||||
|
|
||||||
|
# Types
|
||||||
|
types = [
|
||||||
|
{"name": name, "count": count}
|
||||||
|
for name, count in type_counts.most_common()
|
||||||
|
]
|
||||||
|
|
||||||
|
return {
|
||||||
|
"top_labels": themes,
|
||||||
|
"layers": layers,
|
||||||
|
"types": types,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def extract_agent_contributions(issues: list[dict], prs: list[dict], cycles: list[dict]) -> dict:
|
||||||
|
"""Extract agent contribution patterns."""
|
||||||
|
# Count by assignee
|
||||||
|
assignee_counts = Counter()
|
||||||
|
for issue in issues:
|
||||||
|
assignee = issue.get("assignee")
|
||||||
|
if assignee and isinstance(assignee, dict):
|
||||||
|
assignee_counts[assignee.get("login", "unknown")] += 1
|
||||||
|
|
||||||
|
# Count PR authors
|
||||||
|
pr_authors = Counter()
|
||||||
|
for pr in prs:
|
||||||
|
user = pr.get("user")
|
||||||
|
if user and isinstance(user, dict):
|
||||||
|
pr_authors[user.get("login", "unknown")] += 1
|
||||||
|
|
||||||
|
# Check for Kimi mentions in cycle notes
|
||||||
|
kimi_mentions = sum(
|
||||||
|
1 for c in cycles
|
||||||
|
if "kimi" in c.get("notes", "").lower() or "kimi" in c.get("reason", "").lower()
|
||||||
|
)
|
||||||
|
|
||||||
|
return {
|
||||||
|
"active_assignees": [
|
||||||
|
{"login": login, "issues_count": count}
|
||||||
|
for login, count in assignee_counts.most_common()
|
||||||
|
],
|
||||||
|
"pr_authors": [
|
||||||
|
{"login": login, "prs_count": count}
|
||||||
|
for login, count in pr_authors.most_common()
|
||||||
|
],
|
||||||
|
"kimi_mentioned_cycles": kimi_mentions,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def analyze_test_shifts(cycles: list[dict]) -> dict:
    """Aggregate test activity across the week's cycles.

    Returns a placeholder note dict when there are no cycles; otherwise
    totals of tests passed/added, the mean tests-passed per cycle (rounded
    to 1 decimal), and the number of test-focused cycles (``type == "test"``
    or "test" appearing in the cycle notes, case-insensitively).
    """
    if not cycles:
        return {"note": "No cycle data available"}

    passed = sum(c.get("tests_passed", 0) for c in cycles)
    added = sum(c.get("tests_added", 0) for c in cycles)
    # The early return above guarantees len(cycles) > 0 here.
    per_cycle = round(passed / len(cycles), 1)

    focused = sum(
        1
        for c in cycles
        if c.get("type") == "test" or "test" in c.get("notes", "").lower()
    )

    return {
        "total_tests_passed": passed,
        "total_tests_added": added,
        "avg_tests_per_cycle": per_cycle,
        "test_focused_cycles": focused,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def analyze_triage_shifts(triage_data: dict) -> dict:
    """Summarise triage/introspection health from collected triage data.

    Reads ``latest_insights`` (and its ``recommendations`` list) out of the
    triage payload and reports run counts, whether insights exist, how many
    recommendations are high severity, and the three most recent ones.
    """
    latest = triage_data.get("latest_insights", {})
    recs = latest.get("recommendations", [])
    severe = [rec for rec in recs if rec.get("severity") == "high"]

    return {
        "triage_runs": triage_data.get("triage_runs", 0),
        "insights_generated": latest.get("generated_at") is not None,
        "high_priority_recommendations": len(severe),
        "recent_recommendations": recs[:3] if recs else [],
    }
|
||||||
|
|
||||||
|
|
||||||
|
def generate_vibe_summary(
    cycles_data: dict,
    issues_data: dict,
    prs_data: dict,
    themes: dict,
    agent_contrib: dict,
    test_shifts: dict,
    triage_shifts: dict,
) -> dict:
    """Generate the human-readable 'vibe' summary."""
    rate = cycles_data.get("success_rate", 0)
    n_failures = cycles_data.get("failures", 0)
    n_closed = issues_data.get("closed_count", 0)
    n_merged = prs_data.get("merged_count", 0)

    # Bucket the week into one of four moods, checked best-to-worst.
    if rate >= 0.9 and n_closed > 0:
        vibe, vibe_description = (
            "productive",
            "A strong week with solid delivery and healthy success rates.",
        )
    elif rate >= 0.7:
        vibe, vibe_description = (
            "steady",
            "Steady progress with some bumps. Things are moving forward.",
        )
    elif n_failures > cycles_data.get("successes", 0):
        vibe, vibe_description = (
            "struggling",
            "A challenging week with more failures than successes. Time to regroup.",
        )
    else:
        vibe, vibe_description = "quiet", "A lighter week with limited activity."

    # The top three layer themes become the focus areas.
    focus_areas = [
        f"{layer['name']} ({layer['count']} items)"
        for layer in themes.get("layers", [])[:3]
    ]

    # Call out the busiest assignee, if anyone was assigned at all.
    agent_summary = ""
    assignees = agent_contrib.get("active_assignees", [])
    if assignees:
        lead = assignees[0]
        agent_summary = f"{lead['login']} led with {lead['issues_count']} assigned issues."

    # Notable events, with a neutral fallback so the list is never empty.
    notable = []
    if n_merged > 5:
        notable.append(f"{n_merged} PRs merged — high integration velocity")
    if triage_shifts.get("high_priority_recommendations", 0) > 0:
        notable.append("High-priority recommendations from loop introspection")
    if test_shifts.get("test_focused_cycles", 0) > 3:
        notable.append("Strong test coverage focus")
    if not notable:
        notable.append("Regular development flow")

    return {
        "overall": vibe,
        "description": vibe_description,
        "focus_areas": focus_areas,
        "agent_summary": agent_summary,
        "notable_events": notable,
    }
|
||||||
|
|
||||||
|
|
||||||
|
# ── Narrative Generation ───────────────────────────────────────────────────
|
||||||
|
|
||||||
|
|
||||||
|
def generate_narrative(
    cycles_data: dict,
    issues_data: dict,
    prs_data: dict,
    triage_data: dict,
    themes: dict,
    agent_contrib: dict,
    test_shifts: dict,
    triage_shifts: dict,
    token_data: dict,
    since: datetime,
    until: datetime,
) -> dict:
    """Generate the complete weekly narrative.

    Combines the raw collected data (cycles, issues, PRs, triage, tokens)
    with the derived analyses (themes, agent contributions, test/triage
    shifts) into a single JSON-serialisable report dict.

    Args:
        since: Start of the reporting period (timezone-aware).
        until: End of the reporting period (timezone-aware).

    Returns:
        The narrative dict, including a computed ``vibe`` section and a
        ``period`` block describing the window covered.
    """
    vibe = generate_vibe_summary(
        cycles_data, issues_data, prs_data, themes, agent_contrib, test_shifts, triage_shifts
    )

    return {
        "generated_at": datetime.now(UTC).isoformat(),
        "period": {
            "start": since.isoformat(),
            "end": until.isoformat(),
            # Fix: was hard-coded to 7, silently wrong when the lookback is
            # overridden via --days or the lookback_days config setting.
            "days": (until - since).days,
        },
        "vibe": vibe,
        "activity": {
            "cycles": {
                "total": cycles_data.get("total", 0),
                "successes": cycles_data.get("successes", 0),
                "failures": cycles_data.get("failures", 0),
                "success_rate": cycles_data.get("success_rate", 0),
            },
            "issues": {
                "touched": issues_data.get("touched_count", 0),
                "closed": issues_data.get("closed_count", 0),
                "opened": issues_data.get("opened_count", 0),
            },
            "pull_requests": {
                "touched": prs_data.get("touched_count", 0),
                "merged": prs_data.get("merged_count", 0),
                "opened": prs_data.get("opened_count", 0),
            },
        },
        "themes": themes,
        "agents": agent_contrib,
        "test_health": test_shifts,
        "triage_health": triage_shifts,
        "token_economy": token_data,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def generate_markdown_summary(narrative: dict) -> str:
    """Generate a human-readable markdown summary of a narrative dict.

    Robustness fix: the period header previously used direct indexing
    (``narrative["period"]["start"]``) and raised KeyError on a partial
    narrative, while every other field degraded gracefully via ``.get``.
    It now falls back to empty strings like the rest of the renderer.

    Args:
        narrative: Output of ``generate_narrative`` (possibly partial).

    Returns:
        The rendered markdown document as a single string.
    """
    vibe = narrative.get("vibe", {})
    activity = narrative.get("activity", {})
    cycles = activity.get("cycles", {})
    issues = activity.get("issues", {})
    prs = activity.get("pull_requests", {})
    period = narrative.get("period", {})

    lines = [
        "# Weekly Narrative Summary",
        "",
        # [:10] trims an ISO timestamp down to its YYYY-MM-DD date part.
        f"**Period:** {period.get('start', '')[:10]} to {period.get('end', '')[:10]}",
        f"**Vibe:** {vibe.get('overall', 'unknown').title()}",
        "",
        f"{vibe.get('description', '')}",
        "",
        "## Activity Highlights",
        "",
        f"- **Development Cycles:** {cycles.get('total', 0)} total ({cycles.get('successes', 0)} success, {cycles.get('failures', 0)} failure)",
        f"- **Issues:** {issues.get('closed', 0)} closed, {issues.get('opened', 0)} opened",
        f"- **Pull Requests:** {prs.get('merged', 0)} merged, {prs.get('opened', 0)} opened",
        "",
    ]

    # Focus areas (omitted entirely when empty).
    focus = vibe.get("focus_areas", [])
    if focus:
        lines.append("## Focus Areas")
        lines.append("")
        for area in focus:
            lines.append(f"- {area}")
        lines.append("")

    # Agent contributions.
    agent_summary = vibe.get("agent_summary", "")
    if agent_summary:
        lines.append("## Agent Activity")
        lines.append("")
        lines.append(agent_summary)
        lines.append("")

    # Notable events.
    notable = vibe.get("notable_events", [])
    if notable:
        lines.append("## Notable Events")
        lines.append("")
        for event in notable:
            lines.append(f"- {event}")
        lines.append("")

    # Triage health — only surfaced when something is high priority.
    triage = narrative.get("triage_health", {})
    if triage.get("high_priority_recommendations", 0) > 0:
        lines.append("## Triage Notes")
        lines.append("")
        lines.append(f"⚠️ {triage['high_priority_recommendations']} high-priority recommendation(s) from loop introspection.")
        lines.append("")
        for rec in triage.get("recent_recommendations", [])[:2]:
            lines.append(f"- **{rec.get('category', 'general')}:** {rec.get('finding', '')}")
        lines.append("")

    return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
# ── Main ───────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the weekly narrative generator."""
    parser = argparse.ArgumentParser(
        description="Generate weekly narrative summary of work and vibes",
    )
    parser.add_argument(
        "--json", "-j",
        action="store_true",
        help="Output as JSON instead of markdown",
    )
    parser.add_argument(
        "--output", "-o",
        type=str,
        default=None,
        help="Output file path (default from config)",
    )
    parser.add_argument(
        "--days",
        type=int,
        default=None,
        help="Override lookback days (default 7)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Run even if disabled in config",
    )
    return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
    """Entry point: collect activity data, analyse it, and write outputs.

    Writes the narrative as JSON plus a markdown twin, then echoes one of
    them to stdout. Returns 0 on success (including the disabled-skip path).
    """
    args = parse_args()
    cfg = load_automation_config()

    # Honour the config kill-switch unless the user explicitly forces a run.
    if not cfg.get("enabled", True) and not args.force:
        print("[weekly_narrative] Skipped — weekly narrative is disabled in config")
        print("[weekly_narrative] Use --force to run anyway")
        return 0

    # Reporting window: CLI override wins, then config, then a week.
    days = cfg.get("lookback_days", 7) if args.days is None else args.days
    until = datetime.now(UTC)
    since = until - timedelta(days=days)

    print(f"[weekly_narrative] Generating narrative for the past {days} days...")

    # Gitea client — collection degrades gracefully when the API is down.
    client = GiteaClient(cfg, get_token(cfg))
    if not client.is_available():
        print("[weekly_narrative] Warning: Gitea API unavailable — will use local data only")

    # Raw data collection.
    cycles_data = collect_cycles_data(since)
    issues_data = collect_issues_data(client, since)
    prs_data = collect_prs_data(client, since)
    triage_data = collect_triage_data(since)
    token_data = collect_token_data(since)

    # Derived analyses.
    themes = extract_themes(issues_data.get("issues", []))
    agent_contrib = extract_agent_contributions(
        issues_data.get("issues", []),
        prs_data.get("prs", []),
        cycles_data.get("cycles", []),
    )
    test_shifts = analyze_test_shifts(cycles_data.get("cycles", []))
    triage_shifts = analyze_triage_shifts(triage_data)

    # Assemble the full narrative.
    narrative = generate_narrative(
        cycles_data,
        issues_data,
        prs_data,
        triage_data,
        themes,
        agent_contrib,
        test_shifts,
        triage_shifts,
        token_data,
        since,
        until,
    )

    # Write the JSON report, with a markdown summary alongside it.
    out_path = REPO_ROOT / (args.output or cfg.get("output_file", ".loop/weekly_narrative.json"))
    out_path.parent.mkdir(parents=True, exist_ok=True)
    out_path.write_text(json.dumps(narrative, indent=2) + "\n")

    md_path = out_path.with_suffix(".md")
    md_path.write_text(generate_markdown_summary(narrative))

    # Echo to stdout in whichever format the caller asked for.
    if args.json:
        print(json.dumps(narrative, indent=2))
    else:
        print()
        print(generate_markdown_summary(narrative))

    print(f"\n[weekly_narrative] Written to: {out_path}")
    print(f"[weekly_narrative] Markdown summary: {md_path}")

    return 0
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())
|
||||||
Reference in New Issue
Block a user