Compare commits
3 Commits
kimi/issue
...
kimi/issue
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f6100b111d | ||
| 7e983fcdb3 | |||
| 46f89d59db |
@@ -32,6 +32,7 @@ from dashboard.routes.briefing import router as briefing_router
|
||||
from dashboard.routes.calm import router as calm_router
|
||||
from dashboard.routes.chat_api import router as chat_api_router
|
||||
from dashboard.routes.chat_api_v1 import router as chat_api_v1_router
|
||||
from dashboard.routes.daily_run import router as daily_run_router
|
||||
from dashboard.routes.db_explorer import router as db_explorer_router
|
||||
from dashboard.routes.discord import router as discord_router
|
||||
from dashboard.routes.experiments import router as experiments_router
|
||||
@@ -625,6 +626,7 @@ app.include_router(db_explorer_router)
|
||||
app.include_router(world_router)
|
||||
app.include_router(matrix_router)
|
||||
app.include_router(tower_router)
|
||||
app.include_router(daily_run_router)
|
||||
|
||||
|
||||
@app.websocket("/ws")
|
||||
|
||||
425
src/dashboard/routes/daily_run.py
Normal file
425
src/dashboard/routes/daily_run.py
Normal file
@@ -0,0 +1,425 @@
|
||||
"""Daily Run metrics routes — dashboard card for triage and session metrics."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
from datetime import UTC, datetime, timedelta
|
||||
from pathlib import Path
|
||||
from urllib.error import HTTPError, URLError
|
||||
from urllib.request import Request as UrlRequest
|
||||
from urllib.request import urlopen
|
||||
|
||||
from fastapi import APIRouter, Request
|
||||
from fastapi.responses import HTMLResponse, JSONResponse
|
||||
|
||||
from config import settings
|
||||
from dashboard.templating import templates
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(tags=["daily-run"])
|
||||
|
||||
REPO_ROOT = Path(settings.repo_root)
|
||||
CONFIG_PATH = REPO_ROOT / "timmy_automations" / "config" / "daily_run.json"
|
||||
|
||||
# Fallback configuration used when no config file or environment overrides
# are present. Keys may be replaced by the "orchestrator" section of the
# daily_run.json config file and by TIMMY_* environment variables.
DEFAULT_CONFIG = {
    "gitea_api": "http://localhost:3000/api/v1",
    "repo_slug": "rockachopa/Timmy-time-dashboard",
    "token_file": "~/.hermes/gitea_token",
    "layer_labels_prefix": "layer:",
}

# Gitea labels marking which Daily Run layer an issue belongs to; metrics
# are reported per label in this order.
LAYER_LABELS = ["layer:triage", "layer:micro-fix", "layer:tests", "layer:economy"]
|
||||
|
||||
|
||||
def _load_config() -> dict:
    """Build the effective daily-run configuration.

    Starts from DEFAULT_CONFIG, layers in the "orchestrator" section of the
    JSON config file when it exists and parses, then applies TIMMY_*
    environment-variable overrides. Returns a plain dict.
    """
    merged = dict(DEFAULT_CONFIG)

    if CONFIG_PATH.exists():
        try:
            file_config = json.loads(CONFIG_PATH.read_text())
        except (json.JSONDecodeError, OSError) as exc:
            logger.debug("Could not load daily_run config: %s", exc)
        else:
            if "orchestrator" in file_config:
                merged.update(file_config["orchestrator"])

    # Environment variables win over file values and defaults. An empty
    # string counts as unset (same truthiness check as before).
    env_overrides = {
        "gitea_api": "TIMMY_GITEA_API",
        "repo_slug": "TIMMY_REPO_SLUG",
        "token": "TIMMY_GITEA_TOKEN",
    }
    for config_key, env_name in env_overrides.items():
        if value := os.environ.get(env_name):
            merged[config_key] = value

    return merged
|
||||
|
||||
|
||||
def _get_token(config: dict) -> str | None:
|
||||
"""Get Gitea token from environment or file."""
|
||||
if "token" in config:
|
||||
return config["token"]
|
||||
|
||||
token_file = Path(config["token_file"]).expanduser()
|
||||
if token_file.exists():
|
||||
return token_file.read_text().strip()
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class GiteaClient:
|
||||
"""Simple Gitea API client with graceful degradation."""
|
||||
|
||||
def __init__(self, config: dict, token: str | None):
|
||||
self.api_base = config["gitea_api"].rstrip("/")
|
||||
self.repo_slug = config["repo_slug"]
|
||||
self.token = token
|
||||
self._available: bool | None = None
|
||||
|
||||
def _headers(self) -> dict:
|
||||
headers = {"Accept": "application/json"}
|
||||
if self.token:
|
||||
headers["Authorization"] = f"token {self.token}"
|
||||
return headers
|
||||
|
||||
def _api_url(self, path: str) -> str:
|
||||
return f"{self.api_base}/repos/{self.repo_slug}/{path}"
|
||||
|
||||
def is_available(self) -> bool:
|
||||
"""Check if Gitea API is reachable."""
|
||||
if self._available is not None:
|
||||
return self._available
|
||||
|
||||
try:
|
||||
req = UrlRequest(
|
||||
f"{self.api_base}/version",
|
||||
headers=self._headers(),
|
||||
method="GET",
|
||||
)
|
||||
with urlopen(req, timeout=5) as resp:
|
||||
self._available = resp.status == 200
|
||||
return self._available
|
||||
except (HTTPError, URLError, TimeoutError):
|
||||
self._available = False
|
||||
return False
|
||||
|
||||
def get_paginated(self, path: str, params: dict | None = None) -> list:
|
||||
"""Fetch all pages of a paginated endpoint."""
|
||||
all_items = []
|
||||
page = 1
|
||||
limit = 50
|
||||
|
||||
while True:
|
||||
url = self._api_url(path)
|
||||
query_parts = [f"limit={limit}", f"page={page}"]
|
||||
if params:
|
||||
for key, val in params.items():
|
||||
query_parts.append(f"{key}={val}")
|
||||
url = f"{url}?{'&'.join(query_parts)}"
|
||||
|
||||
req = UrlRequest(url, headers=self._headers(), method="GET")
|
||||
with urlopen(req, timeout=15) as resp:
|
||||
batch = json.loads(resp.read())
|
||||
|
||||
if not batch:
|
||||
break
|
||||
|
||||
all_items.extend(batch)
|
||||
if len(batch) < limit:
|
||||
break
|
||||
page += 1
|
||||
|
||||
return all_items
|
||||
|
||||
|
||||
@dataclass
class LayerMetrics:
    """Issue counts for one layer over the current and previous windows."""

    name: str  # layer name without the "layer:" prefix
    label: str  # full Gitea label, e.g. "layer:triage"
    current_count: int
    previous_count: int

    @property
    def trend(self) -> str:
        """Arrow glyph summarising the change versus the previous window."""
        if self.previous_count == 0:
            # No baseline: flat when still zero, otherwise up.
            return "→" if self.current_count == 0 else "↑"
        change_pct = 100.0 * (self.current_count - self.previous_count) / self.previous_count
        if change_pct > 20:
            return "↑↑"
        if change_pct > 5:
            return "↑"
        if change_pct < -20:
            return "↓↓"
        if change_pct < -5:
            return "↓"
        return "→"

    @property
    def trend_color(self) -> str:
        """CSS color variable matching the trend direction."""
        arrow = self.trend
        if "↑" in arrow:
            return "var(--green)"  # More work = positive
        if "↓" in arrow:
            return "var(--amber)"  # Less work = caution
        return "var(--text-dim)"
|
||||
|
||||
|
||||
@dataclass
class DailyRunMetrics:
    """Aggregate Daily Run metrics rendered by the dashboard card."""

    sessions_completed: int
    sessions_previous: int
    layers: list[LayerMetrics]
    total_touched_current: int
    total_touched_previous: int
    lookback_days: int
    generated_at: str  # ISO-8601 timestamp of when the metrics were built

    @property
    def sessions_trend(self) -> str:
        """Arrow glyph for completed sessions versus the previous window."""
        if self.sessions_previous == 0:
            return "→" if self.sessions_completed == 0 else "↑"
        pct_change = 100.0 * (self.sessions_completed - self.sessions_previous) / self.sessions_previous
        if pct_change > 20:
            return "↑↑"
        if pct_change > 5:
            return "↑"
        if pct_change < -20:
            return "↓↓"
        if pct_change < -5:
            return "↓"
        return "→"

    @property
    def sessions_trend_color(self) -> str:
        """CSS color variable matching the sessions trend direction."""
        arrow = self.sessions_trend
        if "↑" in arrow:
            return "var(--green)"
        if "↓" in arrow:
            return "var(--amber)"
        return "var(--text-dim)"
|
||||
|
||||
|
||||
def _extract_layer(labels: list[dict]) -> str | None:
|
||||
"""Extract layer label from issue labels."""
|
||||
for label in labels:
|
||||
name = label.get("name", "")
|
||||
if name.startswith("layer:"):
|
||||
return name.replace("layer:", "")
|
||||
return None
|
||||
|
||||
|
||||
def _load_cycle_data(days: int = 14) -> dict:
    """Count successful cycle retrospectives in two adjacent time windows.

    Reads .loop/retro/cycles.jsonl under the repo root and returns
    {"current": successes in the last *days* days,
     "previous": successes in the *days* days before that}.
    Missing files, unreadable lines and bad timestamps degrade to zeros.
    """
    retro_file = REPO_ROOT / ".loop" / "retro" / "cycles.jsonl"
    if not retro_file.exists():
        return {"current": 0, "previous": 0}

    try:
        records = []
        for raw_line in retro_file.read_text().strip().splitlines():
            try:
                records.append(json.loads(raw_line))
            except json.JSONDecodeError:
                continue  # skip malformed lines, keep the rest

        now = datetime.now(UTC)
        window_start = now - timedelta(days=days)
        prior_start = now - timedelta(days=days * 2)

        counts = {"current": 0, "previous": 0}
        for record in records:
            stamp = record.get("timestamp", "")
            if not stamp:
                continue
            try:
                when = datetime.fromisoformat(stamp.replace("Z", "+00:00"))
            except (ValueError, TypeError):
                continue
            # Only successful cycles count toward either window.
            if not record.get("success", False):
                continue
            if when >= window_start:
                counts["current"] += 1
            elif when >= prior_start:
                counts["previous"] += 1

        return counts
    except (OSError, ValueError) as exc:
        logger.debug("Failed to load cycle data: %s", exc)
        return {"current": 0, "previous": 0}
|
||||
|
||||
|
||||
def _fetch_layer_metrics(
    client: GiteaClient, lookback_days: int = 7
) -> tuple[list[LayerMetrics], int, int]:
    """Fetch per-layer issue-touch counts from Gitea.

    For every label in LAYER_LABELS, counts issues whose updated_at falls in
    the last *lookback_days* days (current window) or the *lookback_days*
    before that (previous window). Returns (layers, total_current,
    total_previous). A failed fetch for one layer records zero counts for it
    rather than failing the whole call.
    """
    now = datetime.now(UTC)
    current_cutoff = now - timedelta(days=lookback_days)
    previous_cutoff = now - timedelta(days=lookback_days * 2)

    layers: list[LayerMetrics] = []
    total_current = 0
    total_previous = 0

    for layer_label in LAYER_LABELS:
        layer_name = layer_label.removeprefix("layer:")
        current_count = 0
        previous_count = 0
        try:
            # Both open and closed issues carrying this layer label.
            # FIX: do not pass "limit" here — get_paginated already appends
            # its own limit/page query parameters, and the previous duplicate
            # limit key could make the server return a page size that breaks
            # get_paginated's "short page means last page" termination check.
            issues = client.get_paginated(
                "issues",
                {"state": "all", "labels": layer_label},
            )

            for issue in issues:
                updated_at = issue.get("updated_at", "")
                if not updated_at:
                    continue
                try:
                    updated = datetime.fromisoformat(updated_at.replace("Z", "+00:00"))
                except (ValueError, TypeError):
                    continue
                if updated >= current_cutoff:
                    current_count += 1
                elif updated >= previous_cutoff:
                    previous_count += 1
        except (HTTPError, URLError) as exc:
            logger.debug("Failed to fetch issues for %s: %s", layer_label, exc)
            current_count = 0
            previous_count = 0

        layers.append(
            LayerMetrics(
                name=layer_name,
                label=layer_label,
                current_count=current_count,
                previous_count=previous_count,
            )
        )
        total_current += current_count
        total_previous += previous_count

    return layers, total_current, total_previous
|
||||
|
||||
|
||||
def _get_metrics(lookback_days: int = 7) -> DailyRunMetrics | None:
    """Assemble Daily Run metrics from Gitea issues and cycle retrospectives.

    Returns None when the Gitea API is unreachable or any step of metric
    collection raises — callers render an "unavailable" state instead.
    """
    config = _load_config()
    client = GiteaClient(config, _get_token(config))

    if not client.is_available():
        logger.debug("Gitea API not available for Daily Run metrics")
        return None

    try:
        # Per-layer counts come from Gitea issues; session counts come from
        # the local cycle-retrospective log.
        layers, total_current, total_previous = _fetch_layer_metrics(client, lookback_days)
        cycle_data = _load_cycle_data(days=lookback_days)

        return DailyRunMetrics(
            sessions_completed=cycle_data["current"],
            sessions_previous=cycle_data["previous"],
            layers=layers,
            total_touched_current=total_current,
            total_touched_previous=total_previous,
            lookback_days=lookback_days,
            generated_at=datetime.now(UTC).isoformat(),
        )
    except Exception as exc:  # broad by design: metrics are best-effort
        logger.debug("Error fetching Daily Run metrics: %s", exc)
        return None
|
||||
|
||||
|
||||
@router.get("/daily-run/metrics", response_class=JSONResponse)
async def daily_run_metrics_api(lookback_days: int = 7):
    """JSON API endpoint exposing Daily Run metrics.

    Responds 503 with an error payload when the Gitea API is unreachable.
    """
    metrics = _get_metrics(lookback_days)
    if metrics is None:
        return JSONResponse(
            {"error": "Gitea API unavailable", "status": "unavailable"},
            status_code=503,
        )

    layer_payload = [
        {
            "name": layer.name,
            "label": layer.label,
            "current": layer.current_count,
            "previous": layer.previous_count,
            "trend": layer.trend,
        }
        for layer in metrics.layers
    ]

    body = {
        "status": "ok",
        "lookback_days": metrics.lookback_days,
        "sessions": {
            "completed": metrics.sessions_completed,
            "previous": metrics.sessions_previous,
            "trend": metrics.sessions_trend,
        },
        "layers": layer_payload,
        "totals": {
            "current": metrics.total_touched_current,
            "previous": metrics.total_touched_previous,
        },
        "generated_at": metrics.generated_at,
    }
    return JSONResponse(body)
|
||||
|
||||
|
||||
@router.get("/daily-run/panel", response_class=HTMLResponse)
async def daily_run_panel(request: Request, lookback_days: int = 7):
    """Render the Daily Run metrics panel partial (polled via HTMX).

    Always returns the partial; the template shows an "unavailable" state
    when metrics is None (Gitea unreachable).
    """
    metrics = _get_metrics(lookback_days)

    # Build Gitea web URLs for filtered issue lists.
    config = _load_config()
    repo_slug = config.get("repo_slug", "rockachopa/Timmy-time-dashboard")
    gitea_base = config.get("gitea_api", "http://localhost:3000/api/v1").replace("/api/v1", "")

    # Logbook URL: issues carrying any layer label.
    layer_labels = ",".join(LAYER_LABELS)
    logbook_url = f"{gitea_base}/{repo_slug}/issues?labels={layer_labels}&state=all"

    # Per-layer filtered lists. FIX: derived from LAYER_LABELS instead of a
    # second hard-coded layer-name list, so the set of layers cannot drift
    # from the one the metrics are computed over.
    layer_urls = {
        label.removeprefix("layer:"): f"{gitea_base}/{repo_slug}/issues?labels={label}&state=all"
        for label in LAYER_LABELS
    }

    return templates.TemplateResponse(
        request,
        "partials/daily_run_panel.html",
        {
            "metrics": metrics,
            "logbook_url": logbook_url,
            "layer_urls": layer_urls,
            "gitea_available": metrics is not None,
        },
    )
|
||||
@@ -21,6 +21,11 @@
|
||||
</div>
|
||||
{% endcall %}
|
||||
|
||||
<!-- Daily Run Metrics (HTMX polled) -->
|
||||
{% call panel("DAILY RUN", hx_get="/daily-run/panel", hx_trigger="every 60s") %}
|
||||
<div class="mc-loading-placeholder">LOADING...</div>
|
||||
{% endcall %}
|
||||
|
||||
</div>
|
||||
|
||||
<!-- Main panel — swappable via HTMX; defaults to Timmy on load -->
|
||||
|
||||
54
src/dashboard/templates/partials/daily_run_panel.html
Normal file
54
src/dashboard/templates/partials/daily_run_panel.html
Normal file
@@ -0,0 +1,54 @@
|
||||
<div class="card-header mc-panel-header">// DAILY RUN METRICS</div>
|
||||
<div class="card-body p-3">
|
||||
{% if not gitea_available %}
|
||||
<div class="mc-muted" style="font-size: 0.85rem; padding: 8px 0;">
|
||||
<span style="color: var(--amber);">⚠</span> Gitea API unavailable
|
||||
</div>
|
||||
{% else %}
|
||||
{% set m = metrics %}
|
||||
|
||||
<!-- Sessions summary -->
|
||||
<div class="dr-section" style="margin-bottom: 16px;">
|
||||
<div class="dr-row" style="display: flex; justify-content: space-between; align-items: center; margin-bottom: 8px;">
|
||||
<span class="dr-label" style="font-size: 0.85rem; color: var(--text-dim);">Sessions ({{ m.lookback_days }}d)</span>
|
||||
<a href="{{ logbook_url }}" target="_blank" class="dr-link" style="font-size: 0.75rem; color: var(--green); text-decoration: none;">
|
||||
Logbook →
|
||||
</a>
|
||||
</div>
|
||||
<div class="dr-stat" style="display: flex; align-items: baseline; gap: 8px;">
|
||||
<span class="dr-value" style="font-size: 1.5rem; font-weight: 600; color: var(--text-bright);">{{ m.sessions_completed }}</span>
|
||||
<span class="dr-trend" style="font-size: 0.9rem; color: {{ m.sessions_trend_color }};">{{ m.sessions_trend }}</span>
|
||||
<span class="dr-prev" style="font-size: 0.75rem; color: var(--text-dim);">vs {{ m.sessions_previous }} prev</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Layer breakdown -->
|
||||
<div class="dr-section">
|
||||
<div class="dr-label" style="font-size: 0.85rem; color: var(--text-dim); margin-bottom: 8px;">Issues by Layer</div>
|
||||
<div class="dr-layers" style="display: flex; flex-direction: column; gap: 6px;">
|
||||
{% for layer in m.layers %}
|
||||
<div class="dr-layer-row" style="display: flex; justify-content: space-between; align-items: center;">
|
||||
<a href="{{ layer_urls[layer.name] }}" target="_blank" class="dr-layer-name" style="font-size: 0.8rem; color: var(--text); text-decoration: none; text-transform: capitalize;">
|
||||
{{ layer.name.replace('-', ' ') }}
|
||||
</a>
|
||||
<div class="dr-layer-stat" style="display: flex; align-items: center; gap: 6px;">
|
||||
<span class="dr-layer-value" style="font-size: 0.9rem; font-weight: 500; color: var(--text-bright);">{{ layer.current_count }}</span>
|
||||
<span class="dr-layer-trend" style="font-size: 0.75rem; color: {{ layer.trend_color }}; width: 18px; text-align: center;">{{ layer.trend }}</span>
|
||||
</div>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Total touched -->
|
||||
<div class="dr-section" style="margin-top: 12px; padding-top: 12px; border-top: 1px solid var(--border);">
|
||||
<div class="dr-row" style="display: flex; justify-content: space-between; align-items: center;">
|
||||
<span class="dr-label" style="font-size: 0.8rem; color: var(--text-dim);">Total Issues Touched</span>
|
||||
<div class="dr-total-stat" style="display: flex; align-items: center; gap: 6px;">
|
||||
<span class="dr-total-value" style="font-size: 1rem; font-weight: 600; color: var(--text-bright);">{{ m.total_touched_current }}</span>
|
||||
<span class="dr-total-prev" style="font-size: 0.7rem; color: var(--text-dim);">/ {{ m.total_touched_previous }} prev</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
536
tests/timmy/test_golden_path.py
Normal file
536
tests/timmy/test_golden_path.py
Normal file
@@ -0,0 +1,536 @@
|
||||
"""Tests for the Golden Path generator."""
|
||||
|
||||
import json
|
||||
from datetime import UTC, datetime
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from timmy_automations.daily_run.golden_path import (
|
||||
TIME_ESTIMATES,
|
||||
TYPE_PATTERNS,
|
||||
GiteaClient,
|
||||
GoldenPath,
|
||||
PathItem,
|
||||
build_golden_path,
|
||||
classify_issue_type,
|
||||
estimate_time,
|
||||
extract_size,
|
||||
generate_golden_path,
|
||||
get_token,
|
||||
group_issues_by_type,
|
||||
load_config,
|
||||
score_issue_for_path,
|
||||
)
|
||||
|
||||
|
||||
class TestLoadConfig:
    """Configuration loading behaviour."""

    def test_load_config_defaults(self):
        """Without overrides, the config carries the expected default keys."""
        cfg = load_config()
        for key in ("gitea_api", "repo_slug", "size_labels"):
            assert key in cfg

    def test_load_config_env_override(self, monkeypatch):
        """TIMMY_* environment variables take precedence over defaults."""
        overrides = {
            "TIMMY_GITEA_API": "http://custom:3000/api/v1",
            "TIMMY_REPO_SLUG": "custom/repo",
            "TIMMY_GITEA_TOKEN": "test-token",
        }
        for env_name, value in overrides.items():
            monkeypatch.setenv(env_name, value)

        cfg = load_config()
        assert cfg["gitea_api"] == "http://custom:3000/api/v1"
        assert cfg["repo_slug"] == "custom/repo"
        assert cfg["token"] == "test-token"
|
||||
|
||||
|
||||
class TestGetToken:
    """Token resolution order and fallbacks."""

    def test_get_token_from_config(self):
        """An explicit config token wins over any token file."""
        cfg = {"token": "config-token", "token_file": "~/.test"}
        assert get_token(cfg) == "config-token"

    @patch("pathlib.Path.exists")
    @patch("pathlib.Path.read_text")
    def test_get_token_from_file(self, mock_read, mock_exists):
        """Falls back to reading (and stripping) the token file."""
        mock_exists.return_value = True
        mock_read.return_value = "file-token\n"
        assert get_token({"token_file": "~/.hermes/test_token"}) == "file-token"

    def test_get_token_none(self):
        """No config token and no readable file → None."""
        assert get_token({"token_file": "/nonexistent/path"}) is None
|
||||
|
||||
|
||||
class TestExtractSize:
    """Size-label extraction from Gitea label lists."""

    def test_extract_size_xs(self):
        """Finds size:XS among other labels."""
        assert extract_size([{"name": "size:XS"}, {"name": "bug"}]) == "XS"

    def test_extract_size_s(self):
        """Finds size:S regardless of position."""
        assert extract_size([{"name": "bug"}, {"name": "size:S"}]) == "S"

    def test_extract_size_m(self):
        """Finds size:M when it is the only label."""
        assert extract_size([{"name": "size:M"}]) == "M"

    def test_extract_size_unknown(self):
        """No size:* label → '?'."""
        assert extract_size([{"name": "bug"}, {"name": "feature"}]) == "?"

    def test_extract_size_empty(self):
        """Empty label list → '?'."""
        assert extract_size([]) == "?"
|
||||
|
||||
|
||||
class TestClassifyIssueType:
    """Issue-type classification from title and labels."""

    @staticmethod
    def _issue(title, *label_names):
        """Build a minimal issue dict with the given title and label names."""
        return {"title": title, "labels": [{"name": n} for n in label_names]}

    def test_classify_triage(self):
        """Triage-labelled issues classify as triage."""
        assert classify_issue_type(self._issue("Triage new issues", "triage")) == "triage"

    def test_classify_test(self):
        """Test-labelled issues classify as test."""
        assert classify_issue_type(self._issue("Add unit tests for parser", "test")) == "test"

    def test_classify_fix(self):
        """Bug-labelled issues classify as fix."""
        assert classify_issue_type(self._issue("Fix login bug", "bug")) == "fix"

    def test_classify_docs(self):
        """Docs-labelled issues classify as docs."""
        assert classify_issue_type(self._issue("Update README", "docs")) == "docs"

    def test_classify_refactor(self):
        """Refactor-labelled issues classify as refactor."""
        assert classify_issue_type(self._issue("Refactor validation logic", "refactor")) == "refactor"

    def test_classify_default_to_fix(self):
        """Uncategorised issues fall back to fix."""
        assert classify_issue_type(self._issue("Something vague", "question")) == "fix"

    def test_classify_title_priority(self):
        """Title patterns alone can drive classification."""
        assert classify_issue_type(self._issue("Fix the broken parser")) == "fix"
|
||||
|
||||
|
||||
class TestEstimateTime:
    """Time estimates keyed by (size, type) combination."""

    @staticmethod
    def _issue(title, *label_names):
        """Build a minimal issue dict with the given title and label names."""
        return {"title": title, "labels": [{"name": n} for n in label_names]}

    def test_estimate_xs_fix(self):
        """(XS, fix) → 10 minutes."""
        assert estimate_time(self._issue("Fix typo", "size:XS", "bug")) == 10

    def test_estimate_s_test(self):
        """(S, test) → 15 minutes."""
        assert estimate_time(self._issue("Add test coverage", "size:S", "test")) == 15

    def test_estimate_m_fix(self):
        """(M, fix) → 25 minutes."""
        assert estimate_time(self._issue("Fix complex bug", "size:M", "bug")) == 25

    def test_estimate_unknown_size(self):
        """Missing size label falls back to S: (S, fix) → 15 minutes."""
        assert estimate_time(self._issue("Some fix", "bug")) == 15
|
||||
|
||||
|
||||
class TestScoreIssueForPath:
    """Relative scoring used to order candidate issues."""

    def test_score_prefers_xs(self):
        """Smaller sizes score strictly higher: XS > S > M."""
        by_size = {
            size: score_issue_for_path(
                {"title": "Fix", "labels": [{"name": f"size:{size}"}]}
            )
            for size in ("XS", "S", "M")
        }
        assert by_size["XS"] > by_size["S"]
        assert by_size["S"] > by_size["M"]

    def test_score_prefers_clear_types(self):
        """A recognised type label never lowers the score."""
        typed = {
            "title": "Fix bug",
            "labels": [{"name": "size:S"}, {"name": "bug"}],
        }
        untyped = {
            "title": "Something",
            "labels": [{"name": "size:S"}],
        }
        assert score_issue_for_path(typed) >= score_issue_for_path(untyped)

    def test_score_accepts_criteria(self):
        """An Acceptance Criteria section in the body raises the score."""
        with_ac = {
            "title": "Fix",
            "labels": [{"name": "size:S"}],
            "body": "## Acceptance Criteria\n- [ ] Fix it",
        }
        without_ac = {
            "title": "Fix",
            "labels": [{"name": "size:S"}],
            "body": "Just fix it",
        }
        assert score_issue_for_path(with_ac) > score_issue_for_path(without_ac)
|
||||
|
||||
|
||||
class TestGroupIssuesByType:
    """Grouping and intra-group ordering of issues."""

    def test_groups_by_type(self):
        """Issues land in the bucket matching their classified type."""
        grouped = group_issues_by_type(
            [
                {"title": "Fix bug", "labels": [{"name": "bug"}], "number": 1},
                {"title": "Add test", "labels": [{"name": "test"}], "number": 2},
                {"title": "Another fix", "labels": [{"name": "bug"}], "number": 3},
            ]
        )
        assert len(grouped["fix"]) == 2
        assert len(grouped["test"]) == 1
        assert len(grouped["triage"]) == 0

    def test_sorts_by_score(self):
        """Within a bucket, higher-scoring (smaller) issues come first."""
        grouped = group_issues_by_type(
            [
                {"title": "Fix", "labels": [{"name": "size:M"}], "number": 1},
                {"title": "Fix", "labels": [{"name": "size:XS"}], "number": 2},
                {"title": "Fix", "labels": [{"name": "size:S"}], "number": 3},
            ]
        )
        ordered = [issue["number"] for issue in grouped["fix"]]
        assert ordered[0] == 2   # XS scores highest, comes first
        assert ordered[-1] == 1  # M scores lowest, comes last
|
||||
|
||||
|
||||
class TestBuildGoldenPath:
    """Golden Path assembly from grouped issues."""

    @staticmethod
    def _issue(title, size, number):
        """Build a minimal sized issue dict for path building."""
        return {
            "title": title,
            "labels": [{"name": f"size:{size}"}],
            "number": number,
            "html_url": "",
        }

    @classmethod
    def _grouped(cls, **buckets):
        """Grouped-issues dict with every type bucket present (empty by default)."""
        base = {t: [] for t in ("triage", "fix", "test", "docs", "refactor")}
        base.update(buckets)
        return base

    def test_builds_path_with_all_types(self):
        """Path mixes types and opens with a triage warm-up."""
        grouped = self._grouped(
            triage=[self._issue("Triage", "XS", 1)],
            fix=[self._issue("Fix 1", "S", 2), self._issue("Fix 2", "XS", 3)],
            test=[self._issue("Test", "S", 4)],
        )
        path = build_golden_path(grouped, target_minutes=45)
        assert path.item_count >= 3
        assert path.items[0].issue_type == "triage"  # Warm-up
        assert any(item.issue_type == "test" for item in path.items)

    def test_respects_time_budget(self):
        """Total estimate stays in a sane band around the target."""
        grouped = self._grouped(
            triage=[self._issue("Triage", "S", 1)],
            fix=[self._issue("Fix 1", "S", 2), self._issue("Fix 2", "S", 3)],
            test=[self._issue("Test", "S", 4)],
        )
        path = build_golden_path(grouped, target_minutes=45)
        # Should be in the 30-60 minute neighbourhood.
        assert 20 <= path.total_estimated_minutes <= 70

    def test_no_duplicate_issues(self):
        """Each issue appears at most once in the path."""
        grouped = self._grouped(fix=[self._issue("Fix", "S", 1)])
        path = build_golden_path(grouped, target_minutes=45)
        numbers = [item.number for item in path.items]
        assert len(numbers) == len(set(numbers))

    def test_fallback_when_triage_missing(self):
        """An empty triage bucket still yields a non-empty path."""
        grouped = self._grouped(
            fix=[self._issue("Fix", "XS", 1)],
            test=[self._issue("Test", "XS", 2)],
        )
        path = build_golden_path(grouped, target_minutes=45)
        assert path.item_count > 0
|
||||
|
||||
|
||||
class TestGoldenPathDataclass:
    """GoldenPath aggregate properties and serialisation."""

    def test_total_time_calculation(self):
        """total_estimated_minutes sums the per-item estimates."""
        path = GoldenPath(
            generated_at=datetime.now(UTC).isoformat(),
            target_minutes=45,
            items=[
                PathItem(1, "Test 1", "XS", "fix", 10, ""),
                PathItem(2, "Test 2", "S", "test", 15, ""),
            ],
        )
        assert path.total_estimated_minutes == 25

    def test_to_dict(self):
        """to_dict exposes totals, counts and the item list."""
        path = GoldenPath(
            generated_at="2024-01-01T00:00:00+00:00",
            target_minutes=45,
            items=[PathItem(1, "Test", "XS", "fix", 10, "http://test")],
        )
        payload = path.to_dict()
        assert payload["target_minutes"] == 45
        assert payload["total_estimated_minutes"] == 10
        assert payload["item_count"] == 1
        assert len(payload["items"]) == 1

    def test_to_json(self):
        """to_json produces JSON that round-trips through json.loads."""
        path = GoldenPath(
            generated_at="2024-01-01T00:00:00+00:00",
            target_minutes=45,
            items=[],
        )
        assert json.loads(path.to_json())["target_minutes"] == 45
|
||||
|
||||
|
||||
class TestGiteaClient:
    """GiteaClient construction, headers and availability probing."""

    def test_client_initialization(self):
        """Constructor copies api_base, repo_slug and token from config."""
        client = GiteaClient(
            {"gitea_api": "http://test:3000/api/v1", "repo_slug": "test/repo"},
            "token123",
        )
        assert client.api_base == "http://test:3000/api/v1"
        assert client.repo_slug == "test/repo"
        assert client.token == "token123"

    def test_headers_with_token(self):
        """Auth header is included when a token is set."""
        client = GiteaClient(
            {"gitea_api": "http://test", "repo_slug": "test/repo"}, "mytoken"
        )
        headers = client._headers()
        assert headers["Authorization"] == "token mytoken"
        assert headers["Accept"] == "application/json"

    def test_headers_without_token(self):
        """No auth header without a token."""
        client = GiteaClient(
            {"gitea_api": "http://test", "repo_slug": "test/repo"}, None
        )
        headers = client._headers()
        assert "Authorization" not in headers
        assert headers["Accept"] == "application/json"

    @patch("timmy_automations.daily_run.golden_path.urlopen")
    def test_is_available_success(self, mock_urlopen):
        """A 200 from /version marks the API as available."""
        mock_response = MagicMock()
        mock_response.status = 200
        mock_context = MagicMock()
        mock_context.__enter__ = MagicMock(return_value=mock_response)
        mock_context.__exit__ = MagicMock(return_value=False)
        mock_urlopen.return_value = mock_context

        client = GiteaClient(
            {"gitea_api": "http://test", "repo_slug": "test/repo"}, None
        )
        assert client.is_available() is True

    # FIX: patch the name the module actually uses. The module does
    # `from urllib.request import urlopen` (the success test above patches
    # that module-local name), so patching "urllib.request.urlopen" leaves
    # the module's own reference untouched and the test could hit the
    # network instead of the mock.
    @patch("timmy_automations.daily_run.golden_path.urlopen")
    def test_is_available_failure(self, mock_urlopen):
        """A connection error marks the API as unavailable."""
        from urllib.error import URLError

        mock_urlopen.side_effect = URLError("Connection refused")

        client = GiteaClient(
            {"gitea_api": "http://test", "repo_slug": "test/repo"}, None
        )
        assert client.is_available() is False
|
||||
|
||||
|
||||
class TestIntegration:
    """Integration-style tests."""

    @patch("timmy_automations.daily_run.golden_path.GiteaClient")
    def test_generate_golden_path_integration(self, mock_client_class):
        """End-to-end test with mocked Gitea."""
        # (number, title, label names) fixtures — expanded into API-shaped dicts.
        fixtures = [
            (1, "Triage issues", ["size:XS", "triage"]),
            (2, "Fix bug", ["size:S", "bug"]),
            (3, "Add tests", ["size:S", "test"]),
            (4, "Another fix", ["size:XS", "bug"]),
        ]
        fake_client = MagicMock()
        fake_client.is_available.return_value = True
        fake_client.get_paginated.return_value = [
            {
                "number": num,
                "title": title,
                "labels": [{"name": name} for name in label_names],
                "html_url": f"http://test/{num}",
            }
            for num, title, label_names in fixtures
        ]
        mock_client_class.return_value = fake_client

        path = generate_golden_path(target_minutes=45)

        assert path.item_count >= 3
        assert all(item.url.startswith("http://test/") for item in path.items)

    @patch("timmy_automations.daily_run.golden_path.GiteaClient")
    def test_generate_when_unavailable(self, mock_client_class):
        """Should return empty path when Gitea unavailable."""
        fake_client = MagicMock()
        fake_client.is_available.return_value = False
        mock_client_class.return_value = fake_client

        path = generate_golden_path(target_minutes=45)

        assert path.item_count == 0
        assert path.items == []
|
||||
|
||||
class TestTypePatterns:
    """Tests for type pattern definitions."""

    def test_type_patterns_structure(self):
        """Type patterns should have required keys."""
        for patterns in TYPE_PATTERNS.values():
            assert "labels" in patterns
            assert "title" in patterns
            assert isinstance(patterns["labels"], list)
            assert isinstance(patterns["title"], list)

    def test_time_estimates_structure(self):
        """Time estimates should have all sizes."""
        expected_types = ("triage", "fix", "test", "docs", "refactor")
        for size in ("XS", "S", "M"):
            assert size in TIME_ESTIMATES
            for issue_type in expected_types:
                assert issue_type in TIME_ESTIMATES[size]
                minutes = TIME_ESTIMATES[size][issue_type]
                assert isinstance(minutes, int)
                assert minutes > 0
|
||||
232
timmy_automations/BACKLOG_ORGANIZATION.md
Normal file
232
timmy_automations/BACKLOG_ORGANIZATION.md
Normal file
@@ -0,0 +1,232 @@
|
||||
# Timmy Automations Backlog Organization
|
||||
|
||||
**Date:** 2026-03-21
|
||||
**Issue:** #720 - Refine and group Timmy Automations backlog
|
||||
**Organized by:** Kimi agent
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
The Timmy Automations backlog has been organized into **9 milestones** grouping related work into coherent iterations. This document serves as the authoritative reference for milestone purposes and issue assignments.
|
||||
|
||||
---
|
||||
|
||||
## Milestones Overview
|
||||
|
||||
| Milestone | Issues | Due Date | Description |
|
||||
|-----------|--------|----------|-------------|
|
||||
| **Automation Hub v1** | 2 open | 2026-04-10 | Core automation infrastructure - Timmy Automations module, orchestration, and workflow management |
|
||||
| **Daily Run v1** | 8 open | 2026-04-15 | First iteration of the Daily Run automation system - 10-minute ritual, agenda generation, and focus presets |
|
||||
| **Infrastructure** | 3 open | 2026-04-15 | Infrastructure and deployment tasks - DNS, SSL, VPS, and DevOps |
|
||||
| **Dashboard v1** | 0 open | 2026-04-20 | Mission Control dashboard enhancements - Daily Run metrics, triage visibility, and agent scorecards |
|
||||
| **Inbox & Focus v1** | 1 open | 2026-04-25 | Unified inbox view for Timmy - issue triage, focus management, and work selection |
|
||||
| **Token Economy v1** | 4 open | 2026-04-30 | Token-based reward system for agents - rules, scorecards, quests, and adaptive rewards |
|
||||
| **Code Hygiene** | 14 open | 2026-04-30 | Code quality improvements - tests, docstrings, refactoring, and hardcoded value extraction |
|
||||
| **Matrix Staging** | 19 open | 2026-04-05 | The Matrix 3D world staging deployment - UI fixes, WebSocket, Workshop integration |
|
||||
| **OpenClaw Sovereignty** | 11 open | 2026-05-15 | Deploy sovereign AI agent on Hermes VPS - Ollama, OpenClaw, and Matrix portal integration |
|
||||
|
||||
---
|
||||
|
||||
## Detailed Breakdown
|
||||
|
||||
### Automation Hub v1 (Due: 2026-04-10)
|
||||
Core automation infrastructure - the foundation for all other automation work.
|
||||
|
||||
| Issue | Title | Status |
|
||||
|-------|-------|--------|
|
||||
| #720 | Refine and group Timmy Automations backlog | **In Progress** |
|
||||
| #719 | Generate weekly narrative summary of work and vibes | Open |
|
||||
|
||||
**Recommendation:** Complete #719 first to establish the narrative logging pattern before other milestones.
|
||||
|
||||
---
|
||||
|
||||
### Daily Run v1 (Due: 2026-04-15)
|
||||
The 10-minute ritual that starts Timmy's day - agenda generation, focus presets, and health checks.
|
||||
|
||||
| Issue | Title | Status |
|
||||
|-------|-------|--------|
|
||||
| #716 | Add focus-day presets for Daily Run and work selection | Open |
|
||||
| #704 | Enrich Daily Run agenda with classifications and suggestions | Open |
|
||||
| #705 | Add helper to log Daily Run sessions to a logbook issue | Open |
|
||||
| #706 | Capture Daily Run feels notes and surface nudges | Open |
|
||||
| #707 | Integrate Deep Triage outputs into Daily Run agenda | Open |
|
||||
| #708 | Map flakiness and risky areas for test tightening | Open |
|
||||
| #709 | Add a library of test-tightening recipes for Daily Run | Open |
|
||||
| #710 | Implement quick health snapshot before coding | Open |
|
||||
|
||||
**Recommendation:** Start with #710 (health snapshot) as it provides immediate value and informs other Daily Run features. Then #716 (focus presets) to establish the work selection pattern.
|
||||
|
||||
---
|
||||
|
||||
### Infrastructure (Due: 2026-04-15)
|
||||
DevOps and deployment tasks required for production stability.
|
||||
|
||||
| Issue | Title | Status |
|
||||
|-------|-------|--------|
|
||||
| #687 | Pre-commit and pre-push hooks fail on main due to 256 ModuleNotFoundErrors | Open |
|
||||
| #688 | Point all 4 domains to Hermes VPS in GoDaddy DNS | Open |
|
||||
| #689 | Run SSL provisioning after DNS is pointed | Open |
|
||||
|
||||
**Recommendation:** These are sequential - #687 blocks commits, #688 blocks #689. Prioritize #687 for code hygiene.
|
||||
|
||||
---
|
||||
|
||||
### Dashboard v1 (Due: 2026-04-20)
|
||||
Mission Control dashboard for automation visibility. Currently empty as related work is in Token Economy (#712).
|
||||
|
||||
**Note:** Issue #718 (dashboard card for Daily Run) is already closed. Issue #712 (agent scorecards) spans both Token Economy and Dashboard milestones.
|
||||
|
||||
---
|
||||
|
||||
### Inbox & Focus v1 (Due: 2026-04-25)
|
||||
Unified view for issue triage and work selection.
|
||||
|
||||
| Issue | Title | Status |
|
||||
|-------|-------|--------|
|
||||
| #715 | Implement Timmy Inbox unified view | Open |
|
||||
|
||||
**Note:** This is a significant feature that may need to be broken down further once work begins.
|
||||
|
||||
---
|
||||
|
||||
### Token Economy v1 (Due: 2026-04-30)
|
||||
Reward system for agent participation and quality work.
|
||||
|
||||
| Issue | Title | Status |
|
||||
|-------|-------|--------|
|
||||
| #711 | Centralize agent token rules and hooks for automations | Open |
|
||||
| #712 | Generate daily/weekly agent scorecards | Open |
|
||||
| #713 | Implement token quest system for agents | Open |
|
||||
| #714 | Adapt token rewards based on system stress signals | Open |
|
||||
|
||||
**Recommendation:** Start with #711 to establish the token infrastructure, then #712 for visibility. #713 and #714 are enhancements that build on the base system.
|
||||
|
||||
---
|
||||
|
||||
### Code Hygiene (Due: 2026-04-30)
|
||||
Ongoing code quality improvements. These are good "filler" tasks between larger features.
|
||||
|
||||
| Issue | Title | Status |
|
||||
|-------|-------|--------|
|
||||
| #769 | Add unit tests for src/infrastructure/db_pool.py | Open |
|
||||
| #770 | Add unit tests for src/dashboard/routes/health.py | Open |
|
||||
| #771 | Refactor run_agentic_loop() — 120 lines, extract helpers | Open |
|
||||
| #772 | Refactor produce_system_status() — 88 lines, split into sections | Open |
|
||||
| #773 | Add docstrings to public functions in src/dashboard/routes/tasks.py | Open |
|
||||
| #774 | Add docstrings to VoiceTTS.set_rate(), set_volume(), set_voice() | Open |
|
||||
| #775 | Add docstrings to system route functions in src/dashboard/routes/system.py | Open |
|
||||
| #776 | Extract hardcoded PRAGMA busy_timeout=5000 to config | Open |
|
||||
| #777 | DRY up tasks_pending/active/completed — extract shared helper | Open |
|
||||
| #778 | Remove bare `pass` after logged exceptions in src/timmy/tools.py | Open |
|
||||
| #779 | Add unit tests for src/timmy/conversation.py | Open |
|
||||
| #780 | Add unit tests for src/timmy/interview.py | Open |
|
||||
| #781 | Add error handling for missing DB in src/dashboard/routes/tasks.py | Open |
|
||||
| #782 | Extract hardcoded sats limit in consult_grok() to config | Open |
|
||||
|
||||
**Recommendation:** These are independent and can be picked up in any order. Good candidates for when blocked on larger features.
|
||||
|
||||
---
|
||||
|
||||
### Matrix Staging (Due: 2026-04-05)
|
||||
The Matrix 3D world - UI fixes and WebSocket integration for the Workshop.
|
||||
|
||||
**QA Issues:**
|
||||
| Issue | Title |
|
||||
|-------|-------|
|
||||
| #733 | The Matrix staging deployment — 3 issues to fix |
|
||||
| #757 | No landing page or enter button — site loads directly into 3D world |
|
||||
| #758 | WebSocket never connects — VITE_WS_URL is empty in production build |
|
||||
| #759 | Missing Submit Job and Fund Session UI buttons |
|
||||
| #760 | Chat messages silently dropped when WebSocket is offline |
|
||||
| #761 | All routes serve identical content — no client-side router |
|
||||
| #762 | All 5 agents permanently show IDLE state |
|
||||
| #763 | Chat clear button overlaps connection status on small viewports |
|
||||
| #764 | Mobile: status panel overlaps HUD agent count on narrow viewports |
|
||||
|
||||
**UI Enhancement Issues:**
|
||||
| Issue | Title |
|
||||
|-------|-------|
|
||||
| #747 | Add graceful offline mode — show demo mode instead of hanging |
|
||||
| #748 | Add loading spinner/progress bar while 3D scene initializes |
|
||||
| #749 | Add keyboard shortcuts — Escape to close modals, Enter to submit chat |
|
||||
| #750 | Chat input should auto-focus when Workshop panel opens |
|
||||
| #751 | Add connection status indicator with color coding |
|
||||
| #752 | Add dark/light theme toggle |
|
||||
| #753 | Fund Session modal should show explanatory text about what sats do |
|
||||
| #754 | Submit Job modal should validate input before submission |
|
||||
| #755 | Add About/Info panel explaining what The Matrix/Workshop is |
|
||||
| #756 | Add FPS counter visibility toggle — debug-only by default |
|
||||
|
||||
**Note:** This milestone has the earliest due date (2026-04-05) and most issues. Consider splitting into "Matrix Critical" (QA blockers) and "Matrix Polish" (UI enhancements).
|
||||
|
||||
---
|
||||
|
||||
### OpenClaw Sovereignty (Due: 2026-05-15)
|
||||
Deploy a sovereign AI agent on Hermes VPS - the long-term goal of Timmy's independence from cloud APIs.
|
||||
|
||||
| Issue | Title | Status |
|
||||
|-------|-------|--------|
|
||||
| #721 | Research: OpenClaw architecture, deployment modes, and Ollama integration | Open |
|
||||
| #722 | Research: Best small LLMs for agentic tool-calling on constrained hardware | Open |
|
||||
| #723 | Research: OpenClaw SOUL.md and AGENTS.md patterns | Open |
|
||||
| #724 | [1/8] Audit Hermes VPS resources and prepare for OpenClaw deployment | Open |
|
||||
| #725 | [2/8] Install and configure Ollama on Hermes VPS | Open |
|
||||
| #726 | [3/8] Install OpenClaw on Hermes VPS and complete onboarding | Open |
|
||||
| #727 | [4/8] Expose OpenClaw gateway via Tailscale for Matrix portal access | Open |
|
||||
| #728 | [5/8] Create Timmy's SOUL.md and AGENTS.md — sovereign agent persona | Open |
|
||||
| #729 | [6/8] Integrate OpenClaw chat as a portal/scroll in The Matrix frontend | Open |
|
||||
| #730 | [7/8] Create openclaw-tools Gitea repo — Timmy's sovereign toolbox | Open |
|
||||
| #731 | [8/8] Write sovereignty migration plan — offload tasks from Anthropic to OpenClaw | Open |
|
||||
|
||||
**Note:** This is a research-heavy, sequential milestone. Issues #721-#723 should be completed before implementation begins. Consider creating a research summary document as output from the research issues.
|
||||
|
||||
---
|
||||
|
||||
## Issues Intentionally Left Unassigned
|
||||
|
||||
The following issues remain without milestone assignment by design:
|
||||
|
||||
### Philosophy Issues
|
||||
Ongoing discussion threads that don't fit a milestone structure:
|
||||
- #502, #511, #521, #528, #536, #543, #548, #556, #566, #571, #583, #588, #596, #602, #608, #613, #623, #630, #642
|
||||
|
||||
### Feature Ideas / Future Work
|
||||
Ideas that need more definition before milestone assignment:
|
||||
- #654, #653, #652, #651, #650 (ASCII Video showcase)
|
||||
- #664 (Chain Memory song)
|
||||
- #578, #577, #579 (Autonomous action, identity evolution, contextual mastery)
|
||||
|
||||
### Completed Issues
|
||||
Already closed issues remain in their original state without milestone assignment.
|
||||
|
||||
---
|
||||
|
||||
## Recommended Execution Order
|
||||
|
||||
Based on priority and dependencies:
|
||||
|
||||
1. **Automation Hub v1** (April 10) - Foundation for all automation work
|
||||
2. **Daily Run v1** (April 15) - Core developer experience improvement
|
||||
3. **Infrastructure** (April 15) - Unblocks production deployments
|
||||
4. **Matrix Staging** (April 5) - *Parallel track* - UI team work
|
||||
5. **Inbox & Focus v1** (April 25) - Builds on Daily Run patterns
|
||||
6. **Dashboard v1** (April 20) - Visualizes Token Economy data
|
||||
7. **Token Economy v1** (April 30) - Gamification layer
|
||||
8. **Code Hygiene** (April 30) - *Ongoing* - Fill gaps between features
|
||||
9. **OpenClaw Sovereignty** (May 15) - Long-term research and deployment
|
||||
|
||||
---
|
||||
|
||||
## Notes for Future Triage
|
||||
|
||||
- Issues should be assigned to milestones at creation time
|
||||
- Each milestone should have a "Definition of Done" documented
|
||||
- Consider creating epic issues for large milestones (OpenClaw, Matrix)
|
||||
- Weekly triage should review unassigned issues and new arrivals
|
||||
- Milestone due dates should be adjusted based on velocity
|
||||
|
||||
---
|
||||
|
||||
*This document is maintained as part of the Timmy Automations subsystem. Update it when milestone structure changes.*
|
||||
@@ -196,7 +196,7 @@
|
||||
{
|
||||
"id": "daily_run_orchestrator",
|
||||
"name": "Daily Run Orchestrator",
|
||||
"description": "The 10-minute ritual — fetches candidate issues and produces a concise Daily Run agenda plus day summary. Supports focus-day presets for themed work sessions.",
|
||||
"description": "The 10-minute ritual — fetches candidate issues and produces a concise Daily Run agenda plus day summary",
|
||||
"script": "timmy_automations/daily_run/orchestrator.py",
|
||||
"category": "daily_run",
|
||||
"enabled": true,
|
||||
@@ -208,14 +208,24 @@
|
||||
"size_labels": ["size:XS", "size:S"],
|
||||
"max_agenda_items": 3,
|
||||
"lookback_hours": 24,
|
||||
"agenda_time_minutes": 10,
|
||||
"focus_day_presets": [
|
||||
"tests-day",
|
||||
"triage-day",
|
||||
"economy-day",
|
||||
"docs-day",
|
||||
"refactor-day"
|
||||
]
|
||||
"agenda_time_minutes": 10
|
||||
},
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"id": "golden_path",
|
||||
"name": "Golden Path Generator",
|
||||
"description": "Generates coherent 30-60 minute mini-sessions from real Gitea issues — triage, fixes, and tests",
|
||||
"script": "timmy_automations/daily_run/golden_path.py",
|
||||
"category": "daily_run",
|
||||
"enabled": true,
|
||||
"trigger": "manual",
|
||||
"executable": "python3",
|
||||
"config": {
|
||||
"target_minutes": 45,
|
||||
"size_labels": ["size:XS", "size:S", "size:M"],
|
||||
"min_items": 3,
|
||||
"max_items": 5
|
||||
},
|
||||
"outputs": []
|
||||
}
|
||||
|
||||
@@ -1,48 +1,6 @@
|
||||
{
|
||||
"version": "1.0.0",
|
||||
"description": "Daily run schedule configuration",
|
||||
"focus_day_presets": {
|
||||
"tests-day": {
|
||||
"description": "Focus on test-related work",
|
||||
"candidate_labels": ["test", "testing", "tests", "coverage"],
|
||||
"size_labels": ["size:XS", "size:S", "size:M"],
|
||||
"title_keywords": ["test", "tests", "testing", "coverage", "pytest", "tox"],
|
||||
"priority_boost": ["test", "testing", "tests"],
|
||||
"agenda_title": "🧪 Tests Day — Focus on Quality"
|
||||
},
|
||||
"triage-day": {
|
||||
"description": "Focus on issue triage and backlog grooming",
|
||||
"candidate_labels": ["triage", "backlog", "needs-review", "grooming"],
|
||||
"size_labels": ["size:XS", "size:S", "size:M", "size:L"],
|
||||
"title_keywords": ["triage", "review", "categorize", "organize"],
|
||||
"priority_boost": ["triage", "backlog"],
|
||||
"agenda_title": "📋 Triage Day — Organize and Prioritize"
|
||||
},
|
||||
"economy-day": {
|
||||
"description": "Focus on payment, pricing, and economic features",
|
||||
"candidate_labels": ["economy", "payment", "pricing", "l402", "lightning", "bitcoin"],
|
||||
"size_labels": ["size:XS", "size:S", "size:M"],
|
||||
"title_keywords": ["payment", "price", "economy", "invoice", "lightning", "bitcoin", "l402"],
|
||||
"priority_boost": ["economy", "payment", "lightning"],
|
||||
"agenda_title": "⚡ Economy Day — Build the Circular Economy"
|
||||
},
|
||||
"docs-day": {
|
||||
"description": "Focus on documentation and guides",
|
||||
"candidate_labels": ["docs", "documentation", "readme", "guide"],
|
||||
"size_labels": ["size:XS", "size:S", "size:M"],
|
||||
"title_keywords": ["doc", "docs", "documentation", "readme", "guide", "tutorial"],
|
||||
"priority_boost": ["docs", "documentation"],
|
||||
"agenda_title": "📚 Docs Day — Knowledge and Clarity"
|
||||
},
|
||||
"refactor-day": {
|
||||
"description": "Focus on code cleanup and refactoring",
|
||||
"candidate_labels": ["refactor", "cleanup", "debt", "tech-debt"],
|
||||
"size_labels": ["size:XS", "size:S", "size:M"],
|
||||
"title_keywords": ["refactor", "cleanup", "simplify", "organize", "restructure"],
|
||||
"priority_boost": ["refactor", "cleanup", "debt"],
|
||||
"agenda_title": "🔧 Refactor Day — Clean Code, Clear Mind"
|
||||
}
|
||||
},
|
||||
"schedules": {
|
||||
"every_cycle": {
|
||||
"description": "Run before/after every dev cycle",
|
||||
|
||||
@@ -33,12 +33,6 @@ python3 timmy_automations/daily_run/orchestrator.py --review
|
||||
|
||||
# Output as JSON
|
||||
python3 timmy_automations/daily_run/orchestrator.py --review --json
|
||||
|
||||
# Use a focus-day preset (see Focus-Day Presets section below)
|
||||
python3 timmy_automations/daily_run/orchestrator.py --preset tests-day
|
||||
|
||||
# List available presets
|
||||
python3 timmy_automations/daily_run/orchestrator.py --list-presets
|
||||
```
|
||||
|
||||
## Daily Run Orchestrator
|
||||
@@ -48,7 +42,6 @@ The orchestrator script connects to local Gitea and:
|
||||
1. **Fetches candidate issues** matching configured labels (default: `daily-run` + `size:XS`/`size:S`)
|
||||
2. **Generates a concise agenda** with up to 3 items for approximately 10 minutes of work
|
||||
3. **Review mode** (`--review`): Summarizes the last 24 hours — issues/PRs touched, items closed/merged, test failures
|
||||
4. **Focus-Day Presets** (`--preset`): Biases the agenda toward specific types of work
|
||||
|
||||
### Configuration
|
||||
|
||||
@@ -105,65 +98,6 @@ Candidates considered: 5
|
||||
**Review mode (`--review`):**
|
||||
Adds a day summary section showing issues touched, closed, PRs merged, and any test failures.
|
||||
|
||||
### Focus-Day Presets
|
||||
|
||||
Focus-day presets bias the Daily Run agenda toward specific types of work. Use `--preset <name>` to activate a preset.
|
||||
|
||||
| Preset | Description | Candidate Labels | Use When |
|
||||
|--------|-------------|------------------|----------|
|
||||
| `tests-day` | Focus on test-related work | `test`, `testing`, `tests`, `coverage` | Improving test coverage, fixing flaky tests |
|
||||
| `triage-day` | Issue triage and backlog grooming | `triage`, `backlog`, `needs-review`, `grooming` | Organizing the backlog, reviewing stale issues |
|
||||
| `economy-day` | Payment and economic features | `economy`, `payment`, `pricing`, `l402`, `lightning`, `bitcoin` | Working on Lightning/L402 integration |
|
||||
| `docs-day` | Documentation and guides | `docs`, `documentation`, `readme`, `guide` | Writing docs, updating READMEs |
|
||||
| `refactor-day` | Code cleanup and refactoring | `refactor`, `cleanup`, `debt`, `tech-debt` | Paying down technical debt |
|
||||
|
||||
**How Presets Work:**
|
||||
|
||||
1. **Label Filtering**: Each preset defines its own `candidate_labels` — issues with any of these labels are fetched
|
||||
2. **Size Filtering**: Presets can include larger sizes (e.g., `tests-day` includes `size:M` for bigger test refactors)
|
||||
3. **Priority Boosting**: Issues matching preset priority labels get a scoring bonus (+15 for labels, +8 for title keywords)
|
||||
4. **Title Filtering**: Some presets filter by title keywords to find relevant issues even without labels
|
||||
5. **Custom Agenda Title**: The output header reflects the focus (e.g., "🧪 Tests Day — Focus on Quality")
|
||||
|
||||
**Example:**
|
||||
|
||||
```bash
|
||||
# Run with tests-day preset
|
||||
$ python3 timmy_automations/daily_run/orchestrator.py --preset tests-day
|
||||
|
||||
============================================================
|
||||
🧪 Tests Day — Focus on Quality
|
||||
============================================================
|
||||
Generated: 2026-03-21T15:16:02+00:00
|
||||
Time budget: 10 minutes
|
||||
Candidates considered: 5
|
||||
Focus preset: tests-day
|
||||
|
||||
1. #123 [M] [infra]
|
||||
Title: Add integration tests for payment flow
|
||||
Action: TEST
|
||||
URL: http://localhost:3000/rockachopa/Timmy-time-dashboard/issues/123
|
||||
```
|
||||
|
||||
### Extending Presets
|
||||
|
||||
Presets are defined in `timmy_automations/config/daily_run.json` under `focus_day_presets`. To add a new preset:
|
||||
|
||||
```json
|
||||
{
|
||||
"focus_day_presets": {
|
||||
"my-preset": {
|
||||
"description": "What this preset is for",
|
||||
"candidate_labels": ["label-1", "label-2"],
|
||||
"size_labels": ["size:XS", "size:S"],
|
||||
"title_keywords": ["keyword1", "keyword2"],
|
||||
"priority_boost": ["label-1"],
|
||||
"agenda_title": "🎯 My Preset — Description"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
See `../config/automations.json` for automation manifests and `../config/daily_run.json` for scheduling, orchestrator settings, and focus-day presets.
|
||||
See `../config/automations.json` for automation manifests and `../config/daily_run.json` for scheduling and orchestrator settings.
|
||||
|
||||
583
timmy_automations/daily_run/golden_path.py
Normal file
583
timmy_automations/daily_run/golden_path.py
Normal file
@@ -0,0 +1,583 @@
|
||||
"""Golden Path generator — coherent 30-60 minute mini-sessions from real issues.
|
||||
|
||||
Fetches issues from Gitea and assembles them into ordered sequences forming
|
||||
a coherent mini-session. Each Golden Path includes:
|
||||
- One small triage cleanup
|
||||
- Two micro-fixes (XS/S sized)
|
||||
- One test-improvement task
|
||||
|
||||
All tasks are real issues from the Gitea repository, never synthetic.
|
||||
|
||||
Usage:
|
||||
from timmy_automations.daily_run.golden_path import generate_golden_path
|
||||
    path = generate_golden_path(target_minutes=45)
|
||||
print(path.to_json())
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from urllib.request import Request, urlopen
|
||||
from urllib.error import HTTPError, URLError
|
||||
|
||||
# ── Configuration ─────────────────────────────────────────────────────────

# Repository root: three levels up from this file (timmy_automations/daily_run/).
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
# Shared daily-run config, loaded by load_config() below.
CONFIG_PATH = Path(__file__).parent.parent / "config" / "daily_run.json"

# Fallback settings used when CONFIG_PATH is absent or unreadable.
DEFAULT_CONFIG = {
    "gitea_api": "http://localhost:3000/api/v1",
    "repo_slug": "rockachopa/Timmy-time-dashboard",
    "token_file": "~/.hermes/gitea_token",
    "size_labels": ["size:XS", "size:S", "size:M"],
}

# Time estimates (in minutes) keyed by size label, then issue type.
TIME_ESTIMATES: dict[str, dict[str, int]] = {
    "XS": {"triage": 5, "fix": 10, "test": 10, "docs": 8, "refactor": 8},
    "S": {"triage": 10, "fix": 15, "test": 15, "docs": 12, "refactor": 12},
    "M": {"triage": 15, "fix": 25, "test": 25, "docs": 20, "refactor": 20},
}

# Issue type detection patterns: substring matches against label names and
# issue titles. classify_issue_type() weighs label hits (+2) above title
# hits (+1) when scoring these.
TYPE_PATTERNS: dict[str, dict[str, list[str]]] = {
    "triage": {
        "labels": ["triage", "cleanup", "organize", "sort", "categorize"],
        "title": ["triage", "cleanup", "organize", "sort", "categorize", "clean up"],
    },
    "fix": {
        "labels": ["bug", "fix", "error", "broken"],
        "title": ["fix", "bug", "error", "broken", "repair", "correct"],
    },
    "test": {
        "labels": ["test", "testing", "coverage", "pytest"],
        "title": ["test", "coverage", "pytest", "unit test", "integration test"],
    },
    "docs": {
        "labels": ["docs", "documentation", "readme", "docstring"],
        "title": ["doc", "readme", "comment", "guide", "tutorial"],
    },
    "refactor": {
        "labels": ["refactor", "cleanup", "debt", "maintainability"],
        "title": ["refactor", "cleanup", "simplify", "extract", "reorganize"],
    },
}
|
||||
|
||||
|
||||
def load_config() -> dict:
    """Load golden-path configuration.

    Precedence (lowest to highest): built-in ``DEFAULT_CONFIG``, the
    ``"orchestrator"`` section of ``CONFIG_PATH`` (when present and
    parseable), then ``TIMMY_*`` environment variable overrides.

    Returns:
        A flat settings dict. An unreadable config file is reported on
        stderr and otherwise ignored (best-effort; defaults still apply).
    """
    config = DEFAULT_CONFIG.copy()
    if CONFIG_PATH.exists():
        try:
            file_config = json.loads(CONFIG_PATH.read_text())
            if "orchestrator" in file_config:
                config.update(file_config["orchestrator"])
        except (json.JSONDecodeError, OSError) as exc:
            # Best-effort: warn and fall back to defaults rather than abort.
            print(f"[golden_path] Warning: Could not load config: {exc}", file=sys.stderr)

    # Environment variable overrides — walrus avoids the original's
    # double os.environ.get() lookup per key.
    if api := os.environ.get("TIMMY_GITEA_API"):
        config["gitea_api"] = api
    if slug := os.environ.get("TIMMY_REPO_SLUG"):
        config["repo_slug"] = slug
    if token := os.environ.get("TIMMY_GITEA_TOKEN"):
        config["token"] = token

    return config
||||
|
||||
|
||||
def get_token(config: dict) -> str | None:
|
||||
"""Get Gitea token from environment or file."""
|
||||
if "token" in config:
|
||||
return config["token"]
|
||||
|
||||
token_file = Path(config["token_file"]).expanduser()
|
||||
if token_file.exists():
|
||||
return token_file.read_text().strip()
|
||||
|
||||
return None
|
||||
|
||||
|
||||
# ── Gitea API Client ──────────────────────────────────────────────────────
|
||||
|
||||
|
||||
class GiteaClient:
|
||||
"""Simple Gitea API client with graceful degradation."""
|
||||
|
||||
def __init__(self, config: dict, token: str | None):
|
||||
self.api_base = config["gitea_api"].rstrip("/")
|
||||
self.repo_slug = config["repo_slug"]
|
||||
self.token = token
|
||||
self._available: bool | None = None
|
||||
|
||||
def _headers(self) -> dict:
|
||||
headers = {"Accept": "application/json"}
|
||||
if self.token:
|
||||
headers["Authorization"] = f"token {self.token}"
|
||||
return headers
|
||||
|
||||
def _api_url(self, path: str) -> str:
|
||||
return f"{self.api_base}/repos/{self.repo_slug}/{path}"
|
||||
|
||||
def is_available(self) -> bool:
|
||||
"""Check if Gitea API is reachable."""
|
||||
if self._available is not None:
|
||||
return self._available
|
||||
|
||||
try:
|
||||
req = Request(
|
||||
f"{self.api_base}/version",
|
||||
headers=self._headers(),
|
||||
method="GET",
|
||||
)
|
||||
with urlopen(req, timeout=5) as resp:
|
||||
self._available = resp.status == 200
|
||||
return self._available
|
||||
except (HTTPError, URLError, TimeoutError):
|
||||
self._available = False
|
||||
return False
|
||||
|
||||
def get(self, path: str, params: dict | None = None) -> list | dict:
|
||||
"""Make a GET request to the Gitea API."""
|
||||
url = self._api_url(path)
|
||||
if params:
|
||||
query = "&".join(f"{k}={v}" for k, v in params.items())
|
||||
url = f"{url}?{query}"
|
||||
|
||||
req = Request(url, headers=self._headers(), method="GET")
|
||||
with urlopen(req, timeout=15) as resp:
|
||||
return json.loads(resp.read())
|
||||
|
||||
def get_paginated(self, path: str, params: dict | None = None) -> list:
|
||||
"""Fetch all pages of a paginated endpoint."""
|
||||
all_items = []
|
||||
page = 1
|
||||
limit = 50
|
||||
|
||||
while True:
|
||||
page_params = {"limit": limit, "page": page}
|
||||
if params:
|
||||
page_params.update(params)
|
||||
|
||||
batch = self.get(path, page_params)
|
||||
if not batch:
|
||||
break
|
||||
|
||||
all_items.extend(batch)
|
||||
if len(batch) < limit:
|
||||
break
|
||||
page += 1
|
||||
|
||||
return all_items
|
||||
|
||||
|
||||
# ── Issue Classification ──────────────────────────────────────────────────
|
||||
|
||||
|
||||
def extract_size(labels: list[dict]) -> str:
    """Return the issue's size ("XS", "S", ...) from its label dicts, or "?"."""
    names = (entry.get("name", "") for entry in labels)
    sized = (name for name in names if name.startswith("size:"))
    found = next(sized, None)
    if found is None:
        return "?"
    return found.replace("size:", "").upper()
|
||||
|
||||
|
||||
def classify_issue_type(issue: dict) -> str:
    """Classify an issue into a type based on labels and title."""
    label_names = [entry.get("name", "").lower() for entry in issue.get("labels", [])]
    lowered_title = issue.get("title", "").lower()

    def _match_score(patterns: dict) -> int:
        # A label hit counts double a title hit.
        label_hits = sum(
            2
            for needle in patterns["labels"]
            if any(needle in name for name in label_names)
        )
        title_hits = sum(1 for needle in patterns["title"] if needle in lowered_title)
        return label_hits + title_hits

    scores = {kind: _match_score(patterns) for kind, patterns in TYPE_PATTERNS.items()}

    # Pick the best-scoring type; ties resolve to the first pattern listed.
    if scores:
        winner = max(scores, key=scores.get)
        if scores[winner] > 0:
            return winner

    # Uncategorized issues default to "fix".
    return "fix"
|
||||
|
||||
|
||||
def estimate_time(issue: dict) -> int:
    """Estimate time in minutes for an issue based on size and type."""
    size = extract_size(issue.get("labels", []))
    kind = classify_issue_type(issue)

    # Types without an entry for this size fall back to the "fix" estimate.
    known_for_size = TIME_ESTIMATES.get(size, {})
    if kind not in known_for_size:
        kind = "fix"

    # Unknown sizes borrow the "S" table; 15 minutes is the final default.
    return TIME_ESTIMATES.get(size, TIME_ESTIMATES["S"]).get(kind, 15)
|
||||
|
||||
|
||||
def score_issue_for_path(issue: dict) -> int:
    """Score an issue for Golden Path suitability (higher = better fit).

    Favours small, clearly typed, well-described, recently active issues
    so the generated path stays predictable.
    """
    score = 0
    labels = [entry.get("name", "").lower() for entry in issue.get("labels", [])]
    issue_type = classify_issue_type(issue)

    # Prefer smaller sizes for predictability
    if "size:xs" in labels:
        score += 10
    elif "size:s" in labels:
        score += 7
    elif "size:m" in labels:
        score += 3

    # Prefer issues with clear type labels
    if issue_type in ("triage", "test", "fix"):
        score += 3

    # Prefer issues with acceptance criteria or a substantive description
    body = issue.get("body", "")
    if body:
        # A single case-insensitive check suffices: "## acceptance criteria"
        # is a superstring of "acceptance criteria", so the previous extra
        # clause was redundant. Lowercase once instead of per-check.
        if "acceptance criteria" in body.lower():
            score += 3
        if len(body) > 200:
            score += 1

    # Prefer issues with recent activity
    updated_at = issue.get("updated_at", "")
    if updated_at:
        try:
            updated = datetime.fromisoformat(updated_at.replace("Z", "+00:00"))
            days_old = (datetime.now(timezone.utc) - updated).days
            if days_old < 7:
                score += 2
            elif days_old < 30:
                score += 1
        except (ValueError, TypeError):
            # Unparsable timestamps simply earn no recency bonus.
            pass

    return score
|
||||
|
||||
|
||||
# ── Golden Path Generation ────────────────────────────────────────────────
|
||||
|
||||
|
||||
@dataclass
class PathItem:
    """A single item in a Golden Path."""

    number: int             # Gitea issue number
    title: str              # issue title
    size: str               # size label, e.g. "XS" / "S" / "M" / "?"
    issue_type: str         # classified type (triage/fix/test/docs/refactor)
    estimated_minutes: int  # time estimate for this item
    url: str                # issue html_url; may be empty

    def to_dict(self) -> dict:
        """Serialize for JSON output; ``issue_type`` is exported as "type"."""
        keys = ("number", "title", "size", "type", "estimated_minutes", "url")
        values = (
            self.number,
            self.title,
            self.size,
            self.issue_type,
            self.estimated_minutes,
            self.url,
        )
        return dict(zip(keys, values))
|
||||
|
||||
|
||||
@dataclass
class GoldenPath:
    """A complete Golden Path sequence."""

    generated_at: str    # ISO-8601 timestamp of generation
    target_minutes: int  # session budget requested by the caller
    items: list[PathItem] = field(default_factory=list)

    @property
    def total_estimated_minutes(self) -> int:
        """Sum of the per-item time estimates."""
        total = 0
        for entry in self.items:
            total += entry.estimated_minutes
        return total

    @property
    def item_count(self) -> int:
        """Number of items on the path."""
        return len(self.items)

    def to_dict(self) -> dict:
        """Serialize the path, items included, for JSON output."""
        return {
            "generated_at": self.generated_at,
            "target_minutes": self.target_minutes,
            "total_estimated_minutes": self.total_estimated_minutes,
            "item_count": self.item_count,
            "items": [entry.to_dict() for entry in self.items],
        }

    def to_json(self, indent: int = 2) -> str:
        """Render :meth:`to_dict` as pretty-printed JSON."""
        return json.dumps(self.to_dict(), indent=indent)
|
||||
|
||||
|
||||
def fetch_eligible_issues(client: GiteaClient, config: dict) -> list[dict]:
    """Fetch open issues eligible for Golden Paths."""
    size_labels = config.get("size_labels", ["size:XS", "size:S", "size:M"])

    try:
        issues = client.get_paginated("issues", {"state": "open", "sort": "updated"})
    except (HTTPError, URLError) as exc:
        # Best-effort: an unreachable API yields an empty candidate list.
        print(f"[golden_path] Warning: Failed to fetch issues: {exc}", file=sys.stderr)
        return []

    if not size_labels:
        return issues

    # Keep only issues carrying at least one configured size label
    # (case-insensitive set intersection).
    wanted = {label.lower() for label in size_labels}

    def _has_size(issue: dict) -> bool:
        names = {entry.get("name", "").lower() for entry in issue.get("labels", [])}
        return bool(names & wanted)

    return [issue for issue in issues if _has_size(issue)]
|
||||
|
||||
|
||||
def group_issues_by_type(issues: list[dict]) -> dict[str, list[dict]]:
    """Group issues by their classified type, sorted by score."""
    known_types = ("triage", "fix", "test", "docs", "refactor")
    groups: dict[str, list[dict]] = {kind: [] for kind in known_types}

    # Bucket each issue under its classified type; unknown types are dropped.
    for issue in issues:
        bucket = groups.get(classify_issue_type(issue))
        if bucket is not None:
            bucket.append(issue)

    # Highest-scoring issues come first within every bucket (stable sort).
    for bucket in groups.values():
        bucket.sort(key=score_issue_for_path, reverse=True)

    return groups
|
||||
|
||||
|
||||
def build_golden_path(
    grouped_issues: dict[str, list[dict]],
    target_minutes: int = 45,
) -> GoldenPath:
    """Build a Golden Path from grouped issues.

    The path follows a coherent sequence:
    1. One small triage cleanup (warm-up)
    2. One micro-fix (momentum building)
    3. One test-improvement (quality focus)
    4. One more micro-fix or docs (closure)

    Args:
        grouped_issues: Buckets keyed by type ("triage", "fix", "test",
            "docs", "refactor"); each bucket is consumed best-first.
        target_minutes: Session budget; only phase 4 is capped by the
            remaining budget — earlier phases use fixed per-phase caps.

    Returns:
        A GoldenPath; it may hold fewer than four items (or none) when
        buckets are empty or no candidate fits a phase's time cap.
    """
    path = GoldenPath(
        generated_at=datetime.now(timezone.utc).isoformat(),
        target_minutes=target_minutes,
    )

    # Issue numbers already placed on the path — prevents duplicates when a
    # fallback phase draws from a bucket an earlier phase already used.
    used_issue_numbers: set[int] = set()

    def add_best_item(issues: list[dict], max_minutes: int | None = None) -> bool:
        """Add the best available issue of a type to the path.

        Takes the first unused issue whose estimate fits max_minutes,
        appends it, and returns True; returns False when nothing fits.
        NOTE(review): max_minutes=0 is falsy and would disable the cap;
        all current callers pass positive caps — confirm if that changes.
        """
        for issue in issues:
            number = issue.get("number", 0)
            if number in used_issue_numbers:
                continue

            est_time = estimate_time(issue)
            if max_minutes and est_time > max_minutes:
                continue

            used_issue_numbers.add(number)
            path.items.append(
                PathItem(
                    number=number,
                    title=issue.get("title", "Untitled"),
                    size=extract_size(issue.get("labels", [])),
                    issue_type=classify_issue_type(issue),
                    estimated_minutes=est_time,
                    url=issue.get("html_url", ""),
                )
            )
            return True
        return False

    # Phase 1: Warm-up with triage (5-10 min)
    if grouped_issues["triage"]:
        add_best_item(grouped_issues["triage"], max_minutes=15)
    else:
        # Fallback: use smallest available issue
        # (test issues are deliberately saved for phase 3)
        all_issues = (
            grouped_issues["fix"]
            + grouped_issues["docs"]
            + grouped_issues["refactor"]
        )
        all_issues.sort(key=lambda i: score_issue_for_path(i), reverse=True)
        add_best_item(all_issues, max_minutes=10)

    # Phase 2: First micro-fix (10-15 min)
    if grouped_issues["fix"]:
        add_best_item(grouped_issues["fix"], max_minutes=20)
    else:
        # Fallback to refactor
        add_best_item(grouped_issues["refactor"], max_minutes=15)

    # Phase 3: Test improvement (10-15 min)
    if grouped_issues["test"]:
        add_best_item(grouped_issues["test"], max_minutes=20)
    else:
        # If no test issues, add another fix
        add_best_item(grouped_issues["fix"], max_minutes=15)

    # Phase 4: Closure fix or docs (10-15 min)
    # Try to fill remaining time
    remaining_budget = target_minutes - path.total_estimated_minutes
    if remaining_budget >= 10:
        # Prefer fix, then docs
        if not add_best_item(grouped_issues["fix"], max_minutes=remaining_budget):
            if not add_best_item(grouped_issues["docs"], max_minutes=remaining_budget):
                add_best_item(grouped_issues["refactor"], max_minutes=remaining_budget)

    return path
|
||||
|
||||
|
||||
def generate_golden_path(
    target_minutes: int = 45,
    config: dict | None = None,
) -> GoldenPath:
    """Generate a Golden Path for the specified time budget.

    Args:
        target_minutes: Target session length (30-60 recommended)
        config: Optional config override

    Returns:
        A GoldenPath with ordered items from real Gitea issues
    """
    cfg = config or load_config()
    client = GiteaClient(cfg, get_token(cfg))

    # Gitea unreachable — degrade gracefully to an empty path rather
    # than raising, so callers can still render something.
    if not client.is_available():
        return GoldenPath(
            generated_at=datetime.now(timezone.utc).isoformat(),
            target_minutes=target_minutes,
            items=[],
        )

    eligible = fetch_eligible_issues(client, cfg)
    return build_golden_path(group_issues_by_type(eligible), target_minutes)
|
||||
|
||||
|
||||
# ── Output Formatting ─────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def print_golden_path(path: GoldenPath) -> None:
    """Print a formatted Golden Path to stdout.

    Shows a header with timing, one numbered entry per item, and closing
    instructions. An empty path prints labelling guidance instead.
    """
    print("=" * 60)
    print("🌟 GOLDEN PATH")
    print("=" * 60)
    print(f"Generated: {path.generated_at}")
    print(f"Target: {path.target_minutes} minutes")
    print(f"Estimated: {path.total_estimated_minutes} minutes")
    print()

    if not path.items:
        print("No eligible issues found for a Golden Path.")
        print()
        print("To create Golden Paths, ensure issues have:")
        print(" - Size labels: size:XS, size:S, or size:M")
        print(" - Type labels: bug, test, triage, docs, refactor")
        print()
        return

    # Hoisted out of the loop: the emoji table is loop-invariant, so there
    # is no need to rebuild the dict literal on every iteration.
    emoji_for_type = {
        "triage": "🧹",
        "fix": "🔧",
        "test": "🧪",
        "docs": "📚",
        "refactor": "♻️",
    }

    for i, item in enumerate(path.items, 1):
        type_emoji = emoji_for_type.get(item.issue_type, "📋")

        print(f"{i}. {type_emoji} #{item.number} [{item.size}] ({item.estimated_minutes}m)")
        print(f" Title: {item.title}")
        print(f" Type: {item.issue_type.upper()}")
        if item.url:
            print(f" URL: {item.url}")
        print()

    print("-" * 60)
    print("Instructions:")
    print(" 1. Start with the triage item to warm up")
    print(" 2. Progress through fixes to build momentum")
    print(" 3. Use the test item for quality focus")
    print(" 4. Check off items as you complete them")
    print()
|
||||
|
||||
|
||||
# ── CLI ───────────────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Parse CLI options: --minutes/-m (int, default 45) and --json/-j (flag)."""
    parser = argparse.ArgumentParser(
        description="Golden Path generator — coherent 30-60 minute mini-sessions",
    )
    parser.add_argument(
        "--minutes", "-m",
        type=int,
        default=45,
        help="Target session length in minutes (default: 45)",
    )
    parser.add_argument(
        "--json", "-j",
        action="store_true",
        help="Output as JSON instead of formatted text",
    )
    return parser.parse_args()
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point; returns 0 when a non-empty path was produced."""
    args = parse_args()

    # Clamp the requested session length into the supported 30-60 window,
    # warning on stderr when the user's value had to be adjusted.
    target = min(60, max(30, args.minutes))
    if target != args.minutes:
        print(
            f"[golden_path] Warning: Clamped {args.minutes}m to {target}m range",
            file=sys.stderr,
        )

    path = generate_golden_path(target_minutes=target)

    if args.json:
        print(path.to_json())
    else:
        print_golden_path(path)

    # Exit status reflects whether any work items were found.
    return 0 if path.items else 1
|
||||
|
||||
|
||||
# Script entry point: main() returns 0 when the path has items, 1 otherwise,
# and that value becomes the process exit code.
if __name__ == "__main__":
    sys.exit(main())
|
||||
@@ -4,10 +4,10 @@
|
||||
Connects to local Gitea, fetches candidate issues, and produces a concise agenda
|
||||
plus a day summary (review mode).
|
||||
|
||||
Run: python3 timmy_automations/daily_run/orchestrator.py [--review] [--preset NAME]
|
||||
Run: python3 timmy_automations/daily_run/orchestrator.py [--review]
|
||||
Env: See timmy_automations/config/daily_run.json for configuration
|
||||
|
||||
Refs: #703, #716
|
||||
Refs: #703
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@@ -40,42 +40,17 @@ DEFAULT_CONFIG = {
|
||||
}
|
||||
|
||||
|
||||
def load_config(preset: str | None = None) -> dict:
|
||||
"""Load configuration from config file with fallback to defaults.
|
||||
|
||||
If a preset is specified, merge preset configuration with defaults.
|
||||
"""
|
||||
def load_config() -> dict:
|
||||
"""Load configuration from config file with fallback to defaults."""
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
presets = {}
|
||||
|
||||
if CONFIG_PATH.exists():
|
||||
try:
|
||||
file_config = json.loads(CONFIG_PATH.read_text())
|
||||
if "orchestrator" in file_config:
|
||||
config.update(file_config["orchestrator"])
|
||||
# Load presets if available
|
||||
presets = file_config.get("focus_day_presets", {})
|
||||
except (json.JSONDecodeError, OSError) as exc:
|
||||
print(f"[orchestrator] Warning: Could not load config: {exc}", file=sys.stderr)
|
||||
|
||||
# Apply preset configuration if specified and exists
|
||||
if preset:
|
||||
if preset not in presets:
|
||||
available = ", ".join(presets.keys()) if presets else "none defined"
|
||||
print(
|
||||
f"[orchestrator] Warning: Preset '{preset}' not found. "
|
||||
f"Available: {available}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
else:
|
||||
preset_config = presets[preset]
|
||||
config["_preset"] = preset
|
||||
config["_preset_title"] = preset_config.get("agenda_title", f"Focus: {preset}")
|
||||
# Override config with preset values
|
||||
for key in ["candidate_labels", "size_labels", "title_keywords", "priority_boost"]:
|
||||
if key in preset_config:
|
||||
config[f"_preset_{key}"] = preset_config[key]
|
||||
|
||||
# Environment variable overrides
|
||||
if os.environ.get("TIMMY_GITEA_API"):
|
||||
config["gitea_api"] = os.environ.get("TIMMY_GITEA_API")
|
||||
@@ -210,20 +185,10 @@ def suggest_action_type(issue: dict) -> str:
|
||||
return "review"
|
||||
|
||||
|
||||
def score_issue(issue: dict, config: dict) -> int:
|
||||
def score_issue(issue: dict) -> int:
|
||||
"""Score an issue for prioritization (higher = more suitable for daily run)."""
|
||||
score = 0
|
||||
labels = [l.get("name", "").lower() for l in issue.get("labels", [])]
|
||||
title = issue.get("title", "").lower()
|
||||
|
||||
# Preset-specific scoring: boost issues matching preset priority labels
|
||||
preset_priority = config.get("_preset_priority_boost", [])
|
||||
if preset_priority:
|
||||
for priority_label in preset_priority:
|
||||
if priority_label.lower() in labels:
|
||||
score += 15 # Strong boost for preset-matching labels
|
||||
if priority_label.lower() in title:
|
||||
score += 8 # Medium boost for preset-matching title
|
||||
|
||||
# Prefer smaller sizes
|
||||
if "size:xs" in labels:
|
||||
@@ -233,8 +198,8 @@ def score_issue(issue: dict, config: dict) -> int:
|
||||
elif "size:m" in labels:
|
||||
score += 2
|
||||
|
||||
# Prefer daily-run labeled issues (when not using a preset)
|
||||
if "daily-run" in labels and not config.get("_preset"):
|
||||
# Prefer daily-run labeled issues
|
||||
if "daily-run" in labels:
|
||||
score += 3
|
||||
|
||||
# Prefer issues with clear type labels
|
||||
@@ -252,57 +217,31 @@ def score_issue(issue: dict, config: dict) -> int:
|
||||
|
||||
def fetch_candidates(client: GiteaClient, config: dict) -> list[dict]:
|
||||
"""Fetch issues matching candidate criteria."""
|
||||
# Use preset labels if available, otherwise fall back to config defaults
|
||||
candidate_labels = config.get("_preset_candidate_labels") or config["candidate_labels"]
|
||||
size_labels = config.get("_preset_size_labels") or config.get("size_labels", [])
|
||||
candidate_labels = config["candidate_labels"]
|
||||
size_labels = config.get("size_labels", [])
|
||||
all_labels = candidate_labels + size_labels
|
||||
|
||||
all_issues = []
|
||||
# Build label filter (OR logic via multiple label queries doesn't work well,
|
||||
# so we fetch by candidate label and filter sizes client-side)
|
||||
params = {"state": "open", "sort": "created", "labels": ",".join(candidate_labels)}
|
||||
|
||||
# Fetch issues for each candidate label separately and combine
|
||||
for label in candidate_labels:
|
||||
params = {"state": "open", "sort": "created", "labels": label}
|
||||
try:
|
||||
issues = client.get_paginated("issues", params)
|
||||
all_issues.extend(issues)
|
||||
except (HTTPError, URLError) as exc:
|
||||
print(
|
||||
f"[orchestrator] Warning: Failed to fetch issues for label '{label}': {exc}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
|
||||
# Remove duplicates (in case issues have multiple matching labels)
|
||||
seen = set()
|
||||
unique_issues = []
|
||||
for issue in all_issues:
|
||||
issue_id = issue.get("number")
|
||||
if issue_id not in seen:
|
||||
seen.add(issue_id)
|
||||
unique_issues.append(issue)
|
||||
try:
|
||||
issues = client.get_paginated("issues", params)
|
||||
except (HTTPError, URLError) as exc:
|
||||
print(f"[orchestrator] Warning: Failed to fetch issues: {exc}", file=sys.stderr)
|
||||
return []
|
||||
|
||||
# Filter by size labels if specified
|
||||
if size_labels:
|
||||
filtered = []
|
||||
size_names = {s.lower() for s in size_labels}
|
||||
for issue in unique_issues:
|
||||
for issue in issues:
|
||||
issue_labels = {l.get("name", "").lower() for l in issue.get("labels", [])}
|
||||
if issue_labels & size_names:
|
||||
filtered.append(issue)
|
||||
unique_issues = filtered
|
||||
issues = filtered
|
||||
|
||||
# Additional filtering by title keywords if preset specifies them
|
||||
title_keywords = config.get("_preset_title_keywords", [])
|
||||
if title_keywords:
|
||||
keyword_filtered = []
|
||||
keywords = [k.lower() for k in title_keywords]
|
||||
for issue in unique_issues:
|
||||
title = issue.get("title", "").lower()
|
||||
if any(kw in title for kw in keywords):
|
||||
keyword_filtered.append(issue)
|
||||
# Only apply keyword filter if it doesn't eliminate all candidates
|
||||
if keyword_filtered:
|
||||
unique_issues = keyword_filtered
|
||||
|
||||
return unique_issues
|
||||
return issues
|
||||
|
||||
|
||||
def generate_agenda(issues: list[dict], config: dict) -> dict:
|
||||
@@ -311,7 +250,7 @@ def generate_agenda(issues: list[dict], config: dict) -> dict:
|
||||
agenda_time = config.get("agenda_time_minutes", 10)
|
||||
|
||||
# Score and sort issues
|
||||
scored = [(score_issue(issue, config), issue) for issue in issues]
|
||||
scored = [(score_issue(issue), issue) for issue in issues]
|
||||
scored.sort(key=lambda x: (-x[0], x[1].get("number", 0)))
|
||||
|
||||
selected = scored[:max_items]
|
||||
@@ -328,7 +267,7 @@ def generate_agenda(issues: list[dict], config: dict) -> dict:
|
||||
}
|
||||
items.append(item)
|
||||
|
||||
agenda = {
|
||||
return {
|
||||
"generated_at": datetime.now(timezone.utc).isoformat(),
|
||||
"time_budget_minutes": agenda_time,
|
||||
"item_count": len(items),
|
||||
@@ -336,27 +275,15 @@ def generate_agenda(issues: list[dict], config: dict) -> dict:
|
||||
"candidates_considered": len(issues),
|
||||
}
|
||||
|
||||
# Include preset info if active
|
||||
if config.get("_preset"):
|
||||
agenda["preset"] = config["_preset"]
|
||||
agenda["preset_title"] = config.get("_preset_title", f"Focus: {config['_preset']}")
|
||||
|
||||
return agenda
|
||||
|
||||
|
||||
def print_agenda(agenda: dict) -> None:
|
||||
"""Print a formatted agenda to stdout."""
|
||||
# Use preset title if available, otherwise default
|
||||
title = agenda.get("preset_title", "📋 DAILY RUN AGENDA")
|
||||
|
||||
print("=" * 60)
|
||||
print(title)
|
||||
print("📋 DAILY RUN AGENDA")
|
||||
print("=" * 60)
|
||||
print(f"Generated: {agenda['generated_at']}")
|
||||
print(f"Time budget: {agenda['time_budget_minutes']} minutes")
|
||||
print(f"Candidates considered: {agenda['candidates_considered']}")
|
||||
if agenda.get("preset"):
|
||||
print(f"Focus preset: {agenda['preset']}")
|
||||
print()
|
||||
|
||||
if not agenda["items"]:
|
||||
@@ -560,64 +487,12 @@ def parse_args() -> argparse.Namespace:
|
||||
default=None,
|
||||
help="Override max agenda items",
|
||||
)
|
||||
p.add_argument(
|
||||
"--preset", "-p",
|
||||
type=str,
|
||||
default=None,
|
||||
metavar="NAME",
|
||||
help="Use a focus-day preset (tests-day, triage-day, economy-day, docs-day, refactor-day)",
|
||||
)
|
||||
p.add_argument(
|
||||
"--list-presets",
|
||||
action="store_true",
|
||||
help="List available focus-day presets and exit",
|
||||
)
|
||||
return p.parse_args()
|
||||
|
||||
|
||||
def list_presets() -> None:
|
||||
"""List available focus-day presets."""
|
||||
if not CONFIG_PATH.exists():
|
||||
print("No configuration file found.", file=sys.stderr)
|
||||
return
|
||||
|
||||
try:
|
||||
file_config = json.loads(CONFIG_PATH.read_text())
|
||||
presets = file_config.get("focus_day_presets", {})
|
||||
|
||||
if not presets:
|
||||
print("No focus-day presets configured.")
|
||||
return
|
||||
|
||||
print("=" * 60)
|
||||
print("🎯 AVAILABLE FOCUS-DAY PRESETS")
|
||||
print("=" * 60)
|
||||
print()
|
||||
|
||||
for name, config in presets.items():
|
||||
title = config.get("agenda_title", f"Focus: {name}")
|
||||
description = config.get("description", "No description")
|
||||
labels = config.get("candidate_labels", [])
|
||||
|
||||
print(f"{name}")
|
||||
print(f" {title}")
|
||||
print(f" {description}")
|
||||
print(f" Labels: {', '.join(labels) if labels else 'none'}")
|
||||
print()
|
||||
|
||||
except (json.JSONDecodeError, OSError) as exc:
|
||||
print(f"Error loading presets: {exc}", file=sys.stderr)
|
||||
|
||||
|
||||
def main() -> int:
|
||||
args = parse_args()
|
||||
|
||||
# Handle --list-presets
|
||||
if args.list_presets:
|
||||
list_presets()
|
||||
return 0
|
||||
|
||||
config = load_config(preset=args.preset)
|
||||
config = load_config()
|
||||
|
||||
if args.max_items:
|
||||
config["max_agenda_items"] = args.max_items
|
||||
|
||||
Reference in New Issue
Block a user