# forked from Rockachopa/Timmy-time-dashboard
"""Golden Path generator — coherent 30-60 minute mini-sessions from real issues.

Fetches issues from Gitea and assembles them into ordered sequences forming
a coherent mini-session. Each Golden Path includes:

- One small triage cleanup
- Two micro-fixes (XS/S sized)
- One test-improvement task

All tasks are real issues from the Gitea repository, never synthetic.

Usage:
    from timmy_automations.daily_run.golden_path import generate_golden_path
    path = generate_golden_path(target_minutes=45)
    print(path.to_json())
"""
from __future__ import annotations

import argparse
import json
import os
import sys
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
from urllib.request import Request, urlopen
# ── Configuration ─────────────────────────────────────────────────────────

# Repository root: three directory levels above this file.
# NOTE(review): assumes this module lives at <repo>/<pkg>/<subpkg>/golden_path.py
# — confirm against the actual package layout.
REPO_ROOT = Path(__file__).resolve().parent.parent.parent

# Shared daily-run config file; its "orchestrator" section (if present)
# overrides DEFAULT_CONFIG — see load_config().
CONFIG_PATH = Path(__file__).parent.parent / "config" / "daily_run.json"

# Fallback settings used when no config file or env overrides are present.
DEFAULT_CONFIG = {
    "gitea_api": "http://localhost:3000/api/v1",
    "repo_slug": "rockachopa/Timmy-time-dashboard",
    "token_file": "~/.hermes/gitea_token",
    "size_labels": ["size:XS", "size:S", "size:M"],
}

# Time estimates (in minutes) by size and type
TIME_ESTIMATES: dict[str, dict[str, int]] = {
    "XS": {"triage": 5, "fix": 10, "test": 10, "docs": 8, "refactor": 8},
    "S": {"triage": 10, "fix": 15, "test": 15, "docs": 12, "refactor": 12},
    "M": {"triage": 15, "fix": 25, "test": 25, "docs": 20, "refactor": 20},
}

# Issue type detection patterns. Substrings are matched against lowercased
# label names (weight 2) and issue titles (weight 1) in classify_issue_type().
# Note "cleanup" appears under both "triage" and "refactor"; ties resolve to
# the first key in this dict's insertion order.
TYPE_PATTERNS: dict[str, dict[str, list[str]]] = {
    "triage": {
        "labels": ["triage", "cleanup", "organize", "sort", "categorize"],
        "title": ["triage", "cleanup", "organize", "sort", "categorize", "clean up"],
    },
    "fix": {
        "labels": ["bug", "fix", "error", "broken"],
        "title": ["fix", "bug", "error", "broken", "repair", "correct"],
    },
    "test": {
        "labels": ["test", "testing", "coverage", "pytest"],
        "title": ["test", "coverage", "pytest", "unit test", "integration test"],
    },
    "docs": {
        "labels": ["docs", "documentation", "readme", "docstring"],
        "title": ["doc", "readme", "comment", "guide", "tutorial"],
    },
    "refactor": {
        "labels": ["refactor", "cleanup", "debt", "maintainability"],
        "title": ["refactor", "cleanup", "simplify", "extract", "reorganize"],
    },
}
def load_config() -> dict:
    """Load configuration: defaults, then config file, then env overrides.

    Returns:
        A dict with at least the DEFAULT_CONFIG keys; never raises on a
        missing or malformed config file (a warning goes to stderr).
    """
    config = DEFAULT_CONFIG.copy()

    if CONFIG_PATH.exists():
        try:
            file_config = json.loads(CONFIG_PATH.read_text())
        except (json.JSONDecodeError, OSError) as exc:
            print(f"[golden_path] Warning: Could not load config: {exc}", file=sys.stderr)
        else:
            # Only the "orchestrator" section of the shared file applies here.
            if "orchestrator" in file_config:
                config.update(file_config["orchestrator"])

    # Environment variables take precedence over file settings.
    env_overrides = {
        "TIMMY_GITEA_API": "gitea_api",
        "TIMMY_REPO_SLUG": "repo_slug",
        "TIMMY_GITEA_TOKEN": "token",
    }
    for env_name, config_key in env_overrides.items():
        value = os.environ.get(env_name)
        if value:
            config[config_key] = value

    return config
def get_token(config: dict) -> str | None:
|
|
"""Get Gitea token from environment or file."""
|
|
if "token" in config:
|
|
return config["token"]
|
|
|
|
token_file = Path(config["token_file"]).expanduser()
|
|
if token_file.exists():
|
|
return token_file.read_text().strip()
|
|
|
|
return None
|
|
|
|
|
|
# ── Gitea API Client ──────────────────────────────────────────────────────
|
|
|
|
|
|
class GiteaClient:
|
|
"""Simple Gitea API client with graceful degradation."""
|
|
|
|
def __init__(self, config: dict, token: str | None):
|
|
self.api_base = config["gitea_api"].rstrip("/")
|
|
self.repo_slug = config["repo_slug"]
|
|
self.token = token
|
|
self._available: bool | None = None
|
|
|
|
def _headers(self) -> dict:
|
|
headers = {"Accept": "application/json"}
|
|
if self.token:
|
|
headers["Authorization"] = f"token {self.token}"
|
|
return headers
|
|
|
|
def _api_url(self, path: str) -> str:
|
|
return f"{self.api_base}/repos/{self.repo_slug}/{path}"
|
|
|
|
def is_available(self) -> bool:
|
|
"""Check if Gitea API is reachable."""
|
|
if self._available is not None:
|
|
return self._available
|
|
|
|
try:
|
|
req = Request(
|
|
f"{self.api_base}/version",
|
|
headers=self._headers(),
|
|
method="GET",
|
|
)
|
|
with urlopen(req, timeout=5) as resp:
|
|
self._available = resp.status == 200
|
|
return self._available
|
|
except (HTTPError, URLError, TimeoutError):
|
|
self._available = False
|
|
return False
|
|
|
|
def get(self, path: str, params: dict | None = None) -> list | dict:
|
|
"""Make a GET request to the Gitea API."""
|
|
url = self._api_url(path)
|
|
if params:
|
|
query = "&".join(f"{k}={v}" for k, v in params.items())
|
|
url = f"{url}?{query}"
|
|
|
|
req = Request(url, headers=self._headers(), method="GET")
|
|
with urlopen(req, timeout=15) as resp:
|
|
return json.loads(resp.read())
|
|
|
|
def get_paginated(self, path: str, params: dict | None = None) -> list:
|
|
"""Fetch all pages of a paginated endpoint."""
|
|
all_items = []
|
|
page = 1
|
|
limit = 50
|
|
|
|
while True:
|
|
page_params = {"limit": limit, "page": page}
|
|
if params:
|
|
page_params.update(params)
|
|
|
|
batch = self.get(path, page_params)
|
|
if not batch:
|
|
break
|
|
|
|
all_items.extend(batch)
|
|
if len(batch) < limit:
|
|
break
|
|
page += 1
|
|
|
|
return all_items
|
|
|
|
|
|
# ── Issue Classification ──────────────────────────────────────────────────


def extract_size(labels: list[dict]) -> str:
    """Extract the size code ("XS", "S", ...) from issue label dicts.

    Args:
        labels: Gitea label objects; only the "name" key is consulted.

    Returns:
        The uppercased suffix of the first "size:" label, or "?" when no
        size label is present.
    """
    for label in labels:
        name = label.get("name", "")
        if name.startswith("size:"):
            # removeprefix strips only the leading "size:"; the previous
            # str.replace removed *every* occurrence, mangling odd labels.
            return name.removeprefix("size:").upper()
    return "?"
def classify_issue_type(issue: dict) -> str:
    """Classify an issue into a type based on its labels and title.

    Scores every TYPE_PATTERNS entry (label matches weigh 2, title
    matches weigh 1) and returns the best-scoring type, or "fix" when
    nothing matches at all.
    """
    label_names = [lbl.get("name", "").lower() for lbl in issue.get("labels", [])]
    title = issue.get("title", "").lower()

    scores: dict[str, int] = {}
    for kind, patterns in TYPE_PATTERNS.items():
        label_hits = sum(
            2 for pat in patterns["labels"] if any(pat in name for name in label_names)
        )
        title_hits = sum(1 for pat in patterns["title"] if pat in title)
        scores[kind] = label_hits + title_hits

    # max() keeps the first key on ties, matching TYPE_PATTERNS order.
    best = max(scores, key=scores.get)
    if scores[best] > 0:
        return best

    return "fix"  # Default to fix for uncategorized issues
def estimate_time(issue: dict) -> int:
    """Estimate time in minutes for an issue based on its size and type.

    Unknown sizes fall back to the "S" row; unknown types fall back to
    the "fix" column; a final default of 15 minutes covers both missing.
    """
    size = extract_size(issue.get("labels", []))
    issue_type = classify_issue_type(issue)

    size_row = TIME_ESTIMATES.get(size, TIME_ESTIMATES["S"])
    # Fall back to "fix" timings when this size has no entry for the type.
    if issue_type not in TIME_ESTIMATES.get(size, {}):
        issue_type = "fix"

    return size_row.get(issue_type, 15)
def score_issue_for_path(issue: dict) -> int:
    """Score an issue for Golden Path suitability (higher = better fit)."""
    label_names = [lbl.get("name", "").lower() for lbl in issue.get("labels", [])]
    total = 0

    # Smaller sizes are more predictable, so they earn a bigger bonus;
    # only the first matching size counts.
    for size_label, bonus in (("size:xs", 10), ("size:s", 7), ("size:m", 3)):
        if size_label in label_names:
            total += bonus
            break

    # Clearly-typed issues fit the path structure better.
    if classify_issue_type(issue) in ("triage", "test", "fix"):
        total += 3

    # Reward well-specified issues.
    body = issue.get("body", "")
    if body:
        if "acceptance criteria" in body.lower():
            total += 3
        if len(body) > 200:
            total += 1

    # Reward recent activity.
    updated_at = issue.get("updated_at", "")
    if updated_at:
        try:
            updated = datetime.fromisoformat(updated_at.replace("Z", "+00:00"))
            # Subtraction raises TypeError for naive timestamps; both
            # that and a bad ISO string are silently ignored, as before.
            age_days = (datetime.now(timezone.utc) - updated).days
        except (ValueError, TypeError):
            pass
        else:
            if age_days < 7:
                total += 2
            elif age_days < 30:
                total += 1

    return total
# ── Golden Path Generation ────────────────────────────────────────────────


@dataclass
class PathItem:
    """A single item in a Golden Path."""

    number: int  # Gitea issue number
    title: str
    size: str  # "XS"/"S"/"M", or "?" when unlabeled
    issue_type: str  # triage / fix / test / docs / refactor
    estimated_minutes: int
    url: str

    def to_dict(self) -> dict:
        """Serialize to a plain dict; issue_type is emitted under "type"."""
        serialized = {
            "number": self.number,
            "title": self.title,
            "size": self.size,
            "type": self.issue_type,
            "estimated_minutes": self.estimated_minutes,
            "url": self.url,
        }
        return serialized
@dataclass
class GoldenPath:
    """A complete Golden Path sequence."""

    generated_at: str  # ISO-8601 timestamp of generation
    target_minutes: int  # requested session budget
    items: "list[PathItem]" = field(default_factory=list)

    @property
    def total_estimated_minutes(self) -> int:
        """Sum of per-item time estimates."""
        return sum(entry.estimated_minutes for entry in self.items)

    @property
    def item_count(self) -> int:
        """Number of items in the path."""
        return len(self.items)

    def to_dict(self) -> dict:
        """Serialize the path, including derived totals, to a plain dict."""
        return {
            "generated_at": self.generated_at,
            "target_minutes": self.target_minutes,
            "total_estimated_minutes": self.total_estimated_minutes,
            "item_count": self.item_count,
            "items": [entry.to_dict() for entry in self.items],
        }

    def to_json(self, indent: int = 2) -> str:
        """Serialize to pretty-printed JSON."""
        return json.dumps(self.to_dict(), indent=indent)
def fetch_eligible_issues(client: "GiteaClient", config: dict) -> list[dict]:
    """Fetch open issues eligible for Golden Paths.

    Issues are filtered to those carrying one of the configured size
    labels. A failed fetch degrades gracefully to an empty list with a
    warning on stderr.
    """
    size_labels = config.get("size_labels", ["size:XS", "size:S", "size:M"])

    try:
        issues = client.get_paginated("issues", {"state": "open", "sort": "updated"})
    except (HTTPError, URLError) as exc:
        print(f"[golden_path] Warning: Failed to fetch issues: {exc}", file=sys.stderr)
        return []

    # An empty size-label list means "accept everything".
    if not size_labels:
        return issues

    wanted = {s.lower() for s in size_labels}
    return [
        issue
        for issue in issues
        if wanted & {lbl.get("name", "").lower() for lbl in issue.get("labels", [])}
    ]
def group_issues_by_type(issues: list[dict]) -> dict[str, list[dict]]:
    """Group issues by classified type, each group sorted best-first.

    Every known type key is always present, even when its group is empty.
    """
    groups: dict[str, list[dict]] = {
        kind: [] for kind in ("triage", "fix", "test", "docs", "refactor")
    }

    for issue in issues:
        bucket = groups.get(classify_issue_type(issue))
        if bucket is not None:
            bucket.append(issue)

    # Highest-scoring issues come first within each group.
    for members in groups.values():
        members.sort(key=score_issue_for_path, reverse=True)

    return groups
def build_golden_path(
    grouped_issues: dict[str, list[dict]],
    target_minutes: int = 45,
) -> GoldenPath:
    """Build a Golden Path from grouped issues.

    The path follows a coherent sequence:
    1. One small triage cleanup (warm-up)
    2. One micro-fix (momentum building)
    3. One test-improvement (quality focus)
    4. One more micro-fix or docs (closure)

    Args:
        grouped_issues: Mapping of type name ("triage", "fix", "test",
            "docs", "refactor") to issue lists sorted best-first, as
            produced by group_issues_by_type(). All five keys must exist.
        target_minutes: Session budget; phase 4 only runs when at least
            10 minutes remain under it.

    Returns:
        A GoldenPath whose items never repeat an issue number. The total
        estimate may come in under target_minutes when few issues fit,
        and phases 1-3 may push it slightly over.
    """
    path = GoldenPath(
        generated_at=datetime.now(timezone.utc).isoformat(),
        target_minutes=target_minutes,
    )

    # Issue numbers already placed in the path; prevents duplicates when
    # the same group is drawn from in multiple phases.
    used_issue_numbers: set[int] = set()

    def add_best_item(issues: list[dict], max_minutes: int | None = None) -> bool:
        """Append the first unused issue fitting the time cap; True on success."""
        for issue in issues:
            number = issue.get("number", 0)
            if number in used_issue_numbers:
                continue

            est_time = estimate_time(issue)
            # Skip issues whose estimate exceeds the per-phase cap.
            if max_minutes and est_time > max_minutes:
                continue

            used_issue_numbers.add(number)
            path.items.append(
                PathItem(
                    number=number,
                    title=issue.get("title", "Untitled"),
                    size=extract_size(issue.get("labels", [])),
                    issue_type=classify_issue_type(issue),
                    estimated_minutes=est_time,
                    url=issue.get("html_url", ""),
                )
            )
            return True
        return False

    # Phase 1: Warm-up with triage (5-10 min)
    if grouped_issues["triage"]:
        add_best_item(grouped_issues["triage"], max_minutes=15)
    else:
        # Fallback: use smallest available issue from the other groups
        # (tighter 10-minute cap keeps the warm-up short).
        all_issues = (
            grouped_issues["fix"]
            + grouped_issues["docs"]
            + grouped_issues["refactor"]
        )
        all_issues.sort(key=lambda i: score_issue_for_path(i), reverse=True)
        add_best_item(all_issues, max_minutes=10)

    # Phase 2: First micro-fix (10-15 min)
    if grouped_issues["fix"]:
        add_best_item(grouped_issues["fix"], max_minutes=20)
    else:
        # Fallback to refactor
        add_best_item(grouped_issues["refactor"], max_minutes=15)

    # Phase 3: Test improvement (10-15 min)
    if grouped_issues["test"]:
        add_best_item(grouped_issues["test"], max_minutes=20)
    else:
        # If no test issues, add another fix
        add_best_item(grouped_issues["fix"], max_minutes=15)

    # Phase 4: Closure fix or docs (10-15 min)
    # Try to fill remaining time
    remaining_budget = target_minutes - path.total_estimated_minutes
    if remaining_budget >= 10:
        # Prefer fix, then docs, then refactor.
        if not add_best_item(grouped_issues["fix"], max_minutes=remaining_budget):
            if not add_best_item(grouped_issues["docs"], max_minutes=remaining_budget):
                add_best_item(grouped_issues["refactor"], max_minutes=remaining_budget)

    return path
def generate_golden_path(
    target_minutes: int = 45,
    config: dict | None = None,
) -> GoldenPath:
    """Generate a Golden Path for the specified time budget.

    Args:
        target_minutes: Target session length (30-60 recommended)
        config: Optional config override

    Returns:
        A GoldenPath with ordered items from real Gitea issues, or an
        empty path when the Gitea API is unreachable.
    """
    cfg = config or load_config()
    client = GiteaClient(cfg, get_token(cfg))

    # Gitea down or unreachable: degrade to an empty path instead of raising.
    if not client.is_available():
        return GoldenPath(
            generated_at=datetime.now(timezone.utc).isoformat(),
            target_minutes=target_minutes,
            items=[],
        )

    grouped = group_issues_by_type(fetch_eligible_issues(client, cfg))
    return build_golden_path(grouped, target_minutes)
# ── Output Formatting ─────────────────────────────────────────────────────


def print_golden_path(path: "GoldenPath") -> None:
    """Print a formatted Golden Path to stdout."""
    banner = "=" * 60
    print(banner)
    print("🌟 GOLDEN PATH")
    print(banner)
    print(f"Generated: {path.generated_at}")
    print(f"Target: {path.target_minutes} minutes")
    print(f"Estimated: {path.total_estimated_minutes} minutes")
    print()

    if not path.items:
        # Nothing eligible: explain how to make issues path-worthy.
        print("No eligible issues found for a Golden Path.")
        print()
        print("To create Golden Paths, ensure issues have:")
        print(" - Size labels: size:XS, size:S, or size:M")
        print(" - Type labels: bug, test, triage, docs, refactor")
        print()
        return

    # Built once, outside the loop; unknown types fall back to a clipboard.
    emoji_by_type = {
        "triage": "🧹",
        "fix": "🔧",
        "test": "🧪",
        "docs": "📚",
        "refactor": "♻️",
    }

    for position, item in enumerate(path.items, 1):
        icon = emoji_by_type.get(item.issue_type, "📋")
        print(f"{position}. {icon} #{item.number} [{item.size}] ({item.estimated_minutes}m)")
        print(f" Title: {item.title}")
        print(f" Type: {item.issue_type.upper()}")
        if item.url:
            print(f" URL: {item.url}")
        print()

    print("-" * 60)
    print("Instructions:")
    print(" 1. Start with the triage item to warm up")
    print(" 2. Progress through fixes to build momentum")
    print(" 3. Use the test item for quality focus")
    print(" 4. Check off items as you complete them")
    print()
# ── CLI ───────────────────────────────────────────────────────────────────


def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the golden-path CLI."""
    parser = argparse.ArgumentParser(
        description="Golden Path generator — coherent 30-60 minute mini-sessions",
    )
    parser.add_argument(
        "--minutes",
        "-m",
        type=int,
        default=45,
        help="Target session length in minutes (default: 45)",
    )
    parser.add_argument(
        "--json",
        "-j",
        action="store_true",
        help="Output as JSON instead of formatted text",
    )
    namespace = parser.parse_args()
    return namespace
def main() -> int:
    """CLI entry point. Returns 0 when a non-empty path was produced."""
    args = parse_args()

    # Clamp the requested minutes into the supported 30-60 window.
    target = min(60, max(30, args.minutes))
    if target != args.minutes:
        print(
            f"[golden_path] Warning: Clamped {args.minutes}m to {target}m range",
            file=sys.stderr,
        )

    path = generate_golden_path(target_minutes=target)

    if args.json:
        print(path.to_json())
    else:
        print_golden_path(path)

    # Non-zero exit signals "no eligible issues found".
    return 0 if path.items else 1
if __name__ == "__main__":
    # Exit code 1 when no Golden Path items could be assembled (see main()).
    sys.exit(main())