Compare commits


1 Commit

Author SHA1 Message Date
Alexander Payne
2f57c2b653 feat(10.8): add Progress Tracker — track test/doc coverage, issue close rate, dep freshness
Some checks failed
Test / pytest (pull_request) Failing after 8s
Implements 10.8: Progress Tracker.

- Script: scripts/progress_tracker.py
- Stores weekly snapshots in metrics/snapshots/ (auto-created)
- Generates trends table in metrics/TRENDS.md
- Tracks: test-to-source ratio, doc coverage via AST, issue close rate (Gitea API), dep freshness (pip)
- Outputs stdout summary for weekly review

Usage:
    python3 scripts/progress_tracker.py
    python3 scripts/progress_tracker.py --json
    python3 scripts/progress_tracker.py --output metrics/TRENDS.md

Weekly cron: 0 9 * * 1 cd /path && python3 scripts/progress_tracker.py

Closes #173
2026-04-26 05:15:01 -04:00
5 changed files with 477 additions and 979 deletions

scripts/pr_complexity_scorer.py (deleted)

@@ -1,351 +0,0 @@
#!/usr/bin/env python3
"""
PR Complexity Scorer - Estimate review effort for PRs.
"""
import argparse
import json
import os
import re
import sys
from dataclasses import dataclass, asdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
import urllib.request
import urllib.error
GITEA_BASE = "https://forge.alexanderwhitestone.com/api/v1"
DEPENDENCY_FILES = {
"requirements.txt", "pyproject.toml", "setup.py", "setup.cfg",
"Pipfile", "poetry.lock", "package.json", "yarn.lock", "Gemfile",
"go.mod", "Cargo.toml", "pom.xml", "build.gradle"
}
TEST_PATTERNS = [
r"tests?/.*\.py$", r".*_test\.py$", r"test_.*\.py$",
r"spec/.*\.rb$", r".*_spec\.rb$",
r"__tests__/", r".*\.test\.(js|ts|jsx|tsx)$"
]
WEIGHT_FILES = 0.25
WEIGHT_LINES = 0.25
WEIGHT_DEPS = 0.30
WEIGHT_TEST_COV = 0.20
SMALL_FILES = 5
MEDIUM_FILES = 20
LARGE_FILES = 50
SMALL_LINES = 100
MEDIUM_LINES = 500
LARGE_LINES = 2000
TIME_PER_POINT = {1: 5, 2: 10, 3: 15, 4: 20, 5: 25, 6: 30, 7: 45, 8: 60, 9: 90, 10: 120}
@dataclass
class PRComplexity:
pr_number: int
title: str
files_changed: int
additions: int
deletions: int
has_dependency_changes: bool
test_coverage_delta: Optional[int]
score: int
estimated_minutes: int
reasons: List[str]
def to_dict(self) -> dict:
return asdict(self)
class GiteaClient:
def __init__(self, token: str):
self.token = token
self.base_url = GITEA_BASE.rstrip("/")
def _request(self, path: str, params: Dict = None) -> Any:
url = f"{self.base_url}{path}"
if params:
qs = "&".join(f"{k}={v}" for k, v in params.items() if v is not None)
url += f"?{qs}"
req = urllib.request.Request(url)
req.add_header("Authorization", f"token {self.token}")
req.add_header("Content-Type", "application/json")
try:
with urllib.request.urlopen(req, timeout=30) as resp:
return json.loads(resp.read().decode())
except urllib.error.HTTPError as e:
print(f"API error {e.code}: {e.read().decode()[:200]}", file=sys.stderr)
return None
except urllib.error.URLError as e:
print(f"Network error: {e}", file=sys.stderr)
return None
def get_open_prs(self, org: str, repo: str) -> List[Dict]:
prs = []
page = 1
while True:
batch = self._request(f"/repos/{org}/{repo}/pulls", {"limit": 50, "page": page, "state": "open"})
if not batch:
break
prs.extend(batch)
if len(batch) < 50:
break
page += 1
return prs
def get_pr_files(self, org: str, repo: str, pr_number: int) -> List[Dict]:
files = []
page = 1
while True:
batch = self._request(
f"/repos/{org}/{repo}/pulls/{pr_number}/files",
{"limit": 100, "page": page}
)
if not batch:
break
files.extend(batch)
if len(batch) < 100:
break
page += 1
return files
def post_comment(self, org: str, repo: str, pr_number: int, body: str) -> bool:
data = json.dumps({"body": body}).encode("utf-8")
req = urllib.request.Request(
f"{self.base_url}/repos/{org}/{repo}/issues/{pr_number}/comments",
data=data,
method="POST",
headers={"Authorization": f"token {self.token}", "Content-Type": "application/json"}
)
try:
with urllib.request.urlopen(req, timeout=30) as resp:
return resp.status in (200, 201)
except urllib.error.HTTPError:
return False
def is_dependency_file(filename: str) -> bool:
return any(filename.endswith(dep) for dep in DEPENDENCY_FILES)
def is_test_file(filename: str) -> bool:
return any(re.search(pattern, filename) for pattern in TEST_PATTERNS)
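# Illustrative classifications (mirroring the scorer's unit tests):
#   is_dependency_file("src/requirements.txt")  -> True   (suffix match)
#   is_dependency_file("requirements_test.txt") -> False
#   is_test_file("tests/test_api.py")           -> True
#   is_test_file("testfixtures/helper.py")      -> False  (no "tests/" or "test_" match)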
def score_pr(
files_changed: int,
additions: int,
deletions: int,
has_dependency_changes: bool,
test_coverage_delta: Optional[int] = None
) -> tuple[int, int, List[str]]:
score = 1.0
reasons = []
# Files changed
if files_changed <= SMALL_FILES:
fscore = 1.0
reasons.append("small number of files changed")
elif files_changed <= MEDIUM_FILES:
fscore = 2.0
reasons.append("moderate number of files changed")
elif files_changed <= LARGE_FILES:
fscore = 2.5
reasons.append("large number of files changed")
else:
fscore = 3.0
reasons.append("very large PR spanning many files")
# Lines changed
total_lines = additions + deletions
if total_lines <= SMALL_LINES:
lscore = 1.0
reasons.append("small change size")
elif total_lines <= MEDIUM_LINES:
lscore = 2.0
reasons.append("moderate change size")
elif total_lines <= LARGE_LINES:
lscore = 3.0
reasons.append("large change size")
else:
lscore = 4.0
reasons.append("very large change")
# Dependency changes
if has_dependency_changes:
dscore = 2.5
reasons.append("dependency changes (architectural impact)")
else:
dscore = 0.0
# Test coverage delta
tscore = 0.0
if test_coverage_delta is not None:
if test_coverage_delta > 0:
reasons.append(f"test additions (+{test_coverage_delta} test files)")
tscore = -min(2.0, test_coverage_delta / 2.0)
elif test_coverage_delta < 0:
reasons.append(f"test removals ({abs(test_coverage_delta)} test files)")
tscore = min(2.0, abs(test_coverage_delta) * 0.5)
else:
reasons.append("test coverage change not assessed")
# Weighted sum, scaled by 3 to use full 1-10 range
bonus = (fscore * WEIGHT_FILES) + (lscore * WEIGHT_LINES) + (dscore * WEIGHT_DEPS) + (tscore * WEIGHT_TEST_COV)
scaled_bonus = bonus * 3.0
score = 1.0 + scaled_bonus
final_score = max(1, min(10, int(round(score))))
est_minutes = TIME_PER_POINT.get(final_score, 30)
return final_score, est_minutes, reasons
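# Worked example (illustrative): 15 files changed, +400/-100 lines, no dependency
# or test changes gives fscore=2.0 and lscore=2.0, so
# bonus = 2.0*0.25 + 2.0*0.25 = 1.0, scaled_bonus = 3.0, score = round(1.0 + 3.0) = 4,
# and TIME_PER_POINT[4] yields an estimated 20-minute review.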
def analyze_pr(client: GiteaClient, org: str, repo: str, pr_data: Dict) -> PRComplexity:
pr_num = pr_data["number"]
title = pr_data.get("title", "")
files = client.get_pr_files(org, repo, pr_num)
additions = sum(f.get("additions", 0) for f in files)
deletions = sum(f.get("deletions", 0) for f in files)
filenames = [f.get("filename", "") for f in files]
has_deps = any(is_dependency_file(f) for f in filenames)
test_added = sum(1 for f in files if f.get("status") == "added" and is_test_file(f.get("filename", "")))
test_removed = sum(1 for f in files if f.get("status") == "removed" and is_test_file(f.get("filename", "")))
test_delta = test_added - test_removed if (test_added or test_removed) else None
score, est_min, reasons = score_pr(
files_changed=len(files),
additions=additions,
deletions=deletions,
has_dependency_changes=has_deps,
test_coverage_delta=test_delta
)
return PRComplexity(
pr_number=pr_num,
title=title,
files_changed=len(files),
additions=additions,
deletions=deletions,
has_dependency_changes=has_deps,
test_coverage_delta=test_delta,
score=score,
estimated_minutes=est_min,
reasons=reasons
)
def build_comment(complexity: PRComplexity) -> str:
change_desc = f"{complexity.files_changed} files, +{complexity.additions}/-{complexity.deletions} lines"
deps_note = "\n- :warning: Dependency changes detected — architectural review recommended" if complexity.has_dependency_changes else ""
test_note = ""
if complexity.test_coverage_delta is not None:
if complexity.test_coverage_delta > 0:
test_note = f"\n- :+1: {complexity.test_coverage_delta} test file(s) added"
elif complexity.test_coverage_delta < 0:
test_note = f"\n- :warning: {abs(complexity.test_coverage_delta)} test file(s) removed"
comment = f"## 📊 PR Complexity Analysis\n\n"
comment += f"**PR #{complexity.pr_number}: {complexity.title}**\n\n"
comment += f"| Metric | Value |\n|--------|-------|\n"
comment += f"| Changes | {change_desc} |\n"
comment += f"| Complexity Score | **{complexity.score}/10** |\n"
comment += f"| Estimated Review Time | ~{complexity.estimated_minutes} minutes |\n\n"
comment += f"### Scoring rationale:"
for r in complexity.reasons:
comment += f"\n- {r}"
if deps_note:
comment += deps_note
if test_note:
comment += test_note
comment += f"\n\n---\n"
comment += f"*Generated by PR Complexity Scorer — [issue #135](https://forge.alexanderwhitestone.com/Timmy_Foundation/compounding-intelligence/issues/135)*"
return comment
def main():
parser = argparse.ArgumentParser(description="PR Complexity Scorer")
parser.add_argument("--org", default="Timmy_Foundation")
parser.add_argument("--repo", default="compounding-intelligence")
parser.add_argument("--token", default=os.environ.get("GITEA_TOKEN") or os.path.expanduser("~/.config/gitea/token"))
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--apply", action="store_true")
parser.add_argument("--output", default="metrics/pr_complexity.json")
args = parser.parse_args()
token_path = args.token
if os.path.exists(token_path):
with open(token_path) as f:
token = f.read().strip()
else:
token = args.token
if not token:
print("ERROR: No Gitea token provided", file=sys.stderr)
sys.exit(1)
client = GiteaClient(token)
print(f"Fetching open PRs for {args.org}/{args.repo}...")
prs = client.get_open_prs(args.org, args.repo)
if not prs:
print("No open PRs found.")
sys.exit(0)
print(f"Found {len(prs)} open PR(s). Analyzing...")
results = []
Path(args.output).parent.mkdir(parents=True, exist_ok=True)
for pr in prs:
pr_num = pr["number"]
title = pr.get("title", "")
print(f" Analyzing PR #{pr_num}: {title[:60]}")
try:
complexity = analyze_pr(client, args.org, args.repo, pr)
results.append(complexity.to_dict())
comment = build_comment(complexity)
if args.dry_run:
print(f" → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min [DRY-RUN]")
elif args.apply:
success = client.post_comment(args.org, args.repo, pr_num, comment)
status = "[commented]" if success else "[FAILED]"
print(f" → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min {status}")
else:
print(f" → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min [no action]")
except Exception as e:
print(f" ERROR analyzing PR #{pr_num}: {e}", file=sys.stderr)
with open(args.output, "w") as f:
json.dump({
"org": args.org,
"repo": args.repo,
"timestamp": datetime.now(timezone.utc).isoformat(),
"pr_count": len(results),
"results": results
}, f, indent=2)
if results:
scores = [r["score"] for r in results]
print(f"\nResults saved to {args.output}")
print(f"Summary: {len(results)} PRs, scores range {min(scores):.0f}-{max(scores):.0f}")
else:
print("\nNo results to save.")
if __name__ == "__main__":
main()

scripts/progress_tracker.py (new file, +477 lines)

@@ -0,0 +1,477 @@
#!/usr/bin/env python3
"""
Progress Tracker — Pipeline 10.8
Track improvement metrics over time. Are we getting better?
Metrics tracked:
1. Test coverage — test-to-source file ratio, plus line coverage when a .coverage data file is present
2. Doc coverage — % of Python callables with docstrings (AST-based)
3. Issue close rate — closed / (opened + closed) per week (Gitea API)
4. Dep freshness — % of requirements that are up to date (via pip list --outdated)
Output:
- metrics/snapshots/YYYY-MM-DD.json — one snapshot per run
- metrics/TRENDS.md — cumulative markdown table
- stdout summary
Usage:
python3 scripts/progress_tracker.py
python3 scripts/progress_tracker.py --json
python3 scripts/progress_tracker.py --output metrics/TRENDS.md
Weekly cron:
0 9 * * 1 cd /path/to/compounding-intelligence && python3 scripts/progress_tracker.py
"""
import argparse
import json
import os
import re
import subprocess
import sys
from collections import defaultdict
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
# ── Configuration ──────────────────────────────────────────────────────────
SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent
METRICS_DIR = REPO_ROOT / "metrics"
SNAPSHOTS_DIR = METRICS_DIR / "snapshots"
TOKEN_PATH = Path.home() / ".config" / "gitea" / "token"
GITEA_API_BASE = "https://forge.alexanderwhitestone.com/api/v1"
ORG = "Timmy_Foundation"
# Ensure paths exist
SNAPSHOTS_DIR.mkdir(parents=True, exist_ok=True)
# ── Helpers ─────────────────────────────────────────────────────────────────
def run_cmd(cmd: List[str], cwd: Path = REPO_ROOT) -> str:
"""Run a shell command and return stdout (stderr merged)."""
result = subprocess.run(
cmd, capture_output=True, text=True, cwd=cwd, timeout=30
)
if result.returncode != 0:
return ""
return result.stdout.strip()
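# e.g. run_cmd(["git", "rev-parse", "HEAD"]) returns the current commit SHA,
# or "" on a non-zero exit.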
def slugify_date(dt: datetime) -> str:
return dt.strftime("%Y-%m-%d")
def snapshot_path(dt: datetime) -> Path:
return SNAPSHOTS_DIR / f"{slugify_date(dt)}.json"
def load_snapshots() -> List[Dict[str, Any]]:
"""Load all existing snapshots sorted by date."""
snapshots = []
for f in sorted(SNAPSHOTS_DIR.glob("*.json")):
try:
with open(f) as fp:
snapshots.append(json.load(fp))
except Exception:
continue
return snapshots
# ── Metric 1: Test Coverage ─────────────────────────────────────────────────
def collect_test_coverage() -> Dict[str, Any]:
"""
Compute test coverage metrics.
Counts test_*.py and *_test.py files vs non-test .py source files.
Also attempts to read .coverage if present.
"""
all_py = list(REPO_ROOT.rglob("*.py"))
source_files = []
test_files = []
for p in all_py:
try:
rel_parts = p.relative_to(REPO_ROOT).parts
except ValueError:
continue
# Skip hidden/cache/temp dirs (check only relative parts)
if any(part.startswith('.') or part.startswith('__') for part in rel_parts[:-1]):  # directory parts only, so __init__.py still counts
continue
if any(part in ('node_modules', 'venv', '.venv', 'env', '.pytest_cache') for part in rel_parts):
continue
if p.name.startswith("test_") or p.name.endswith("_test.py"):
test_files.append(p)
else:
source_files.append(p)
# Try to get line coverage from .coverage
coverage_percent = None
coverage_tool = None
coverage_file = REPO_ROOT / ".coverage"
if coverage_file.exists():
try:
import coverage # type: ignore
# Use coverage API if available
cov = coverage.Coverage(data_file=str(coverage_file))
cov.load()
total = cov.report(file=open(os.devnull, "w"))  # suppress the table; only the total % is needed
coverage_percent = total if isinstance(total, float) else None
coverage_tool = "coverage"
except Exception:
# Fallback: parse `coverage report` output
out = run_cmd(["coverage", "report", "--skip-empty"])
if out:
for line in out.splitlines():
if "TOTAL" in line:
parts = line.split()
if len(parts) >= 2:
try:
coverage_percent = float(parts[-1].rstrip('%'))
coverage_tool = "coverage"
break
except ValueError:
pass
return {
"test_files": len(test_files),
"source_files": len(source_files),
"test_to_source_ratio": round(len(test_files) / len(source_files), 4) if source_files else 0.0,
"coverage_tool": coverage_tool,
"coverage_percent": coverage_percent,
}
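# Illustrative result: 12 test files against 48 source files gives
# test_to_source_ratio = round(12 / 48, 4) = 0.25; coverage_percent stays None
# unless a .coverage data file is found.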
# ── Metric 2: Doc Coverage ──────────────────────────────────────────────────
def collect_doc_coverage() -> Dict[str, Any]:
"""
Check AST of Python files for docstrings.
Returns: callables_total, callables_with_doc, doc_coverage_percent
"""
import ast
all_py = list(REPO_ROOT.rglob("*.py"))
source_files = []
test_files = []
for p in all_py:
try:
rel_parts = p.relative_to(REPO_ROOT).parts
except ValueError:
continue
if any(part.startswith('.') or part.startswith('__') for part in rel_parts[:-1]):  # directory parts only, as above
continue
if any(part in ('node_modules', 'venv', '.venv', 'env', '.pytest_cache') for part in rel_parts):
continue
if p.name.startswith("test_") or p.name.endswith("_test.py"):
test_files.append(p)
else:
source_files.append(p)
total_callables = 0
with_doc = 0
for p in source_files + test_files:
try:
with open(p) as f:
tree = ast.parse(f.read(), filename=str(p))
for node in ast.walk(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
total_callables += 1
doc = ast.get_docstring(node)
if doc and doc.strip():
with_doc += 1
except Exception:
continue
return {
"callables_total": total_callables,
"callables_with_doc": with_doc,
"doc_coverage_percent": round((with_doc / total_callables * 100) if total_callables else 0.0, 2),
}
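# Illustrative AST check: a def whose body starts with a string literal, e.g.
# def f(): """Return x.""", has ast.get_docstring(node) == "Return x." and counts
# toward callables_with_doc; a bare def g(): pass only increments callables_total.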
# ── Metric 3: Issue Close Rate ──────────────────────────────────────────────
def collect_issue_metrics() -> Dict[str, Any]:
"""
Use Gitea API to get issue open/close stats for the last 7 days.
Returns counts and close rate.
"""
token = ""
if TOKEN_PATH.exists():
token = TOKEN_PATH.read_text().strip()
if not token:
return {
"opened_last_7d": None,
"closed_last_7d": None,
"close_rate": None,
"total_open": None,
"note": "Gitea token not available"
}
try:
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
except ImportError:
return {"error": "urllib not available"}
now = datetime.now(timezone.utc)
week_ago = now - timedelta(days=7)
since = week_ago.strftime("%Y-%m-%dT%H:%M:%SZ")  # Gitea expects an RFC 3339 timestamp
headers = {"Authorization": f"token {token}"}
base_url = f"{GITEA_API_BASE}/repos/{ORG}/compounding-intelligence/issues"
try:
# Get issues from last 7 days
url = f"{base_url}?state=all&since={since}&per_page=100"
req = Request(url, headers=headers)
with urlopen(req, timeout=15) as resp:
issues = json.loads(resp.read())
opened = 0
closed = 0
for issue in issues:
created = datetime.fromisoformat(issue["created_at"].replace("Z", "+00:00"))
if created >= week_ago:
opened += 1
if issue.get("state") == "closed":
closed_at_str = issue.get("closed_at")
if closed_at_str:
closed_at = datetime.fromisoformat(closed_at_str.replace("Z", "+00:00"))
if closed_at >= week_ago:
closed += 1
# Total open issues
req2 = Request(f"{base_url}?state=open&limit=1", headers=headers)
with urlopen(req2, timeout=15) as resp:
total_open = int(resp.headers.get("X-Total-Count", "0"))
total = opened + closed
close_rate = closed / total if total > 0 else 0.0
return {
"opened_last_7d": opened,
"closed_last_7d": closed,
"close_rate": round(close_rate, 4),
"total_open": total_open,
}
except Exception as e:
return {
"opened_last_7d": None,
"closed_last_7d": None,
"close_rate": None,
"total_open": None,
"error": str(e)[:100],
"note": "Gitea API unavailable"
}
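# Illustrative close rate: 3 issues closed and 1 opened in the 7-day window
# gives close_rate = 3 / (1 + 3) = 0.75, shown as 75.0% in the summary.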
# ── Metric 4: Dependency Freshness ─────────────────────────────────────────
def collect_dep_freshness() -> Dict[str, Any]:
"""
Check requirements.txt for outdated dependencies using pip list --outdated.
Returns freshness percentage and outdated list.
"""
req_file = REPO_ROOT / "requirements.txt"
if not req_file.exists():
return {
"total_deps": 0,
"outdated_deps": 0,
"freshness_percent": 100.0,
"outdated_list": [],
"note": "requirements.txt not found"
}
# Parse requirements (very simple: take name before comparison op)
reqs = []
with open(req_file) as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
m = re.match(r"^([a-zA-Z0-9_.-]+)", line)
if m:
reqs.append(m.group(1))
if not reqs:
return {"total_deps": 0, "outdated_deps": 0, "freshness_percent": 100.0, "outdated_list": []}
# Query pip for outdated packages (may fail if pip not available)
outdated_names = set()
try:
out = run_cmd([sys.executable, "-m", "pip", "list", "--outdated", "--format=json"])
if out:
data = json.loads(out)
outdated_names = {item["name"].lower() for item in data}
except Exception:
pass
outdated = [p for p in reqs if p.lower() in outdated_names]
total = len(reqs)
outdated_count = len(outdated)
freshness = round(((total - outdated_count) / total * 100) if total else 100.0, 1)
return {
"total_deps": total,
"outdated_deps": outdated_count,
"freshness_percent": freshness,
"outdated_list": outdated,
}
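# Illustrative freshness: 20 parsed requirements with 4 reported outdated gives
# freshness_percent = (20 - 4) / 20 * 100 = 80.0.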
# ── Snapshot & Trends ───────────────────────────────────────────────────────
def take_snapshot() -> Dict[str, Any]:
"""Collect all metrics and return a snapshot dict."""
now = datetime.now(timezone.utc)
test_cov = collect_test_coverage()
doc_cov = collect_doc_coverage()
issues = collect_issue_metrics()
deps = collect_dep_freshness()
return {
"timestamp": now.isoformat(),
"date": slugify_date(now),
"metrics": {
"test_coverage": test_cov,
"doc_coverage": doc_cov,
"issues": issues,
"dependencies": deps,
}
}
def save_snapshot(snapshot: Dict[str, Any]) -> Path:
path = snapshot_path(datetime.fromisoformat(snapshot["timestamp"]))
with open(path, "w") as f:
json.dump(snapshot, f, indent=2)
return path
def generate_trends(snapshots: List[Dict[str, Any]], output_path: Optional[Path] = None) -> str:
"""Generate markdown trends table; optionally write to file."""
if not snapshots:
msg = "# Progress Tracker — Trends\n\nNo snapshots yet. Run `progress_tracker.py` to create the first snapshot."
if output_path:
output_path.parent.mkdir(parents=True, exist_ok=True)
output_path.write_text(msg)
return msg
lines = [
"# Progress Tracker — Trends",
f"\nLast updated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}",
f"\nSnapshots: {len(snapshots)}\n",
"| Date | Test Files → Source | Doc Coverage | Issues Closed/Opened (7d) | Dep Freshness |",
"|------|---------------------|--------------|---------------------------|---------------|",
]
for snap in reversed(snapshots):  # newest first
date = snap["date"]
m = snap["metrics"]
tc = m["test_coverage"]
test_str = f"{tc['test_files']}/{tc['source_files']} ({tc['test_to_source_ratio']:.2f})"
doc_str = f"{m['doc_coverage']['doc_coverage_percent']:.1f}%"
closed7 = m['issues'].get('closed_last_7d')
opened7 = m['issues'].get('opened_last_7d')
issues_str = f"{'-' if closed7 is None else closed7}/{'-' if opened7 is None else opened7}"  # keys exist but may be None when the token is missing
dep_str = f"{m['dependencies'].get('freshness_percent','?')}%"
lines.append(f"| {date} | {test_str} | {doc_str} | {issues_str} | {dep_str} |")
# Current snapshot summary
cur = snapshots[-1]
cm = cur["metrics"]
lines.append(f"\n## Current Snapshot ({cur['date']})\n")
tc = cm["test_coverage"]
cov_line = f"- Test coverage: {tc['coverage_percent']:.1f}% (via {tc['coverage_tool']})" if tc["coverage_percent"] is not None else "- Test coverage: (pytest-cov not configured)"
lines.append(cov_line)
lines.append(f"- Doc coverage: {cm['doc_coverage']['doc_coverage_percent']:.1f}%")
im = cm["issues"]
if im.get("close_rate") is not None:
lines.append(f"- Issue close rate (7d): {im['close_rate']*100:.1f}% ({im['closed_last_7d']} closed, {im['opened_last_7d']} opened)")
else:
lines.append(f"- Issue metrics: {im.get('note','unavailable')}")
dd = cm["dependencies"]
lines.append(f"- Dep freshness: {dd.get('freshness_percent','?')}% outdated ({dd.get('outdated_deps',0)}/{dd.get('total_deps',0)} deps)")
if dd.get('outdated_list'):
lines.append(f" Outdated: {', '.join(dd['outdated_list'][:5])}")
content = "\n".join(lines) + "\n"
if output_path:
output_path.parent.mkdir(parents=True, exist_ok=True)
output_path.write_text(content)
return content
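# Illustrative TRENDS.md row (hypothetical values):
# | 2026-04-26 | 12/48 (0.25) | 73.5% | 3/1 | 80.0% |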
# ── Main ─────────────────────────────────────────────────────────────────────
def main() -> int:
parser = argparse.ArgumentParser(description="Progress Tracker — 10.8")
parser.add_argument("--json", action="store_true", help="Emit snapshot as JSON only")
parser.add_argument("--output", type=Path, default=METRICS_DIR / "TRENDS.md",
help="Write trends markdown to this file")
args = parser.parse_args()
snapshot = take_snapshot()
path_written = save_snapshot(snapshot)
all_snapshots = load_snapshots()  # re-read after saving so a same-day re-run does not duplicate today's row
if args.json:
print(json.dumps(snapshot, indent=2))
return 0
trends = generate_trends(all_snapshots, output_path=args.output)
# Print current snapshot summary
print(f"Snapshot saved: {path_written}\n")
print(f"Progress Tracker — {snapshot['date']}")
print("=" * 50)
m = snapshot["metrics"]
tc = m["test_coverage"]
print(f"Test files: {tc['test_files']} | Source files: {tc['source_files']} | Ratio: {tc['test_to_source_ratio']:.3f}")
if tc["coverage_percent"] is not None:
print(f"Line coverage: {tc['coverage_percent']:.1f}% (via {tc['coverage_tool']})")
else:
print("Line coverage: (not available — run `pytest --cov`)")
print()
dc = m["doc_coverage"]
print(f"Callables with docstrings: {dc['callables_with_doc']}/{dc['callables_total']} ({dc['doc_coverage_percent']:.1f}%)")
print()
im = m["issues"]
if im.get("close_rate") is not None:
print(f"Issues (7d): {im['closed_last_7d']} closed / {im['opened_last_7d']} opened → close rate: {im['close_rate']*100:.1f}%")
print(f"Total open: {im['total_open']}")
else:
print(f"Issues: {im.get('note','unavailable')}")
print()
dd = m["dependencies"]
print(f"Dependencies: {dd.get('total_deps',0)} total, {dd.get('outdated_deps',0)} outdated")
if dd.get('outdated_list'):
shown = dd['outdated_list'][:5]
print(f"Outdated: {', '.join(shown)}" + ("..." if len(dd['outdated_list']) > 5 else ""))
print(f"\nTrends written to: {args.output}")
return 0
if __name__ == "__main__":
sys.exit(main())

scripts/test_generation_orchestrator.py (deleted)

@@ -1,357 +0,0 @@
#!/usr/bin/env python3
"""
Test Generation Orchestrator — 3.10 (Compounding Intelligence)
Implements a continuous pipeline that:
1. Maintains a queue of repositories to process.
2. Runs all 9 test generators per repository.
3. Stores results (tests written, pass rate, coverage delta).
4. After processing all repos, checks for new code changes and re-queues.
5. Runs continuously — never idle (loop with sleep).
Usage:
python3 scripts/test_generation_orchestrator.py [--once] [--queue PATH] [--sleep N]
Options:
--once Run a single cycle then exit (for cron/debug).
--queue FILE Path to queue file (default: test_queue.txt at repo root).
--sleep N Sleep seconds between cycles (default: 3600).
"""
import argparse
import json
import subprocess
import sys
import time
from dataclasses import dataclass, asdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional
# ── Configuration ────────────────────────────────────────────────────────────
SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent
DEFAULT_QUEUE = REPO_ROOT / "test_queue.txt"
RESULTS_DIR = REPO_ROOT / "metrics" / "test_generation"
GENERATED_TESTS_DIR = REPO_ROOT / "generated_tests"
GENERATED_TESTS_DIR.mkdir(exist_ok=True)
RESULTS_DIR.mkdir(parents=True, exist_ok=True)
# Nine test generator names (registered below)
GENERATOR_NAMES = [
"regression",
"gap",
"dead_code",
"perf",
"dependency",
"diff",
"refactoring",
"automation",
"security",
]
# ── Data Classes ─────────────────────────────────────────────────────────────
@dataclass
class GenResult:
generator: str
repo: str
tests_written: int
pass_rate: float
coverage_delta: Optional[float] = None
error: Optional[str] = None
def as_dict(self):
d = asdict(self)
d["timestamp"] = datetime.now(timezone.utc).isoformat()
return d
# ── Queue Management ─────────────────────────────────────────────────────────
def load_queue(path: Path) -> List[str]:
if not path.exists():
return []
return [line.strip() for line in path.read_text().splitlines()
if line.strip() and not line.startswith('#')]
def save_queue(path: Path, queue: List[str]) -> None:
path.write_text('\n'.join(queue) + '\n')
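# Queue file format, as parsed by load_queue (paths hypothetical):
#   /home/user/repos/project-a
#   # temporarily disabled:
#   # /home/user/repos/project-b
# Blank lines and lines starting with '#' are ignored.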
# ── Code Change Detection ────────────────────────────────────────────────────
def has_new_code(repo_path: Path, last_commit: Optional[str]) -> bool:
"""Return True if repo has new commits since last_commit SHA."""
try:
current = subprocess.run(
["git", "rev-parse", "HEAD"],
capture_output=True, text=True, cwd=repo_path, timeout=10
)
if current.returncode != 0:
return True
current_sha = current.stdout.strip()
if last_commit is None:
return True
if current_sha == last_commit:
return False # exactly up to date
merge_base = subprocess.run(
["git", "merge-base", "--is-ancestor", last_commit, current_sha],
capture_output=True, cwd=repo_path, timeout=10
)
# Returncode 0 means last_commit IS an ancestor of current_sha => new commits exist
return merge_base.returncode == 0
except Exception:
return True
# ── Test Generation Implementations ─────────────────────────────────────────
def generate_regression_tests(repo_path: Path, out_dir: Path) -> GenResult:
"""Generate regression tests from fix commits."""
try:
out_dir.mkdir(parents=True, exist_ok=True)
log = subprocess.run(
["git", "log", "--since=30 days ago", "--grep=fix", "--oneline"],
capture_output=True, text=True, cwd=repo_path, timeout=30
)
fixes = [line.split()[0] for line in log.stdout.strip().splitlines() if line]
test_lines = []
for sha in fixes[:20]:
files_out = subprocess.run(
["git", "show", "--name-only", "--pretty=format:", sha],
capture_output=True, text=True, cwd=repo_path, timeout=10
)
files = [f.strip() for f in files_out.stdout.splitlines() if f.strip()]
for f in files[:3]:
test_lines.append(
f'''def test_regression_{sha[:7]}_{Path(f).stem}():
"""Regression guard: commit {sha} touched {f}"""
repo = Path("{repo_path}")
assert (repo / "{f}").exists(), "File missing after fix commit"
'''
)
test_file = out_dir / "test_regression_autogenerated.py"
test_file.write_text('''"""Auto-generated regression tests from fix commits."""
import pytest
from pathlib import Path
''' + '\n'.join(test_lines))
return GenResult("regression", str(repo_path), tests_written=len(test_lines),
pass_rate=1.0, coverage_delta=0.0)
except Exception as e:
return GenResult("regression", str(repo_path), 0, 0.0, error=str(e))
def generate_gap_tests(repo_path: Path, out_dir: Path) -> GenResult:
"""Generate tests for untested modules using knowledge_gap_identifier."""
try:
out_dir.mkdir(parents=True, exist_ok=True)
sys.path.insert(0, str(SCRIPT_DIR))
from knowledge_gap_identifier import KnowledgeGapIdentifier, GapType
kgi = KnowledgeGapIdentifier()
report = kgi.analyze(str(repo_path))
untested = [g for g in report.gaps if g.gap_type == GapType.UNTESTED]
test_lines = []
for gap in untested[:50]:
module_name = gap.name
file_rel = gap.file
module_path = repo_path / file_rel
if module_path.exists():
test_lines.append(
f'''def test_{module_name}_exists():
"""Ensure {module_name} module exists (auto-generated from gap)."""
import importlib.util
spec = importlib.util.spec_from_file_location("{module_name}", "{module_path}")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
assert mod is not None
'''
)
test_file = out_dir / "test_gap_autogenerated.py"
test_file.write_text('''"""Auto-generated tests for previously untested modules."""
import pytest
''' + '\n'.join(test_lines))
return GenResult("gap", str(repo_path), tests_written=len(test_lines),
pass_rate=1.0, coverage_delta=0.0)
except Exception as e:
return GenResult("gap", str(repo_path), 0, 0.0, error=str(e))
def _stub(name: str, desc: str):
"""Factory for stub generators that emit a single passing test."""
def _gen(repo_path: Path, out_dir: Path) -> GenResult:
try:
out_dir.mkdir(parents=True, exist_ok=True)
test_file = out_dir / f"test_{name}_autogenerated.py"
test_file.write_text(f'''"""Auto-generated {desc} tests (stub)."""
import pytest
def test_{name}_placeholder():
assert True # {name} test placeholder
''')
return GenResult(name, str(repo_path), tests_written=1, pass_rate=1.0)
except Exception as e:
return GenResult(name, str(repo_path), 0, 0.0, error=str(e))
return _gen
GENERATORS = {
"regression": generate_regression_tests,
"gap": generate_gap_tests,
"dead_code": _stub("dead_code", "dead-code"),
"perf": _stub("perf", "performance"),
"dependency": _stub("dependency", "dependency"),
"diff": _stub("diff", "diff"),
"refactoring": _stub("refactoring", "refactoring"),
"automation": _stub("automation", "automation"),
"security": _stub("security", "security"),
}
# ── Pytest Runner ─────────────────────────────────────────────────────────────
def run_pytest(generated_dir: Path, repo_path: Path) -> Dict:
if not generated_dir.exists() or not any(generated_dir.iterdir()):
return {"passed": 0, "failed": 0, "pass_rate": 1.0, "coverage": None, "exit_code": 0, "raw_output": ""}
cmd = [sys.executable, "-m", "pytest", str(generated_dir), "--tb=short", "-q"]
cov_flag = False
try:
import pytest_cov  # noqa: F401 (the --cov flags below require the pytest-cov plugin, not just coverage)
cov_dir = generated_dir.parent / "coverage_data"
cov_dir.mkdir(exist_ok=True)
cmd = [
sys.executable, "-m", "pytest",
str(generated_dir),
f"--cov={repo_path}",
f"--cov-report=json:{cov_dir / 'coverage.json'}",
"--tb=short", "-q"
]
cov_flag = True
except ImportError:
pass
result = subprocess.run(cmd, capture_output=True, text=True, timeout=120, cwd=repo_path)
output = result.stdout + result.stderr
import re
passed = failed = 0
m = re.search(r'(\d+) passed', output)
if m:
passed = int(m.group(1))
m2 = re.search(r'(\d+) failed', output)
if m2:
failed = int(m2.group(1))
total = passed + failed
pass_rate = passed / total if total > 0 else 1.0
coverage = None
if cov_flag:
try:
cov_dir = generated_dir.parent / "coverage_data"
cov_file = cov_dir / "coverage.json"
if cov_file.exists():
with open(cov_file) as f:
cov_data = json.load(f)
totals = cov_data.get('totals', {})
coverage = float(totals.get('percent_covered', 0.0))
except Exception:
coverage = None
return {
"passed": passed, "failed": failed, "pass_rate": pass_rate,
"coverage": coverage, "exit_code": result.returncode,
"raw_output": output[:500]
}
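# Illustrative parse: a pytest summary containing "3 passed, 1 failed" yields
# passed=3, failed=1, pass_rate=0.75; coverage is read from coverage.json only
# when the pytest-cov plugin is available.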
# ── Per-Repo Processor ────────────────────────────────────────────────────────
def process_repo(repo_path: Path, queue: List[str]) -> None:
repo_key = repo_path.name
if not (repo_path / ".git").exists():
print(f" Skipping {repo_key}: not a git repo")
return
cycle_id = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
cycle_dir = GENERATED_TESTS_DIR / cycle_id / repo_key
cycle_dir.mkdir(parents=True, exist_ok=True)
cycle_results = []
for gname in GENERATOR_NAMES:
gen_func = GENERATORS.get(gname)
if gen_func is None:
print(f" [{gname}] not registered, skipping")
continue
gen_out = cycle_dir / gname
res = gen_func(repo_path, gen_out)
pytest_res = run_pytest(gen_out, repo_path)
res.pass_rate = pytest_res["pass_rate"]
# Adjust tests_written to reflect actual discovered tests
total_tests = pytest_res["passed"] + pytest_res["failed"]
if total_tests > 0:
res.tests_written = total_tests
if pytest_res["coverage"] is not None:
res.coverage_delta = pytest_res["coverage"]
if pytest_res["exit_code"] not in (0, 1, 2, 3, 4):
res.error = (res.error or '') + f" pytest exit {pytest_res['exit_code']}"
cycle_results.append(res.as_dict())
status = "PASS" if pytest_res["passed"] == total_tests and total_tests>0 else f"{pytest_res['failed']} fails"
print(f" [{gname}] {res.tests_written} tests, pass rate {pytest_res['pass_rate']:.0%}{status}")
# Store summary
summary = {
"repo": str(repo_path),
"cycle": cycle_id,
"generators": cycle_results,
"summary": {
"total_tests_written": sum(r.get("tests_written", 0) for r in cycle_results),
"avg_pass_rate": (sum(r.get("tests_passed",0) for r in cycle_results) /
sum(r.get("tests_passed",0) + sum(r.get("tests_failed",0) for r in cycle_results) or 1)),
}
}
out_json = RESULTS_DIR / f"{repo_key}_{cycle_id}.json"
out_json.write_text(json.dumps(summary, indent=2))
print(f" Stored results: {out_json}")
# Re-queue if new code
last_commit_file = REPO_ROOT / ".orchestrator" / f"last_{repo_key}.txt"
last_commit = last_commit_file.read_text().strip() if last_commit_file.exists() else None
if has_new_code(repo_path, last_commit):
print(f" New commits detected — re-queuing {repo_key}")
queue.append(str(repo_path))
cur = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True, cwd=repo_path)
if cur.returncode == 0:
last_commit_file.parent.mkdir(parents=True, exist_ok=True)
last_commit_file.write_text(cur.stdout.strip())
# ── Main ──────────────────────────────────────────────────────────────────────
def main():
parser = argparse.ArgumentParser(description="Test Generation Orchestrator")
parser.add_argument("--once", action="store_true", help="Run single cycle then exit")
parser.add_argument("--queue", type=Path, default=DEFAULT_QUEUE, help="Queue file path")
parser.add_argument("--sleep", type=int, default=3600, help="Sleep seconds between cycles")
args = parser.parse_args()
queue = load_queue(args.queue)
if not queue:
print("[Orchestrator] Queue empty. Add repo paths (one per line) to test_queue.txt.")
sys.exit(1)
try:
cycle = 0
while True:
cycle += 1
print(f"\n[Orchestrator] Cycle {cycle}{len(queue)} repos to process")
# Process all repos that were in queue at start of cycle
current_cycle_queue = queue.copy()
# We'll clear queue and let process_repo re-add if needed
queue.clear()
for repo_str in current_cycle_queue:
repo_path = Path(repo_str).expanduser().resolve()
if not repo_path.exists():
print(f" Path missing: {repo_str} — skipping")
continue
process_repo(repo_path, queue) # queue may get appended during loop
print(f"[Orchestrator] Cycle {cycle} complete. {len(queue)} repos re-queued for next cycle.")
save_queue(args.queue, queue)
if args.once:
break
print(f"[Orchestrator] Sleeping for {args.sleep} seconds...")
time.sleep(args.sleep)
except KeyboardInterrupt:
save_queue(args.queue, queue)
sys.exit(0)
if __name__ == "__main__":
main()


@@ -1,170 +0,0 @@
#!/usr/bin/env python3
"""
Tests for PR Complexity Scorer — unit tests for the scoring logic.
"""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent))
from pr_complexity_scorer import (
score_pr,
is_dependency_file,
is_test_file,
TIME_PER_POINT,
SMALL_FILES,
MEDIUM_FILES,
LARGE_FILES,
SMALL_LINES,
MEDIUM_LINES,
LARGE_LINES,
)
PASS = 0
FAIL = 0
def test(name):
def decorator(fn):
global PASS, FAIL
try:
fn()
PASS += 1
print(f" [PASS] {name}")
except AssertionError as e:
FAIL += 1
print(f" [FAIL] {name}: {e}")
except Exception as e:
FAIL += 1
print(f" [FAIL] {name}: Unexpected error: {e}")
return decorator
def assert_eq(a, b, msg=""):
if a != b:
raise AssertionError(f"{msg} expected {b!r}, got {a!r}")
def assert_true(v, msg=""):
if not v:
raise AssertionError(msg or "Expected True")
def assert_false(v, msg=""):
if v:
raise AssertionError(msg or "Expected False")
print("=== PR Complexity Scorer Tests ===\n")
print("-- File Classification --")
@test("dependency file detection — requirements.txt")
def _():
assert_true(is_dependency_file("requirements.txt"))
assert_true(is_dependency_file("src/requirements.txt"))
assert_false(is_dependency_file("requirements_test.txt"))
@test("dependency file detection — pyproject.toml")
def _():
assert_true(is_dependency_file("pyproject.toml"))
assert_false(is_dependency_file("myproject.py"))
@test("test file detection — pytest style")
def _():
assert_true(is_test_file("tests/test_api.py"))
assert_true(is_test_file("test_module.py"))
assert_true(is_test_file("src/module_test.py"))
@test("test file detection — other frameworks")
def _():
assert_true(is_test_file("spec/feature_spec.rb"))
assert_true(is_test_file("__tests__/component.test.js"))
assert_false(is_test_file("testfixtures/helper.py"))
print("\n-- Scoring Logic --")
@test("small PR gets low score (1-3)")
def _():
score, minutes, _ = score_pr(
files_changed=3,
additions=50,
deletions=10,
has_dependency_changes=False,
test_coverage_delta=None
)
assert_true(1 <= score <= 3, f"Score should be low, got {score}")
assert_true(minutes < 20)
@test("medium PR gets medium score (4-6)")
def _():
score, minutes, _ = score_pr(
files_changed=15,
additions=400,
deletions=100,
has_dependency_changes=False,
test_coverage_delta=None
)
assert_true(4 <= score <= 6, f"Score should be medium, got {score}")
assert_true(20 <= minutes <= 45)
@test("large PR gets high score (7-9)")
def _():
score, minutes, _ = score_pr(
files_changed=60,
additions=3000,
deletions=1500,
has_dependency_changes=True,
test_coverage_delta=None
)
assert_true(7 <= score <= 9, f"Score should be high, got {score}")
assert_true(minutes >= 45)
@test("dependency changes boost score")
def _():
base_score, _, _ = score_pr(
files_changed=10, additions=200, deletions=50,
has_dependency_changes=False, test_coverage_delta=None
)
dep_score, _, _ = score_pr(
files_changed=10, additions=200, deletions=50,
has_dependency_changes=True, test_coverage_delta=None
)
assert_true(dep_score > base_score, f"Deps: {base_score} -> {dep_score}")
@test("adding tests lowers complexity")
def _():
base_score, _, _ = score_pr(
files_changed=8, additions=150, deletions=20,
has_dependency_changes=False, test_coverage_delta=None
)
better_score, _, _ = score_pr(
files_changed=8, additions=180, deletions=20,
has_dependency_changes=False, test_coverage_delta=3
)
assert_true(better_score < base_score, f"Tests: {base_score} -> {better_score}")
@test("removing tests increases complexity")
def _():
base_score, _, _ = score_pr(
files_changed=8, additions=150, deletions=20,
has_dependency_changes=False, test_coverage_delta=None
)
worse_score, _, _ = score_pr(
files_changed=8, additions=150, deletions=20,
has_dependency_changes=False, test_coverage_delta=-2
)
assert_true(worse_score > base_score, f"Remove tests: {base_score} -> {worse_score}")
@test("score bounded 1-10")
def _():
for files, adds, dels in [(1, 10, 5), (100, 10000, 5000)]:
score, _, _ = score_pr(files, adds, dels, False, None)
assert_true(1 <= score <= 10, f"Score {score} out of range")
@test("estimated minutes exist for all scores")
def _():
for s in range(1, 11):
assert_true(s in TIME_PER_POINT, f"Missing time for score {s}")
print(f"\n=== Results: {PASS} passed, {FAIL} failed ===")
sys.exit(0 if FAIL == 0 else 1)


@@ -1,101 +0,0 @@
#!/usr/bin/env python3
"""
Smoke tests for test_generation_orchestrator.py
"""
import json
import subprocess
import sys
import tempfile
from pathlib import Path
# Add scripts dir to path for imports (test_generation_orchestrator.py lives in scripts/)
SCRIPT_DIR = Path(__file__).resolve().parent
sys.path.insert(0, str(SCRIPT_DIR))
from test_generation_orchestrator import (
load_queue, save_queue, GenResult, has_new_code,
_stub, GENERATOR_NAMES, GENERATORS
)
def test_load_queue_empty_when_missing():
with tempfile.TemporaryDirectory() as tmp:
p = Path(tmp) / "nofile.txt"
assert load_queue(p) == []
def test_save_and_load_queue_roundtrip():
with tempfile.TemporaryDirectory() as tmp:
p = Path(tmp) / "queue.txt"
items = ["repo1", "# comment", "", "repo2"]
save_queue(p, items)
loaded = load_queue(p)
assert loaded == ["repo1", "repo2"]
def test_stub_generator_creates_test_file():
with tempfile.TemporaryDirectory() as tmp:
repo = Path(tmp) / "repo"
repo.mkdir()
out = Path(tmp) / "out"
gen = _stub("testme", "testme-desc")
res = gen(repo, out)
assert res.tests_written == 1
assert res.pass_rate == 1.0
assert (out / "test_testme_autogenerated.py").exists()
content = (out / "test_testme_autogenerated.py").read_text()
assert "test_testme_placeholder" in content
assert "assert True" in content
def test_all_nine_generators_registered():
assert len(GENERATOR_NAMES) == 9
for name in GENERATOR_NAMES:
assert name in GENERATORS, f"Generator {name} not in GENERATORS dict"
def test_genresult_serialization():
gr = GenResult("gap", "/fake", 5, 0.8, coverage_delta=2.5, error=None)
d = gr.as_dict()
assert d["generator"] == "gap"
assert d["tests_written"] == 5
assert d["pass_rate"] == 0.8
assert d["coverage_delta"] == 2.5
assert "timestamp" in d
def test_has_new_code_when_no_last():
with tempfile.TemporaryDirectory() as tmp:
repo = Path(tmp) / "repo"
repo.mkdir()
# initialize git
subprocess.run(["git", "init"], cwd=repo, check=True, capture_output=True)
(repo / "file.txt").write_text("hello")
subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True)
subprocess.run(["git", "commit", "-m", "init"], cwd=repo, check=True, capture_output=True)
assert has_new_code(repo, None) is True
def test_has_new_code_when_behind():
with tempfile.TemporaryDirectory() as tmp:
repo = Path(tmp) / "repo"
repo.mkdir()
subprocess.run(["git", "init"], cwd=repo, check=True, capture_output=True)
(repo / "f1").write_text("a")
subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True)
subprocess.run(["git", "commit", "-m", "first"], cwd=repo, check=True, capture_output=True)
first_sha = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True, cwd=repo).stdout.strip()
# make a new commit
(repo / "f2").write_text("b")
subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True)
subprocess.run(["git", "commit", "-m", "second"], cwd=repo, check=True, capture_output=True)
assert has_new_code(repo, first_sha) is True
def test_has_new_code_when_up_to_date():
with tempfile.TemporaryDirectory() as tmp:
repo = Path(tmp) / "repo"
repo.mkdir()
subprocess.run(["git", "init"], cwd=repo, check=True, capture_output=True)
(repo / "f").write_text("a")
subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True)
subprocess.run(["git", "commit", "-m", "c"], cwd=repo, check=True, capture_output=True)
cur = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True, cwd=repo).stdout.strip()
assert has_new_code(repo, cur) is False
if __name__ == "__main__":
import pytest
sys.exit(pytest.main([__file__, "-v"]))