Compare commits
1 commit: step35/88...step35/173 (2f57c2b653)

scripts/progress_tracker.py — new file, 477 lines
@@ -0,0 +1,477 @@
#!/usr/bin/env python3
"""
Progress Tracker — Pipeline 10.8
Track improvement metrics over time. Are we getting better?

Metrics tracked:
1. Test coverage — % of Python functions with associated tests (test:source file ratio + line coverage if available)
2. Doc coverage — % of Python callables with docstrings (AST-based)
3. Issue close rate — closed / (opened + closed) per week (Gitea API)
4. Dep freshness — % of requirements pinned vs outdated (pip list --outdated)

Output:
- metrics/snapshots/YYYY-MM-DD.json — one snapshot per run
- metrics/TRENDS.md — cumulative markdown table
- stdout summary

Usage:
    python3 scripts/progress_tracker.py
    python3 scripts/progress_tracker.py --json
    python3 scripts/progress_tracker.py --output metrics/TRENDS.md

Weekly cron:
    0 9 * * 1 cd /path/to/compounding-intelligence && python3 scripts/progress_tracker.py
"""

import argparse
import json
import os
import re
import subprocess
import sys
from collections import defaultdict
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

# ── Configuration ──────────────────────────────────────────────────────────

SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent
METRICS_DIR = REPO_ROOT / "metrics"
SNAPSHOTS_DIR = METRICS_DIR / "snapshots"
TOKEN_PATH = Path.home() / ".config" / "gitea" / "token"
GITEA_API_BASE = "https://forge.alexanderwhitestone.com/api/v1"
ORG = "Timmy_Foundation"

# Ensure paths exist
SNAPSHOTS_DIR.mkdir(parents=True, exist_ok=True)


# ── Helpers ─────────────────────────────────────────────────────────────────

def run_cmd(cmd: List[str], cwd: Path = REPO_ROOT) -> str:
    """Run a command and return its stdout, or "" on failure (stderr is captured and discarded)."""
    result = subprocess.run(
        cmd, capture_output=True, text=True, cwd=cwd, timeout=30
    )
    if result.returncode != 0:
        return ""
    return result.stdout.strip()

def slugify_date(dt: datetime) -> str:
    return dt.strftime("%Y-%m-%d")


def snapshot_path(dt: datetime) -> Path:
    return SNAPSHOTS_DIR / f"{slugify_date(dt)}.json"


def load_snapshots() -> List[Dict[str, Any]]:
    """Load all existing snapshots sorted by date."""
    snapshots = []
    for f in sorted(SNAPSHOTS_DIR.glob("*.json")):
        try:
            with open(f) as fp:
                snapshots.append(json.load(fp))
        except Exception:
            continue
    return snapshots


# ── Metric 1: Test Coverage ─────────────────────────────────────────────────

def collect_test_coverage() -> Dict[str, Any]:
    """
    Compute test coverage metrics.
    Counts test_*.py and *_test.py files vs non-test .py source files.
    Also attempts to read .coverage if present.
    """
    all_py = list(REPO_ROOT.rglob("*.py"))

    source_files = []
    test_files = []

    for p in all_py:
        try:
            rel_parts = p.relative_to(REPO_ROOT).parts
        except ValueError:
            continue

        # Skip hidden/cache/temp dirs (check only relative parts)
        if any(part.startswith('.') or part.startswith('__') for part in rel_parts):
            continue
        if any(part in ('node_modules', 'venv', '.venv', 'env', '.pytest_cache') for part in rel_parts):
            continue

        if p.name.startswith("test_") or p.name.endswith("_test.py"):
            test_files.append(p)
        else:
            source_files.append(p)

    # Try to get line coverage from .coverage
    coverage_percent = None
    coverage_tool = None
    coverage_file = REPO_ROOT / ".coverage"
    if coverage_file.exists():
        try:
            import coverage  # type: ignore
            # Use coverage API if available
            cov = coverage.Coverage(data_file=str(coverage_file))
            cov.load()
            total = cov.report()
            coverage_percent = total if isinstance(total, float) else None
            coverage_tool = "coverage"
        except Exception:
            # Fallback: parse `coverage report` output
            out = run_cmd(["coverage", "report", "--skip-empty"])
            if out:
                for line in out.splitlines():
                    if "TOTAL" in line:
                        parts = line.split()
                        if len(parts) >= 2:
                            try:
                                coverage_percent = float(parts[-1].rstrip('%'))
                                coverage_tool = "coverage"
                                break
                            except ValueError:
                                pass

    return {
        "test_files": len(test_files),
        "source_files": len(source_files),
        "test_to_source_ratio": round(len(test_files) / len(source_files), 4) if source_files else 0.0,
        "coverage_tool": coverage_tool,
        "coverage_percent": coverage_percent,
    }


# ── Metric 2: Doc Coverage ──────────────────────────────────────────────────

def collect_doc_coverage() -> Dict[str, Any]:
    """
    Check AST of Python files for docstrings.
    Returns: callables_total, callables_with_doc, doc_coverage_percent
    """
    import ast

    all_py = list(REPO_ROOT.rglob("*.py"))

    source_files = []
    test_files = []

    for p in all_py:
        try:
            rel_parts = p.relative_to(REPO_ROOT).parts
        except ValueError:
            continue

        if any(part.startswith('.') or part.startswith('__') for part in rel_parts):
            continue
        if any(part in ('node_modules', 'venv', '.venv', 'env', '.pytest_cache') for part in rel_parts):
            continue

        if p.name.startswith("test_") or p.name.endswith("_test.py"):
            test_files.append(p)
        else:
            source_files.append(p)

    total_callables = 0
    with_doc = 0

    for p in source_files + test_files:
        try:
            with open(p) as f:
                tree = ast.parse(f.read(), filename=str(p))
            for node in ast.walk(tree):
                if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
                    total_callables += 1
                    doc = ast.get_docstring(node)
                    if doc and doc.strip():
                        with_doc += 1
        except Exception:
            continue

    return {
        "callables_total": total_callables,
        "callables_with_doc": with_doc,
        "doc_coverage_percent": round((with_doc / total_callables * 100) if total_callables else 0.0, 2),
    }


# ── Metric 3: Issue Close Rate ──────────────────────────────────────────────

def collect_issue_metrics() -> Dict[str, Any]:
    """
    Use Gitea API to get issue open/close stats for the last 7 days.
    Returns counts and close rate.
    """
    token = ""
    if TOKEN_PATH.exists():
        token = TOKEN_PATH.read_text().strip()

    if not token:
        return {
            "opened_last_7d": None,
            "closed_last_7d": None,
            "close_rate": None,
            "total_open": None,
            "note": "Gitea token not available"
        }

    try:
        from urllib.request import Request, urlopen
        from urllib.error import HTTPError, URLError
    except ImportError:
        return {"error": "urllib not available"}

    now = datetime.now(timezone.utc)
    week_ago = now - timedelta(days=7)
    since = week_ago.strftime("%Y-%m-%d")

    headers = {"Authorization": f"token {token}"}
    base_url = f"{GITEA_API_BASE}/repos/{ORG}/compounding-intelligence/issues"

    try:
        # Get issues from last 7 days
        url = f"{base_url}?state=all&since={since}&per_page=100"
        req = Request(url, headers=headers)
        with urlopen(req, timeout=15) as resp:
            issues = json.loads(resp.read())

        opened = 0
        closed = 0
        for issue in issues:
            created = datetime.fromisoformat(issue["created_at"].replace("Z", "+00:00"))
            if created >= week_ago:
                opened += 1
            if issue.get("state") == "closed":
                closed_at_str = issue.get("closed_at")
                if closed_at_str:
                    closed_at = datetime.fromisoformat(closed_at_str.replace("Z", "+00:00"))
                    if closed_at >= week_ago:
                        closed += 1

        # Total open issues
        req2 = Request(f"{base_url}?state=open&per_page=1", headers=headers)
        with urlopen(req2, timeout=15) as resp:
            total_open = int(resp.headers.get("X-Total-Count", "0"))

        total = opened + closed
        close_rate = closed / total if total > 0 else 0.0

        return {
            "opened_last_7d": opened,
            "closed_last_7d": closed,
            "close_rate": round(close_rate, 4),
            "total_open": total_open,
        }
    except Exception as e:
        return {
            "opened_last_7d": None,
            "closed_last_7d": None,
            "close_rate": None,
            "total_open": None,
            "error": str(e)[:100],
            "note": "Gitea API unavailable"
        }


# ── Metric 4: Dependency Freshness ─────────────────────────────────────────

def collect_dep_freshness() -> Dict[str, Any]:
    """
    Check requirements.txt for outdated dependencies using pip list --outdated.
    Returns freshness percentage and outdated list.
    """
    req_file = REPO_ROOT / "requirements.txt"
    if not req_file.exists():
        return {
            "total_deps": 0,
            "outdated_deps": 0,
            "freshness_percent": 100.0,
            "outdated_list": [],
            "note": "requirements.txt not found"
        }

    # Parse requirements (very simple: take name before comparison op)
    reqs = []
    with open(req_file) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            m = re.match(r"^([a-zA-Z0-9_.-]+)", line)
            if m:
                reqs.append(m.group(1))

    if not reqs:
        return {"total_deps": 0, "outdated_deps": 0, "freshness_percent": 100.0, "outdated_list": []}

    # Query pip for outdated packages (may fail if pip not available)
    outdated_names = set()
    try:
        out = run_cmd(["pip", "list", "--outdated", "--format=json"])
        if out:
            data = json.loads(out)
            outdated_names = {item["name"].lower() for item in data}
    except Exception:
        pass

    outdated = [p for p in reqs if p.lower() in outdated_names]
    total = len(reqs)
    outdated_count = len(outdated)
    freshness = round(((total - outdated_count) / total * 100) if total else 100.0, 1)

    return {
        "total_deps": total,
        "outdated_deps": outdated_count,
        "freshness_percent": freshness,
        "outdated_list": outdated,
    }


# ── Snapshot & Trends ───────────────────────────────────────────────────────

def take_snapshot() -> Dict[str, Any]:
    """Collect all metrics and return a snapshot dict."""
    now = datetime.now(timezone.utc)
    test_cov = collect_test_coverage()
    doc_cov = collect_doc_coverage()
    issues = collect_issue_metrics()
    deps = collect_dep_freshness()

    return {
        "timestamp": now.isoformat(),
        "date": slugify_date(now),
        "metrics": {
            "test_coverage": test_cov,
            "doc_coverage": doc_cov,
            "issues": issues,
            "dependencies": deps,
        }
    }


def save_snapshot(snapshot: Dict[str, Any]) -> Path:
    path = snapshot_path(datetime.fromisoformat(snapshot["timestamp"]))
    with open(path, "w") as f:
        json.dump(snapshot, f, indent=2)
    return path


def generate_trends(snapshots: List[Dict[str, Any]], output_path: Optional[Path] = None) -> str:
    """Generate markdown trends table; optionally write to file."""
    if not snapshots:
        msg = "# Progress Tracker — Trends\n\nNo snapshots yet. Run `progress_tracker.py` to create the first snapshot."
        if output_path:
            output_path.parent.mkdir(parents=True, exist_ok=True)
            output_path.write_text(msg)
        return msg

    lines = [
        "# Progress Tracker — Trends",
        f"\nLast updated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}",
        f"\nSnapshots: {len(snapshots)}\n",
        "| Date | Test Files → Source | Doc Coverage | Issues Closed/Opened (7d) | Dep Freshness |",
        "|------|---------------------|--------------|---------------------------|---------------|",
    ]
    for snap in reversed(snapshots):  # newest first
        date = snap["date"]
        m = snap["metrics"]
        tc = m["test_coverage"]
        test_str = f"{tc['test_files']}/{tc['source_files']} ({tc['test_to_source_ratio']:.2f})"
        doc_str = f"{m['doc_coverage']['doc_coverage_percent']:.1f}%"
        issues_str = f"{m['issues'].get('closed_last_7d','-')}/{m['issues'].get('opened_last_7d','-')}"
        dep_str = f"{m['dependencies'].get('freshness_percent','?')}%"
        lines.append(f"| {date} | {test_str} | {doc_str} | {issues_str} | {dep_str} |")

    # Current snapshot summary
    cur = snapshots[-1]
    cm = cur["metrics"]
    lines.append(f"\n## Current Snapshot ({cur['date']})\n")
    tc = cm["test_coverage"]
    cov_line = f"- Test coverage: {tc['coverage_percent']:.1f}% (via {tc['coverage_tool']})\n" if tc["coverage_percent"] is not None else "- Test coverage: (pytest-cov not configured)\n"
    lines.append(cov_line)
    lines.append(f"- Doc coverage: {cm['doc_coverage']['doc_coverage_percent']:.1f}%")
    im = cm["issues"]
    if im.get("close_rate") is not None:
        lines.append(f"- Issue close rate (7d): {im['close_rate']*100:.1f}% ({im['closed_last_7d']} closed, {im['opened_last_7d']} opened)")
    else:
        lines.append(f"- Issue metrics: {im.get('note','unavailable')}")
    dd = cm["dependencies"]
    lines.append(f"- Dep freshness: {dd.get('freshness_percent','?')}% ({dd.get('outdated_deps',0)}/{dd.get('total_deps',0)} deps outdated)")
    if dd.get('outdated_list'):
        lines.append(f"  Outdated: {', '.join(dd['outdated_list'][:5])}")
    content = "\n".join(lines) + "\n"

    if output_path:
        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_text(content)

    return content


# ── Main ─────────────────────────────────────────────────────────────────────

def main() -> int:
    parser = argparse.ArgumentParser(description="Progress Tracker — Pipeline 10.8")
    parser.add_argument("--json", action="store_true", help="Emit snapshot as JSON only")
    parser.add_argument("--output", type=Path, default=METRICS_DIR / "TRENDS.md",
                        help="Write trends markdown to this file")
    args = parser.parse_args()

    snapshot = take_snapshot()
    all_snapshots = load_snapshots()
    path_written = save_snapshot(snapshot)

    if args.json:
        print(json.dumps(snapshot, indent=2))
        return 0

    trends = generate_trends(all_snapshots + [snapshot], output_path=args.output)

    # Print current snapshot summary
    print(f"Snapshot saved: {path_written}\n")
    print(f"Progress Tracker — {snapshot['date']}")
    print("=" * 50)

    m = snapshot["metrics"]
    tc = m["test_coverage"]
    print(f"Test files: {tc['test_files']} | Source files: {tc['source_files']} | Ratio: {tc['test_to_source_ratio']:.3f}")
    if tc["coverage_percent"] is not None:
        print(f"Line coverage: {tc['coverage_percent']:.1f}% (via {tc['coverage_tool']})")
    else:
        print("Line coverage: (not available — run `pytest --cov`)")

    print()
    dc = m["doc_coverage"]
    print(f"Callables with docstrings: {dc['callables_with_doc']}/{dc['callables_total']} ({dc['doc_coverage_percent']:.1f}%)")

    print()
    im = m["issues"]
    if im.get("close_rate") is not None:
        print(f"Issues (7d): {im['closed_last_7d']} closed / {im['opened_last_7d']} opened → close rate: {im['close_rate']*100:.1f}%")
        print(f"Total open: {im['total_open']}")
    else:
        print(f"Issues: {im.get('note','unavailable')}")

    print()
    dd = m["dependencies"]
    print(f"Dependencies: {dd.get('total_deps',0)} total, {dd.get('outdated_deps',0)} outdated")
    if dd.get('outdated_list'):
        shown = dd['outdated_list'][:5]
        print(f"Outdated: {', '.join(shown)}" + ("..." if len(dd['outdated_list']) > 5 else ""))

    print(f"\nTrends written to: {args.output}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
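The snapshots this script writes are plain JSON, so other tooling can consume them directly. A minimal sketch of reading one back (paths and key names come from the script above; this assumes the tracker has run at least once):

import json
from pathlib import Path

# Read the most recent snapshot written by progress_tracker.py
snapshots = sorted(Path("metrics/snapshots").glob("*.json"))
if snapshots:
    snap = json.loads(snapshots[-1].read_text())
    doc = snap["metrics"]["doc_coverage"]
    print(f"{snap['date']}: doc coverage {doc['doc_coverage_percent']}% "
          f"({doc['callables_with_doc']}/{doc['callables_total']} callables)")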
@@ -73,14 +73,12 @@ Binary files a/img.png and b/img.png differ


def test_empty():
    """Verifies behavior with empty or None input."""
    a = DiffAnalyzer()
    s = a.analyze("")
    assert s.total_files_changed == 0
    print("PASS: test_empty")

def test_addition():
    """Verifies addition logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_ADD)
    assert s.total_files_changed == 1
@@ -91,7 +89,6 @@ def test_addition():
    print("PASS: test_addition")

def test_deletion():
    """Verifies deletion logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_DELETE)
    assert s.total_deleted == 2
@@ -100,7 +97,6 @@ def test_deletion():
    print("PASS: test_deletion")

def test_modification():
    """Verifies modification logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_MODIFY)
    assert s.total_added == 2
@@ -109,7 +105,6 @@ def test_modification():
    print("PASS: test_modification")

def test_rename():
    """Verifies rename logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_RENAME)
    assert s.renamed_files == 1
@@ -119,7 +114,6 @@ def test_rename():
    print("PASS: test_rename")

def test_multiple_files():
    """Verifies multiple files logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_MULTI)
    assert s.total_files_changed == 2
@@ -127,7 +121,6 @@ def test_multiple_files():
    print("PASS: test_multiple_files")

def test_binary():
    """Verifies binary logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_BINARY)
    assert s.binary_files == 1
@@ -136,7 +129,6 @@ def test_binary():
    print("PASS: test_binary")

def test_to_dict():
    """Verifies to dict logic."""
    a = DiffAnalyzer()
    s = a.analyze(SAMPLE_MODIFY)
    d = s.to_dict()
@@ -146,7 +138,6 @@ def test_to_dict():
    print("PASS: test_to_dict")

def test_context_only():
    """Verifies context only logic."""
    diff = """diff --git a/f.py b/f.py
--- a/f.py
+++ b/f.py
@@ -163,7 +154,6 @@ def test_context_only():
    print("PASS: test_context_only")

def test_multi_hunk():
    """Verifies multi hunk logic."""
    diff = """diff --git a/f.py b/f.py
--- a/f.py
+++ b/f.py
@@ -1,207 +0,0 @@
#!/usr/bin/env python3
"""Test Documentation Generator — adds module and function docstrings to test files.

Reads test files without docstrings and generates:
- Module-level docstring explaining what is being tested
- Function-level docstring explaining what each test verifies
- Inline comments for complex assertions (simple heuristic)

Does not change test logic — only adds documentation.
Processes 20+ test files per run.
"""

import ast
import re
import sys
from pathlib import Path
from typing import List, Tuple


def derive_module_name(test_path: Path) -> str:
    """Derive the script/module name being tested from the test file name."""
    name = test_path.stem
    if name.startswith("test_"):
        name = name[5:]  # strip the leading 'test_' (5 chars)
    mapping = {
        "bootstrapper": "bootstrapper.py",
        "harvester": "harvester.py",
        "diff_analyzer": "diff_analyzer.py",
        "gitea_issue_parser": "gitea_issue_parser.py",
        "harvest_prompt": "harvest_prompt.py",
        "harvest_prompt_comprehensive": "harvest_prompt_comprehensive.py",
        "harvester_pipeline": "harvester_pipeline.py",
        "improvement_proposals": "improvement_proposals.py",
        "knowledge_staleness": "knowledge_staleness_check.py",
        "priority_rebalancer": "priority_rebalancer.py",
        "refactoring_opportunity_finder": "refactoring_opportunity_finder.py",
        "session_pair_harvester": "session_pair_harvester.py",
        "session_reader": "session_reader.py",
        "automation_opportunity_finder": "automation_opportunity_finder.py",
        "dedup": "dedup.py",
        "freshness": "freshness.py",
        "knowledge_gap_identifier": "knowledge_gap_identifier.py",
        "perf_bottleneck_finder": "perf_bottleneck_finder.py",
        "ci_config": "CI configuration",
        "quality_gate": "quality_gate.py",
    }
    base = name.replace("_", " ")
    if name in mapping:
        base = mapping[name].replace(".py", "")
    return base


def count_tests_in_file(content: str) -> int:
    """Count test functions in a Python file."""
    return len(re.findall(r'^def (test_\w+)\s*\(', content, re.MULTILINE))


def infer_test_purpose(func_name: str, func_body: str) -> str:
    """Generate a brief docstring for a test function based on its name and body."""
    name = func_name.replace("test_", "").replace("_", " ")

    if "empty" in name or "none" in name:
        return "Verifies behavior with empty or None input."
    if "parsing" in name or "parse" in name:
        return f"Verifies parsing logic for {name}."
    if "filter" in name:
        return f"Verifies knowledge filtering by {name}."
    if "hash" in name:
        return "Verifies file hash computation correctness."
    if "freshness" in name or "staleness" in name:
        return "Verifies knowledge freshness detection."
    if "error" in name or "exception" in name:
        return f"Verifies error handling for {name}."
    if "boundary" in name or "edge" in name:
        return "Verifies boundary case handling."
    return f"Verifies {name} logic."


def has_module_docstring(content: str) -> bool:
    """Check if file (after shebang) starts with a docstring."""
    lines = content.split('\n')
    start_idx = 1 if lines and lines[0].startswith('#!') else 0
    for line in lines[start_idx:start_idx + 5]:
        stripped = line.strip()
        if stripped.startswith('"""') or stripped.startswith("'''"):
            return True
        if stripped == "" or stripped.startswith('#'):
            continue
        break
    return False


def insert_after_shebang(content: str, insertion: str) -> str:
    """Insert text after the shebang line (if any) and any following blank lines."""
    lines = content.split('\n')
    insert_idx = 0
    if lines and lines[0].startswith('#!'):
        insert_idx = 1
    while insert_idx < len(lines) and lines[insert_idx].strip() == '':
        insert_idx += 1
    new_lines = lines[:insert_idx] + [insertion] + lines[insert_idx:]
    return '\n'.join(new_lines)

def add_function_docstring(content: str, func_lineno: int, docstring: str) -> str:
    """Add a docstring to a function at the given line number (assumes a single-line def)."""
    lines = content.split('\n')
    idx = func_lineno - 1
    indent = re.match(r'^(\s*)', lines[idx]).group(1)
    doc_line = f'{indent}    """{docstring}"""'
    new_lines = lines[:idx + 1] + [doc_line] + lines[idx + 1:]
    return '\n'.join(new_lines)

def generate_module_docstring(test_path: Path) -> str:
    """Generate a module-level docstring for a test file."""
    module = derive_module_name(test_path)
    count = count_tests_in_file(test_path.read_text())
    if count > 0:
        return f"Tests for {module} — {count} tests."
    return f"Tests for {module}."


def process_test_file(test_path: Path, dry_run: bool = False) -> Tuple[bool, List[str]]:
    """Process a single test file, adding missing docstrings. Returns (changed, messages)."""
    content = test_path.read_text()
    original = content
    messages = []

    if not has_module_docstring(content):
        mod_doc = generate_module_docstring(test_path)
        content = insert_after_shebang(content, f'''"""{mod_doc}"""''')
        messages.append(f"Added module docstring: {mod_doc}")

    try:
        tree = ast.parse(content)
    except SyntaxError as e:
        messages.append(f"SKIP (syntax error): {e}")
        return False, messages

    funcs_to_doc: List[Tuple[int, str, str]] = []

    for node in ast.walk(tree):
        if isinstance(node, ast.FunctionDef) and node.name.startswith('test_'):
            has_docstring = (
                len(node.body) > 0 and
                isinstance(node.body[0], ast.Expr) and
                isinstance(node.body[0].value, ast.Constant) and
                isinstance(node.body[0].value.value, str)
            )
            if not has_docstring:
                func_body = ast.get_source_segment(content, node) or ""
                doc = infer_test_purpose(node.name, func_body)
                funcs_to_doc.append((node.lineno, node.name, doc))
    # Insert bottom-up so earlier line numbers stay valid as lines are added
    funcs_to_doc.sort(key=lambda x: -x[0])
    for lineno, func_name, doc in funcs_to_doc:
        content = add_function_docstring(content, lineno, doc)
        messages.append(f"Added docstring to {func_name}: {doc}")
    changed = content != original
    if changed and not dry_run:
        test_path.write_text(content)

    return changed, messages


def find_test_files(root: Path, max_files: int = 25) -> List[Path]:
    """Find test files under scripts/ and tests/ directories."""
    test_files = []
    for subdir in [root / "scripts", root / "tests"]:
        if subdir.exists():
            test_files.extend(subdir.glob("test_*.py"))
    test_files.sort()
    return test_files[:max_files]


def main():
    import argparse
    parser = argparse.ArgumentParser(description="Generate documentation for test files")
    parser.add_argument("--dry-run", action="store_true", help="Show changes without writing")
    parser.add_argument("--root", type=Path, default=Path.cwd(),
                        help="Repo root (default: current directory)")
    parser.add_argument("--limit", type=int, default=25,
                        help="Max files to process per run (handles 20+ requirement)")
    args = parser.parse_args()

    root = args.root
    test_files = find_test_files(root, args.limit)
    print(f"Found {len(test_files)} test files to process (limit={args.limit}):")

    total_changed = 0
    for tf in test_files:
        changed, msgs = process_test_file(tf, dry_run=args.dry_run)
        if changed:
            total_changed += 1
        status = "CHANGED" if changed else "OK"
        print(f"  [{status}] {tf.relative_to(root)}")
        for msg in msgs:
            print(f"    {msg}")

    print(f"\nCompleted: {total_changed} file(s) modified, {len(test_files) - total_changed} already up-to-date.")
    return 0


if __name__ == "__main__":
    sys.exit(main())
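For a concrete sense of what the generator produced, here is a before/after sketch of one of the tests shown earlier; the inserted line is exactly what infer_test_purpose derives from the name test_empty (the module docstring it also adds is omitted here):

# Before
def test_empty():
    a = DiffAnalyzer()
    s = a.analyze("")
    assert s.total_files_changed == 0

# After
def test_empty():
    """Verifies behavior with empty or None input."""
    a = DiffAnalyzer()
    s = a.analyze("")
    assert s.total_files_changed == 0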
@@ -14,7 +14,6 @@ parse_issue_body = mod.parse_issue_body


def test_basic_parsing():
    """Verifies parsing logic for basic parsing."""
    body = """## Context

This is the background info.
@@ -41,7 +40,6 @@ Some description.


def test_numbered_criteria():
    """Verifies numbered criteria logic."""
    body = """## Acceptance Criteria

1. First item
@@ -55,7 +53,6 @@ def test_numbered_criteria():


def test_epic_ref_from_body():
    """Verifies epic ref from body logic."""
    body = "Closes #123\n\nSome description."
    result = parse_issue_body(body)
    assert result["epic_ref"] == 123
@@ -63,7 +60,6 @@ def test_epic_ref_from_body():


def test_empty_body():
    """Verifies behavior with empty or None input."""
    result = parse_issue_body("")
    assert result["criteria"] == []
    assert result["context"] == ""
@@ -72,7 +68,6 @@ def test_empty_body():


def test_no_sections():
    """Verifies no sections logic."""
    body = "Just a plain issue body with no headings."
    result = parse_issue_body(body)
    assert result["context"] == "Just a plain issue body with no headings."
@@ -80,7 +75,6 @@ def test_no_sections():


def test_multiple_sections():
    """Verifies multiple sections logic."""
    body = """## Problem

Something is broken.
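The hunks above show only fragments of each test, but together they pin down parse_issue_body's contract: "Closes #N" yields epic_ref, an unstructured body becomes the context, and an empty body yields empty defaults. A minimal sketch consistent with the visible assertions (an assumption — the real gitea_issue_parser.py may differ in details):

import re

def parse_issue_body(body):
    # Sketch only: "Closes #N" -> epic_ref, "## ..." sections -> context/criteria
    result = {"context": "", "criteria": [], "epic_ref": None}
    if not body:
        return result
    m = re.search(r"Closes #(\d+)", body)
    if m:
        result["epic_ref"] = int(m.group(1))
    sections = re.split(r"^## +", body, flags=re.MULTILINE)
    if len(sections) == 1:  # no headings: the whole body is context
        result["context"] = body.strip()
        return result
    for sec in sections[1:]:
        title, _, text = sec.partition("\n")
        if title.strip().lower() == "context":
            result["context"] = text.strip()
        elif "criteria" in title.lower():
            for line in text.splitlines():
                line = line.strip()
                if re.match(r"([-*]|\d+\.)\s+", line):
                    result["criteria"].append(re.sub(r"^([-*]|\d+\.)\s+", "", line))
    return result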
@@ -46,27 +46,22 @@ def check_test_sessions():
    return True, f"{len(files)} valid sessions"

def test_prompt_structure():
    """Verifies prompt structure logic."""
    passed, msg = check_prompt_structure()
    assert passed, msg

def test_confidence_scoring():
    """Verifies confidence scoring logic."""
    passed, msg = check_confidence_scoring()
    assert passed, msg

def test_example_quality():
    """Verifies example quality logic."""
    passed, msg = check_example_quality()
    assert passed, msg

def test_constraint_coverage():
    """Verifies constraint coverage logic."""
    passed, msg = check_constraint_coverage()
    assert passed, msg

def test_test_sessions():
    """Verifies sessions logic."""
    passed, msg = check_test_sessions()
    assert passed, msg

@@ -47,14 +47,12 @@ def _make_tool_calls(repeats):
# ── Tests ─────────────────────────────────────────────────────

def test_empty_sessions():
    """Verifies behavior with empty or None input."""
    patterns = analyze_sessions([])
    assert patterns == []
    print("PASS: test_empty_sessions")


def test_no_patterns_on_clean_sessions():
    """Verifies no patterns on clean sessions logic."""
    sessions = [
        _make_session("s1", tool_calls=[{"tool": "read_file", "latency_ms": 50}]),
        _make_session("s2", tool_calls=[{"tool": "write_file", "latency_ms": 80}]),
@@ -17,7 +17,6 @@ compute_file_hash = mod.compute_file_hash


def test_fresh_entry():
    """Verifies fresh entry logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
@@ -32,7 +31,6 @@ def test_fresh_entry():


def test_stale_entry():
    """Verifies stale entry logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
@@ -49,7 +47,6 @@ def test_stale_entry():


def test_missing_source():
    """Verifies missing source logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
@@ -60,7 +57,6 @@ def test_missing_source():


def test_no_hash():
    """Verifies file hash computation correctness."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
@@ -75,7 +71,6 @@ def test_no_hash():


def test_no_source_field():
    """Verifies no source field logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
@@ -86,7 +81,6 @@ def test_no_source_field():


def test_fix_hashes():
    """Verifies file hash computation correctness."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
@@ -104,7 +98,6 @@ def test_fix_hashes():


def test_empty_index():
    """Verifies behavior with empty or None input."""
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
@@ -115,7 +108,6 @@ def test_empty_index():


def test_compute_hash_nonexistent():
    """Verifies behavior with empty or None input."""
    h = compute_file_hash("/nonexistent/path/file.py")
    assert h is None
    print("PASS: test_compute_hash_nonexistent")
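The freshness tests imply a small hashing contract: compute_file_hash returns None for a missing path and a stable digest otherwise. A minimal sketch of that contract (hedged — the real module may use a different digest or chunked reads):

import hashlib
import os

def compute_file_hash(path):
    # Missing source file -> None (matches test_compute_hash_nonexistent)
    if not os.path.isfile(path):
        return None
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()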
@@ -11,7 +11,6 @@ from session_pair_harvester import extract_pairs_from_session, deduplicate_pairs


def test_basic_extraction():
    """Verifies basic extraction logic."""
    session = {
        "id": "test_001",
        "model": "test-model",
@@ -30,7 +29,6 @@ def test_basic_extraction():


def test_filters_short_responses():
    """Verifies knowledge filtering by filters short responses."""
    session = {
        "id": "test_002",
        "model": "test",
@@ -45,7 +43,6 @@ def test_filters_short_responses():


def test_skips_tool_results():
    """Verifies skips tool results logic."""
    session = {
        "id": "test_003",
        "model": "test",
@@ -60,7 +57,6 @@ def test_skips_tool_results():


def test_deduplication():
    """Verifies deduplication logic."""
    pairs = [
        {"terse": "What is X?", "rich": "X is Y.", "source": "s1", "model": "m"},
        {"terse": "What is X?", "rich": "X is Y.", "source": "s2", "model": "m"},
@@ -72,7 +68,6 @@ def test_deduplication():


def test_ratio_filter():
    """Verifies knowledge filtering by ratio filter."""
    session = {
        "id": "test_005",
        "model": "test",
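Likewise, test_deduplication pins down deduplicate_pairs: two pairs with identical terse/rich text collapse to one even when their sources differ. One plausible reading (a sketch — the real session_pair_harvester.py may key on more fields):

def deduplicate_pairs(pairs):
    # Keep the first occurrence of each (terse, rich) pair
    seen = set()
    unique = []
    for pair in pairs:
        key = (pair["terse"], pair["rich"])
        if key not in seen:
            seen.add(key)
            unique.append(pair)
    return unique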
@@ -1,16 +1,13 @@
"""Tests for CI configuration — 2 tests."""
from pathlib import Path


def test_requirements_makefile_and_workflow_exist() -> None:
    """Verifies requirements makefile and workflow exist logic."""
    assert Path("requirements.txt").exists()
    assert Path("Makefile").exists()
    assert Path(".gitea/workflows/test.yml").exists()


def test_ci_workflow_runs_project_test_command() -> None:
    """Verifies ci workflow runs project command logic."""
    workflow = Path(".gitea/workflows/test.yml").read_text(encoding="utf-8")
    requirements = Path("requirements.txt").read_text(encoding="utf-8")
    makefile = Path("Makefile").read_text(encoding="utf-8")
@@ -22,34 +22,28 @@ from dedup import (

class TestNormalize:
    def test_lowercases(self):
        """Verifies lowercases logic."""
        assert normalize_text("Hello World") == "hello world"

    def test_collapses_whitespace(self):
        """Verifies collapses whitespace logic."""
        assert normalize_text("  hello   world  ") == "hello world"

    def test_strips(self):
        """Verifies strips logic."""
        assert normalize_text("  text  ") == "text"


class TestContentHash:
    def test_deterministic(self):
        """Verifies deterministic logic."""
        h1 = content_hash("Hello World")
        h2 = content_hash("hello world")
        h3 = content_hash("  Hello World  ")
        assert h1 == h2 == h3

    def test_different_texts(self):
        """Verifies different texts logic."""
        h1 = content_hash("Hello")
        h2 = content_hash("World")
        assert h1 != h2

    def test_returns_hex(self):
        """Verifies returns hex logic."""
        h = content_hash("test")
        assert len(h) == 64  # SHA256
        assert all(c in '0123456789abcdef' for c in h)
@@ -57,21 +51,18 @@ class TestContentHash:

class TestTokenize:
    def test_extracts_words(self):
        """Verifies extracts words logic."""
        tokens = tokenize("Hello World Test")
        assert "hello" in tokens
        assert "world" in tokens
        assert "test" in tokens

    def test_skips_short_words(self):
        """Verifies skips short words logic."""
        tokens = tokenize("a to is the hello")
        assert "a" not in tokens
        assert "to" not in tokens
        assert "hello" in tokens

    def test_returns_set(self):
        """Verifies returns set logic."""
        tokens = tokenize("hello hello world")
        assert isinstance(tokens, set)
        assert len(tokens) == 2
@@ -79,25 +70,20 @@ class TestTokenize:

class TestTokenSimilarity:
    def test_identical(self):
        """Verifies identical logic."""
        assert token_similarity("hello world", "hello world") == 1.0

    def test_no_overlap(self):
        """Verifies no overlap logic."""
        assert token_similarity("alpha beta", "gamma delta") == 0.0

    def test_partial_overlap(self):
        """Verifies partial overlap logic."""
        sim = token_similarity("hello world test", "hello universe test")
        assert 0.3 < sim < 0.7

    def test_empty(self):
        """Verifies behavior with empty or None input."""
        assert token_similarity("", "hello") == 0.0
        assert token_similarity("hello", "") == 0.0

    def test_symmetric(self):
        """Verifies symmetric logic."""
        a = "hello world test"
        b = "hello universe test"
        assert token_similarity(a, b) == token_similarity(b, a)
@@ -105,26 +91,22 @@ class TestTokenSimilarity:

class TestQualityScore:
    def test_high_confidence(self):
        """Verifies high confidence logic."""
        fact = {"confidence": 0.95, "source_count": 5, "tags": ["test"], "related": ["x"]}
        score = quality_score(fact)
        assert score > 0.7

    def test_low_confidence(self):
        """Verifies low confidence logic."""
        fact = {"confidence": 0.3, "source_count": 1}
        score = quality_score(fact)
        assert score < 0.5

    def test_defaults(self):
        """Verifies defaults logic."""
        score = quality_score({})
        assert 0 < score < 1


class TestMergeFacts:
    def test_merges_tags(self):
        """Verifies merges tags logic."""
        keep = {"id": "a", "fact": "test", "tags": ["git"], "confidence": 0.9}
        drop = {"id": "b", "fact": "test", "tags": ["python"], "confidence": 0.8}
        merged = merge_facts(keep, drop)
@@ -132,21 +114,18 @@ class TestMergeFacts:
        assert "python" in merged["tags"]

    def test_merges_source_count(self):
        """Verifies merges source count logic."""
        keep = {"id": "a", "fact": "test", "source_count": 3}
        drop = {"id": "b", "fact": "test", "source_count": 2}
        merged = merge_facts(keep, drop)
        assert merged["source_count"] == 5

    def test_keeps_higher_confidence(self):
        """Verifies keeps higher confidence logic."""
        keep = {"id": "a", "fact": "test", "confidence": 0.7}
        drop = {"id": "b", "fact": "test", "confidence": 0.9}
        merged = merge_facts(keep, drop)
        assert merged["confidence"] == 0.9

    def test_tracks_merged_from(self):
        """Verifies tracks merged from logic."""
        keep = {"id": "a", "fact": "test"}
        drop = {"id": "b", "fact": "test"}
        merged = merge_facts(keep, drop)
@@ -155,7 +134,6 @@ class TestMergeFacts:

class TestDedupFacts:
    def test_removes_exact_dupes(self):
        """Verifies removes exact dupes logic."""
        facts = [
            {"id": "1", "fact": "Always use git rebase"},
            {"id": "2", "fact": "Always use git rebase"},  # exact dupe
@@ -166,7 +144,6 @@ class TestDedupFacts:
        assert stats["unique"] == 2

    def test_removes_near_dupes(self):
        """Verifies removes near dupes logic."""
        facts = [
            {"id": "1", "fact": "Always check logs before deploying to production server"},
            {"id": "2", "fact": "Always check logs before deploying to production environment"},
@@ -177,7 +154,6 @@ class TestDedupFacts:
        assert stats["unique"] == 2

    def test_preserves_unique(self):
        """Verifies preserves unique logic."""
        facts = [
            {"id": "1", "fact": "Use git rebase for clean history"},
            {"id": "2", "fact": "Docker containers should be stateless"},
@@ -188,13 +164,11 @@ class TestDedupFacts:
        assert stats["removed"] == 0

    def test_empty_input(self):
        """Verifies behavior with empty or None input."""
        deduped, stats = dedup_facts([])
        assert stats["total"] == 0
        assert stats["unique"] == 0

    def test_keeps_higher_quality_near_dup(self):
        """Verifies keeps higher quality near dup logic."""
        facts = [
            {"id": "1", "fact": "Check logs before deploying to production server", "confidence": 0.5, "source_count": 1},
            {"id": "2", "fact": "Check logs before deploying to production environment", "confidence": 0.9, "source_count": 5, "tags": ["ops"]},
@@ -205,7 +179,6 @@ class TestDedupFacts:
        assert deduped[0]["confidence"] == 0.9

    def test_dry_run_does_not_modify(self):
        """Verifies dry run does not modify logic."""
        facts = [
            {"id": "1", "fact": "Same text"},
            {"id": "2", "fact": "Same text"},
@@ -218,19 +191,16 @@ class TestDedupFacts:

class TestGenerateTestDuplicates:
    def test_generates_correct_count(self):
        """Verifies generates correct count logic."""
        facts = generate_test_duplicates(20)
        assert len(facts) > 20  # 20 unique + duplicates

    def test_has_exact_dupes(self):
        """Verifies has exact dupes logic."""
        facts = generate_test_duplicates(20)
        hashes = [content_hash(f["fact"]) for f in facts]
        # Should have some duplicate hashes
        assert len(hashes) != len(set(hashes))

    def test_dedup_removes_dupes(self):
        """Verifies dedup removes dupes logic."""
        facts = generate_test_duplicates(20)
        deduped, stats = dedup_facts(facts)
        assert stats["unique"] <= 20
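Taken together, these tests specify the dedup primitives precisely enough to sketch them. A minimal implementation consistent with every assertion above (assuming Jaccard token overlap — the real dedup.py may weight things differently):

import hashlib
import re

def normalize_text(text):
    # Lowercase, collapse runs of whitespace, strip
    return " ".join(text.lower().split())

def content_hash(text):
    # SHA256 of the normalized text: 64 hex chars, case/whitespace insensitive
    return hashlib.sha256(normalize_text(text).encode()).hexdigest()

def tokenize(text):
    # Word set, dropping very short tokens like "a" and "to"
    return {w for w in re.findall(r"[a-z0-9]+", text.lower()) if len(w) > 2}

def token_similarity(a, b):
    # Jaccard overlap: symmetric, 1.0 for identical, 0.0 for disjoint or empty
    ta, tb = tokenize(a), tokenize(b)
    if not ta or not tb:
        return 0.0
    return len(ta & tb) / len(ta | tb)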
@@ -20,7 +20,6 @@ def _make_repo(tmpdir, structure):


def test_undocumented_symbol():
    """Verifies undocumented symbol logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
@@ -32,7 +31,6 @@ def test_undocumented_symbol():


def test_documented_symbol_no_gap():
    """Verifies documented symbol no gap logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
@@ -45,7 +43,6 @@ def test_documented_symbol_no_gap():


def test_untested_module():
    """Verifies untested module logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
@@ -58,7 +55,6 @@ def test_untested_module():


def test_tested_module_no_gap():
    """Verifies tested module no gap logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
@@ -71,7 +67,6 @@ def test_tested_module_no_gap():


def test_missing_implementation():
    """Verifies missing implementation logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "def run():\n    pass\n",
@@ -83,7 +78,6 @@ def test_missing_implementation():


def test_private_symbols_skipped():
    """Verifies private symbols skipped logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "def _internal():\n    pass\ndef public():\n    pass\n",
@@ -96,21 +90,18 @@ def test_private_symbols_skipped():


def test_empty_repo():
    """Verifies behavior with empty or None input."""
    with tempfile.TemporaryDirectory() as tmpdir:
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        assert len(report.gaps) == 0


def test_invalid_path():
    """Verifies invalid path logic."""
    report = KnowledgeGapIdentifier().analyze("/nonexistent/path/xyz")
    assert len(report.gaps) == 1
    assert report.gaps[0].severity == GapSeverity.ERROR


def test_report_summary():
    """Verifies report summary logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "class MyService:\n    def handle(self):\n        pass\n",
@@ -123,7 +114,6 @@ def test_report_summary():


def test_report_to_dict():
    """Verifies report to dict logic."""
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "def hello():\n    pass\n",
@@ -32,7 +32,6 @@ class TestBottleneck:
    """Test Bottleneck dataclass."""

    def test_creation(self):
        """Verifies creation logic."""
        b = Bottleneck(
            category="test",
            name="test_foo",
@@ -49,7 +48,6 @@ class TestBottleneck:
        assert b.line_number is None

    def test_with_location(self):
        """Verifies with location logic."""
        b = Bottleneck(
            category="test",
            name="test_bar",
@@ -63,7 +61,6 @@ class TestBottleneck:
        assert b.line_number == 42

    def test_to_dict(self):
        """Verifies to dict logic."""
        b = Bottleneck("test", "x", 1.0, "info", "y")
        d = b.__dict__
        assert "category" in d
@@ -74,7 +71,6 @@ class TestPerfReport:
    """Test PerfReport dataclass."""

    def test_creation(self):
        """Verifies creation logic."""
        report = PerfReport(
            timestamp="2026-01-01T00:00:00Z",
            repo_path="/tmp/repo"
@@ -84,7 +80,6 @@ class TestPerfReport:
        assert report.summary == {}

    def test_to_dict(self):
        """Verifies to dict logic."""
        report = PerfReport(
            timestamp="2026-01-01T00:00:00Z",
            repo_path="/tmp/repo",
@@ -99,7 +94,6 @@ class TestSeveritySort:
    """Test severity sorting."""

    def test_critical_first(self):
        """Verifies critical first logic."""
        items = [
            Bottleneck("test", "a", 1.0, "info", ""),
            Bottleneck("test", "b", 0.5, "critical", ""),
@@ -111,7 +105,6 @@ class TestSeveritySort:
        assert items[2].severity == "info"

    def test_duration_within_severity(self):
        """Verifies duration within severity logic."""
        items = [
            Bottleneck("test", "slow", 10.0, "warning", ""),
            Bottleneck("test", "fast", 1.0, "warning", ""),
@@ -124,7 +117,6 @@ class TestSlowTestScan:
    """Test slow test pattern scanning."""

    def test_finds_sleep(self, tmp_path):
        """Verifies finds sleep logic."""
        test_file = tmp_path / "test_sleepy.py"
        test_file.write_text(textwrap.dedent('''
            import time
@@ -139,7 +131,6 @@ class TestSlowTestScan:
        assert any("sleep" in b.recommendation.lower() for b in bottlenecks)

    def test_finds_http_calls(self, tmp_path):
        """Verifies finds http calls logic."""
        test_file = tmp_path / "test_http.py"
        test_file.write_text(textwrap.dedent('''
            import requests
@@ -154,7 +145,6 @@ class TestSlowTestScan:
        assert any("HTTP" in b.recommendation or "mock" in b.recommendation.lower() for b in bottlenecks)

    def test_skips_non_test_files(self, tmp_path):
        """Verifies skips non files logic."""
        src_file = tmp_path / "main.py"
        src_file.write_text("import time\ntime.sleep(10)\n")

@@ -162,12 +152,10 @@ class TestSlowTestScan:
        assert len(bottlenecks) == 0

    def test_handles_missing_dir(self):
        """Verifies handles missing dir logic."""
        bottlenecks = find_slow_tests_by_scan("/nonexistent/path")
        assert bottlenecks == []

    def test_file_path_populated(self, tmp_path):
        """Verifies file path populated logic."""
        test_file = tmp_path / "test_example.py"
        test_file.write_text("import time\n\ndef test_it():\n    time.sleep(2)\n")

@@ -181,7 +169,6 @@ class TestBuildArtifacts:
    """Test build artifact analysis."""

    def test_finds_large_node_modules(self, tmp_path):
        """Verifies finds large node modules logic."""
        nm = tmp_path / "node_modules"
        nm.mkdir()
        # Create a file > 10MB
@@ -193,7 +180,6 @@ class TestBuildArtifacts:
        assert any("node_modules" in b.name for b in bottlenecks)

    def test_ignores_small_dirs(self, tmp_path):
        """Verifies ignores small dirs logic."""
        nm = tmp_path / "node_modules"
        nm.mkdir()
        small_file = nm / "small.txt"
@@ -203,7 +189,6 @@ class TestBuildArtifacts:
        assert not any("node_modules" in b.name for b in bottlenecks)

    def test_finds_pycache(self, tmp_path):
        """Verifies finds pycache logic."""
        cache = tmp_path / "__pycache__"
        cache.mkdir()
        big_file = cache / "big.pyc"
@@ -217,7 +202,6 @@ class TestMakefileAnalysis:
    """Test Makefile analysis."""

    def test_finds_pip_install(self, tmp_path):
        """Verifies finds pip install logic."""
        makefile = tmp_path / "Makefile"
        makefile.write_text(textwrap.dedent('''
            install:
@@ -231,7 +215,6 @@ class TestMakefileAnalysis:
        assert len(bottlenecks) >= 1

    def test_no_makefile(self, tmp_path):
        """Verifies no makefile logic."""
        bottlenecks = analyze_makefile_targets(str(tmp_path))
        assert bottlenecks == []

@@ -240,7 +223,6 @@ class TestImportAnalysis:
    """Test heavy import detection."""

    def test_finds_pandas(self, tmp_path):
        """Verifies finds pandas logic."""
        src = tmp_path / "analysis.py"
        src.write_text("import pandas as pd\n")

@@ -249,7 +231,6 @@ class TestImportAnalysis:
        assert any("pandas" in b.name for b in bottlenecks)

    def test_finds_torch(self, tmp_path):
        """Verifies finds torch logic."""
        src = tmp_path / "model.py"
        src.write_text("import torch\n")

@@ -257,7 +238,6 @@ class TestImportAnalysis:
        assert any("torch" in b.name for b in bottlenecks)

    def test_skips_light_imports(self, tmp_path):
        """Verifies skips light imports logic."""
        src = tmp_path / "utils.py"
        src.write_text("import json\nimport os\nimport sys\n")

@@ -269,14 +249,12 @@ class TestGenerateReport:
    """Test full report generation."""

    def test_empty_repo(self, tmp_path):
        """Verifies behavior with empty or None input."""
        report = generate_report(str(tmp_path))
        assert report.summary["total_bottlenecks"] >= 0
        assert "critical" in report.summary
        assert "warning" in report.summary

    def test_with_findings(self, tmp_path):
        """Verifies with findings logic."""
        # Create a test file with issues
        test_file = tmp_path / "test_slow.py"
        test_file.write_text(textwrap.dedent('''
@@ -295,7 +273,6 @@ class TestGenerateReport:
        assert len(report.bottlenecks) > 0

    def test_summary_categories(self, tmp_path):
        """Verifies summary categories logic."""
        report = generate_report(str(tmp_path))
        assert "by_category" in report.summary

@@ -304,7 +281,6 @@ class TestMarkdownReport:
    """Test markdown output."""

    def test_format(self):
        """Verifies format logic."""
        report = PerfReport(
            timestamp="2026-01-01T00:00:00Z",
            repo_path="/tmp/repo",
@@ -327,7 +303,6 @@ class TestMarkdownReport:
        assert "Fix it" in md

    def test_empty_report(self):
        """Verifies behavior with empty or None input."""
        report = PerfReport(
            timestamp="2026-01-01T00:00:00Z",
            repo_path="/tmp/repo",
@@ -21,32 +21,27 @@ from quality_gate import (

class TestScoreSpecificity(unittest.TestCase):
    def test_specific_content_scores_high(self):
        """Verifies specific content scores high logic."""
        content = "Run `python3 deploy.py --env prod` on 2026-04-15. Example: step 1 configure nginx."
        score = score_specificity(content)
        self.assertGreater(score, 0.6)

    def test_vague_content_scores_low(self):
        """Verifies vague content scores low logic."""
        content = "It generally depends. Various factors might affect this. Basically, it varies."
        score = score_specificity(content)
        self.assertLess(score, 0.5)

    def test_empty_scores_baseline(self):
        """Verifies behavior with empty or None input."""
        score = score_specificity("")
        self.assertAlmostEqual(score, 0.5, delta=0.1)


class TestScoreActionability(unittest.TestCase):
    def test_actionable_content_scores_high(self):
        """Verifies actionable content scores high logic."""
        content = "1. Run `pip install -r requirements.txt`\n2. Execute `python3 train.py`\n3. Verify with `pytest`"
        score = score_actionability(content)
        self.assertGreater(score, 0.6)

    def test_abstract_content_scores_low(self):
        """Verifies abstract content scores low logic."""
        content = "The concept of intelligence is fascinating and multifaceted."
        score = score_actionability(content)
        self.assertLess(score, 0.5)
@@ -54,40 +49,33 @@ class TestScoreActionability(unittest.TestCase):

class TestScoreFreshness(unittest.TestCase):
    def test_recent_timestamp_scores_high(self):
        """Verifies recent timestamp scores high logic."""
        recent = datetime.now(timezone.utc).isoformat()
        score = score_freshness(recent)
        self.assertGreater(score, 0.9)

    def test_old_timestamp_scores_low(self):
        """Verifies old timestamp scores low logic."""
        old = (datetime.now(timezone.utc) - timedelta(days=365)).isoformat()
        score = score_freshness(old)
        self.assertLess(score, 0.2)

    def test_none_returns_baseline(self):
        """Verifies behavior with empty or None input."""
        score = score_freshness(None)
        self.assertEqual(score, 0.5)


class TestScoreSourceQuality(unittest.TestCase):
    def test_claude_scores_high(self):
        """Verifies claude scores high logic."""
        self.assertGreater(score_source_quality("claude-sonnet"), 0.85)

    def test_ollama_scores_lower(self):
        """Verifies ollama scores lower logic."""
        self.assertLess(score_source_quality("ollama"), 0.7)

    def test_unknown_returns_default(self):
        """Verifies unknown returns default logic."""
        self.assertEqual(score_source_quality("unknown"), 0.5)


class TestScoreEntry(unittest.TestCase):
    def test_good_entry_scores_high(self):
        """Verifies good entry scores high logic."""
        entry = {
            "content": "To deploy: run `kubectl apply -f deployment.yaml`. Verify with `kubectl get pods`.",
            "model": "claude-sonnet",
@@ -97,7 +85,6 @@ class TestScoreEntry(unittest.TestCase):
        self.assertGreater(score, 0.6)

    def test_poor_entry_scores_low(self):
        """Verifies poor entry scores low logic."""
        entry = {
            "content": "It depends. Various things might happen.",
            "model": "unknown",
@@ -108,7 +95,6 @@ class TestScoreEntry(unittest.TestCase):

class TestFilterEntries(unittest.TestCase):
    def test_filters_low_quality(self):
        """Verifies knowledge filtering by filters low quality."""
        entries = [
            {"content": "Run `deploy.py` to fix the issue.", "model": "claude"},
            {"content": "It might work sometimes.", "model": "unknown"},
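The freshness scorer's contract from these tests: a current timestamp scores above 0.9, a year-old one below 0.2, and None falls back to 0.5. An exponential decay with a roughly 90-day half-life satisfies all three (the half-life is an assumption; the real quality_gate.py may use a different curve):

from datetime import datetime, timezone

def score_freshness(timestamp, half_life_days=90.0):
    if timestamp is None:
        return 0.5  # unknown age -> neutral baseline
    then = datetime.fromisoformat(timestamp)
    age_days = (datetime.now(timezone.utc) - then).total_seconds() / 86400
    # 1.0 for brand-new entries, halving every half_life_days
    return 0.5 ** (max(age_days, 0.0) / half_life_days)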