Compare commits: step35/107...step35/173 (1 commit, 2f57c2b653)
scripts/dependency_inventory.py (deleted)
@@ -1,308 +0,0 @@
#!/usr/bin/env python3
"""
Dependency Inventory — Scan repos and list third-party dependencies.

Reads: package.json, requirements.txt, go.mod, Cargo.toml, pyproject.toml
Extracts: package name, version constraint, source file/repo
Outputs: JSON (default) or markdown table

Usage:
    python3 scripts/dependency_inventory.py --repos-dir ~/repos/
    python3 scripts/dependency_inventory.py --repos ~/repo1,~/repo2 --format markdown
"""

import argparse
import json
import re
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Any, Optional

# Mapping of file pattern to canonical parser name
MANIFEST_PATTERNS = {
    'requirements.txt': 'requirements',
    'package.json': 'npm',
    'pyproject.toml': 'pyproject',
    'go.mod': 'go',
    'Cargo.toml': 'cargo',
}

# Parser registry — populated by the @register_parser decorator below
PARSERS = {}


def register_parser(name: str):
    """Decorator to register a parser function."""
    def decorator(fn):
        PARSERS[name] = fn
        return fn
    return decorator


# ─── Parsers ────────────────────────────────────────────────────────────────
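With the decorator in place, parsers are looked up by name rather than hard-coded; the scanner further down resolves them the same way. A minimal sketch of the lookup (using the real `PARSERS` and `MANIFEST_PATTERNS` from this script; the manifest text is a hypothetical example):

```python
# Resolve the right parser for a manifest file name, then run it.
pattern = 'requirements.txt'
parser = PARSERS[MANIFEST_PATTERNS[pattern]]   # -> parse_requirements
deps = parser("requests>=2.31.0\n")            # hypothetical file content
```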
@register_parser('requirements')
def parse_requirements(content: str) -> List[Dict[str, str]]:
    """Parse requirements.txt — one requirement per line."""
    deps = []
    # Comparison operators, longest first so '>=' matches before '>'.
    operators = ('>=', '==', '<=', '~=', '!=', '>', '<', '=')
    for line in content.splitlines():
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        # Drop environment markers and inline comments.
        pkg_spec = re.split(r'[ ;#]', line)[0].strip()
        name, ver = pkg_spec, ''
        for op in operators:
            if op in pkg_spec:
                name, ver = pkg_spec.split(op, 1)
                break
        deps.append({
            'package': name.strip(),
            'version': ver.strip(),
            'constraint': pkg_spec[len(name):].strip(),
        })
    return deps

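For example, one line of a requirements file (hypothetical input) yields:

```python
parse_requirements("requests>=2.31.0  # http client")
# -> [{'package': 'requests', 'version': '2.31.0', 'constraint': '>=2.31.0'}]
```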
@register_parser('npm')
def parse_package_json(content: str) -> List[Dict[str, str]]:
    """Parse package.json dependencies."""
    try:
        data = json.loads(content)
    except json.JSONDecodeError:
        return []
    deps = []
    for section in ('dependencies', 'devDependencies', 'peerDependencies', 'optionalDependencies'):
        for name, ver in data.get(section, {}).items():
            deps.append({
                'package': name,
                'version': ver,
                'constraint': ver,
                'type': section
            })
    return deps

@register_parser('pyproject')
def parse_pyproject_toml(content: str) -> List[Dict[str, str]]:
    """Parse pyproject.toml [project] dependencies (line-oriented, no TOML lib)."""
    deps = []
    in_deps = False
    dep_buffer = ''
    # Accumulate everything between 'dependencies = [' and the closing ']'
    # into one buffer, then pull out the quoted requirement strings.
    for line in content.splitlines():
        stripped = line.strip()
        if stripped.startswith('dependencies = ['):
            in_deps = True
            remainder = stripped.split('=', 1)[1].strip()
            dep_buffer = remainder[1:] if remainder.startswith('[') else remainder
            continue
        if in_deps:
            if stripped.startswith(']'):
                in_deps = False
                continue
            dep_buffer += ' ' + line
    dep_buffer = dep_buffer.strip().rstrip(',')
    for match in re.finditer(r'"([^"]+)"', dep_buffer):
        spec = match.group(1)
        m = re.match(r'^([a-zA-Z0-9_.-]+)\s*([<>=!~]+)?\s*(.*)$', spec)
        if m:
            name, op, ver = m.groups()
            deps.append({
                'package': name,
                'version': (ver or '').strip(),
                'constraint': spec
            })
    return deps

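A quick illustration of what this parser extracts (hypothetical manifest text, mirroring the unit test at the bottom of this diff):

```python
toml_text = '[project]\ndependencies = [\n    "openai>=2.21.0,<3",\n]\n'
parse_pyproject_toml(toml_text)
# -> [{'package': 'openai', 'version': '2.21.0,<3',
#      'constraint': 'openai>=2.21.0,<3'}]
```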
@register_parser('go')
def parse_go_mod(content: str) -> List[Dict[str, str]]:
    """Parse go.mod — require statements, single-line and block form."""
    deps = []
    for raw in content.splitlines():
        line = raw.strip()
        if line.startswith('require ') and not line.startswith('require ('):
            parts = line.split()
            if len(parts) >= 3:
                mod, ver = parts[1], parts[2]
                deps.append({'package': mod, 'version': ver, 'constraint': ver})
        # Indented lines inside a `require ( ... )` block; test the raw line,
        # since the stripped line can never start with a tab.
        elif raw.startswith('\t') and '/' in line:
            parts = line.split()
            if len(parts) >= 2:
                mod, ver = parts[0], parts[1]
                deps.append({'package': mod, 'version': ver, 'constraint': ver})
    return deps

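With the tab check fixed, both require forms in a go.mod like the following (hypothetical module) produce entries:

```python
go_mod = (
    "module example.com/app\n"
    "require github.com/pkg/errors v0.9.1\n"
    "require (\n"
    "\tgolang.org/x/sync v0.7.0\n"
    ")\n"
)
parse_go_mod(go_mod)
# -> entries for github.com/pkg/errors and golang.org/x/sync
```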
@register_parser('cargo')
def parse_cargo_toml(content: str) -> List[Dict[str, str]]:
    """Parse [dependencies] section from Cargo.toml."""
    deps = []
    in_deps = False
    for line in content.splitlines():
        stripped = line.strip()
        if stripped == '[dependencies]':
            in_deps = True
            continue
        if stripped.startswith('['):
            in_deps = False
            continue
        if in_deps and '=' in stripped:
            # Note: inline tables like `serde = { version = "1", ... }`
            # come through with the raw table text as the version string.
            name_part, ver_part = stripped.split('=', 1)
            name = name_part.strip()
            ver = ver_part.strip().strip('"').strip("'")
            deps.append({'package': name, 'version': ver, 'constraint': ver})
    return deps

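A simple case (hypothetical manifest text) showing the shape, and that only the `[dependencies]` section is scanned:

```python
cargo = '[dependencies]\nserde = "1.0"\n\n[dev-dependencies]\ntempfile = "3"\n'
parse_cargo_toml(cargo)
# -> [{'package': 'serde', 'version': '1.0', 'constraint': '1.0'}]
#    ([dev-dependencies] is not scanned)
```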
# ─── File Discovery ─────────────────────────────────────────────────────────

def find_manifest_files(root: Path) -> Dict[str, List[Path]]:
    """Find all manifest files under root."""
    found = {k: [] for k in MANIFEST_PATTERNS}
    skip_dirs = ('.git', 'node_modules', '__pycache__', '.venv', 'venv')
    for pattern in MANIFEST_PATTERNS:
        for path in root.rglob(pattern):
            # Match whole path components, not substrings, so a directory
            # named e.g. 'myvenv' is not skipped by accident.
            if not any(part in skip_dirs for part in path.parts):
                found[pattern].append(path)
    return found


# ─── Main Scanner ────────────────────────────────────────────────────────────
def scan_repo(repo_path: Path) -> Dict[str, Any]:
    """Scan a single repo directory for dependency manifests."""
    repo_name = repo_path.name
    found = find_manifest_files(repo_path)
    all_deps: List[Dict[str, str]] = []
    files_scanned = 0

    for pattern, paths in found.items():
        # Look the parser up in the registry instead of a hard-coded chain.
        parser = PARSERS.get(MANIFEST_PATTERNS[pattern])
        if parser is None:
            continue

        for fp in paths:
            try:
                content = fp.read_text(encoding='utf-8', errors='replace')
                files_scanned += 1
                rel = fp.relative_to(repo_path)
                for dep in parser(content):
                    dep['source'] = pattern
                    dep['file'] = str(rel)
                    dep['repo'] = repo_name
                    all_deps.append(dep)
            except Exception as e:
                print(f"  [WARN] Could not parse {fp}: {e}", file=sys.stderr)

    return {
        'repo': repo_name,
        'path': str(repo_path),
        'files_scanned': files_scanned,
        'dependencies': all_deps,
        'dependency_count': len(all_deps),
    }

def scan_repos(repos: List[Path]) -> Dict[str, Any]:
    """Scan multiple repos and aggregate."""
    results = {}
    total_deps = 0
    total_files = 0
    for repo in repos:
        if not repo.is_dir():
            print(f"[WARN] Skipping {repo}: not a directory", file=sys.stderr)
            continue
        print(f"Scanning {repo.name}...", file=sys.stderr)
        result = scan_repo(repo)
        results[repo.name] = result
        total_deps += result['dependency_count']
        total_files += result['files_scanned']
    return {
        'repos': results,
        'summary': {
            'total_repos': len(results),
            'total_files_scanned': total_files,
            'total_dependencies': total_deps,
        }
    }

# ─── Output ─────────────────────────────────────────────────────────────────

def output_json(data: Dict[str, Any], out_path: Optional[Path] = None) -> None:
    text = json.dumps(data, indent=2)
    if out_path:
        out_path.write_text(text)
        print(f"Written: {out_path}", file=sys.stderr)
    else:
        print(text)


def output_markdown(data: Dict[str, Any], out_path: Optional[Path] = None) -> None:
    lines = []
    lines.append("# Dependency Inventory")
    lines.append(f"\nGenerated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}")
    lines.append(f"\n**Summary:** {data['summary']['total_dependencies']} dependencies across {data['summary']['total_repos']} repos")
    lines.append("")
    lines.append("| Repo | File | Package | Version |")
    lines.append("|------|------|---------|---------|")
    for repo_name, rdata in sorted(data['repos'].items()):
        for dep in sorted(rdata['dependencies'], key=lambda d: d['package']):
            lines.append(f"| {repo_name} | {dep['file']} | {dep['package']} | {dep['version']} |")
    text = '\n'.join(lines) + '\n'
    if out_path:
        out_path.write_text(text)
        print(f"Written: {out_path}", file=sys.stderr)
    else:
        print(text)


# ─── CLI Entry ────────────────────────────────────────────────────────────────
def main():
    parser = argparse.ArgumentParser(description="Generate org-wide dependency inventory")
    parser.add_argument('--repos-dir', help='Directory containing multiple repos')
    parser.add_argument('--repos', help='Comma-separated list of repo paths')
    parser.add_argument('--output', '-o', help='Output file (default: stdout)')
    parser.add_argument('--format', choices=['json', 'markdown'], default='json',
                        help='Output format (default: json)')
    args = parser.parse_args()
    if args.repos:
        repo_paths = [Path(p.strip()).expanduser() for p in args.repos.split(',')]
    elif args.repos_dir:
        base = Path(args.repos_dir).expanduser()
        repo_paths = [p for p in base.iterdir() if p.is_dir() and not p.name.startswith('.')]
    else:
        # Default: scan the repo this script lives in.
        repo_paths = [Path(__file__).resolve().parent.parent]
    out_path = Path(args.output).expanduser() if args.output else None
    data = scan_repos(repo_paths)
    if args.format == 'json':
        output_json(data, out_path)
    else:
        output_markdown(data, out_path)


if __name__ == '__main__':
    main()
scripts/progress_tracker.py (new file, 477 lines)
@@ -0,0 +1,477 @@
#!/usr/bin/env python3
"""
Progress Tracker — Pipeline 10.8
Track improvement metrics over time. Are we getting better?

Metrics tracked:
  1. Test coverage — % of Python functions with associated tests (test:source file ratio + line coverage if available)
  2. Doc coverage — % of Python callables with docstrings (AST-based)
  3. Issue close rate — closed / (opened + closed) per week (Gitea API)
  4. Dep freshness — % of installed requirements that are up to date (via pip list --outdated)

Output:
  - metrics/snapshots/YYYY-MM-DD.json — one snapshot per run
  - metrics/TRENDS.md — cumulative markdown table
  - stdout summary

Usage:
  python3 scripts/progress_tracker.py
  python3 scripts/progress_tracker.py --json
  python3 scripts/progress_tracker.py --output metrics/TRENDS.md

Weekly cron:
  0 9 * * 1 cd /path/to/compounding-intelligence && python3 scripts/progress_tracker.py
"""

import argparse
import ast
import io
import json
import re
import subprocess
import sys
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

# ── Configuration ──────────────────────────────────────────────────────────

SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent
METRICS_DIR = REPO_ROOT / "metrics"
SNAPSHOTS_DIR = METRICS_DIR / "snapshots"
TOKEN_PATH = Path.home() / ".config" / "gitea" / "token"
GITEA_API_BASE = "https://forge.alexanderwhitestone.com/api/v1"
ORG = "Timmy_Foundation"

# Ensure paths exist
SNAPSHOTS_DIR.mkdir(parents=True, exist_ok=True)


# ── Helpers ─────────────────────────────────────────────────────────────────
def run_cmd(cmd: List[str], cwd: Path = REPO_ROOT) -> str:
    """Run a command and return stdout, or '' if it fails, times out, or is missing."""
    try:
        result = subprocess.run(
            cmd, capture_output=True, text=True, cwd=cwd, timeout=30
        )
    except (FileNotFoundError, subprocess.TimeoutExpired):
        return ""
    if result.returncode != 0:
        return ""
    return result.stdout.strip()

def slugify_date(dt: datetime) -> str:
    return dt.strftime("%Y-%m-%d")


def snapshot_path(dt: datetime) -> Path:
    return SNAPSHOTS_DIR / f"{slugify_date(dt)}.json"


def load_snapshots() -> List[Dict[str, Any]]:
    """Load all existing snapshots sorted by date."""
    snapshots = []
    for f in sorted(SNAPSHOTS_DIR.glob("*.json")):
        try:
            with open(f) as fp:
                snapshots.append(json.load(fp))
        except Exception:
            continue
    return snapshots

# ── Metric 1: Test Coverage ─────────────────────────────────────────────────

def _partition_py_files() -> Tuple[List[Path], List[Path]]:
    """Split repo .py files into (source, test) lists; shared by metrics 1 and 2."""
    source_files: List[Path] = []
    test_files: List[Path] = []
    for p in REPO_ROOT.rglob("*.py"):
        try:
            rel_parts = p.relative_to(REPO_ROOT).parts
        except ValueError:
            continue

        # Skip hidden/cache/temp dirs (check only relative parts)
        if any(part.startswith('.') or part.startswith('__') for part in rel_parts):
            continue
        if any(part in ('node_modules', 'venv', '.venv', 'env', '.pytest_cache') for part in rel_parts):
            continue

        if p.name.startswith("test_") or p.name.endswith("_test.py"):
            test_files.append(p)
        else:
            source_files.append(p)
    return source_files, test_files


def collect_test_coverage() -> Dict[str, Any]:
    """
    Compute test coverage metrics.
    Counts test_*.py and *_test.py files vs non-test .py source files.
    Also attempts to read .coverage if present.
    """
    source_files, test_files = _partition_py_files()

    # Try to get line coverage from .coverage
    coverage_percent = None
    coverage_tool = None
    coverage_file = REPO_ROOT / ".coverage"
    if coverage_file.exists():
        try:
            import coverage  # type: ignore
            # Use the coverage API if available; report() returns the total
            # percentage, and the table goes to a throwaway buffer instead
            # of polluting stdout.
            cov = coverage.Coverage(data_file=str(coverage_file))
            cov.load()
            total = cov.report(file=io.StringIO())
            coverage_percent = total if isinstance(total, float) else None
            coverage_tool = "coverage"
        except Exception:
            # Fallback: parse `coverage report` output
            out = run_cmd(["coverage", "report", "--skip-empty"])
            if out:
                for line in out.splitlines():
                    if "TOTAL" in line:
                        parts = line.split()
                        if len(parts) >= 2:
                            try:
                                coverage_percent = float(parts[-1].rstrip('%'))
                                coverage_tool = "coverage"
                                break
                            except ValueError:
                                pass

    return {
        "test_files": len(test_files),
        "source_files": len(source_files),
        "test_to_source_ratio": round(len(test_files) / len(source_files), 4) if source_files else 0.0,
        "coverage_tool": coverage_tool,
        "coverage_percent": coverage_percent,
    }

# ── Metric 2: Doc Coverage ──────────────────────────────────────────────────

def collect_doc_coverage() -> Dict[str, Any]:
    """
    Check AST of Python files for docstrings.
    Returns: callables_total, callables_with_doc, doc_coverage_percent
    """
    source_files, test_files = _partition_py_files()

    total_callables = 0
    with_doc = 0

    for p in source_files + test_files:
        try:
            with open(p) as f:
                tree = ast.parse(f.read(), filename=str(p))
            for node in ast.walk(tree):
                if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
                    total_callables += 1
                    doc = ast.get_docstring(node)
                    if doc and doc.strip():
                        with_doc += 1
        except Exception:
            continue

    return {
        "callables_total": total_callables,
        "callables_with_doc": with_doc,
        "doc_coverage_percent": round((with_doc / total_callables * 100) if total_callables else 0.0, 2),
    }

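The AST check is simple enough to verify by hand. A self-contained illustration of the same technique on hypothetical module text:

```python
import ast

# One documented function, one undocumented.
tree = ast.parse("def f():\n    '''doc'''\n\ndef g():\n    pass\n")
funcs = [n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)]
documented = sum(1 for n in funcs if ast.get_docstring(n))
print(documented, "/", len(funcs))  # -> 1 / 2
```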
# ── Metric 3: Issue Close Rate ──────────────────────────────────────────────

def collect_issue_metrics() -> Dict[str, Any]:
    """
    Use the Gitea API to get issue open/close stats for the last 7 days.
    Returns counts and close rate.
    """
    from urllib.request import Request, urlopen

    token = ""
    if TOKEN_PATH.exists():
        token = TOKEN_PATH.read_text().strip()

    if not token:
        return {
            "opened_last_7d": None,
            "closed_last_7d": None,
            "close_rate": None,
            "total_open": None,
            "note": "Gitea token not available"
        }

    now = datetime.now(timezone.utc)
    week_ago = now - timedelta(days=7)
    since = week_ago.strftime("%Y-%m-%d")

    headers = {"Authorization": f"token {token}"}
    base_url = f"{GITEA_API_BASE}/repos/{ORG}/compounding-intelligence/issues"

    try:
        # Get issues updated in the last 7 days (first page only; see the
        # pagination sketch below for repos with more than 100 issues).
        url = f"{base_url}?state=all&since={since}&per_page=100"
        req = Request(url, headers=headers)
        with urlopen(req, timeout=15) as resp:
            issues = json.loads(resp.read())

        opened = 0
        closed = 0
        for issue in issues:
            created = datetime.fromisoformat(issue["created_at"].replace("Z", "+00:00"))
            if created >= week_ago:
                opened += 1
            if issue.get("state") == "closed":
                closed_at_str = issue.get("closed_at")
                if closed_at_str:
                    closed_at = datetime.fromisoformat(closed_at_str.replace("Z", "+00:00"))
                    if closed_at >= week_ago:
                        closed += 1

        # Total open issues
        req2 = Request(f"{base_url}?state=open&per_page=1", headers=headers)
        with urlopen(req2, timeout=15) as resp:
            total_open = int(resp.headers.get("X-Total-Count", "0"))

        total = opened + closed
        close_rate = closed / total if total > 0 else 0.0

        return {
            "opened_last_7d": opened,
            "closed_last_7d": closed,
            "close_rate": round(close_rate, 4),
            "total_open": total_open,
        }
    except Exception as e:
        return {
            "opened_last_7d": None,
            "closed_last_7d": None,
            "close_rate": None,
            "total_open": None,
            "error": str(e)[:100],
            "note": "Gitea API unavailable"
        }

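Note that `per_page=100` caps the fetch at one page, which undercounts on busier repos. A minimal pagination sketch, assuming Gitea's standard `page` query parameter (not part of the script above):

```python
import json
from urllib.request import Request, urlopen

def fetch_all_issues(base_url: str, headers: dict, since: str) -> list:
    """Walk pages until the API returns an empty batch."""
    issues, page = [], 1
    while True:
        url = f"{base_url}?state=all&since={since}&per_page=100&page={page}"
        with urlopen(Request(url, headers=headers), timeout=15) as resp:
            batch = json.loads(resp.read())
        if not batch:
            return issues
        issues.extend(batch)
        page += 1
```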
# ── Metric 4: Dependency Freshness ─────────────────────────────────────────

def collect_dep_freshness() -> Dict[str, Any]:
    """
    Check requirements.txt for outdated dependencies using pip list --outdated.
    Returns freshness percentage and outdated list.
    """
    req_file = REPO_ROOT / "requirements.txt"
    if not req_file.exists():
        return {
            "total_deps": 0,
            "outdated_deps": 0,
            "freshness_percent": 100.0,
            "outdated_list": [],
            "note": "requirements.txt not found"
        }

    # Parse requirements (very simple: take name before comparison op)
    reqs = []
    with open(req_file) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            m = re.match(r"^([a-zA-Z0-9_.-]+)", line)
            if m:
                reqs.append(m.group(1))

    if not reqs:
        return {"total_deps": 0, "outdated_deps": 0, "freshness_percent": 100.0, "outdated_list": []}

    # Query pip for outdated packages. Note this reflects the *installed*
    # environment, not the pins in requirements.txt, and may fail if pip
    # is unavailable.
    outdated_names = set()
    try:
        out = run_cmd(["pip", "list", "--outdated", "--format=json"])
        if out:
            data = json.loads(out)
            outdated_names = {item["name"].lower() for item in data}
    except Exception:
        pass

    outdated = [p for p in reqs if p.lower() in outdated_names]
    total = len(reqs)
    outdated_count = len(outdated)
    freshness = round(((total - outdated_count) / total * 100) if total else 100.0, 1)

    return {
        "total_deps": total,
        "outdated_deps": outdated_count,
        "freshness_percent": freshness,
        "outdated_list": outdated,
    }

# ── Snapshot & Trends ───────────────────────────────────────────────────────

def take_snapshot() -> Dict[str, Any]:
    """Collect all metrics and return a snapshot dict."""
    now = datetime.now(timezone.utc)
    test_cov = collect_test_coverage()
    doc_cov = collect_doc_coverage()
    issues = collect_issue_metrics()
    deps = collect_dep_freshness()

    return {
        "timestamp": now.isoformat(),
        "date": slugify_date(now),
        "metrics": {
            "test_coverage": test_cov,
            "doc_coverage": doc_cov,
            "issues": issues,
            "dependencies": deps,
        }
    }


def save_snapshot(snapshot: Dict[str, Any]) -> Path:
    path = snapshot_path(datetime.fromisoformat(snapshot["timestamp"]))
    with open(path, "w") as f:
        json.dump(snapshot, f, indent=2)
    return path

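For reference, the shape of a saved snapshot, with illustrative values only (the keys match the collectors above; none of the numbers are real data):

```python
snapshot = {
    "timestamp": "2024-05-06T09:00:00+00:00",  # datetime.now(timezone.utc).isoformat()
    "date": "2024-05-06",
    "metrics": {
        "test_coverage": {"test_files": 4, "source_files": 20,
                          "test_to_source_ratio": 0.2,
                          "coverage_tool": None, "coverage_percent": None},
        "doc_coverage": {"callables_total": 50, "callables_with_doc": 30,
                         "doc_coverage_percent": 60.0},
        "issues": {"opened_last_7d": 3, "closed_last_7d": 5,
                   "close_rate": 0.625, "total_open": 12},
        "dependencies": {"total_deps": 10, "outdated_deps": 2,
                         "freshness_percent": 80.0,
                         "outdated_list": ["requests", "pytest"]},
    },
}
```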
def generate_trends(snapshots: List[Dict[str, Any]], output_path: Optional[Path] = None) -> str:
    """Generate markdown trends table; optionally write to file."""
    if not snapshots:
        msg = "# Progress Tracker — Trends\n\nNo snapshots yet. Run `progress_tracker.py` to create the first snapshot."
        if output_path:
            output_path.parent.mkdir(parents=True, exist_ok=True)
            output_path.write_text(msg)
        return msg

    lines = [
        "# Progress Tracker — Trends",
        f"\nLast updated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}",
        f"\nSnapshots: {len(snapshots)}\n",
        "| Date | Test Files → Source | Doc Coverage | Issues Closed/Opened (7d) | Dep Freshness |",
        "|------|---------------------|--------------|---------------------------|---------------|",
    ]

    for snap in reversed(snapshots):  # newest first
        date = snap["date"]
        m = snap["metrics"]
        tc = m["test_coverage"]
        test_str = f"{tc['test_files']}/{tc['source_files']} ({tc['test_to_source_ratio']:.2f})"
        doc_str = f"{m['doc_coverage']['doc_coverage_percent']:.1f}%"
        issues_str = f"{m['issues'].get('closed_last_7d','-')}/{m['issues'].get('opened_last_7d','-')}"
        dep_str = f"{m['dependencies'].get('freshness_percent','?')}%"
        lines.append(f"| {date} | {test_str} | {doc_str} | {issues_str} | {dep_str} |")

    # Latest snapshot summary
    cur = snapshots[-1]
    cm = cur["metrics"]
    lines.append(f"\n## Current Snapshot ({cur['date']})\n")

    tc = cm["test_coverage"]
    cov_line = f"- Test coverage: {tc['coverage_percent']:.1f}% (via {tc['coverage_tool']})\n" if tc["coverage_percent"] is not None else "- Test coverage: (pytest-cov not configured)\n"
    lines.append(cov_line)
    lines.append(f"- Doc coverage: {cm['doc_coverage']['doc_coverage_percent']:.1f}%")

    im = cm["issues"]
    if im.get("close_rate") is not None:
        lines.append(f"- Issue close rate (7d): {im['close_rate']*100:.1f}% ({im['closed_last_7d']} closed, {im['opened_last_7d']} opened)")
    else:
        lines.append(f"- Issue metrics: {im.get('note','unavailable')}")

    dd = cm["dependencies"]
    lines.append(f"- Dep freshness: {dd.get('freshness_percent','?')}% up to date ({dd.get('outdated_deps',0)}/{dd.get('total_deps',0)} deps outdated)")
    if dd.get('outdated_list'):
        lines.append(f"  Outdated: {', '.join(dd['outdated_list'][:5])}")

    content = "\n".join(lines) + "\n"

    if output_path:
        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_text(content)

    return content

# ── Main ─────────────────────────────────────────────────────────────────────

def main() -> int:
    parser = argparse.ArgumentParser(description="Progress Tracker — 10.8")
    parser.add_argument("--json", action="store_true", help="Emit snapshot as JSON only")
    parser.add_argument("--output", type=Path, default=METRICS_DIR / "TRENDS.md",
                        help="Write trends markdown to this file")
    args = parser.parse_args()

    snapshot = take_snapshot()
    all_snapshots = load_snapshots()
    path_written = save_snapshot(snapshot)

    if args.json:
        print(json.dumps(snapshot, indent=2))
        return 0

    # Drop any earlier snapshot from today so reruns don't double-count.
    history = [s for s in all_snapshots if s.get("date") != snapshot["date"]]
    generate_trends(history + [snapshot], output_path=args.output)

    # Print current snapshot summary
    print(f"Snapshot saved: {path_written}\n")
    print(f"Progress Tracker — {snapshot['date']}")
    print("=" * 50)

    m = snapshot["metrics"]
    tc = m["test_coverage"]
    print(f"Test files: {tc['test_files']} | Source files: {tc['source_files']} | Ratio: {tc['test_to_source_ratio']:.3f}")
    if tc["coverage_percent"] is not None:
        print(f"Line coverage: {tc['coverage_percent']:.1f}% (via {tc['coverage_tool']})")
    else:
        print("Line coverage: (not available — run `pytest --cov`)")

    print()
    dc = m["doc_coverage"]
    print(f"Callables with docstrings: {dc['callables_with_doc']}/{dc['callables_total']} ({dc['doc_coverage_percent']:.1f}%)")

    print()
    im = m["issues"]
    if im.get("close_rate") is not None:
        print(f"Issues (7d): {im['closed_last_7d']} closed / {im['opened_last_7d']} opened → close rate: {im['close_rate']*100:.1f}%")
        print(f"Total open: {im['total_open']}")
    else:
        print(f"Issues: {im.get('note','unavailable')}")

    print()
    dd = m["dependencies"]
    print(f"Dependencies: {dd.get('total_deps',0)} total, {dd.get('outdated_deps',0)} outdated")
    if dd.get('outdated_list'):
        shown = dd['outdated_list'][:5]
        print(f"Outdated: {', '.join(shown)}" + ("..." if len(dd['outdated_list']) > 5 else ""))

    print(f"\nTrends written to: {args.output}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
(deleted file: tests for scripts/dependency_inventory.py)
@@ -1,52 +0,0 @@
"""
|
||||
Tests for scripts/dependency_inventory.py
|
||||
"""
|
||||
|
||||
import unittest
|
||||
import json
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from scripts.dependency_inventory import (
|
||||
parse_requirements,
|
||||
parse_package_json,
|
||||
parse_pyproject_toml,
|
||||
scan_repo,
|
||||
)
|
||||
|
||||
|
||||
class TestParseRequirements(unittest.TestCase):
|
||||
def test_parses_simple_requirement(self):
|
||||
result = parse_requirements("requests>=2.33.0")
|
||||
self.assertEqual(len(result), 1)
|
||||
self.assertEqual(result[0]["package"], "requests")
|
||||
|
||||
def test_parses_version_range(self):
|
||||
result = parse_requirements("pytest>=8,<9")
|
||||
self.assertEqual(result[0]["package"], "pytest")
|
||||
|
||||
|
||||
class TestParsePackageJson(unittest.TestCase):
|
||||
def test_parses_dependencies(self):
|
||||
content = json.dumps({"name": "test", "dependencies": {"react": "^18.2.0"}})
|
||||
result = parse_package_json(content)
|
||||
self.assertTrue(any(d["package"] == "react" for d in result))
|
||||
|
||||
|
||||
class TestParsePyprojectToml(unittest.TestCase):
|
||||
def test_parses_project_dependencies(self):
|
||||
content = "\n[project]\nname = \"test\"\ndependencies = [\n \"openai>=2.21.0,<3\",\n]"
|
||||
result = parse_pyproject_toml(content)
|
||||
self.assertEqual(len(result), 1)
|
||||
|
||||
|
||||
class TestScanRepo(unittest.TestCase):
|
||||
def test_scans_local_repo(self):
|
||||
result = scan_repo(Path(__file__).resolve().parents[1])
|
||||
self.assertGreater(result["dependency_count"], 0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||