Compare commits
2 Commits
step35/111 ... step35/172

| Author | SHA1 | Date |
|---|---|---|
|  | 824b4e7af8 |  |
|  | 4b5a675355 |  |
@@ -180,89 +180,6 @@ def to_mermaid(graph: dict) -> str:
    return "\n".join(lines)


def transitive_closure(graph: dict) -> dict:
    """Compute transitive closure for each node (all indirect deps)."""
    closure = {}
    # Build adjacency list
    adj = {node: set(data.get("dependencies", [])) for node, data in graph.items()}
    all_nodes = set(adj.keys()) | set().union(*adj.values())

    for node in all_nodes:
        visited = set()
        stack = list(adj.get(node, set()))
        while stack:
            current = stack.pop()
            if current not in visited:
                visited.add(current)
                stack.extend(adj.get(current, set()))
        # Remove self-reference: a node's transitive deps should not include itself
        visited.discard(node)
        closure[node] = visited

    return closure
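For orientation, a minimal usage sketch of transitive_closure (same input shape the deleted tests further down exercise):

graph = {"A": {"dependencies": ["B"]}, "B": {"dependencies": ["C"]}, "C": {"dependencies": []}}
transitive_closure(graph)   # -> {"A": {"B", "C"}, "B": {"C"}, "C": set()}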
def find_deep_chains(graph: dict) -> list[list[str]]:
    """Find the longest simple paths in the dependency graph (ignoring cycles)."""
    from collections import defaultdict

    adj = {node: list(data.get("dependencies", [])) for node, data in graph.items()}
    deepest = []
    max_len = 0

    def dfs(node: str, path: list, visited: set):
        nonlocal deepest, max_len
        # Stop if we hit a cycle (node already in path)
        if node in path:
            return
        new_path = path + [node]
        if node not in adj or not adj[node]:
            # leaf
            if len(new_path) > max_len:
                max_len = len(new_path)
                deepest = [new_path.copy()]
            elif len(new_path) == max_len:
                deepest.append(new_path.copy())
        else:
            for neighbor in adj[node]:
                dfs(neighbor, new_path.copy(), visited | {node})

    for start in graph:
        dfs(start, [], set())

    return deepest
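Worth noting: enumerating every simple path this way can blow up exponentially on dense graphs. A sketch of a memoized alternative that returns only the longest-chain length, assuming the graph is acyclic (it would recurse forever on a cycle); the helper name is hypothetical:

from functools import lru_cache

def longest_chain_length(adj: dict[str, list[str]]) -> int:
    @lru_cache(maxsize=None)
    def depth(node: str) -> int:
        # 1 for the node itself plus the deepest chain among its dependencies
        return 1 + max((depth(child) for child in adj.get(node, [])), default=0)
    return max((depth(node) for node in adj), default=0)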
def format_transitive_markdown(closure: dict) -> str:
    """Render transitive closure as a markdown table."""
    lines = ["# Transitive Dependencies\n\n"]
    lines.append("| Node | Transitive Dependencies | Count |\n")
    lines.append("|------|------------------------|-------|\n")
    for node in sorted(closure.keys()):
        deps = closure[node]
        deps_str = ", ".join(sorted(deps)) if deps else "(none)"
        lines.append(f"| {node} | {deps_str} | {len(deps)} |\n")
    return "".join(lines)


def format_deep_chains_markdown(chains: list[list[str]]) -> str:
    """Render longest dependency chains as a markdown list."""
    lines = ["# Deepest Dependency Chains\n\n"]
    if not chains:
        lines.append("No chains found.\n")
        return "".join(lines)
    max_len = max(len(c) for c in chains)
    lines.append(f"*Longest chain length:* {max_len}\n\n")
    for i, chain in enumerate(sorted(chains, key=lambda c: (-len(c), " -> ".join(c))), 1):
        lines.append(f"**Chain {i}** ({len(chain)} nodes)\n\n")
        indent = " "
        for j, node in enumerate(chain):
            arrow = " → " if j < len(chain) - 1 else " • "
            lines.append(f"{indent}{arrow}{node}\n")
        lines.append("\n")
    return "".join(lines)


def main():
    parser = argparse.ArgumentParser(description="Build cross-repo dependency graph")
    parser.add_argument("repos_dir", nargs="?", help="Directory containing repos")
@@ -311,20 +228,13 @@ def main():
    elif args.format == "mermaid":
        output = to_mermaid(results)
    else:
        # Compute transitive and deep chains
        closure = transitive_closure(results)
        deep_chains = find_deep_chains(results)
        output = json.dumps({
            "repos": results,
            "cycles": cycles,
            "transitive": {node: sorted(deps) for node, deps in closure.items()},
            "deep_chains": [chain for chain in deep_chains if len(chain) > 1],
            "summary": {
                "total_repos": len(results),
                "total_deps": sum(len(r["dependencies"]) for r in results.values()),
                "cycles_found": len(cycles),
                "transitive_pairs": sum(len(deps) for deps in closure.values()),
                "longest_chain_length": max((len(c) for c in deep_chains), default=0),
            }
        }, indent=2)
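Roughly the JSON shape the branch above assembles (illustrative values only, not from a real run):

example_payload = {
    "repos": {"A": {"dependencies": ["B"]}, "B": {"dependencies": ["C"]}, "C": {"dependencies": []}},
    "cycles": [],
    "transitive": {"A": ["B", "C"], "B": ["C"], "C": []},
    "deep_chains": [["A", "B", "C"]],
    "summary": {"total_repos": 3, "total_deps": 2, "cycles_found": 0,
                "transitive_pairs": 3, "longest_chain_length": 3},
}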
@@ -75,7 +75,7 @@ class GapReport:
        return {
            "repo_path": self.repo_path,
            "total_gaps": len(self.gaps),
-           "stats": {k: len(v) for k, v in
+           "stats": {k.value: len(v) for k, v in
                      {gt: [g for g in self.gaps if g.gap_type == gt]
                       for gt in GapType}.items() if v},
            "gaps": [
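The k to k.value switch matters because json.dumps rejects Enum objects as mapping keys; a minimal sketch (the GapType member shown is illustrative, not taken from the real enum):

import json
from enum import Enum

class GapType(Enum):
    MISSING_DOCS = "missing_docs"   # illustrative member

stats = {GapType.MISSING_DOCS: 2}
# json.dumps(stats)  -> TypeError: keys must be str, int, float, bool or None
json.dumps({k.value: v for k, v in stats.items()})   # '{"missing_docs": 2}'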
@@ -273,3 +273,44 @@ class KnowledgeGapIdentifier:
            ))

        return report


def main() -> None:
    import argparse
    import json
    import sys
    parser = argparse.ArgumentParser(
        description="Knowledge Gap Identifier — cross-reference code, docs, and tests to find gaps"
    )
    parser.add_argument(
        "repo_path",
        nargs="?",
        default=".",
        help="Path to repository root (default: current directory)"
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Output report as JSON instead of human-readable summary"
    )
    parser.add_argument(
        "-o", "--output",
        help="Write report to file instead of stdout"
    )
    args = parser.parse_args()

    report = KnowledgeGapIdentifier().analyze(args.repo_path)

    if args.json:
        output = json.dumps(report.to_dict(), indent=2, default=str)
    else:
        output = report.summary()

    if args.output:
        with open(args.output, "w") as fh:
            print(output, file=fh)
    else:
        print(output)


if __name__ == "__main__":
    main()
scripts/pr_complexity_scorer.py (new file, 351 lines)
@@ -0,0 +1,351 @@
#!/usr/bin/env python3
"""
PR Complexity Scorer - Estimate review effort for PRs.
"""

import argparse
import json
import os
import re
import sys
from dataclasses import dataclass, asdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
import urllib.request
import urllib.error

GITEA_BASE = "https://forge.alexanderwhitestone.com/api/v1"

DEPENDENCY_FILES = {
    "requirements.txt", "pyproject.toml", "setup.py", "setup.cfg",
    "Pipfile", "poetry.lock", "package.json", "yarn.lock", "Gemfile",
    "go.mod", "Cargo.toml", "pom.xml", "build.gradle"
}

TEST_PATTERNS = [
    r"tests?/.*\.py$", r".*_test\.py$", r"test_.*\.py$",
    r"spec/.*\.rb$", r".*_spec\.rb$",
    r"__tests__/", r".*\.test\.(js|ts|jsx|tsx)$"
]

WEIGHT_FILES = 0.25
WEIGHT_LINES = 0.25
WEIGHT_DEPS = 0.30
WEIGHT_TEST_COV = 0.20

SMALL_FILES = 5
MEDIUM_FILES = 20
LARGE_FILES = 50

SMALL_LINES = 100
MEDIUM_LINES = 500
LARGE_LINES = 2000

TIME_PER_POINT = {1: 5, 2: 10, 3: 15, 4: 20, 5: 25, 6: 30, 7: 45, 8: 60, 9: 90, 10: 120}
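A quick aside on the constants above, not part of the file: the four weights sum to 1.0, so the weighted bonus computed in score_pr stays within roughly 0 to 2.9 before the 3x scaling (fscore up to 3.0, lscore up to 4.0, dscore up to 2.5, tscore within plus or minus 2.0).

assert abs((WEIGHT_FILES + WEIGHT_LINES + WEIGHT_DEPS + WEIGHT_TEST_COV) - 1.0) < 1e-9
max_bonus = 3.0 * WEIGHT_FILES + 4.0 * WEIGHT_LINES + 2.5 * WEIGHT_DEPS + 2.0 * WEIGHT_TEST_COV
print(max_bonus)   # 2.9 -> scaled to 8.7 -> final score 1 + 8.7 rounds to 10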
@dataclass
class PRComplexity:
    pr_number: int
    title: str
    files_changed: int
    additions: int
    deletions: int
    has_dependency_changes: bool
    test_coverage_delta: Optional[int]
    score: int
    estimated_minutes: int
    reasons: List[str]

    def to_dict(self) -> dict:
        return asdict(self)
class GiteaClient:
    def __init__(self, token: str):
        self.token = token
        self.base_url = GITEA_BASE.rstrip("/")

    def _request(self, path: str, params: Dict = None) -> Any:
        url = f"{self.base_url}{path}"
        if params:
            qs = "&".join(f"{k}={v}" for k, v in params.items() if v is not None)
            url += f"?{qs}"

        req = urllib.request.Request(url)
        req.add_header("Authorization", f"token {self.token}")
        req.add_header("Content-Type", "application/json")

        try:
            with urllib.request.urlopen(req, timeout=30) as resp:
                return json.loads(resp.read().decode())
        except urllib.error.HTTPError as e:
            print(f"API error {e.code}: {e.read().decode()[:200]}", file=sys.stderr)
            return None
        except urllib.error.URLError as e:
            print(f"Network error: {e}", file=sys.stderr)
            return None

    def get_open_prs(self, org: str, repo: str) -> List[Dict]:
        prs = []
        page = 1
        while True:
            batch = self._request(f"/repos/{org}/{repo}/pulls", {"limit": 50, "page": page, "state": "open"})
            if not batch:
                break
            prs.extend(batch)
            if len(batch) < 50:
                break
            page += 1
        return prs

    def get_pr_files(self, org: str, repo: str, pr_number: int) -> List[Dict]:
        files = []
        page = 1
        while True:
            batch = self._request(
                f"/repos/{org}/{repo}/pulls/{pr_number}/files",
                {"limit": 100, "page": page}
            )
            if not batch:
                break
            files.extend(batch)
            if len(batch) < 100:
                break
            page += 1
        return files

    def post_comment(self, org: str, repo: str, pr_number: int, body: str) -> bool:
        data = json.dumps({"body": body}).encode("utf-8")
        req = urllib.request.Request(
            f"{self.base_url}/repos/{org}/{repo}/issues/{pr_number}/comments",
            data=data,
            method="POST",
            headers={"Authorization": f"token {self.token}", "Content-Type": "application/json"}
        )
        try:
            with urllib.request.urlopen(req, timeout=30) as resp:
                return resp.status in (200, 201)
        except urllib.error.HTTPError:
            return False
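A minimal usage sketch of the client (the token string is a placeholder; org and repo match the defaults used in main below):

client = GiteaClient("<gitea-token>")
open_prs = client.get_open_prs("Timmy_Foundation", "compounding-intelligence")
if open_prs:
    changed = client.get_pr_files("Timmy_Foundation", "compounding-intelligence", open_prs[0]["number"])
    print(f"{len(open_prs)} open PRs; {len(changed)} files in the first one")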
def is_dependency_file(filename: str) -> bool:
    return any(filename.endswith(dep) for dep in DEPENDENCY_FILES)


def is_test_file(filename: str) -> bool:
    return any(re.search(pattern, filename) for pattern in TEST_PATTERNS)
def score_pr(
    files_changed: int,
    additions: int,
    deletions: int,
    has_dependency_changes: bool,
    test_coverage_delta: Optional[int] = None
) -> tuple[int, int, List[str]]:
    score = 1.0
    reasons = []

    # Files changed
    if files_changed <= SMALL_FILES:
        fscore = 1.0
        reasons.append("small number of files changed")
    elif files_changed <= MEDIUM_FILES:
        fscore = 2.0
        reasons.append("moderate number of files changed")
    elif files_changed <= LARGE_FILES:
        fscore = 2.5
        reasons.append("large number of files changed")
    else:
        fscore = 3.0
        reasons.append("very large PR spanning many files")

    # Lines changed
    total_lines = additions + deletions
    if total_lines <= SMALL_LINES:
        lscore = 1.0
        reasons.append("small change size")
    elif total_lines <= MEDIUM_LINES:
        lscore = 2.0
        reasons.append("moderate change size")
    elif total_lines <= LARGE_LINES:
        lscore = 3.0
        reasons.append("large change size")
    else:
        lscore = 4.0
        reasons.append("very large change")

    # Dependency changes
    if has_dependency_changes:
        dscore = 2.5
        reasons.append("dependency changes (architectural impact)")
    else:
        dscore = 0.0

    # Test coverage delta
    tscore = 0.0
    if test_coverage_delta is not None:
        if test_coverage_delta > 0:
            reasons.append(f"test additions (+{test_coverage_delta} test files)")
            tscore = -min(2.0, test_coverage_delta / 2.0)
        elif test_coverage_delta < 0:
            reasons.append(f"test removals ({abs(test_coverage_delta)} test files)")
            tscore = min(2.0, abs(test_coverage_delta) * 0.5)
    else:
        reasons.append("test coverage change not assessed")

    # Weighted sum, scaled by 3 to use full 1-10 range
    bonus = (fscore * WEIGHT_FILES) + (lscore * WEIGHT_LINES) + (dscore * WEIGHT_DEPS) + (tscore * WEIGHT_TEST_COV)
    scaled_bonus = bonus * 3.0
    score = 1.0 + scaled_bonus

    final_score = max(1, min(10, int(round(score))))
    est_minutes = TIME_PER_POINT.get(final_score, 30)

    return final_score, est_minutes, reasons
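For a concrete trace of the weighting (mirrors the medium-PR unit test in scripts/test_pr_complexity_scorer.py below):

# 15 files -> fscore 2.0; 400 + 100 = 500 lines -> lscore 2.0; no dependency or test signal.
# bonus = 2.0*0.25 + 2.0*0.25 = 1.0; scaled = 3.0; score = round(1.0 + 3.0) = 4 -> ~20 minutes.
score, minutes, reasons = score_pr(files_changed=15, additions=400, deletions=100,
                                   has_dependency_changes=False, test_coverage_delta=None)
print(score, minutes)   # 4 20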
def analyze_pr(client: GiteaClient, org: str, repo: str, pr_data: Dict) -> PRComplexity:
    pr_num = pr_data["number"]
    title = pr_data.get("title", "")
    files = client.get_pr_files(org, repo, pr_num)

    additions = sum(f.get("additions", 0) for f in files)
    deletions = sum(f.get("deletions", 0) for f in files)
    filenames = [f.get("filename", "") for f in files]

    has_deps = any(is_dependency_file(f) for f in filenames)

    test_added = sum(1 for f in files if f.get("status") == "added" and is_test_file(f.get("filename", "")))
    test_removed = sum(1 for f in files if f.get("status") == "removed" and is_test_file(f.get("filename", "")))
    test_delta = test_added - test_removed if (test_added or test_removed) else None

    score, est_min, reasons = score_pr(
        files_changed=len(files),
        additions=additions,
        deletions=deletions,
        has_dependency_changes=has_deps,
        test_coverage_delta=test_delta
    )

    return PRComplexity(
        pr_number=pr_num,
        title=title,
        files_changed=len(files),
        additions=additions,
        deletions=deletions,
        has_dependency_changes=has_deps,
        test_coverage_delta=test_delta,
        score=score,
        estimated_minutes=est_min,
        reasons=reasons
    )
def build_comment(complexity: PRComplexity) -> str:
    change_desc = f"{complexity.files_changed} files, +{complexity.additions}/-{complexity.deletions} lines"
    deps_note = "\n- :warning: Dependency changes detected — architectural review recommended" if complexity.has_dependency_changes else ""
    test_note = ""
    if complexity.test_coverage_delta is not None:
        if complexity.test_coverage_delta > 0:
            test_note = f"\n- :+1: {complexity.test_coverage_delta} test file(s) added"
        elif complexity.test_coverage_delta < 0:
            test_note = f"\n- :warning: {abs(complexity.test_coverage_delta)} test file(s) removed"

    comment = "## 📊 PR Complexity Analysis\n\n"
    comment += f"**PR #{complexity.pr_number}: {complexity.title}**\n\n"
    comment += "| Metric | Value |\n|--------|-------|\n"
    comment += f"| Changes | {change_desc} |\n"
    comment += f"| Complexity Score | **{complexity.score}/10** |\n"
    comment += f"| Estimated Review Time | ~{complexity.estimated_minutes} minutes |\n\n"
    comment += "### Scoring rationale:"
    for r in complexity.reasons:
        comment += f"\n- {r}"
    if deps_note:
        comment += deps_note
    if test_note:
        comment += test_note
    comment += "\n\n---\n"
    comment += "*Generated by PR Complexity Scorer — [issue #135](https://forge.alexanderwhitestone.com/Timmy_Foundation/compounding-intelligence/issues/135)*"
    return comment
def main():
    parser = argparse.ArgumentParser(description="PR Complexity Scorer")
    parser.add_argument("--org", default="Timmy_Foundation")
    parser.add_argument("--repo", default="compounding-intelligence")
    parser.add_argument("--token", default=os.environ.get("GITEA_TOKEN") or os.path.expanduser("~/.config/gitea/token"))
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--apply", action="store_true")
    parser.add_argument("--output", default="metrics/pr_complexity.json")
    args = parser.parse_args()

    token_path = args.token
    if os.path.exists(token_path):
        with open(token_path) as f:
            token = f.read().strip()
    else:
        token = args.token

    if not token:
        print("ERROR: No Gitea token provided", file=sys.stderr)
        sys.exit(1)

    client = GiteaClient(token)

    print(f"Fetching open PRs for {args.org}/{args.repo}...")
    prs = client.get_open_prs(args.org, args.repo)
    if not prs:
        print("No open PRs found.")
        sys.exit(0)

    print(f"Found {len(prs)} open PR(s). Analyzing...")

    results = []
    Path(args.output).parent.mkdir(parents=True, exist_ok=True)

    for pr in prs:
        pr_num = pr["number"]
        title = pr.get("title", "")
        print(f" Analyzing PR #{pr_num}: {title[:60]}")

        try:
            complexity = analyze_pr(client, args.org, args.repo, pr)
            results.append(complexity.to_dict())

            comment = build_comment(complexity)

            if args.dry_run:
                print(f" → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min [DRY-RUN]")
            elif args.apply:
                success = client.post_comment(args.org, args.repo, pr_num, comment)
                status = "[commented]" if success else "[FAILED]"
                print(f" → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min {status}")
            else:
                print(f" → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min [no action]")

        except Exception as e:
            print(f" ERROR analyzing PR #{pr_num}: {e}", file=sys.stderr)

    with open(args.output, "w") as f:
        json.dump({
            "org": args.org,
            "repo": args.repo,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "pr_count": len(results),
            "results": results
        }, f, indent=2)

    if results:
        scores = [r["score"] for r in results]
        print(f"\nResults saved to {args.output}")
        print(f"Summary: {len(results)} PRs, scores range {min(scores):.0f}-{max(scores):.0f}")
    else:
        print("\nNo results to save.")


if __name__ == "__main__":
    main()
@@ -1,155 +0,0 @@
#!/usr/bin/env python3
"""Tests for dependency_graph.py — transitive closure and deep chain detection."""

import json
import sys
import os
import tempfile
import shutil
from pathlib import Path

sys.path.insert(0, os.path.dirname(__file__) or ".")

import importlib.util
spec = importlib.util.spec_from_file_location(
    "dg", os.path.join(os.path.dirname(__file__) or ".", "dependency_graph.py")
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

transitive_closure = mod.transitive_closure
find_deep_chains = mod.find_deep_chains
detect_cycles = mod.detect_cycles


def make_graph(edges: dict[str, list[str]]) -> dict:
    """Build graph dict in expected format: {repo: {"dependencies": [...]}}."""
    return {
        node: {"dependencies": sorted(deps), "files_scanned": 1}
        for node, deps in edges.items()
    }


def test_transitive_closure_simple_chain():
    graph = make_graph({
        "A": ["B"],
        "B": ["C"],
        "C": [],
    })
    closure = transitive_closure(graph)
    assert closure["A"] == {"B", "C"}
    assert closure["B"] == {"C"}
    assert closure["C"] == set()
    print("✅ Simple chain transitive closure")


def test_transitive_closure_diamond():
    graph = make_graph({
        "A": ["B", "C"],
        "B": ["D"],
        "C": ["D"],
        "D": [],
    })
    closure = transitive_closure(graph)
    assert closure["A"] == {"B", "C", "D"}
    assert closure["B"] == {"D"}
    assert closure["C"] == {"D"}
    assert closure["D"] == set()
    print("✅ Diamond closure")


def test_transitive_closure_with_cycle():
    graph = make_graph({
        "A": ["B"],
        "B": ["C"],
        "C": ["A"],  # cycle
    })
    closure = transitive_closure(graph)
    assert closure["A"] == {"B", "C"}
    assert closure["B"] == {"C", "A"}
    assert closure["C"] == {"A", "B"}
    print("✅ Cycle in transitive closure")


def test_find_deep_chains_simple():
    graph = make_graph({
        "A": ["B"],
        "B": ["C"],
        "C": [],
    })
    chains = find_deep_chains(graph)
    chains_sorted = sorted(chains, key=len, reverse=True)
    assert len(chains_sorted) == 1
    assert chains_sorted[0] == ["A", "B", "C"]
    print("✅ Simple deep chain")


def test_find_deep_chains_multiple():
    graph = make_graph({
        "A": ["B", "C"],
        "B": ["D"],
        "C": ["E"],
        "D": [],
        "E": [],
    })
    chains = find_deep_chains(graph)
    lengths = [len(c) for c in chains]
    assert max(lengths) == 3
    print("✅ Multiple chains detected")


def test_find_deep_chains_with_cycle_does_not_infinite_loop():
    graph = make_graph({
        "A": ["B"],
        "B": ["C"],
        "C": ["A"],
    })
    chains = find_deep_chains(graph)
    print(f"✅ Cycle handled: found {len(chains)} chains")


def test_empty_graph():
    graph = {}
    assert transitive_closure(graph) == {}
    assert find_deep_chains(graph) == []
    print("✅ Empty graph handled")


def test_detect_cycles_shorthand():
    graph = make_graph({
        "A": ["B"],
        "B": ["C"],
        "C": ["A"],
    })
    cycles = detect_cycles(graph)
    assert len(cycles) == 1
    assert set(cycles[0]) == {"A", "B", "C"}
    print("✅ Cycle detection works")


def test_chain_length_reporting():
    graph = make_graph({
        "root": ["a", "b"],
        "a": ["c"],
        "b": ["d"],
        "c": ["e"],
        "d": [],
        "e": [],
    })
    chains = find_deep_chains(graph)
    max_len = max(len(c) for c in chains)
    assert max_len == 4
    print(f"✅ Longest chain length: {max_len}")


if __name__ == "__main__":
    test_transitive_closure_simple_chain()
    test_transitive_closure_diamond()
    test_transitive_closure_with_cycle()
    test_find_deep_chains_simple()
    test_find_deep_chains_multiple()
    test_find_deep_chains_with_cycle_does_not_infinite_loop()
    test_empty_graph()
    test_detect_cycles_shorthand()
    test_chain_length_reporting()
    print("\n✅ All dependency graph tests passed")
scripts/test_pr_complexity_scorer.py (new file, 170 lines)
@@ -0,0 +1,170 @@
#!/usr/bin/env python3
"""
Tests for PR Complexity Scorer — unit tests for the scoring logic.
"""

import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent))

from pr_complexity_scorer import (
    score_pr,
    is_dependency_file,
    is_test_file,
    TIME_PER_POINT,
    SMALL_FILES,
    MEDIUM_FILES,
    LARGE_FILES,
    SMALL_LINES,
    MEDIUM_LINES,
    LARGE_LINES,
)

PASS = 0
FAIL = 0

def test(name):
    def decorator(fn):
        global PASS, FAIL
        try:
            fn()
            PASS += 1
            print(f" [PASS] {name}")
        except AssertionError as e:
            FAIL += 1
            print(f" [FAIL] {name}: {e}")
        except Exception as e:
            FAIL += 1
            print(f" [FAIL] {name}: Unexpected error: {e}")
    return decorator
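Note that this decorator runs each test body immediately at decoration time and never returns fn, so each placeholder function below ends up rebound to None and simply executing the module runs the whole suite. An equivalent explicit form, as a sketch:

def check_addition():
    assert 1 + 1 == 2

test("check addition")(check_addition)   # runs the check now and tallies PASS/FAIL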
def assert_eq(a, b, msg=""):
    if a != b:
        raise AssertionError(f"{msg} expected {b!r}, got {a!r}")

def assert_true(v, msg=""):
    if not v:
        raise AssertionError(msg or "Expected True")

def assert_false(v, msg=""):
    if v:
        raise AssertionError(msg or "Expected False")


print("=== PR Complexity Scorer Tests ===\n")

print("-- File Classification --")
@test("dependency file detection — requirements.txt")
def _():
    assert_true(is_dependency_file("requirements.txt"))
    assert_true(is_dependency_file("src/requirements.txt"))
    assert_false(is_dependency_file("requirements_test.txt"))

@test("dependency file detection — pyproject.toml")
def _():
    assert_true(is_dependency_file("pyproject.toml"))
    assert_false(is_dependency_file("myproject.py"))

@test("test file detection — pytest style")
def _():
    assert_true(is_test_file("tests/test_api.py"))
    assert_true(is_test_file("test_module.py"))
    assert_true(is_test_file("src/module_test.py"))

@test("test file detection — other frameworks")
def _():
    assert_true(is_test_file("spec/feature_spec.rb"))
    assert_true(is_test_file("__tests__/component.test.js"))
    assert_false(is_test_file("testfixtures/helper.py"))


print("\n-- Scoring Logic --")

@test("small PR gets low score (1-3)")
def _():
    score, minutes, _ = score_pr(
        files_changed=3,
        additions=50,
        deletions=10,
        has_dependency_changes=False,
        test_coverage_delta=None
    )
    assert_true(1 <= score <= 3, f"Score should be low, got {score}")
    assert_true(minutes < 20)

@test("medium PR gets medium score (4-6)")
def _():
    score, minutes, _ = score_pr(
        files_changed=15,
        additions=400,
        deletions=100,
        has_dependency_changes=False,
        test_coverage_delta=None
    )
    assert_true(4 <= score <= 6, f"Score should be medium, got {score}")
    assert_true(20 <= minutes <= 45)

@test("large PR gets high score (7-9)")
def _():
    score, minutes, _ = score_pr(
        files_changed=60,
        additions=3000,
        deletions=1500,
        has_dependency_changes=True,
        test_coverage_delta=None
    )
    assert_true(7 <= score <= 9, f"Score should be high, got {score}")
    assert_true(minutes >= 45)

@test("dependency changes boost score")
def _():
    base_score, _, _ = score_pr(
        files_changed=10, additions=200, deletions=50,
        has_dependency_changes=False, test_coverage_delta=None
    )
    dep_score, _, _ = score_pr(
        files_changed=10, additions=200, deletions=50,
        has_dependency_changes=True, test_coverage_delta=None
    )
    assert_true(dep_score > base_score, f"Deps: {base_score} -> {dep_score}")

@test("adding tests lowers complexity")
def _():
    base_score, _, _ = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=None
    )
    better_score, _, _ = score_pr(
        files_changed=8, additions=180, deletions=20,
        has_dependency_changes=False, test_coverage_delta=3
    )
    assert_true(better_score < base_score, f"Tests: {base_score} -> {better_score}")

@test("removing tests increases complexity")
def _():
    base_score, _, _ = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=None
    )
    worse_score, _, _ = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=-2
    )
    assert_true(worse_score > base_score, f"Remove tests: {base_score} -> {worse_score}")

@test("score bounded 1-10")
def _():
    for files, adds, dels in [(1, 10, 5), (100, 10000, 5000)]:
        score, _, _ = score_pr(files, adds, dels, False, None)
        assert_true(1 <= score <= 10, f"Score {score} out of range")

@test("estimated minutes exist for all scores")
def _():
    for s in range(1, 11):
        assert_true(s in TIME_PER_POINT, f"Missing time for score {s}")


print(f"\n=== Results: {PASS} passed, {FAIL} failed ===")
sys.exit(0 if FAIL == 0 else 1)