#!/usr/bin/env python3
"""
review_quality_scorer.py — Evaluate code review quality.

Scores PR reviews on 5 dimensions (0-100 each):
  - Style: formatting, naming, conventions, lint
  - Logic: algorithmic correctness, edge cases, reasoning
  - Security: vulnerabilities, auth/authz, data exposure
  - Performance: efficiency, bottlenecks, resource usage
  - Tests: coverage, test quality, missing tests

Produces a weighted composite score and a human-readable report.

Usage:
    python3 review_quality_scorer.py --input reviews.json
    python3 review_quality_scorer.py --pr 123 --org Timmy_Foundation --repo compounding-intelligence
"""

from __future__ import annotations

import argparse
import json
import os
import re
import sys
import urllib.error
import urllib.parse
import urllib.request
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List

# ---------------------------------------------------------------------------
# Category weights (must sum to 1.0)
# ---------------------------------------------------------------------------
WEIGHTS = {
    "style": 0.15,
    "logic": 0.25,
    "security": 0.25,
    "performance": 0.15,
    "tests": 0.20,
}

# ---------------------------------------------------------------------------
# Indicator patterns per category (presence suggests the category was
# addressed).  All patterns are matched case-insensitively.
# ---------------------------------------------------------------------------
STYLE_INDICATORS = [
    r"\bstyle\b", r"\blint\b", r"\bformatting\b", r"\bnaming\b",
    r"\bPEP8\b", r"\bblack\b", r"\bprettier\b", r"\bclang-format\b",
    r"\bwhitespace\b", r"\bindentation\b", r"\bconsistent\b",
    r"`(black|isort|flake8|eslint|prettier)`", r"\bformat\s+code\b",
    r"\bstyle\s+guide\b", r"\bconventional\b",
]

LOGIC_INDICATORS = [
    r"\blogic\b", r"\balgorithm\b", r"\bedge\s+case\b",
    # Fixed: was r"\bredgecase\b" (typo) — could never match "edgecase".
    r"\bedgecase\b",
    r"\bincorrect\b", r"\bwrong\b", r"\bbug\b",
    # Fixed: was r"\b flawed\b" — the embedded space made the pattern miss
    # "flawed" at the start of a comment.
    r"\bflawed\b",
    r"\bmissing\s+case\b", r"\bunhandled\b", r"\boverflow\b",
    r"\bunderflow\b", r"\brace\b", r"\bcondition\b", r"\bcheck\s+this\b",
    r"\bthink\s+about\b", r"\bwhat\s+happens\s+when\b",
    r"\bcorrect\s+behavior\b", r"\bverify\s+logic\b",
]

SECURITY_INDICATORS = [
    r"\bsecurity\b", r"\bvuln\b", r"\bCVE\b", r"\bexploit\b",
    r"\battack\b", r"\bXSS\b", r"\bSQL\s+injection\b", r"\bRCE\b",
    r"\bauthorization\b", r"\bauthentication\b", r"\bpermission\b",
    r"\bsensitive\b", r"\bsecret\b", r"\bpassword\b", r"\btoken\b",
    r"\bexposure\b", r"\bsanitize\b", r"\bescape\b", r"\bvalidate\b",
    r"\binjection\b",
    # Fixed: was r"\breadact\b" (typo for "redact").
    r"\bredact\b",
    r"\bhardcode\b", r"\bcreds\b",
    r"\bpublic\s+repo\b", r"\bexfil\b", r"\bleak\b",
]

PERFORMANCE_INDICATORS = [
    r"\bperformance\b", r"\bslow\b", r"\boptimize\b", r"\bbottleneck\b",
    r"\bcpu\b", r"\bmemory\b", r"\bleak\b", r"\bfast\b", r"\befficient\b",
    r"\bcache\b", r"\boverhead\b",
    # Fixed: was r"\bO\(n\)\b" — \b after ")" requires a word character
    # next, so "O(n) " never matched.  Generalized to any big-O expression
    # such as O(n^2) or O(log n).
    r"\bO\([^)]+\)",
    r"\bcomplexity\b",
    r"\bswap\b", r"\bpaging\b", r"\bmultiply\b", r"\bredundant\b",
    r"\bperf\b", r"\bprofiling\b", r"\bprofiler\b", r"\bhot\s+path\b",
    # Fixed: was r"\batch\b" (typo) — matched only the word "atch".
    r"\bbatch\b",
    r"\block\b", r"\bthread\b", r"\bpool\b",
]

TESTS_INDICATORS = [
    r"\btest\b", r"\btesting\b", r"\bcoverage\b", r"\bunittest\b",
    r"\bpytest\b", r"\bassert\b", r"\bmock\b", r"\bstub\b",
    r"\bfixture\b", r"\bspec\b", r"\bTDD\b", r"\bedge\s+case\b",
    r"\bmissing\s+test\b", r"\bno\s+test\b", r"\btest\s+this\b",
    r"\badd\s+tests\b", r"\btest\s+coverage\b", r"\bcoverage\s+gap\b",
    r"\bregression\b", r"\bintegration\s+test\b", r"\bunit\s+test\b",
]

# Depth indicators (presence = deeper review)
DEPTH_MARKERS = [
    r"\bwhy\b", r"\bbecause\b", r"\bexplain\b", r"\bconsider\b",
    r"\balternative\b", r"\boption\b", r"\bsuggestion\b", r"\bfix\b",
    r"\bupdate\b", r"\bchange\b", r"\bimprove\b", r"\bperhaps\b",
    r"\bcould\s+also\b", r"\baside\b", r"\bfootnote\b",
]

# Compiled once — joining DEPTH_MARKERS on every comment was loop-invariant.
_DEPTH_RE = re.compile("|".join(DEPTH_MARKERS), re.IGNORECASE)

# ---------------------------------------------------------------------------
# Scoring helpers
# ---------------------------------------------------------------------------

def _category_presence_score(comments: List[str], indicators: List[str]) -> float:
    """Return 0.0-1.0 measuring how often *indicators* appear in *comments*.

    Each regex hit is worth 0.2, so five or more hits saturate at 1.0.
    """
    if not comments:
        return 0.0
    text = " ".join(comments).lower()
    hits = sum(len(re.findall(pat, text, re.IGNORECASE)) for pat in indicators)
    # Normalize: 1 hit = 0.2, 2 = 0.4, ... cap at 1.0
    return min(1.0, hits * 0.2)


def _depth_score(comments: List[str]) -> float:
    """Return 0.0-1.0 measuring review depth (substantive comments).

    A comment is substantive when it has >= 10 words and contains at least
    one DEPTH_MARKERS keyword.  Any non-empty review gets a 0.1 floor;
    each substantive comment adds 0.3, capped at 1.0.
    """
    if not comments:
        return 0.0
    substantive = sum(
        1 for c in comments
        if len(c.split()) >= 10 and _DEPTH_RE.search(c)
    )
    return min(1.0, 0.1 + substantive * 0.3)


def _category_score(comments: List[str], indicators: List[str], weight: float) -> float:
    """Return a 0-100 category score: 60% keyword presence, 40% depth.

    *weight* is unused here (weighting happens in the composite) but is
    kept so existing callers that pass it keep working.
    """
    pres = _category_presence_score(comments, indicators)
    depth = _depth_score(comments)
    return round((0.6 * pres + 0.4 * depth) * 100, 1)


@dataclass
class ReviewQualityReport:
    """Quality scores: one 0-100 value per category plus the composite."""

    style: float
    logic: float
    security: float
    performance: float
    tests: float
    composite: float             # weighted average of the five category scores
    breakdown: Dict[str, float]  # category name -> rounded score
    comment_count: int           # number of non-empty comment bodies scored
    review_count: int            # approximated as the number of unique bodies

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-serializable dict of all fields."""
        return asdict(self)

    def to_markdown(self) -> str:
        """Render a human-readable markdown report."""
        lines = [
            "# PR Review Quality Report",
            "",
            f"**Composite Score:** {self.composite:.1f} / 100",
            f"**Reviews analyzed:** {self.review_count}",
            f"**Comments found:** {self.comment_count}",
            "",
            "## Category Scores",
            # Fixed: the data rows emit five cells (including the bar) but
            # the header declared only four columns, producing a malformed
            # markdown table.
            "| Category | Score | Weight | Contribution | Bar |",
            "|----------|-------|--------|--------------|-----|",
        ]
        for cat in ("style", "logic", "security", "performance", "tests"):
            score = getattr(self, cat)
            weight = WEIGHTS[cat]
            contrib = score * weight
            bar = "█" * int(score / 5)  # max 20 chars (100 / 5)
            lines.append(
                f"| {cat.capitalize():10} | {score:5.1f} | {weight:.2f} | {contrib:5.1f} | {bar} |"
            )
        lines.extend(["", "## Interpretation", ""])
        if self.composite >= 80:
            verdict = "Excellent — review is thorough across all categories."
        elif self.composite >= 60:
            verdict = "Good — major areas covered, some gaps remain."
        elif self.composite >= 40:
            verdict = "Fair — several categories need more attention."
        else:
            verdict = "Poor — review lacks depth in multiple critical areas."
        lines.append(f"- {verdict}")
        lines.append("")
        return "\n".join(lines)


# ---------------------------------------------------------------------------
# Core
# ---------------------------------------------------------------------------

def score_review(comments: List[str]) -> ReviewQualityReport:
    """Score a list of review comment bodies and return a full report."""
    # Keep only non-empty bodies; fall back to a placeholder so the scoring
    # helpers always see at least one comment (yields the baseline score).
    bodies = [c.strip() for c in comments if c and c.strip()]
    if not bodies:
        bodies = ["(no substantive review comments found)"]

    scores = {
        "style": _category_score(bodies, STYLE_INDICATORS, WEIGHTS["style"]),
        "logic": _category_score(bodies, LOGIC_INDICATORS, WEIGHTS["logic"]),
        "security": _category_score(bodies, SECURITY_INDICATORS, WEIGHTS["security"]),
        "performance": _category_score(bodies, PERFORMANCE_INDICATORS, WEIGHTS["performance"]),
        "tests": _category_score(bodies, TESTS_INDICATORS, WEIGHTS["tests"]),
    }
    composite = round(sum(scores[cat] * WEIGHTS[cat] for cat in WEIGHTS), 1)

    return ReviewQualityReport(
        style=scores["style"],
        logic=scores["logic"],
        security=scores["security"],
        performance=scores["performance"],
        tests=scores["tests"],
        composite=composite,
        breakdown={cat: round(val, 1) for cat, val in scores.items()},
        comment_count=len(bodies),
        # NOTE(review): approximation — counts unique comment bodies, not
        # actual review submissions; the API data needed for the latter is
        # not fetched here.
        review_count=len(set(bodies)),
    )


# ---------------------------------------------------------------------------
# Gitea integration (optional — fetch PR comments)
# ---------------------------------------------------------------------------

GITEA_BASE = "https://forge.alexanderwhitestone.com/api/v1"


class GiteaClient:
    """Minimal read-only Gitea API client using only the standard library."""

    def __init__(self, token: str):
        self.token = token
        self.base_url = GITEA_BASE.rstrip("/")

    def _request(self, path: str, params: Dict[str, Any] | None = None) -> Any:
        """GET *path* (with optional query params) and return parsed JSON.

        Returns None on HTTP or network errors (logged to stderr).
        """
        url = f"{self.base_url}{path}"
        if params:
            # Fixed: build the query string with urlencode so values are
            # percent-escaped, instead of naive f-string interpolation.
            qs = urllib.parse.urlencode(
                {k: v for k, v in params.items() if v is not None}
            )
            url += f"?{qs}"
        req = urllib.request.Request(url)
        req.add_header("Authorization", f"token {self.token}")
        req.add_header("Content-Type", "application/json")
        try:
            with urllib.request.urlopen(req, timeout=30) as resp:
                return json.loads(resp.read().decode())
        except urllib.error.HTTPError as e:
            print(f"API error {e.code}: {e.read().decode()[:200]}", file=sys.stderr)
            return None
        except urllib.error.URLError as e:
            print(f"Network error: {e}", file=sys.stderr)
            return None

    def get_pr_comments(self, org: str, repo: str, pr_number: int) -> List[Dict]:
        """Fetch review comments (not PR discussion comments), paginated."""
        comments: List[Dict] = []
        page = 1
        while True:
            batch = self._request(
                f"/repos/{org}/{repo}/pulls/{pr_number}/comments",
                {"limit": 100, "page": page},
            )
            if not batch:  # None (request error) or empty page — stop
                break
            comments.extend(batch)
            if len(batch) < 100:  # short page means the last page
                break
            page += 1
        return comments


def fetch_review_comments(org: str, repo: str, pr_number: int, token: str) -> List[str]:
    """Return the non-empty comment bodies for a PR."""
    client = GiteaClient(token)
    raw = client.get_pr_comments(org, repo, pr_number)
    # Each comment object: {body, user, ...}
    return [c.get("body", "") for c in raw if c.get("body")]


# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------

def main() -> None:
    """CLI entry point: load comments, score them, write/print the report."""
    parser = argparse.ArgumentParser(description="Review Quality Scorer")
    parser.add_argument("--input", help="JSON file with review findings (list of strings)")
    parser.add_argument("--pr", type=int, help="PR number to fetch from Gitea")
    parser.add_argument("--org", default="Timmy_Foundation", help="Gitea org")
    parser.add_argument("--repo", default="compounding-intelligence", help="Gitea repo")
    parser.add_argument(
        "--token",
        default=os.environ.get("GITEA_TOKEN") or os.path.expanduser("~/.config/gitea/token"),
        help="Gitea API token, or path to a file containing it",
    )
    parser.add_argument("--output", default="metrics/review_quality_report.json", help="Output path")
    parser.add_argument("--markdown", action="store_true", help="Emit human-readable markdown to stdout")
    args = parser.parse_args()

    # --token may be either the literal token or a path to a token file.
    if os.path.exists(args.token):
        with open(args.token) as f:
            token = f.read().strip()
    else:
        token = args.token

    # Get review bodies
    if args.input:
        with open(args.input) as f:
            data = json.load(f)
        if isinstance(data, dict) and "reviews" in data:
            comments = data["reviews"]
        elif isinstance(data, list):
            comments = data
        else:
            print("ERROR: Input JSON must be a list or 'reviews' key", file=sys.stderr)
            sys.exit(1)
    elif args.pr is not None:  # Fixed: `elif args.pr:` rejected PR #0
        if not token:
            print("ERROR: Gitea token required for --pr", file=sys.stderr)
            sys.exit(1)
        comments = fetch_review_comments(args.org, args.repo, args.pr, token)
        print(f"Fetched {len(comments)} review comments from PR #{args.pr}")
    else:
        print("ERROR: Must provide either --input or --pr", file=sys.stderr)
        sys.exit(1)

    # Score
    report = score_review(comments)

    # Always persist JSON; optionally also pretty-print markdown.
    Path(args.output).parent.mkdir(parents=True, exist_ok=True)
    with open(args.output, "w") as f:
        json.dump(report.to_dict(), f, indent=2)
    print(f"Report saved: {args.output}")

    if args.markdown:
        print(report.to_markdown())
    else:
        print(json.dumps(report.to_dict(), indent=2))


if __name__ == "__main__":
    main()
+""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent)) + +from review_quality_scorer import ( + score_review, + _category_presence_score, + _depth_score, + _category_score, + ReviewQualityReport, + STYLE_INDICATORS, + LOGIC_INDICATORS, + SECURITY_INDICATORS, + PERFORMANCE_INDICATORS, + TESTS_INDICATORS, +) + +PASS = 0 +FAIL = 0 + +def test(name): + def decorator(fn): + global PASS, FAIL + try: + fn() + PASS += 1 + print(f" [PASS] {name}") + except AssertionError as e: + FAIL += 1 + print(f" [FAIL] {name}: {e}") + except Exception as e: + FAIL += 1 + print(f" [FAIL] {name}: Unexpected error: {e}") + return decorator + +def assert_eq(a, b, msg=""): + if abs(a - b) > 0.1: + raise AssertionError(f"{msg} expected ~{b!r}, got {a!r}") + +def assert_true(v, msg=""): + if not v: + raise AssertionError(msg or "Expected True") + +def assert_false(v, msg=""): + if v: + raise AssertionError(msg or "Expected False") + + +print("=== Review Quality Scorer Tests ===\n") + +print("-- Category Presence --") + +@test("style keywords detected") +def _(): + score = _category_presence_score( + ["Please fix the formatting and run black."], + STYLE_INDICATORS + ) + assert_true(score > 0, f"Style score should be > 0, got {score}") + +@test("logic keywords detected") +def _(): + score = _category_presence_score( + ["Consider the edge case when input is empty.", "This algorithm is O(n^2)"], + LOGIC_INDICATORS + ) + assert_true(score > 0, f"Logic score should be > 0") + +@test("security keywords detected") +def _(): + score = _category_presence_score( + ["Potential SQL injection here.", "Don't hardcode secrets"], + SECURITY_INDICATORS + ) + assert_true(score > 0) + +@test("performance keywords detected") +def _(): + score = _category_presence_score( + ["This loop is a bottleneck.", "Memory usage could be optimized"], + PERFORMANCE_INDICATORS + ) + assert_true(score > 0) + +@test("tests keywords detected") +def _(): + score = _category_presence_score( + 
["Add tests for this branch.", "Missing test coverage here"], + TESTS_INDICATORS + ) + assert_true(score > 0) + +@test("no keywords → score 0") +def _(): + score = _category_presence_score( + ["Looks good to me."], + STYLE_INDICATORS + ) + assert_eq(score, 0.0) + + +print("\n-- Depth Scoring --") + +@test("shallow comment → low depth") +def _(): + d = _depth_score(["OK"]) + assert_eq(d, 0.0) + +@test("substantive comment → positive depth") +def _(): + d = _depth_score([ + "Please consider updating this logic: when x is zero we divide by zero. " + "Why not add an early return? This would fix the edge case." + ]) + assert_true(d > 0.3) + + +print("\n-- Category Score Integration --") + +@test("thorough style review scores high on style") +def _(): + comments = [ + "The indentation is inconsistent — please run black to auto-format.", + "Function names are camelCase but should be snake_case per PEP8.", + "Trailing whitespace on several lines — please clean up.", + "Missing .gitignore would accidentally commit __pycache__ and .venv.", + "Consider adding a linter (flake8) to catch these style issues early.", + ] + rpt = score_review(comments) + assert_true(rpt.style >= 50, f"Style score should be >= 50, got {rpt.style}") + +@test("thorough logic review scores high on logic") +def _(): + comments = [ + "What happens if the input list is empty? The algorithm would crash.", + "This nested loop is O(n^2). 
Could we use a dictionary for O(n)?", + "Negative numbers aren't handled — possible overflow.", + "Consider the edge case where the user passes None.", + "Please add input validation at the start of the function.", + "Why not extract this into a pure function for easier testing?", + ] + rpt = score_review(comments) + assert_true(rpt.logic >= 50) + +@test("thorough security review scores high on security") +def _(): + comments = [ + "The SQL query uses string concatenation — vulnerable to SQL injection.", + "API token is hardcoded in source — move to environment variables.", + "Check for XSS when rendering user-provided HTML.", + "Are we validating all user inputs before processing?", + "Consider rate limiting to prevent abuse.", + "Ensure secrets are never committed to the repository.", + ] + rpt = score_review(comments) + assert_true(rpt.security >= 50) + +@test("combines multiple categories") +def _(): + comments = [ + "Please run black to auto-format. Also, the O(n²) loop here will hurt performance on large inputs.", + "Security risk: hardcoded API token. Style: inconsistent indentation. Logic: missing null check could crash.", + "Missing test coverage for edge cases. Also consider caching the result to improve performance.", + "Naming violates PEP8 (style). Edge case: negative inputs cause overflow (logic). 
Potential XSS when rendering user HTML (security).", + "Run a linter (style), add unit tests (tests), and check for memory leaks (performance).", + ] + rpt = score_review(comments) + assert_true(rpt.style >= 50) + assert_true(rpt.logic >= 40) + assert_true(rpt.security >= 50) + assert_true(rpt.performance >= 50) + assert_true(rpt.tests >= 50) + +@test("composite is weighted average") +def _(): + # Generate a known distribution to verify math + comments = ["x"] * 20 # very shallow + rpt = score_review(comments) + # All categories should be equal-ish + assert_true(0 <= rpt.composite <= 100) + + +print("\n-- Edge Cases --") + +@test("empty comments produces non-zero baseline") +def _(): + rpt = score_review([]) + assert_true(rpt.composite >= 0) + +@test("single one-word comment → very low") +def _(): + rpt = score_review(["OK"]) + assert_true(rpt.composite < 40) + + +print(f"\n=== Results: {PASS} passed, {FAIL} failed ===") +sys.exit(0 if FAIL == 0 else 1) + -- 2.43.0