From b172e720e47db89cd7ad05b19387f206ce2f4ff0 Mon Sep 17 00:00:00 2001
From: Alexander Whitestone
Date: Tue, 21 Apr 2026 11:30:58 +0000
Subject: [PATCH 1/2] =?UTF-8?q?feat:=20implement=20refactoring=20opportuni?=
 =?UTF-8?q?ty=20finder=20=E2=80=94=20AST=20complexity=20+=20scoring=20(#21?=
 =?UTF-8?q?0)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
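
The finder walks a repo with os.walk, parses each .py file via ast,
and scores it 0-100 from four capped components: cyclomatic
complexity (up to 40 points), file size (up to 20), git churn (up to
30), and test coverage (up to 10). Files scoring at or above
--threshold become proposals, sorted by score. Note that this patch
does not yet populate churn or coverage during the scan, so those
components fall back to their defaults (0 commits, unknown coverage).

Worked scoring example (numbers mirror the high-complexity test
fixture, not a real file):

    avg complexity 15.0, max 25 -> min(40, 15.0 * 4) = 40.0
                                   (max-complexity bonus absorbed by the cap)
    200 lines                   -> 10 + 100 * 0.0125 = 11.25
    churn 5 (30d) / 15 (90d)    -> min(30, 5*2 + 15*0.5) = 17.5
    coverage 0.3                -> 7
    total                       -> 75.8

Example invocation (output path as in the previous docstring):

    python3 scripts/refactoring_opportunity_finder.py \
        --repo . \
        --output proposals/refactoring_opportunity_finder.json \
        --threshold 30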
---
 scripts/refactoring_opportunity_finder.py | 150 +++++++++++++++++-----
 1 file changed, 120 insertions(+), 30 deletions(-)

diff --git a/scripts/refactoring_opportunity_finder.py b/scripts/refactoring_opportunity_finder.py
index ecd33b0..ad25a0f 100755
--- a/scripts/refactoring_opportunity_finder.py
+++ b/scripts/refactoring_opportunity_finder.py
@@ -1,54 +1,144 @@
 #!/usr/bin/env python3
 """
-Finds refactoring opportunities in codebases
-
-Engine ID: 10.4
-
-Usage:
-  python3 scripts/refactoring_opportunity_finder.py --output proposals/refactoring_opportunity_finder.json
-  python3 scripts/refactoring_opportunity_finder.py --output proposals/refactoring_opportunity_finder.json --dry-run
+Refactoring Opportunity Finder — Engine 10.4
 """
 
 import argparse
+import ast
 import json
+import os
 import sys
+from dataclasses import dataclass, asdict
 from datetime import datetime, timezone
+from typing import List, Optional, Tuple
 
 
-def generate_proposals():
-    """Generate sample proposals for this engine."""
-    # TODO: Implement actual proposal generation logic
-    return [
-        {
-            "title": f"Sample improvement from 10.4",
-            "description": "This is a sample improvement proposal",
-            "impact": 5,
-            "effort": 3,
-            "category": "improvement",
-            "source_engine": "10.4",
-            "timestamp": datetime.now(timezone.utc).isoformat()
-        }
-    ]
+@dataclass
+class FileMetrics:
+    path: str
+    lines: int
+    complexity: float
+    max_complexity: int
+    functions: int
+    classes: int
+    churn_30d: int = 0
+    churn_90d: int = 0
+    test_coverage: Optional[float] = None
+    refactoring_score: float = 0.0
+
+
+class ComplexityVisitor(ast.NodeVisitor):
+    def __init__(self):
+        self.functions = []
+        self.classes = 0
+
+    def visit_FunctionDef(self, node):
+        complexity = 1
+        for child in ast.walk(node):
+            if isinstance(child, (ast.If, ast.While, ast.For)):
+                complexity += 1
+            elif isinstance(child, ast.BoolOp):
+                complexity += len(child.values) - 1
+            elif isinstance(child, ast.ExceptHandler):
+                complexity += 1
+            elif isinstance(child, ast.Assert):
+                complexity += 1
+        self.functions.append((node.name, complexity, node.lineno))
+        self.generic_visit(node)
+
+    def visit_AsyncFunctionDef(self, node):
+        self.visit_FunctionDef(node)
+
+    def visit_ClassDef(self, node):
+        self.classes += 1
+        self.generic_visit(node)
+
+
+def compute_file_complexity(filepath):
+    try:
+        with open(filepath) as f:
+            source = f.read()
+    except (FileNotFoundError, IsADirectoryError, PermissionError):
+        return (0.0, 0, 0, 0, 0)
+    try:
+        tree = ast.parse(source)
+    except SyntaxError:
+        return (0.0, 0, 0, 0, 0)
+    lines = source.count("\n") + 1 if source.strip() else 0
+    visitor = ComplexityVisitor()
+    visitor.visit(tree)
+    if not visitor.functions:
+        return (0.0, 0, 0, visitor.classes, lines)
+    complexities = [c for _, c, _ in visitor.functions]
+    avg = sum(complexities) / len(complexities)
+    max_c = max(complexities)
+    return (round(avg, 1), max_c, len(visitor.functions), visitor.classes, lines)
+
+
+def calculate_refactoring_score(metrics):
+    score = 0.0
+    complexity_score = min(40, metrics.complexity * 4)
+    if metrics.max_complexity > 10:
+        complexity_score = min(40, complexity_score + (metrics.max_complexity - 10))
+    score += complexity_score
+    if metrics.lines <= 0:
+        pass
+    elif metrics.lines <= 100:
+        score += metrics.lines * 0.1
+    elif metrics.lines <= 500:
+        score += 10 + (metrics.lines - 100) * 0.0125
+    else:
+        score += min(20, 15 + (metrics.lines - 500) * 0.01)
+    churn_score = (metrics.churn_30d * 2) + (metrics.churn_90d * 0.5)
+    score += min(30, churn_score)
+    if metrics.test_coverage is None:
+        score += 5
+    elif metrics.test_coverage < 0.3:
+        score += 10
+    elif metrics.test_coverage < 0.6:
+        score += 7
+    elif metrics.test_coverage < 0.8:
+        score += 4
+    else:
+        score += 1
+    return round(min(100, max(0, score)), 1)
+
+
+def generate_proposals(repo_path=".", threshold=30.0):
+    proposals = []
+    for root, dirs, files in os.walk(repo_path):
+        dirs[:] = [d for d in dirs if not d.startswith((".", "__pycache__", "node_modules", ".git", "venv"))]
+        for fname in files:
+            if not fname.endswith(".py"):
+                continue
+            filepath = os.path.join(root, fname)
+            relpath = os.path.relpath(filepath, repo_path)
+            avg, max_c, funcs, classes, lines = compute_file_complexity(filepath)
+            if funcs == 0 and classes == 0:
+                continue
+            metrics = FileMetrics(path=relpath, lines=lines, complexity=avg, max_complexity=max_c, functions=funcs, classes=classes)
+            score = calculate_refactoring_score(metrics)
+            metrics.refactoring_score = score
+            if score >= threshold:
+                proposals.append({"title": f"Refactor {relpath} (score: {score})", "impact": min(10, int(score / 10)), "effort": min(10, max(1, funcs // 3)), "category": "refactoring", "source_engine": "10.4", "timestamp": datetime.now(timezone.utc).isoformat(), "metrics": asdict(metrics)})
+    return sorted(proposals, key=lambda p: p.get("metrics", {}).get("refactoring_score", 0), reverse=True)
 
 
 def main():
-    parser = argparse.ArgumentParser(description="Finds refactoring opportunities in codebases")
-    parser.add_argument("--output", required=True, help="Output file for proposals")
-    parser.add_argument("--dry-run", action="store_true", help="Don't write output file")
-
+    parser = argparse.ArgumentParser(description="Find refactoring opportunities")
+    parser.add_argument("--repo", default=".")
+    parser.add_argument("--output", required=True)
+    parser.add_argument("--dry-run", action="store_true")
+    parser.add_argument("--threshold", type=float, default=30.0)
     args = parser.parse_args()
-
-    proposals = generate_proposals()
-
+    proposals = generate_proposals(args.repo, args.threshold)
     if not args.dry_run:
         with open(args.output, "w") as f:
             json.dump({"proposals": proposals}, f, indent=2)
         print(f"Generated {len(proposals)} proposals -> {args.output}")
     else:
-        print(f"Would generate {len(proposals)} proposals")
         for p in proposals:
             print(f"  - {p['title']}")
 
-
 if __name__ == "__main__":
     main()
-- 
2.43.0


From d7cfda8f034e7969e447cfeb4cda1ac48dff45ff Mon Sep 17 00:00:00 2001
From: Alexander Whitestone
Date: Tue, 21 Apr 2026 11:30:59 +0000
Subject: [PATCH 2/2] test: sync refactoring opportunity finder tests (#210)
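
Fixtures are collapsed to single-line f.write() strings and the
per-assert messages are dropped; the asserted behavior is unchanged.
To run the suite directly (assumed to be invoked from the repo root):

    python3 scripts/test_refactoring_opportunity_finder.py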
---
 .../test_refactoring_opportunity_finder.py | 167 +++---------------
 1 file changed, 27 insertions(+), 140 deletions(-)

diff --git a/scripts/test_refactoring_opportunity_finder.py b/scripts/test_refactoring_opportunity_finder.py
index de2e6fd..83cbcf2 100644
--- a/scripts/test_refactoring_opportunity_finder.py
+++ b/scripts/test_refactoring_opportunity_finder.py
@@ -19,208 +19,95 @@ FileMetrics = mod.FileMetrics
 
 
 def test_complexity_simple_function():
-    """Simple function should have low complexity."""
     with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
-        f.write("""
-def simple():
-    return 42
-""")
+        f.write("\ndef simple():\n    return 42\n")
         f.flush()
         avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
-        assert avg == 1.0, f"Expected 1.0, got {avg}"
-        assert max_c == 1, f"Expected 1, got {max_c}"
-        assert funcs == 1, f"Expected 1, got {funcs}"
-        assert classes == 0, f"Expected 0, got {classes}"
+        assert avg == 1.0
+        assert max_c == 1
+        assert funcs == 1
+        assert classes == 0
     os.unlink(f.name)
     print("PASS: test_complexity_simple_function")
 
 
 def test_complexity_with_conditionals():
-    """Function with if/else should have higher complexity."""
     with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
-        f.write("""
-def complex_func(x):
-    if x > 0:
-        if x > 10:
-            return "big"
-        else:
-            return "small"
-    elif x < 0:
-        return "negative"
-    else:
-        return "zero"
-""")
+        f.write("\ndef complex_func(x):\n    if x > 0:\n        if x > 10:\n            return 'big'\n        else:\n            return 'small'\n    elif x < 0:\n        return 'negative'\n    else:\n        return 'zero'\n")
         f.flush()
         avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
-        # Base 1 + 3 if/elif + 1 nested if = 5
-        assert max_c >= 4, f"Expected max_c >= 4, got {max_c}"
-        assert funcs == 1, f"Expected 1, got {funcs}"
+        assert max_c >= 4
+        assert funcs == 1
     os.unlink(f.name)
     print("PASS: test_complexity_with_conditionals")
 
 
 def test_complexity_with_loops():
-    """Function with loops should increase complexity."""
     with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
-        f.write("""
-def loop_func(items):
-    result = []
-    for item in items:
-        if item > 0:
-            result.append(item)
-    while len(result) > 10:
-        result.pop()
-    return result
-""")
+        f.write("\ndef loop_func(items):\n    result = []\n    for item in items:\n        if item > 0:\n            result.append(item)\n    while len(result) > 10:\n        result.pop()\n    return result\n")
         f.flush()
         avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
-        # Base 1 + 1 for + 1 if + 1 while = 4
-        assert max_c >= 3, f"Expected max_c >= 3, got {max_c}"
+        assert max_c >= 3
    os.unlink(f.name)
    print("PASS: test_complexity_with_loops")
 
 
 def test_complexity_with_class():
-    """Class with methods should count both."""
     with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
-        f.write("""
-class MyClass:
-    def method1(self):
-        if True:
-            pass
-
-    def method2(self):
-        for i in range(10):
-            pass
-""")
+        f.write("\nclass MyClass:\n    def method1(self):\n        if True:\n            pass\n    def method2(self):\n        for i in range(10):\n            pass\n")
         f.flush()
         avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
-        assert classes == 1, f"Expected 1 class, got {classes}"
-        assert funcs == 2, f"Expected 2 functions, got {funcs}"
+        assert classes == 1
+        assert funcs == 2
     os.unlink(f.name)
     print("PASS: test_complexity_with_class")
 
 
 def test_complexity_syntax_error():
-    """File with syntax error should return zeros."""
     with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
         f.write("def broken(:\n    pass")
         f.flush()
         avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
-        assert avg == 0.0, f"Expected 0.0, got {avg}"
-        assert funcs == 0, f"Expected 0, got {funcs}"
+        assert avg == 0.0
+        assert funcs == 0
     os.unlink(f.name)
     print("PASS: test_complexity_syntax_error")
 
 
 def test_refactoring_score_high_complexity():
-    """High complexity should give high score."""
-    metrics = FileMetrics(
-        path="test.py",
-        lines=200,
-        complexity=15.0,
-        max_complexity=25,
-        functions=10,
-        classes=2,
-        churn_30d=5,
-        churn_90d=15,
-        test_coverage=0.3,
-        refactoring_score=0.0
-    )
+    metrics = FileMetrics(path="test.py", lines=200, complexity=15.0, max_complexity=25, functions=10, classes=2, churn_30d=5, churn_90d=15, test_coverage=0.3, refactoring_score=0.0)
     score = calculate_refactoring_score(metrics)
-    assert score > 50, f"Expected score > 50, got {score}"
+    assert score > 50
     print("PASS: test_refactoring_score_high_complexity")
 
 
 def test_refactoring_score_low_complexity():
-    """Low complexity should give lower score."""
-    metrics = FileMetrics(
-        path="test.py",
-        lines=50,
-        complexity=2.0,
-        max_complexity=3,
-        functions=3,
-        classes=0,
-        churn_30d=0,
-        churn_90d=1,
-        test_coverage=0.9,
-        refactoring_score=0.0
-    )
+    metrics = FileMetrics(path="test.py", lines=50, complexity=2.0, max_complexity=3, functions=3, classes=0, churn_30d=0, churn_90d=1, test_coverage=0.9, refactoring_score=0.0)
     score = calculate_refactoring_score(metrics)
-    assert score < 30, f"Expected score < 30, got {score}"
+    assert score < 30
     print("PASS: test_refactoring_score_low_complexity")
 
 
 def test_refactoring_score_high_churn():
-    """High churn should increase score."""
-    metrics = FileMetrics(
-        path="test.py",
-        lines=100,
-        complexity=5.0,
-        max_complexity=8,
-        functions=5,
-        classes=0,
-        churn_30d=10,
-        churn_90d=20,
-        test_coverage=0.5,
-        refactoring_score=0.0
-    )
+    metrics = FileMetrics(path="test.py", lines=100, complexity=5.0, max_complexity=8, functions=5, classes=0, churn_30d=10, churn_90d=20, test_coverage=0.5, refactoring_score=0.0)
     score = calculate_refactoring_score(metrics)
-    # Churn should contribute significantly
-    assert score > 40, f"Expected score > 40 for high churn, got {score}"
+    assert score > 40
     print("PASS: test_refactoring_score_high_churn")
 
 
 def test_refactoring_score_no_coverage():
-    """No coverage data should assume medium risk."""
-    metrics = FileMetrics(
-        path="test.py",
-        lines=100,
-        complexity=5.0,
-        max_complexity=8,
-        functions=5,
-        classes=0,
-        churn_30d=1,
-        churn_90d=2,
-        test_coverage=None,
-        refactoring_score=0.0
-    )
+    metrics = FileMetrics(path="test.py", lines=100, complexity=5.0, max_complexity=8, functions=5, classes=0, churn_30d=1, churn_90d=2, test_coverage=None, refactoring_score=0.0)
     score = calculate_refactoring_score(metrics)
-    # Should have some score from the 5-point coverage component
-    assert score > 0, f"Expected positive score, got {score}"
+    assert score > 0
     print("PASS: test_refactoring_score_no_coverage")
 
 
 def test_refactoring_score_large_file():
-    """Large files should score higher."""
-    metrics_small = FileMetrics(
-        path="small.py",
-        lines=50,
-        complexity=5.0,
-        max_complexity=8,
-        functions=3,
-        classes=0,
-        churn_30d=1,
-        churn_90d=2,
-        test_coverage=0.8,
-        refactoring_score=0.0
-    )
-    metrics_large = FileMetrics(
-        path="large.py",
-        lines=1000,
-        complexity=5.0,
-        max_complexity=8,
-        functions=3,
-        classes=0,
-        churn_30d=1,
-        churn_90d=2,
-        test_coverage=0.8,
-        refactoring_score=0.0
-    )
+    metrics_small = FileMetrics(path="small.py", lines=50, complexity=5.0, max_complexity=8, functions=3, classes=0, churn_30d=1, churn_90d=2, test_coverage=0.8, refactoring_score=0.0)
+    metrics_large = FileMetrics(path="large.py", lines=1000, complexity=5.0, max_complexity=8, functions=3, classes=0, churn_30d=1, churn_90d=2, test_coverage=0.8, refactoring_score=0.0)
     score_small = calculate_refactoring_score(metrics_small)
     score_large = calculate_refactoring_score(metrics_large)
-    assert score_large > score_small, \
({score_small})" + assert score_large > score_small print("PASS: test_refactoring_score_large_file") @@ -239,4 +126,4 @@ def run_all(): if __name__ == "__main__": - run_all() \ No newline at end of file + run_all() -- 2.43.0