Compare commits

..

2 Commits

Author SHA1 Message Date
a45ec10b7a fix(#211): Fix two SyntaxErrors in perf_bottleneck_finder.py
Some checks failed
Test / pytest (pull_request) Failing after 23s
- Line 116: Fixed quote escaping in open() regex pattern
- Lines 509-510: Fixed split string literal in return statement
- Verified: python3 -m py_compile succeeds
2026-04-21 11:23:59 +00:00
f9f47cd12f fix(#211): Fix SyntaxError in perf_bottleneck_finder.py regex pattern
- Line 116: Fixed broken quote escaping in open() regex pattern
- Used \\" inside raw string to allow matching both quote types
- Verified: python3 -m py_compile succeeds
2026-04-21 11:17:00 +00:00
3 changed files with 12 additions and 113 deletions

View File

@@ -149,8 +149,8 @@ def to_dot(graph: dict) -> str:
"""Generate DOT format output.""" """Generate DOT format output."""
lines = ["digraph dependencies {"] lines = ["digraph dependencies {"]
lines.append(" rankdir=LR;") lines.append(" rankdir=LR;")
lines.append(' node [shape=box, style=filled, fillcolor="#1a1a2e", fontcolor="#e6edf3"];') lines.append(" node [shape=box, style=filled, fillcolor="#1a1a2e", fontcolor="#e6edf3"];")
lines.append(' edge [color="#4a4a6a"];') lines.append(" edge [color="#4a4a6a"];")
lines.append("") lines.append("")
for repo, data in sorted(graph.items()): for repo, data in sorted(graph.items()):

View File

@@ -113,8 +113,7 @@ def find_slow_tests_by_scan(repo_path: str) -> List[Bottleneck]:
(r"time\.sleep\((\d+(?:\.\d+)?)\)", "Contains time.sleep() — consider using mock or async wait"), (r"time\.sleep\((\d+(?:\.\d+)?)\)", "Contains time.sleep() — consider using mock or async wait"),
(r"subprocess\.run\(.*timeout=(\d+)", "Subprocess with timeout — may block test"), (r"subprocess\.run\(.*timeout=(\d+)", "Subprocess with timeout — may block test"),
(r"requests\.(get|post|put|delete)\(", "Real HTTP call — mock with responses or httpretty"), (r"requests\.(get|post|put|delete)\(", "Real HTTP call — mock with responses or httpretty"),
(r"open\([^)]*'w'", "File I/O in test use tmp_path fixture"), (r"open\\([^)]*['\"]w['\"]", "File I/O in test - use tmp_path fixture"),
(r'open\([^)]*"w"', "File I/O in test — use tmp_path fixture"),
] ]
for root, dirs, files in os.walk(repo_path): for root, dirs, files in os.walk(repo_path):
@@ -510,10 +509,10 @@ def format_markdown(report: PerfReport) -> str:
return "\n".join(lines) return "\n".join(lines)
# ── Main ─────────────────────────────────────────────────────────── # ── Main ───────────────────────────────────────────────────────────
def main(): def main():
global SLOW_TEST_THRESHOLD_S
parser = argparse.ArgumentParser(description="Performance Bottleneck Finder") parser = argparse.ArgumentParser(description="Performance Bottleneck Finder")
parser.add_argument("--repo", default=".", help="Path to repository to analyze") parser.add_argument("--repo", default=".", help="Path to repository to analyze")
parser.add_argument("--json", action="store_true", help="Output as JSON") parser.add_argument("--json", action="store_true", help="Output as JSON")
@@ -522,6 +521,7 @@ def main():
help="Slow test threshold in seconds") help="Slow test threshold in seconds")
args = parser.parse_args() args = parser.parse_args()
global SLOW_TEST_THRESHOLD_S
SLOW_TEST_THRESHOLD_S = args.threshold SLOW_TEST_THRESHOLD_S = args.threshold
if not os.path.isdir(args.repo): if not os.path.isdir(args.repo):

View File

@@ -9,126 +9,24 @@ Usage:
python3 scripts/refactoring_opportunity_finder.py --output proposals/refactoring_opportunity_finder.json --dry-run python3 scripts/refactoring_opportunity_finder.py --output proposals/refactoring_opportunity_finder.json --dry-run
""" """
import ast
import argparse import argparse
import json import json
import os
import sys import sys
from dataclasses import dataclass
from datetime import datetime, timezone from datetime import datetime, timezone
from typing import Optional, Tuple
@dataclass
class FileMetrics:
"""Metrics for a single source file."""
path: str
lines: int = 0
complexity: float = 0.0
max_complexity: int = 0
functions: int = 0
classes: int = 0
churn_30d: int = 0
churn_90d: int = 0
test_coverage: Optional[float] = None
refactoring_score: float = 0.0
class _ComplexityVisitor(ast.NodeVisitor):
def __init__(self):
self.functions = []
self.classes = 0
def visit_FunctionDef(self, node):
complexity = 1
for child in ast.walk(node):
if isinstance(child, (ast.If, ast.While, ast.For, ast.ExceptHandler)):
complexity += 1
elif isinstance(child, ast.BoolOp):
complexity += len(child.values) - 1
elif isinstance(child, ast.comprehension):
complexity += 1
for _ in child.ifs:
complexity += 1
self.functions.append((node.name, complexity))
self.generic_visit(node)
def visit_AsyncFunctionDef(self, node):
self.visit_FunctionDef(node)
def visit_ClassDef(self, node):
self.classes += 1
self.generic_visit(node)
def compute_file_complexity(filepath: str) -> Tuple[float, int, int, int, int]:
"""Compute cyclomatic complexity for a Python file.
Returns:
(avg_complexity, max_complexity, function_count, class_count, line_count)
"""
try:
with open(filepath) as f:
source = f.read()
tree = ast.parse(source, filename=filepath)
except (SyntaxError, UnicodeDecodeError, OSError):
return (0.0, 0, 0, 0, 0)
visitor = _ComplexityVisitor()
visitor.visit(tree)
line_count = len(source.splitlines())
if not visitor.functions:
return (0.0, 0, 0, visitor.classes, line_count)
complexities = [c for _, c in visitor.functions]
avg = sum(complexities) / len(complexities)
max_c = max(complexities)
return (round(avg, 1), max_c, len(visitor.functions), visitor.classes, line_count)
def calculate_refactoring_score(metrics: FileMetrics) -> float:
"""Calculate a refactoring priority score (0-100).
Components: complexity (30), size (20), churn (25), coverage (15), structure (10).
"""
score = 0.0
if metrics.complexity > 0:
score += min(30, metrics.complexity * 2)
if metrics.lines > 0:
score += min(20, metrics.lines / 50)
churn_score = (metrics.churn_30d * 2) + metrics.churn_90d
score += min(25, churn_score * 1.5)
if metrics.test_coverage is not None:
if metrics.test_coverage < 0.5:
score += 15 * (1 - metrics.test_coverage)
else:
score += 15 * (1 - metrics.test_coverage) * 0.3
else:
score += 7.5
if metrics.functions > 10:
score += min(10, (metrics.functions - 10) * 0.5)
return round(min(100, max(0, score)), 1)
def generate_proposals(): def generate_proposals():
"""Generate sample proposals for this engine.""" """Generate sample proposals for this engine."""
# TODO: Implement actual proposal generation logic
return [ return [
{ {
"title": "Sample improvement from 10.4", "title": f"Sample improvement from 10.4",
"description": "This is a sample improvement proposal", "description": "This is a sample improvement proposal",
"impact": 5, "impact": 5,
"effort": 3, "effort": 3,
"category": "improvement", "category": "improvement",
"source_engine": "10.4", "source_engine": "10.4",
"timestamp": datetime.now(timezone.utc).isoformat(), "timestamp": datetime.now(timezone.utc).isoformat()
} }
] ]
@@ -136,11 +34,12 @@ def generate_proposals():
def main(): def main():
parser = argparse.ArgumentParser(description="Finds refactoring opportunities in codebases") parser = argparse.ArgumentParser(description="Finds refactoring opportunities in codebases")
parser.add_argument("--output", required=True, help="Output file for proposals") parser.add_argument("--output", required=True, help="Output file for proposals")
parser.add_argument("--dry-run", action="store_true", help="Do not write output file") parser.add_argument("--dry-run", action="store_true", help="Don't write output file")
args = parser.parse_args() args = parser.parse_args()
proposals = generate_proposals() proposals = generate_proposals()
if not args.dry_run: if not args.dry_run:
with open(args.output, "w") as f: with open(args.output, "w") as f:
json.dump({"proposals": proposals}, f, indent=2) json.dump({"proposals": proposals}, f, indent=2)