Compare commits
3 Commits
fix/210-re...fix/201-py
| Author | SHA1 | Date |
|---|---|---|
| | 7a4677c752 | |
| | 229c327c9e | |
| | 537bb1b61b | |
@@ -1,144 +1,54 @@
#!/usr/bin/env python3
"""
Refactoring Opportunity Finder — Engine 10.4
Finds refactoring opportunities in codebases

Engine ID: 10.4

Usage:
    python3 scripts/refactoring_opportunity_finder.py --output proposals/refactoring_opportunity_finder.json
    python3 scripts/refactoring_opportunity_finder.py --output proposals/refactoring_opportunity_finder.json --dry-run
"""

import argparse
import ast
import json
import os
import sys
from dataclasses import dataclass, asdict
from datetime import datetime, timezone
from typing import List, Optional, Tuple


@dataclass
class FileMetrics:
    path: str
    lines: int
    complexity: float
    max_complexity: int
    functions: int
    classes: int
    churn_30d: int = 0
    churn_90d: int = 0
    test_coverage: Optional[float] = None
    refactoring_score: float = 0.0


class ComplexityVisitor(ast.NodeVisitor):
    def __init__(self):
        self.functions = []
        self.classes = 0

    def visit_FunctionDef(self, node):
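        # Approximate cyclomatic complexity: start at 1 and add one for each branch point counted below.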
        complexity = 1
        for child in ast.walk(node):
            if isinstance(child, (ast.If, ast.While, ast.For)):
                complexity += 1
            elif isinstance(child, ast.BoolOp):
                complexity += len(child.values) - 1
            elif isinstance(child, ast.ExceptHandler):
                complexity += 1
            elif isinstance(child, ast.Assert):
                complexity += 1
        self.functions.append((node.name, complexity, node.lineno))
        self.generic_visit(node)

    def visit_AsyncFunctionDef(self, node):
        self.visit_FunctionDef(node)

    def visit_ClassDef(self, node):
        self.classes += 1
        self.generic_visit(node)


def compute_file_complexity(filepath):
    try:
        with open(filepath) as f:
            source = f.read()
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        return (0.0, 0, 0, 0, 0)
    try:
        tree = ast.parse(source)
    except SyntaxError:
        return (0.0, 0, 0, 0, 0)
    lines = source.count("\n") + 1 if source.strip() else 0
    visitor = ComplexityVisitor()
    visitor.visit(tree)
    if not visitor.functions:
        return (0.0, 0, 0, visitor.classes, lines)
    complexities = [c for _, c, _ in visitor.functions]
    avg = sum(complexities) / len(complexities)
    max_c = max(complexities)
    return (round(avg, 1), max_c, len(visitor.functions), visitor.classes, lines)


def calculate_refactoring_score(metrics):
    score = 0.0
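    # Scoring bands: complexity contributes up to 40 points, file size up to 20,
    # churn up to 30, and test coverage 1 to 10; the total is capped at 100.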
    complexity_score = min(40, metrics.complexity * 4)
    if metrics.max_complexity > 10:
        complexity_score = min(40, complexity_score + (metrics.max_complexity - 10))
    score += complexity_score
    if metrics.lines <= 0:
        pass
    elif metrics.lines <= 100:
        score += metrics.lines * 0.1
    elif metrics.lines <= 500:
        score += 10 + (metrics.lines - 100) * 0.0125
    else:
        score += min(20, 15 + (metrics.lines - 500) * 0.01)
    churn_score = (metrics.churn_30d * 2) + (metrics.churn_90d * 0.5)
    score += min(30, churn_score)
    if metrics.test_coverage is None:
        score += 5
    elif metrics.test_coverage < 0.3:
        score += 10
    elif metrics.test_coverage < 0.6:
        score += 7
    elif metrics.test_coverage < 0.8:
        score += 4
    else:
        score += 1
    return round(min(100, max(0, score)), 1)


def generate_proposals(repo_path=".", threshold=30.0):
    proposals = []
    for root, dirs, files in os.walk(repo_path):
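        # Prune hidden and vendored directories in place so os.walk skips them.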
        dirs[:] = [d for d in dirs if not d.startswith((".", "__pycache__", "node_modules", ".git", "venv"))]
        for fname in files:
            if not fname.endswith(".py"):
                continue
            filepath = os.path.join(root, fname)
            relpath = os.path.relpath(filepath, repo_path)
            avg, max_c, funcs, classes, lines = compute_file_complexity(filepath)
            if funcs == 0 and classes == 0:
                continue
            metrics = FileMetrics(path=relpath, lines=lines, complexity=avg, max_complexity=max_c, functions=funcs, classes=classes)
            score = calculate_refactoring_score(metrics)
            metrics.refactoring_score = score
            if score >= threshold:
                proposals.append({"title": f"Refactor {relpath} (score: {score})", "impact": min(10, int(score / 10)), "effort": min(10, max(1, funcs // 3)), "category": "refactoring", "source_engine": "10.4", "timestamp": datetime.now(timezone.utc).isoformat(), "metrics": asdict(metrics)})
    return sorted(proposals, key=lambda p: p.get("metrics", {}).get("refactoring_score", 0), reverse=True)
def generate_proposals():
    """Generate sample proposals for this engine."""
    # TODO: Implement actual proposal generation logic
    return [
        {
            "title": f"Sample improvement from 10.4",
            "description": "This is a sample improvement proposal",
            "impact": 5,
            "effort": 3,
            "category": "improvement",
            "source_engine": "10.4",
            "timestamp": datetime.now(timezone.utc).isoformat()
        }
    ]


def main():
    parser = argparse.ArgumentParser(description="Find refactoring opportunities")
    parser.add_argument("--repo", default=".")
    parser.add_argument("--output", required=True)
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--threshold", type=float, default=30.0)
    parser = argparse.ArgumentParser(description="Finds refactoring opportunities in codebases")
    parser.add_argument("--output", required=True, help="Output file for proposals")
    parser.add_argument("--dry-run", action="store_true", help="Don't write output file")

    args = parser.parse_args()
    proposals = generate_proposals(args.repo, args.threshold)

    proposals = generate_proposals()

    if not args.dry_run:
        with open(args.output, "w") as f:
            json.dump({"proposals": proposals}, f, indent=2)
        print(f"Generated {len(proposals)} proposals -> {args.output}")
    else:
        print(f"Would generate {len(proposals)} proposals")
        for p in proposals:
            print(f"  - {p['title']}")


if __name__ == "__main__":
    main()

@@ -1,212 +1,72 @@
#!/usr/bin/env python3
"""
Comprehensive test script for knowledge extraction prompt.
Validates prompt structure, requirements, and consistency.
"""

import json
import re
"""Comprehensive tests for knowledge extraction prompt."""
import json, re
from pathlib import Path

def test_prompt_structure():
    """Test that the prompt has the required structure."""
    prompt_path = Path("templates/harvest-prompt.md")
    if not prompt_path.exists():
        return False, "harvest-prompt.md not found"

    content = prompt_path.read_text()

    # Check for required sections
    required_sections = [
        "System Prompt",
        "Instructions",
        "Categories",
        "Output Format",
        "Confidence Scoring",
        "Constraints",
        "Example"
    ]

    for section in required_sections:
        if section.lower() not in content.lower():
            return False, f"Missing required section: {section}"

    # Check for required categories
    required_categories = ["fact", "pitfall", "pattern", "tool-quirk", "question"]
    for category in required_categories:
        if category not in content:
            return False, f"Missing required category: {category}"

    # Check for required output fields
    required_fields = ["fact", "category", "repo", "confidence"]
    for field in required_fields:
        if field not in content:
            return False, f"Missing required output field: {field}"

    # Check prompt size (should be ~1k tokens, roughly 4k chars)
    if len(content) > 5000:
        return False, f"Prompt too large: {len(content)} chars (max ~5000)"

    if len(content) < 1000:
        return False, f"Prompt too small: {len(content)} chars (min ~1000)"

def check_prompt_structure():
    p = Path("templates/harvest-prompt.md")
    if not p.exists(): return False, "harvest-prompt.md not found"
    c = p.read_text()
    for s in ["System Prompt","Instructions","Categories","Output Format","Confidence Scoring","Constraints","Example"]:
        if s.lower() not in c.lower(): return False, f"Missing section: {s}"
    for cat in ["fact","pitfall","pattern","tool-quirk","question"]:
        if cat not in c: return False, f"Missing category: {cat}"
    if len(c) > 5000: return False, f"Too large: {len(c)}"
    if len(c) < 1000: return False, f"Too small: {len(c)}"
    return True, "Prompt structure is valid"

def check_confidence_scoring():
    c = Path("templates/harvest-prompt.md").read_text()
    for l in ["0.9-1.0","0.7-0.8","0.5-0.6","0.3-0.4","0.1-0.2"]:
        if l not in c: return False, f"Missing level: {l}"
    return True, "Confidence scoring defined"

def check_example_quality():
    c = Path("templates/harvest-prompt.md").read_text()
    if "example" not in c.lower(): return False, "No examples"
    m = re.search(r'"knowledge"', c[c.lower().find("example"):])
    if not m: return False, "No JSON example"
    return True, "Examples present"

def check_constraint_coverage():
    c = Path("templates/harvest-prompt.md").read_text()
    for x in ["no hallucination","explicitly","partial","failed sessions"]:
        if x not in c.lower(): return False, f"Missing: {x}"
    return True, "Constraints covered"

def check_test_sessions():
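    # Each *.jsonl session file must contain one valid JSON object per line.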
    d = Path("test_sessions")
    if not d.exists(): return False, "test_sessions/ not found"
    files = list(d.glob("*.jsonl"))
    if len(files) < 5: return False, f"Only {len(files)} sessions"
    for f in files:
        for i, line in enumerate(f.read_text().strip().split("\n"), 1):
            try: json.loads(line)
            except json.JSONDecodeError as e: return False, f"{f.name}:{i}: {e}"
    return True, f"{len(files)} valid sessions"

def test_prompt_structure():
    passed, msg = check_prompt_structure()
    assert passed, msg

def test_confidence_scoring():
    """Test that confidence scoring is properly defined."""
    prompt_path = Path("templates/harvest-prompt.md")
    content = prompt_path.read_text()

    # Check for confidence scale definitions
    confidence_levels = [
        ("0.9-1.0", "explicitly stated"),
        ("0.7-0.8", "clearly implied"),
        ("0.5-0.6", "suggested"),
        ("0.3-0.4", "inferred"),
        ("0.1-0.2", "speculative")
    ]

    for level, description in confidence_levels:
        if level not in content:
            return False, f"Missing confidence level: {level}"
        if description.lower() not in content.lower():
            return False, f"Missing confidence description: {description}"

    return True, "Confidence scoring is properly defined"
    passed, msg = check_confidence_scoring()
    assert passed, msg

def test_example_quality():
    """Test that examples are clear and complete."""
    prompt_path = Path("templates/harvest-prompt.md")
    content = prompt_path.read_text()

    # Check for example input/output
    if "example" not in content.lower():
        return False, "No examples provided"

    # Check that example includes all categories
    example_section = content[content.lower().find("example"):]

    # Look for JSON example
    json_match = re.search(r'\{[\s\S]*"knowledge"[\s\S]*\}', example_section)
    if not json_match:
        return False, "No JSON example found"

    example_json = json_match.group(0)

    # Check for all categories in example
    for category in ["fact", "pitfall", "pattern", "tool-quirk", "question"]:
        if category not in example_json:
            return False, f"Example missing category: {category}"

    return True, "Examples are clear and complete"
    passed, msg = check_example_quality()
    assert passed, msg

def test_constraint_coverage():
    """Test that constraints cover all requirements."""
    prompt_path = Path("templates/harvest-prompt.md")
    content = prompt_path.read_text()

    required_constraints = [
        "No hallucination",
        "only extract",
        "explicitly",
        "partial",
        "failed sessions",
        "1k tokens"
    ]

    for constraint in required_constraints:
        if constraint.lower() not in content.lower():
            return False, f"Missing constraint: {constraint}"

    return True, "Constraints cover all requirements"
    passed, msg = check_constraint_coverage()
    assert passed, msg

def test_test_sessions():
    """Test that test sessions exist and are valid."""
    test_sessions_dir = Path("test_sessions")
    if not test_sessions_dir.exists():
        return False, "test_sessions directory not found"

    session_files = list(test_sessions_dir.glob("*.jsonl"))
    if len(session_files) < 5:
        return False, f"Only {len(session_files)} test sessions found, need 5"

    # Check each session file
    for session_file in session_files:
        content = session_file.read_text()
        lines = content.strip().split("\n")

        # Check that each line is valid JSON
        for i, line in enumerate(lines, 1):
            try:
                json.loads(line)
            except json.JSONDecodeError as e:
                return False, f"Invalid JSON in {session_file.name}, line {i}: {e}"

    return True, f"Found {len(session_files)} valid test sessions"

def run_all_tests():
    """Run all tests and return results."""
    tests = [
        ("Prompt Structure", test_prompt_structure),
        ("Confidence Scoring", test_confidence_scoring),
        ("Example Quality", test_example_quality),
        ("Constraint Coverage", test_constraint_coverage),
        ("Test Sessions", test_test_sessions)
    ]

    results = []
    all_passed = True

    for test_name, test_func in tests:
        try:
            passed, message = test_func()
            results.append({
                "test": test_name,
                "passed": passed,
                "message": message
            })
            if not passed:
                all_passed = False
        except Exception as e:
            results.append({
                "test": test_name,
                "passed": False,
                "message": f"Error: {str(e)}"
            })
            all_passed = False

    # Print results
    print("=" * 60)
    print("HARVEST PROMPT TEST RESULTS")
    print("=" * 60)

    for result in results:
        status = "✓ PASS" if result["passed"] else "✗ FAIL"
        print(f"{status}: {result['test']}")
        print(f"  {result['message']}")
        print()

    print("=" * 60)
    if all_passed:
        print("ALL TESTS PASSED!")
    else:
        print("SOME TESTS FAILED!")
    print("=" * 60)

    return all_passed, results
    passed, msg = check_test_sessions()
    assert passed, msg

if __name__ == "__main__":
    all_passed, results = run_all_tests()

    # Save results to file
    with open("test_results.json", "w") as f:
        json.dump({
            "all_passed": all_passed,
            "results": results,
            "timestamp": "2026-04-14T19:05:00Z"
        }, f, indent=2)

    print(f"Results saved to test_results.json")

    # Exit with appropriate code
    exit(0 if all_passed else 1)
    checks = [check_prompt_structure, check_confidence_scoring, check_example_quality, check_constraint_coverage, check_test_sessions]
    for fn in checks:
        ok, msg = fn()
        print(f"{'PASS' if ok else 'FAIL'}: {fn.__name__} -- {msg}")

@@ -19,95 +19,208 @@ FileMetrics = mod.FileMetrics


def test_complexity_simple_function():
    """Simple function should have low complexity."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
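        # delete=False keeps the temp file on disk so compute_file_complexity can reopen it; it is removed with os.unlink below.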
        f.write("\ndef simple():\n    return 42\n")
        f.write("""
def simple():
    return 42
""")
        f.flush()
    avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
    assert avg == 1.0
    assert max_c == 1
    assert funcs == 1
    assert classes == 0
    assert avg == 1.0, f"Expected 1.0, got {avg}"
    assert max_c == 1, f"Expected 1, got {max_c}"
    assert funcs == 1, f"Expected 1, got {funcs}"
    assert classes == 0, f"Expected 0, got {classes}"
    os.unlink(f.name)
    print("PASS: test_complexity_simple_function")


def test_complexity_with_conditionals():
    """Function with if/else should have higher complexity."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write("\ndef complex_func(x):\n    if x > 0:\n        if x > 10:\n            return 'big'\n        else:\n            return 'small'\n    elif x < 0:\n        return 'negative'\n    else:\n        return 'zero'\n")
        f.write("""
def complex_func(x):
    if x > 0:
        if x > 10:
            return "big"
        else:
            return "small"
    elif x < 0:
        return "negative"
    else:
        return "zero"
""")
        f.flush()
    avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
    assert max_c >= 4
    assert funcs == 1
    # Base 1 + outer if + nested if + elif = 4
    assert max_c >= 4, f"Expected max_c >= 4, got {max_c}"
    assert funcs == 1, f"Expected 1, got {funcs}"
    os.unlink(f.name)
    print("PASS: test_complexity_with_conditionals")


def test_complexity_with_loops():
    """Function with loops should increase complexity."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write("\ndef loop_func(items):\n    result = []\n    for item in items:\n        if item > 0:\n            result.append(item)\n    while len(result) > 10:\n        result.pop()\n    return result\n")
        f.write("""
def loop_func(items):
    result = []
    for item in items:
        if item > 0:
            result.append(item)
    while len(result) > 10:
        result.pop()
    return result
""")
        f.flush()
    avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
    assert max_c >= 3
    # Base 1 + 1 for + 1 if + 1 while = 4
    assert max_c >= 3, f"Expected max_c >= 3, got {max_c}"
    os.unlink(f.name)
    print("PASS: test_complexity_with_loops")


def test_complexity_with_class():
    """Class with methods should count both."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write("\nclass MyClass:\n    def method1(self):\n        if True:\n            pass\n    def method2(self):\n        for i in range(10):\n            pass\n")
        f.write("""
class MyClass:
    def method1(self):
        if True:
            pass

    def method2(self):
        for i in range(10):
            pass
""")
        f.flush()
    avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
    assert classes == 1
    assert funcs == 2
    assert classes == 1, f"Expected 1 class, got {classes}"
    assert funcs == 2, f"Expected 2 functions, got {funcs}"
    os.unlink(f.name)
    print("PASS: test_complexity_with_class")


def test_complexity_syntax_error():
    """File with syntax error should return zeros."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write("def broken(:\n    pass")
        f.flush()
    avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
    assert avg == 0.0
    assert funcs == 0
    assert avg == 0.0, f"Expected 0.0, got {avg}"
    assert funcs == 0, f"Expected 0, got {funcs}"
    os.unlink(f.name)
    print("PASS: test_complexity_syntax_error")


def test_refactoring_score_high_complexity():
    metrics = FileMetrics(path="test.py", lines=200, complexity=15.0, max_complexity=25, functions=10, classes=2, churn_30d=5, churn_90d=15, test_coverage=0.3, refactoring_score=0.0)
    """High complexity should give high score."""
    metrics = FileMetrics(
        path="test.py",
        lines=200,
        complexity=15.0,
        max_complexity=25,
        functions=10,
        classes=2,
        churn_30d=5,
        churn_90d=15,
        test_coverage=0.3,
        refactoring_score=0.0
    )
    score = calculate_refactoring_score(metrics)
    assert score > 50
    assert score > 50, f"Expected score > 50, got {score}"
    print("PASS: test_refactoring_score_high_complexity")


def test_refactoring_score_low_complexity():
    metrics = FileMetrics(path="test.py", lines=50, complexity=2.0, max_complexity=3, functions=3, classes=0, churn_30d=0, churn_90d=1, test_coverage=0.9, refactoring_score=0.0)
    """Low complexity should give lower score."""
    metrics = FileMetrics(
        path="test.py",
        lines=50,
        complexity=2.0,
        max_complexity=3,
        functions=3,
        classes=0,
        churn_30d=0,
        churn_90d=1,
        test_coverage=0.9,
        refactoring_score=0.0
    )
    score = calculate_refactoring_score(metrics)
    assert score < 30
    assert score < 30, f"Expected score < 30, got {score}"
    print("PASS: test_refactoring_score_low_complexity")


def test_refactoring_score_high_churn():
    metrics = FileMetrics(path="test.py", lines=100, complexity=5.0, max_complexity=8, functions=5, classes=0, churn_30d=10, churn_90d=20, test_coverage=0.5, refactoring_score=0.0)
    """High churn should increase score."""
    metrics = FileMetrics(
        path="test.py",
        lines=100,
        complexity=5.0,
        max_complexity=8,
        functions=5,
        classes=0,
        churn_30d=10,
        churn_90d=20,
        test_coverage=0.5,
        refactoring_score=0.0
    )
    score = calculate_refactoring_score(metrics)
    assert score > 40
    # Churn should contribute significantly
    assert score > 40, f"Expected score > 40 for high churn, got {score}"
    print("PASS: test_refactoring_score_high_churn")


def test_refactoring_score_no_coverage():
    metrics = FileMetrics(path="test.py", lines=100, complexity=5.0, max_complexity=8, functions=5, classes=0, churn_30d=1, churn_90d=2, test_coverage=None, refactoring_score=0.0)
    """No coverage data should assume medium risk."""
    metrics = FileMetrics(
        path="test.py",
        lines=100,
        complexity=5.0,
        max_complexity=8,
        functions=5,
        classes=0,
        churn_30d=1,
        churn_90d=2,
        test_coverage=None,
        refactoring_score=0.0
    )
    score = calculate_refactoring_score(metrics)
    assert score > 0
    # Should have some score from the 5-point coverage component
    assert score > 0, f"Expected positive score, got {score}"
    print("PASS: test_refactoring_score_no_coverage")


def test_refactoring_score_large_file():
    metrics_small = FileMetrics(path="small.py", lines=50, complexity=5.0, max_complexity=8, functions=3, classes=0, churn_30d=1, churn_90d=2, test_coverage=0.8, refactoring_score=0.0)
    metrics_large = FileMetrics(path="large.py", lines=1000, complexity=5.0, max_complexity=8, functions=3, classes=0, churn_30d=1, churn_90d=2, test_coverage=0.8, refactoring_score=0.0)
    """Large files should score higher."""
    metrics_small = FileMetrics(
        path="small.py",
        lines=50,
        complexity=5.0,
        max_complexity=8,
        functions=3,
        classes=0,
        churn_30d=1,
        churn_90d=2,
        test_coverage=0.8,
        refactoring_score=0.0
    )
    metrics_large = FileMetrics(
        path="large.py",
        lines=1000,
        complexity=5.0,
        max_complexity=8,
        functions=3,
        classes=0,
        churn_30d=1,
        churn_90d=2,
        test_coverage=0.8,
        refactoring_score=0.0
    )
    score_small = calculate_refactoring_score(metrics_small)
    score_large = calculate_refactoring_score(metrics_large)
    assert score_large > score_small
    assert score_large > score_small, \
        f"Large file ({score_large}) should score higher than small ({score_small})"
    print("PASS: test_refactoring_score_large_file")


@@ -126,4 +239,4 @@ def run_all():


if __name__ == "__main__":
    run_all()
    run_all()