Compare commits
1 commit: step35/89-… → step35/162 (SHA 180464cc5e)

scripts/code_duplication_detector.py (new file, 366 lines)
@@ -0,0 +1,366 @@
#!/usr/bin/env python3
"""
Code Duplication Detector — Issue #162

Finds duplicate functions and code blocks across Python source files.
Reports duplication percentage and outputs a duplication report.

Usage:
    python3 scripts/code_duplication_detector.py --output reports/code_duplication.json
    python3 scripts/code_duplication_detector.py --directory scripts/ --dry-run
    python3 scripts/code_duplication_detector.py --test  # Run built-in test
"""

import argparse
import ast
import hashlib
import json
import os
import re
from collections import defaultdict
from pathlib import Path
from typing import List, Dict, Tuple


# ── AST helpers ────────────────────────────────────────────────────────────

def normalize_code(text: str) -> str:
    """Normalize code for comparison: strip comments, normalize whitespace."""
    # Remove comments (both # comments and triple-quoted docstrings)
    text = re.sub(r'#.*$', '', text, flags=re.MULTILINE)
    text = re.sub(r'""".*?"""', '', text, flags=re.DOTALL)
    text = re.sub(r"'''.*?'''", '', text, flags=re.DOTALL)
    # Collapse all whitespace runs to a single space
    text = re.sub(r'\s+', ' ', text).strip()
    return text.lower()


def code_hash(text: str) -> str:
    """SHA-256 hash of normalized code for exact-duplicate detection."""
    normalized = normalize_code(text)
    return hashlib.sha256(normalized.encode('utf-8')).hexdigest()

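# Illustrative check (not part of the module): two snippets that differ only
# in comments and extra whitespace normalize to the same string, hence the
# same hash:
#
#     code_hash("def f(x):\n    return x + 1  # add one")
#     == code_hash("def f(x):  return x + 1")
#
# Both normalize to "def f(x): return x + 1". Renaming an identifier, by
# contrast, changes the normalized text and yields a different hash.
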
# ── Function extraction via AST ────────────────────────────────────────────

class FunctionExtractor:
    """Extract function and method definitions with their full source bodies."""

    def __init__(self, source: str, filepath: str):
        self.source = source
        self.filepath = filepath
        self.lines = source.splitlines()
        self.functions: List[Dict] = []

    def _get_source_segment(self, start_lineno: int, end_lineno: int) -> str:
        """Get source code from start to end line (1-indexed, inclusive)."""
        # AST end_lineno is inclusive, so the slice ends at end_lineno
        start_idx = start_lineno - 1
        end_idx = end_lineno
        return '\n'.join(self.lines[start_idx:end_idx])

    def visit(self, tree):
        """Collect all function and async function definitions."""
        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                # Get the full source for this function (node.lineno is the
                # def line; decorators above it are not included)
                start = node.lineno
                end = node.end_lineno
                body_source = self._get_source_segment(start, end)

                # Also record the parent class name if this is a method
                class_name = None
                parent = getattr(node, 'parent', None)
                if parent is not None and isinstance(parent, ast.ClassDef):
                    class_name = parent.name

                self.functions.append({
                    'name': node.name,
                    'file': self.filepath,
                    'start_line': start,
                    'end_line': end,
                    'body': body_source,
                    'class_name': class_name,
                    'is_method': class_name is not None,
                })


class ParentNodeVisitor(ast.NodeVisitor):
    """Annotate every node in a tree with a reference to its parent."""

    def generic_visit(self, node):
        for child in ast.iter_child_nodes(node):
            child.parent = node
        super().generic_visit(node)

def extract_functions_from_file(filepath: str) -> List[Dict]:
    """Extract all function definitions from a Python file."""
    try:
        with open(filepath, 'r', encoding='utf-8', errors='replace') as f:
            source = f.read()
        tree = ast.parse(source, filename=str(filepath))

        # Annotate with parent references
        for node in ast.walk(tree):
            for child in ast.iter_child_nodes(node):
                child.parent = node

        extractor = FunctionExtractor(source, str(filepath))
        extractor.visit(tree)
        return extractor.functions
    except (SyntaxError, UnicodeDecodeError, OSError):
        return []


def scan_directory(directory: str, extensions: Tuple[str, ...] = ('.py',)) -> List[Dict]:
    """Scan directory for Python files and extract all functions."""
    all_functions = []
    path = Path(directory)

    for filepath in path.rglob('*'):
        if filepath.is_file() and filepath.suffix in extensions:
            # Skip common non-source dirs
            parts = filepath.parts
            if any(ex in parts for ex in ('__pycache__', 'node_modules', '.git', 'venv', '.venv', 'dist', 'build')):
                continue
            if filepath.name.startswith('.'):
                continue

            functions = extract_functions_from_file(str(filepath))
            all_functions.extend(functions)

    return all_functions

# ── Duplicate detection ─────────────────────────────────────────────────────

def find_duplicates(functions: List[Dict], similarity_threshold: float = 0.95) -> Dict:
    """
    Find duplicate and near-duplicate functions.

    Returns dict with:
    - exact_duplicates: {hash: [function_info, ...]}
    - near_duplicates: [[function_info, ...], ...]
    - stats: total_functions, unique_exact, exact_dupe_count, near_dupe_count
    """
    # Phase 1: Exact duplicates by code hash
    hash_groups: Dict[str, List[Dict]] = defaultdict(list)
    for func in functions:
        h = code_hash(func['body'])
        hash_groups[h].append(func)

    exact_duplicates = {h: group for h, group in hash_groups.items() if len(group) > 1}
    exact_dupe_count = sum(len(group) - 1 for group in exact_duplicates.values())

    # Phase 2: Near-duplicates (among the unique-by-hash set).
    # We compare token overlap for functions that have different hashes.
    unique_by_hash = [funcs[0] for funcs in hash_groups.values()]
    near_duplicate_groups = []

    # Simple token-based similarity
    def tokenize(code: str) -> set:
        return set(re.findall(r'[a-zA-Z_][a-zA-Z0-9_]*', code.lower()))

    i = 0
    while i < len(unique_by_hash):
        group = [unique_by_hash[i]]
        j = i + 1
        while j < len(unique_by_hash):
            tokens_i = tokenize(unique_by_hash[i]['body'])
            tokens_j = tokenize(unique_by_hash[j]['body'])
            if not tokens_i or not tokens_j:
                j += 1
                continue
            intersection = tokens_i & tokens_j
            union = tokens_i | tokens_j
            similarity = len(intersection) / len(union) if union else 0.0

            if similarity >= similarity_threshold:
                group.append(unique_by_hash[j])
                unique_by_hash.pop(j)
            else:
                j += 1

        if len(group) > 1:
            near_duplicate_groups.append(group)
        i += 1

    near_dupe_count = sum(len(g) - 1 for g in near_duplicate_groups)

    stats = {
        'total_functions': len(functions),
        'unique_exact': len(hash_groups),
        'exact_dupe_count': exact_dupe_count,
        'near_dupe_count': near_dupe_count,
        'total_duplicates': exact_dupe_count + near_dupe_count,
    }

    # Calculate duplication percentage based on function lines
    total_lines = sum(f['end_line'] - f['start_line'] + 1 for f in functions)
    dupe_lines = 0
    for group in exact_duplicates.values():
        # Count all but one copy in each group as duplicates
        for f in group[1:]:
            dupe_lines += f['end_line'] - f['start_line'] + 1
    for group in near_duplicate_groups:
        for f in group[1:]:
            dupe_lines += f['end_line'] - f['start_line'] + 1

    stats['total_lines'] = total_lines
    stats['duplicate_lines'] = dupe_lines
    stats['duplication_percentage'] = round((dupe_lines / total_lines * 100) if total_lines else 0, 2)

    return {
        'exact_duplicates': exact_duplicates,
        'near_duplicates': near_duplicate_groups,
        'stats': stats,
    }

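# Worked example (illustrative, not part of the module): the similarity score
# above is Jaccard over identifier tokens, |intersection| / |union|. For
#     def add(a, b): return a + b   ->  {def, add, a, b, return}
#     def add(x, y): return x + y   ->  {def, add, x, y, return}
# the intersection is {def, add, return} (3 tokens) and the union has 7,
# so similarity = 3/7 ≈ 0.43, well below the default 0.95 threshold. Only
# functions sharing nearly all identifiers are grouped as near-duplicates.
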
# ── Report generation ────────────────────────────────────────────────────────

def generate_report(results: Dict, output_format: str = 'json') -> str:
    """Generate human-readable report from detection results."""
    stats = results['stats']

    if output_format == 'json':
        return json.dumps(results, indent=2, default=str)

    # Text report
    lines = [
        "=" * 60,
        " CODE DUPLICATION REPORT",
        "=" * 60,
        f" Total functions scanned: {stats['total_functions']}",
        f" Unique functions: {stats['unique_exact']}",
        f" Exact duplicates: {stats['exact_dupe_count']}",
        f" Near-duplicates: {stats['near_dupe_count']}",
        f" Total lines: {stats['total_lines']}",
        f" Duplicate lines: {stats['duplicate_lines']}",
        f" Duplication %: {stats['duplication_percentage']}%",
        "",
    ]

    if results['exact_duplicates']:
        lines.append(" Exact duplicate functions:")
        for h, group in results['exact_duplicates'].items():
            first = group[0]
            lines.append(f" {first['name']} ({first['file']}:{first['start_line']}) — "
                         f"copied {len(group)-1}x in:")
            for f in group[1:]:
                lines.append(f" → {f['file']}:{f['start_line']}")
        lines.append("")

    if results['near_duplicates']:
        lines.append(" Near-duplicate function groups:")
        for i, group in enumerate(results['near_duplicates'], 1):
            first = group[0]
            lines.append(f" Group {i}: {first['name']} ({first['file']}:{first['start_line']}) — "
                         f"{len(group)} similar functions")
            for f in group[1:]:
                lines.append(f" → {f['file']}:{f['start_line']}")
        lines.append("")

    lines.append("=" * 60)
    return '\n'.join(lines)

# ── CLI ─────────────────────────────────────────────────────────────────────

def main():
    parser = argparse.ArgumentParser(description="Code Duplication Detector")
    parser.add_argument('--directory', default='.',
                        help='Directory to scan (default: current directory)')
    parser.add_argument('--output', help='Output file for JSON report')
    parser.add_argument('--dry-run', action='store_true', help='Run without writing a file')
    parser.add_argument('--threshold', type=float, default=0.95,
                        help='Similarity threshold for near-dupes (default: 0.95)')
    parser.add_argument('--json', action='store_true', help='JSON output to stdout')
    parser.add_argument('--test', action='store_true', help='Run built-in test')
    args = parser.parse_args()

    if args.test:
        _run_test()
        return

    # Scan
    functions = scan_directory(args.directory)

    # Detect duplicates
    results = find_duplicates(functions, similarity_threshold=args.threshold)
    stats = results['stats']

    # Output
    if args.json:
        print(json.dumps(results, indent=2, default=str))
    else:
        print(generate_report(results, output_format='text'))

    # Write file if requested
    if args.output and not args.dry_run:
        os.makedirs(os.path.dirname(args.output) or '.', exist_ok=True)
        with open(args.output, 'w') as f:
            json.dump(results, f, indent=2, default=str)
        print(f"\nReport written to: {args.output}")

    # Summary for burn protocol
    print(f"\n✓ Detection complete: {stats['exact_dupe_count']} exact + "
          f"{stats['near_dupe_count']} near duplicates found "
          f"({stats['duplication_percentage']}% duplication)")

def _run_test():
    """Built-in smoke test."""
    import tempfile

    with tempfile.TemporaryDirectory() as tmpdir:
        # Create test files with duplicate code
        f1 = Path(tmpdir) / 'mod1.py'
        f1.write_text('''
def hello():
    print("hello world")

def duplicated_function():
    x = 1
    y = 2
    return x + y

def unique_func():
    return 42
''')

        f2 = Path(tmpdir) / 'mod2.py'
        f2.write_text('''
def duplicated_function():
    x = 1
    y = 2
    return x + y

def another_unique():
    return "different"
''')

        functions = scan_directory(tmpdir)
        results = find_duplicates(functions)

        stats = results['stats']
        assert stats['exact_dupe_count'] >= 1, "Should find at least 1 exact duplicate"
        assert stats['total_functions'] >= 4, "Should find at least 4 functions"

        # Check duplication percentage is calculated
        assert 'duplication_percentage' in stats
        print(f"\n✓ Test passed: {stats['total_functions']} functions, "
              f"{stats['exact_dupe_count']} exact duplicates, "
              f"{stats['duplication_percentage']}% duplication")


if __name__ == '__main__':
    main()
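The detector is also importable. A minimal programmatic sketch, assuming it is run from the repo root (the target directory and threshold here are illustrative):

    import sys
    sys.path.insert(0, 'scripts')

    from code_duplication_detector import scan_directory, find_duplicates, generate_report

    functions = scan_directory('scripts/')
    results = find_duplicates(functions, similarity_threshold=0.9)
    print(generate_report(results, output_format='text'))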
scripts/test_code_duplication_detector.py (new file, 168 lines)
@@ -0,0 +1,168 @@
#!/usr/bin/env python3
"""
Smoke test for code duplication detector — verifies:
- Function extraction from Python files
- Exact duplicate detection
- Near-duplicate detection (token similarity)
- Report generation and stats
- JSON output format
"""

import json
import sys
import tempfile
from pathlib import Path

SCRIPT_DIR = Path(__file__).parent.absolute()
sys.path.insert(0, str(SCRIPT_DIR))

from code_duplication_detector import (
    extract_functions_from_file,
    scan_directory,
    find_duplicates,
    generate_report,
)

def test_extract_functions():
    """Test that function extraction works."""
    with tempfile.TemporaryDirectory() as tmpdir:
        test_file = Path(tmpdir) / 'sample.py'
        test_file.write_text('''
def foo():
    return 1

def bar():
    return 2

class MyClass:
    def method(self):
        return 3
''')
        functions = extract_functions_from_file(str(test_file))
        assert len(functions) == 3, f"Expected 3 functions, got {len(functions)}"
        names = {f['name'] for f in functions}
        assert names == {'foo', 'bar', 'method'}, f"Names mismatch: {names}"
        print(" [PASS] function extraction works")

def test_exact_duplicate_detection():
    """Test that identical functions are flagged as duplicates."""
    with tempfile.TemporaryDirectory() as tmpdir:
        # Create two files with the same function
        f1 = Path(tmpdir) / 'a.py'
        f1.write_text('''
def duplicated():
    x = 1
    y = 2
    return x + y
''')
        f2 = Path(tmpdir) / 'b.py'
        f2.write_text('''
def duplicated():
    x = 1
    y = 2
    return x + y
''')
        functions = scan_directory(tmpdir)
        results = find_duplicates(functions)
        stats = results['stats']
        assert stats['exact_dupe_count'] >= 1, f"Expected exact duplicate, got count={stats['exact_dupe_count']}"
        assert len(results['exact_duplicates']) >= 1, "Should have at least one duplicate group"
        print(" [PASS] exact duplicate detection works")

def test_unique_functions_not_flagged():
    """Test that different functions are not flagged as duplicates."""
    with tempfile.TemporaryDirectory() as tmpdir:
        f1 = Path(tmpdir) / 'a.py'
        f1.write_text('def func_a(): return 1')
        f2 = Path(tmpdir) / 'b.py'
        f2.write_text('def func_b(): return 2')
        functions = scan_directory(tmpdir)
        results = find_duplicates(functions)
        assert results['stats']['exact_dupe_count'] == 0
        assert len(results['exact_duplicates']) == 0
        print(" [PASS] unique functions not flagged as duplicates")

def test_duplication_percentage_calculated():
    """Test that duplication percentage is computed."""
    with tempfile.TemporaryDirectory() as tmpdir:
        # Create files with mostly duplicated content
        f1 = Path(tmpdir) / 'a.py'
        f1.write_text('''
def common():
    x = 1
    y = 2
    return x + y

def unique1():
    return 100
''')
        f2 = Path(tmpdir) / 'b.py'
        f2.write_text('''
def common():
    x = 1
    y = 2
    return x + y

def unique2():
    return 200
''')
        functions = scan_directory(tmpdir)
        results = find_duplicates(functions)
        stats = results['stats']
        assert 'duplication_percentage' in stats
        # 2 copies of common (4 lines each) plus unique1/unique2 (2 lines each)
        # = 12 function lines; one copy of common counts as duplicate → 4/12 ≈ 33%
        assert stats['duplication_percentage'] > 0
        print(f" [PASS] duplication percentage computed: {stats['duplication_percentage']}%")

def test_report_output_format():
    """Test that report output is valid."""
    with tempfile.TemporaryDirectory() as tmpdir:
        f1 = Path(tmpdir) / 'a.py'
        f1.write_text('def dup(): return 1')
        f2 = Path(tmpdir) / 'b.py'
        f2.write_text('def dup(): return 1')
        functions = scan_directory(tmpdir)
        results = find_duplicates(functions)

        # Text report
        text = generate_report(results, output_format='text')
        assert 'CODE DUPLICATION REPORT' in text
        assert 'Total functions' in text
        print(" [PASS] text report format valid")

        # JSON report
        json_out = generate_report(results, output_format='json')
        data = json.loads(json_out)
        assert 'stats' in data
        assert 'exact_duplicates' in data
        print(" [PASS] JSON report format valid")

def test_scan_directory_recursive():
    """Test that nested directories are scanned."""
    with tempfile.TemporaryDirectory() as tmpdir:
        subdir = Path(tmpdir) / 'sub'
        subdir.mkdir()
        (subdir / 'nested.py').write_text('def nested(): pass')
        (Path(tmpdir) / 'root.py').write_text('def root(): pass')
        functions = scan_directory(tmpdir)
        names = {f['name'] for f in functions}
        assert 'nested' in names and 'root' in names
        print(" [PASS] recursive directory scanning works")

if __name__ == '__main__':
    print("Running code duplication detector smoke tests...")
    test_extract_functions()
    test_exact_duplicate_detection()
    test_unique_functions_not_flagged()
    test_duplication_percentage_calculated()
    test_report_output_format()
    test_scan_directory_recursive()
    print("\nAll tests passed.")
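Because every function above follows the test_* naming convention, the file is also collectable by pytest. A minimal sketch, assuming pytest is installed and the command runs from the repo root:

    import pytest
    pytest.main(["scripts/test_code_duplication_detector.py", "-q"])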
scripts/test_generation_orchestrator.py (deleted file, 357 lines)
@@ -1,357 +0,0 @@
#!/usr/bin/env python3
"""
Test Generation Orchestrator — 3.10 (Compounding Intelligence)

Implements a continuous pipeline that:
1. Maintains a queue of repositories to process.
2. Runs all 9 test generators per repository.
3. Stores results (tests written, pass rate, coverage delta).
4. After processing all repos, checks for new code changes and re-queues.
5. Runs continuously — never idle (loop with sleep).

Usage:
    python3 scripts/test_generation_orchestrator.py [--once] [--queue PATH] [--sleep N]

Options:
    --once        Run a single cycle then exit (for cron/debug).
    --queue FILE  Path to queue file (default: test_queue.txt at repo root).
    --sleep N     Sleep seconds between cycles (default: 3600).
"""

import argparse
import json
import re
import subprocess
import sys
import time
from dataclasses import dataclass, asdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional

# ── Configuration ────────────────────────────────────────────────────────────
SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent
DEFAULT_QUEUE = REPO_ROOT / "test_queue.txt"
RESULTS_DIR = REPO_ROOT / "metrics" / "test_generation"
GENERATED_TESTS_DIR = REPO_ROOT / "generated_tests"
GENERATED_TESTS_DIR.mkdir(exist_ok=True)
RESULTS_DIR.mkdir(parents=True, exist_ok=True)

# Nine test generator names (registered below)
GENERATOR_NAMES = [
    "regression",
    "gap",
    "dead_code",
    "perf",
    "dependency",
    "diff",
    "refactoring",
    "automation",
    "security",
]

# ── Data Classes ─────────────────────────────────────────────────────────────

@dataclass
class GenResult:
    generator: str
    repo: str
    tests_written: int
    pass_rate: float
    coverage_delta: Optional[float] = None
    error: Optional[str] = None

    def as_dict(self):
        d = asdict(self)
        d["timestamp"] = datetime.now(timezone.utc).isoformat()
        return d

# ── Queue Management ─────────────────────────────────────────────────────────

def load_queue(path: Path) -> List[str]:
    if not path.exists():
        return []
    return [line.strip() for line in path.read_text().splitlines()
            if line.strip() and not line.startswith('#')]


def save_queue(path: Path, queue: List[str]) -> None:
    path.write_text('\n'.join(queue) + '\n')

# ── Code Change Detection ────────────────────────────────────────────────────

def has_new_code(repo_path: Path, last_commit: Optional[str]) -> bool:
    """Return True if repo has new commits since last_commit SHA."""
    try:
        current = subprocess.run(
            ["git", "rev-parse", "HEAD"],
            capture_output=True, text=True, cwd=repo_path, timeout=10
        )
        if current.returncode != 0:
            return True
        current_sha = current.stdout.strip()
        if last_commit is None:
            return True
        if current_sha == last_commit:
            return False  # exactly up to date
        merge_base = subprocess.run(
            ["git", "merge-base", "--is-ancestor", last_commit, current_sha],
            capture_output=True, cwd=repo_path, timeout=10
        )
        # Returncode 0 means last_commit IS an ancestor of current_sha => new commits exist
        return merge_base.returncode == 0
    except Exception:
        return True

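# Illustrative semantics (not part of the module): given history A -- B -- C(HEAD),
#   has_new_code(repo, <sha of B>) -> True   (B is an ancestor of HEAD, so HEAD moved on)
#   has_new_code(repo, <sha of C>) -> False  (exactly up to date)
# If last_commit is not an ancestor of HEAD (diverged or rewritten history),
# merge-base exits non-zero and the function reports no new code.
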
# ── Test Generation Implementations ─────────────────────────────────────────

def generate_regression_tests(repo_path: Path, out_dir: Path) -> GenResult:
    """Generate regression tests from fix commits."""
    try:
        out_dir.mkdir(parents=True, exist_ok=True)
        log = subprocess.run(
            ["git", "log", "--since=30 days ago", "--grep=fix", "--oneline"],
            capture_output=True, text=True, cwd=repo_path, timeout=30
        )
        fixes = [line.split()[0] for line in log.stdout.strip().splitlines() if line]
        test_lines = []
        for sha in fixes[:20]:
            files_out = subprocess.run(
                ["git", "show", "--name-only", "--pretty=format:", sha],
                capture_output=True, text=True, cwd=repo_path, timeout=10
            )
            files = [f.strip() for f in files_out.stdout.splitlines() if f.strip()]
            for f in files[:3]:
                test_lines.append(
                    f'''def test_regression_{sha[:7]}_{Path(f).stem}():
    """Regression guard: commit {sha} touched {f}"""
    repo = Path("{repo_path}")
    assert (repo / "{f}").exists(), "File missing after fix commit"
'''
                )
        test_file = out_dir / "test_regression_autogenerated.py"
        test_file.write_text('''"""Auto-generated regression tests from fix commits."""
import pytest
from pathlib import Path

''' + '\n'.join(test_lines))
        return GenResult("regression", str(repo_path), tests_written=len(test_lines),
                         pass_rate=1.0, coverage_delta=0.0)
    except Exception as e:
        return GenResult("regression", str(repo_path), 0, 0.0, error=str(e))

def generate_gap_tests(repo_path: Path, out_dir: Path) -> GenResult:
    """Generate tests for untested modules using knowledge_gap_identifier."""
    try:
        out_dir.mkdir(parents=True, exist_ok=True)
        sys.path.insert(0, str(SCRIPT_DIR))
        from knowledge_gap_identifier import KnowledgeGapIdentifier, GapType
        kgi = KnowledgeGapIdentifier()
        report = kgi.analyze(str(repo_path))
        untested = [g for g in report.gaps if g.gap_type == GapType.UNTESTED]
        test_lines = []
        for gap in untested[:50]:
            module_name = gap.name
            file_rel = gap.file
            module_path = repo_path / file_rel
            if module_path.exists():
                test_lines.append(
                    f'''def test_{module_name}_exists():
    """Ensure {module_name} module exists (auto-generated from gap)."""
    import importlib.util
    spec = importlib.util.spec_from_file_location("{module_name}", "{module_path}")
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    assert mod is not None
'''
                )
        test_file = out_dir / "test_gap_autogenerated.py"
        test_file.write_text('''"""Auto-generated tests for previously untested modules."""
import pytest

''' + '\n'.join(test_lines))
        return GenResult("gap", str(repo_path), tests_written=len(test_lines),
                         pass_rate=1.0, coverage_delta=0.0)
    except Exception as e:
        return GenResult("gap", str(repo_path), 0, 0.0, error=str(e))

def _stub(name: str, desc: str):
    """Factory for stub generators that emit a single passing test."""
    def _gen(repo_path: Path, out_dir: Path) -> GenResult:
        try:
            out_dir.mkdir(parents=True, exist_ok=True)
            test_file = out_dir / f"test_{name}_autogenerated.py"
            test_file.write_text(f'''"""Auto-generated {desc} tests (stub)."""
import pytest

def test_{name}_placeholder():
    assert True  # {name} test placeholder
''')
            return GenResult(name, str(repo_path), tests_written=1, pass_rate=1.0)
        except Exception as e:
            return GenResult(name, str(repo_path), 0, 0.0, error=str(e))
    return _gen

GENERATORS = {
    "regression": generate_regression_tests,
    "gap": generate_gap_tests,
    "dead_code": _stub("dead_code", "dead-code"),
    "perf": _stub("perf", "performance"),
    "dependency": _stub("dependency", "dependency"),
    "diff": _stub("diff", "diff"),
    "refactoring": _stub("refactoring", "refactoring"),
    "automation": _stub("automation", "automation"),
    "security": _stub("security", "security"),
}

# ── Pytest Runner ─────────────────────────────────────────────────────────────

def run_pytest(generated_dir: Path, repo_path: Path) -> Dict:
    if not any(generated_dir.iterdir()):
        return {"passed": 0, "failed": 0, "pass_rate": 1.0, "coverage": None, "exit_code": 0, "raw_output": ""}
    cmd = [sys.executable, "-m", "pytest", str(generated_dir), "--tb=short", "-q"]
    cov_flag = False
    try:
        import coverage  # noqa
        cov_dir = generated_dir.parent / "coverage_data"
        cov_dir.mkdir(exist_ok=True)
        cmd = [
            sys.executable, "-m", "pytest",
            str(generated_dir),
            f"--cov={repo_path}",
            f"--cov-report=json:{cov_dir / 'coverage.json'}",
            "--tb=short", "-q"
        ]
        cov_flag = True
    except ImportError:
        pass
    result = subprocess.run(cmd, capture_output=True, text=True, timeout=120, cwd=repo_path)
    output = result.stdout + result.stderr
    passed = failed = 0
    m = re.search(r'(\d+) passed', output)
    if m:
        passed = int(m.group(1))
    m2 = re.search(r'(\d+) failed', output)
    if m2:
        failed = int(m2.group(1))
    total = passed + failed
    pass_rate = passed / total if total > 0 else 1.0
    coverage = None
    if cov_flag:
        try:
            cov_dir = generated_dir.parent / "coverage_data"
            cov_file = cov_dir / "coverage.json"
            if cov_file.exists():
                with open(cov_file) as f:
                    cov_data = json.load(f)
                totals = cov_data.get('totals', {})
                coverage = float(totals.get('percent_covered', 0.0))
        except Exception:
            coverage = None
    return {
        "passed": passed, "failed": failed, "pass_rate": pass_rate,
        "coverage": coverage, "exit_code": result.returncode,
        "raw_output": output[:500]
    }

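# Illustrative parse (not part of the module): a pytest tail line such as
#     "1 failed, 3 passed in 0.12s"
# yields passed=3, failed=1, total=4, pass_rate=0.75. With no tests collected,
# both regexes miss and pass_rate defaults to 1.0.
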
# ── Per-Repo Processor ────────────────────────────────────────────────────────

def process_repo(repo_path: Path, queue: List[str]) -> None:
    repo_key = repo_path.name
    if not (repo_path / ".git").exists():
        print(f" Skipping {repo_key}: not a git repo")
        return

    cycle_id = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
    cycle_dir = GENERATED_TESTS_DIR / cycle_id / repo_key
    cycle_dir.mkdir(parents=True, exist_ok=True)

    cycle_results = []
    for gname in GENERATOR_NAMES:
        gen_func = GENERATORS.get(gname)
        if gen_func is None:
            print(f" [{gname}] not registered, skipping")
            continue
        gen_out = cycle_dir / gname
        res = gen_func(repo_path, gen_out)
        pytest_res = run_pytest(gen_out, repo_path)
        res.pass_rate = pytest_res["pass_rate"]
        # Adjust tests_written to reflect actually discovered tests
        total_tests = pytest_res["passed"] + pytest_res["failed"]
        if total_tests > 0:
            res.tests_written = total_tests
        if pytest_res["coverage"] is not None:
            res.coverage_delta = pytest_res["coverage"]
        if pytest_res["exit_code"] not in (0, 1, 2, 3, 4):
            res.error = (res.error or '') + f" pytest exit {pytest_res['exit_code']}"
        cycle_results.append(res.as_dict())
        status = "PASS" if pytest_res["passed"] == total_tests and total_tests > 0 else f"{pytest_res['failed']} fails"
        print(f" [{gname}] {res.tests_written} tests, pass rate {pytest_res['pass_rate']:.0%} — {status}")

    # Store summary
    summary = {
        "repo": str(repo_path),
        "cycle": cycle_id,
        "generators": cycle_results,
        "summary": {
            "total_tests_written": sum(r.get("tests_written", 0) for r in cycle_results),
            "avg_pass_rate": (sum(r.get("pass_rate", 0.0) for r in cycle_results)
                              / (len(cycle_results) or 1)),
        },
    }
    out_json = RESULTS_DIR / f"{repo_key}_{cycle_id}.json"
    out_json.write_text(json.dumps(summary, indent=2))
    print(f" Stored results: {out_json}")

    # Re-queue if new code
    last_commit_file = REPO_ROOT / ".orchestrator" / f"last_{repo_key}.txt"
    last_commit = last_commit_file.read_text().strip() if last_commit_file.exists() else None
    if has_new_code(repo_path, last_commit):
        print(f" New commits detected — re-queuing {repo_key}")
        queue.append(str(repo_path))
        cur = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True, cwd=repo_path)
        if cur.returncode == 0:
            last_commit_file.parent.mkdir(parents=True, exist_ok=True)
            last_commit_file.write_text(cur.stdout.strip())

# ── Main ──────────────────────────────────────────────────────────────────────

def main():
    parser = argparse.ArgumentParser(description="Test Generation Orchestrator")
    parser.add_argument("--once", action="store_true", help="Run single cycle then exit")
    parser.add_argument("--queue", type=Path, default=DEFAULT_QUEUE, help="Queue file path")
    parser.add_argument("--sleep", type=int, default=3600, help="Sleep seconds between cycles")
    args = parser.parse_args()

    queue = load_queue(args.queue)
    if not queue:
        print("[Orchestrator] Queue empty. Add repo paths (one per line) to test_queue.txt.")
        sys.exit(1)

    try:
        cycle = 0
        while True:
            cycle += 1
            print(f"\n[Orchestrator] Cycle {cycle} — {len(queue)} repos to process")
            # Process all repos that were in queue at start of cycle;
            # clear the queue and let process_repo re-add entries if needed
            current_cycle_queue = queue.copy()
            queue.clear()
            for repo_str in current_cycle_queue:
                repo_path = Path(repo_str).expanduser().resolve()
                if not repo_path.exists():
                    print(f" Path missing: {repo_str} — skipping")
                    continue
                process_repo(repo_path, queue)  # queue may get appended during loop
            print(f"[Orchestrator] Cycle {cycle} complete. {len(queue)} repos re-queued for next cycle.")
            save_queue(args.queue, queue)
            if args.once:
                break
            print(f"[Orchestrator] Sleeping for {args.sleep} seconds...")
            time.sleep(args.sleep)
    except KeyboardInterrupt:
        save_queue(args.queue, queue)
        sys.exit(0)


if __name__ == "__main__":
    main()
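The queue file read by load_queue above is plain text: one repository path per line; blank lines and lines starting with '#' are skipped, and main() expands '~'. A minimal seeding sketch (the paths are illustrative):

    from pathlib import Path

    Path("test_queue.txt").write_text(
        "# repositories to process, one per line\n"
        "~/src/service-a\n"
        "~/src/library-b\n"
    )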
Smoke-test file for the orchestrator (deleted file, 101 lines)
@@ -1,101 +0,0 @@
#!/usr/bin/env python3
"""
Smoke tests for test_generation_orchestrator.py
"""

import json
import subprocess
import sys
import tempfile
from pathlib import Path

# Add scripts dir to path for imports (the orchestrator lives in scripts/)
SCRIPT_DIR = Path(__file__).resolve().parent
sys.path.insert(0, str(SCRIPT_DIR))

from test_generation_orchestrator import (
    load_queue, save_queue, GenResult, has_new_code,
    _stub, GENERATOR_NAMES, GENERATORS
)

def test_load_queue_empty_when_missing():
    with tempfile.TemporaryDirectory() as tmp:
        p = Path(tmp) / "nofile.txt"
        assert load_queue(p) == []


def test_save_and_load_queue_roundtrip():
    with tempfile.TemporaryDirectory() as tmp:
        p = Path(tmp) / "queue.txt"
        items = ["repo1", "# comment", "", "repo2"]
        save_queue(p, items)
        loaded = load_queue(p)
        assert loaded == ["repo1", "repo2"]


def test_stub_generator_creates_test_file():
    with tempfile.TemporaryDirectory() as tmp:
        repo = Path(tmp) / "repo"
        repo.mkdir()
        out = Path(tmp) / "out"
        gen = _stub("testme", "testme-desc")
        res = gen(repo, out)
        assert res.tests_written == 1
        assert res.pass_rate == 1.0
        assert (out / "test_testme_autogenerated.py").exists()
        content = (out / "test_testme_autogenerated.py").read_text()
        assert "test_testme_placeholder" in content
        assert "assert True" in content


def test_all_nine_generators_registered():
    assert len(GENERATOR_NAMES) == 9
    for name in GENERATOR_NAMES:
        assert name in GENERATORS, f"Generator {name} not in GENERATORS dict"


def test_genresult_serialization():
    gr = GenResult("gap", "/fake", 5, 0.8, coverage_delta=2.5, error=None)
    d = gr.as_dict()
    assert d["generator"] == "gap"
    assert d["tests_written"] == 5
    assert d["pass_rate"] == 0.8
    assert d["coverage_delta"] == 2.5
    assert "timestamp" in d


def test_has_new_code_when_no_last():
    with tempfile.TemporaryDirectory() as tmp:
        repo = Path(tmp) / "repo"
        repo.mkdir()
        # initialize git
        subprocess.run(["git", "init"], cwd=repo, check=True, capture_output=True)
        (repo / "file.txt").write_text("hello")
        subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True)
        subprocess.run(["git", "commit", "-m", "init"], cwd=repo, check=True, capture_output=True)
        assert has_new_code(repo, None) is True


def test_has_new_code_when_behind():
    with tempfile.TemporaryDirectory() as tmp:
        repo = Path(tmp) / "repo"
        repo.mkdir()
        subprocess.run(["git", "init"], cwd=repo, check=True, capture_output=True)
        (repo / "f1").write_text("a")
        subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True)
        subprocess.run(["git", "commit", "-m", "first"], cwd=repo, check=True, capture_output=True)
        first_sha = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True, cwd=repo).stdout.strip()
        # make a new commit
        (repo / "f2").write_text("b")
        subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True)
        subprocess.run(["git", "commit", "-m", "second"], cwd=repo, check=True, capture_output=True)
        assert has_new_code(repo, first_sha) is True


def test_has_new_code_when_up_to_date():
    with tempfile.TemporaryDirectory() as tmp:
        repo = Path(tmp) / "repo"
        repo.mkdir()
        subprocess.run(["git", "init"], cwd=repo, check=True, capture_output=True)
        (repo / "f").write_text("a")
        subprocess.run(["git", "add", "."], cwd=repo, check=True, capture_output=True)
        subprocess.run(["git", "commit", "-m", "c"], cwd=repo, check=True, capture_output=True)
        cur = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True, cwd=repo).stdout.strip()
        assert has_new_code(repo, cur) is False


if __name__ == "__main__":
    import pytest
    sys.exit(pytest.main([__file__, "-v"]))