Compare commits
2 Commits
burn/169-1 ... feat/177-i

| Author | SHA1 | Date |
|---|---|---|
|  | 54f3bef7fc |  |
|  | 4fcd372de4 |  |
scripts/gitea_issue_parser.py (new file, 131 lines)
@@ -0,0 +1,131 @@
#!/usr/bin/env python3
"""
Gitea Issue Body Parser — Extract structured data from markdown issue bodies.

Usage:
    cat issue_body.txt | python3 scripts/gitea_issue_parser.py --stdin --pretty
    python3 scripts/gitea_issue_parser.py --url https://forge.../api/v1/repos/.../issues/123 --pretty
    python3 scripts/gitea_issue_parser.py body.txt --title "Fix thing (#42)" --labels pipeline extraction
"""

import argparse
import json
import re
import sys
from typing import Dict, List, Any, Optional


def parse_issue_body(body: str, title: str = "", labels: Optional[List[str]] = None) -> Dict[str, Any]:
    """Parse a Gitea issue markdown body into structured JSON.

    Extracted fields:
    - title: Issue title
    - context: Background/description section
    - criteria[]: Acceptance criteria (checkboxes or numbered lists)
    - labels[]: Issue labels
    - epic_ref: Parent/epic issue reference (from "Closes #N" or title)
    - sections{}: All ## sections as key-value pairs
    """
    result = {
        "title": title,
        "context": "",
        "criteria": [],
        "labels": labels or [],
        "epic_ref": None,
        "sections": {},
    }

    if not body:
        return result

    # Extract epic reference from title or body
    epic_patterns = [
        r"(?:closes|fixes|addresses|refs?)\s+#(\d+)",
        r"#(\d+)",
    ]
    for pattern in epic_patterns:
        match = re.search(pattern, (title + " " + body).lower())
        if match:
            result["epic_ref"] = int(match.group(1))
            break

    # Parse ## sections
    section_pattern = r"^##\s+(.+?)$\n((?:^(?!##\s).*$\n?)*)"
    for match in re.finditer(section_pattern, body, re.MULTILINE):
        section_name = match.group(1).strip().lower().replace(" ", "_")
        section_content = match.group(2).strip()
        result["sections"][section_name] = section_content

    # Extract acceptance criteria (checkboxes)
    checkbox_pattern = r"^\s*-\s*\[([ xX])\]\s*(.+)$"
    for match in re.finditer(checkbox_pattern, body, re.MULTILINE):
        checked = match.group(1).lower() == "x"
        text = match.group(2).strip()
        result["criteria"].append({"text": text, "checked": checked})

    # If no checkboxes, try numbered lists in an "Acceptance Criteria" or "Criteria" section
    # (section names have already been normalized with underscores above)
    if not result["criteria"]:
        for section_name in ["acceptance_criteria", "criteria"]:
            if section_name in result["sections"]:
                numbered = r"^\s*\d+\.\s*(.+)$"
                for match in re.finditer(numbered, result["sections"][section_name], re.MULTILINE):
                    result["criteria"].append({"text": match.group(1).strip(), "checked": False})
                break

    # Extract context (first section or first paragraph before any ## heading)
    first_heading = body.find("## ")
    if first_heading > 0:
        context_text = body[:first_heading].strip()
    else:
        context_text = body.split("\n\n")[0].strip()
    # Clean up: remove a "## Context" or "## Problem" header if present
    context_text = re.sub(r"^#+\s*\w+\s*\n?", "", context_text).strip()
    result["context"] = context_text[:500]  # Cap at 500 chars

    return result


def fetch_issue_from_url(url: str) -> Dict[str, Any]:
    """Fetch an issue from a Gitea API URL and parse it."""
    import urllib.request
    req = urllib.request.Request(url, headers={"Accept": "application/json"})
    with urllib.request.urlopen(req) as resp:
        data = json.loads(resp.read())

    return parse_issue_body(
        body=data.get("body", ""),
        title=data.get("title", ""),
        labels=[l["name"] for l in data.get("labels", [])]
    )


def main():
    parser = argparse.ArgumentParser(description="Parse Gitea issue markdown into structured JSON")
    parser.add_argument("file", nargs="?", help="Issue body file (or use --stdin)")
    parser.add_argument("--stdin", action="store_true", help="Read from stdin")
    parser.add_argument("--url", help="Gitea API URL to fetch issue from")
    parser.add_argument("--title", default="", help="Issue title")
    parser.add_argument("--labels", nargs="*", default=[], help="Issue labels")
    parser.add_argument("--pretty", action="store_true", help="Pretty-print JSON output")

    args = parser.parse_args()

    if args.url:
        result = fetch_issue_from_url(args.url)
    elif args.stdin:
        body = sys.stdin.read()
        result = parse_issue_body(body, args.title, args.labels)
    elif args.file:
        with open(args.file) as f:
            body = f.read()
        result = parse_issue_body(body, args.title, args.labels)
    else:
        parser.print_help()
        sys.exit(1)

    indent = 2 if args.pretty else None
    print(json.dumps(result, indent=indent))


if __name__ == "__main__":
    main()
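For a quick sense of the output shape, here is a minimal round-trip (hypothetical issue body; `parse_issue_body` imported as in the tests further down):

```python
body = "Closes #42\n\n## Acceptance Criteria\n\n- [ ] Add retry logic\n- [x] Write tests\n"
result = parse_issue_body(body)
# result ==
# {
#     "title": "",
#     "context": "Closes #42",
#     "criteria": [{"text": "Add retry logic", "checked": False},
#                  {"text": "Write tests", "checked": True}],
#     "labels": [],
#     "epic_ref": 42,
#     "sections": {"acceptance_criteria": "- [ ] Add retry logic\n- [x] Write tests"},
# }
```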
scripts/knowledge_staleness_check.py (deleted file, 131 lines)
@@ -1,131 +0,0 @@
#!/usr/bin/env python3
"""
Knowledge Store Staleness Detector — Detect stale knowledge entries by comparing source file hashes.

Usage:
    python3 scripts/knowledge_staleness_check.py --index knowledge/index.json
    python3 scripts/knowledge_staleness_check.py --index knowledge/index.json --json
    python3 scripts/knowledge_staleness_check.py --index knowledge/index.json --fix
"""

import argparse
import hashlib
import json
import os
from typing import Dict, List, Any, Optional


def compute_file_hash(filepath: str) -> Optional[str]:
    """Compute the SHA-256 hash of a file. Returns None if the file doesn't exist."""
    try:
        with open(filepath, "rb") as f:
            return "sha256:" + hashlib.sha256(f.read()).hexdigest()
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        return None


def check_staleness(index_path: str, repo_root: str = ".") -> List[Dict[str, Any]]:
    """Check all entries in the knowledge index for staleness.

    Returns a list of entries with staleness info:
    - status: "fresh" | "stale" | "missing_source" | "no_hash" | "no_source"
    - current_hash: computed hash (if the source exists)
    - source_hash: the stored hash, carried over from the index entry
    """
    with open(index_path) as f:
        data = json.load(f)

    facts = data.get("facts", [])
    results = []

    for entry in facts:
        source_file = entry.get("source_file")
        stored_hash = entry.get("source_hash")

        if not source_file:
            results.append({**entry, "status": "no_source", "current_hash": None})
            continue

        full_path = os.path.join(repo_root, source_file)
        current_hash = compute_file_hash(full_path)

        if current_hash is None:
            results.append({**entry, "status": "missing_source", "current_hash": None})
        elif not stored_hash:
            results.append({**entry, "status": "no_hash", "current_hash": current_hash})
        elif current_hash != stored_hash:
            results.append({**entry, "status": "stale", "current_hash": current_hash})
        else:
            results.append({**entry, "status": "fresh", "current_hash": current_hash})

    return results


def fix_hashes(index_path: str, repo_root: str = ".") -> int:
    """Add hashes to entries missing them. Returns the count of fixed entries."""
    with open(index_path) as f:
        data = json.load(f)

    fixed = 0
    for entry in data.get("facts", []):
        if entry.get("source_hash"):
            continue
        source_file = entry.get("source_file")
        if not source_file:
            continue
        full_path = os.path.join(repo_root, source_file)
        h = compute_file_hash(full_path)
        if h:
            entry["source_hash"] = h
            fixed += 1

    with open(index_path, "w") as f:
        json.dump(data, f, indent=2)

    return fixed


def main():
    parser = argparse.ArgumentParser(description="Check knowledge store staleness")
    parser.add_argument("--index", required=True, help="Path to knowledge/index.json")
    parser.add_argument("--repo", default=".", help="Repo root for source file resolution")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    parser.add_argument("--fix", action="store_true", help="Add hashes to entries missing them")

    args = parser.parse_args()

    if args.fix:
        fixed = fix_hashes(args.index, args.repo)
        print(f"Fixed {fixed} entries with missing hashes.")
        return

    results = check_staleness(args.index, args.repo)

    if args.json:
        print(json.dumps(results, indent=2))
    else:
        stale = [r for r in results if r["status"] != "fresh"]
        fresh = [r for r in results if r["status"] == "fresh"]

        print("Knowledge Store Staleness Check")
        print(f"  Total entries: {len(results)}")
        print(f"  Fresh: {len(fresh)}")
        print(f"  Stale/Issues: {len(stale)}")
        print()

        if stale:
            print("Issues found:")
            for r in stale:
                status = r["status"]
                fact = r.get("fact", "?")[:60]
                source = r.get("source_file", "?")
                print(f"  [{status}] {source}: {fact}")
        else:
            print("All entries are fresh!")


if __name__ == "__main__":
    main()
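For orientation, a minimal end-to-end run of the checker, assuming `check_staleness` and `fix_hashes` are importable (the test file below loads them from the script path via `importlib`); the `facts` envelope and status values come from the code above:

```python
import json
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    # One knowledge entry whose hash was never recorded.
    with open(os.path.join(tmp, "notes.md"), "w") as f:
        f.write("source text")
    idx = os.path.join(tmp, "index.json")
    with open(idx, "w") as f:
        json.dump({"facts": [{"fact": "notes exist", "source_file": "notes.md"}]}, f)

    print(check_staleness(idx, tmp)[0]["status"])  # "no_hash"
    fix_hashes(idx, tmp)                           # backfills "sha256:..." into the index
    print(check_staleness(idx, tmp)[0]["status"])  # "fresh"
```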
scripts/refactoring_opportunity_finder.py (deleted file, 54 lines)
@@ -1,54 +0,0 @@
#!/usr/bin/env python3
"""
Finds refactoring opportunities in codebases.

Engine ID: 10.4

Usage:
    python3 scripts/refactoring_opportunity_finder.py --output proposals/refactoring_opportunity_finder.json
    python3 scripts/refactoring_opportunity_finder.py --output proposals/refactoring_opportunity_finder.json --dry-run
"""

import argparse
import json
from datetime import datetime, timezone


def generate_proposals():
    """Generate sample proposals for this engine."""
    # TODO: Implement actual proposal generation logic
    return [
        {
            "title": "Sample improvement from 10.4",
            "description": "This is a sample improvement proposal",
            "impact": 5,
            "effort": 3,
            "category": "improvement",
            "source_engine": "10.4",
            "timestamp": datetime.now(timezone.utc).isoformat()
        }
    ]


def main():
    parser = argparse.ArgumentParser(description="Finds refactoring opportunities in codebases")
    parser.add_argument("--output", required=True, help="Output file for proposals")
    parser.add_argument("--dry-run", action="store_true", help="Don't write output file")

    args = parser.parse_args()

    proposals = generate_proposals()

    if not args.dry_run:
        with open(args.output, "w") as f:
            json.dump({"proposals": proposals}, f, indent=2)
        print(f"Generated {len(proposals)} proposals -> {args.output}")
    else:
        print(f"Would generate {len(proposals)} proposals")
        for p in proposals:
            print(f"  - {p['title']}")


if __name__ == "__main__":
    main()
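Downstream consumption isn't shown in this diff; a minimal sketch of reading the proposals back and ranking them (the impact-per-effort ratio is a hypothetical policy; only the `{"proposals": [...]}` envelope is established above):

```python
import json

with open("proposals/refactoring_opportunity_finder.json") as f:
    proposals = json.load(f)["proposals"]

# Hypothetical ranking: prefer high impact per unit of effort.
ranked = sorted(proposals, key=lambda p: p["impact"] / max(p["effort"], 1), reverse=True)
for p in ranked:
    print(f'{p["impact"]}/{p["effort"]}  {p["title"]}')
```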
scripts/test_gitea_issue_parser.py (new file, 109 lines)
@@ -0,0 +1,109 @@
#!/usr/bin/env python3
"""Tests for scripts/gitea_issue_parser.py"""

import os

# Import the parser from the sibling file by path (no package needed)
import importlib.util
spec = importlib.util.spec_from_file_location(
    "parser", os.path.join(os.path.dirname(__file__) or ".", "gitea_issue_parser.py"))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
parse_issue_body = mod.parse_issue_body


def test_basic_parsing():
    body = """## Context

This is the background info.

## Acceptance Criteria

- [ ] First criterion
- [x] Second criterion (done)

## What to build

Some description.
"""
    result = parse_issue_body(body, title="Test (#42)", labels=["bug"])
    assert result["title"] == "Test (#42)"
    assert result["labels"] == ["bug"]
    assert result["epic_ref"] == 42
    assert len(result["criteria"]) == 2
    assert result["criteria"][0]["text"] == "First criterion"
    assert result["criteria"][0]["checked"] is False
    assert result["criteria"][1]["checked"] is True
    assert "context" in result["sections"]
    print("PASS: test_basic_parsing")


def test_numbered_criteria():
    body = """## Acceptance Criteria

1. First item
2. Second item
3. Third item
"""
    result = parse_issue_body(body)
    assert len(result["criteria"]) == 3
    assert result["criteria"][0]["text"] == "First item"
    print("PASS: test_numbered_criteria")


def test_epic_ref_from_body():
    body = "Closes #123\n\nSome description."
    result = parse_issue_body(body)
    assert result["epic_ref"] == 123
    print("PASS: test_epic_ref_from_body")


def test_empty_body():
    result = parse_issue_body("")
    assert result["criteria"] == []
    assert result["context"] == ""
    assert result["sections"] == {}
    print("PASS: test_empty_body")


def test_no_sections():
    body = "Just a plain issue body with no headings."
    result = parse_issue_body(body)
    assert result["context"] == "Just a plain issue body with no headings."
    print("PASS: test_no_sections")


def test_multiple_sections():
    body = """## Problem

Something is broken.

## Fix

Do this instead.

## Notes

Additional info.
"""
    result = parse_issue_body(body)
    assert "problem" in result["sections"]
    assert "fix" in result["sections"]
    assert "notes" in result["sections"]
    assert "Something is broken" in result["sections"]["problem"]
    print("PASS: test_multiple_sections")


def run_all():
    test_basic_parsing()
    test_numbered_criteria()
    test_epic_ref_from_body()
    test_empty_body()
    test_no_sections()
    test_multiple_sections()
    print("\nAll 6 tests passed!")


if __name__ == "__main__":
    run_all()
Deleted file (129 lines): tests for scripts/knowledge_staleness_check.py
@@ -1,129 +0,0 @@
#!/usr/bin/env python3
"""Tests for scripts/knowledge_staleness_check.py — 8 tests."""

import json
import os
import tempfile

# Load the module under test by path (no package needed)
import importlib.util
spec = importlib.util.spec_from_file_location(
    "ks", os.path.join(os.path.dirname(__file__) or ".", "knowledge_staleness_check.py"))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
check_staleness = mod.check_staleness
fix_hashes = mod.fix_hashes
compute_file_hash = mod.compute_file_hash


def test_fresh_entry():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("print('hello')")
        h = compute_file_hash(src)
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "hello", "source_file": "source.py", "source_hash": h}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "fresh"
    print("PASS: test_fresh_entry")


def test_stale_entry():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("original content")
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "old", "source_file": "source.py", "source_hash": "sha256:wrong"}]}, f)
        # Now change the source
        with open(src, "w") as f:
            f.write("modified content")
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "stale"
    print("PASS: test_stale_entry")


def test_missing_source():
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "gone", "source_file": "nonexistent.py", "source_hash": "sha256:abc"}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "missing_source"
    print("PASS: test_missing_source")


def test_no_hash():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("content")
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "no hash", "source_file": "source.py"}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "no_hash"
        assert results[0]["current_hash"].startswith("sha256:")
    print("PASS: test_no_hash")


def test_no_source_field():
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "orphan"}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "no_source"
    print("PASS: test_no_source_field")


def test_fix_hashes():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("content for hashing")
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "needs hash", "source_file": "source.py"}]}, f)
        fixed = fix_hashes(idx, tmpdir)
        assert fixed == 1
        # Verify the hash was added
        with open(idx) as f:
            data = json.load(f)
        assert data["facts"][0]["source_hash"].startswith("sha256:")
    print("PASS: test_fix_hashes")


def test_empty_index():
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": []}, f)
        results = check_staleness(idx, tmpdir)
        assert results == []
    print("PASS: test_empty_index")


def test_compute_hash_nonexistent():
    h = compute_file_hash("/nonexistent/path/file.py")
    assert h is None
    print("PASS: test_compute_hash_nonexistent")


def run_all():
    test_fresh_entry()
    test_stale_entry()
    test_missing_source()
    test_no_hash()
    test_no_source_field()
    test_fix_hashes()
    test_empty_index()
    test_compute_hash_nonexistent()
    print("\nAll 8 tests passed!")


if __name__ == "__main__":
    run_all()
Deleted file (242 lines): tests for scripts/refactoring_opportunity_finder.py
@@ -1,242 +0,0 @@
#!/usr/bin/env python3
"""Tests for scripts/refactoring_opportunity_finder.py — 10 tests."""

import os
import tempfile

# Load the module under test by path (no package needed)
import importlib.util
spec = importlib.util.spec_from_file_location(
    "rof", os.path.join(os.path.dirname(__file__) or ".", "refactoring_opportunity_finder.py"))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

compute_file_complexity = mod.compute_file_complexity
calculate_refactoring_score = mod.calculate_refactoring_score
FileMetrics = mod.FileMetrics


def test_complexity_simple_function():
    """Simple function should have low complexity."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write("""
def simple():
    return 42
""")
        f.flush()
    avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
    assert avg == 1.0, f"Expected 1.0, got {avg}"
    assert max_c == 1, f"Expected 1, got {max_c}"
    assert funcs == 1, f"Expected 1, got {funcs}"
    assert classes == 0, f"Expected 0, got {classes}"
    os.unlink(f.name)
    print("PASS: test_complexity_simple_function")


def test_complexity_with_conditionals():
    """Function with if/else should have higher complexity."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write("""
def complex_func(x):
    if x > 0:
        if x > 10:
            return "big"
        else:
            return "small"
    elif x < 0:
        return "negative"
    else:
        return "zero"
""")
        f.flush()
    avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
    # Base 1 + 3 if/elif + 1 nested if = 5
    assert max_c >= 4, f"Expected max_c >= 4, got {max_c}"
    assert funcs == 1, f"Expected 1, got {funcs}"
    os.unlink(f.name)
    print("PASS: test_complexity_with_conditionals")


def test_complexity_with_loops():
    """Function with loops should increase complexity."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write("""
def loop_func(items):
    result = []
    for item in items:
        if item > 0:
            result.append(item)
    while len(result) > 10:
        result.pop()
    return result
""")
        f.flush()
    avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
    # Base 1 + 1 for + 1 if + 1 while = 4
    assert max_c >= 3, f"Expected max_c >= 3, got {max_c}"
    os.unlink(f.name)
    print("PASS: test_complexity_with_loops")


def test_complexity_with_class():
    """Class with methods should count both."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write("""
class MyClass:
    def method1(self):
        if True:
            pass

    def method2(self):
        for i in range(10):
            pass
""")
        f.flush()
    avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
    assert classes == 1, f"Expected 1 class, got {classes}"
    assert funcs == 2, f"Expected 2 functions, got {funcs}"
    os.unlink(f.name)
    print("PASS: test_complexity_with_class")


def test_complexity_syntax_error():
    """File with a syntax error should return zeros."""
    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
        f.write("def broken(:\n pass")
        f.flush()
    avg, max_c, funcs, classes, lines = compute_file_complexity(f.name)
    assert avg == 0.0, f"Expected 0.0, got {avg}"
    assert funcs == 0, f"Expected 0, got {funcs}"
    os.unlink(f.name)
    print("PASS: test_complexity_syntax_error")


def test_refactoring_score_high_complexity():
    """High complexity should give a high score."""
    metrics = FileMetrics(
        path="test.py",
        lines=200,
        complexity=15.0,
        max_complexity=25,
        functions=10,
        classes=2,
        churn_30d=5,
        churn_90d=15,
        test_coverage=0.3,
        refactoring_score=0.0
    )
    score = calculate_refactoring_score(metrics)
    assert score > 50, f"Expected score > 50, got {score}"
    print("PASS: test_refactoring_score_high_complexity")


def test_refactoring_score_low_complexity():
    """Low complexity should give a lower score."""
    metrics = FileMetrics(
        path="test.py",
        lines=50,
        complexity=2.0,
        max_complexity=3,
        functions=3,
        classes=0,
        churn_30d=0,
        churn_90d=1,
        test_coverage=0.9,
        refactoring_score=0.0
    )
    score = calculate_refactoring_score(metrics)
    assert score < 30, f"Expected score < 30, got {score}"
    print("PASS: test_refactoring_score_low_complexity")


def test_refactoring_score_high_churn():
    """High churn should increase the score."""
    metrics = FileMetrics(
        path="test.py",
        lines=100,
        complexity=5.0,
        max_complexity=8,
        functions=5,
        classes=0,
        churn_30d=10,
        churn_90d=20,
        test_coverage=0.5,
        refactoring_score=0.0
    )
    score = calculate_refactoring_score(metrics)
    # Churn should contribute significantly
    assert score > 40, f"Expected score > 40 for high churn, got {score}"
    print("PASS: test_refactoring_score_high_churn")


def test_refactoring_score_no_coverage():
    """Missing coverage data should assume medium risk."""
    metrics = FileMetrics(
        path="test.py",
        lines=100,
        complexity=5.0,
        max_complexity=8,
        functions=5,
        classes=0,
        churn_30d=1,
        churn_90d=2,
        test_coverage=None,
        refactoring_score=0.0
    )
    score = calculate_refactoring_score(metrics)
    # Should have some score from the 5-point coverage component
    assert score > 0, f"Expected positive score, got {score}"
    print("PASS: test_refactoring_score_no_coverage")


def test_refactoring_score_large_file():
    """Large files should score higher."""
    metrics_small = FileMetrics(
        path="small.py",
        lines=50,
        complexity=5.0,
        max_complexity=8,
        functions=3,
        classes=0,
        churn_30d=1,
        churn_90d=2,
        test_coverage=0.8,
        refactoring_score=0.0
    )
    metrics_large = FileMetrics(
        path="large.py",
        lines=1000,
        complexity=5.0,
        max_complexity=8,
        functions=3,
        classes=0,
        churn_30d=1,
        churn_90d=2,
        test_coverage=0.8,
        refactoring_score=0.0
    )
    score_small = calculate_refactoring_score(metrics_small)
    score_large = calculate_refactoring_score(metrics_large)
    assert score_large > score_small, \
        f"Large file ({score_large}) should score higher than small ({score_small})"
    print("PASS: test_refactoring_score_large_file")


def run_all():
    test_complexity_simple_function()
    test_complexity_with_conditionals()
    test_complexity_with_loops()
    test_complexity_with_class()
    test_complexity_syntax_error()
    test_refactoring_score_high_complexity()
    test_refactoring_score_low_complexity()
    test_refactoring_score_high_churn()
    test_refactoring_score_no_coverage()
    test_refactoring_score_large_file()
    print("\nAll 10 tests passed!")


if __name__ == "__main__":
    run_all()
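Note that these tests import `compute_file_complexity`, `calculate_refactoring_score`, and `FileMetrics`, none of which the deleted stub above defines, so they target a fuller implementation not shown in this diff. A minimal sketch of the `FileMetrics` shape the tests assume, with field names and types inferred from the constructor calls (hypothetical, not the actual deleted code):

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class FileMetrics:
    path: str
    lines: int
    complexity: float               # mean cyclomatic complexity across functions
    max_complexity: int             # highest single-function complexity
    functions: int
    classes: int
    churn_30d: int                  # commits touching the file, last 30 days
    churn_90d: int                  # commits touching the file, last 90 days
    test_coverage: Optional[float]  # 0.0-1.0, or None when unknown
    refactoring_score: float        # filled in by calculate_refactoring_score
```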