Compare commits

..

1 Commits

Author SHA1 Message Date
TERRA
fe4a04145a feat: add diff analyzer for PR change categorization (closes #118) 2026-04-14 23:25:15 -04:00
3 changed files with 463 additions and 249 deletions

View File

@@ -1,249 +0,0 @@
#!/usr/bin/env python3
"""
Cross-Repo Dependency Graph Builder
Scans repos for import/require/reference patterns and builds a directed
dependency graph. Detects circular dependencies. Outputs DOT and Mermaid.
Usage:
python3 scripts/dependency_graph.py /path/to/repos/
python3 scripts/dependency_graph.py --repos repo1,repo2,repo3 --format mermaid
python3 scripts/dependency_graph.py --repos-dir /path/to/ --format dot --output deps.dot
Patterns detected:
- Python: import X, from X import Y
- JavaScript: require("X"), import ... from "X"
- Go: import "X"
- Ansible: include_role, import_role
- Docker/Compose: image: X, depends_on
- Config references: repo-name in YAML/TOML/JSON
"""
import argparse
import json
import os
import re
import sys
from collections import defaultdict
from pathlib import Path
# Known repo names used for substring matching when scanning file contents.
# Stored in canonical hyphenated form; scan_file_for_deps also checks
# underscore and no-separator variants of each name.
KNOWN_REPOS = [
    "hermes-agent", "timmy-config", "timmy-home", "the-nexus", "the-door",
    "the-beacon", "fleet-ops", "burn-fleet", "timmy-dispatch", "turboquant",
    "compounding-intelligence", "the-playground", "second-son-of-timmy",
    "ai-safety-review", "the-echo-pattern", "timmy-academy", "wolf",
    "the-testament",
]
def normalize_repo_name(name: str) -> str:
    """Canonical repo name: trimmed, lowercase, hyphenated, no .git suffix."""
    cleaned = name.strip().lower()
    return cleaned.replace("_", "-").replace(".git", "")
def scan_file_for_deps(filepath: str, content: str, own_repo: str) -> set:
    """Return the subset of KNOWN_REPOS referenced anywhere in *content*.

    The scanning repo itself is never reported. *filepath* is accepted for
    interface compatibility but is not consulted.
    """
    own_norm = normalize_repo_name(own_repo)
    found = set()
    for candidate in KNOWN_REPOS:
        if normalize_repo_name(candidate) == own_norm:
            continue
        # Spelling variants a reference to this repo might take.
        variants = (
            candidate,                          # exact name
            candidate.replace("-", "_"),        # underscore variant
            candidate.replace("-", ""),         # no separator
            f"/{candidate}/",                   # path reference
            f'"{candidate}"',                   # double quoted
            f"'{candidate}'",                   # single quoted
            f"Timmy_Foundation/{candidate}",    # full Gitea path
            f"Timmy_Foundation.{candidate}",    # Python module path
        )
        if any(v in content for v in variants):
            found.add(candidate)
    return found
def scan_repo(repo_path: str, repo_name: str = None) -> dict:
    """Scan a repo working tree for references to other known repos.

    Args:
        repo_path: Path to the repository directory.
        repo_name: Override for the repo's name; defaults to the dir name.

    Returns:
        {"repo", "dependencies", "files_scanned"} on success, or
        {"error": ...} when repo_path is not a directory.
    """
    path = Path(repo_path)
    if not path.is_dir():
        return {"error": f"Not a directory: {repo_path}"}
    if not repo_name:
        repo_name = path.name
    deps = set()
    files_scanned = 0
    # Only source/config/doc extensions likely to mention other repos.
    exts = {".py", ".js", ".ts", ".go", ".yaml", ".yml", ".toml", ".json",
            ".md", ".sh", ".bash", ".Dockerfile", ".tf", ".hcl"}
    for fpath in path.rglob("*"):
        if not fpath.is_file():
            continue
        if fpath.suffix not in exts:
            continue
        # Skip common vendored / generated directories.
        parts = fpath.parts
        if any(p in (".git", "node_modules", "__pycache__", ".venv", "venv",
                     "vendor", "dist", "build", ".tox") for p in parts):
            continue
        try:
            content = fpath.read_text(errors="ignore")
        except OSError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Unreadable files are skipped
            # best-effort; anything else propagates.
            continue
        deps.update(scan_file_for_deps(str(fpath), content, repo_name))
        files_scanned += 1
    return {
        "repo": repo_name,
        "dependencies": sorted(deps),
        "files_scanned": files_scanned,
    }
def detect_cycles(graph: dict) -> list:
    """Detect circular dependencies using DFS.

    Args:
        graph: mapping of repo name -> {"dependencies": [names...]}.

    Returns:
        A list of cycles, each expressed as a node path that ends where it
        started, e.g. ["a", "b", "a"].
    """
    cycles = []
    visited = set()
    rec_stack = set()

    def dfs(node, path):
        visited.add(node)
        rec_stack.add(node)
        found = None
        for neighbor in graph.get(node, {}).get("dependencies", []):
            if neighbor not in visited:
                found = dfs(neighbor, path + [neighbor])
                if found:
                    break
            elif neighbor in rec_stack:
                # Back edge: the cycle is the path segment from neighbor on.
                cycle_start = path.index(neighbor)
                found = path[cycle_start:] + [neighbor]
                break
        # BUG FIX: the original returned early on a found cycle without
        # removing nodes from rec_stack, leaving stale entries that made
        # path.index(neighbor) raise ValueError on later DFS roots.
        rec_stack.discard(node)
        return found

    for node in graph:
        if node not in visited:
            cycle = dfs(node, [node])
            if cycle:
                cycles.append(cycle)
    return cycles
def to_dot(graph: dict) -> str:
    """Generate Graphviz DOT output for the dependency graph.

    Args:
        graph: mapping of repo name -> {"dependencies": [names...]}.

    Returns:
        A complete `digraph` document as a string.
    """
    lines = ["digraph dependencies {"]
    lines.append("  rankdir=LR;")
    # BUG FIX: the original embedded unescaped double quotes inside
    # double-quoted string literals (fillcolor="#1a1a2e" etc.), which is a
    # SyntaxError. Single-quoted literals fix it without changing output.
    lines.append('  node [shape=box, style=filled, fillcolor="#1a1a2e", fontcolor="#e6edf3"];')
    lines.append('  edge [color="#4a4a6a"];')
    lines.append("")
    for repo, data in sorted(graph.items()):
        dep_count = len(data.get("dependencies", []))
        # Highlight repos with many outgoing dependencies.
        fill = "#2d1b69" if dep_count > 2 else "#16213e"
        lines.append(f'  "{repo}" [fillcolor="{fill}"];')
        for dep in data.get("dependencies", []):
            lines.append(f'  "{repo}" -> "{dep}";')
    lines.append("}")
    return "\n".join(lines)
def to_mermaid(graph: dict) -> str:
    """Generate Mermaid graph output (hyphens become underscores in ids)."""
    out = ["graph LR"]
    # Edges first, in sorted repo order.
    for name in sorted(graph):
        src_id = name.replace('-', '_')
        for dep in graph[name].get("dependencies", []):
            out.append(f" {src_id} --> {dep.replace('-','_')}")
    # Then node labels mapping sanitized ids back to the real repo names.
    out.append("")
    for name in sorted(graph):
        out.append(f" {name.replace('-','_')}[{name}]")
    return "\n".join(out)
def main():
    """CLI entry point: scan repos, detect cycles, emit the chosen format."""
    parser = argparse.ArgumentParser(description="Build cross-repo dependency graph")
    parser.add_argument("repos_dir", nargs="?", help="Directory containing repos")
    parser.add_argument("--repos", help="Comma-separated list of repo paths")
    parser.add_argument("--format", choices=["dot", "mermaid", "json"], default="json")
    parser.add_argument("--output", "-o", help="Output file (default: stdout)")
    parser.add_argument("--cycles-only", action="store_true", help="Only report cycles")
    args = parser.parse_args()

    # Resolve the list of repo paths from whichever flag was given.
    if args.repos:
        paths = [p.strip() for p in args.repos.split(",")]
    elif args.repos_dir:
        root = Path(args.repos_dir)
        paths = [str(d) for d in root.iterdir()
                 if d.is_dir() and not d.name.startswith(".")]
    else:
        parser.print_help()
        sys.exit(1)

    graph = {}
    for rpath in paths:
        name = Path(rpath).name
        print(f"Scanning {name}...", file=sys.stderr)
        scanned = scan_repo(rpath, name)
        if "error" not in scanned:
            graph[name] = scanned

    cycles = detect_cycles(graph)

    if args.cycles_only:
        if not cycles:
            print("No circular dependencies found.")
            sys.exit(0)
        print("CIRCULAR DEPENDENCIES DETECTED:")
        for cycle in cycles:
            print(f" {' -> '.join(cycle)}")
        sys.exit(1)

    # Render in the requested format.
    if args.format == "dot":
        rendered = to_dot(graph)
    elif args.format == "mermaid":
        rendered = to_mermaid(graph)
    else:
        rendered = json.dumps({
            "repos": graph,
            "cycles": cycles,
            "summary": {
                "total_repos": len(graph),
                "total_deps": sum(len(r["dependencies"]) for r in graph.values()),
                "cycles_found": len(cycles),
            }
        }, indent=2)

    if args.output:
        Path(args.output).write_text(rendered)
        print(f"Written to {args.output}", file=sys.stderr)
    else:
        print(rendered)


if __name__ == "__main__":
    main()

239
scripts/diff_analyzer.py Normal file
View File

@@ -0,0 +1,239 @@
"""
Diff Analyzer — Pipeline 6.1
Reads PR diffs and categorizes changes: new code, deleted code, modified code, moved code.
Produces a change summary with line counts per category.
Usage:
from diff_analyzer import DiffAnalyzer
analyzer = DiffAnalyzer()
summary = analyzer.analyze(diff_text)
"""
from __future__ import annotations
import re
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional
class ChangeCategory(Enum):
    """Categories of code changes in a diff hunk."""
    ADDED = "added"        # hunk contains only '+' lines
    DELETED = "deleted"    # hunk contains only '-' lines
    MODIFIED = "modified"  # hunk mixes '+' and '-' lines
    MOVED = "moved"        # reserved; never assigned by _classify_hunk in this file
    CONTEXT = "context"    # default / hunk with no '+' or '-' lines
@dataclass
class Hunk:
    """A single diff hunk with metadata parsed from its @@ header."""
    header: str      # raw "@@ -a,b +c,d @@ ..." line
    old_start: int   # starting line number in the old file
    old_count: int   # line count in the old file (1 when omitted in the header)
    new_start: int   # starting line number in the new file
    new_count: int   # line count in the new file (1 when omitted in the header)
    lines: List[str] = field(default_factory=list)     # raw content lines of the hunk
    category: ChangeCategory = ChangeCategory.CONTEXT  # set by DiffAnalyzer._classify_hunk
    old_lines: int = 0  # number of '-' lines counted in this hunk
    new_lines: int = 0  # number of '+' lines counted in this hunk
@dataclass
class FileChange:
    """Changes within a single file of the diff."""
    path: str                       # new-side path (b/<path> from the diff header)
    old_path: Optional[str] = None  # old-side path; differs from path for renames
    is_new: bool = False            # "new file mode" marker seen
    is_deleted: bool = False        # "deleted file mode" marker seen
    is_renamed: bool = False        # "rename from"/"rename to" marker seen
    hunks: List[Hunk] = field(default_factory=list)  # parsed, classified hunks
    added_lines: int = 0            # total '+' lines across the file's hunks
    deleted_lines: int = 0          # total '-' lines across the file's hunks
    context_lines: int = 0          # total ' ' context lines across the file's hunks
@dataclass
class ChangeSummary:
    """Summary of all changes in a diff."""
    files_changed: int = 0
    files_added: int = 0
    files_deleted: int = 0
    files_renamed: int = 0
    files_modified: int = 0
    total_added: int = 0
    total_deleted: int = 0
    total_context: int = 0
    hunks_added: int = 0
    hunks_deleted: int = 0
    hunks_modified: int = 0
    hunks_moved: int = 0
    file_changes: List[FileChange] = field(default_factory=list)

    def to_dict(self) -> dict:
        """Serialize to dict for JSON output."""
        # Counter attributes copied verbatim, in output order.
        counter_keys = (
            "files_changed", "files_added", "files_deleted", "files_renamed",
            "files_modified", "total_added", "total_deleted", "total_context",
            "hunks_added", "hunks_deleted", "hunks_modified", "hunks_moved",
        )
        payload = {key: getattr(self, key) for key in counter_keys}
        payload["files"] = [
            {
                "path": fc.path,
                "old_path": fc.old_path,
                "is_new": fc.is_new,
                "is_deleted": fc.is_deleted,
                "is_renamed": fc.is_renamed,
                "added": fc.added_lines,
                "deleted": fc.deleted_lines,
                "context": fc.context_lines,
            }
            for fc in self.file_changes
        ]
        return payload
# Regexes for unified-diff headers.
# Hunk header: "@@ -old_start[,old_count] +new_start[,new_count] @@ trailer";
# the counts are optional and the parser defaults them to 1 when omitted.
_HUNK_RE = re.compile(
    r"^@@\s+-(\d+)(?:,(\d+))?\s+\+(\d+)(?:,(\d+))?\s+@@(.*)$"
)
# File header: "diff --git a/<old> b/<new>".
_FILE_HEADER_RE = re.compile(r"^diff --git a/(.*) b/(.*)$")
# Rename markers: matches either "rename from <path>" or "rename to <path>".
_RENAME_RE = re.compile(r"^rename from (.+)$|^rename to (.+)$")
class DiffAnalyzer:
    """Parses unified diffs and categorizes changes per file and per hunk."""

    def analyze(self, diff_text: str) -> ChangeSummary:
        """Analyze a unified diff string and return a ChangeSummary.

        Recognizes "diff --git" file headers, new/deleted-file and rename
        markers, "@@" hunk headers, and hunk content lines. Binary-diff
        files yield a FileChange with zero counted lines.
        """
        summary = ChangeSummary()
        current_file: Optional[FileChange] = None
        current_hunk: Optional[Hunk] = None

        def finish_hunk() -> None:
            # Classify the open hunk (if any) and attach it to its file.
            nonlocal current_hunk
            if current_hunk is not None and current_file is not None:
                self._classify_hunk(current_hunk)
                current_file.hunks.append(current_hunk)
            current_hunk = None

        def finish_file() -> None:
            # Finalize the open file (if any) into the summary.
            nonlocal current_file
            if current_file is not None:
                self._classify_file(current_file)
                summary.file_changes.append(current_file)
            current_file = None

        for line in diff_text.splitlines(keepends=False):
            m = _FILE_HEADER_RE.match(line)
            if m:
                # BUG FIX: the original never finalized the open hunk when a
                # new "diff --git" header appeared, so the last hunk of every
                # file except the final one was silently dropped and the
                # per-hunk counters were wrong for multi-file diffs.
                finish_hunk()
                finish_file()
                current_file = FileChange(path=m.group(2), old_path=m.group(1))
                continue
            if current_file is None:
                # Preamble before the first file header.
                continue
            if line.startswith("new file mode"):
                current_file.is_new = True
                continue
            if line.startswith("deleted file mode"):
                current_file.is_deleted = True
                continue
            if _RENAME_RE.match(line):
                current_file.is_renamed = True
                continue
            hm = _HUNK_RE.match(line)
            if hm:
                finish_hunk()
                current_hunk = Hunk(
                    header=line,
                    old_start=int(hm.group(1)),
                    old_count=int(hm.group(2) or 1),  # count defaults to 1
                    new_start=int(hm.group(3)),
                    new_count=int(hm.group(4) or 1),
                )
                continue
            if current_hunk is None:
                # e.g. ---/+++ lines or "Binary files ... differ".
                continue
            # Hunk content: count adds, deletes, and context.
            current_hunk.lines.append(line)
            if line.startswith("+"):
                current_hunk.new_lines += 1
                current_file.added_lines += 1
            elif line.startswith("-"):
                current_hunk.old_lines += 1
                current_file.deleted_lines += 1
            elif line.startswith(" "):
                current_file.context_lines += 1

        # Finalize whatever is still open at end of input.
        finish_hunk()
        finish_file()
        self._aggregate(summary)
        return summary

    def _aggregate(self, summary: ChangeSummary) -> None:
        """Roll per-file and per-hunk counts up into the summary totals."""
        summary.files_changed = len(summary.file_changes)
        for fc in summary.file_changes:
            summary.total_added += fc.added_lines
            summary.total_deleted += fc.deleted_lines
            summary.total_context += fc.context_lines
            if fc.is_new:
                summary.files_added += 1
            elif fc.is_deleted:
                summary.files_deleted += 1
            elif fc.is_renamed:
                summary.files_renamed += 1
            else:
                summary.files_modified += 1
            for h in fc.hunks:
                if h.category == ChangeCategory.ADDED:
                    summary.hunks_added += 1
                elif h.category == ChangeCategory.DELETED:
                    summary.hunks_deleted += 1
                elif h.category == ChangeCategory.MODIFIED:
                    summary.hunks_modified += 1
                elif h.category == ChangeCategory.MOVED:
                    summary.hunks_moved += 1

    def _classify_hunk(self, hunk: Hunk) -> None:
        """Classify a hunk by its add/delete composition."""
        if hunk.new_lines > 0 and hunk.old_lines == 0:
            hunk.category = ChangeCategory.ADDED
        elif hunk.old_lines > 0 and hunk.new_lines == 0:
            hunk.category = ChangeCategory.DELETED
        elif hunk.new_lines > 0 and hunk.old_lines > 0:
            hunk.category = ChangeCategory.MODIFIED
        else:
            hunk.category = ChangeCategory.CONTEXT

    def _classify_file(self, fc: FileChange) -> None:
        """Hook for file-level reclassification (renames handled via headers)."""
        pass

224
tests/test_diff_analyzer.py Normal file
View File

@@ -0,0 +1,224 @@
"""Tests for diff_analyzer module."""
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'scripts'))
from diff_analyzer import DiffAnalyzer, ChangeCategory
def test_parse_simple_addition():
    """A brand-new 3-line file registers as one added file with one add hunk."""
    patch = """diff --git a/foo.py b/foo.py
new file mode 100644
--- /dev/null
+++ b/foo.py
@@ -0,0 +1,3 @@
+def hello():
+ return "world"
+# end
"""
    res = DiffAnalyzer().analyze(patch)
    assert res.files_changed == 1
    assert res.files_added == 1
    assert res.files_modified == 0
    assert res.total_added == 3
    assert res.total_deleted == 0
    assert res.hunks_added == 1
    assert len(res.file_changes) == 1
    fc = res.file_changes[0]
    assert fc.is_new is True
    assert fc.path == "foo.py"
def test_parse_simple_deletion():
    """A removed 2-line file registers as one deleted file with one delete hunk."""
    patch = """diff --git a/old.py b/old.py
deleted file mode 100644
--- a/old.py
+++ /dev/null
@@ -1,2 +0,0 @@
-x = 1
-y = 2
"""
    res = DiffAnalyzer().analyze(patch)
    assert res.files_changed == 1
    assert res.files_deleted == 1
    assert res.total_deleted == 2
    assert res.total_added == 0
    assert res.hunks_deleted == 1
    assert res.file_changes[0].is_deleted is True
def test_parse_modification():
    """A mixed +/- hunk is categorized as MODIFIED with correct line counts."""
    patch = """diff --git a/bar.py b/bar.py
--- a/bar.py
+++ b/bar.py
@@ -10,3 +10,4 @@ def foo():
 existing()
- old_call()
+ new_call()
+ extra_step()
 return
"""
    res = DiffAnalyzer().analyze(patch)
    assert res.files_changed == 1
    assert res.files_modified == 1
    assert res.total_added == 2      # +new_call(), +extra_step()
    assert res.total_deleted == 1    # -old_call()
    assert res.total_context == 2    # 2 context lines
    assert res.hunks_modified == 1
def test_parse_multiple_files():
    """One modified, one added, and one deleted file in a single diff."""
    patch = """diff --git a/a.py b/a.py
--- a/a.py
+++ b/a.py
@@ -1,1 +1,2 @@
 existing
+added
diff --git a/b.py b/b.py
new file mode 100644
--- /dev/null
+++ b/b.py
@@ -0,0 +1,1 @@
+new file
diff --git a/c.py b/c.py
deleted file mode 100644
--- a/c.py
+++ /dev/null
@@ -1,1 +0,0 @@
-gone
"""
    res = DiffAnalyzer().analyze(patch)
    assert res.files_changed == 3
    assert res.files_added == 1
    assert res.files_deleted == 1
    assert res.files_modified == 1
    assert res.total_added == 2
    assert res.total_deleted == 1
def test_parse_rename():
    """Rename markers set is_renamed and record both old and new paths."""
    patch = """diff --git a/old_name.py b/new_name.py
rename from old_name.py
rename to new_name.py
--- a/old_name.py
+++ b/new_name.py
@@ -1,1 +1,1 @@
-old_func()
+new_func()
"""
    res = DiffAnalyzer().analyze(patch)
    assert res.files_changed == 1
    assert res.files_renamed == 1
    fc = res.file_changes[0]
    assert fc.is_renamed is True
    assert fc.old_path == "old_name.py"
    assert fc.path == "new_name.py"
def test_parse_mixed_hunks():
    """A file with one pure-add hunk and one pure-delete hunk."""
    patch = """diff --git a/mixed.py b/mixed.py
--- a/mixed.py
+++ b/mixed.py
@@ -5,0 +6,2 @@
+new_line_1
+new_line_2
@@ -20,2 +22,0 @@
-removed_1
-removed_2
"""
    res = DiffAnalyzer().analyze(patch)
    assert res.files_changed == 1
    assert res.hunks_added == 1
    assert res.hunks_deleted == 1
    assert res.total_added == 2
    assert res.total_deleted == 2
def test_empty_diff():
    """An empty string yields an all-zero summary."""
    res = DiffAnalyzer().analyze("")
    assert res.files_changed == 0
    assert res.total_added == 0
    assert res.total_deleted == 0
def test_to_dict():
    """to_dict mirrors the summary counters and per-file entries."""
    patch = """diff --git a/test.py b/test.py
new file mode 100644
--- /dev/null
+++ b/test.py
@@ -0,0 +1,2 @@
+line1
+line2
"""
    payload = DiffAnalyzer().analyze(patch).to_dict()
    assert payload["files_changed"] == 1
    assert payload["files_added"] == 1
    assert payload["total_added"] == 2
    assert payload["total_deleted"] == 0
    assert len(payload["files"]) == 1
    entry = payload["files"][0]
    assert entry["path"] == "test.py"
    assert entry["is_new"] is True
def test_context_only_hunk():
    """A hunk containing only context lines (rare but possible)."""
    patch = """diff --git a/noop.py b/noop.py
--- a/noop.py
+++ b/noop.py
@@ -5,3 +5,3 @@
 context1
 context2
 context3
"""
    res = DiffAnalyzer().analyze(patch)
    assert res.total_context == 3
    assert res.total_added == 0
    assert res.total_deleted == 0
def test_binary_files_skipped():
    """Binary file diffs have no hunks — the file is counted, no lines are."""
    patch = """diff --git a/image.png b/image.png
--- a/image.png
+++ b/image.png
Binary files a/image.png and b/image.png differ
"""
    res = DiffAnalyzer().analyze(patch)
    assert res.files_changed == 1
    assert res.total_added == 0
    assert res.total_deleted == 0
if __name__ == "__main__":
    # Run every test in declaration order when executed directly.
    for _test in (
        test_parse_simple_addition,
        test_parse_simple_deletion,
        test_parse_modification,
        test_parse_multiple_files,
        test_parse_rename,
        test_parse_mixed_hunks,
        test_empty_diff,
        test_to_dict,
        test_context_only_hunk,
        test_binary_files_skipped,
    ):
        _test()
    print("All 10 tests passed.")