Compare commits
1 Commits
step35/151 ... step35/137

| Author | SHA1 | Date |
|---|---|---|
| | b823d4e308 | |
@@ -1,206 +0,0 @@ — scripts/graph_visualizer.py (deleted, 206 lines)

#!/usr/bin/env python3
"""
graph_visualizer.py — Generate visual graph representations of the knowledge graph.

Reads knowledge/index.json and renders the fact relationship graph.
Supports ASCII terminal output and DOT export for Graphviz.

Usage:
    python3 scripts/graph_visualizer.py                          # ASCII, all nodes
    python3 scripts/graph_visualizer.py --format dot             # DOT output
    python3 scripts/graph_visualizer.py --seed root --max-depth 2
    python3 scripts/graph_visualizer.py --filter-domain hermes-agent
    python3 scripts/graph_visualizer.py --filter-category pitfall

Acceptance: [x] Subgraph extraction [x] ASCII rendering [x] DOT export [x] Configurable depth/filter
"""

import argparse
import json
import sys
from collections import defaultdict, deque
from pathlib import Path


def load_index(index_path: Path):
    with open(index_path) as f:
        return json.load(f)


def build_adjacency(facts):
    """Map each fact ID to the IDs it references; edges to unknown IDs are dropped."""
    adj = defaultdict(list)
    all_ids = {f['id'] for f in facts if 'id' in f}
    for f in facts:
        fid = f.get('id')
        if not fid:
            continue
        for rel in f.get('related', []):
            if rel in all_ids:
                adj[fid].append(rel)
    return dict(adj)


def build_reverse_adjacency(adj):
    rev = defaultdict(list)
    for src, targets in adj.items():
        for tgt in targets:
            rev[tgt].append(src)
    return dict(rev)


def extract_subgraph(
    facts,
    adj,
    rev_adj,
    seeds=None,
    max_depth=None,
    filter_domain=None,
    filter_category=None,
):
    filtered_nodes = set()
    for f in facts:
        fid = f.get('id')
        if not fid:
            continue
        if filter_domain and f.get('domain') != filter_domain:
            continue
        if filter_category and f.get('category') != filter_category:
            continue
        filtered_nodes.add(fid)

    if seeds is None:
        # No seeds: return whatever survived the filters. (Falling back to the
        # full ID set here would silently ignore filters that match nothing.)
        return filtered_nodes

    valid_seeds = [s for s in seeds if s in filtered_nodes]
    if not valid_seeds:
        return set()

    # BFS over both forward and reverse edges, bounded by max_depth.
    visited = set()
    queue = deque([(s, 0) for s in valid_seeds])
    while queue:
        node, depth = queue.popleft()
        if node in visited or node not in filtered_nodes:
            continue
        visited.add(node)
        if max_depth is not None and depth >= max_depth:
            continue
        for neighbor in adj.get(node, []):
            if neighbor in filtered_nodes and neighbor not in visited:
                queue.append((neighbor, depth + 1))
        for neighbor in rev_adj.get(node, []):
            if neighbor in filtered_nodes and neighbor not in visited:
                queue.append((neighbor, depth + 1))
    return visited


def build_fact_map(facts):
    return {f['id']: f for f in facts if 'id' in f and 'fact' in f}


def render_ascii(subgraph_ids, adj, fact_map):
    lines = []
    visited = set()
    queue = deque()

    # Roots = nodes with no inbound edges inside the subgraph.
    inbound = defaultdict(int)
    for src in subgraph_ids:
        for tgt in adj.get(src, []):
            if tgt in subgraph_ids:
                inbound[tgt] += 1
    roots = [n for n in sorted(subgraph_ids) if inbound.get(n, 0) == 0]
    if not roots:
        roots = sorted(subgraph_ids)
    for root in roots:
        queue.append((root, 0, None))

    while queue:
        node, depth, parent = queue.popleft()
        if node in visited:
            continue
        visited.add(node)
        fact = fact_map.get(node, {})
        label = fact.get('fact', str(node))[:80]
        category = fact.get('category', 'fact')
        domain = fact.get('domain', 'global')
        node_label = domain + '/' + category + ': ' + label
        if parent is None:
            lines.append(f"{'  ' * depth}┌─ {node_label}")
        else:
            lines.append(f"{'  ' * depth}├─ {node_label}")
        for child in adj.get(node, []):
            if child in subgraph_ids:
                queue.append((child, depth + 1, node))

    if len(visited) < len(subgraph_ids):
        lines.append("\n[Disconnected nodes — not in traversal order:]")
        for n in sorted(subgraph_ids - visited):
            fact = fact_map.get(n, {})
            label = fact.get('fact', n)[:60]
            lines.append(f"  {n} — {label}")
    return "\n".join(lines)


def render_dot(subgraph_ids, adj, fact_map):
    lines = ["digraph knowledge_graph {", "  rankdir=LR;"]
    cat_colors = {
        'fact': '#3498db',
        'pitfall': '#e74c3c',
        'pattern': '#2ecc71',
        'tool-quirk': '#f39c12',
        'question': '#9b59b6',
    }
    for nid in sorted(subgraph_ids):
        fact = fact_map.get(nid, {})
        category = fact.get('category', 'fact')
        domain = fact.get('domain', 'global')
        label = fact.get('fact', nid).replace('"', '\\"')[:80]
        fillcolor = cat_colors.get(category, '#666666')
        lines.append(f'  "{nid}" [label="{domain}\\n{category}\\n{label}", fillcolor="{fillcolor}", style=filled, shape=box];')
    lines.append("")
    for src in sorted(subgraph_ids):
        for tgt in adj.get(src, []):
            if tgt in subgraph_ids:
                lines.append(f'  "{src}" -> "{tgt}";')
    lines.append("}")
    return "\n".join(lines)


def main():
    parser = argparse.ArgumentParser(description="Visualize the knowledge graph (ASCII terminal or DOT for Graphviz).")
    parser.add_argument("--index", type=Path, default=Path(__file__).parent.parent / "knowledge" / "index.json",
                        help="Path to knowledge/index.json")
    parser.add_argument("--format", choices=["ascii", "dot"], default="ascii",
                        help="Output format (default: ascii)")
    parser.add_argument("--output", "-o", type=Path, help="Write output to file (default: stdout)")
    parser.add_argument("--seed", help="Starting fact ID (comma-separated). Omit to render the full graph.")
    parser.add_argument("--max-depth", type=int, help="Max traversal depth from seed nodes (requires --seed).")
    parser.add_argument("--filter-domain", help="Only include facts from this domain.")
    parser.add_argument("--filter-category", help="Only include facts of this category.")
    args = parser.parse_args()

    index = load_index(args.index)
    facts = index.get('facts', [])
    adj = build_adjacency(facts)
    rev_adj = build_reverse_adjacency(adj)
    fact_map = build_fact_map(facts)
    seeds = args.seed.split(',') if args.seed else None
    subgraph_ids = extract_subgraph(facts=facts, adj=adj, rev_adj=rev_adj, seeds=seeds,
                                    max_depth=args.max_depth,
                                    filter_domain=args.filter_domain,
                                    filter_category=args.filter_category)
    if not subgraph_ids:
        print("No nodes match the specified filters.", file=sys.stderr)
        sys.exit(1)
    if args.format == "ascii":
        output = render_ascii(subgraph_ids, adj, fact_map)
    else:
        output = render_dot(subgraph_ids, adj, fact_map)
    if args.output:
        args.output.write_text(output)
        print(f"Written: {args.output}", file=sys.stderr)
    else:
        print(output)


if __name__ == "__main__":
    main()
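For orientation: on a two-node graph (the same fixture that test_render_dot_simple uses further down), render_dot should emit roughly the following, which Graphviz can render with `dot -Tpng graph.dot -o graph.png`. Note the labels contain literal `\n` escapes, which DOT interprets as line breaks:

digraph knowledge_graph {
  rankdir=LR;
  "x" [label="d1\nfact\nnode x", fillcolor="#3498db", style=filled, shape=box];
  "y" [label="d2\npitfall\nnode y", fillcolor="#e74c3c", style=filled, shape=box];

  "x" -> "y";
}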
scripts/release_note_analyzer.py — new executable file, 203 lines
@@ -0,0 +1,203 @@
#!/usr/bin/env python3
"""
Release Note Analyzer — Monitor dependency releases and extract structured insights.

Fetches GitHub releases for configured repositories, parses changelogs,
categorizes changes, and flags breaking changes.

Usage:
    python3 scripts/release_note_analyzer.py --repos owner/repo1,owner/repo2
    python3 scripts/release_note_analyzer.py --repos numpy/numpy --limit 5
    python3 scripts/release_note_analyzer.py --repos owner/repo --output metrics/releases.json
    python3 scripts/release_note_analyzer.py --repos owner/repo --token $GITHUB_TOKEN

Output:
    JSON with per-release structure: version, date, url, categories (features, fixes, breaking), raw_body
"""

import argparse
import json
import os
import re
import sys
from dataclasses import dataclass, field, asdict
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional


@dataclass
class ReleaseAnalysis:
    version: str
    date: str
    url: str
    categories: Dict[str, List[str]] = field(default_factory=dict)
    breaking_change_flags: List[str] = field(default_factory=list)
    raw_body: str = ""

    def to_dict(self) -> Dict[str, Any]:
        return asdict(self)


def fetch_github_releases(repo: str, token: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
    """Fetch the latest releases from the GitHub API."""
    import urllib.error
    import urllib.request

    url = f"https://api.github.com/repos/{repo}/releases?per_page={limit}"
    headers = {"Accept": "application/vnd.github.v3+json"}
    if token:
        headers["Authorization"] = f"token {token}"

    req = urllib.request.Request(url, headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            return json.loads(resp.read())
    except urllib.error.HTTPError as e:
        print(f"Error fetching releases for {repo}: HTTP {e.code}", file=sys.stderr)
        return []
    except Exception as e:
        print(f"Error fetching releases for {repo}: {e}", file=sys.stderr)
        return []


def categorize_changelog(body: str) -> Dict[str, List[str]]:
    """Categorize release note lines into features, fixes, and other."""
    categories = {
        "features": [],
        "fixes": [],
        "other": [],
    }

    if not body:
        return categories

    lines = body.split('\n')
    current_section = None

    # Section header patterns
    feature_patterns = re.compile(r'^(?:features?|new|add|enhancement)s?', re.IGNORECASE)
    fix_patterns = re.compile(r'^(?:fix(?:es|ed)?|bug|patch|correction)', re.IGNORECASE)

    for line in lines:
        stripped = line.strip()
        if not stripped:
            continue

        # Check for section headers (e.g., "### Features", "## Added")
        header_match = re.match(r'^#{1,3}\s+(.+)$', stripped)
        if header_match:
            header = header_match.group(1).lower()
            if feature_patterns.search(header):
                current_section = "features"
            elif fix_patterns.search(header):
                current_section = "fixes"
            else:
                current_section = None
            continue

        # Categorize based on line content
        if current_section:
            categories[current_section].append(stripped)
        else:
            # Infer from keywords. Strip any leading bullet marker first so
            # lines like "- Added ..." still match the anchored patterns.
            content = re.sub(r'^[-*+]\s+', '', stripped)
            if re.search(r'^(?:added|new|feature|introdu)', content, re.IGNORECASE):
                categories["features"].append(stripped)
            elif re.search(r'^(?:fix|bug|patch|resolved)', content, re.IGNORECASE):
                categories["fixes"].append(stripped)
            else:
                categories["other"].append(stripped)

    # Deduplicate within categories, preserving order
    for cat in categories:
        categories[cat] = list(dict.fromkeys(categories[cat]))

    return categories


def detect_breaking_changes(body: str) -> List[str]:
    """Detect and extract potential breaking change indicators."""
    breaking_indicators = []
    lines = body.split('\n')

    # Keywords that suggest breaking changes
    breaking_keywords = re.compile(
        r'\b(?:BREAKING|breaking\s+change|backward\s+incompatible|'
        r'removed\s+.*?API|deprecated.*?removed|'
        r'major\s+version|'
        r'not\s+backward\s+compatible)\b',
        re.IGNORECASE
    )

    for line in lines:
        if breaking_keywords.search(line):
            breaking_indicators.append(line.strip())

    return breaking_indicators


def analyze_releases(repos: List[str], token: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
    """Fetch and analyze releases for all configured repos."""
    all_releases = []

    for repo in repos:
        repo = repo.strip()
        if not repo:
            continue

        releases = fetch_github_releases(repo, token=token, limit=limit)
        for release_data in releases:
            body = release_data.get('body') or ""
            analysis = ReleaseAnalysis(
                version=release_data.get('tag_name', 'unknown'),
                date=release_data.get('published_at', ''),
                url=release_data.get('html_url', ''),
                raw_body=body[:5000],  # Truncate for output size
            )
            analysis.categories = categorize_changelog(body)
            analysis.breaking_change_flags = detect_breaking_changes(body)
            all_releases.append(analysis.to_dict())

    return all_releases


def main():
    parser = argparse.ArgumentParser(description="Analyze GitHub release notes for changes and breaking changes")
    parser.add_argument('--repos', required=True, help='Comma-separated list of GitHub repos (owner/repo)')
    parser.add_argument('--token', help='GitHub API token (or set GITHUB_TOKEN env var)')
    parser.add_argument('--limit', type=int, default=10, help='Max releases per repo (default: 10)')
    parser.add_argument('--output', help='Write JSON output to file (default: stdout)')

    args = parser.parse_args()

    repos = [r.strip() for r in args.repos.split(',')]
    token = args.token or os.environ.get('GITHUB_TOKEN')

    results = analyze_releases(repos, token=token, limit=args.limit)

    output = {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "repos": repos,
        "release_count": len(results),
        "releases": results,
    }

    if args.output:
        with open(args.output, 'w') as f:
            json.dump(output, f, indent=2)
        print(f"Wrote {len(results)} releases to {args.output}")
    else:
        print(json.dumps(output, indent=2))


if __name__ == '__main__':
    main()
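The analyzer can also be driven programmatically rather than via the CLI. A minimal sketch (not part of the diff), loading the module by file path the same way the test file below does, since scripts/ is not a package:

# Sketch: programmatic use of analyze_releases, run from the repo root.
import importlib.util
import os

spec = importlib.util.spec_from_file_location(
    "release_note_analyzer", "scripts/release_note_analyzer.py")
rna = importlib.util.module_from_spec(spec)
spec.loader.exec_module(rna)

releases = rna.analyze_releases(["numpy/numpy"],
                                token=os.environ.get("GITHUB_TOKEN"), limit=3)
for r in releases:
    print(r["version"], f"{len(r['categories']['fixes'])} fixes,",
          f"{len(r['breaking_change_flags'])} breaking flags")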
@@ -1,105 +0,0 @@ — scripts/test_graph_visualizer.py (deleted, 105 lines)

#!/usr/bin/env python3
"""
Tests for graph_visualizer.py — smoke test + subgraph logic.
Run: python3 scripts/test_graph_visualizer.py
"""

import json
import sys
import tempfile
from pathlib import Path

sys.path.insert(0, str(Path(__file__).resolve().parent))
import graph_visualizer as gv


def make_index(facts, tmp_dir):
    p = tmp_dir / "index.json"
    p.write_text(json.dumps({"version": 1, "total_facts": len(facts), "facts": facts}, indent=2))
    return p


def test_build_adjacency_simple():
    facts = [{"id": "a", "related": ["b", "c"]}, {"id": "b", "related": ["c"]}, {"id": "c", "related": []}]
    adj = gv.build_adjacency(facts)
    assert adj == {"a": ["b", "c"], "b": ["c"]}
    print("  PASS: build_adjacency simple")


def test_build_adjacency_unknown_nodes():
    facts = [{"id": "a", "related": ["x", "b"]}, {"id": "b", "related": []}]
    adj = gv.build_adjacency(facts)
    assert adj == {"a": ["b"]}
    print("  PASS: build_adjacency filters unknown nodes")


def test_extract_subgraph_seed_only():
    facts = [{"id": "a", "domain": "t", "category": "f"}, {"id": "b", "domain": "t", "category": "f"}, {"id": "c", "domain": "t", "category": "f"}]
    adj = {"a": ["b"], "b": ["c"], "c": []}
    rev_adj = gv.build_reverse_adjacency(adj)
    sub = gv.extract_subgraph(facts, adj, rev_adj, seeds=["a"])
    assert sub == {"a", "b", "c"}, f"got {sub}"
    print("  PASS: extract_subgraph with seed returns full reachable set")


def test_extract_subgraph_with_depth():
    facts = [{"id": "a", "domain": "t", "category": "f"}, {"id": "b", "domain": "t", "category": "f"}, {"id": "c", "domain": "t", "category": "f"}, {"id": "d", "domain": "t", "category": "f"}]
    adj = {"a": ["b"], "b": ["c"], "c": ["d"], "d": []}
    rev_adj = gv.build_reverse_adjacency(adj)
    sub = gv.extract_subgraph(facts, adj, rev_adj, seeds=["a"], max_depth=2)
    assert sub == {"a", "b", "c"}
    print("  PASS: extract_subgraph depth=2 includes up to depth 2")


def test_extract_subgraph_filter_domain():
    facts = [{"id": "a", "domain": "alpha", "category": "f"}, {"id": "b", "domain": "beta", "category": "f"}, {"id": "c", "domain": "alpha", "category": "f"}]
    sub = gv.extract_subgraph(facts, {}, {}, filter_domain="alpha")
    assert sub == {"a", "c"}
    print("  PASS: filter_domain works")


def test_extract_subgraph_filter_category():
    facts = [{"id": "a", "domain": "g", "category": "pitfall"}, {"id": "b", "domain": "g", "category": "fact"}, {"id": "c", "domain": "g", "category": "pitfall"}]
    sub = gv.extract_subgraph(facts, {}, {}, filter_category="pitfall")
    assert sub == {"a", "c"}
    print("  PASS: filter_category works")


def test_render_ascii_simple_chain():
    facts = [{"id": "a", "fact": "A", "domain": "t", "category": "f"}, {"id": "b", "fact": "B", "domain": "t", "category": "f"}, {"id": "c", "fact": "C", "domain": "t", "category": "f"}]
    adj = {"a": ["b"], "b": ["c"]}
    fact_map = gv.build_fact_map(facts)
    out = gv.render_ascii({"a", "b", "c"}, adj, fact_map)
    assert "A" in out and "B" in out and "C" in out
    print("  PASS: render_ascii simple chain")


def test_render_dot_simple():
    facts = [{"id": "x", "fact": "node x", "domain": "d1", "category": "fact"}, {"id": "y", "fact": "node y", "domain": "d2", "category": "pitfall"}]
    adj = {"x": ["y"]}
    fact_map = gv.build_fact_map(facts)
    out = gv.render_dot({"x", "y"}, adj, fact_map)
    assert 'digraph knowledge_graph' in out and '"x"' in out and '"y"' in out and '->' in out
    assert '#3498db' in out and '#e74c3c' in out
    print("  PASS: render_dot basic structure and colors")


def main():
    print("\n=== graph_visualizer test suite ===\n")
    passed = failed = 0
    tests = [test_build_adjacency_simple, test_build_adjacency_unknown_nodes,
             test_extract_subgraph_seed_only, test_extract_subgraph_with_depth,
             test_extract_subgraph_filter_domain, test_extract_subgraph_filter_category,
             test_render_ascii_simple_chain, test_render_dot_simple]
    for test in tests:
        try:
            test()
            passed += 1
        except AssertionError as e:
            print(f"  FAIL: {test.__name__} — {e}")
            failed += 1
        except Exception as e:
            print(f"  ERROR: {test.__name__} — {e}")
            failed += 1
    print(f"\n=== Results: {passed}/{passed + failed} passed, {failed} failed ===")
    return failed == 0


if __name__ == "__main__":
    sys.exit(0 if main() else 1)
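Note that make_index (and the tempfile import) are not exercised by the tests shown above. A load-path smoke test using them might look like this — a hypothetical addition, not part of the original suite:

# Hypothetical smoke test for load_index via make_index (not in the diff).
def test_load_index_roundtrip():
    with tempfile.TemporaryDirectory() as d:
        p = make_index([{"id": "a", "fact": "A", "related": []}], Path(d))
        idx = gv.load_index(p)
        assert idx["total_facts"] == 1 and idx["facts"][0]["id"] == "a"
        print("  PASS: load_index roundtrip")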
tests/test_release_note_analyzer.py — new file, 107 lines
@@ -0,0 +1,107 @@
#!/usr/bin/env python3
"""Tests for scripts/release_note_analyzer.py"""

import importlib.util
import os
import sys

sys.path.insert(0, os.path.join(os.path.dirname(__file__) or ".", ".."))
spec = importlib.util.spec_from_file_location(
    "release_note_analyzer",
    os.path.join(os.path.dirname(__file__) or ".", "..", "scripts", "release_note_analyzer.py")
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

categorize_changelog = mod.categorize_changelog
detect_breaking_changes = mod.detect_breaking_changes


def test_categorize_basic_features():
    """Should categorize feature-like lines correctly."""
    body = """
### Features
- Added new API endpoint
- Introduced batch processing

### Bug Fixes
- Fixed memory leak
"""
    categories = categorize_changelog(body)
    assert len(categories["features"]) >= 1, f"Got features: {categories['features']}"
    assert any("batch" in line or "API" in line for line in categories["features"])
    assert any("memory leak" in line for line in categories["fixes"])
    print("PASS: test_categorize_basic_features")


def test_categorize_fixes():
    """Should categorize bug fix lines correctly."""
    body = """
## Fixed
- Resolved crash on startup
- Patched security vulnerability

## Changed
- Updated documentation
"""
    categories = categorize_changelog(body)
    assert any("crash" in line for line in categories["fixes"]), f"Got fixes: {categories['fixes']}"
    assert any("security" in line for line in categories["fixes"]), f"Got fixes: {categories['fixes']}"
    print("PASS: test_categorize_fixes")


def test_categorize_other():
    """Uncategorized lines should go to 'other'."""
    body = "- Some random note\n- Another note"
    categories = categorize_changelog(body)
    assert len(categories["other"]) >= 2
    print("PASS: test_categorize_other")


def test_detect_breaking_changes():
    """Should flag lines containing breaking change keywords."""
    body = """
## Features
- Added new feature

## Breaking Changes
- Removed deprecated API endpoint
This is a BREAKING CHANGE: you must update your clients.

We also removed support for Python 3.8.
"""
    flags = detect_breaking_changes(body)
    assert len(flags) >= 2, f"Expected >=2 breaking flags, got {len(flags)}: {flags}"
    assert any("deprecated API" in f for f in flags), f"Missing: {flags}"
    assert any("BREAKING CHANGE" in f for f in flags), f"Missing: {flags}"
    print("PASS: test_detect_breaking_changes")


def test_detect_breaking_changes_case_insensitive():
    """Breaking change detection should be case-insensitive."""
    body = "This is a breaking change: old behavior removed"
    flags = detect_breaking_changes(body)
    assert len(flags) >= 1
    print("PASS: test_detect_breaking_changes_case_insensitive")


def test_empty_body():
    """Empty body should produce empty categories and no breaking flags."""
    body = ""
    categories = categorize_changelog(body)
    assert categories["features"] == []
    assert categories["fixes"] == []
    assert detect_breaking_changes(body) == []
    print("PASS: test_empty_body")


if __name__ == "__main__":
    test_categorize_basic_features()
    test_categorize_fixes()
    test_categorize_other()
    test_detect_breaking_changes()
    test_detect_breaking_changes_case_insensitive()
    test_empty_body()
    print("\nAll release_note_analyzer tests passed.")