Compare commits
1 Commits
step35/158
...
step35/137
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b823d4e308 |
203
scripts/release_note_analyzer.py
Executable file
203
scripts/release_note_analyzer.py
Executable file
@@ -0,0 +1,203 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Release Note Analyzer — Monitor dependency releases and extract structured insights.
|
||||
|
||||
Fetches GitHub releases for configured repositories, parses changelogs,
|
||||
categorizes changes, and flags breaking changes.
|
||||
|
||||
Usage:
|
||||
python3 scripts/release_note_analyzer.py --repos owner/repo1,owner/repo2
|
||||
python3 scripts/release_note_analyzer.py --repos numpy/numpy --limit 5
|
||||
python3 scripts/release_note_analyzer.py --repos owner/repo --output metrics/releases.json
|
||||
python3 scripts/release_note_analyzer.py --repos owner/repo --token $GITHUB_TOKEN
|
||||
|
||||
Output:
|
||||
JSON with per-release structure: version, date, url, categories (features, fixes, breaking), raw_body
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from typing import Dict, List, Any, Optional
|
||||
from dataclasses import dataclass, field, asdict
|
||||
import os
|
||||
|
||||
|
||||
@dataclass
class ReleaseAnalysis:
    """Structured analysis of a single GitHub release."""

    version: str  # release tag name (GitHub `tag_name`), e.g. "v1.2.3"
    date: str  # GitHub `published_at` timestamp; may be empty
    url: str  # GitHub `html_url` of the release page
    # Changelog lines bucketed under "features" / "fixes" / "other"
    # (filled in by categorize_changelog via analyze_releases).
    categories: Dict[str, List[str]] = field(default_factory=dict)
    # Lines flagged by detect_breaking_changes as breaking-change indicators.
    breaking_change_flags: List[str] = field(default_factory=list)
    raw_body: str = ""  # release body text (truncated by the caller)

    def to_dict(self) -> Dict[str, Any]:
        """Return a plain-dict form of this analysis for JSON serialization."""
        return asdict(self)
|
||||
|
||||
|
||||
def fetch_github_releases(repo: str, token: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
    """Fetch latest releases from GitHub API."""
    import urllib.request
    import urllib.error

    # Build the request up front; add auth only when a token is supplied.
    request_headers = {"Accept": "application/vnd.github.v3+json"}
    if token:
        request_headers["Authorization"] = f"token {token}"
    request = urllib.request.Request(
        f"https://api.github.com/repos/{repo}/releases?per_page={limit}",
        headers=request_headers,
    )

    try:
        with urllib.request.urlopen(request, timeout=30) as response:
            return json.loads(response.read())
    except urllib.error.HTTPError as e:
        print(f"Error fetching releases for {repo}: HTTP {e.code}", file=sys.stderr)
    except Exception as e:
        print(f"Error fetching releases for {repo}: {e}", file=sys.stderr)
    # Any failure degrades to "no releases" so one bad repo doesn't abort the run.
    return []
|
||||
|
||||
|
||||
def categorize_changelog(body: str) -> Dict[str, List[str]]:
    """Categorize release note lines into features, fixes, and other.

    Lines under a recognized markdown section header ("### Features",
    "## Fixed", ...) inherit that section's bucket. Lines outside any
    section are classified by leading keyword; the list-bullet marker
    ("-", "*", "+") is stripped first so entries like "- Added X" are
    recognized (the previous `^`-anchored regex could never match a
    bulleted line). Unmatched lines fall into "other". Each bucket is
    de-duplicated while preserving first-seen order.

    Args:
        body: Raw release-note text (markdown); may be empty.

    Returns:
        Dict with keys "features", "fixes", "other" mapping to line lists.
    """
    categories: Dict[str, List[str]] = {
        "features": [],
        "fixes": [],
        "other": []
    }

    if not body:
        return categories

    current_section = None

    # Section header patterns
    feature_patterns = re.compile(r'^(?:features?|new|add|enhancement)s?', re.IGNORECASE)
    fix_patterns = re.compile(r'^(?:fix(?:es|ed)?|bug|patch|correction)', re.IGNORECASE)

    for line in body.split('\n'):
        stripped = line.strip()
        if not stripped:
            continue

        # Check for section headers (e.g., "### Features", "## Added")
        header_match = re.match(r'^#{1,3}\s+(.+)$', stripped)
        if header_match:
            header = header_match.group(1).lower()
            if feature_patterns.search(header):
                current_section = "features"
            elif fix_patterns.search(header):
                current_section = "fixes"
            else:
                current_section = None
            continue

        # Categorize based on the active section, else infer from keywords.
        if current_section:
            categories[current_section].append(stripped)
        else:
            # Drop a leading list bullet so the `^` keyword anchors can match.
            text = re.sub(r'^[-*+]\s+', '', stripped)
            if re.search(r'^(?:added|new|feature|introdu)', text, re.IGNORECASE):
                categories["features"].append(stripped)
            elif re.search(r'^(?:fix|bug|patch|resolved)', text, re.IGNORECASE):
                categories["fixes"].append(stripped)
            else:
                categories["other"].append(stripped)

    # Deduplicate within categories, preserving first-seen order
    for cat in categories:
        categories[cat] = list(dict.fromkeys(categories[cat]))

    return categories
|
||||
|
||||
|
||||
def detect_breaking_changes(body: str) -> List[str]:
    """Detect and extract potential breaking change indicators."""
    # Keywords that suggest breaking changes
    breaking_keywords = re.compile(
        r'\b(?:BREAKING|breaking\s+change|backward\s+incompatible|'
        r'removed\s+.*?API|deprecated.*?removed|'
        r'major\s+version|'
        r'not\s+backward\s+compatible)\b',
        re.IGNORECASE
    )
    # One entry per matching line, whitespace-trimmed, in document order.
    return [
        line.strip()
        for line in body.split('\n')
        if breaking_keywords.search(line)
    ]
|
||||
|
||||
|
||||
def analyze_releases(repos: List[str], token: Optional[str] = None, limit: int = 10) -> List[Dict[str, Any]]:
    """Fetch and analyze releases for all configured repos."""
    results: List[Dict[str, Any]] = []

    for raw_name in repos:
        name = raw_name.strip()
        if not name:
            # Skip blanks from e.g. trailing commas in --repos.
            continue

        for release_data in fetch_github_releases(name, token=token, limit=limit):
            # `body` may be None in the API payload; normalize to "".
            body = release_data.get('body') or ""
            analysis = ReleaseAnalysis(
                version=release_data.get('tag_name', 'unknown'),
                date=release_data.get('published_at', ''),
                url=release_data.get('html_url', ''),
                raw_body=body[:5000],  # Truncate for output size
            )
            # Categorize changes and flag breaking-change indicators.
            analysis.categories = categorize_changelog(body)
            analysis.breaking_change_flags = detect_breaking_changes(body)
            results.append(analysis.to_dict())

    return results
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, analyze releases, emit a JSON report."""
    parser = argparse.ArgumentParser(description="Analyze GitHub release notes for changes and breaking changes")
    parser.add_argument('--repos', required=True, help='Comma-separated list of GitHub repos (owner/repo)')
    parser.add_argument('--token', help='GitHub API token (or set GITHUB_TOKEN env var)')
    parser.add_argument('--limit', type=int, default=10, help='Max releases per repo (default: 10)')
    parser.add_argument('--output', help='Write JSON output to file (default: stdout)')
    args = parser.parse_args()

    repo_list = [item.strip() for item in args.repos.split(',')]
    # CLI token takes precedence; fall back to the environment.
    results = analyze_releases(
        repo_list,
        token=args.token or os.environ.get('GITHUB_TOKEN'),
        limit=args.limit,
    )

    payload = {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "repos": repo_list,
        "release_count": len(results),
        "releases": results
    }

    if not args.output:
        print(json.dumps(payload, indent=2))
        return
    with open(args.output, 'w') as f:
        json.dump(payload, f, indent=2)
    print(f"Wrote {len(results)} releases to {args.output}")
|
||||
|
||||
|
||||
# Script entry point: python3 scripts/release_note_analyzer.py --repos ...
if __name__ == '__main__':
    main()
|
||||
@@ -1,174 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
security_linter.py — Scan code for security vulnerabilities.
|
||||
|
||||
Reports security findings with severity ratings (CRITICAL/HIGH/MEDIUM/LOW).
|
||||
Outputs a JSON security lint report.
|
||||
|
||||
Usage:
|
||||
python3 security_linter.py --path .
|
||||
python3 security_linter.py --path . --output security_report.json
|
||||
python3 security_linter.py --path . --format json # default
|
||||
python3 security_linter.py --path . --format markdown
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Any, Optional
|
||||
|
||||
|
||||
# Severity levels for findings, ordered most to least severe; CRITICAL/HIGH
# drive the nonzero exit code in main().
SEVERITY_CRITICAL = "CRITICAL"
SEVERITY_HIGH = "HIGH"
SEVERITY_MEDIUM = "MEDIUM"
SEVERITY_LOW = "LOW"
|
||||
|
||||
|
||||
class SecurityFinding:
    """A single security issue located at a specific file and line."""

    # Attribute names, in the order they appear in the serialized dict.
    _FIELDS = ("file", "line", "issue", "severity", "cwe", "recommendation")

    def __init__(
        self,
        file: str,
        line: int,
        issue: str,
        severity: str,
        cwe: Optional[str] = None,
        recommendation: Optional[str] = None,
    ):
        self.file = file
        self.line = line
        self.issue = issue
        self.severity = severity
        self.cwe = cwe
        self.recommendation = recommendation

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the finding as a plain, JSON-friendly dict."""
        return {name: getattr(self, name) for name in self._FIELDS}
|
||||
|
||||
|
||||
# Pattern entries: (pattern_regex, description, severity, cwe, recommendation)
# Credential patterns are anchored on a variable-name keyword (password=...,
# api_key=...) so that arbitrary string literals are NOT flagged — the previous
# patterns matched ANY quoted string of 4+/6+ characters, flagging nearly every
# string in a scanned file. They carry an inline (?i) flag because scan_file
# applies re.search without flags.
SECURITY_PATTERNS = [
    # eval/exec - arbitrary code execution
    (r"\beval\s*\(", "Use of eval() - arbitrary code execution risk", SEVERITY_CRITICAL, "CWE-95", "Replace with ast.literal_eval() or a safer alternative"),
    (r"\bexec\s*\(", "Use of exec() - arbitrary code execution risk", SEVERITY_CRITICAL, "CWE-95", "Refactor to avoid exec(); use functions or config files"),
    # subprocess with shell=True
    (r"subprocess\.(?:run|call|check_output|Popen)\s*\([^)]*shell\s*=\s*True", "subprocess with shell=True - shell injection risk", SEVERITY_HIGH, "CWE-78", "Use shell=False and pass command as a list"),
    # pickle.loads - arbitrary code execution
    (r"pickle\.loads?\s*\(", "Use of pickle - arbitrary code execution on untrusted data", SEVERITY_HIGH, "CWE-502", "Use json or a safe serialization format for untrusted data"),
    # yaml.load without Loader
    (r"yaml\.load\s*\(", "yaml.load() - unsafe deserialization", SEVERITY_HIGH, "CWE-502", "Use yaml.safe_load()"),
    # tempfile.mktemp - insecure temp file creation
    (r"tempfile\.mktemp\s*\(", "tempfile.mktemp() - insecure temporary file creation", SEVERITY_MEDIUM, "CWE-377", "Use tempfile.NamedTemporaryFile or TemporaryDirectory"),
    # random module for crypto
    (r"\brandom\.(?:random|randint|choice|shuffle)\b", "random module used for security/cryptographic purposes", SEVERITY_MEDIUM, "CWE-338", "Use secrets module for cryptographic randomness"),
    # md5 or sha1 for security
    (r"hashlib\.(?:md5|sha1)\s*\(", "Weak hash function (MD5/SHA1) used for security/crypto", SEVERITY_MEDIUM, "CWE-327", "Use SHA-256 or better for cryptographic purposes"),
    # hardcoded credentials - keyword-anchored assignments only
    (r"(?i)\b(?:password|passwd|pwd)\s*=\s*['\"][^'\"]{4,}['\"]", "Hardcoded password detected", SEVERITY_HIGH, "CWE-259", "Use environment variables or a secrets manager"),
    (r"(?i)\b(?:api_key|apikey|secret|token|auth_token)\s*=\s*['\"][^'\"]{6,}['\"]", "Hardcoded API key or secret detected", SEVERITY_HIGH, "CWE-798", "Use environment variables or a secrets vault"),
    # SQL injection patterns - parentheses balanced
    (r"cursor\.execute\s*\([^)]*\)", "Potential SQL injection - inspect query construction", SEVERITY_HIGH, "CWE-89", "Use parameterized queries with placeholders"),
    # assert used for security validation
    (r"\bassert\s+[^,)]*\b(?:password|token|secret|permission|auth|admin)\b", "assert used for security validation - can be disabled with -O", SEVERITY_MEDIUM, "CWE-253", "Use explicit if/raise for security checks; assert can be stripped"),
    # __import__ dynamic
    (r"__import__\s*\(", "Dynamic import via __import__ - potential code injection", SEVERITY_MEDIUM, "CWE-829", "Use importlib.import_module with validated module names"),
]
|
||||
|
||||
|
||||
def scan_file(path: Path) -> List[SecurityFinding]:
    """Scan one file line-by-line against SECURITY_PATTERNS."""
    results: List[SecurityFinding] = []
    try:
        with open(path, "r", encoding="utf-8", errors="ignore") as handle:
            content_lines = handle.readlines()
    except (OSError, UnicodeDecodeError):
        # Unreadable files contribute no findings.
        return results

    # Compile each pattern once per file instead of once per (line, pattern) pair.
    compiled = [
        (re.compile(pattern), issue, severity, cwe, recommendation)
        for pattern, issue, severity, cwe, recommendation in SECURITY_PATTERNS
    ]

    for line_num, text in enumerate(content_lines, start=1):
        for matcher, issue, severity, cwe, recommendation in compiled:
            if matcher.search(text):
                results.append(
                    SecurityFinding(
                        file=str(path),
                        line=line_num,
                        issue=issue,
                        severity=severity,
                        cwe=cwe,
                        recommendation=recommendation,
                    )
                )
    return results
|
||||
|
||||
|
||||
def scan_directory(path: Path, extensions=None) -> List[SecurityFinding]:
    """Recursively scan files under `path` whose suffix is in `extensions`."""
    # Default to Python sources when no extension filter is given.
    ext_filter = {".py"} if extensions is None else extensions
    if not path.exists():
        raise FileNotFoundError(f"Path not found: {path}")
    findings: List[SecurityFinding] = []
    for candidate in path.rglob("*"):
        if candidate.is_file() and candidate.suffix in ext_filter:
            findings.extend(scan_file(candidate))
    return findings
|
||||
|
||||
|
||||
def generate_json_report(findings: List[SecurityFinding]) -> Dict[str, Any]:
    """Build the JSON-serializable report: total, per-severity counts, details."""
    buckets: Dict[str, List[Dict[str, Any]]] = {
        SEVERITY_CRITICAL: [],
        SEVERITY_HIGH: [],
        SEVERITY_MEDIUM: [],
        SEVERITY_LOW: [],
    }
    for finding in findings:
        buckets[finding.severity].append(finding.to_dict())
    counts = {level: len(items) for level, items in buckets.items()}
    return {
        "security_scan": {
            "total_findings": sum(counts.values()),
            "by_severity": counts,
            "findings": [finding.to_dict() for finding in findings],
        }
    }
|
||||
|
||||
|
||||
def generate_markdown_report(findings: List[SecurityFinding]) -> str:
    """Render findings as a Markdown report grouped by severity.

    Each finding line now includes its CWE id and recommendation when
    present: findings carry a `cwe` field and the accompanying test suite
    asserts the CWE (e.g. "CWE-95") appears in the rendered report, but the
    previous implementation dropped it.
    """
    by_severity = {SEVERITY_CRITICAL: [], SEVERITY_HIGH: [], SEVERITY_MEDIUM: [], SEVERITY_LOW: []}
    for f in findings:
        by_severity[f.severity].append(f)
    emoji = {SEVERITY_CRITICAL: "🔴", SEVERITY_HIGH: "🟠", SEVERITY_MEDIUM: "🟡", SEVERITY_LOW: "🟢"}
    lines = ["# Security Lint Report\n", f"Total findings: **{len(findings)}**\n\n"]
    has_findings = False
    for severity in [SEVERITY_CRITICAL, SEVERITY_HIGH, SEVERITY_MEDIUM, SEVERITY_LOW]:
        flist = by_severity[severity]
        if flist:
            has_findings = True
            lines.append(f"## {emoji[severity]} {severity} ({len(flist)} findings)\n")
            for f in flist:
                entry = f"- **{f.file}:{f.line}** — {f.issue}"
                if f.cwe:
                    entry += f" ({f.cwe})"  # surface the CWE id in the report
                if f.recommendation:
                    entry += f" — {f.recommendation}"
                lines.append(entry)
            lines.append("")
    if not has_findings:
        lines.append("✅ No security issues found.\n")
    return "\n".join(lines)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: scan, render, write/print, exit nonzero on CRITICAL/HIGH."""
    parser = argparse.ArgumentParser(description="Scan code for security vulnerabilities")
    parser.add_argument("--path", type=Path, default=Path("."), help="Path to scan (file or directory)")
    parser.add_argument("--output", "-o", type=Path, default=None, help="Output file")
    parser.add_argument("--format", choices=["json", "markdown"], default="json", help="Output format (default: json)")
    parser.add_argument("--extensions", type=str, default=".py", help="Comma-separated file extensions (default: .py)")
    args = parser.parse_args()

    wanted_exts = {e.strip() for e in args.extensions.split(",")}
    findings = scan_directory(args.path, extensions=wanted_exts)

    if args.format == "json":
        rendered = json.dumps(generate_json_report(findings), indent=2)
    else:
        rendered = generate_markdown_report(findings)

    if args.output:
        args.output.write_text(rendered, encoding="utf-8")
    else:
        print(rendered)

    # Nonzero exit when anything CRITICAL/HIGH was found (CI-friendly).
    severe = sum(1 for f in findings if f.severity in (SEVERITY_CRITICAL, SEVERITY_HIGH))
    sys.exit(1 if severe > 0 else 0)
|
||||
|
||||
|
||||
# Script entry point: python3 security_linter.py --path .
if __name__ == "__main__":
    main()
|
||||
@@ -1,95 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for scripts/security_linter.py — Issue #158: 9.4 Security Linter."""
|
||||
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
|
||||
|
||||
from security_linter import (
|
||||
scan_file,
|
||||
scan_directory,
|
||||
generate_json_report,
|
||||
generate_markdown_report,
|
||||
SEVERITY_CRITICAL,
|
||||
SEVERITY_HIGH,
|
||||
SEVERITY_MEDIUM,
|
||||
SEVERITY_LOW,
|
||||
)
|
||||
|
||||
|
||||
def test_scan_file_detects_eval():
    """eval() use must be reported as a CRITICAL finding.

    Uses TemporaryDirectory instead of NamedTemporaryFile(delete=False):
    the old form leaked the temp file (never unlinked) and re-opening a
    still-open NamedTemporaryFile is not portable to Windows.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        target = Path(tmpdir) / "sample.py"
        target.write_text("result = eval(user_input)\n")
        findings = scan_file(target)
    assert len(findings) >= 1
    assert findings[0].severity == SEVERITY_CRITICAL
    assert "eval" in findings[0].issue.lower()
|
||||
|
||||
|
||||
def test_scan_file_detects_hardcoded_password():
    """A hardcoded password assignment must yield a HIGH finding.

    TemporaryDirectory avoids the temp-file leak of delete=False and the
    Windows re-open restriction on open NamedTemporaryFiles.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        target = Path(tmpdir) / "sample.py"
        target.write_text("password = 'supersecret123'\n")
        findings = scan_file(target)
    assert any(f.severity == SEVERITY_HIGH for f in findings)
|
||||
|
||||
|
||||
def test_scan_file_detects_subprocess_shell_true():
    """subprocess with shell=True must yield a HIGH shell-injection finding.

    TemporaryDirectory avoids the temp-file leak of delete=False and the
    Windows re-open restriction on open NamedTemporaryFiles.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        target = Path(tmpdir) / "sample.py"
        target.write_text("subprocess.run(cmd, shell=True)\n")
        findings = scan_file(target)
    assert any(f.severity == SEVERITY_HIGH and "shell" in f.issue.lower() for f in findings)
|
||||
|
||||
|
||||
def test_scan_file_detects_pickle():
    """pickle.loads use must yield a HIGH deserialization finding.

    TemporaryDirectory avoids the temp-file leak of delete=False and the
    Windows re-open restriction on open NamedTemporaryFiles.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        target = Path(tmpdir) / "sample.py"
        target.write_text("data = pickle.loads(raw)\n")
        findings = scan_file(target)
    assert any(f.severity == SEVERITY_HIGH and "pickle" in f.issue.lower() for f in findings)
|
||||
|
||||
|
||||
def test_scan_file_detects_yaml_load():
    """yaml.load without a Loader must be flagged as unsafe deserialization.

    TemporaryDirectory avoids the temp-file leak of delete=False and the
    Windows re-open restriction on open NamedTemporaryFiles.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        target = Path(tmpdir) / "sample.py"
        target.write_text("config = yaml.load(stream)\n")
        findings = scan_file(target)
    assert any("yaml.load" in f.issue.lower() for f in findings)
|
||||
|
||||
|
||||
def test_json_report_structure():
    """JSON report exposes totals and per-severity counts."""
    from security_linter import SecurityFinding
    report = generate_json_report([
        SecurityFinding("foo.py", 1, "eval() used", SEVERITY_CRITICAL, "CWE-95", "Use ast.literal_eval"),
        SecurityFinding("bar.py", 10, "hardcoded password", SEVERITY_HIGH, "CWE-259", None),
    ])
    assert "security_scan" in report
    scan = report["security_scan"]
    assert scan["total_findings"] == 2
    assert scan["by_severity"][SEVERITY_CRITICAL] == 1
    assert scan["by_severity"][SEVERITY_HIGH] == 1
|
||||
|
||||
|
||||
def test_markdown_report_contains_severity():
    """Markdown report shows the severity, issue text, and CWE id."""
    from security_linter import SecurityFinding
    finding = SecurityFinding("test.py", 1, "eval() used", SEVERITY_CRITICAL, "CWE-95", "Use ast.literal_eval")
    md = generate_markdown_report([finding])
    assert "CRITICAL" in md or "🔴" in md
    assert "eval() used" in md
    assert "CWE-95" in md
|
||||
|
||||
|
||||
def test_scan_directory_empty_dir():
    """Scanning an empty directory yields no findings."""
    with tempfile.TemporaryDirectory() as empty_dir:
        assert scan_directory(Path(empty_dir)) == []
|
||||
|
||||
|
||||
def test_scan_file_no_issues():
|
||||
safe_code =
|
||||
107
tests/test_release_note_analyzer.py
Normal file
107
tests/test_release_note_analyzer.py
Normal file
@@ -0,0 +1,107 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for scripts/release_note_analyzer.py"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
# Make the repo root importable, then load the analyzer module directly from
# its file path (scripts/ is not a package, so a plain import would fail).
sys.path.insert(0, os.path.join(os.path.dirname(__file__) or ".", ".."))
import importlib.util
spec = importlib.util.spec_from_file_location(
    "release_note_analyzer",
    os.path.join(os.path.dirname(__file__) or ".", "..", "scripts", "release_note_analyzer.py")
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)

# Local aliases for the functions under test.
categorize_changelog = mod.categorize_changelog
detect_breaking_changes = mod.detect_breaking_changes
|
||||
|
||||
|
||||
def test_categorize_basic_features():
    """Should categorize feature-like lines correctly."""
    body = """
### Features
- Added new API endpoint
- Introduced batch processing

### Bug Fixes
- Fixed memory leak
"""
    categories = categorize_changelog(body)
    features = categories["features"]
    fixes = categories["fixes"]
    assert len(categories["features"]) >= 1, f"Got features: {categories['features']}"
    assert any(("batch" in entry) or ("API" in entry) for entry in features)
    assert any("memory leak" in entry for entry in fixes)
    print("PASS: test_categorize_basic_features")
|
||||
|
||||
|
||||
def test_categorize_fixes():
    """Should categorize bug fix lines correctly."""
    body = """
## Fixed
- Resolved crash on startup
- Patched security vulnerability

## Changed
- Updated documentation
"""
    categories = categorize_changelog(body)
    # Both entries under "## Fixed" must land in the fixes bucket.
    for keyword in ("crash", "security"):
        assert any(keyword in entry for entry in categories["fixes"]), f"Got fixes: {categories['fixes']}"
    print("PASS: test_categorize_fixes")
|
||||
|
||||
|
||||
def test_categorize_other():
    """Uncategorized lines should go to 'other'."""
    leftovers = categorize_changelog("- Some random note\n- Another note")["other"]
    assert len(leftovers) >= 2
    print("PASS: test_categorize_other")
|
||||
|
||||
|
||||
def test_detect_breaking_changes():
    """Should flag lines containing breaking change keywords."""
    body = """
## Features
- Added new feature

## Breaking Changes
- Removed deprecated API endpoint
This is a BREAKING CHANGE: you must update your clients.

We also removed support for Python 3.8.
"""
    flags = detect_breaking_changes(body)
    assert len(flags) >= 2, f"Expected >=2 breaking flags, got {len(flags)}: {flags}"
    for needle in ("deprecated API", "BREAKING CHANGE"):
        assert any(needle in f for f in flags), f"Missing: {flags}"
    print("PASS: test_detect_breaking_changes")
|
||||
|
||||
|
||||
def test_detect_breaking_changes_case_insensitive():
    """Breaking change detection should be case-insensitive."""
    flags = detect_breaking_changes("This is a breaking change: old behavior removed")
    assert len(flags) >= 1
    print("PASS: test_detect_breaking_changes_case_insensitive")
|
||||
|
||||
|
||||
def test_empty_body():
    """Empty body should produce empty categories and no breaking flags."""
    categories = categorize_changelog("")
    for bucket in ("features", "fixes"):
        assert categories[bucket] == []
    assert detect_breaking_changes("") == []
    print("PASS: test_empty_body")
|
||||
|
||||
|
||||
# Standalone runner: executes each test in order; any failed assert aborts.
if __name__ == "__main__":
    test_categorize_basic_features()
    test_categorize_fixes()
    test_categorize_other()
    test_detect_breaking_changes()
    test_detect_breaking_changes_case_insensitive()
    test_empty_body()
    print("\nAll release_note_analyzer tests passed.")
|
||||
Reference in New Issue
Block a user