Compare commits
1 Commits
step35/112
...
step35/103
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ceb7e0bd0c |
@@ -1,112 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Dependency Bloat Detector — find declared packages never imported
|
||||
|
||||
Usage:
|
||||
python3 scripts/dependency_bloat_detector.py
|
||||
python3 scripts/dependency_bloat_detector.py --output json
|
||||
"""
|
||||
|
||||
import ast
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Set, List, Tuple
|
||||
|
||||
|
||||
def extract_imports_from_py_files(repo_path: Path) -> Set[str]:
    """Walk the repo and return the set of top-level imported module names.

    Files inside virtualenvs, build output, VCS metadata, etc. are skipped,
    as are files that cannot be read or parsed.
    """
    skip_dirs = {".git", "venv", ".venv", "__pycache__", "node_modules",
                 "dist", "build", ".tox", "vendor"}
    found: Set[str] = set()
    for candidate in repo_path.rglob("*.py"):
        if any(part in skip_dirs for part in candidate.parts):
            continue
        try:
            tree = ast.parse(candidate.read_text(errors="ignore"))
        except Exception:
            # Unreadable or syntactically invalid file: ignore and move on.
            continue
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                # "import a.b.c" counts as an import of "a".
                found.update(alias.name.split('.')[0] for alias in node.names)
            elif isinstance(node, ast.ImportFrom) and node.module:
                # Relative imports (node.module is None) are intentionally skipped.
                found.add(node.module.split('.')[0])
    return found
|
||||
|
||||
|
||||
def parse_requirements_txt(req_path: Path) -> List[Tuple[str, str]]:
    """
    Parse requirements.txt and return a list of (package_name, raw_line).

    Strips version specifiers and inline comments; ignores blank lines,
    full-line comments, and pip option lines such as ``-r``/``-e``/
    ``--index-url``.  (The previous regex allowed a leading ``-``, so option
    lines leaked through as bogus package names, and it rejected dots, so
    names like ``zope.interface`` were truncated.)

    Returns an empty list when the file does not exist.
    """
    if not req_path.exists():
        return []
    declared: List[Tuple[str, str]] = []
    for line in req_path.read_text().splitlines():
        line = line.strip()
        # Skip blanks, comments, and pip options (-r, -e, --index-url, ...).
        if not line or line.startswith('#') or line.startswith('-'):
            continue
        # Strip inline comments
        line = line.split('#')[0].strip()
        # Package name: must start alphanumeric; dots, dashes and
        # underscores allowed afterwards (PEP 508 names permit dots).
        pkg_match = re.match(r'^([A-Za-z0-9][A-Za-z0-9._-]*)', line)
        if pkg_match:
            declared.append((pkg_match.group(1), line))
    return declared
|
||||
|
||||
|
||||
def main():
    """Scan the repo, diff declared vs imported packages, print a report.

    Exits with status 0 when no unused dependencies are found, 1 otherwise.
    """
    repo_path = Path('.').resolve()
    req_path = repo_path / 'requirements.txt'

    # 1. Scan imports
    used = extract_imports_from_py_files(repo_path)

    # 2. Parse declared deps
    declared = parse_requirements_txt(req_path)
    declared_names = [pkg for pkg, _ in declared]

    # PEP 503-style normalization so that e.g. declared "Flask" matches an
    # imported "flask" and '-'/'_'/'.' are treated as equivalent.  NOTE:
    # packages whose import name differs entirely from the distribution
    # name (python-dateutil -> dateutil) still cannot be matched without
    # installed-package metadata.
    def _norm(name: str) -> str:
        return re.sub(r'[-_.]+', '_', name).lower()

    used_norm = {_norm(n) for n in used}
    declared_norm = {_norm(n) for n in declared_names}

    # Stdlib modules are importable without being declared, so exclude them
    # from the "undeclared imports" report (sys.stdlib_module_names is
    # 3.10+; on older interpreters fall back to no filtering).
    stdlib = set(getattr(sys, 'stdlib_module_names', ()))

    # 3. Compare on normalized names to avoid case/hyphen false positives.
    unused = [(raw, pkg) for pkg, raw in declared if _norm(pkg) not in used_norm]
    missing_from_req = [imp for imp in used
                        if _norm(imp) not in declared_norm and imp not in stdlib]

    # 4. Output
    print("=" * 60)
    print("  DEPENDENCY BLOAT DETECTOR")
    print("=" * 60)
    print(f"  Repository:   {repo_path.name}")
    print(f"  Requirements: {req_path}")
    print(f"  Python files: {len(list(repo_path.rglob('*.py')))}")
    print()
    print(f"  Declared packages ({len(declared_names)}): {declared_names}")
    print(f"  Imported packages ({len(used)}): {sorted(used)}")
    print()
    if unused:
        print("  UNUSED DEPENDENCIES (bloat):")
        for raw, pkg in unused:
            print(f"    ✗ {raw}")
    else:
        print("  No unused dependencies detected.")
    print()
    if missing_from_req:
        print("  UNDECLARED IMPORTS (used but not in requirements.txt):")
        for imp in missing_from_req:
            print(f"    ! {imp}")
        print()
    print("=" * 60)

    # Exit code: 0 if no bloat, 1 if unused deps found
    sys.exit(1 if unused else 0)
|
||||
|
||||
|
||||
# Script entry point: exit status signals whether dependency bloat was found.
if __name__ == "__main__":
    main()
|
||||
131
scripts/validate_doc_links.py
Executable file
131
scripts/validate_doc_links.py
Executable file
@@ -0,0 +1,131 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Doc Link Validator — Extract and verify all documentation links.
|
||||
Issue: #103 — 4.8: Doc Link Validator
|
||||
|
||||
Acceptance:
|
||||
Extracts links from docs | HTTP HEAD check | Reports broken links
|
||||
(Weekly cron/CI integration out of scope for this minimal script)
|
||||
"""
|
||||
|
||||
import argparse
import json
import re
import sys
from pathlib import Path
from typing import List, Tuple, Optional
from urllib.error import URLError, HTTPError
from urllib.parse import urlparse
from urllib.request import Request, urlopen
|
||||
|
||||
# Markdown link patterns
INLINE_LINK_RE = re.compile(r'\[[^\]]*\]\(([^)\s]+)(?:\s+"[^"]*")?\)')
AUTOLINK_RE = re.compile(r'<([^>]+)>')


def extract_links(content: str) -> List[str]:
    """Return every URL in *content*: inline-style links ``[text](url)``
    first, then autolinks ``<url>``, each group in document order."""
    found: List[str] = []
    for pattern in (INLINE_LINK_RE, AUTOLINK_RE):
        found.extend(match.group(1) for match in pattern.finditer(content))
    return found
|
||||
|
||||
|
||||
def is_ignorable(url: str, ignore_prefixes: List[str]) -> bool:
    """Decide whether *url* should be skipped rather than checked.

    Skipped: non-HTTP(S) schemes (mailto:, anchors, relative paths),
    localhost/loopback hosts, RFC 1918 private IPv4 ranges, and any URL
    matching one of the caller-supplied prefixes.
    """
    parsed = urlparse(url)
    if parsed.scheme != 'http' and parsed.scheme != 'https':
        return True
    hostname = parsed.netloc.split(':')[0]
    if hostname in ('localhost', '127.0.0.1', '::1'):
        return True
    # Private IPv4 ranges: 10/8, 192.168/16, 172.16/12.
    if re.match(r'^(10\.|192\.168\.|172\.(1[6-9]|2[0-9]|3[01])\.)', hostname):
        return True
    return any(url.startswith(prefix) for prefix in ignore_prefixes)
|
||||
|
||||
|
||||
def check_url(url: str, timeout: float = 8.0) -> Tuple[bool, Optional[int], str]:
    """Probe *url* and return (is_alive, http_status_or_None, reason).

    Strategy: try a cheap HEAD request first; if the server rejects HEAD
    with 405/403/400, retry with a ranged GET (first two bytes only) before
    declaring the link broken.  Never raises — all failures are reported
    via the returned tuple.
    """
    try:
        req = Request(url, method='HEAD')
        req.add_header('User-Agent', 'DocLinkValidator/1.0')
        try:
            with urlopen(req, timeout=timeout) as resp:
                return True, resp.getcode(), "OK"
        except HTTPError as e:
            # Some servers disallow HEAD; these codes commonly mean
            # "method not accepted" rather than "link dead" — retry as GET.
            if e.code in (405, 403, 400):
                req2 = Request(url, method='GET')
                req2.add_header('User-Agent', 'DocLinkValidator/1.0')
                # Ask for only the first two bytes to keep the GET cheap.
                req2.add_header('Range', 'bytes=0-1')
                with urlopen(req2, timeout=timeout) as resp2:
                    return True, resp2.getcode(), "OK via GET"
            return False, e.code, e.reason
    except URLError as e:
        # NOTE: HTTPError subclasses URLError, so if the GET retry above
        # fails with an HTTP error it lands here and the status code is
        # reported as None — only the reason string is preserved.
        return False, None, str(e.reason) if hasattr(e, 'reason') else str(e)
    except Exception as e:
        # Catch-all boundary (bad URLs, SSL errors, timeouts, ...).
        return False, None, str(e)
|
||||
|
||||
|
||||
def main() -> int:
    """Scan markdown files under --root, check each unique external link,
    and report the broken ones.

    Returns the process exit code: 1 when no markdown files are found, or
    when --fail-on-broken is set and broken links exist; 0 otherwise.
    Requires the module-level ``import json`` for the --json report.
    """
    p = argparse.ArgumentParser(description="Validate documentation links")
    p.add_argument('--root', default='.', help='Repository root')
    p.add_argument('--fail-on-broken', action='store_true', help='Exit non-zero if broken links found')
    p.add_argument('--json', action='store_true', help='Emit JSON report')
    p.add_argument('--ignore', default='', help='Comma-separated URL prefixes to ignore')
    args = p.parse_args()

    root = Path(args.root).resolve()
    ignore_prefixes = [x.strip() for x in args.ignore.split(',') if x.strip()]

    md_files = list(root.rglob('*.md'))
    if not md_files:
        print("No markdown files found.", file=sys.stderr)
        return 1

    print(f"Scanning {len(md_files)} markdown files")

    # Collect (file, url) pairs so broken-link reports can name the file.
    all_links: List[Tuple[Path, str]] = []
    for md in md_files:
        content = md.read_text(errors='replace')
        for m in INLINE_LINK_RE.finditer(content):
            all_links.append((md, m.group(1)))
        for m in AUTOLINK_RE.finditer(content):
            all_links.append((md, m.group(1)))

    print(f"Raw link occurrences: {len(all_links)}")

    # De-duplicate by URL, keep first file context
    first_file: dict[str, Path] = {}
    unique_urls: List[str] = []
    for file, url in all_links:
        if url not in first_file:
            first_file[url] = file
            unique_urls.append(url)

    print(f"Unique URLs to check: {len(unique_urls)}")

    broken: List[dict] = []
    ok_count = 0
    for url in unique_urls:
        # Local/private/non-HTTP URLs are skipped and counted in neither bucket.
        if is_ignorable(url, ignore_prefixes):
            continue
        ok, code, reason = check_url(url)
        if ok:
            ok_count += 1
        else:
            broken.append({"url": url, "file": str(first_file[url]), "error": reason})

    print(f"OK: {ok_count} Broken: {len(broken)}")
    if broken:
        print("\nBroken links:")
        for b in broken:
            print(f"  [{b['file']}] {b['url']} — {b['error']}")

    if args.json:
        # Machine-readable report appended after the human-readable one.
        print(json.dumps({"scanned": len(unique_urls), "ok": ok_count,
                          "broken": len(broken), "broken_links": broken}, indent=2))

    return 1 if (args.fail_on_broken and broken) else 0
|
||||
|
||||
|
||||
# Script entry point: propagate main()'s exit code to the shell.
if __name__ == '__main__':
    sys.exit(main())
|
||||
Reference in New Issue
Block a user