Some checks failed
Test / pytest (pull_request) Failing after 7s
- scripts/api_doc_generator.py: AST-based scanner for scripts/ Python modules
- docs/API.md: generated API reference (33 modules, ~500 lines)
- tests/test_api_doc_generator.py: 12 smoke tests (all passing)

The generator extracts module docstrings and public function signatures (name, args, summary) and produces one markdown table per script, consolidated into a single document per repo (docs/API.md). Closes #98.
149 lines
6.2 KiB
Python
149 lines
6.2 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
Smoke tests for API Doc Generator — Issue #98
|
|
|
|
Validates that the generator runs, produces docs/API.md, and that
|
|
the generated markdown contains expected sections for the known scripts.
|
|
"""
|
|
|
|
from __future__ import annotations

import json
import subprocess
import sys
from pathlib import Path

import pytest
|
|
|
|
# Resolve repo root
|
|
REPO_ROOT = Path(__file__).resolve().parents[1]
|
|
SCRIPTS_DIR = REPO_ROOT / "scripts"
|
|
DOCS_DIR = REPO_ROOT / "docs"
|
|
API_MD = DOCS_DIR / "API.md"
|
|
GENERATOR = SCRIPTS_DIR / "api_doc_generator.py"
|
|
|
|
|
|
# ─── Generator presence ─────────────────────────────────────────────────────────
|
|
class TestGeneratorPresence:
|
|
def test_generator_script_exists(self):
|
|
assert GENERATOR.exists(), f"Missing: {GENERATOR}"
|
|
|
|
def test_generator_is_executable(self):
|
|
with open(GENERATOR) as f:
|
|
first = f.readline().strip()
|
|
assert first.startswith("#!"), "Missing shebang"
|
|
assert "python" in first.lower()
|
|
|
|
|
|
# ─── API.md generation ──────────────────────────────────────────────────────────
|
|
class TestAPIDocGeneration:
    """Run the generator end-to-end and validate docs/API.md content."""

    def test_generator_runs_successfully(self):
        """Run the generator and verify exit code 0."""
        result = subprocess.run(
            [sys.executable, str(GENERATOR)],
            capture_output=True, text=True, cwd=REPO_ROOT, timeout=30
        )
        # Bug fix: the message used doubled braces ({{...}}), which an
        # f-string renders as literal "{result.returncode}" instead of the value.
        assert result.returncode == 0, (
            f"Generator failed (code {result.returncode})\n"
            f"STDERR: {result.stderr[:500]}"
        )

    def test_api_md_is_created(self):
        """docs/API.md must exist after generation."""
        assert API_MD.exists(), f"Missing output: {API_MD}"

    def test_api_md_is_not_empty(self):
        """Generated markdown must have substantial content."""
        content = API_MD.read_text(encoding="utf-8")
        assert len(content) > 1000, "API.md is suspiciously small"

    def test_api_md_has_expected_structure(self):
        """Top-level heading and per-script table markers must be present."""
        content = API_MD.read_text(encoding="utf-8")
        assert "# Compounding Intelligence — Scripts API Reference" in content
        assert "## `scripts/" in content
        assert "| Function | Signature | Description |" in content

    def test_api_md_covers_expected_scripts(self):
        """At minimum the core scripts should be documented."""
        content = API_MD.read_text(encoding="utf-8")
        # Core scripts that must each get a "## `scripts/...`" section.
        core = ["scripts/harvester.py", "scripts/bootstrapper.py",
                "scripts/session_reader.py", "scripts/dedup.py"]
        for rel in core:
            assert f"## `{rel}`" in content, f"Missing section for {rel}"

    def test_api_md_contains_function_names(self):
        """Spot-check: known public functions from key modules must appear."""
        content = API_MD.read_text(encoding="utf-8")
        checks = [
            ("harvester", "read_session"),
            ("bootstrapper", "load_index"),
            ("session_reader", "extract_conversation"),
            ("dedup", "normalize_text"),
        ]
        for module_stem, func_name in checks:
            assert f"| `{func_name}` |" in content, (
                f"Missing function {func_name} from {module_stem}"
            )
|
|
|
|
|
|
# ─── Idempotence / --check ─────────────────────────────────────────────────────
|
|
class TestIdempotence:
    """The generator's --check mode must detect whether API.md is current."""

    def test_check_flag_passes_when_current(self):
        """`--check` should exit 0 immediately after generation."""
        # Regenerate first so this test does not depend on test-run order.
        subprocess.run([sys.executable, str(GENERATOR)],
                       capture_output=True, cwd=REPO_ROOT, timeout=30)
        result = subprocess.run(
            [sys.executable, str(GENERATOR), "--check"],
            capture_output=True, text=True, cwd=REPO_ROOT, timeout=30
        )
        # Bug fix: doubled braces ({{...}}) rendered literally in the f-string.
        assert result.returncode == 0, (
            f"--check failed\nSTDOUT: {result.stdout}\nSTDERR: {result.stderr[:200]}"
        )

    def test_check_fails_when_api_md_stale(self):
        """If docs/API.md is manually altered, --check should detect staleness."""
        # Generate fresh baseline first
        subprocess.run([sys.executable, str(GENERATOR)],
                       capture_output=True, cwd=REPO_ROOT, timeout=30)

        original = API_MD.read_text(encoding="utf-8")
        try:
            # Corrupt API.md slightly (append a line at the end)
            API_MD.write_text(original + "\n<!-- corrupted -->\n",
                              encoding="utf-8")

            # --check should now fail
            result = subprocess.run(
                [sys.executable, str(GENERATOR), "--check"],
                capture_output=True, text=True, cwd=REPO_ROOT, timeout=30
            )
            assert result.returncode != 0, "--check should detect stale API.md"
            combined = (result.stderr + result.stdout).lower()
            assert "out-of-date" in combined
        finally:
            # Bug fix: restore the clean state even when an assert above
            # fails, so a failing test no longer leaves the repo dirty.
            subprocess.run([sys.executable, str(GENERATOR)],
                           capture_output=True, cwd=REPO_ROOT, timeout=30)
        assert API_MD.read_text(encoding="utf-8") == original
|
|
|
|
# ─── JSON output ────────────────────────────────────────────────────────────────
|
|
class TestJSONOutput:
    """The generator's --json mode must emit a machine-readable summary."""

    def test_json_flag_emits_valid_json(self):
        """`--json` must print a JSON document with a sizable `modules` list."""
        result = subprocess.run(
            [sys.executable, str(GENERATOR), "--json"],
            capture_output=True, text=True, cwd=REPO_ROOT, timeout=30
        )
        assert result.returncode == 0
        payload = json.loads(result.stdout)
        assert "modules" in payload
        assert len(payload["modules"]) >= 30

    def test_json_has_expected_fields(self):
        """Each module entry must carry path, docstring, and functions keys."""
        result = subprocess.run(
            [sys.executable, str(GENERATOR), "--json"],
            capture_output=True, text=True, cwd=REPO_ROOT, timeout=30
        )
        # Robustness fix: fail with a clear message if the generator itself
        # errored, instead of a confusing JSONDecodeError on empty stdout.
        assert result.returncode == 0, f"--json failed: {result.stderr[:200]}"
        payload = json.loads(result.stdout)
        mod = payload["modules"][0]
        for key in ("path", "docstring", "functions"):
            # Bug fix: doubled braces ({{key}}) rendered literally here.
            assert key in mod, f"Missing key {key} in module payload"
|
|
|
|
|
|
# Allow running this file directly: `python tests/test_api_doc_generator.py`.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
|