Compare commits
2 Commits
burn/750-1
...
fix/662
| Author | SHA1 | Date | |
|---|---|---|---|
| b263c88259 | |||
| 00526b8e5f |
236
scripts/cron-audit.py
Normal file
236
scripts/cron-audit.py
Normal file
@@ -0,0 +1,236 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
cron-audit.py — Audit cron jobs across the fleet for error rates.
|
||||
|
||||
Categorizes jobs as healthy, transient errors, or systemic failures.
|
||||
Can disable jobs that have been erroring for 48+ hours.
|
||||
|
||||
Usage:
|
||||
python3 scripts/cron-audit.py --report # Full audit report
|
||||
python3 scripts/cron-audit.py --disable-stale # Disable 48h+ erroring jobs
|
||||
python3 scripts/cron-audit.py --json # Machine-readable
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import urllib.request
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from pathlib import Path
|
||||
|
||||
# Base URL of the Gitea forge used for API calls.
GITEA_URL = "https://forge.alexanderwhitestone.com"
# On-disk location of the personal access token (fallback: GITEA_TOKEN env var).
GITEA_TOKEN_PATH = Path.home() / ".config" / "gitea" / "token"

# Cron config locations
# Directories scanned recursively for cron job definition files.
CRON_DIRS = [
    Path.home() / "timmy-config" / "cron",
    Path.home() / "fleet-ops" / "cron",
    Path.home() / "burn-fleet" / "cron",
]

# File extensions treated as cron job configs.
CRON_EXTENSIONS = {".yml", ".yaml", ".json"}
def load_token() -> str:
    """Return the Gitea API token from disk, falling back to the environment.

    Reads GITEA_TOKEN_PATH when it exists; otherwise returns the
    GITEA_TOKEN environment variable (empty string if unset).
    """
    token_file = GITEA_TOKEN_PATH
    if not token_file.exists():
        # No token file on disk — fall back to the environment variable.
        return os.environ.get("GITEA_TOKEN", "")
    return token_file.read_text().strip()
def api_get(path: str, token: str) -> list | dict:
    """GET a Gitea API endpoint and return the decoded JSON body.

    Args:
        path: API path relative to /api/v1 (e.g. "/repos/x/y/issues").
        token: Gitea access token, sent as an Authorization header.

    Returns:
        The decoded JSON payload (list or dict). On any network, HTTP,
        timeout, or decode failure an empty list is returned so callers
        can iterate the result unconditionally.
    """
    req = urllib.request.Request(
        f"{GITEA_URL}/api/v1{path}",
        headers={"Authorization": f"token {token}"}
    )
    try:
        # Close the response explicitly; the original leaked the socket.
        with urllib.request.urlopen(req, timeout=20) as resp:
            return json.loads(resp.read())
    except (OSError, ValueError):
        # Narrowed from a bare `except Exception`: OSError covers
        # URLError/HTTPError/timeouts, ValueError covers JSONDecodeError.
        # Anything else is unexpected and should surface.
        return []
def find_cron_configs() -> list[dict]:
    """Find all cron job config files across repos.

    Scans each directory in CRON_DIRS recursively for files whose suffix is
    in CRON_EXTENSIONS and extracts `name:` / `schedule:` values with a
    line-based, YAML-ish parse (no YAML dependency). Only files that declare
    a name are reported; if a file declares several, the last one wins.

    Returns:
        One dict per named config: {"name", "file", "schedule", "repo"}.
    """
    configs = []
    # NOTE(review): seen_repos is never read — looks like dead code; confirm
    # and remove.
    seen_repos = set()

    for d in CRON_DIRS:
        if not d.exists():
            continue
        for f in sorted(d.rglob("*")):
            if f.suffix in CRON_EXTENSIONS and f.is_file():
                try:
                    content = f.read_text(errors="ignore")
                    # Simple YAML-like parse for name/schedule
                    name = ""
                    schedule = ""
                    for line in content.split("\n"):
                        line = line.strip()
                        if line.startswith("name:") or line.startswith("- name:"):
                            name = line.split(":", 1)[1].strip().strip("'\"")
                        if line.startswith("schedule:"):
                            schedule = line.split(":", 1)[1].strip().strip("'\"")
                    if name:
                        configs.append({
                            "name": name,
                            "file": str(f),
                            "schedule": schedule,
                            # "local" when the cron dir sits directly under $HOME.
                            "repo": d.parent.name if d.parent != Path.home() else "local",
                        })
                except OSError:
                    # Unreadable file — skip it rather than abort the scan.
                    pass

    return configs
def check_hermes_cron(token: str) -> list[dict]:
    """Check cron jobs via hermes CLI (if available) or Gitea cron configs.

    Merges two sources into one normalized job list:
      1. `hermes cron list --json` — live status, when the CLI is installed.
      2. Local config files from find_cron_configs() for jobs hermes doesn't
         already report; these get last_status="unknown".

    NOTE(review): `token` is currently unused here — presumably kept for
    signature symmetry with the other helpers; confirm before removing.
    """
    jobs = []

    # Try hermes cron list
    try:
        import subprocess
        result = subprocess.run(
            ["hermes", "cron", "list", "--json"],
            capture_output=True, text=True, timeout=15
        )
        if result.returncode == 0:
            hermes_jobs = json.loads(result.stdout)
            for job in hermes_jobs:
                # Normalize hermes output; missing fields get safe defaults.
                jobs.append({
                    "name": job.get("name", "unknown"),
                    "schedule": job.get("schedule", "unknown"),
                    "last_status": job.get("last_status", "unknown"),
                    "last_run": job.get("last_run", None),
                    "error_count": job.get("error_count", 0),
                    "enabled": job.get("enabled", True),
                    "source": "hermes",
                })
    except (FileNotFoundError, subprocess.TimeoutExpired, json.JSONDecodeError):
        # hermes missing, hung, or emitted non-JSON — fall through to configs.
        pass

    # Also scan local cron configs
    configs = find_cron_configs()
    for config in configs:
        # Check if already in hermes list
        if not any(j["name"] == config["name"] for j in jobs):
            jobs.append({
                "name": config["name"],
                "schedule": config["schedule"],
                "last_status": "unknown",
                "last_run": None,
                "error_count": 0,
                "enabled": True,
                "source": "config",
                "file": config["file"],
            })

    return jobs
def categorize_jobs(jobs: list[dict]) -> dict:
    """Categorize jobs into healthy, transient, systemic.

    Buckets:
      healthy   — last_status is "ok" or "success"
      transient — erroring with <= 3 errors, or any unrecognized status
      systemic  — erroring with > 3 errors
      unknown   — no status data available
    """
    buckets: dict[str, list[dict]] = {
        "healthy": [],
        "transient": [],
        "systemic": [],
        "unknown": [],
    }

    for job in jobs:
        status = job.get("last_status", "unknown")
        errors = job.get("error_count", 0)

        if status in ("ok", "success"):
            bucket = "healthy"
        elif status == "error":
            # A short error streak reads as flaky; a long one as broken.
            bucket = "systemic" if errors > 3 else "transient"
        elif status == "unknown":
            bucket = "unknown"
        else:
            # Unrecognized status strings default to transient.
            bucket = "transient"
        buckets[bucket].append(job)

    return buckets
def cmd_report(token: str, as_json: bool = False):
    """Print the audit report to stdout.

    Args:
        token: Gitea token, forwarded to check_hermes_cron.
        as_json: emit a machine-readable JSON summary instead of text.
    """
    jobs = check_hermes_cron(token)
    categories = categorize_jobs(jobs)

    if as_json:
        print(json.dumps({"total": len(jobs), "categories": {k: len(v) for k, v in categories.items()}, "jobs": jobs}, indent=2))
        return

    print(f"CRON AUDIT — {len(jobs)} jobs total")
    print(f"{'='*50}")
    print(f" Healthy: {len(categories['healthy'])}")
    print(f" Transient: {len(categories['transient'])} (retry likely)")
    print(f" Systemic: {len(categories['systemic'])} (needs investigation)")
    print(f" Unknown: {len(categories['unknown'])} (no status data)")

    if categories["systemic"]:
        print(f"\nSYSTEMIC FAILURES:")
        for job in categories["systemic"]:
            print(f" ❌ {job['name']}: errors={job.get('error_count', '?')} last={job.get('last_run', '?')}")

    if categories["transient"]:
        print(f"\nTRANSIENT ERRORS:")
        # Capped at 10 lines to keep the report readable.
        for job in categories["transient"][:10]:
            print(f" ⚠️ {job['name']}: status={job.get('last_status', '?')}")

    if categories["unknown"]:
        print(f"\nUNKNOWN STATUS ({len(categories['unknown'])} jobs):")
        # Also capped at 10 lines.
        for job in categories["unknown"][:10]:
            src = job.get("source", "?")
            print(f" ❓ {job['name']} (source: {src})")
def cmd_disable_stale(token: str):
    """Disable jobs erroring for 48+ hours.

    Only jobs categorized as systemic are considered; each one whose last
    run is older than 48 hours is disabled via `hermes cron disable`.
    Prints a summary count at the end.
    """
    jobs = check_hermes_cron(token)
    categories = categorize_jobs(jobs)
    disabled = 0

    for job in categories["systemic"]:
        last_run = job.get("last_run")
        if last_run:
            try:
                # Timestamps may carry a trailing "Z"; fromisoformat (pre-3.11)
                # needs an explicit +00:00 offset instead.
                last_dt = datetime.fromisoformat(last_run.replace("Z", "+00:00"))
                age_hours = (datetime.now(timezone.utc) - last_dt).total_seconds() / 3600
                if age_hours > 48:
                    print(f"DISABLING {job['name']}: erroring for {age_hours:.0f}h")
                    # hermes cron disable JOB_NAME
                    try:
                        import subprocess
                        subprocess.run(["hermes", "cron", "disable", job["name"]], capture_output=True, timeout=10)
                        disabled += 1
                    except FileNotFoundError:
                        print(f" (hermes CLI not available, skipping)")
            except ValueError:
                # Unparseable timestamp — leave the job alone.
                pass

    print(f"\nDisabled: {disabled} jobs")
def main():
    """CLI entry point: parse flags and dispatch to the right command."""
    arg_parser = argparse.ArgumentParser(description="Cron job audit")
    arg_parser.add_argument("--report", action="store_true")
    arg_parser.add_argument("--disable-stale", action="store_true")
    arg_parser.add_argument("--json", action="store_true", dest="as_json")
    opts = arg_parser.parse_args()

    api_token = load_token()

    # --disable-stale takes priority; everything else produces a report.
    if not opts.disable_stale:
        cmd_report(api_token, opts.as_json)
    else:
        cmd_disable_stale(api_token)


if __name__ == "__main__":
    main()
196
scripts/cron_audit.py
Normal file
196
scripts/cron_audit.py
Normal file
@@ -0,0 +1,196 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Cron Job Audit — Identify erroring jobs, categorize health, recommend actions.
|
||||
|
||||
Usage:
|
||||
python scripts/cron_audit.py # Full audit
|
||||
python scripts/cron_audit.py --disable-stale 48 # Disable jobs erroring 48+ hours
|
||||
python scripts/cron_audit.py --json # JSON output
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
|
||||
def get_cron_jobs() -> list[dict]:
    """Get all cron jobs from hermes.

    Runs `hermes cron list --all` and parses its stdout as JSON (accepts
    either {"jobs": [...]} or a bare list). If stdout is not JSON, falls
    back to reading ~/.hermes/cron/jobs.json directly. Any other failure
    logs to stderr and returns [].
    """
    try:
        result = subprocess.run(
            ["hermes", "cron", "list", "--all"],
            capture_output=True, text=True, timeout=30,
        )
        # Parse hermes cron list output
        jobs = []
        # hermes cron list outputs JSON via the tool
        try:
            data = json.loads(result.stdout)
            if isinstance(data, dict) and "jobs" in data:
                jobs = data["jobs"]
            elif isinstance(data, list):
                jobs = data
        except json.JSONDecodeError:
            # Fall back to parsing the jobs file directly
            jobs_file = Path.home() / ".hermes" / "cron" / "jobs.json"
            if jobs_file.exists():
                data = json.loads(jobs_file.read_text())
                jobs = data.get("jobs", [])
        return jobs
    except Exception as e:
        # Broad catch is deliberate: any failure degrades to "no jobs".
        print(f"Error fetching jobs: {e}", file=sys.stderr)
        return []
def categorize_job(job: dict) -> dict:
    """Categorize a job as healthy, transient error, or systemic error.

    Returns a summary dict with at least name/id/health/action; the error
    categories additionally carry staleness_hours and a truncated error
    message. Jobs erroring for more than 48h are systemic regardless of
    the error text.
    """
    status = job.get("last_status", "unknown")
    ran_at = job.get("last_run_at", "")
    err_text = job.get("last_error", "")
    job_id = job.get("id")
    display_name = job.get("name", job.get("id", "?"))

    # Disabled jobs need no action; a clean (or absent) status is healthy.
    if not job.get("enabled", True):
        return {"name": display_name, "id": job_id, "health": "disabled", "action": "none"}
    if status == "ok" or status is None:
        return {"name": display_name, "id": job_id, "health": "healthy", "action": "none"}
    if status != "error":
        # Neither ok nor error — no usable signal.
        return {"name": display_name, "id": job_id, "health": "unknown", "action": "investigate"}

    # Classify the error text: network-ish failures read as transient,
    # code/config failures as systemic.
    lowered = (err_text or "").lower()
    looks_transient = any(sig in lowered for sig in (
        "timeout", "connection", "network", "temporary", "rate limit", "429", "503",
    ))
    looks_systemic = any(sig in lowered for sig in (
        "not found", "import", "module", "attribute", "syntax", "permission", "404", "401",
    ))

    # Hours since the recorded last run, 0 when missing or unparseable.
    stale_h = 0
    if ran_at:
        try:
            then = datetime.fromisoformat(ran_at.replace("Z", "+00:00"))
            stale_h = (datetime.now(timezone.utc) - then).total_seconds() / 3600
        except Exception:
            pass

    base = {
        "name": display_name,
        "id": job_id,
        "staleness_hours": round(stale_h, 1),
        "error": err_text[:200] if err_text else "unknown",
    }
    if looks_systemic or stale_h > 48:
        return {**base, "health": "systemic", "action": "disable"}
    if looks_transient:
        return {**base, "health": "transient", "action": "monitor"}
    return {**base, "health": "unknown_error", "action": "investigate"}
def run_audit() -> dict:
    """Run full cron audit.

    Fetches the job list, categorizes every job, and returns a summary
    dict with per-bucket counts, the categorized jobs, and text
    recommendations.
    """
    raw_jobs = get_cron_jobs()

    if not raw_jobs:
        # Nothing to audit — hermes missing or returned an empty list.
        return {"total": 0, "categories": {}, "jobs": [], "recommendations": ["No jobs found or hermes not available"]}

    summaries = [categorize_job(j) for j in raw_jobs]

    # Group the summaries by their health bucket.
    by_health = {}
    for summary in summaries:
        by_health.setdefault(summary["health"], []).append(summary)

    def _count(bucket):
        # Number of jobs in a given health bucket (0 when absent).
        return len(by_health.get(bucket, []))

    recommendations = []
    if by_health.get("systemic"):
        recommendations.append(f"DISABLE {_count('systemic')} systemic error jobs (erroring 48+ hours)")
    if by_health.get("transient"):
        recommendations.append(f"MONITOR {_count('transient')} transient error jobs (network/timeout)")
    if by_health.get("unknown_error"):
        recommendations.append(f"INVESTIGATE {_count('unknown_error')} jobs with unclassified errors")

    return {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "total": len(raw_jobs),
        "healthy": _count("healthy"),
        "transient_errors": _count("transient"),
        "systemic_errors": _count("systemic"),
        "disabled": _count("disabled"),
        "unknown": _count("unknown_error") + _count("unknown"),
        "categories": {health: len(members) for health, members in by_health.items()},
        "jobs": summaries,
        "recommendations": recommendations,
    }
def to_markdown(audit: dict) -> str:
    """Render an audit dict (from run_audit) as a Markdown report.

    Sections: summary table, recommendations, systemic errors (with
    staleness and truncated error text), and transient errors.

    Args:
        audit: the dict returned by run_audit (non-empty form).

    Returns:
        The full report as a newline-joined string.
    """
    lines = [
        "# Cron Job Audit Report",
        "",
        # Trim the ISO timestamp to "YYYY-MM-DDTHH:MM" for readability.
        f"Generated: {audit['generated_at'][:16]}",
        "",
        "## Summary",
        "",
        "| Health | Count |",
        "|--------|-------|",
        f"| Healthy | {audit['healthy']} |",
        f"| Transient errors | {audit['transient_errors']} |",
        f"| Systemic errors | {audit['systemic_errors']} |",
        f"| Disabled | {audit['disabled']} |",
        f"| Unknown | {audit['unknown']} |",
        f"| **Total** | **{audit['total']}** |",
        "",
    ]

    if audit["recommendations"]:
        lines.extend(["## Recommendations", ""])
        for r in audit["recommendations"]:
            lines.append(f"- {r}")
        lines.append("")

    if audit.get("systemic_errors", 0) > 0:
        lines.extend(["## Systemic Errors (Recommend Disable)", ""])
        for j in audit["jobs"]:
            if j["health"] == "systemic":
                lines.append(f"- `{j['id']}`: {j['name']} (stale {j.get('staleness_hours', '?')}h)")
                lines.append(f"  Error: {j.get('error', 'unknown')}")

    if audit.get("transient_errors", 0) > 0:
        lines.extend(["", "## Transient Errors (Monitor)", ""])
        for j in audit["jobs"]:
            if j["health"] == "transient":
                lines.append(f"- `{j['id']}`: {j['name']} — {j.get('error', 'unknown')[:100]}")

    # BUG FIX: the original joined on a string literal containing a raw
    # newline (a SyntaxError); join on an explicit "\n" instead.
    return "\n".join(lines)
def main():
    """CLI entry point: run the audit and print Markdown or JSON."""
    import argparse
    parser = argparse.ArgumentParser(description="Cron job audit")
    parser.add_argument("--json", action="store_true")
    parser.add_argument("--disable-stale", type=int, default=0, help="Disable jobs stale N+ hours")
    args = parser.parse_args()

    # BUG FIX: --disable-stale was parsed but silently ignored; warn the
    # user instead of pretending jobs were disabled.
    if args.disable_stale:
        print(
            f"warning: --disable-stale {args.disable_stale} is not implemented; running report only",
            file=sys.stderr,
        )

    audit = run_audit()

    if args.json:
        print(json.dumps(audit, indent=2))
    else:
        print(to_markdown(audit))


if __name__ == "__main__":
    main()
@@ -1,138 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
normalize-code-blocks.py — Fix inconsistent indentation in training data code blocks.
|
||||
|
||||
When code blocks are embedded in JSONL as triple-quoted strings, indentation
|
||||
accumulates from the surrounding context. This script normalizes code block
|
||||
content using textwrap.dedent and consistent 4-space indentation.
|
||||
|
||||
Usage:
|
||||
python3 scripts/normalize-code-blocks.py training/data/preference_pairs.jsonl
|
||||
python3 scripts/normalize-code-blocks.py --dry-run training/data/*.jsonl
|
||||
python3 scripts/normalize-code-blocks.py --check training/data/*.jsonl # CI mode
|
||||
"""
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import textwrap
|
||||
from pathlib import Path
|
||||
|
||||
# Matches ```python ... ``` or ``` ... ``` blocks inside string values
# Named groups: "open" (opening fence + optional language tag + newline),
# "code" (block body, non-greedy), "close" (closing fence).
CODE_BLOCK_RE = re.compile(
    r"(?P<open>```(?:python|py|bash|sh|javascript|js|typescript|ts|go|rust|ruby)?\s*\n)"
    r"(?P<code>.*?)"
    r"(?P<close>```)",
    re.DOTALL,  # let "." span newlines so multi-line bodies match
)
def normalize_code_block(match: re.Match) -> str:
    """Normalize indentation in a single code block.

    Dedents the body with textwrap.dedent, trims blank lines at both ends,
    and re-wraps it in the original fences. Whitespace-only blocks are
    returned untouched.
    """
    fence_open = match.group("open")
    body = match.group("code")
    fence_close = match.group("close")

    if not body.strip():
        # Nothing but whitespace between the fences — keep as-is.
        return match.group(0)

    trimmed = textwrap.dedent(body).split("\n")
    # Drop blank lines from the top, then from the bottom.
    while trimmed and not trimmed[0].strip():
        del trimmed[0]
    while trimmed and not trimmed[-1].strip():
        del trimmed[-1]

    joined = "\n".join(trimmed)
    return f"{fence_open}{joined}\n{fence_close}"
def process_line(line: str) -> tuple:
    """Process a single JSONL line. Returns (new_line, num_fixes).

    Invalid JSON comes back unchanged with a fix count of 0. Valid JSON is
    deep-walked: every string value has its fenced code blocks normalized,
    and the record is re-serialized with a trailing newline. One fix is
    counted per changed string value.
    """
    try:
        record = json.loads(line)
    except json.JSONDecodeError:
        return line, 0

    fix_count = 0

    def _walk(node):
        # Recursively rewrite code blocks inside every string value.
        nonlocal fix_count
        if isinstance(node, dict):
            return {key: _walk(value) for key, value in node.items()}
        if isinstance(node, list):
            return [_walk(item) for item in node]
        if isinstance(node, str):
            rewritten = CODE_BLOCK_RE.sub(normalize_code_block, node)
            if rewritten != node:
                fix_count += 1
            return rewritten
        return node

    cleaned = _walk(record)
    return json.dumps(cleaned, ensure_ascii=False) + "\n", fix_count
def process_file(filepath: str, dry_run: bool = False) -> dict:
    """Process a single JSONL file. Returns stats dict.

    Runs process_line over every non-blank line and, unless dry_run is set,
    rewrites the file in place when any fixes were made.

    Returns:
        {"file", "lines", "fixes", "changed"} on success, or
        {"file", "error", "fixes", "lines"} when the path doesn't exist.
    """
    path = Path(filepath)
    if not path.exists():
        return {"file": str(filepath), "error": "not found", "fixes": 0, "lines": 0}

    lines = path.read_text(encoding="utf-8").splitlines()
    fixed_lines = []
    total_fixes = 0

    for line in lines:
        if not line.strip():
            # Preserve blank lines verbatim.
            fixed_lines.append(line)
            continue
        new_line, fixes = process_line(line)
        # process_line appends "\n"; strip it since we join with "\n" below.
        fixed_lines.append(new_line.rstrip("\n"))
        total_fixes += fixes

    if total_fixes > 0 and not dry_run:
        # Write back only when something changed, with a trailing newline.
        path.write_text("\n".join(fixed_lines) + "\n", encoding="utf-8")

    return {
        "file": str(filepath),
        "lines": len(lines),
        "fixes": total_fixes,
        "changed": total_fixes > 0,
    }
def main():
    """CLI entry point: normalize code blocks in the given JSONL files.

    --dry-run reports what would change without writing; --check is CI
    mode (no writes, exit status 1 when any file needs fixes).
    """
    parser = argparse.ArgumentParser(
        description="Normalize code block indentation in JSONL training data"
    )
    parser.add_argument("files", nargs="+", help="JSONL files to process")
    parser.add_argument("--dry-run", action="store_true", help="Show changes without writing")
    parser.add_argument("--check", action="store_true", help="CI mode: exit 1 if fixes needed")
    args = parser.parse_args()

    total_fixes = 0
    results = []

    for filepath in args.files:
        # Both --dry-run and --check suppress writes.
        result = process_file(filepath, dry_run=args.dry_run or args.check)
        results.append(result)
        total_fixes += result["fixes"]

        if result["fixes"] > 0:
            status = "FIXED" if not args.dry_run and not args.check else "WOULD FIX"
            print(f" {status}: {result['file']} — {result['fixes']} code blocks normalized")
        else:
            print(f" OK: {result['file']}")

    print(f"\nTotal: {total_fixes} code blocks normalized across {len(results)} files")

    if args.check and total_fixes > 0:
        print("FAIL: Code block indentation issues found. Run without --check to fix.")
        sys.exit(1)


if __name__ == "__main__":
    main()
@@ -1,151 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for normalize-code-blocks.py — issue #750"""
|
||||
import json
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
# Import from scripts/
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parent.parent / "scripts"))
|
||||
from normalize_code_blocks import normalize_code_block, process_line, process_file, CODE_BLOCK_RE
|
||||
|
||||
|
||||
class TestCodeBlockRegex:
    """CODE_BLOCK_RE should match fenced blocks and ignore inline code."""

    def test_matches_python_block(self):
        text = "```python\nprint('hi')\n```"
        assert CODE_BLOCK_RE.search(text)

    def test_matches_plain_block(self):
        # Fences with no language tag still count.
        text = "```\nsome code\n```"
        assert CODE_BLOCK_RE.search(text)

    def test_matches_bash_block(self):
        text = "```bash\necho hello\n```"
        assert CODE_BLOCK_RE.search(text)

    def test_ignores_inline_backticks(self):
        # Single-backtick inline code must not match.
        text = "Use `code` inline"
        assert not CODE_BLOCK_RE.search(text)

    def test_handles_multiline_code(self):
        # The "code" group should capture the full multi-line body.
        text = "```python\ndef foo():\n return 1\n\ndef bar():\n return 2\n```"
        match = CODE_BLOCK_RE.search(text)
        assert match
        assert "def foo" in match.group("code")
class TestNormalizeCodeBlock:
    """normalize_code_block should dedent bodies and preserve fences."""

    def test_strips_leading_indent(self):
        match = CODE_BLOCK_RE.search("```python\n print('hi')\n```")
        result = normalize_code_block(match)
        assert " print" not in result
        assert "print('hi')" in result

    def test_dedents_mixed_indent(self):
        code = "```python\n def foo():\n return 1\n def bar():\n return 2\n```"
        match = CODE_BLOCK_RE.search(code)
        result = normalize_code_block(match)
        lines = result.split("\n")
        # First non-tag line should have 0 indent
        code_lines = [l for l in lines if l.strip() and not l.startswith("```")]
        assert code_lines[0].startswith("def foo")

    def test_strips_trailing_blank_lines(self):
        match = CODE_BLOCK_RE.search("```python\nprint('hi')\n\n\n```")
        result = normalize_code_block(match)
        assert result.endswith("print('hi')\n```")

    def test_preserves_language_tag(self):
        # The opening fence (with its language) must survive untouched.
        match = CODE_BLOCK_RE.search("```python\n x = 1\n```")
        result = normalize_code_block(match)
        assert result.startswith("```python")

    def test_empty_block_unchanged(self):
        # Whitespace-only bodies must round-trip byte-identically.
        match = CODE_BLOCK_RE.search("```python\n \n```")
        original = match.group(0)
        result = normalize_code_block(match)
        assert result == original

    def test_diff_markers_preserved(self):
        # Leading +/- diff markers are content, not indentation to strip.
        code = "```\n+def new_func():\n+ return 1\n-def old_func():\n- return 0\n```"
        match = CODE_BLOCK_RE.search(code)
        result = normalize_code_block(match)
        assert "+def new_func" in result
        assert "-def old_func" in result
class TestProcessLine:
    """process_line should fix code blocks in any nested string value."""

    def test_valid_json_no_code_blocks(self):
        line = json.dumps({"prompt": "hello world"})
        new_line, fixes = process_line(line)
        assert fixes == 0

    def test_valid_json_with_code_block(self):
        obj = {"prompt": "Here is code:\n```python\n x = 1\n```"}
        line = json.dumps(obj)
        new_line, fixes = process_line(line)
        assert fixes == 1
        # The re-serialized line must still be valid JSON with clean code.
        parsed = json.loads(new_line)
        assert " x = 1" not in parsed["prompt"]

    def test_nested_dict_code_blocks(self):
        # One fix is counted per changed string value.
        obj = {
            "prompt": "code: ```python\n a = 1\n```",
            "chosen": "```python\n b = 2\n```",
        }
        line = json.dumps(obj)
        new_line, fixes = process_line(line)
        assert fixes == 2

    def test_invalid_json_returned_unchanged(self):
        # Broken lines pass through untouched rather than raising.
        line = "{broken json"
        new_line, fixes = process_line(line)
        assert new_line == line
        assert fixes == 0

    def test_list_field_code_blocks(self):
        # Strings inside list values are walked too.
        obj = {"items": ["```python\n x = 1\n```", "no code here"]}
        line = json.dumps(obj)
        new_line, fixes = process_line(line)
        assert fixes == 1
class TestProcessFile:
    """process_file should fix in place, honor dry_run, and report stats."""

    def test_fixes_file_in_place(self, tmp_path):
        f = tmp_path / "test.jsonl"
        lines = [
            json.dumps({"prompt": "```python\n x = 1\n```"}),
            json.dumps({"prompt": "no code"}),
        ]
        f.write_text("\n".join(lines) + "\n")

        result = process_file(str(f))
        assert result["fixes"] == 1
        assert result["lines"] == 2

        # Verify file was actually modified
        content = f.read_text()
        assert " x = 1" not in content

    def test_dry_run_no_write(self, tmp_path):
        f = tmp_path / "test.jsonl"
        original = json.dumps({"prompt": "```python\n x = 1\n```"})
        f.write_text(original + "\n")

        # Fixes are reported but nothing is written back.
        result = process_file(str(f), dry_run=True)
        assert result["fixes"] == 1

        # File unchanged
        assert f.read_text().strip() == original

    def test_missing_file(self, tmp_path):
        # Nonexistent paths yield an error dict, not an exception.
        result = process_file(str(tmp_path / "nope.jsonl"))
        assert "error" in result

    def test_clean_file_no_fixes(self, tmp_path):
        f = tmp_path / "clean.jsonl"
        f.write_text(json.dumps({"prompt": "no code blocks here"}) + "\n")
        result = process_file(str(f))
        assert result["fixes"] == 0
Reference in New Issue
Block a user