Compare commits
1 Commits
| Author | SHA1 | Date |
|---|---|---|
| 00526b8e5f |
196
scripts/cron_audit.py
Normal file
196
scripts/cron_audit.py
Normal file
@@ -0,0 +1,196 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Cron Job Audit — Identify erroring jobs, categorize health, recommend actions.
|
||||
|
||||
Usage:
|
||||
python scripts/cron_audit.py # Full audit
|
||||
python scripts/cron_audit.py --disable-stale 48 # Disable jobs erroring 48+ hours
|
||||
python scripts/cron_audit.py --json # JSON output
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
|
||||
def get_cron_jobs() -> list[dict]:
    """Fetch all cron jobs via the ``hermes`` CLI, with an on-disk fallback.

    Returns an empty list (after logging to stderr) when hermes is missing,
    times out, or no parseable job data can be found — callers treat that as
    "nothing to audit" rather than an error.
    """
    try:
        proc = subprocess.run(
            ["hermes", "cron", "list", "--all"],
            capture_output=True, text=True, timeout=30,
        )
        try:
            payload = json.loads(proc.stdout)
        except json.JSONDecodeError:
            # CLI output was not JSON — read hermes' job store directly.
            store = Path.home() / ".hermes" / "cron" / "jobs.json"
            if not store.exists():
                return []
            return json.loads(store.read_text()).get("jobs", [])
        if isinstance(payload, dict) and "jobs" in payload:
            return payload["jobs"]
        if isinstance(payload, list):
            return payload
        return []
    except Exception as exc:
        # Best-effort: any failure (missing binary, timeout, bad file) is
        # reported and degraded to an empty job list.
        print(f"Error fetching jobs: {exc}", file=sys.stderr)
        return []
|
||||
|
||||
|
||||
def categorize_job(job: dict) -> dict:
    """Classify one cron job's health and recommend an operator action.

    The returned summary always carries ``name``, ``id``, ``health`` and
    ``action``; the error categories additionally include the staleness in
    hours and a truncated copy of the last error message.
    """
    label = job.get("name", job.get("id", "?"))
    base = {"name": label, "id": job.get("id")}
    status = job.get("last_status", "unknown")

    if not job.get("enabled", True):
        return {**base, "health": "disabled", "action": "none"}

    if status == "ok" or status is None:
        return {**base, "health": "healthy", "action": "none"}

    if status == "error":
        message = (job.get("last_error") or "").lower()
        # Keyword heuristics: network-ish failures are transient, while
        # config/code failures are systemic and warrant disabling.
        transient_keywords = ("timeout", "connection", "network", "temporary", "rate limit", "429", "503")
        systemic_keywords = ("not found", "import", "module", "attribute", "syntax", "permission", "404", "401")
        looks_transient = any(kw in message for kw in transient_keywords)
        looks_systemic = any(kw in message for kw in systemic_keywords)

        # Hours since the last recorded run; 0 when unknown or unparseable.
        hours_stale = 0
        stamp = job.get("last_run_at", "")
        if stamp:
            try:
                ran_at = datetime.fromisoformat(stamp.replace("Z", "+00:00"))
                hours_stale = (datetime.now(timezone.utc) - ran_at).total_seconds() / 3600
            except Exception:
                pass

        raw_error = job.get("last_error", "")
        detail = {
            "staleness_hours": round(hours_stale, 1),
            "error": raw_error[:200] if raw_error else "unknown",
        }
        # Anything stale beyond 48h is escalated to systemic regardless of
        # what the error text looks like.
        if looks_systemic or hours_stale > 48:
            return {**base, "health": "systemic", "action": "disable", **detail}
        if looks_transient:
            return {**base, "health": "transient", "action": "monitor", **detail}
        return {**base, "health": "unknown_error", "action": "investigate", **detail}

    return {**base, "health": "unknown", "action": "investigate"}
|
||||
|
||||
|
||||
def run_audit() -> dict:
    """Audit every cron job and aggregate results into a report dict.

    The report contains per-category counts, the categorized job summaries,
    and a list of human-readable recommendations.
    """
    jobs = get_cron_jobs()
    if not jobs:
        return {
            "total": 0,
            "categories": {},
            "jobs": [],
            "recommendations": ["No jobs found or hermes not available"],
        }

    summaries = [categorize_job(job) for job in jobs]

    # Group summaries by health bucket.
    by_health: dict[str, list] = {}
    for summary in summaries:
        by_health.setdefault(summary["health"], []).append(summary)

    def _count(bucket: str) -> int:
        # Number of jobs in a given health bucket (0 when absent).
        return len(by_health.get(bucket, []))

    advice = []
    if by_health.get("systemic"):
        advice.append(f"DISABLE {_count('systemic')} systemic error jobs (erroring 48+ hours)")
    if by_health.get("transient"):
        advice.append(f"MONITOR {_count('transient')} transient error jobs (network/timeout)")
    if by_health.get("unknown_error"):
        advice.append(f"INVESTIGATE {_count('unknown_error')} jobs with unclassified errors")

    return {
        "generated_at": datetime.now(timezone.utc).isoformat(),
        "total": len(jobs),
        "healthy": _count("healthy"),
        "transient_errors": _count("transient"),
        "systemic_errors": _count("systemic"),
        "disabled": _count("disabled"),
        "unknown": _count("unknown_error") + _count("unknown"),
        "categories": {health: len(members) for health, members in by_health.items()},
        "jobs": summaries,
        "recommendations": advice,
    }
|
||||
|
||||
|
||||
def to_markdown(audit: dict) -> str:
    """Render an audit dict (as produced by run_audit) as a Markdown report.

    Expects the summary-count keys (``healthy``, ``transient_errors``, ...)
    plus ``jobs`` and ``recommendations``; raises KeyError if one is absent.
    """
    lines = [
        "# Cron Job Audit Report",
        "",
        # Truncate the ISO timestamp to minute precision for readability.
        f"Generated: {audit['generated_at'][:16]}",
        "",
        "## Summary",
        "",
        "| Health | Count |",
        "|--------|-------|",
        f"| Healthy | {audit['healthy']} |",
        f"| Transient errors | {audit['transient_errors']} |",
        f"| Systemic errors | {audit['systemic_errors']} |",
        f"| Disabled | {audit['disabled']} |",
        f"| Unknown | {audit['unknown']} |",
        f"| **Total** | **{audit['total']}** |",
        "",
    ]

    if audit["recommendations"]:
        lines.extend(["## Recommendations", ""])
        for r in audit["recommendations"]:
            lines.append(f"- {r}")
        lines.append("")

    if audit.get("systemic_errors", 0) > 0:
        lines.extend(["## Systemic Errors (Recommend Disable)", ""])
        for j in audit["jobs"]:
            if j["health"] == "systemic":
                lines.append(f"- `{j['id']}`: {j['name']} (stale {j.get('staleness_hours', '?')}h)")
                lines.append(f"  Error: {j.get('error', 'unknown')}")

    if audit.get("transient_errors", 0) > 0:
        lines.extend(["", "## Transient Errors (Monitor)", ""])
        for j in audit["jobs"]:
            if j["health"] == "transient":
                lines.append(f"- `{j['id']}`: {j['name']} — {j.get('error', 'unknown')[:100]}")

    # BUG FIX: join with an explicit "\n" escape — the original contained a
    # literal line break inside the join string, which is a syntax error.
    return "\n".join(lines)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: run the audit and print Markdown (or JSON with --json)."""
    import argparse

    parser = argparse.ArgumentParser(description="Cron job audit")
    parser.add_argument("--json", action="store_true")
    # NOTE(review): --disable-stale is accepted but never acted on in this
    # function — confirm whether the disable step was meant to be implemented.
    parser.add_argument("--disable-stale", type=int, default=0, help="Disable jobs stale N+ hours")
    opts = parser.parse_args()

    report = run_audit()
    if opts.json:
        print(json.dumps(report, indent=2))
    else:
        print(to_markdown(report))


if __name__ == "__main__":
    main()
|
||||
@@ -1,139 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
normalize-code-blocks.py — Fix inconsistent indentation in training data code blocks.
|
||||
|
||||
When code blocks are embedded in JSONL as triple-quoted strings, indentation
|
||||
accumulates from the surrounding context. This script normalizes code block
|
||||
content using textwrap.dedent and consistent 4-space indentation.
|
||||
|
||||
Usage:
|
||||
python3 scripts/normalize-code-blocks.py training/data/preference_pairs.jsonl
|
||||
python3 scripts/normalize-code-blocks.py --dry-run training/data/*.jsonl
|
||||
python3 scripts/normalize-code-blocks.py --check training/data/*.jsonl # CI mode
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import textwrap
|
||||
from pathlib import Path
|
||||
|
||||
# Matches ```python ... ``` or ``` ... ``` blocks inside string values
|
||||
# Matches fenced code blocks (``` or ```lang ... ```) embedded in string
# values; named groups expose the opening fence, the body, and the close.
_FENCE_LANGS = "python|py|bash|sh|javascript|js|typescript|ts|go|rust|ruby"
CODE_BLOCK_RE = re.compile(
    r"(?P<open>```(?:" + _FENCE_LANGS + r")?\s*\n)"
    r"(?P<code>.*?)"
    r"(?P<close>```)",
    re.DOTALL,
)
|
||||
|
||||
|
||||
def normalize_code_block(match: re.Match) -> str:
    """Re-indent one fenced code block: dedent and trim blank edge lines.

    Blocks containing only whitespace are returned exactly as matched.
    """
    fence_open = match.group("open")
    body = match.group("code")
    fence_close = match.group("close")

    if not body.strip():
        # Whitespace-only block — leave it untouched.
        return match.group(0)

    # Strip the common leading indentation, then drop blank lines at both ends.
    rows = textwrap.dedent(body).split("\n")
    while rows and not rows[0].strip():
        del rows[0]
    while rows and not rows[-1].strip():
        del rows[-1]

    return fence_open + "\n".join(rows) + "\n" + fence_close
|
||||
|
||||
|
||||
def process_line(line: str) -> tuple[str, int]:
    """Normalize the code blocks inside one JSONL record.

    Returns ``(new_line, num_fixes)``; a line that is not valid JSON passes
    through unchanged with a fix count of 0.
    """
    try:
        record = json.loads(line)
    except json.JSONDecodeError:
        return line, 0

    fixes = 0

    def walk(node):
        # Recursively rewrite every string value, counting changed strings.
        nonlocal fixes
        if isinstance(node, str):
            rewritten = CODE_BLOCK_RE.sub(normalize_code_block, node)
            if rewritten != node:
                fixes += 1
            return rewritten
        if isinstance(node, dict):
            return {key: walk(value) for key, value in node.items()}
        if isinstance(node, list):
            return [walk(item) for item in node]
        return node

    return json.dumps(walk(record), ensure_ascii=False) + "\n", fixes
|
||||
|
||||
|
||||
def main():
    """CLI driver: normalize code-block indentation across JSONL files.

    In --check mode nothing is written and the process exits 1 when fixes
    would be needed; in --dry-run mode changes are reported but not written.
    """
    parser = argparse.ArgumentParser(description="Normalize code block indentation in JSONL training data")
    parser.add_argument("files", nargs="+", help="JSONL files to process")
    parser.add_argument("--dry-run", action="store_true", help="Show changes without writing")
    parser.add_argument("--check", action="store_true", help="CI mode: exit 1 if fixes needed")
    args = parser.parse_args()

    total_fixes = 0
    total_lines = 0
    files_changed = 0

    for entry in args.files:
        path = Path(entry)
        if not path.exists():
            print(f"SKIP: {path} not found", file=sys.stderr)
            continue

        originals = path.read_text().splitlines(keepends=True)
        rewritten = []
        file_fixes = 0

        for raw in originals:
            # Blank lines pass through untouched and are not counted.
            if not raw.strip():
                rewritten.append(raw)
                continue
            new_line, count = process_line(raw)
            rewritten.append(new_line)
            file_fixes += count
            total_lines += 1

        if file_fixes > 0:
            files_changed += 1
            total_fixes += file_fixes
            print(f"{'CHECK' if args.check else 'FIX'}: {path} — {file_fixes} code blocks normalized")

            if args.check:
                # Report which lines would change, without touching the file.
                for idx, (before, after) in enumerate(zip(originals, rewritten)):
                    if before != after:
                        print(f"  Line {idx+1}: indentation changed")
            elif not args.dry_run:
                path.write_text("".join(rewritten))
                print(f"  Written: {path}")
        else:
            print(f"OK: {path} — no indentation issues")

    print(f"\nSummary: {total_fixes} code blocks fixed across {files_changed} files ({total_lines} lines processed)")

    if args.check and total_fixes > 0:
        print("FAIL: Code block indentation issues found. Run without --check to fix.")
        sys.exit(1)

    sys.exit(0)


if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user