Compare commits
1 Commits
feat/543-t
...
fix/issue-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aa3d22d89c |
@@ -31,14 +31,6 @@ class GlitchCategory(Enum):
|
||||
WATER_REFLECTION = "water_reflection"
|
||||
SKYBOX_SEAM = "skybox_seam"
|
||||
|
||||
# Three.js-specific categories (ref: timmy-config#543)
|
||||
SHADER_FAILURE = "shader_failure"
|
||||
TEXTURE_PLACEHOLDER = "texture_placeholder"
|
||||
UV_MAPPING_ERROR = "uv_mapping_error"
|
||||
FRUSTUM_CULLING = "frustum_culling"
|
||||
SHADOW_MAP_ARTIFACT = "shadow_map_artifact"
|
||||
BLOOM_OVERFLOW = "bloom_overflow"
|
||||
|
||||
|
||||
@dataclass
|
||||
class GlitchPattern:
|
||||
@@ -249,123 +241,6 @@ MATRIX_GLITCH_PATTERNS: list[GlitchPattern] = [
|
||||
],
|
||||
confidence_threshold=0.45,
|
||||
),
|
||||
|
||||
# --- Three.js-Specific Glitch Patterns (ref: timmy-config#543) ---
|
||||
GlitchPattern(
|
||||
category=GlitchCategory.SHADER_FAILURE,
|
||||
name="Shader Compilation Failure",
|
||||
description="Three.js shader failed to compile, rendering the material as solid black. "
|
||||
"Common when custom ShaderMaterial has syntax errors or missing uniforms.",
|
||||
severity=GlitchSeverity.CRITICAL,
|
||||
detection_prompts=[
|
||||
"Look for objects or surfaces rendered as pure black (#000000) that should have visible textures or materials.",
|
||||
"Identify geometry that appears completely dark while surrounding objects are normally lit.",
|
||||
"Check for objects where the material seems to 'absorb all light' — flat black with no shading gradient.",
|
||||
],
|
||||
visual_indicators=[
|
||||
"solid black object with no shading",
|
||||
"geometry rendered as silhouette",
|
||||
"material appears to absorb light entirely",
|
||||
"black patch inconsistent with scene lighting",
|
||||
],
|
||||
confidence_threshold=0.7,
|
||||
),
|
||||
GlitchPattern(
|
||||
category=GlitchCategory.TEXTURE_PLACEHOLDER,
|
||||
name="Three.js Texture Not Loaded",
|
||||
description="Three.js failed to load the texture asset, rendering a 1x1 white pixel "
|
||||
"stretched across the entire surface. Distinguished from missing-texture by "
|
||||
"the uniform white/grey appearance rather than magenta.",
|
||||
severity=GlitchSeverity.CRITICAL,
|
||||
detection_prompts=[
|
||||
"Look for surfaces that are uniformly white or light grey with no texture detail, even on large geometry.",
|
||||
"Identify objects where the texture appears as a single solid color stretched across complex UVs.",
|
||||
"Check for surfaces that look 'blank' or 'unloaded' — flat white/grey where detail should exist.",
|
||||
],
|
||||
visual_indicators=[
|
||||
"uniform white or light grey surface",
|
||||
"no texture detail on large geometry",
|
||||
"stretched single-color appearance",
|
||||
"1x1 pixel placeholder stretched to fill UV space",
|
||||
],
|
||||
confidence_threshold=0.65,
|
||||
),
|
||||
GlitchPattern(
|
||||
category=GlitchCategory.UV_MAPPING_ERROR,
|
||||
name="BufferGeometry UV Mapping Error",
|
||||
description="Three.js BufferGeometry has incorrect UV coordinates, causing textures to "
|
||||
"appear stretched, compressed, or mapped to the wrong faces.",
|
||||
severity=GlitchSeverity.HIGH,
|
||||
detection_prompts=[
|
||||
"Look for textures that appear dramatically stretched in one direction on specific faces.",
|
||||
"Identify surfaces where the texture pattern is distorted but other nearby surfaces look correct.",
|
||||
"Check for faces where the texture seems 'smeared' or mapped with incorrect aspect ratio.",
|
||||
],
|
||||
visual_indicators=[
|
||||
"texture stretching on specific faces",
|
||||
"distorted pattern on geometry",
|
||||
"smeared texture appearance",
|
||||
"aspect ratio mismatch between texture and surface",
|
||||
],
|
||||
confidence_threshold=0.6,
|
||||
),
|
||||
GlitchPattern(
|
||||
category=GlitchCategory.FRUSTUM_CULLING,
|
||||
name="Frustum Culling Artifact",
|
||||
description="Three.js frustum culling incorrectly marks objects as outside the camera "
|
||||
"frustum, causing them to pop in/out of existence at screen edges.",
|
||||
severity=GlitchSeverity.MEDIUM,
|
||||
detection_prompts=[
|
||||
"Look for objects that are partially visible at the edge of the frame — half-rendered or cut off unnaturally.",
|
||||
"Identify geometry that seems to 'pop' into existence as the view angle changes.",
|
||||
"Check screen edges for objects that appear suddenly rather than smoothly entering the viewport.",
|
||||
],
|
||||
visual_indicators=[
|
||||
"half-visible object at screen edge",
|
||||
"object popping into frame",
|
||||
"abrupt appearance of geometry",
|
||||
"bounding box visible but mesh missing",
|
||||
],
|
||||
confidence_threshold=0.55,
|
||||
),
|
||||
GlitchPattern(
|
||||
category=GlitchCategory.SHADOW_MAP_ARTIFACT,
|
||||
name="Shadow Map Resolution Artifact",
|
||||
description="Three.js shadow map has insufficient resolution, causing pixelated, "
|
||||
"blocky shadows with visible texel edges instead of smooth shadow gradients.",
|
||||
severity=GlitchSeverity.MEDIUM,
|
||||
detection_prompts=[
|
||||
"Look for shadows with visible blocky or pixelated edges instead of smooth gradients.",
|
||||
"Identify shadow maps where individual texels (texture pixels) are clearly visible.",
|
||||
"Check for shadows that appear as jagged stair-stepped patterns rather than soft edges.",
|
||||
],
|
||||
visual_indicators=[
|
||||
"blocky shadow edges",
|
||||
"visible texel grid in shadows",
|
||||
"stair-stepped shadow boundary",
|
||||
"pixelated shadow gradient",
|
||||
],
|
||||
confidence_threshold=0.55,
|
||||
),
|
||||
GlitchPattern(
|
||||
category=GlitchCategory.BLOOM_OVERFLOW,
|
||||
name="Post-Processing Bloom Overflow",
|
||||
description="Three.js UnrealBloomPass or similar post-processing bloom effect is too "
|
||||
"intense, causing bright areas to bleed glow into surrounding geometry.",
|
||||
severity=GlitchSeverity.LOW,
|
||||
detection_prompts=[
|
||||
"Look for bright areas that have an unusually large, soft glow bleeding into adjacent surfaces.",
|
||||
"Identify scenes where light sources appear to have a 'halo' that extends beyond physical plausibility.",
|
||||
"Check for bright objects whose glow color bleeds onto nearby unrelated geometry.",
|
||||
],
|
||||
visual_indicators=[
|
||||
"excessive glow bleeding from bright surfaces",
|
||||
"halo around light sources",
|
||||
"bloom color tinting adjacent geometry",
|
||||
"glow bleeding beyond object boundaries",
|
||||
],
|
||||
confidence_threshold=0.5,
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
@@ -414,23 +289,6 @@ def build_vision_prompt(patterns: list[GlitchPattern] | None = None) -> str:
|
||||
)
|
||||
|
||||
|
||||
|
||||
# Three.js-specific category set for filtering (ref: timmy-config#543)
|
||||
THREEJS_CATEGORIES = {
|
||||
GlitchCategory.SHADER_FAILURE,
|
||||
GlitchCategory.TEXTURE_PLACEHOLDER,
|
||||
GlitchCategory.UV_MAPPING_ERROR,
|
||||
GlitchCategory.FRUSTUM_CULLING,
|
||||
GlitchCategory.SHADOW_MAP_ARTIFACT,
|
||||
GlitchCategory.BLOOM_OVERFLOW,
|
||||
}
|
||||
|
||||
|
||||
def get_threejs_patterns() -> list[GlitchPattern]:
|
||||
"""Return only Three.js-specific glitch patterns."""
|
||||
return [p for p in MATRIX_GLITCH_PATTERNS if p.category in THREEJS_CATEGORIES]
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import json
|
||||
print(f"Loaded {len(MATRIX_GLITCH_PATTERNS)} glitch patterns:\n")
|
||||
|
||||
@@ -9,7 +9,7 @@ Usage:
|
||||
python matrix_glitch_detector.py <url> [--angles 4] [--output report.json]
|
||||
python matrix_glitch_detector.py --demo # Run with synthetic test data
|
||||
|
||||
Ref: timmy-config#491, timmy-config#543
|
||||
Ref: timmy-config#491
|
||||
"""
|
||||
|
||||
import argparse
|
||||
@@ -33,7 +33,6 @@ from glitch_patterns import (
|
||||
MATRIX_GLITCH_PATTERNS,
|
||||
build_vision_prompt,
|
||||
get_patterns_by_severity,
|
||||
get_threejs_patterns,
|
||||
)
|
||||
|
||||
|
||||
@@ -346,17 +345,14 @@ def _parse_vision_response(
|
||||
|
||||
def _infer_severity(category: str, confidence: float) -> str:
|
||||
"""Infer severity from category and confidence when not provided."""
|
||||
critical_cats = {"missing_textures", "clipping", "shader_failure", "texture_placeholder"}
|
||||
high_cats = {"floating_assets", "broken_normals", "uv_mapping_error"}
|
||||
medium_cats = {"frustum_culling", "shadow_map_artifact"}
|
||||
critical_cats = {"missing_textures", "clipping"}
|
||||
high_cats = {"floating_assets", "broken_normals"}
|
||||
|
||||
cat_lower = category.lower()
|
||||
if any(c in cat_lower for c in critical_cats):
|
||||
return "critical" if confidence > 0.7 else "high"
|
||||
if any(c in cat_lower for c in high_cats):
|
||||
return "high" if confidence > 0.7 else "medium"
|
||||
if any(c in cat_lower for c in medium_cats):
|
||||
return "medium" if confidence > 0.6 else "low"
|
||||
return "medium" if confidence > 0.6 else "low"
|
||||
|
||||
|
||||
@@ -393,9 +389,9 @@ def build_report(
|
||||
),
|
||||
},
|
||||
metadata={
|
||||
"detector_version": "0.2.0",
|
||||
"detector_version": "0.1.0",
|
||||
"pattern_count": len(MATRIX_GLITCH_PATTERNS),
|
||||
"reference": "timmy-config#491, timmy-config#543",
|
||||
"reference": "timmy-config#491",
|
||||
},
|
||||
)
|
||||
|
||||
@@ -464,30 +460,6 @@ def run_demo(output_path: Optional[Path] = None) -> ScanResult:
|
||||
screenshot_index=3,
|
||||
screenshot_angle="left",
|
||||
),
|
||||
DetectedGlitch(
|
||||
id=str(uuid.uuid4())[:8],
|
||||
category="shader_failure",
|
||||
name="Black Material on Portal Frame",
|
||||
description="Portal frame rendered as solid black — shader compilation failed (missing uniform u_time)",
|
||||
severity="critical",
|
||||
confidence=0.91,
|
||||
location_x=45.0,
|
||||
location_y=30.0,
|
||||
screenshot_index=0,
|
||||
screenshot_angle="front",
|
||||
),
|
||||
DetectedGlitch(
|
||||
id=str(uuid.uuid4())[:8],
|
||||
category="shadow_map_artifact",
|
||||
name="Pixelated Character Shadow",
|
||||
description="Character shadow shows visible texel grid — shadow map resolution too low (512x512)",
|
||||
severity="medium",
|
||||
confidence=0.78,
|
||||
location_x=52.0,
|
||||
location_y=75.0,
|
||||
screenshot_index=1,
|
||||
screenshot_angle="right",
|
||||
),
|
||||
]
|
||||
|
||||
print(f"[*] Detected {len(demo_glitches)} glitches")
|
||||
@@ -524,11 +496,6 @@ Examples:
|
||||
help="Minimum severity to include in report",
|
||||
)
|
||||
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
|
||||
parser.add_argument(
|
||||
"--threejs",
|
||||
action="store_true",
|
||||
help="Focus on Three.js-specific glitch patterns only (shader, texture, UV, culling, shadow, bloom)",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
@@ -558,13 +525,9 @@ Examples:
|
||||
screenshots = capture_screenshots(args.url, angles, screenshots_dir)
|
||||
print(f"[*] Captured {len(screenshots)} screenshots")
|
||||
|
||||
# Filter patterns by severity and type
|
||||
# Filter patterns by severity
|
||||
min_sev = GlitchSeverity(args.min_severity)
|
||||
patterns = get_patterns_by_severity(min_sev)
|
||||
if args.threejs:
|
||||
threejs_patterns = get_threejs_patterns()
|
||||
patterns = [p for p in patterns if p in threejs_patterns]
|
||||
print(f"[*] Three.js-focused mode: {len(patterns)} patterns")
|
||||
|
||||
# Analyze with vision AI
|
||||
print(f"[*] Analyzing with vision AI ({len(patterns)} patterns)...")
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Full Nostr agent-to-agent communication demo - FINAL WORKING
|
||||
"""
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Soul Eval Gate — The Conscience of the Training Pipeline
|
||||
|
||||
|
||||
104
cron/audit-report.json
Normal file
104
cron/audit-report.json
Normal file
@@ -0,0 +1,104 @@
|
||||
{
|
||||
"audit_time": "2026-04-15T01:13:31.126215+00:00",
|
||||
"total_jobs": 7,
|
||||
"summary": {
|
||||
"healthy": 7,
|
||||
"transient_errors": 0,
|
||||
"systemic_failures": 0
|
||||
},
|
||||
"systemic_jobs": [],
|
||||
"transient_jobs": [],
|
||||
"all_jobs": [
|
||||
{
|
||||
"id": "9e0624269ba7",
|
||||
"name": "Triage Heartbeat",
|
||||
"schedule": "every 15m",
|
||||
"state": "paused",
|
||||
"enabled": false,
|
||||
"last_status": "ok",
|
||||
"last_error": null,
|
||||
"last_run_at": "2026-03-24T15:33:57.749458-04:00",
|
||||
"category": "healthy",
|
||||
"reason": "Dashboard repo frozen - loops redirected to the-nexus",
|
||||
"action": "none \u2014 paused intentionally"
|
||||
},
|
||||
{
|
||||
"id": "e29eda4a8548",
|
||||
"name": "PR Review Sweep",
|
||||
"schedule": "every 30m",
|
||||
"state": "paused",
|
||||
"enabled": false,
|
||||
"last_status": "ok",
|
||||
"last_error": null,
|
||||
"last_run_at": "2026-03-24T15:21:42.995715-04:00",
|
||||
"category": "healthy",
|
||||
"reason": "Dashboard repo frozen - loops redirected to the-nexus",
|
||||
"action": "none \u2014 paused intentionally"
|
||||
},
|
||||
{
|
||||
"id": "a77a87392582",
|
||||
"name": "Health Monitor",
|
||||
"schedule": "every 5m",
|
||||
"state": "scheduled",
|
||||
"enabled": true,
|
||||
"last_status": "ok",
|
||||
"last_error": null,
|
||||
"last_run_at": "2026-03-24T15:34:39.045945-04:00",
|
||||
"category": "healthy",
|
||||
"reason": "Last run succeeded",
|
||||
"action": ""
|
||||
},
|
||||
{
|
||||
"id": "36fb2f630a17",
|
||||
"name": "Hermes Philosophy Loop",
|
||||
"schedule": "every 1440m",
|
||||
"state": "unknown",
|
||||
"enabled": false,
|
||||
"last_status": null,
|
||||
"last_error": null,
|
||||
"last_run_at": null,
|
||||
"category": "healthy",
|
||||
"reason": "Never run, no errors",
|
||||
"action": ""
|
||||
},
|
||||
{
|
||||
"id": "muda-audit-weekly",
|
||||
"name": "Muda Audit",
|
||||
"schedule": "0 21 * * 0",
|
||||
"state": "scheduled",
|
||||
"enabled": true,
|
||||
"last_status": null,
|
||||
"last_error": null,
|
||||
"last_run_at": null,
|
||||
"category": "healthy",
|
||||
"reason": "Never run, no errors",
|
||||
"action": ""
|
||||
},
|
||||
{
|
||||
"id": "kaizen-retro-349",
|
||||
"name": "Kaizen Retro",
|
||||
"schedule": "daily at 07:30",
|
||||
"state": "scheduled",
|
||||
"enabled": true,
|
||||
"last_status": null,
|
||||
"last_error": null,
|
||||
"last_run_at": null,
|
||||
"category": "healthy",
|
||||
"reason": "Never run, no errors",
|
||||
"action": ""
|
||||
},
|
||||
{
|
||||
"id": "overnight-rd-nightly",
|
||||
"name": "Overnight R&D Loop",
|
||||
"schedule": "Nightly at 10 PM EDT",
|
||||
"state": "scheduled",
|
||||
"enabled": true,
|
||||
"last_status": null,
|
||||
"last_error": null,
|
||||
"last_run_at": null,
|
||||
"category": "healthy",
|
||||
"reason": "Never run, no errors",
|
||||
"action": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -196,37 +196,7 @@
|
||||
"paused_reason": null,
|
||||
"skills": [],
|
||||
"skill": null
|
||||
},
|
||||
{
|
||||
"id": "tmux-supervisor-513",
|
||||
"name": "Autonomous Cron Supervisor",
|
||||
"prompt": "Load the tmux-supervisor skill and execute the monitoring protocol.\n\nCheck both `dev` and `timmy` tmux sessions for idle panes. Only send Telegram notifications on actionable events (idle, overflow, failure). Be silent when all agents are working.\n\nSteps:\n1. List all tmux sessions (skip 'Alexander')\n2. For each session, list windows and panes\n3. Capture each pane and classify state (idle vs active)\n4. For idle panes: read context, craft context-aware prompt\n5. Send /queue prompts to idle panes\n6. Verify prompts landed\n7. Only notify via Telegram if:\n - A pane was prompted (idle detected)\n - A pane shows context overflow (>80%)\n - A pane is stuck or crashed\n8. If all panes are active: respond with [SILENT]",
|
||||
"schedule": {
|
||||
"kind": "interval",
|
||||
"minutes": 7,
|
||||
"display": "every 7m"
|
||||
},
|
||||
"schedule_display": "every 7m",
|
||||
"repeat": {
|
||||
"times": null,
|
||||
"completed": 0
|
||||
},
|
||||
"enabled": true,
|
||||
"created_at": "2026-04-15T03:00:00.000000+00:00",
|
||||
"next_run_at": null,
|
||||
"last_run_at": null,
|
||||
"last_status": null,
|
||||
"last_error": null,
|
||||
"deliver": "telegram",
|
||||
"origin": null,
|
||||
"state": "scheduled",
|
||||
"paused_at": null,
|
||||
"paused_reason": null,
|
||||
"skills": [
|
||||
"tmux-supervisor"
|
||||
],
|
||||
"skill": "tmux-supervisor"
|
||||
}
|
||||
],
|
||||
"updated_at": "2026-04-13T02:00:00+00:00"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
from hermes_tools import browser_navigate, browser_vision
|
||||
|
||||
|
||||
333
scripts/cron-audit-662.py
Normal file
333
scripts/cron-audit-662.py
Normal file
@@ -0,0 +1,333 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Cron Fleet Audit Script — #662
|
||||
|
||||
Reads hermes cron job state, categorizes all jobs into:
|
||||
- healthy: last_status=ok or never-run-and-enabled
|
||||
- transient: recent errors (likely network/timeout)
|
||||
- systemic: repeated errors over 48+ hours
|
||||
|
||||
Outputs a JSON report and optionally:
|
||||
--disable Disable systemic jobs erroring 48+ hours
|
||||
--issues File Gitea issues for systemic failures
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
import argparse
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Any
|
||||
|
||||
# --- Config ---
|
||||
ERROR_THRESHOLD_HOURS = 48
|
||||
CRON_STATE_PATHS = [
|
||||
Path.home() / ".hermes" / "cron" / "jobs.json",
|
||||
Path.home() / ".hermes" / "cron" / "state.json",
|
||||
Path("/root/.hermes/cron/jobs.json"),
|
||||
Path("/root/.hermes/cron/state.json"),
|
||||
]
|
||||
|
||||
def load_cron_state() -> List[Dict[str, Any]]:
    """Load the cron job list from disk, falling back to the hermes CLI.

    Tries each candidate state file in CRON_STATE_PATHS first; if none is
    readable, shells out to ``hermes cron list --json``. Returns an empty
    list when no source yields jobs.
    """
    def extract(payload: Any) -> List[Dict[str, Any]] | None:
        # State may be stored either as {"jobs": [...]} or as a bare list.
        if isinstance(payload, dict) and "jobs" in payload:
            return payload["jobs"]
        if isinstance(payload, list):
            return payload
        return None

    for candidate in CRON_STATE_PATHS:
        if not candidate.exists():
            continue
        try:
            with open(candidate) as fh:
                jobs = extract(json.load(fh))
        except (json.JSONDecodeError, IOError):
            # Unreadable or corrupt state file — try the next location.
            continue
        if jobs is not None:
            return jobs

    # Fallback: ask the hermes CLI directly.
    try:
        import subprocess
        proc = subprocess.run(
            ["hermes", "cron", "list", "--json"],
            capture_output=True, text=True, timeout=30
        )
        if proc.returncode == 0:
            jobs = extract(json.loads(proc.stdout))
            if jobs is not None:
                return jobs
    except (subprocess.TimeoutExpired, FileNotFoundError, json.JSONDecodeError):
        # CLI missing, hung, or emitted garbage — treat as "no jobs".
        pass

    return []
|
||||
|
||||
|
||||
def parse_timestamp(ts: str) -> datetime:
|
||||
"""Parse ISO timestamp, handle various formats."""
|
||||
if not ts:
|
||||
return None
|
||||
# Normalize timezone
|
||||
ts = ts.replace("+00:00", "+00:00")
|
||||
try:
|
||||
dt = datetime.fromisoformat(ts)
|
||||
if dt.tzinfo is None:
|
||||
dt = dt.replace(tzinfo=timezone.utc)
|
||||
return dt
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def categorize_job(job: Dict[str, Any], now: datetime) -> Dict[str, Any]:
|
||||
"""Categorize a single job."""
|
||||
status = job.get("last_status", "")
|
||||
last_error = job.get("last_error", "")
|
||||
last_run = parse_timestamp(job.get("last_run_at"))
|
||||
enabled = job.get("enabled", False)
|
||||
state = job.get("state", "unknown")
|
||||
name = job.get("name", job.get("id", "unknown"))
|
||||
|
||||
entry = {
|
||||
"id": job.get("id", ""),
|
||||
"name": name,
|
||||
"schedule": job.get("schedule_display", str(job.get("schedule", ""))),
|
||||
"state": state,
|
||||
"enabled": enabled,
|
||||
"last_status": status,
|
||||
"last_error": last_error,
|
||||
"last_run_at": job.get("last_run_at"),
|
||||
"category": "healthy",
|
||||
"reason": "",
|
||||
"action": "",
|
||||
}
|
||||
|
||||
# Never run / no error
|
||||
if status is None and not last_error:
|
||||
entry["category"] = "healthy"
|
||||
entry["reason"] = "Never run, no errors"
|
||||
return entry
|
||||
|
||||
# Explicitly paused with reason
|
||||
if state == "paused":
|
||||
entry["category"] = "healthy"
|
||||
entry["reason"] = job.get("paused_reason", "Manually paused")
|
||||
entry["action"] = "none — paused intentionally"
|
||||
return entry
|
||||
|
||||
# Completed jobs
|
||||
if state == "completed":
|
||||
entry["category"] = "healthy"
|
||||
entry["reason"] = "Completed (one-shot)"
|
||||
return entry
|
||||
|
||||
# Error status
|
||||
if status == "error" and last_error:
|
||||
age_hours = None
|
||||
if last_run:
|
||||
age_hours = (now - last_run).total_seconds() / 3600
|
||||
|
||||
if age_hours is not None and age_hours >= ERROR_THRESHOLD_HOURS:
|
||||
entry["category"] = "systemic"
|
||||
entry["reason"] = f"Erroring for {age_hours:.1f}h (>{ERROR_THRESHOLD_HOURS}h threshold)"
|
||||
entry["action"] = "disable"
|
||||
else:
|
||||
entry["category"] = "transient"
|
||||
age_str = f"{age_hours:.1f}h ago" if age_hours is not None else "unknown age"
|
||||
entry["reason"] = f"Recent error ({age_str}), may be transient"
|
||||
entry["action"] = "monitor"
|
||||
return entry
|
||||
|
||||
# OK status
|
||||
if status == "ok":
|
||||
entry["category"] = "healthy"
|
||||
entry["reason"] = "Last run succeeded"
|
||||
return entry
|
||||
|
||||
# Scheduled but never errored
|
||||
if state == "scheduled" and enabled:
|
||||
entry["category"] = "healthy"
|
||||
entry["reason"] = "Scheduled and running"
|
||||
return entry
|
||||
|
||||
# Unknown state
|
||||
entry["category"] = "transient"
|
||||
entry["reason"] = f"Unknown state: {state}, status: {status}"
|
||||
entry["action"] = "investigate"
|
||||
return entry
|
||||
|
||||
|
||||
def audit_jobs(jobs: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Run the full audit over *jobs* and return the report dict.

    The report carries an ISO audit timestamp, per-category counts,
    condensed listings of systemic and transient jobs, and the full
    categorized entry for every job.
    """
    now = datetime.now(timezone.utc)
    categorized = [categorize_job(job, now) for job in jobs]

    def pick(cat: str) -> List[Dict[str, Any]]:
        # Filter the categorized entries down to a single category.
        return [entry for entry in categorized if entry["category"] == cat]

    healthy = pick("healthy")
    transient = pick("transient")
    systemic = pick("systemic")

    return {
        "audit_time": now.isoformat(),
        "total_jobs": len(jobs),
        "summary": {
            "healthy": len(healthy),
            "transient_errors": len(transient),
            "systemic_failures": len(systemic),
        },
        # Systemic listing keeps the error text for issue filing.
        "systemic_jobs": [
            {
                "id": entry["id"],
                "name": entry["name"],
                "reason": entry["reason"],
                "last_error": entry["last_error"],
            }
            for entry in systemic
        ],
        "transient_jobs": [
            {
                "id": entry["id"],
                "name": entry["name"],
                "reason": entry["reason"],
            }
            for entry in transient
        ],
        "all_jobs": categorized,
    }
|
||||
|
||||
|
||||
def generate_issue_body(job: Dict[str, Any]) -> str:
    """Render the Gitea issue body for one systemic cron failure."""
    # Fall back to a placeholder when no error text was recorded.
    error_text = job['last_error'] or 'No error details available'
    lines = [
        "## Systemic Cron Failure — Auto-Filed by Audit #662",
        "",
        f"**Job:** {job['name']} (`{job['id']}`)",
        f"**Schedule:** {job['schedule']}",
        f"**State:** {job['state']}",
        "**Last Error:**",
        "```",
        error_text,
        "```",
        "",
        f"**Audit Finding:** {job['reason']}",
        "",
        "### Action Required",
        "- [ ] Diagnose root cause of repeated failure",
        "- [ ] Fix configuration or remove broken job",
        "- [ ] Verify job resumes healthy after fix",
        "",
        "*Auto-generated by cron-audit-662.py*",
        "",  # trailing newline, matching the original template
    ]
    return "\n".join(lines)
|
||||
|
||||
|
||||
def main():
    """CLI entry point for the cron fleet audit.

    Loads the job list, audits it, prints/writes the report, and
    optionally disables systemic jobs and files Gitea issues for them.
    Exits non-zero when systemic failures are present.
    """
    parser = argparse.ArgumentParser(description="Cron fleet audit (#662)")
    parser.add_argument("--jobs-file", help="Path to jobs.json override")
    parser.add_argument("--disable", action="store_true",
                        help="Disable systemic jobs (requires hermes CLI)")
    parser.add_argument("--issues", action="store_true",
                        help="File Gitea issues for systemic failures")
    parser.add_argument("--output", help="Write report to file")
    parser.add_argument("--json", action="store_true", help="JSON output only")
    args = parser.parse_args()

    # Load jobs: an explicit override file wins, else search known state paths.
    if args.jobs_file:
        with open(args.jobs_file) as f:
            data = json.load(f)
        jobs = data.get("jobs", data) if isinstance(data, dict) else data
    else:
        jobs = load_cron_state()

    if not jobs:
        print("ERROR: No cron jobs found. Check ~/.hermes/cron/ or run 'hermes cron list'.")
        sys.exit(1)

    # Run audit
    report = audit_jobs(jobs)

    # Output: machine-readable JSON or a human summary.
    if args.json:
        print(json.dumps(report, indent=2))
    else:
        print(f"\n{'='*60}")
        print(f" CRON FLEET AUDIT — {report['total_jobs']} jobs")
        print(f"{'='*60}")
        print(f" Healthy: {report['summary']['healthy']}")
        print(f" Transient errors: {report['summary']['transient_errors']}")
        print(f" Systemic failures: {report['summary']['systemic_failures']}")
        print(f"{'='*60}")

        if report["systemic_jobs"]:
            print(f"\n SYSTEMIC FAILURES (>{ERROR_THRESHOLD_HOURS}h):")
            for j in report["systemic_jobs"]:
                print(f" - {j['name']} ({j['id']}): {j['reason']}")
                if j["last_error"]:
                    print(f" Error: {j['last_error'][:100]}")

        if report["transient_jobs"]:
            # Plain string: this message has no placeholders.
            print("\n TRANSIENT ERRORS:")
            for j in report["transient_jobs"]:
                print(f" - {j['name']} ({j['id']}): {j['reason']}")

        print()

    # Write report file
    if args.output:
        with open(args.output, "w") as f:
            json.dump(report, f, indent=2)
        print(f"Report written to {args.output}")

    # Disable systemic jobs via the hermes CLI.
    if args.disable and report["systemic_jobs"]:
        import subprocess
        for j in report["systemic_jobs"]:
            print(f"Disabling: {j['name']} ({j['id']})")
            try:
                proc = subprocess.run(
                    ["hermes", "cron", "pause", j["id"]],
                    capture_output=True, text=True, timeout=10
                )
            except Exception as e:
                print(f" → Failed: {e}")
            else:
                # BUG FIX: the original printed "Disabled" without checking
                # the CLI result; report non-zero exits as failures.
                if proc.returncode == 0:
                    print(" → Disabled")
                else:
                    print(f" → Failed: {proc.stderr.strip() or 'non-zero exit'}")

    # File issues for systemic failures
    if args.issues and report["systemic_jobs"]:
        # Token: environment variable first, then the on-disk fallback.
        gitea_token = os.environ.get("GITEA_TOKEN") or ""
        if not gitea_token:
            token_path = Path.home() / ".config" / "gitea" / "token"
            if token_path.exists():
                gitea_token = token_path.read_text().strip()

        if not gitea_token:
            print("ERROR: No Gitea token found. Set GITEA_TOKEN or ~/.config/gitea/token")
            sys.exit(1)

        import urllib.request
        base = "https://forge.alexanderwhitestone.com/api/v1"
        headers = {
            "Authorization": f"token {gitea_token}",
            "Content-Type": "application/json",
        }

        for j in report["systemic_jobs"]:
            title = f"CRON FAIL: {j['name']} — systemic error ({j['id']})"
            body = generate_issue_body(j)
            payload = json.dumps({"title": title, "body": body}).encode()
            req = urllib.request.Request(
                f"{base}/repos/Timmy_Foundation/timmy-config/issues",
                data=payload, headers=headers, method="POST"
            )
            try:
                # RESOURCE FIX: close the HTTP response via context manager
                # (the original leaked the urlopen handle).
                with urllib.request.urlopen(req) as resp:
                    result = json.loads(resp.read())
                print(f"Issued #{result['number']}: {title}")
            except Exception as e:
                print(f"Failed to file issue for {j['name']}: {e}")

    # Exit code: non-zero if systemic failures found
    sys.exit(1 if report["systemic_jobs"] else 0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
1
scripts/cron_audit_662.py
Symbolic link
1
scripts/cron_audit_662.py
Symbolic link
@@ -0,0 +1 @@
|
||||
cron-audit-662.py
|
||||
@@ -1,4 +1,3 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
from hermes_tools import browser_navigate, browser_vision
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
from hermes_tools import browser_navigate, browser_vision
|
||||
|
||||
|
||||
109
tests/test_cron_audit.py
Normal file
109
tests/test_cron_audit.py
Normal file
@@ -0,0 +1,109 @@
|
||||
"""
|
||||
Tests for scripts/cron-audit-662.py — cron fleet audit.
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import unittest
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from pathlib import Path
|
||||
|
||||
# Add scripts to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
|
||||
from cron_audit_662 import categorize_job, audit_jobs
|
||||
|
||||
|
||||
class TestCategorizeJob(unittest.TestCase):
    """Unit tests for the categorize_job() classification rules."""

    def setUp(self):
        # Frozen reference time keeps the error-age math deterministic.
        self.now = datetime(2026, 4, 14, 20, 0, 0, tzinfo=timezone.utc)

    def _categorize(self, **fields):
        """Build a job record from keyword fields and categorize it."""
        return categorize_job(dict(fields), self.now)

    def test_healthy_ok(self):
        verdict = self._categorize(
            id="a1", name="Test", last_status="ok", enabled=True, state="scheduled"
        )
        self.assertEqual(verdict["category"], "healthy")

    def test_healthy_never_run(self):
        verdict = self._categorize(id="a2", name="Never", last_status=None, last_error=None)
        self.assertEqual(verdict["category"], "healthy")

    def test_healthy_paused(self):
        verdict = self._categorize(
            id="a3", name="Paused", state="paused", paused_reason="intentional"
        )
        self.assertEqual(verdict["category"], "healthy")

    def test_healthy_completed(self):
        verdict = self._categorize(id="a4", name="Done", state="completed")
        self.assertEqual(verdict["category"], "healthy")

    def test_transient_recent_error(self):
        two_hours_ago = (self.now - timedelta(hours=2)).isoformat()
        verdict = self._categorize(
            id="t1",
            name="RecentErr",
            last_status="error",
            last_error="Connection timeout",
            last_run_at=two_hours_ago,
            enabled=True,
            state="scheduled",
        )
        self.assertEqual(verdict["category"], "transient")
        self.assertIn("transient", verdict["reason"].lower())

    def test_systemic_old_error(self):
        three_days_ago = (self.now - timedelta(hours=72)).isoformat()
        verdict = self._categorize(
            id="s1",
            name="OldErr",
            last_status="error",
            last_error="ConfigError: bad config",
            last_run_at=three_days_ago,
            enabled=True,
            state="scheduled",
        )
        self.assertEqual(verdict["category"], "systemic")
        self.assertEqual(verdict["action"], "disable")

    def test_systemic_boundary(self):
        """48.1 hours should be systemic."""
        just_past_threshold = (self.now - timedelta(hours=48, minutes=6)).isoformat()
        verdict = self._categorize(
            id="s2",
            name="Boundary",
            last_status="error",
            last_error="fail",
            last_run_at=just_past_threshold,
            enabled=True,
            state="scheduled",
        )
        self.assertEqual(verdict["category"], "systemic")
|
||||
|
||||
|
||||
class TestAuditJobs(unittest.TestCase):
    """Unit tests for audit_jobs() report assembly."""

    def test_empty(self):
        report = audit_jobs([])
        self.assertEqual(report["total_jobs"], 0)
        self.assertEqual(report["summary"]["healthy"], 0)

    def test_mixed_report(self):
        reference = datetime(2026, 4, 14, 20, 0, 0, tzinfo=timezone.utc)
        stale = (reference - timedelta(hours=72)).isoformat()
        fresh = (reference - timedelta(hours=1)).isoformat()

        fleet = [
            {"id": "h1", "name": "Healthy", "last_status": "ok", "enabled": True, "state": "scheduled"},
            {"id": "t1", "name": "Transient", "last_status": "error", "last_error": "timeout", "last_run_at": fresh, "enabled": True, "state": "scheduled"},
            {"id": "s1", "name": "Systemic", "last_status": "error", "last_error": "config bad", "last_run_at": stale, "enabled": True, "state": "scheduled"},
            {"id": "p1", "name": "Paused", "state": "paused", "paused_reason": "frozen"},
        ]

        report = audit_jobs(fleet)
        summary = report["summary"]
        self.assertEqual(summary["healthy"], 2)
        self.assertEqual(summary["transient_errors"], 1)
        self.assertEqual(summary["systemic_failures"], 1)
        self.assertEqual(len(report["systemic_jobs"]), 1)
        self.assertEqual(report["systemic_jobs"][0]["name"], "Systemic")
|
||||
|
||||
|
||||
if __name__ == "__main__":  # test-runner entry point when executed directly
    unittest.main()
|
||||
@@ -19,11 +19,9 @@ from glitch_patterns import (
|
||||
GlitchPattern,
|
||||
GlitchSeverity,
|
||||
MATRIX_GLITCH_PATTERNS,
|
||||
THREEJS_CATEGORIES,
|
||||
build_vision_prompt,
|
||||
get_pattern_by_category,
|
||||
get_patterns_by_severity,
|
||||
get_threejs_patterns,
|
||||
)
|
||||
|
||||
from matrix_glitch_detector import (
|
||||
@@ -42,7 +40,7 @@ class TestGlitchPatterns(unittest.TestCase):
|
||||
|
||||
def test_pattern_count(self):
|
||||
"""Verify we have a reasonable number of defined patterns."""
|
||||
self.assertGreaterEqual(len(MATRIX_GLITCH_PATTERNS), 14) # 10 generic + 6 Three.js
|
||||
self.assertGreaterEqual(len(MATRIX_GLITCH_PATTERNS), 8)
|
||||
|
||||
def test_all_patterns_have_required_fields(self):
|
||||
"""Every pattern must have category, name, description, severity, prompts."""
|
||||
@@ -90,9 +88,6 @@ class TestGlitchPatterns(unittest.TestCase):
|
||||
self.assertIn("Floating Object", prompt)
|
||||
self.assertIn("Z-Fighting", prompt)
|
||||
self.assertIn("Missing", prompt)
|
||||
# Three.js patterns should be included
|
||||
self.assertIn("Shader Compilation Failure", prompt)
|
||||
self.assertIn("Bloom Overflow", prompt)
|
||||
|
||||
def test_build_vision_prompt_subset(self):
|
||||
"""Vision prompt with subset should only include specified patterns."""
|
||||
@@ -253,7 +248,7 @@ class TestGlitchDetector(unittest.TestCase):
|
||||
|
||||
try:
|
||||
report = run_demo(output_path)
|
||||
self.assertEqual(len(report.glitches), 6) # 4 original + 2 Three.js
|
||||
self.assertEqual(len(report.glitches), 4)
|
||||
self.assertGreater(report.summary["total_glitches"], 0)
|
||||
self.assertTrue(output_path.exists())
|
||||
|
||||
@@ -265,93 +260,6 @@ class TestGlitchDetector(unittest.TestCase):
|
||||
output_path.unlink(missing_ok=True)
|
||||
|
||||
|
||||
class TestThreeJsPatterns(unittest.TestCase):
    """Tests for Three.js-specific glitch patterns (timmy-config#543)."""

    def test_get_threejs_patterns_returns_only_threejs(self):
        """get_threejs_patterns() should return only Three.js categories."""
        threejs_only = get_threejs_patterns()
        self.assertEqual(6, len(threejs_only))
        for pattern in threejs_only:
            self.assertIn(pattern.category, THREEJS_CATEGORIES)

    def test_threejs_patterns_have_required_fields(self):
        """All Three.js patterns must have valid fields."""
        for pattern in get_threejs_patterns():
            self.assertIsInstance(pattern.category, GlitchCategory)
            self.assertTrue(pattern.name)
            self.assertTrue(pattern.description)
            self.assertIsInstance(pattern.severity, GlitchSeverity)
            self.assertGreater(len(pattern.detection_prompts), 0)
            self.assertGreater(len(pattern.visual_indicators), 0)

    def test_shader_failure_is_critical(self):
        """Shader compilation failure should be CRITICAL severity."""
        shader_pattern = get_pattern_by_category(GlitchCategory.SHADER_FAILURE)
        self.assertIsNotNone(shader_pattern)
        self.assertEqual(GlitchSeverity.CRITICAL, shader_pattern.severity)

    def test_texture_placeholder_is_critical(self):
        """Texture placeholder (1x1 white) should be CRITICAL severity."""
        texture_pattern = get_pattern_by_category(GlitchCategory.TEXTURE_PLACEHOLDER)
        self.assertIsNotNone(texture_pattern)
        self.assertEqual(GlitchSeverity.CRITICAL, texture_pattern.severity)

    def test_infer_severity_shader_failure(self):
        """Shader failure should infer critical/high."""
        at_high_confidence = _infer_severity("shader_failure", 0.8)
        at_low_confidence = _infer_severity("shader_failure", 0.5)
        self.assertEqual("critical", at_high_confidence)
        self.assertEqual("high", at_low_confidence)

    def test_infer_severity_texture_placeholder(self):
        """Texture placeholder should infer critical/high."""
        at_high_confidence = _infer_severity("texture_placeholder", 0.8)
        at_low_confidence = _infer_severity("texture_placeholder", 0.5)
        self.assertEqual("critical", at_high_confidence)
        self.assertEqual("high", at_low_confidence)

    def test_infer_severity_uv_mapping(self):
        """UV mapping error should infer high/medium."""
        at_high_confidence = _infer_severity("uv_mapping_error", 0.8)
        at_low_confidence = _infer_severity("uv_mapping_error", 0.5)
        self.assertEqual("high", at_high_confidence)
        self.assertEqual("medium", at_low_confidence)

    def test_infer_severity_frustum_culling(self):
        """Frustum culling should infer medium/low."""
        at_high_confidence = _infer_severity("frustum_culling", 0.7)
        at_low_confidence = _infer_severity("frustum_culling", 0.4)
        self.assertEqual("medium", at_high_confidence)
        self.assertEqual("low", at_low_confidence)

    def test_infer_severity_shadow_map(self):
        """Shadow map artifact should infer medium/low."""
        at_high_confidence = _infer_severity("shadow_map_artifact", 0.7)
        at_low_confidence = _infer_severity("shadow_map_artifact", 0.4)
        self.assertEqual("medium", at_high_confidence)
        self.assertEqual("low", at_low_confidence)

    def test_infer_severity_bloom_overflow(self):
        """Bloom overflow should infer medium/low (default path)."""
        at_high_confidence = _infer_severity("bloom_overflow", 0.7)
        at_low_confidence = _infer_severity("bloom_overflow", 0.4)
        self.assertEqual("medium", at_high_confidence)
        self.assertEqual("low", at_low_confidence)

    def test_threejs_patterns_in_vision_prompt(self):
        """Three.js patterns should appear in the composite vision prompt."""
        composite_prompt = build_vision_prompt()
        for category_value in (
            "shader_failure",
            "texture_placeholder",
            "uv_mapping_error",
            "frustum_culling",
            "shadow_map_artifact",
            "bloom_overflow",
        ):
            self.assertIn(category_value, composite_prompt)

    def test_threejs_subset_prompt(self):
        """Building prompt from Three.js-only patterns should work."""
        subset_prompt = build_vision_prompt(get_threejs_patterns())
        self.assertIn("Shader Compilation Failure", subset_prompt)
        self.assertNotIn("Floating Object", subset_prompt)  # generic, not Three.js

    def test_report_metadata_version(self):
        """Report metadata should reference both issues."""
        demo_report = run_demo()
        self.assertEqual("0.2.0", demo_report.metadata["detector_version"])
        self.assertIn("543", demo_report.metadata["reference"])
|
||||
|
||||
|
||||
|
||||
class TestIntegration(unittest.TestCase):
|
||||
"""Integration-level tests."""
|
||||
|
||||
@@ -368,13 +276,6 @@ class TestIntegration(unittest.TestCase):
|
||||
expected = {"floating_assets", "z_fighting", "missing_textures", "clipping", "broken_normals"}
|
||||
self.assertTrue(expected.issubset(category_values))
|
||||
|
||||
def test_patterns_cover_threejs_themes(self):
    """Patterns should cover Three.js-specific glitch themes (#543)."""
    defined_categories = {pattern.category.value for pattern in MATRIX_GLITCH_PATTERNS}
    required_categories = {
        "shader_failure",
        "texture_placeholder",
        "uv_mapping_error",
        "frustum_culling",
        "shadow_map_artifact",
        "bloom_overflow",
    }
    self.assertTrue(required_categories <= defined_categories)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
||||
Reference in New Issue
Block a user