Compare commits
12 Commits
fix/685-py
...
sprint/iss
| Author | SHA1 | Date | |
|---|---|---|---|
| 817785d763 | |||
|
|
3603030235 | ||
| 35a191f7b1 | |||
| e987e1b870 | |||
| 19278513b4 | |||
| 1088bf8983 | |||
| 94f0a132d4 | |||
| 279356bed6 | |||
| 511ff863c2 | |||
| b6e3a647b0 | |||
| e14158676d | |||
| 26e39d8949 |
@@ -1,4 +1,3 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
"""
|
"""
|
||||||
Glitch pattern definitions for 3D world anomaly detection.
|
Glitch pattern definitions for 3D world anomaly detection.
|
||||||
|
|
||||||
@@ -32,6 +31,14 @@ class GlitchCategory(Enum):
|
|||||||
WATER_REFLECTION = "water_reflection"
|
WATER_REFLECTION = "water_reflection"
|
||||||
SKYBOX_SEAM = "skybox_seam"
|
SKYBOX_SEAM = "skybox_seam"
|
||||||
|
|
||||||
|
# Three.js-specific categories (ref: timmy-config#543)
|
||||||
|
SHADER_FAILURE = "shader_failure"
|
||||||
|
TEXTURE_PLACEHOLDER = "texture_placeholder"
|
||||||
|
UV_MAPPING_ERROR = "uv_mapping_error"
|
||||||
|
FRUSTUM_CULLING = "frustum_culling"
|
||||||
|
SHADOW_MAP_ARTIFACT = "shadow_map_artifact"
|
||||||
|
BLOOM_OVERFLOW = "bloom_overflow"
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class GlitchPattern:
|
class GlitchPattern:
|
||||||
@@ -242,6 +249,123 @@ MATRIX_GLITCH_PATTERNS: list[GlitchPattern] = [
|
|||||||
],
|
],
|
||||||
confidence_threshold=0.45,
|
confidence_threshold=0.45,
|
||||||
),
|
),
|
||||||
|
|
||||||
|
# --- Three.js-Specific Glitch Patterns (ref: timmy-config#543) ---
|
||||||
|
GlitchPattern(
|
||||||
|
category=GlitchCategory.SHADER_FAILURE,
|
||||||
|
name="Shader Compilation Failure",
|
||||||
|
description="Three.js shader failed to compile, rendering the material as solid black. "
|
||||||
|
"Common when custom ShaderMaterial has syntax errors or missing uniforms.",
|
||||||
|
severity=GlitchSeverity.CRITICAL,
|
||||||
|
detection_prompts=[
|
||||||
|
"Look for objects or surfaces rendered as pure black (#000000) that should have visible textures or materials.",
|
||||||
|
"Identify geometry that appears completely dark while surrounding objects are normally lit.",
|
||||||
|
"Check for objects where the material seems to 'absorb all light' — flat black with no shading gradient.",
|
||||||
|
],
|
||||||
|
visual_indicators=[
|
||||||
|
"solid black object with no shading",
|
||||||
|
"geometry rendered as silhouette",
|
||||||
|
"material appears to absorb light entirely",
|
||||||
|
"black patch inconsistent with scene lighting",
|
||||||
|
],
|
||||||
|
confidence_threshold=0.7,
|
||||||
|
),
|
||||||
|
GlitchPattern(
|
||||||
|
category=GlitchCategory.TEXTURE_PLACEHOLDER,
|
||||||
|
name="Three.js Texture Not Loaded",
|
||||||
|
description="Three.js failed to load the texture asset, rendering a 1x1 white pixel "
|
||||||
|
"stretched across the entire surface. Distinguished from missing-texture by "
|
||||||
|
"the uniform white/grey appearance rather than magenta.",
|
||||||
|
severity=GlitchSeverity.CRITICAL,
|
||||||
|
detection_prompts=[
|
||||||
|
"Look for surfaces that are uniformly white or light grey with no texture detail, even on large geometry.",
|
||||||
|
"Identify objects where the texture appears as a single solid color stretched across complex UVs.",
|
||||||
|
"Check for surfaces that look 'blank' or 'unloaded' — flat white/grey where detail should exist.",
|
||||||
|
],
|
||||||
|
visual_indicators=[
|
||||||
|
"uniform white or light grey surface",
|
||||||
|
"no texture detail on large geometry",
|
||||||
|
"stretched single-color appearance",
|
||||||
|
"1x1 pixel placeholder stretched to fill UV space",
|
||||||
|
],
|
||||||
|
confidence_threshold=0.65,
|
||||||
|
),
|
||||||
|
GlitchPattern(
|
||||||
|
category=GlitchCategory.UV_MAPPING_ERROR,
|
||||||
|
name="BufferGeometry UV Mapping Error",
|
||||||
|
description="Three.js BufferGeometry has incorrect UV coordinates, causing textures to "
|
||||||
|
"appear stretched, compressed, or mapped to the wrong faces.",
|
||||||
|
severity=GlitchSeverity.HIGH,
|
||||||
|
detection_prompts=[
|
||||||
|
"Look for textures that appear dramatically stretched in one direction on specific faces.",
|
||||||
|
"Identify surfaces where the texture pattern is distorted but other nearby surfaces look correct.",
|
||||||
|
"Check for faces where the texture seems 'smeared' or mapped with incorrect aspect ratio.",
|
||||||
|
],
|
||||||
|
visual_indicators=[
|
||||||
|
"texture stretching on specific faces",
|
||||||
|
"distorted pattern on geometry",
|
||||||
|
"smeared texture appearance",
|
||||||
|
"aspect ratio mismatch between texture and surface",
|
||||||
|
],
|
||||||
|
confidence_threshold=0.6,
|
||||||
|
),
|
||||||
|
GlitchPattern(
|
||||||
|
category=GlitchCategory.FRUSTUM_CULLING,
|
||||||
|
name="Frustum Culling Artifact",
|
||||||
|
description="Three.js frustum culling incorrectly marks objects as outside the camera "
|
||||||
|
"frustum, causing them to pop in/out of existence at screen edges.",
|
||||||
|
severity=GlitchSeverity.MEDIUM,
|
||||||
|
detection_prompts=[
|
||||||
|
"Look for objects that are partially visible at the edge of the frame — half-rendered or cut off unnaturally.",
|
||||||
|
"Identify geometry that seems to 'pop' into existence as the view angle changes.",
|
||||||
|
"Check screen edges for objects that appear suddenly rather than smoothly entering the viewport.",
|
||||||
|
],
|
||||||
|
visual_indicators=[
|
||||||
|
"half-visible object at screen edge",
|
||||||
|
"object popping into frame",
|
||||||
|
"abrupt appearance of geometry",
|
||||||
|
"bounding box visible but mesh missing",
|
||||||
|
],
|
||||||
|
confidence_threshold=0.55,
|
||||||
|
),
|
||||||
|
GlitchPattern(
|
||||||
|
category=GlitchCategory.SHADOW_MAP_ARTIFACT,
|
||||||
|
name="Shadow Map Resolution Artifact",
|
||||||
|
description="Three.js shadow map has insufficient resolution, causing pixelated, "
|
||||||
|
"blocky shadows with visible texel edges instead of smooth shadow gradients.",
|
||||||
|
severity=GlitchSeverity.MEDIUM,
|
||||||
|
detection_prompts=[
|
||||||
|
"Look for shadows with visible blocky or pixelated edges instead of smooth gradients.",
|
||||||
|
"Identify shadow maps where individual texels (texture pixels) are clearly visible.",
|
||||||
|
"Check for shadows that appear as jagged stair-stepped patterns rather than soft edges.",
|
||||||
|
],
|
||||||
|
visual_indicators=[
|
||||||
|
"blocky shadow edges",
|
||||||
|
"visible texel grid in shadows",
|
||||||
|
"stair-stepped shadow boundary",
|
||||||
|
"pixelated shadow gradient",
|
||||||
|
],
|
||||||
|
confidence_threshold=0.55,
|
||||||
|
),
|
||||||
|
GlitchPattern(
|
||||||
|
category=GlitchCategory.BLOOM_OVERFLOW,
|
||||||
|
name="Post-Processing Bloom Overflow",
|
||||||
|
description="Three.js UnrealBloomPass or similar post-processing bloom effect is too "
|
||||||
|
"intense, causing bright areas to bleed glow into surrounding geometry.",
|
||||||
|
severity=GlitchSeverity.LOW,
|
||||||
|
detection_prompts=[
|
||||||
|
"Look for bright areas that have an unusually large, soft glow bleeding into adjacent surfaces.",
|
||||||
|
"Identify scenes where light sources appear to have a 'halo' that extends beyond physical plausibility.",
|
||||||
|
"Check for bright objects whose glow color bleeds onto nearby unrelated geometry.",
|
||||||
|
],
|
||||||
|
visual_indicators=[
|
||||||
|
"excessive glow bleeding from bright surfaces",
|
||||||
|
"halo around light sources",
|
||||||
|
"bloom color tinting adjacent geometry",
|
||||||
|
"glow bleeding beyond object boundaries",
|
||||||
|
],
|
||||||
|
confidence_threshold=0.5,
|
||||||
|
),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
@@ -290,6 +414,23 @@ def build_vision_prompt(patterns: list[GlitchPattern] | None = None) -> str:
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Three.js-specific category set for filtering (ref: timmy-config#543)
|
||||||
|
THREEJS_CATEGORIES = {
|
||||||
|
GlitchCategory.SHADER_FAILURE,
|
||||||
|
GlitchCategory.TEXTURE_PLACEHOLDER,
|
||||||
|
GlitchCategory.UV_MAPPING_ERROR,
|
||||||
|
GlitchCategory.FRUSTUM_CULLING,
|
||||||
|
GlitchCategory.SHADOW_MAP_ARTIFACT,
|
||||||
|
GlitchCategory.BLOOM_OVERFLOW,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def get_threejs_patterns() -> list[GlitchPattern]:
|
||||||
|
"""Return only Three.js-specific glitch patterns."""
|
||||||
|
return [p for p in MATRIX_GLITCH_PATTERNS if p.category in THREEJS_CATEGORIES]
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import json
|
import json
|
||||||
print(f"Loaded {len(MATRIX_GLITCH_PATTERNS)} glitch patterns:\n")
|
print(f"Loaded {len(MATRIX_GLITCH_PATTERNS)} glitch patterns:\n")
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Usage:
|
|||||||
python matrix_glitch_detector.py <url> [--angles 4] [--output report.json]
|
python matrix_glitch_detector.py <url> [--angles 4] [--output report.json]
|
||||||
python matrix_glitch_detector.py --demo # Run with synthetic test data
|
python matrix_glitch_detector.py --demo # Run with synthetic test data
|
||||||
|
|
||||||
Ref: timmy-config#491
|
Ref: timmy-config#491, timmy-config#543
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
@@ -33,6 +33,7 @@ from glitch_patterns import (
|
|||||||
MATRIX_GLITCH_PATTERNS,
|
MATRIX_GLITCH_PATTERNS,
|
||||||
build_vision_prompt,
|
build_vision_prompt,
|
||||||
get_patterns_by_severity,
|
get_patterns_by_severity,
|
||||||
|
get_threejs_patterns,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -345,14 +346,17 @@ def _parse_vision_response(
|
|||||||
|
|
||||||
def _infer_severity(category: str, confidence: float) -> str:
|
def _infer_severity(category: str, confidence: float) -> str:
|
||||||
"""Infer severity from category and confidence when not provided."""
|
"""Infer severity from category and confidence when not provided."""
|
||||||
critical_cats = {"missing_textures", "clipping"}
|
critical_cats = {"missing_textures", "clipping", "shader_failure", "texture_placeholder"}
|
||||||
high_cats = {"floating_assets", "broken_normals"}
|
high_cats = {"floating_assets", "broken_normals", "uv_mapping_error"}
|
||||||
|
medium_cats = {"frustum_culling", "shadow_map_artifact"}
|
||||||
|
|
||||||
cat_lower = category.lower()
|
cat_lower = category.lower()
|
||||||
if any(c in cat_lower for c in critical_cats):
|
if any(c in cat_lower for c in critical_cats):
|
||||||
return "critical" if confidence > 0.7 else "high"
|
return "critical" if confidence > 0.7 else "high"
|
||||||
if any(c in cat_lower for c in high_cats):
|
if any(c in cat_lower for c in high_cats):
|
||||||
return "high" if confidence > 0.7 else "medium"
|
return "high" if confidence > 0.7 else "medium"
|
||||||
|
if any(c in cat_lower for c in medium_cats):
|
||||||
|
return "medium" if confidence > 0.6 else "low"
|
||||||
return "medium" if confidence > 0.6 else "low"
|
return "medium" if confidence > 0.6 else "low"
|
||||||
|
|
||||||
|
|
||||||
@@ -389,9 +393,9 @@ def build_report(
|
|||||||
),
|
),
|
||||||
},
|
},
|
||||||
metadata={
|
metadata={
|
||||||
"detector_version": "0.1.0",
|
"detector_version": "0.2.0",
|
||||||
"pattern_count": len(MATRIX_GLITCH_PATTERNS),
|
"pattern_count": len(MATRIX_GLITCH_PATTERNS),
|
||||||
"reference": "timmy-config#491",
|
"reference": "timmy-config#491, timmy-config#543",
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -460,6 +464,30 @@ def run_demo(output_path: Optional[Path] = None) -> ScanResult:
|
|||||||
screenshot_index=3,
|
screenshot_index=3,
|
||||||
screenshot_angle="left",
|
screenshot_angle="left",
|
||||||
),
|
),
|
||||||
|
DetectedGlitch(
|
||||||
|
id=str(uuid.uuid4())[:8],
|
||||||
|
category="shader_failure",
|
||||||
|
name="Black Material on Portal Frame",
|
||||||
|
description="Portal frame rendered as solid black — shader compilation failed (missing uniform u_time)",
|
||||||
|
severity="critical",
|
||||||
|
confidence=0.91,
|
||||||
|
location_x=45.0,
|
||||||
|
location_y=30.0,
|
||||||
|
screenshot_index=0,
|
||||||
|
screenshot_angle="front",
|
||||||
|
),
|
||||||
|
DetectedGlitch(
|
||||||
|
id=str(uuid.uuid4())[:8],
|
||||||
|
category="shadow_map_artifact",
|
||||||
|
name="Pixelated Character Shadow",
|
||||||
|
description="Character shadow shows visible texel grid — shadow map resolution too low (512x512)",
|
||||||
|
severity="medium",
|
||||||
|
confidence=0.78,
|
||||||
|
location_x=52.0,
|
||||||
|
location_y=75.0,
|
||||||
|
screenshot_index=1,
|
||||||
|
screenshot_angle="right",
|
||||||
|
),
|
||||||
]
|
]
|
||||||
|
|
||||||
print(f"[*] Detected {len(demo_glitches)} glitches")
|
print(f"[*] Detected {len(demo_glitches)} glitches")
|
||||||
@@ -496,6 +524,11 @@ Examples:
|
|||||||
help="Minimum severity to include in report",
|
help="Minimum severity to include in report",
|
||||||
)
|
)
|
||||||
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
|
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
|
||||||
|
parser.add_argument(
|
||||||
|
"--threejs",
|
||||||
|
action="store_true",
|
||||||
|
help="Focus on Three.js-specific glitch patterns only (shader, texture, UV, culling, shadow, bloom)",
|
||||||
|
)
|
||||||
|
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
@@ -525,9 +558,13 @@ Examples:
|
|||||||
screenshots = capture_screenshots(args.url, angles, screenshots_dir)
|
screenshots = capture_screenshots(args.url, angles, screenshots_dir)
|
||||||
print(f"[*] Captured {len(screenshots)} screenshots")
|
print(f"[*] Captured {len(screenshots)} screenshots")
|
||||||
|
|
||||||
# Filter patterns by severity
|
# Filter patterns by severity and type
|
||||||
min_sev = GlitchSeverity(args.min_severity)
|
min_sev = GlitchSeverity(args.min_severity)
|
||||||
patterns = get_patterns_by_severity(min_sev)
|
patterns = get_patterns_by_severity(min_sev)
|
||||||
|
if args.threejs:
|
||||||
|
threejs_patterns = get_threejs_patterns()
|
||||||
|
patterns = [p for p in patterns if p in threejs_patterns]
|
||||||
|
print(f"[*] Three.js-focused mode: {len(patterns)} patterns")
|
||||||
|
|
||||||
# Analyze with vision AI
|
# Analyze with vision AI
|
||||||
print(f"[*] Analyzing with vision AI ({len(patterns)} patterns)...")
|
print(f"[*] Analyzing with vision AI ({len(patterns)} patterns)...")
|
||||||
|
|||||||
271
bin/preflight-provider-check.py
Normal file
271
bin/preflight-provider-check.py
Normal file
@@ -0,0 +1,271 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Pre-Flight Provider Check Script
|
||||||
|
Issue #508: [Robustness] Credential drain detection — provider health checks
|
||||||
|
|
||||||
|
Pre-flight check before session launch: verifies provider credentials and balance.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python3 preflight-provider-check.py # Check all providers
|
||||||
|
python3 preflight-provider-check.py --launch # Check and return exit code
|
||||||
|
python3 preflight-provider-check.py --balance # Check OpenRouter balance
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os, sys, json, yaml, urllib.request
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
HERMES_HOME = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes"))
|
||||||
|
LOG_DIR = Path.home() / ".local" / "timmy" / "fleet-health"
|
||||||
|
LOG_FILE = LOG_DIR / "preflight-check.log"
|
||||||
|
|
||||||
|
def log(msg):
    """Append *msg* to the preflight log file; echo to stdout unless --quiet."""
    stamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
    entry = "[" + stamp + "] " + msg

    LOG_DIR.mkdir(parents=True, exist_ok=True)
    with open(LOG_FILE, "a") as fh:
        fh.write(entry + "\n")

    if "--quiet" not in sys.argv:
        print(entry)
|
||||||
|
|
||||||
|
def get_provider_api_key(provider):
    """Return the API key for *provider* from the .env file or the environment.

    Looks for a line like ``<PROVIDER>_API_KEY=...`` in ``$HERMES_HOME/.env``
    first, then falls back to the process environment.
    """
    var_name = provider.upper() + "_API_KEY"
    env_file = HERMES_HOME / ".env"

    if env_file.exists():
        with open(env_file) as fh:
            for raw in fh:
                stripped = raw.strip()
                if stripped.startswith(var_name + "="):
                    # Drop surrounding quotes commonly found in .env files.
                    return stripped.split("=", 1)[1].strip().strip("'\"")

    return os.environ.get(var_name)
|
||||||
|
|
||||||
|
def check_openrouter_balance(api_key):
    """Check OpenRouter balance via GET /api/v1/auth/key.

    Returns a ``(healthy, message, remaining)`` tuple where *remaining* is
    the credit balance, ``0`` on any failure, or ``None`` when the key has
    no spend limit ("unlimited or unknown").
    """
    if not api_key:
        return False, "No API key", 0

    try:
        req = urllib.request.Request(
            "https://openrouter.ai/api/v1/auth/key",
            headers={"Authorization": "Bearer " + api_key}
        )
        # Context manager closes the HTTP response (original leaked the socket).
        with urllib.request.urlopen(req, timeout=10) as resp:
            data = json.loads(resp.read())

        # "limit" is the credit cap (falsy for unlimited keys); "usage" is spend.
        credits = data.get("data", {}).get("limit", 0)
        usage = data.get("data", {}).get("usage", 0)
        remaining = credits - usage if credits else None

        if remaining is not None and remaining <= 0:
            return False, "No credits remaining", 0
        elif remaining is not None:
            return True, "Credits available", remaining
        else:
            return True, "Unlimited or unknown balance", None

    except urllib.error.HTTPError as e:
        if e.code == 401:
            return False, "Invalid API key", 0
        return False, "HTTP " + str(e.code), 0
    except Exception as e:
        # Network/parse errors: report the (truncated) reason, not a crash.
        return False, str(e)[:100], 0
|
||||||
|
|
||||||
|
def check_nous_key(api_key):
    """Validate a Nous Research API key with a minimal GET /v1/models call.

    Returns ``(healthy, message)``.
    """
    if not api_key:
        return False, "No API key"

    try:
        req = urllib.request.Request(
            "https://inference.nousresearch.com/v1/models",
            headers={"Authorization": "Bearer " + api_key}
        )
        # Context manager closes the HTTP response (original leaked the socket).
        with urllib.request.urlopen(req, timeout=10) as resp:
            if resp.status == 200:
                return True, "Valid key"
            return False, "HTTP " + str(resp.status)

    except urllib.error.HTTPError as e:
        if e.code == 401:
            return False, "Invalid API key"
        if e.code == 403:
            return False, "Forbidden"
        return False, "HTTP " + str(e.code)
    except Exception as e:
        return False, str(e)[:100]
|
||||||
|
|
||||||
|
def check_anthropic_key(api_key):
    """Validate an Anthropic API key with a minimal GET /v1/models call.

    Returns ``(healthy, message)``.
    """
    if not api_key:
        return False, "No API key"

    try:
        req = urllib.request.Request(
            "https://api.anthropic.com/v1/models",
            headers={
                "x-api-key": api_key,
                "anthropic-version": "2023-06-01"
            }
        )
        # Context manager closes the HTTP response (original leaked the socket).
        with urllib.request.urlopen(req, timeout=10) as resp:
            if resp.status == 200:
                return True, "Valid key"
            return False, "HTTP " + str(resp.status)

    except urllib.error.HTTPError as e:
        if e.code == 401:
            return False, "Invalid API key"
        if e.code == 403:
            return False, "Forbidden"
        return False, "HTTP " + str(e.code)
    except Exception as e:
        return False, str(e)[:100]
|
||||||
|
|
||||||
|
def check_ollama():
    """Check whether a local Ollama daemon responds on port 11434.

    Returns ``(healthy, message)``; on success the message reports the
    number of locally loaded models.
    """
    try:
        req = urllib.request.Request("http://localhost:11434/api/tags")
        # Context manager closes the HTTP response (original leaked the socket).
        with urllib.request.urlopen(req, timeout=5) as resp:
            if resp.status != 200:
                return False, "HTTP " + str(resp.status)
            data = json.loads(resp.read())

        models = data.get("models", [])
        return True, str(len(models)) + " models loaded"

    except Exception as e:
        # Connection refused etc. simply means Ollama is not running.
        return False, str(e)[:100]
|
||||||
|
|
||||||
|
def get_configured_provider():
    """Return the provider name from the global config.yaml, or None.

    Reads ``$HERMES_HOME/config.yaml`` and returns ``model.provider`` when
    present; a missing or unreadable config yields ``None``.
    """
    config_file = HERMES_HOME / "config.yaml"
    if not config_file.exists():
        return None

    try:
        with open(config_file) as fh:
            config = yaml.safe_load(fh)

        model_config = config.get("model", {})
        if isinstance(model_config, dict):
            return model_config.get("provider")
    except Exception:
        # Narrowed from a bare `except:`; malformed/unreadable config is
        # deliberately treated as "not configured".
        pass

    return None
|
||||||
|
|
||||||
|
def run_preflight_check():
    """Run health checks on every provider; return ``(results, configured)``.

    *results* maps provider name to a dict with ``healthy``/``message``
    (plus ``balance`` for OpenRouter); *configured* is the provider named
    in the global config, if any.
    """
    log("=== Pre-Flight Provider Check ===")

    results = {}

    # OpenRouter includes a balance figure alongside the health flag.
    or_ok, or_msg, or_balance = check_openrouter_balance(get_provider_api_key("openrouter"))
    results["openrouter"] = {"healthy": or_ok, "message": or_msg, "balance": or_balance}

    nous_ok, nous_msg = check_nous_key(get_provider_api_key("nous"))
    results["nous"] = {"healthy": nous_ok, "message": nous_msg}

    ant_ok, ant_msg = check_anthropic_key(get_provider_api_key("anthropic"))
    results["anthropic"] = {"healthy": ant_ok, "message": ant_msg}

    ollama_ok, ollama_msg = check_ollama()
    results["ollama"] = {"healthy": ollama_ok, "message": ollama_msg}

    configured = get_configured_provider()

    healthy_count = sum(1 for r in results.values() if r["healthy"])
    log("Results: " + str(healthy_count) + "/" + str(len(results)) + " providers healthy")

    for name, info in results.items():
        status = "HEALTHY" if info["healthy"] else "UNHEALTHY"
        extra = ""
        if name == "openrouter" and info.get("balance") is not None:
            extra = " (balance: " + str(info["balance"]) + ")"
        log("  " + name + ": " + status + " - " + info["message"] + extra)

    if configured:
        log("Configured provider: " + configured)
        if configured in results and not results[configured]["healthy"]:
            log("WARNING: Configured provider " + configured + " is UNHEALTHY!")

    return results, configured
|
||||||
|
|
||||||
|
def check_launch_readiness():
    """Decide whether sessions may launch; return ``(ready, reason)``."""
    results, configured = run_preflight_check()

    # An explicitly configured provider that is down blocks launch outright.
    if configured and configured in results and not results[configured]["healthy"]:
        log("LAUNCH BLOCKED: Configured provider " + configured + " is unhealthy")
        return False, configured + " is unhealthy"

    # Otherwise any healthy provider is enough to proceed.
    healthy_providers = [name for name, info in results.items() if info["healthy"]]
    if not healthy_providers:
        log("LAUNCH BLOCKED: No healthy providers available")
        return False, "No healthy providers"

    log("LAUNCH READY: " + str(len(healthy_providers)) + " healthy providers available")
    return True, "Ready"
|
||||||
|
|
||||||
|
def show_balance():
    """Print the OpenRouter credit balance (or the failure reason) to stdout."""
    api_key = get_provider_api_key("openrouter")
    if not api_key:
        print("No OpenRouter API key found")
        return

    ok, msg, balance = check_openrouter_balance(api_key)

    # A concrete balance is only printable on success; every other case
    # (failure, or an unlimited key) just reports the status message.
    if ok and balance is not None:
        print("OpenRouter balance: " + str(balance) + " credits")
    else:
        print("OpenRouter: " + msg)
|
||||||
|
|
||||||
|
def main():
    """CLI dispatch: ``--balance``, ``--launch``, or a plain health check."""
    if "--balance" in sys.argv:
        show_balance()
        return
    if "--launch" in sys.argv:
        ready, message = check_launch_readiness()
        if ready:
            print("READY")
            sys.exit(0)
        print("BLOCKED: " + message)
        sys.exit(1)
    run_preflight_check()


if __name__ == "__main__":
    main()
|
||||||
411
bin/provider-health-monitor.py
Normal file
411
bin/provider-health-monitor.py
Normal file
@@ -0,0 +1,411 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Provider Health Monitor Script
|
||||||
|
Issue #509: [Robustness] Provider-aware profile config — auto-switch on failure
|
||||||
|
|
||||||
|
Monitors provider health and automatically switches profiles to working providers.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python3 provider-health-monitor.py # Run once
|
||||||
|
python3 provider-health-monitor.py --daemon # Run continuously
|
||||||
|
python3 provider-health-monitor.py --status # Show provider health
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os, sys, json, yaml, urllib.request, time
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
HERMES_HOME = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes"))
|
||||||
|
PROFILES_DIR = HERMES_HOME / "profiles"
|
||||||
|
LOG_DIR = Path.home() / ".local" / "timmy" / "fleet-health"
|
||||||
|
STATE_FILE = LOG_DIR / "tmux-state.json"
|
||||||
|
LOG_FILE = LOG_DIR / "provider-health.log"
|
||||||
|
|
||||||
|
# Provider test endpoints
|
||||||
|
PROVIDER_TESTS = {
|
||||||
|
"openrouter": {
|
||||||
|
"url": "https://openrouter.ai/api/v1/models",
|
||||||
|
"method": "GET",
|
||||||
|
"headers": lambda api_key: {"Authorization": "Bearer " + api_key},
|
||||||
|
"timeout": 10
|
||||||
|
},
|
||||||
|
"anthropic": {
|
||||||
|
"url": "https://api.anthropic.com/v1/models",
|
||||||
|
"method": "GET",
|
||||||
|
"headers": lambda api_key: {"x-api-key": api_key, "anthropic-version": "2023-06-01"},
|
||||||
|
"timeout": 10
|
||||||
|
},
|
||||||
|
"nous": {
|
||||||
|
"url": "https://inference.nousresearch.com/v1/models",
|
||||||
|
"method": "GET",
|
||||||
|
"headers": lambda api_key: {"Authorization": "Bearer " + api_key},
|
||||||
|
"timeout": 10
|
||||||
|
},
|
||||||
|
"kimi-coding": {
|
||||||
|
"url": "https://api.kimi.com/coding/v1/models",
|
||||||
|
"method": "GET",
|
||||||
|
"headers": lambda api_key: {"x-api-key": api_key, "x-api-provider": "kimi-coding"},
|
||||||
|
"timeout": 10
|
||||||
|
},
|
||||||
|
"ollama": {
|
||||||
|
"url": "http://localhost:11434/api/tags",
|
||||||
|
"method": "GET",
|
||||||
|
"headers": lambda api_key: {},
|
||||||
|
"timeout": 5
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def log(msg):
    """Append *msg* to the health-monitor log; echo to stdout unless --quiet."""
    stamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
    entry = "[" + stamp + "] " + msg

    LOG_DIR.mkdir(parents=True, exist_ok=True)
    with open(LOG_FILE, "a") as fh:
        fh.write(entry + "\n")

    if "--quiet" not in sys.argv:
        print(entry)
|
||||||
|
|
||||||
|
def get_provider_api_key(provider):
    """Return the API key for *provider* from the .env file or the environment.

    Looks for ``<PROVIDER>_API_KEY=...`` in ``$HERMES_HOME/.env`` first,
    then falls back to the process environment.
    """
    var_name = provider.upper() + "_API_KEY"
    env_file = HERMES_HOME / ".env"

    if env_file.exists():
        with open(env_file) as fh:
            for raw in fh:
                stripped = raw.strip()
                if stripped.startswith(var_name + "="):
                    # Drop surrounding quotes commonly found in .env files.
                    return stripped.split("=", 1)[1].strip().strip("'\"")

    return os.environ.get(var_name)
|
||||||
|
|
||||||
|
def test_provider(provider, api_key=None):
    """Probe *provider*'s health endpoint; return ``(healthy, message)``.

    Endpoint, method, headers, and timeout come from PROVIDER_TESTS.
    Unknown providers report unhealthy rather than raising.
    """
    config = PROVIDER_TESTS.get(provider)
    if not config:
        return False, "Unknown provider: " + provider

    headers = config["headers"](api_key or "")

    try:
        req = urllib.request.Request(
            config["url"],
            headers=headers,
            method=config["method"]
        )
        # Context manager closes the HTTP response (original leaked the socket).
        with urllib.request.urlopen(req, timeout=config["timeout"]) as resp:
            if resp.status == 200:
                return True, "Healthy"
            return False, "HTTP " + str(resp.status)

    except urllib.error.HTTPError as e:
        if e.code == 401:
            return False, "Unauthorized (401)"
        if e.code == 403:
            return False, "Forbidden (403)"
        if e.code == 429:
            # Rate-limited still proves the endpoint and key are live.
            return True, "Rate limited but accessible"
        return False, "HTTP " + str(e.code)
    except Exception as e:
        return False, str(e)[:100]
|
||||||
|
|
||||||
|
def _providers_from_config(config):
    """Extract provider names from one parsed config dict (model + auxiliary)."""
    found = set()
    model_config = config.get("model", {})
    if isinstance(model_config, dict):
        provider = model_config.get("provider", "")
        if provider:
            found.add(provider)
    auxiliary = config.get("auxiliary", {})
    for aux_config in auxiliary.values():
        if isinstance(aux_config, dict):
            provider = aux_config.get("provider", "")
            # "auto" is a placeholder, not a concrete provider to probe.
            if provider and provider != "auto":
                found.add(provider)
    return found


def get_all_providers():
    """Collect provider names from the global config plus every profile config.

    The common providers (openrouter, nous, ollama) are always included so
    they get probed even when nothing references them.
    """
    providers = set()

    # Global config.
    global_config = HERMES_HOME / "config.yaml"
    if global_config.exists():
        try:
            with open(global_config) as fh:
                providers |= _providers_from_config(yaml.safe_load(fh))
        except Exception:
            # Narrowed from a bare `except:`; a malformed global config is
            # skipped and we fall through to profiles and defaults.
            pass

    # Per-profile configs.
    if PROFILES_DIR.exists():
        for profile_dir in PROFILES_DIR.iterdir():
            if not profile_dir.is_dir():
                continue
            config_file = profile_dir / "config.yaml"
            if not config_file.exists():
                continue
            try:
                with open(config_file) as fh:
                    providers |= _providers_from_config(yaml.safe_load(fh))
            except Exception:
                # One broken profile should not abort the whole scan.
                pass

    # Add common providers even if not configured.
    providers.update(["openrouter", "nous", "ollama"])

    return list(providers)
|
||||||
|
|
||||||
|
def build_health_map():
    """Test every known provider and return ``{provider: health info}``.

    Each entry records the health flag, the probe message, an ISO UTC
    timestamp of the test, and whether an API key was found.
    """
    providers = get_all_providers()
    health_map = {}

    log("Testing " + str(len(providers)) + " providers...")

    for name in providers:
        key = get_provider_api_key(name)
        healthy, message = test_provider(name, key)

        health_map[name] = {
            "healthy": healthy,
            "message": message,
            "last_test": datetime.now(timezone.utc).isoformat(),
            "api_key_present": bool(key)
        }

        log("  " + name + ": " + ("HEALTHY" if healthy else "UNHEALTHY") + " - " + message)

    return health_map
|
||||||
|
|
||||||
|
def get_fallback_providers(health_map):
    """Return healthy provider names, preferred providers first.

    Providers in the fixed priority list come first (in that order),
    followed by any other healthy providers found in *health_map*.
    """
    preferred = ["nous", "openrouter", "ollama", "anthropic", "kimi-coding"]

    ordered = [name for name in preferred
               if name in health_map and health_map[name]["healthy"]]

    # Append remaining healthy providers the priority list did not cover.
    for name, info in health_map.items():
        if info["healthy"] and name not in ordered:
            ordered.append(name)

    return ordered
|
||||||
|
|
||||||
|
def update_profile_config(profile_name, new_provider):
    """Point *profile_name*'s config at *new_provider*.

    Rewrites the profile's config.yaml so the main model — and any
    auxiliary models that used the old provider — use *new_provider*.

    Returns:
        (True, "Updated") on success, or (False, reason) on failure.
    """
    config_file = PROFILES_DIR / profile_name / "config.yaml"

    if not config_file.exists():
        return False, "Config file not found"

    try:
        with open(config_file) as f:
            # An empty YAML file loads as None; normalize to a dict so the
            # updates below cannot raise TypeError on a None config.
            config = yaml.safe_load(f) or {}

        # Update model provider
        if "model" not in config:
            config["model"] = {}

        old_provider = config["model"].get("provider", "unknown")
        config["model"]["provider"] = new_provider

        # Keep auxiliary models consistent: any that pointed at the old
        # provider follow the main model to the new one.
        auxiliary = config.get("auxiliary", {})
        for aux_name, aux_config in auxiliary.items():
            if isinstance(aux_config, dict) and aux_config.get("provider") == old_provider:
                aux_config["provider"] = new_provider

        # Write back
        with open(config_file, "w") as f:
            yaml.dump(config, f, default_flow_style=False)

        log("Updated " + profile_name + ": " + old_provider + " -> " + new_provider)
        return True, "Updated"

    except Exception as e:
        # Boundary: report the failure to the caller instead of crashing
        # the whole health-check loop.
        return False, str(e)
|
||||||
|
|
||||||
|
def check_profiles(health_map):
    """Migrate profiles off unhealthy providers onto healthy fallbacks.

    Scans every profile under PROFILES_DIR; any profile whose configured
    provider is unhealthy (or unknown to *health_map*) is rewritten to use
    the first healthy fallback that differs from its current provider.

    Returns:
        List of {"profile", "old_provider", "new_provider"} dicts, one per
        updated profile.  Always a list (the original returned None on the
        early-exit paths, which made the return type inconsistent).
    """
    if not PROFILES_DIR.exists():
        return []

    fallback_providers = get_fallback_providers(health_map)
    if not fallback_providers:
        log("CRITICAL: No healthy providers available!")
        return []

    updated_profiles = []

    for profile_dir in PROFILES_DIR.iterdir():
        if not profile_dir.is_dir():
            continue

        profile_name = profile_dir.name
        config_file = profile_dir / "config.yaml"

        if not config_file.exists():
            continue

        try:
            with open(config_file) as f:
                config = yaml.safe_load(f)

            model_config = config.get("model", {})
            if not isinstance(model_config, dict):
                continue

            current_provider = model_config.get("provider", "")
            if not current_provider:
                continue

            # Check if current provider is healthy
            if current_provider in health_map and health_map[current_provider]["healthy"]:
                continue  # Provider is healthy, no action needed

            # First healthy fallback that is not the (broken) current one.
            best_fallback = None
            for provider in fallback_providers:
                if provider != current_provider:
                    best_fallback = provider
                    break

            if not best_fallback:
                log("No fallback for " + profile_name + " (current: " + current_provider + ")")
                continue

            # Update profile
            success, message = update_profile_config(profile_name, best_fallback)
            if success:
                updated_profiles.append({
                    "profile": profile_name,
                    "old_provider": current_provider,
                    "new_provider": best_fallback
                })

        except Exception as e:
            # One bad profile must not abort the scan of the others.
            log("Error processing " + profile_name + ": " + str(e))

    return updated_profiles
|
||||||
|
|
||||||
|
def load_state():
    """Load persisted state from tmux-state.json.

    Returns the parsed dict, or {} when the file is missing, unreadable,
    or contains invalid JSON.
    """
    if STATE_FILE.exists():
        try:
            with open(STATE_FILE) as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError):
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit).  Corrupt or unreadable state
            # is treated as "no state"; it is rewritten on the next save.
            pass
    return {}
|
||||||
|
|
||||||
|
def save_state(state):
    """Persist *state* as pretty-printed JSON to tmux-state.json."""
    # Make sure the directory exists before writing the state file.
    LOG_DIR.mkdir(parents=True, exist_ok=True)

    with open(STATE_FILE, "w") as handle:
        json.dump(state, handle, indent=2)
|
||||||
|
|
||||||
|
def run_once():
    """Execute one full provider health-check cycle.

    Probes all providers, migrates profiles off unhealthy ones, persists
    the results to the state file, and logs a summary.
    """
    log("=== Provider Health Check ===")

    state = load_state()

    checks = build_health_map()
    migrations = check_profiles(checks)

    # Persist results so `--status` can report them later.
    state["provider_health"] = checks
    state["last_provider_check"] = datetime.now(timezone.utc).isoformat()
    if migrations:
        state["last_profile_updates"] = migrations
    save_state(state)

    # Summary
    ok_count = sum(1 for info in checks.values() if info["healthy"])
    log("Health: " + str(ok_count) + "/" + str(len(checks)) + " providers healthy")

    if migrations:
        log("Updated " + str(len(migrations)) + " profiles:")
        for entry in migrations:
            log(" " + entry["profile"] + ": " + entry["old_provider"] + " -> " + entry["new_provider"])
|
||||||
|
|
||||||
|
def show_status():
    """Print the last recorded provider health report to stdout."""
    state = load_state()
    report = state.get("provider_health", {})

    if not report:
        print("No provider health data available. Run without --status first.")
        return

    print("Provider Health (last updated: " + str(state.get("last_provider_check", "unknown")) + ")")
    print("=" * 80)

    for name, info in sorted(report.items()):
        label = "HEALTHY" if info["healthy"] else "UNHEALTHY"
        note = info.get("message", "")
        has_key = "yes" if info.get("api_key_present") else "no"
        print(name.ljust(20) + " " + label.ljust(10) + " API key: " + has_key + " - " + note)

    # Show profile migrations from the most recent run, if any.
    changes = state.get("last_profile_updates", [])
    if changes:
        print()
        print("Recent Profile Updates:")
        for change in changes:
            print(" " + change["profile"] + ": " + change["old_provider"] + " -> " + change["new_provider"])
|
||||||
|
|
||||||
|
def daemon_mode():
    """Loop forever, running a health check every five minutes.

    Ctrl-C stops the daemon cleanly; any other error is logged and the
    loop retries after a one-minute back-off.
    """
    log("Starting provider health daemon (check every 300s)")

    while True:
        try:
            run_once()
            time.sleep(300)  # Check every 5 minutes
        except KeyboardInterrupt:
            log("Daemon stopped by user")
            break
        except Exception as e:
            log("Error: " + str(e))
            time.sleep(60)  # brief back-off before retrying
|
||||||
|
|
||||||
|
def main():
    """CLI dispatch: --status prints the report, --daemon loops, default runs once."""
    args = sys.argv
    if "--status" in args:
        show_status()
    elif "--daemon" in args:
        daemon_mode()
    else:
        run_once()
|
||||||
|
|
||||||
|
# Script entry point.
if __name__ == "__main__":
    main()
|
||||||
@@ -196,7 +196,37 @@
|
|||||||
"paused_reason": null,
|
"paused_reason": null,
|
||||||
"skills": [],
|
"skills": [],
|
||||||
"skill": null
|
"skill": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "tmux-supervisor-513",
|
||||||
|
"name": "Autonomous Cron Supervisor",
|
||||||
|
"prompt": "Load the tmux-supervisor skill and execute the monitoring protocol.\n\nCheck both `dev` and `timmy` tmux sessions for idle panes. Only send Telegram notifications on actionable events (idle, overflow, failure). Be silent when all agents are working.\n\nSteps:\n1. List all tmux sessions (skip 'Alexander')\n2. For each session, list windows and panes\n3. Capture each pane and classify state (idle vs active)\n4. For idle panes: read context, craft context-aware prompt\n5. Send /queue prompts to idle panes\n6. Verify prompts landed\n7. Only notify via Telegram if:\n - A pane was prompted (idle detected)\n - A pane shows context overflow (>80%)\n - A pane is stuck or crashed\n8. If all panes are active: respond with [SILENT]",
|
||||||
|
"schedule": {
|
||||||
|
"kind": "interval",
|
||||||
|
"minutes": 7,
|
||||||
|
"display": "every 7m"
|
||||||
|
},
|
||||||
|
"schedule_display": "every 7m",
|
||||||
|
"repeat": {
|
||||||
|
"times": null,
|
||||||
|
"completed": 0
|
||||||
|
},
|
||||||
|
"enabled": true,
|
||||||
|
"created_at": "2026-04-15T03:00:00.000000+00:00",
|
||||||
|
"next_run_at": null,
|
||||||
|
"last_run_at": null,
|
||||||
|
"last_status": null,
|
||||||
|
"last_error": null,
|
||||||
|
"deliver": "telegram",
|
||||||
|
"origin": null,
|
||||||
|
"state": "scheduled",
|
||||||
|
"paused_at": null,
|
||||||
|
"paused_reason": null,
|
||||||
|
"skills": [
|
||||||
|
"tmux-supervisor"
|
||||||
|
],
|
||||||
|
"skill": "tmux-supervisor"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"updated_at": "2026-04-13T02:00:00+00:00"
|
"updated_at": "2026-04-13T02:00:00+00:00"
|
||||||
}
|
}
|
||||||
@@ -19,9 +19,11 @@ from glitch_patterns import (
|
|||||||
GlitchPattern,
|
GlitchPattern,
|
||||||
GlitchSeverity,
|
GlitchSeverity,
|
||||||
MATRIX_GLITCH_PATTERNS,
|
MATRIX_GLITCH_PATTERNS,
|
||||||
|
THREEJS_CATEGORIES,
|
||||||
build_vision_prompt,
|
build_vision_prompt,
|
||||||
get_pattern_by_category,
|
get_pattern_by_category,
|
||||||
get_patterns_by_severity,
|
get_patterns_by_severity,
|
||||||
|
get_threejs_patterns,
|
||||||
)
|
)
|
||||||
|
|
||||||
from matrix_glitch_detector import (
|
from matrix_glitch_detector import (
|
||||||
@@ -40,7 +42,7 @@ class TestGlitchPatterns(unittest.TestCase):
|
|||||||
|
|
||||||
def test_pattern_count(self):
|
def test_pattern_count(self):
|
||||||
"""Verify we have a reasonable number of defined patterns."""
|
"""Verify we have a reasonable number of defined patterns."""
|
||||||
self.assertGreaterEqual(len(MATRIX_GLITCH_PATTERNS), 8)
|
self.assertGreaterEqual(len(MATRIX_GLITCH_PATTERNS), 14) # 10 generic + 6 Three.js
|
||||||
|
|
||||||
def test_all_patterns_have_required_fields(self):
|
def test_all_patterns_have_required_fields(self):
|
||||||
"""Every pattern must have category, name, description, severity, prompts."""
|
"""Every pattern must have category, name, description, severity, prompts."""
|
||||||
@@ -88,6 +90,9 @@ class TestGlitchPatterns(unittest.TestCase):
|
|||||||
self.assertIn("Floating Object", prompt)
|
self.assertIn("Floating Object", prompt)
|
||||||
self.assertIn("Z-Fighting", prompt)
|
self.assertIn("Z-Fighting", prompt)
|
||||||
self.assertIn("Missing", prompt)
|
self.assertIn("Missing", prompt)
|
||||||
|
# Three.js patterns should be included
|
||||||
|
self.assertIn("Shader Compilation Failure", prompt)
|
||||||
|
self.assertIn("Bloom Overflow", prompt)
|
||||||
|
|
||||||
def test_build_vision_prompt_subset(self):
|
def test_build_vision_prompt_subset(self):
|
||||||
"""Vision prompt with subset should only include specified patterns."""
|
"""Vision prompt with subset should only include specified patterns."""
|
||||||
@@ -248,7 +253,7 @@ class TestGlitchDetector(unittest.TestCase):
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
report = run_demo(output_path)
|
report = run_demo(output_path)
|
||||||
self.assertEqual(len(report.glitches), 4)
|
self.assertEqual(len(report.glitches), 6) # 4 original + 2 Three.js
|
||||||
self.assertGreater(report.summary["total_glitches"], 0)
|
self.assertGreater(report.summary["total_glitches"], 0)
|
||||||
self.assertTrue(output_path.exists())
|
self.assertTrue(output_path.exists())
|
||||||
|
|
||||||
@@ -260,6 +265,93 @@ class TestGlitchDetector(unittest.TestCase):
|
|||||||
output_path.unlink(missing_ok=True)
|
output_path.unlink(missing_ok=True)
|
||||||
|
|
||||||
|
|
||||||
|
class TestThreeJsPatterns(unittest.TestCase):
    """Tests for Three.js-specific glitch patterns (timmy-config#543)."""

    def test_get_threejs_patterns_returns_only_threejs(self):
        """get_threejs_patterns() should return only Three.js categories."""
        patterns = get_threejs_patterns()
        # One pattern per Three.js category added for issue #543.
        self.assertEqual(len(patterns), 6)
        for p in patterns:
            self.assertIn(p.category, THREEJS_CATEGORIES)

    def test_threejs_patterns_have_required_fields(self):
        """All Three.js patterns must have valid fields."""
        for p in get_threejs_patterns():
            self.assertIsInstance(p.category, GlitchCategory)
            self.assertTrue(p.name)
            self.assertTrue(p.description)
            self.assertIsInstance(p.severity, GlitchSeverity)
            self.assertGreater(len(p.detection_prompts), 0)
            self.assertGreater(len(p.visual_indicators), 0)

    def test_shader_failure_is_critical(self):
        """Shader compilation failure should be CRITICAL severity."""
        p = get_pattern_by_category(GlitchCategory.SHADER_FAILURE)
        self.assertIsNotNone(p)
        self.assertEqual(p.severity, GlitchSeverity.CRITICAL)

    def test_texture_placeholder_is_critical(self):
        """Texture placeholder (1x1 white) should be CRITICAL severity."""
        p = get_pattern_by_category(GlitchCategory.TEXTURE_PLACEHOLDER)
        self.assertIsNotNone(p)
        self.assertEqual(p.severity, GlitchSeverity.CRITICAL)

    def test_infer_severity_shader_failure(self):
        """Shader failure should infer critical/high."""
        # High confidence maps to "critical"; lower confidence to "high".
        self.assertEqual(_infer_severity("shader_failure", 0.8), "critical")
        self.assertEqual(_infer_severity("shader_failure", 0.5), "high")

    def test_infer_severity_texture_placeholder(self):
        """Texture placeholder should infer critical/high."""
        self.assertEqual(_infer_severity("texture_placeholder", 0.8), "critical")
        self.assertEqual(_infer_severity("texture_placeholder", 0.5), "high")

    def test_infer_severity_uv_mapping(self):
        """UV mapping error should infer high/medium."""
        self.assertEqual(_infer_severity("uv_mapping_error", 0.8), "high")
        self.assertEqual(_infer_severity("uv_mapping_error", 0.5), "medium")

    def test_infer_severity_frustum_culling(self):
        """Frustum culling should infer medium/low."""
        self.assertEqual(_infer_severity("frustum_culling", 0.7), "medium")
        self.assertEqual(_infer_severity("frustum_culling", 0.4), "low")

    def test_infer_severity_shadow_map(self):
        """Shadow map artifact should infer medium/low."""
        self.assertEqual(_infer_severity("shadow_map_artifact", 0.7), "medium")
        self.assertEqual(_infer_severity("shadow_map_artifact", 0.4), "low")

    def test_infer_severity_bloom_overflow(self):
        """Bloom overflow should infer medium/low (default path)."""
        self.assertEqual(_infer_severity("bloom_overflow", 0.7), "medium")
        self.assertEqual(_infer_severity("bloom_overflow", 0.4), "low")

    def test_threejs_patterns_in_vision_prompt(self):
        """Three.js patterns should appear in the composite vision prompt."""
        prompt = build_vision_prompt()
        self.assertIn("shader_failure", prompt)
        self.assertIn("texture_placeholder", prompt)
        self.assertIn("uv_mapping_error", prompt)
        self.assertIn("frustum_culling", prompt)
        self.assertIn("shadow_map_artifact", prompt)
        self.assertIn("bloom_overflow", prompt)

    def test_threejs_subset_prompt(self):
        """Building prompt from Three.js-only patterns should work."""
        threejs = get_threejs_patterns()
        prompt = build_vision_prompt(threejs)
        self.assertIn("Shader Compilation Failure", prompt)
        self.assertNotIn("Floating Object", prompt)  # generic, not Three.js

    def test_report_metadata_version(self):
        """Report metadata should reference both issues."""
        report = run_demo()
        self.assertEqual(report.metadata["detector_version"], "0.2.0")
        self.assertIn("543", report.metadata["reference"])
||||||
|
|
||||||
|
|
||||||
class TestIntegration(unittest.TestCase):
|
class TestIntegration(unittest.TestCase):
|
||||||
"""Integration-level tests."""
|
"""Integration-level tests."""
|
||||||
|
|
||||||
@@ -276,6 +368,13 @@ class TestIntegration(unittest.TestCase):
|
|||||||
expected = {"floating_assets", "z_fighting", "missing_textures", "clipping", "broken_normals"}
|
expected = {"floating_assets", "z_fighting", "missing_textures", "clipping", "broken_normals"}
|
||||||
self.assertTrue(expected.issubset(category_values))
|
self.assertTrue(expected.issubset(category_values))
|
||||||
|
|
||||||
|
def test_patterns_cover_threejs_themes(self):
|
||||||
|
"""Patterns should cover Three.js-specific glitch themes (#543)."""
|
||||||
|
category_values = {p.category.value for p in MATRIX_GLITCH_PATTERNS}
|
||||||
|
threejs_expected = {"shader_failure", "texture_placeholder", "uv_mapping_error",
|
||||||
|
"frustum_culling", "shadow_map_artifact", "bloom_overflow"}
|
||||||
|
self.assertTrue(threejs_expected.issubset(category_values))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
129
training/scripts/augment_pairs.py
Executable file
129
training/scripts/augment_pairs.py
Executable file
@@ -0,0 +1,129 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
augment_pairs.py — Training data augmentation: paraphrase and translate.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python3 augment_pairs.py --input data.jsonl
|
||||||
|
python3 augment_pairs.py --input data.jsonl --paraphrases 3 --langs es,fr,de
|
||||||
|
python3 augment_pairs.py --input data.jsonl --llm-endpoint http://localhost:11434/v1
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json, os, sys, re, random
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Deterministic augmentation output across runs.
random.seed(42)

# Surface-level paraphrase transforms; each takes and returns a string.
# Several are no-ops on short inputs, so paraphrase() retries with fallbacks.
PARAPHRASE_TRANSFORMS = [
    lambda s: re.sub(r"(\w+), (\w+)", r"\2, \1", s, count=1),
    lambda s: f"A beautifully rendered scene: {s[0].lower()}{s[1:]}" if len(s) > 10 else s,
    lambda s: s.replace("A ", "The ").replace("An ", "The ") if s.startswith(("A ", "An ")) else f"Here, {s[0].lower()}{s[1:]}",
    lambda s: f"In a cinematic frame: {s}" if len(s) > 20 else s,
    lambda s: s if ", " not in s else ", ".join(s.split(", ")[:2]),
]
|
||||||
|
|
||||||
|
# Tiny word-for-word lookup tables used by translate(); any word not
# present passes through unchanged.  Deliberately crude: one gendered
# article form per language is enough for augmentation noise.
TRANSLATIONS = {
    "es": {"the":"el","a":"un","is":"es","in":"en","of":"de","and":"y","with":"con","scene":"escena","light":"luz","dark":"oscuro","warm":"cálido","rain":"lluvia","sun":"sol","moon":"luna","sky":"cielo","forest":"bosque","mountain":"montaña","ocean":"océano","golden":"dorado","blue":"azul","red":"rojo","green":"verde","silence":"silencio","dream":"sueño","love":"amor","hope":"esperanza","fear":"miedo","joy":"alegría","peace":"paz","beautiful":"hermoso","sad":"triste","shadow":"sombra","color":"color","silver":"plateado","white":"blanco","black":"negro","portray":"retrato"},
    "fr": {"the":"le","a":"un","is":"est","in":"dans","of":"de","and":"et","with":"avec","scene":"scène","light":"lumière","dark":"sombre","warm":"chaud","rain":"pluie","sun":"soleil","moon":"lune","sky":"ciel","forest":"forêt","mountain":"montagne","ocean":"océan","golden":"doré","blue":"bleu","red":"rouge","green":"vert","silence":"silence","dream":"rêve","love":"amour","hope":"espoir","fear":"peur","joy":"joie","peace":"paix","beautiful":"beau","sad":"triste","shadow":"ombre","color":"couleur","silver":"argenté","white":"blanc","black":"noir"},
    "de": {"the":"der","a":"ein","is":"ist","in":"in","of":"von","and":"und","with":"mit","scene":"Szene","light":"Licht","dark":"dunkel","warm":"warm","rain":"Regen","sun":"Sonne","moon":"Mond","sky":"Himmel","forest":"Wald","mountain":"Berg","ocean":"Ozean","golden":"golden","blue":"blau","red":"rot","green":"grün","silence":"Stille","dream":"Traum","love":"Liebe","hope":"Hoffnung","fear":"Angst","joy":"Freude","peace":"Frieden","beautiful":"schön","sad":"traurig","shadow":"Schatten","color":"Farbe","silver":"silbern","white":"weiß","black":"schwarz"},
}

# Human-readable names for the supported translation language codes.
LANG_NAMES = {"es": "Spanish", "fr": "French", "de": "German"}
||||||
|
|
||||||
|
|
||||||
|
def detect_text_field(entry):
    """Pick the field of *entry* that holds its main text.

    Known field names are checked first in preference order; otherwise the
    first string value longer than 5 characters wins.  Returns the field
    name, or None when nothing qualifies.
    """
    preferred = ["rich", "terse", "text", "content", "lyric_line",
                 "description", "scene_description", "prompt", "scene"]
    for name in preferred:
        value = entry.get(name)
        if isinstance(value, str) and len(value) > 5:
            return name
    # Fall back to any sufficiently long string-valued field.
    for key, value in entry.items():
        if isinstance(value, str) and len(value) > 5:
            return key
    return None
|
||||||
|
|
||||||
|
|
||||||
|
def paraphrase(text):
    """Return a lightweight surface paraphrase of *text*.

    Applies one random transform; if that leaves the text unchanged, falls
    back to word substitutions and finally to a framing prefix.  May return
    *text* unchanged when no transform applies.
    """
    if not text:
        # Guard: the final fallback below indexes text[0], which raised
        # IndexError on an empty string in the original implementation.
        return text
    t = random.choice(PARAPHRASE_TRANSFORMS)(text)
    if t == text:
        t = text.replace(" and ", " & ").replace(" with ", " alongside ")
    if t == text:
        t = f"In this scene: {text[0].lower()}{text[1:]}" if text[0].isupper() else text
    return t
|
||||||
|
|
||||||
|
|
||||||
|
def translate(text, lang):
    """Word-by-word dictionary translation of *text* into *lang*.

    Words found in the language's lookup table are replaced, preserving
    punctuation on both sides of the word; unknown words (and unknown
    languages) pass through unchanged.
    """
    table = TRANSLATIONS.get(lang, {})
    out = []
    for w in text.split():
        core = w.strip(".,;:!?")
        lo = core.lower()
        if core and lo in table:
            # Re-attach punctuation stripped from either side of the word.
            # (The original only restored trailing punctuation, silently
            # dropping leading characters such as an opening parenthesis.)
            start = w.find(core)
            prefix = w[:start]
            suffix = w[start + len(core):]
            out.append(prefix + table[lo] + suffix)
        else:
            out.append(w)
    return " ".join(out)
|
||||||
|
|
||||||
|
|
||||||
|
def augment_file(input_path, output_path=None, n_para=3, langs=None, llm_endpoint=None):
    """Augment a JSONL file with paraphrases and dictionary translations.

    Writes all original entries followed by their augmented variants to
    *output_path* (default: ``<stem>_augmented<suffix>`` beside the input).

    Args:
        input_path: JSONL file of dict entries.
        output_path: destination path, or None for the default.
        n_para: number of paraphrase attempts per entry.
        langs: language codes to translate into (e.g. ["es", "fr"]).
        llm_endpoint: accepted for CLI compatibility; currently unused.

    Returns:
        Number of augmented entries written (originals not counted).
    """
    input_path = Path(input_path)
    if output_path is None:
        output_path = input_path.parent / f"{input_path.stem}_augmented{input_path.suffix}"

    # Read inside a context manager so the handle is always closed (the
    # original opened the file in a comprehension and leaked the handle).
    with open(input_path) as src:
        entries = [json.loads(line) for line in src if line.strip()]
    if not entries:
        print(f"No entries in {input_path}")
        return 0

    tf = detect_text_field(entries[0])
    if not tf:
        print(f"ERROR: No text field in {input_path}", file=sys.stderr)
        return 0

    print(f"Input: {input_path} ({len(entries)} entries, field={tf})")

    aug_count = 0
    with open(output_path, "w") as out:
        # Originals first, so downstream consumers keep the source data.
        for e in entries:
            out.write(json.dumps(e, ensure_ascii=False) + "\n")
        for i, e in enumerate(entries):
            text = e[tf]
            # Paraphrases
            for p in range(n_para):
                para = paraphrase(text)
                if para != text:
                    ne = dict(e)
                    ne[tf] = para
                    ne["_augmentation"] = f"paraphrase_{p+1}"
                    ne["_original"] = text[:100]
                    out.write(json.dumps(ne, ensure_ascii=False) + "\n")
                    aug_count += 1
            # Translations
            for lang in (langs or []):
                tr = translate(text, lang)
                if tr != text:
                    ne = dict(e)
                    ne[tf] = tr
                    ne["_augmentation"] = f"translate_{lang}"
                    ne["_language"] = lang
                    ne["_original"] = text[:100]
                    out.write(json.dumps(ne, ensure_ascii=False) + "\n")
                    aug_count += 1
            if (i+1) % 100 == 0:
                print(f" {i+1}/{len(entries)} done ({aug_count} augmented)")

    total = len(entries) + aug_count
    print(f"Done: {len(entries)} originals + {aug_count} augmented = {total}")
    print(f"Output: {output_path}")
    return aug_count
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Parse CLI arguments and run the augmentation."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True)
    parser.add_argument("--output", default=None)
    parser.add_argument("--paraphrases", type=int, default=3)
    parser.add_argument("--langs", default="es,fr,de")
    parser.add_argument("--llm-endpoint", default=None)
    opts = parser.parse_args()

    # Split the comma-separated language list, dropping empty pieces.
    if opts.langs:
        langs = [code.strip() for code in opts.langs.split(",") if code.strip()]
    else:
        langs = []

    augment_file(opts.input, opts.output, opts.paraphrases, langs, opts.llm_endpoint)
|
||||||
|
|
||||||
|
|
||||||
|
# Script entry point.
if __name__ == "__main__":
    main()
|
||||||
Reference in New Issue
Block a user