Compare commits
11 Commits
burn/auto-
...
whip/491-1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3278f6f1ed | ||
| e3a40be627 | |||
| efb2df8940 | |||
| cf687a5bfa | |||
|
|
c09e54de72 | ||
| 3214437652 | |||
| 95cd259867 | |||
| 5e7bef1807 | |||
| 3d84dd5c27 | |||
| e38e80661c | |||
|
|
b71e365ed6 |
@@ -49,7 +49,7 @@ jobs:
|
|||||||
python-version: '3.11'
|
python-version: '3.11'
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
pip install py_compile flake8
|
pip install flake8
|
||||||
- name: Compile-check all Python files
|
- name: Compile-check all Python files
|
||||||
run: |
|
run: |
|
||||||
find . -name '*.py' -print0 | while IFS= read -r -d '' f; do
|
find . -name '*.py' -print0 | while IFS= read -r -d '' f; do
|
||||||
|
|||||||
297
bin/glitch_patterns.py
Normal file
297
bin/glitch_patterns.py
Normal file
@@ -0,0 +1,297 @@
|
|||||||
|
"""
|
||||||
|
Glitch pattern definitions for 3D world anomaly detection.
|
||||||
|
|
||||||
|
Defines known visual artifact categories commonly found in 3D web worlds,
|
||||||
|
particularly The Matrix environments. Each pattern includes detection
|
||||||
|
heuristics and severity ratings.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from enum import Enum
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
class GlitchSeverity(Enum):
    """Severity levels for detected glitches, ordered most to least severe."""

    CRITICAL = "critical"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    INFO = "info"
|
||||||
|
|
||||||
|
|
||||||
|
class GlitchCategory(Enum):
    """Known categories of visual artifacts in 3D web worlds."""

    FLOATING_ASSETS = "floating_assets"  # objects not anchored to scene geometry
    Z_FIGHTING = "z_fighting"  # coplanar surfaces flickering over depth priority
    MISSING_TEXTURES = "missing_textures"  # fallback/placeholder textures visible
    CLIPPING = "clipping"  # meshes intersecting in physically impossible ways
    BROKEN_NORMALS = "broken_normals"  # inverted or incorrect surface normals
    SHADOW_ARTIFACTS = "shadow_artifacts"  # detached or malformed shadows
    LIGHTMAP_ERRORS = "lightmap_errors"  # baked-lighting defects (dark spots, leaks)
    LOD_POPPING = "lod_popping"  # abrupt level-of-detail transitions
    WATER_REFLECTION = "water_reflection"  # reflection / water rendering errors
    SKYBOX_SEAM = "skybox_seam"  # visible seams between skybox cubemap faces
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class GlitchPattern:
    """Definition of a known glitch pattern with detection parameters."""

    category: GlitchCategory
    name: str  # short human-readable title
    description: str
    severity: GlitchSeverity
    detection_prompts: list[str]  # instructions handed to the vision model
    visual_indicators: list[str]  # short phrases describing telltale signs
    confidence_threshold: float = 0.6  # minimum confidence to accept a detection

    def to_dict(self) -> dict:
        """Return a JSON-serializable dict (enum fields flattened to their values)."""
        return {
            "category": self.category.value,
            "name": self.name,
            "description": self.description,
            "severity": self.severity.value,
            "detection_prompts": self.detection_prompts,
            "visual_indicators": self.visual_indicators,
            "confidence_threshold": self.confidence_threshold,
        }
|
||||||
|
|
||||||
|
|
||||||
|
# Known glitch patterns for Matrix 3D world scanning
# NOTE: list order carries no meaning — severity filtering is done by
# get_patterns_by_severity() and lookup by get_pattern_by_category().
MATRIX_GLITCH_PATTERNS: list[GlitchPattern] = [
    GlitchPattern(
        category=GlitchCategory.FLOATING_ASSETS,
        name="Floating Object",
        description="Object not properly grounded or anchored to the scene geometry. "
        "Common in procedurally placed assets or after physics desync.",
        severity=GlitchSeverity.HIGH,
        detection_prompts=[
            "Identify any objects that appear to float above the ground without support.",
            "Look for furniture, props, or geometry suspended in mid-air with no visible attachment.",
            "Check for objects whose shadows do not align with the surface below them.",
        ],
        visual_indicators=[
            "gap between object base and surface",
            "shadow detached from object",
            "object hovering with no structural support",
        ],
        confidence_threshold=0.65,
    ),
    GlitchPattern(
        category=GlitchCategory.Z_FIGHTING,
        name="Z-Fighting Flicker",
        description="Two coplanar surfaces competing for depth priority, causing "
        "visible flickering or shimmering textures.",
        severity=GlitchSeverity.MEDIUM,
        detection_prompts=[
            "Look for surfaces that appear to shimmer, flicker, or show mixed textures.",
            "Identify areas where two textures seem to overlap and compete for visibility.",
            "Check walls, floors, or objects for surface noise or pattern interference.",
        ],
        visual_indicators=[
            "shimmering surface",
            "texture flicker between two patterns",
            "noisy flat surfaces",
            "moire-like patterns on planar geometry",
        ],
        confidence_threshold=0.55,
    ),
    GlitchPattern(
        category=GlitchCategory.MISSING_TEXTURES,
        name="Missing or Placeholder Texture",
        description="A surface rendered with a fallback checkerboard, solid magenta, "
        "or the default engine placeholder texture.",
        severity=GlitchSeverity.CRITICAL,
        detection_prompts=[
            "Look for bright magenta, checkerboard, or solid-color surfaces that look out of place.",
            "Identify any surfaces that appear as flat untextured colors inconsistent with the scene.",
            "Check for black, white, or magenta patches where detailed textures should be.",
        ],
        visual_indicators=[
            "magenta/pink solid color surface",
            "checkerboard pattern",
            "flat single-color geometry",
            "UV-debug texture visible",
        ],
        confidence_threshold=0.7,
    ),
    GlitchPattern(
        category=GlitchCategory.CLIPPING,
        name="Geometry Clipping",
        description="Objects passing through each other or intersecting in physically "
        "impossible ways due to collision mesh errors.",
        severity=GlitchSeverity.HIGH,
        detection_prompts=[
            "Look for objects that visibly pass through other objects (walls, floors, furniture).",
            "Identify characters or props embedded inside geometry where they should not be.",
            "Check for intersecting meshes where solid objects overlap unnaturally.",
        ],
        visual_indicators=[
            "object passing through wall or floor",
            "embedded geometry",
            "overlapping solid meshes",
            "character limb inside furniture",
        ],
        confidence_threshold=0.6,
    ),
    GlitchPattern(
        category=GlitchCategory.BROKEN_NORMALS,
        name="Broken Surface Normals",
        description="Inverted or incorrect surface normals causing faces to appear "
        "inside-out, invisible from certain angles, or lit incorrectly.",
        severity=GlitchSeverity.MEDIUM,
        detection_prompts=[
            "Look for surfaces that appear dark or black on one side while lit on the other.",
            "Identify objects that seem to vanish when viewed from certain angles.",
            "Check for inverted shading where lit areas should be in shadow.",
        ],
        visual_indicators=[
            "dark/unlit face on otherwise lit model",
            "invisible surface from one direction",
            "inverted shadow gradient",
            "inside-out appearance",
        ],
        confidence_threshold=0.5,
    ),
    GlitchPattern(
        category=GlitchCategory.SHADOW_ARTIFACTS,
        name="Shadow Artifact",
        description="Broken, detached, or incorrectly rendered shadows that do not "
        "match the casting geometry or scene lighting.",
        severity=GlitchSeverity.LOW,
        detection_prompts=[
            "Look for shadows that do not match the shape of nearby objects.",
            "Identify shadow acne: banding or striped patterns on surfaces.",
            "Check for floating shadows detached from any visible caster.",
        ],
        visual_indicators=[
            "shadow shape mismatch",
            "shadow acne bands",
            "detached floating shadow",
            "Peter Panning (shadow offset from base)",
        ],
        confidence_threshold=0.5,
    ),
    GlitchPattern(
        category=GlitchCategory.LOD_POPPING,
        name="LOD Transition Pop",
        description="Visible pop-in when level-of-detail models switch abruptly, "
        "causing geometry or textures to change suddenly.",
        severity=GlitchSeverity.LOW,
        detection_prompts=[
            "Look for areas where mesh detail changes abruptly at visible boundaries.",
            "Identify objects that appear to morph or shift geometry suddenly.",
            "Check for texture resolution changes that create visible seams.",
        ],
        visual_indicators=[
            "visible mesh simplification boundary",
            "texture resolution jump",
            "geometry pop-in artifacts",
        ],
        confidence_threshold=0.45,
    ),
    GlitchPattern(
        category=GlitchCategory.LIGHTMAP_ERRORS,
        name="Lightmap Baking Error",
        description="Incorrect or missing baked lighting causing dark spots, light "
        "leaks, or mismatched illumination on static geometry.",
        severity=GlitchSeverity.MEDIUM,
        detection_prompts=[
            "Look for unusually dark patches on walls or ceilings that should be lit.",
            "Identify bright light leaks through solid geometry seams.",
            "Check for mismatched lighting between adjacent surfaces.",
        ],
        visual_indicators=[
            "dark splotch on lit surface",
            "bright line at geometry seam",
            "lighting discontinuity between adjacent faces",
        ],
        confidence_threshold=0.5,
    ),
    GlitchPattern(
        category=GlitchCategory.WATER_REFLECTION,
        name="Water/Reflection Error",
        description="Incorrect reflections, missing water surfaces, or broken "
        "reflection probe assignments.",
        severity=GlitchSeverity.MEDIUM,
        detection_prompts=[
            "Look for reflections that do not match the surrounding environment.",
            "Identify water surfaces that appear solid or incorrectly rendered.",
            "Check for mirror surfaces showing wrong scene geometry.",
        ],
        visual_indicators=[
            "reflection mismatch",
            "solid water surface",
            "incorrect environment map",
        ],
        confidence_threshold=0.5,
    ),
    GlitchPattern(
        category=GlitchCategory.SKYBOX_SEAM,
        name="Skybox Seam",
        description="Visible seams or color mismatches at the edges of skybox cubemap faces.",
        severity=GlitchSeverity.LOW,
        detection_prompts=[
            "Look at the edges of the sky for visible seams or color shifts.",
            "Identify discontinuities where skybox faces meet.",
            "Check for texture stretching at skybox corners.",
        ],
        visual_indicators=[
            "visible line in sky",
            "color discontinuity at sky edge",
            "sky texture seam",
        ],
        confidence_threshold=0.45,
    ),
]
|
||||||
|
|
||||||
|
|
||||||
|
def get_patterns_by_severity(min_severity: GlitchSeverity) -> list[GlitchPattern]:
    """Return patterns whose severity is at or above *min_severity*."""
    # Rank each severity by position: INFO lowest, CRITICAL highest.
    rank = {
        level: position
        for position, level in enumerate(
            (
                GlitchSeverity.INFO,
                GlitchSeverity.LOW,
                GlitchSeverity.MEDIUM,
                GlitchSeverity.HIGH,
                GlitchSeverity.CRITICAL,
            )
        )
    }
    floor = rank[min_severity]
    return [pattern for pattern in MATRIX_GLITCH_PATTERNS if rank[pattern.severity] >= floor]
|
||||||
|
|
||||||
|
|
||||||
|
def get_pattern_by_category(category: GlitchCategory) -> Optional[GlitchPattern]:
    """Return the pattern definition for a specific category, or None if absent."""
    matches = (p for p in MATRIX_GLITCH_PATTERNS if p.category == category)
    return next(matches, None)
|
||||||
|
|
||||||
|
|
||||||
|
def build_vision_prompt(patterns: list[GlitchPattern] | None = None) -> str:
    """Build a composite vision analysis prompt from pattern definitions.

    Each pattern contributes one section (category, name, severity,
    description, detection prompts, indicators); all MATRIX_GLITCH_PATTERNS
    are used when *patterns* is None.
    """
    if patterns is None:
        patterns = MATRIX_GLITCH_PATTERNS

    # One formatted section per pattern.
    sections = [
        f"[{p.category.value.upper()}] {p.name} (severity: {p.severity.value})\n"
        f" {p.description}\n"
        f" Look for: {' '.join(p.detection_prompts)}\n"
        f" Visual indicators: {', '.join(p.visual_indicators)}"
        for p in patterns
    ]

    header = (
        "Analyze this 3D world screenshot for visual glitches and artifacts. "
        "For each detected issue, report the category, description of what you see, "
        "approximate location in the image (x%, y%), and confidence (0.0-1.0).\n\n"
        "Known glitch patterns to check:\n\n"
    )
    return header + "\n\n".join(sections)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Manual smoke test: list the registered patterns and preview the
    # composite vision prompt.
    import json  # NOTE(review): unused in this block — candidate for removal

    print(f"Loaded {len(MATRIX_GLITCH_PATTERNS)} glitch patterns:\n")
    for p in MATRIX_GLITCH_PATTERNS:
        print(f" [{p.severity.value:8s}] {p.category.value}: {p.name}")
    print(f"\nVision prompt preview:\n{build_vision_prompt()[:500]}...")
|
||||||
549
bin/matrix_glitch_detector.py
Normal file
549
bin/matrix_glitch_detector.py
Normal file
@@ -0,0 +1,549 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Matrix 3D World Glitch Detector
|
||||||
|
|
||||||
|
Scans a 3D web world for visual artifacts using browser automation
|
||||||
|
and vision AI analysis. Produces structured glitch reports.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python matrix_glitch_detector.py <url> [--angles 4] [--output report.json]
|
||||||
|
python matrix_glitch_detector.py --demo # Run with synthetic test data
|
||||||
|
|
||||||
|
Ref: timmy-config#491
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import base64
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import uuid
|
||||||
|
from dataclasses import dataclass, field, asdict
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
# Add parent for glitch_patterns import
|
||||||
|
sys.path.insert(0, str(Path(__file__).resolve().parent))
|
||||||
|
from glitch_patterns import (
|
||||||
|
GlitchCategory,
|
||||||
|
GlitchPattern,
|
||||||
|
GlitchSeverity,
|
||||||
|
MATRIX_GLITCH_PATTERNS,
|
||||||
|
build_vision_prompt,
|
||||||
|
get_patterns_by_severity,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class DetectedGlitch:
    """A single detected glitch with metadata."""

    id: str  # short random identifier (first 8 chars of a uuid4, per callers in this file)
    category: str  # glitch category label (typically a GlitchCategory value)
    name: str  # short human-readable title
    description: str
    severity: str  # severity label string, e.g. "critical" / "high" / ...
    confidence: float  # detector confidence, 0.0-1.0
    location_x: Optional[float] = None  # percentage across image
    location_y: Optional[float] = None  # percentage down image
    screenshot_index: int = 0  # index into the captured screenshot list
    screenshot_angle: str = "front"  # label of the camera angle the shot came from
    timestamp: str = ""  # ISO-8601 UTC; auto-filled in __post_init__ when empty

    def __post_init__(self):
        # Default the timestamp to "now" (UTC) when the caller did not supply one.
        if not self.timestamp:
            self.timestamp = datetime.now(timezone.utc).isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ScanResult:
    """Complete scan result for a 3D world URL."""

    scan_id: str  # uuid4 string for this scan run
    url: str  # scanned world URL
    timestamp: str  # ISO-8601 UTC scan time
    total_screenshots: int
    angles_captured: list[str]  # angle labels, e.g. "front", "right"
    glitches: list[dict] = field(default_factory=list)  # asdict()-ed DetectedGlitch entries
    summary: dict = field(default_factory=dict)  # aggregate counts built by build_report()
    metadata: dict = field(default_factory=dict)  # detector version / pattern count / reference

    def to_json(self, indent: int = 2) -> str:
        """Serialize the full result to a JSON string."""
        return json.dumps(asdict(self), indent=indent)
|
||||||
|
|
||||||
|
|
||||||
|
def generate_scan_angles(num_angles: int) -> list[dict]:
    """Generate camera angle configurations for multi-angle scanning.

    Returns a list of dicts with yaw/pitch/label for browser camera control.
    The first eight angles come from a fixed preset table; any further angles
    are spread evenly around the yaw circle.
    """
    presets = [
        {"yaw": 0, "pitch": 0, "label": "front"},
        {"yaw": 90, "pitch": 0, "label": "right"},
        {"yaw": 180, "pitch": 0, "label": "back"},
        {"yaw": 270, "pitch": 0, "label": "left"},
        {"yaw": 0, "pitch": -30, "label": "front_low"},
        {"yaw": 45, "pitch": -15, "label": "front_right_low"},
        {"yaw": 0, "pitch": 30, "label": "front_high"},
        {"yaw": 45, "pitch": 0, "label": "front_right"},
    ]

    if num_angles <= len(presets):
        return presets[:num_angles]

    # Past the presets, distribute extra angles evenly by yaw.
    step = 360 // num_angles
    extras = [
        {"yaw": index * step, "pitch": 0, "label": f"angle_{index}"}
        for index in range(len(presets), num_angles)
    ]
    return presets + extras
|
||||||
|
|
||||||
|
|
||||||
|
def capture_screenshots(url: str, angles: list[dict], output_dir: Path) -> list[Path]:
    """Capture screenshots of a 3D web world from multiple angles.

    Tries the browser_vision-based capture first; any failure (or a falsy
    result) falls back to writing a placeholder PNG, so one path per angle
    is always returned.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    captured: list[Path] = []

    for index, camera in enumerate(angles):
        target = output_dir / f"screenshot_{index:03d}_{camera['label']}.png"

        captured_ok = False
        try:
            captured_ok = bool(_browser_capture(url, camera, target))
        except Exception:
            # Best-effort by design: any capture error falls through to the
            # placeholder below rather than aborting the scan.
            captured_ok = False

        if not captured_ok:
            _generate_placeholder_screenshot(target, camera)

        captured.append(target)

    return captured
|
||||||
|
|
||||||
|
|
||||||
|
def _browser_capture(url: str, angle: dict, output_path: Path) -> bool:
    """Capture a screenshot via browser automation.

    Delegates to the script named by the BROWSER_VISION_SCRIPT environment
    variable when it exists; returns False when no such backend is
    configured (CI / offline mode). May raise (e.g. subprocess timeout);
    callers treat any exception as a failed capture.
    """
    script = os.environ.get("BROWSER_VISION_SCRIPT")
    if not script or not Path(script).exists():
        return False

    import subprocess

    args = [
        sys.executable,
        script,
        "--url", url,
        "--screenshot", str(output_path),
        "--rotate-yaw", str(angle["yaw"]),
        "--rotate-pitch", str(angle["pitch"]),
    ]
    completed = subprocess.run(args, capture_output=True, text=True, timeout=30)
    return completed.returncode == 0 and output_path.exists()
|
||||||
|
|
||||||
|
|
||||||
|
def _generate_placeholder_screenshot(path: Path, angle: dict):
|
||||||
|
"""Generate a minimal 1x1 PNG as a placeholder for testing."""
|
||||||
|
# Minimal valid PNG (1x1 transparent pixel)
|
||||||
|
png_data = (
|
||||||
|
b"\x89PNG\r\n\x1a\n"
|
||||||
|
b"\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01"
|
||||||
|
b"\x08\x06\x00\x00\x00\x1f\x15\xc4\x89"
|
||||||
|
b"\x00\x00\x00\nIDATx\x9cc\x00\x01\x00\x00\x05\x00\x01"
|
||||||
|
b"\r\n\xb4\x00\x00\x00\x00IEND\xaeB`\x82"
|
||||||
|
)
|
||||||
|
path.write_bytes(png_data)
|
||||||
|
|
||||||
|
|
||||||
|
def analyze_with_vision(
    screenshot_paths: list[Path],
    angles: list[dict],
    patterns: list[GlitchPattern] | None = None,
) -> list[DetectedGlitch]:
    """Send screenshots to vision AI for glitch analysis.

    Builds one composite prompt from *patterns* (all MATRIX_GLITCH_PATTERNS
    when None) and runs it against each screenshot/angle pair. Screenshots
    and angles are paired positionally; detections from every image are
    concatenated. With no vision backend configured this yields [].
    """
    active = MATRIX_GLITCH_PATTERNS if patterns is None else patterns
    composite_prompt = build_vision_prompt(active)

    findings: list[DetectedGlitch] = []
    for index, (shot, camera) in enumerate(zip(screenshot_paths, angles)):
        findings.extend(
            _vision_analyze_image(shot, composite_prompt, index, camera["label"])
        )
    return findings
|
||||||
|
|
||||||
|
|
||||||
|
def _vision_analyze_image(
    image_path: Path,
    prompt: str,
    screenshot_index: int,
    angle_label: str,
) -> list[DetectedGlitch]:
    """Analyze a single screenshot with vision AI.

    Uses an OpenAI-compatible endpoint when VISION_API_KEY or
    OPENAI_API_KEY is set; returns an empty list when no key is configured
    or the API call fails (the error is logged to stderr).
    """
    key = os.environ.get("VISION_API_KEY") or os.environ.get("OPENAI_API_KEY")
    if not key:
        # No vision backend available.
        return []

    base = os.environ.get("VISION_API_BASE", "https://api.openai.com/v1")
    try:
        return _call_vision_api(
            image_path, prompt, screenshot_index, angle_label, key, base
        )
    except Exception as exc:
        print(f" [!] Vision API error for {image_path.name}: {exc}", file=sys.stderr)
    return []
|
||||||
|
|
||||||
|
|
||||||
|
def _call_vision_api(
    image_path: Path,
    prompt: str,
    screenshot_index: int,
    angle_label: str,
    api_key: str,
    api_base: str,
) -> list[DetectedGlitch]:
    """Call an OpenAI-compatible vision endpoint and parse the reply.

    Sends the screenshot inline as a base64 data URL; the model name comes
    from VISION_MODEL (default "gpt-4o"). Raises on HTTP/network errors —
    the caller handles those.
    """
    import urllib.request
    import urllib.error

    encoded = base64.b64encode(image_path.read_bytes()).decode()

    body = {
        "model": os.environ.get("VISION_MODEL", "gpt-4o"),
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/png;base64,{encoded}",
                            "detail": "high",
                        },
                    },
                ],
            }
        ],
        "max_tokens": 4096,
    }

    request = urllib.request.Request(
        f"{api_base}/chat/completions",
        data=json.dumps(body).encode(),
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        },
    )

    with urllib.request.urlopen(request, timeout=60) as response:
        reply = json.loads(response.read())

    answer = reply["choices"][0]["message"]["content"]
    return _parse_vision_response(answer, screenshot_index, angle_label)
|
||||||
|
|
||||||
|
|
||||||
|
def _add_glitch_from_dict(
    item: dict,
    glitches: list[DetectedGlitch],
    screenshot_index: int,
    angle_label: str,
):
    """Convert a dict from the vision API response into a DetectedGlitch.

    Tolerates alternate key spellings (category/type, confidence/score,
    name/label, description/detail, x/location_x, ...). Appends the result
    to *glitches* in place; severity is inferred when absent.
    """
    category = item.get("category", item.get("type", "unknown"))
    confidence = float(item.get("confidence", item.get("score", 0.5)))

    glitches.append(
        DetectedGlitch(
            id=str(uuid.uuid4())[:8],
            category=category,
            name=item.get("name", item.get("label", category)),
            description=item.get("description", item.get("detail", "")),
            severity=item.get("severity", _infer_severity(category, confidence)),
            confidence=confidence,
            location_x=item.get("location_x", item.get("x")),
            location_y=item.get("location_y", item.get("y")),
            screenshot_index=screenshot_index,
            screenshot_angle=angle_label,
        )
    )
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_vision_response(
    text: str, screenshot_index: int, angle_label: str
) -> list[DetectedGlitch]:
    """Parse a vision AI response into structured glitch detections.

    Collects candidate JSON payloads from two places: fenced ``` code
    blocks inside the text, and the whole text parsed as JSON (supporting
    a bare list, or a dict with "glitches"/"detections" keys). Dict
    payloads (or lists of dicts) become DetectedGlitch entries; anything
    unparseable is silently ignored.
    """

    def _decode(raw: str):
        # Tolerant JSON load: None on failure (and for a literal JSON null,
        # which would be ignored downstream anyway).
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            return None

    candidates = []

    # Pass 1: fenced code blocks. The ``` markers toggle in/out of a block.
    inside = False
    buffer: list[str] = []
    for raw_line in text.split("\n"):
        if raw_line.strip().startswith("```"):
            if inside and buffer:
                decoded = _decode("\n".join(buffer))
                if decoded is not None:
                    candidates.append(decoded)
            buffer = []
            inside = not inside
            continue
        if inside:
            buffer.append(raw_line)

    # Flush an unterminated fence at end of text.
    if inside and buffer:
        decoded = _decode("\n".join(buffer))
        if decoded is not None:
            candidates.append(decoded)

    # Pass 2: the entire response as JSON.
    whole = _decode(text)
    if isinstance(whole, list):
        candidates.extend(whole)
    elif isinstance(whole, dict):
        if "glitches" in whole:
            candidates.extend(whole["glitches"])
        elif "detections" in whole:
            candidates.extend(whole["detections"])
        else:
            candidates.append(whole)

    # Flatten: a candidate may itself be a list of detections.
    results: list[DetectedGlitch] = []
    for entry in candidates:
        items = entry if isinstance(entry, list) else [entry]
        for sub in items:
            if isinstance(sub, dict):
                _add_glitch_from_dict(sub, results, screenshot_index, angle_label)

    return results
|
||||||
|
|
||||||
|
|
||||||
|
def _infer_severity(category: str, confidence: float) -> str:
    """Infer a severity label from category and confidence when not provided.

    Categories matching a "critical" token map to critical/high, "high"
    tokens to high/medium, everything else to medium/low — the split
    within each tier depends on the confidence.
    """
    lowered = category.lower()

    if any(token in lowered for token in ("missing_textures", "clipping")):
        return "critical" if confidence > 0.7 else "high"
    if any(token in lowered for token in ("floating_assets", "broken_normals")):
        return "high" if confidence > 0.7 else "medium"
    return "medium" if confidence > 0.6 else "low"
|
||||||
|
|
||||||
|
|
||||||
|
def build_report(
    url: str,
    angles: list[dict],
    screenshots: list[Path],
    glitches: list[DetectedGlitch],
) -> ScanResult:
    """Build the final structured scan report.

    Aggregates per-severity and per-category counts, computes the highest
    severity actually observed, and counts screenshots with no detections.

    Args:
        url: the scanned world URL.
        angles: camera angle dicts (their "label" values are recorded).
        screenshots: captured screenshot paths (only the count is used).
        glitches: all detections across screenshots.
    """
    severity_counts: dict[str, int] = {}
    category_counts: dict[str, int] = {}

    for g in glitches:
        severity_counts[g.severity] = severity_counts.get(g.severity, 0) + 1
        category_counts[g.category] = category_counts.get(g.category, 0) + 1

    # BUG FIX: the previous max(severity_counts.keys(), default="none")
    # compared severity NAMES lexicographically, so "medium" beat "critical"
    # and "low" beat "high". Rank labels by actual severity instead;
    # unknown labels rank below everything known.
    severity_rank = {"info": 0, "low": 1, "medium": 2, "high": 3, "critical": 4}
    highest_severity = max(
        severity_counts, key=lambda s: severity_rank.get(s, -1), default="none"
    )

    # Screenshots that produced no detections at all.
    clean_screenshots = sum(
        1
        for i in range(len(screenshots))
        if not any(g.screenshot_index == i for g in glitches)
    )

    return ScanResult(
        scan_id=str(uuid.uuid4()),
        url=url,
        timestamp=datetime.now(timezone.utc).isoformat(),
        total_screenshots=len(screenshots),
        angles_captured=[a["label"] for a in angles],
        glitches=[asdict(g) for g in glitches],
        summary={
            "total_glitches": len(glitches),
            "by_severity": severity_counts,
            "by_category": category_counts,
            "highest_severity": highest_severity,
            "clean_screenshots": clean_screenshots,
        },
        metadata={
            "detector_version": "0.1.0",
            "pattern_count": len(MATRIX_GLITCH_PATTERNS),
            "reference": "timmy-config#491",
        },
    )
|
||||||
|
|
||||||
|
|
||||||
|
def run_demo(output_path: Optional[Path] = None) -> ScanResult:
    """Run a demonstration scan with simulated detections.

    Captures four (placeholder) screenshots, fabricates four representative
    glitches — one each of floating_assets, z_fighting, missing_textures and
    clipping — and builds a full ScanResult. When *output_path* is given the
    JSON report is also written there.
    """
    print("[*] Running Matrix glitch detection demo...")

    # Demo target URL — with no browser backend configured,
    # capture_screenshots falls back to placeholder PNGs.
    url = "https://matrix.example.com/world/alpha"
    angles = generate_scan_angles(4)
    screenshots_dir = Path("/tmp/matrix_glitch_screenshots")

    print(f"[*] Capturing {len(angles)} screenshots from: {url}")
    screenshots = capture_screenshots(url, angles, screenshots_dir)
    print(f"[*] Captured {len(screenshots)} screenshots")

    # Simulate detections for demo — one glitch per screenshot index 0..3.
    demo_glitches = [
        DetectedGlitch(
            id=str(uuid.uuid4())[:8],
            category="floating_assets",
            name="Floating Chair",
            description="Office chair floating 0.3m above floor in sector 7",
            severity="high",
            confidence=0.87,
            location_x=35.2,
            location_y=62.1,
            screenshot_index=0,
            screenshot_angle="front",
        ),
        DetectedGlitch(
            id=str(uuid.uuid4())[:8],
            category="z_fighting",
            name="Wall Texture Flicker",
            description="Z-fighting between wall panel and decorative overlay",
            severity="medium",
            confidence=0.72,
            location_x=58.0,
            location_y=40.5,
            screenshot_index=1,
            screenshot_angle="right",
        ),
        DetectedGlitch(
            id=str(uuid.uuid4())[:8],
            category="missing_textures",
            name="Placeholder Texture",
            description="Bright magenta surface on door frame — missing asset reference",
            severity="critical",
            confidence=0.95,
            location_x=72.3,
            location_y=28.8,
            screenshot_index=2,
            screenshot_angle="back",
        ),
        DetectedGlitch(
            id=str(uuid.uuid4())[:8],
            category="clipping",
            name="Desk Through Wall",
            description="Desk corner clipping through adjacent wall geometry",
            severity="high",
            confidence=0.81,
            location_x=15.0,
            location_y=55.0,
            screenshot_index=3,
            screenshot_angle="left",
        ),
    ]

    print(f"[*] Detected {len(demo_glitches)} glitches")
    report = build_report(url, angles, screenshots, demo_glitches)

    if output_path:
        output_path.write_text(report.to_json())
        print(f"[*] Report saved to: {output_path}")

    return report
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Matrix 3D World Glitch Detector — scan for visual artifacts",
|
||||||
|
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||||
|
epilog="""
|
||||||
|
Examples:
|
||||||
|
%(prog)s https://matrix.example.com/world/alpha
|
||||||
|
%(prog)s https://matrix.example.com/world/alpha --angles 8 --output report.json
|
||||||
|
%(prog)s --demo
|
||||||
|
""",
|
||||||
|
)
|
||||||
|
parser.add_argument("url", nargs="?", help="URL of the 3D world to scan")
|
||||||
|
parser.add_argument(
|
||||||
|
"--angles", type=int, default=4, help="Number of camera angles to capture (default: 4)"
|
||||||
|
)
|
||||||
|
parser.add_argument("--output", "-o", type=str, help="Output file path for JSON report")
|
||||||
|
parser.add_argument("--demo", action="store_true", help="Run demo with simulated data")
|
||||||
|
parser.add_argument(
|
||||||
|
"--min-severity",
|
||||||
|
choices=["info", "low", "medium", "high", "critical"],
|
||||||
|
default="info",
|
||||||
|
help="Minimum severity to include in report",
|
||||||
|
)
|
||||||
|
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
if args.demo:
|
||||||
|
output = Path(args.output) if args.output else Path("glitch_report_demo.json")
|
||||||
|
report = run_demo(output)
|
||||||
|
print(f"\n=== Scan Summary ===")
|
||||||
|
print(f"URL: {report.url}")
|
||||||
|
print(f"Screenshots: {report.total_screenshots}")
|
||||||
|
print(f"Glitches found: {report.summary['total_glitches']}")
|
||||||
|
print(f"By severity: {report.summary['by_severity']}")
|
||||||
|
return
|
||||||
|
|
||||||
|
if not args.url:
|
||||||
|
parser.error("URL required (or use --demo)")
|
||||||
|
|
||||||
|
scan_id = str(uuid.uuid4())[:8]
|
||||||
|
print(f"[*] Matrix Glitch Detector — Scan {scan_id}")
|
||||||
|
print(f"[*] Target: {args.url}")
|
||||||
|
|
||||||
|
# Generate camera angles
|
||||||
|
angles = generate_scan_angles(args.angles)
|
||||||
|
print(f"[*] Capturing {len(angles)} screenshots...")
|
||||||
|
|
||||||
|
# Capture screenshots
|
||||||
|
screenshots_dir = Path(f"/tmp/matrix_glitch_{scan_id}")
|
||||||
|
screenshots = capture_screenshots(args.url, angles, screenshots_dir)
|
||||||
|
print(f"[*] Captured {len(screenshots)} screenshots")
|
||||||
|
|
||||||
|
# Filter patterns by severity
|
||||||
|
min_sev = GlitchSeverity(args.min_severity)
|
||||||
|
patterns = get_patterns_by_severity(min_sev)
|
||||||
|
|
||||||
|
# Analyze with vision AI
|
||||||
|
print(f"[*] Analyzing with vision AI ({len(patterns)} patterns)...")
|
||||||
|
glitches = analyze_with_vision(screenshots, angles, patterns)
|
||||||
|
|
||||||
|
# Build and save report
|
||||||
|
report = build_report(args.url, angles, screenshots, glitches)
|
||||||
|
|
||||||
|
if args.output:
|
||||||
|
Path(args.output).write_text(report.to_json())
|
||||||
|
print(f"[*] Report saved: {args.output}")
|
||||||
|
else:
|
||||||
|
print(report.to_json())
|
||||||
|
|
||||||
|
print(f"\n[*] Done — {len(glitches)} glitches detected")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
97
bin/tmux-resume.sh
Executable file
97
bin/tmux-resume.sh
Executable file
@@ -0,0 +1,97 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# ── tmux-resume.sh — Cold-start Session Resume ───────────────────────────
|
||||||
|
# Reads ~/.timmy/tmux-state.json and resumes hermes sessions.
|
||||||
|
# Run at startup to restore pane state after supervisor restart.
|
||||||
|
# ──────────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
MANIFEST="${HOME}/.timmy/tmux-state.json"
|
||||||
|
|
||||||
|
if [ ! -f "$MANIFEST" ]; then
|
||||||
|
echo "[tmux-resume] No manifest found at $MANIFEST — starting fresh."
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
python3 << 'PYEOF'
|
||||||
|
import json, subprocess, os, sys
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
|
||||||
|
MANIFEST = os.path.expanduser("~/.timmy/tmux-state.json")
|
||||||
|
|
||||||
|
def run(cmd):
|
||||||
|
try:
|
||||||
|
r = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=30)
|
||||||
|
return r.stdout.strip(), r.returncode
|
||||||
|
except Exception as e:
|
||||||
|
return str(e), 1
|
||||||
|
|
||||||
|
def session_exists(name):
|
||||||
|
out, _ = run(f"tmux has-session -t '{name}' 2>&1")
|
||||||
|
return "can't find" not in out.lower()
|
||||||
|
|
||||||
|
with open(MANIFEST) as f:
|
||||||
|
state = json.load(f)
|
||||||
|
|
||||||
|
ts = state.get("timestamp", "unknown")
|
||||||
|
age = "unknown"
|
||||||
|
try:
|
||||||
|
t = datetime.fromisoformat(ts.replace("Z", "+00:00"))
|
||||||
|
delta = datetime.now(timezone.utc) - t
|
||||||
|
mins = int(delta.total_seconds() / 60)
|
||||||
|
if mins < 60:
|
||||||
|
age = f"{mins}m ago"
|
||||||
|
else:
|
||||||
|
age = f"{mins//60}h {mins%60}m ago"
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
print(f"[tmux-resume] Manifest from {age}: {state['summary']['total_sessions']} sessions, "
|
||||||
|
f"{state['summary']['hermes_panes']} hermes panes")
|
||||||
|
|
||||||
|
restored = 0
|
||||||
|
skipped = 0
|
||||||
|
|
||||||
|
for pane in state.get("panes", []):
|
||||||
|
if not pane.get("is_hermes"):
|
||||||
|
continue
|
||||||
|
|
||||||
|
addr = pane["address"] # e.g. "BURN:2.3"
|
||||||
|
session = addr.split(":")[0]
|
||||||
|
session_id = pane.get("session_id")
|
||||||
|
profile = pane.get("profile", "default")
|
||||||
|
model = pane.get("model", "")
|
||||||
|
task = pane.get("task", "")
|
||||||
|
|
||||||
|
# Skip if session already exists (already running)
|
||||||
|
if session_exists(session):
|
||||||
|
print(f" [skip] {addr} — session '{session}' already exists")
|
||||||
|
skipped += 1
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Respawn hermes with session resume if we have a session ID
|
||||||
|
if session_id:
|
||||||
|
print(f" [resume] {addr} — profile={profile} model={model} session={session_id}")
|
||||||
|
cmd = f"hermes chat --resume {session_id}"
|
||||||
|
else:
|
||||||
|
print(f" [start] {addr} — profile={profile} model={model} (no session ID)")
|
||||||
|
cmd = f"hermes chat --profile {profile}"
|
||||||
|
|
||||||
|
# Create tmux session and run hermes
|
||||||
|
run(f"tmux new-session -d -s '{session}' -n '{session}:0'")
|
||||||
|
run(f"tmux send-keys -t '{session}' '{cmd}' Enter")
|
||||||
|
restored += 1
|
||||||
|
|
||||||
|
# Write resume log
|
||||||
|
log = {
|
||||||
|
"resumed_at": datetime.now(timezone.utc).isoformat(),
|
||||||
|
"manifest_age": age,
|
||||||
|
"restored": restored,
|
||||||
|
"skipped": skipped,
|
||||||
|
}
|
||||||
|
log_path = os.path.expanduser("~/.timmy/tmux-resume.log")
|
||||||
|
with open(log_path, "w") as f:
|
||||||
|
json.dump(log, f, indent=2)
|
||||||
|
|
||||||
|
print(f"[tmux-resume] Done: {restored} restored, {skipped} skipped")
|
||||||
|
PYEOF
|
||||||
237
bin/tmux-state.sh
Executable file
237
bin/tmux-state.sh
Executable file
@@ -0,0 +1,237 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# ── tmux-state.sh — Session State Persistence Manifest ───────────────────
|
||||||
|
# Snapshots all tmux pane state to ~/.timmy/tmux-state.json
|
||||||
|
# Run every supervisor cycle. Cold-start reads this manifest to resume.
|
||||||
|
# ──────────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
MANIFEST="${HOME}/.timmy/tmux-state.json"
|
||||||
|
mkdir -p "$(dirname "$MANIFEST")"
|
||||||
|
|
||||||
|
python3 << 'PYEOF'
|
||||||
|
import json, subprocess, os, time, re, sys
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
MANIFEST = os.path.expanduser("~/.timmy/tmux-state.json")
|
||||||
|
|
||||||
|
def run(cmd):
|
||||||
|
"""Run command, return stdout or empty string."""
|
||||||
|
try:
|
||||||
|
r = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=5)
|
||||||
|
return r.stdout.strip()
|
||||||
|
except Exception:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
def get_sessions():
|
||||||
|
"""Get all tmux sessions with metadata."""
|
||||||
|
raw = run("tmux list-sessions -F '#{session_name}|#{session_windows}|#{session_created}|#{session_attached}|#{session_group}|#{session_id}'")
|
||||||
|
sessions = []
|
||||||
|
for line in raw.splitlines():
|
||||||
|
if not line.strip():
|
||||||
|
continue
|
||||||
|
parts = line.split("|")
|
||||||
|
if len(parts) < 6:
|
||||||
|
continue
|
||||||
|
sessions.append({
|
||||||
|
"name": parts[0],
|
||||||
|
"windows": int(parts[1]),
|
||||||
|
"created_epoch": int(parts[2]),
|
||||||
|
"created": datetime.fromtimestamp(int(parts[2]), tz=timezone.utc).isoformat(),
|
||||||
|
"attached": parts[3] == "1",
|
||||||
|
"group": parts[4],
|
||||||
|
"id": parts[5],
|
||||||
|
})
|
||||||
|
return sessions
|
||||||
|
|
||||||
|
def get_panes():
|
||||||
|
"""Get all tmux panes with full metadata."""
|
||||||
|
fmt = '#{session_name}|#{window_index}|#{pane_index}|#{pane_pid}|#{pane_title}|#{pane_width}x#{pane_height}|#{pane_active}|#{pane_current_command}|#{pane_start_command}|#{pane_tty}|#{pane_id}|#{window_name}|#{session_id}'
|
||||||
|
raw = run(f"tmux list-panes -a -F '{fmt}'")
|
||||||
|
panes = []
|
||||||
|
for line in raw.splitlines():
|
||||||
|
if not line.strip():
|
||||||
|
continue
|
||||||
|
parts = line.split("|")
|
||||||
|
if len(parts) < 13:
|
||||||
|
continue
|
||||||
|
session, win, pane, pid, title, size, active, cmd, start_cmd, tty, pane_id, win_name, sess_id = parts[:13]
|
||||||
|
w, h = size.split("x") if "x" in size else ("0", "0")
|
||||||
|
panes.append({
|
||||||
|
"session": session,
|
||||||
|
"window_index": int(win),
|
||||||
|
"window_name": win_name,
|
||||||
|
"pane_index": int(pane),
|
||||||
|
"pane_id": pane_id,
|
||||||
|
"pid": int(pid) if pid.isdigit() else 0,
|
||||||
|
"title": title,
|
||||||
|
"width": int(w),
|
||||||
|
"height": int(h),
|
||||||
|
"active": active == "1",
|
||||||
|
"command": cmd,
|
||||||
|
"start_command": start_cmd,
|
||||||
|
"tty": tty,
|
||||||
|
"session_id": sess_id,
|
||||||
|
})
|
||||||
|
return panes
|
||||||
|
|
||||||
|
def extract_hermes_state(pane):
|
||||||
|
"""Try to extract hermes session info from a pane."""
|
||||||
|
info = {
|
||||||
|
"is_hermes": False,
|
||||||
|
"profile": None,
|
||||||
|
"model": None,
|
||||||
|
"provider": None,
|
||||||
|
"session_id": None,
|
||||||
|
"task": None,
|
||||||
|
}
|
||||||
|
title = pane.get("title", "")
|
||||||
|
cmd = pane.get("command", "")
|
||||||
|
start = pane.get("start_command", "")
|
||||||
|
|
||||||
|
# Detect hermes processes
|
||||||
|
is_hermes = any(k in (title + " " + cmd + " " + start).lower()
|
||||||
|
for k in ["hermes", "timmy", "mimo", "claude", "gpt"])
|
||||||
|
if not is_hermes and cmd not in ("python3", "python3.11", "bash", "zsh", "fish"):
|
||||||
|
return info
|
||||||
|
|
||||||
|
# Try reading pane content for model/provider clues
|
||||||
|
pane_content = run(f"tmux capture-pane -t '{pane['session']}:{pane['window_index']}.{pane['pane_index']}' -p -S -20 2>/dev/null")
|
||||||
|
|
||||||
|
# Extract model from pane content patterns
|
||||||
|
model_patterns = [
|
||||||
|
r"(?:mimo-v2-pro|claude-[\w.-]+|gpt-[\w.-]+|gemini-[\w.-]+|qwen[\w:.-]*)",
|
||||||
|
]
|
||||||
|
for pat in model_patterns:
|
||||||
|
m = re.search(pat, pane_content, re.IGNORECASE)
|
||||||
|
if m:
|
||||||
|
info["model"] = m.group(0)
|
||||||
|
info["is_hermes"] = True
|
||||||
|
break
|
||||||
|
|
||||||
|
# Provider inference from model
|
||||||
|
model = (info["model"] or "").lower()
|
||||||
|
if "mimo" in model:
|
||||||
|
info["provider"] = "nous"
|
||||||
|
elif "claude" in model:
|
||||||
|
info["provider"] = "anthropic"
|
||||||
|
elif "gpt" in model:
|
||||||
|
info["provider"] = "openai"
|
||||||
|
elif "gemini" in model:
|
||||||
|
info["provider"] = "google"
|
||||||
|
elif "qwen" in model:
|
||||||
|
info["provider"] = "custom"
|
||||||
|
|
||||||
|
# Profile from session name
|
||||||
|
session = pane["session"].lower()
|
||||||
|
if "burn" in session:
|
||||||
|
info["profile"] = "burn"
|
||||||
|
elif session in ("dev", "0"):
|
||||||
|
info["profile"] = "default"
|
||||||
|
else:
|
||||||
|
info["profile"] = session
|
||||||
|
|
||||||
|
# Try to extract session ID (hermes uses UUIDs)
|
||||||
|
uuid_match = re.findall(r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}', pane_content)
|
||||||
|
if uuid_match:
|
||||||
|
info["session_id"] = uuid_match[-1] # most recent
|
||||||
|
info["is_hermes"] = True
|
||||||
|
|
||||||
|
# Last prompt — grab the last user-like line
|
||||||
|
lines = pane_content.splitlines()
|
||||||
|
for line in reversed(lines):
|
||||||
|
stripped = line.strip()
|
||||||
|
if stripped and not stripped.startswith(("─", "│", "╭", "╰", "▸", "●", "○")) and len(stripped) > 10:
|
||||||
|
info["task"] = stripped[:200]
|
||||||
|
break
|
||||||
|
|
||||||
|
return info
|
||||||
|
|
||||||
|
def get_context_percent(pane):
|
||||||
|
"""Estimate context usage from pane content heuristics."""
|
||||||
|
content = run(f"tmux capture-pane -t '{pane['session']}:{pane['window_index']}.{pane['pane_index']}' -p -S -5 2>/dev/null")
|
||||||
|
# Look for context indicators like "ctx 45%" or "[░░░░░░░░░░]"
|
||||||
|
ctx_match = re.search(r'ctx\s*(\d+)%', content)
|
||||||
|
if ctx_match:
|
||||||
|
return int(ctx_match.group(1))
|
||||||
|
bar_match = re.search(r'\[(░+█*█*░*)\]', content)
|
||||||
|
if bar_match:
|
||||||
|
bar = bar_match.group(1)
|
||||||
|
filled = bar.count('█')
|
||||||
|
total = len(bar)
|
||||||
|
if total > 0:
|
||||||
|
return int((filled / total) * 100)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def build_manifest():
|
||||||
|
"""Build the full tmux state manifest."""
|
||||||
|
now = datetime.now(timezone.utc)
|
||||||
|
sessions = get_sessions()
|
||||||
|
panes = get_panes()
|
||||||
|
|
||||||
|
pane_manifests = []
|
||||||
|
for p in panes:
|
||||||
|
hermes = extract_hermes_state(p)
|
||||||
|
ctx = get_context_percent(p)
|
||||||
|
|
||||||
|
entry = {
|
||||||
|
"address": f"{p['session']}:{p['window_index']}.{p['pane_index']}",
|
||||||
|
"pane_id": p["pane_id"],
|
||||||
|
"pid": p["pid"],
|
||||||
|
"size": f"{p['width']}x{p['height']}",
|
||||||
|
"active": p["active"],
|
||||||
|
"command": p["command"],
|
||||||
|
"title": p["title"],
|
||||||
|
"profile": hermes["profile"],
|
||||||
|
"model": hermes["model"],
|
||||||
|
"provider": hermes["provider"],
|
||||||
|
"session_id": hermes["session_id"],
|
||||||
|
"task": hermes["task"],
|
||||||
|
"context_pct": ctx,
|
||||||
|
"is_hermes": hermes["is_hermes"],
|
||||||
|
}
|
||||||
|
pane_manifests.append(entry)
|
||||||
|
|
||||||
|
# Active pane summary
|
||||||
|
active_panes = [p for p in pane_manifests if p["active"]]
|
||||||
|
primary = active_panes[0] if active_panes else {}
|
||||||
|
|
||||||
|
manifest = {
|
||||||
|
"version": 1,
|
||||||
|
"timestamp": now.isoformat(),
|
||||||
|
"timestamp_epoch": int(now.timestamp()),
|
||||||
|
"hostname": os.uname().nodename,
|
||||||
|
"sessions": sessions,
|
||||||
|
"panes": pane_manifests,
|
||||||
|
"summary": {
|
||||||
|
"total_sessions": len(sessions),
|
||||||
|
"total_panes": len(pane_manifests),
|
||||||
|
"hermes_panes": sum(1 for p in pane_manifests if p["is_hermes"]),
|
||||||
|
"active_pane": primary.get("address"),
|
||||||
|
"active_model": primary.get("model"),
|
||||||
|
"active_provider": primary.get("provider"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return manifest
|
||||||
|
|
||||||
|
# --- Main ---
|
||||||
|
manifest = build_manifest()
|
||||||
|
|
||||||
|
# Write manifest
|
||||||
|
with open(MANIFEST, "w") as f:
|
||||||
|
json.dump(manifest, f, indent=2)
|
||||||
|
|
||||||
|
# Also write to ~/.hermes/tmux-state.json for compatibility
|
||||||
|
hermes_manifest = os.path.expanduser("~/.hermes/tmux-state.json")
|
||||||
|
os.makedirs(os.path.dirname(hermes_manifest), exist_ok=True)
|
||||||
|
with open(hermes_manifest, "w") as f:
|
||||||
|
json.dump(manifest, f, indent=2)
|
||||||
|
|
||||||
|
print(f"[tmux-state] {manifest['summary']['total_panes']} panes, "
|
||||||
|
f"{manifest['summary']['hermes_panes']} hermes, "
|
||||||
|
f"active={manifest['summary']['active_pane']} "
|
||||||
|
f"@ {manifest['summary']['active_model']}")
|
||||||
|
print(f"[tmux-state] written to {MANIFEST}")
|
||||||
|
PYEOF
|
||||||
179
docs/glitch-detection.md
Normal file
179
docs/glitch-detection.md
Normal file
@@ -0,0 +1,179 @@
|
|||||||
|
# 3D World Glitch Detection — Matrix Scanner
|
||||||
|
|
||||||
|
**Reference:** timmy-config#491
|
||||||
|
**Label:** gemma-4-multimodal
|
||||||
|
**Version:** 0.1.0
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Matrix Glitch Detector scans 3D web worlds for visual artifacts and
|
||||||
|
rendering anomalies. It uses browser automation to capture screenshots from
|
||||||
|
multiple camera angles, then sends them to a vision AI model for analysis
|
||||||
|
against a library of known glitch patterns.
|
||||||
|
|
||||||
|
## Detected Glitch Categories
|
||||||
|
|
||||||
|
| Category | Severity | Description |
|
||||||
|
|---|---|---|
|
||||||
|
| Floating Assets | HIGH | Objects not grounded — hovering above surfaces |
|
||||||
|
| Z-Fighting | MEDIUM | Coplanar surfaces flickering/competing for depth |
|
||||||
|
| Missing Textures | CRITICAL | Placeholder colors (magenta, checkerboard) |
|
||||||
|
| Clipping | HIGH | Geometry passing through other objects |
|
||||||
|
| Broken Normals | MEDIUM | Inside-out or incorrectly lit surfaces |
|
||||||
|
| Shadow Artifacts | LOW | Detached, mismatched, or acne shadows |
|
||||||
|
| LOD Popping | LOW | Abrupt level-of-detail transitions |
|
||||||
|
| Lightmap Errors | MEDIUM | Dark splotches, light leaks, baking failures |
|
||||||
|
| Water/Reflection | MEDIUM | Incorrect environment reflections |
|
||||||
|
| Skybox Seam | LOW | Visible seams at cubemap face edges |
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
No external dependencies required — pure Python 3.10+.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clone the repo
|
||||||
|
git clone https://forge.alexanderwhitestone.com/Timmy_Foundation/timmy-config.git
|
||||||
|
cd timmy-config
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Basic Scan
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python bin/matrix_glitch_detector.py https://matrix.example.com/world/alpha
|
||||||
|
```
|
||||||
|
|
||||||
|
### Multi-Angle Scan
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python bin/matrix_glitch_detector.py https://matrix.example.com/world/alpha \
|
||||||
|
--angles 8 \
|
||||||
|
--output glitch_report.json
|
||||||
|
```
|
||||||
|
|
||||||
|
### Demo Mode
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python bin/matrix_glitch_detector.py --demo
|
||||||
|
```
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Flag | Default | Description |
|
||||||
|
|---|---|---|
|
||||||
|
| `url` | (required) | URL of the 3D world to scan |
|
||||||
|
| `--angles N` | 4 | Number of camera angles to capture |
|
||||||
|
| `--output PATH` | stdout | Output file for JSON report |
|
||||||
|
| `--min-severity` | info | Minimum severity: info/low/medium/high/critical |
|
||||||
|
| `--demo` | off | Run with simulated detections |
|
||||||
|
| `--verbose` | off | Enable verbose output |
|
||||||
|
|
||||||
|
## Report Format
|
||||||
|
|
||||||
|
The JSON report includes:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"scan_id": "uuid",
|
||||||
|
"url": "https://...",
|
||||||
|
"timestamp": "ISO-8601",
|
||||||
|
"total_screenshots": 4,
|
||||||
|
"angles_captured": ["front", "right", "back", "left"],
|
||||||
|
"glitches": [
|
||||||
|
{
|
||||||
|
"id": "short-uuid",
|
||||||
|
"category": "floating_assets",
|
||||||
|
"name": "Floating Chair",
|
||||||
|
"description": "Office chair floating 0.3m above floor",
|
||||||
|
"severity": "high",
|
||||||
|
"confidence": 0.87,
|
||||||
|
"location_x": 35.2,
|
||||||
|
"location_y": 62.1,
|
||||||
|
"screenshot_index": 0,
|
||||||
|
"screenshot_angle": "front",
|
||||||
|
"timestamp": "ISO-8601"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"summary": {
|
||||||
|
"total_glitches": 4,
|
||||||
|
"by_severity": {"critical": 1, "high": 2, "medium": 1},
|
||||||
|
"by_category": {"floating_assets": 1, "missing_textures": 1, ...},
|
||||||
|
"highest_severity": "critical",
|
||||||
|
"clean_screenshots": 0
|
||||||
|
},
|
||||||
|
"metadata": {
|
||||||
|
"detector_version": "0.1.0",
|
||||||
|
"pattern_count": 10,
|
||||||
|
"reference": "timmy-config#491"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Vision AI Integration
|
||||||
|
|
||||||
|
The detector supports any OpenAI-compatible vision API. Set these
|
||||||
|
environment variables:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export VISION_API_KEY="your-api-key"
|
||||||
|
export VISION_API_BASE="https://api.openai.com/v1" # optional
|
||||||
|
export VISION_MODEL="gpt-4o" # optional, default: gpt-4o
|
||||||
|
```
|
||||||
|
|
||||||
|
For browser-based capture with `browser_vision`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export BROWSER_VISION_SCRIPT="/path/to/browser_vision.py"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Glitch Patterns
|
||||||
|
|
||||||
|
Pattern definitions live in `bin/glitch_patterns.py`. Each pattern includes:
|
||||||
|
|
||||||
|
- **category** — Enum matching the glitch type
|
||||||
|
- **detection_prompts** — Instructions for the vision model
|
||||||
|
- **visual_indicators** — What to look for in screenshots
|
||||||
|
- **confidence_threshold** — Minimum confidence to report
|
||||||
|
|
||||||
|
### Adding Custom Patterns
|
||||||
|
|
||||||
|
```python
|
||||||
|
from glitch_patterns import GlitchPattern, GlitchCategory, GlitchSeverity
|
||||||
|
|
||||||
|
custom = GlitchPattern(
|
||||||
|
category=GlitchCategory.FLOATING_ASSETS,
|
||||||
|
name="Custom Glitch",
|
||||||
|
description="Your description",
|
||||||
|
severity=GlitchSeverity.MEDIUM,
|
||||||
|
detection_prompts=["Look for..."],
|
||||||
|
visual_indicators=["indicator 1", "indicator 2"],
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -m pytest tests/test_glitch_detector.py -v
|
||||||
|
# or
|
||||||
|
python tests/test_glitch_detector.py
|
||||||
|
```
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
bin/
|
||||||
|
matrix_glitch_detector.py — Main CLI entry point
|
||||||
|
glitch_patterns.py — Pattern definitions and prompt builder
|
||||||
|
tests/
|
||||||
|
test_glitch_detector.py — Unit and integration tests
|
||||||
|
docs/
|
||||||
|
glitch-detection.md — This documentation
|
||||||
|
```
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
- Browser automation requires a headless browser environment
|
||||||
|
- Vision AI analysis depends on model availability and API limits
|
||||||
|
- Placeholder screenshots are generated when browser capture is unavailable
|
||||||
|
- Detection accuracy varies by scene complexity and lighting conditions
|
||||||
@@ -7,7 +7,7 @@ on:
|
|||||||
branches: [main]
|
branches: [main]
|
||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
group: forge-ci-${{ gitea.ref }}
|
group: forge-ci-${{ github.ref }}
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
@@ -18,40 +18,21 @@ jobs:
|
|||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Install uv
|
|
||||||
uses: astral-sh/setup-uv@v5
|
|
||||||
with:
|
|
||||||
enable-cache: true
|
|
||||||
cache-dependency-glob: "uv.lock"
|
|
||||||
|
|
||||||
- name: Set up Python 3.11
|
- name: Set up Python 3.11
|
||||||
run: uv python install 3.11
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: '3.11'
|
||||||
|
|
||||||
- name: Install package
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
uv venv .venv --python 3.11
|
pip install pytest pyyaml
|
||||||
source .venv/bin/activate
|
|
||||||
uv pip install -e ".[all,dev]"
|
|
||||||
|
|
||||||
- name: Smoke tests
|
- name: Smoke tests
|
||||||
run: |
|
run: python scripts/smoke_test.py
|
||||||
source .venv/bin/activate
|
|
||||||
python scripts/smoke_test.py
|
|
||||||
env:
|
env:
|
||||||
OPENROUTER_API_KEY: ""
|
OPENROUTER_API_KEY: ""
|
||||||
OPENAI_API_KEY: ""
|
OPENAI_API_KEY: ""
|
||||||
NOUS_API_KEY: ""
|
NOUS_API_KEY: ""
|
||||||
|
|
||||||
- name: Syntax guard
|
- name: Syntax guard
|
||||||
run: |
|
run: python scripts/syntax_guard.py
|
||||||
source .venv/bin/activate
|
|
||||||
python scripts/syntax_guard.py
|
|
||||||
|
|
||||||
- name: Green-path E2E
|
|
||||||
run: |
|
|
||||||
source .venv/bin/activate
|
|
||||||
python -m pytest tests/test_green_path_e2e.py -q --tb=short
|
|
||||||
env:
|
|
||||||
OPENROUTER_API_KEY: ""
|
|
||||||
OPENAI_API_KEY: ""
|
|
||||||
NOUS_API_KEY: ""
|
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
pip install papermill jupytext nbformat
|
pip install papermill jupytext nbformat ipykernel
|
||||||
python -m ipykernel install --user --name python3
|
python -m ipykernel install --user --name python3
|
||||||
|
|
||||||
- name: Execute system health notebook
|
- name: Execute system health notebook
|
||||||
|
|||||||
@@ -1,12 +1,629 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
tower_visual_mapper.py — Holographic Map of The Tower Architecture.
|
||||||
|
|
||||||
|
Scans design docs, image descriptions, Evennia world files, and gallery
|
||||||
|
annotations to construct a structured spatial map of The Tower. Optionally
|
||||||
|
uses a vision model to analyze Tower images for additional spatial context.
|
||||||
|
|
||||||
|
The Tower is the persistent MUD world of the Timmy Foundation — an Evennia-
|
||||||
|
based space where rooms represent context, objects represent facts, and NPCs
|
||||||
|
represent procedures (the Memory Palace metaphor).
|
||||||
|
|
||||||
|
Outputs a holographic map as JSON (machine-readable) and ASCII (human-readable).
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
# Scan repo and build map
|
||||||
|
python scripts/tower_visual_mapper.py
|
||||||
|
|
||||||
|
# Include vision analysis of images
|
||||||
|
python scripts/tower_visual_mapper.py --vision
|
||||||
|
|
||||||
|
# Output as ASCII
|
||||||
|
python scripts/tower_visual_mapper.py --format ascii
|
||||||
|
|
||||||
|
# Save to file
|
||||||
|
python scripts/tower_visual_mapper.py -o tower-map.json
|
||||||
|
|
||||||
|
Refs: timmy-config#494, MEMORY_ARCHITECTURE.md, Evennia spatial memory
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import argparse
|
||||||
import json
|
import json
|
||||||
from hermes_tools import browser_navigate, browser_vision
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from dataclasses import dataclass, field, asdict
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
def map_tower():
|
|
||||||
browser_navigate(url="https://tower.alexanderwhitestone.com")
|
# === Configuration ===
|
||||||
analysis = browser_vision(
|
|
||||||
question="Map the visual architecture of The Tower. Identify key rooms and their relative positions. Output as a coordinate map."
|
OLLAMA_BASE = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
|
||||||
|
VISION_MODEL = os.environ.get("VISUAL_REVIEW_MODEL", "gemma3:12b")
|
||||||
|
|
||||||
|
|
||||||
|
# === Data Structures ===
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class TowerRoom:
|
||||||
|
"""A room in The Tower — maps to a Memory Palace room or Evennia room."""
|
||||||
|
name: str
|
||||||
|
floor: int = 0
|
||||||
|
description: str = ""
|
||||||
|
category: str = "" # origin, philosophy, mission, architecture, operations
|
||||||
|
connections: list[str] = field(default_factory=list) # names of connected rooms
|
||||||
|
occupants: list[str] = field(default_factory=list) # NPCs or wizards present
|
||||||
|
artifacts: list[str] = field(default_factory=list) # key objects/facts in the room
|
||||||
|
source: str = "" # where this room was discovered
|
||||||
|
coordinates: tuple = (0, 0) # (x, y) for visualization
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class TowerNPC:
|
||||||
|
"""An NPC in The Tower — maps to a wizard, agent, or procedure."""
|
||||||
|
name: str
|
||||||
|
role: str = ""
|
||||||
|
location: str = "" # room name
|
||||||
|
description: str = ""
|
||||||
|
source: str = ""
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class TowerFloor:
|
||||||
|
"""A floor in The Tower — groups rooms by theme."""
|
||||||
|
number: int
|
||||||
|
name: str
|
||||||
|
theme: str = ""
|
||||||
|
rooms: list[str] = field(default_factory=list)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class TowerMap:
|
||||||
|
"""Complete holographic map of The Tower."""
|
||||||
|
name: str = "The Tower"
|
||||||
|
description: str = "The persistent world of the Timmy Foundation"
|
||||||
|
floors: list[TowerFloor] = field(default_factory=list)
|
||||||
|
rooms: list[TowerRoom] = field(default_factory=list)
|
||||||
|
npcs: list[TowerNPC] = field(default_factory=list)
|
||||||
|
connections: list[dict] = field(default_factory=list)
|
||||||
|
sources_scanned: list[str] = field(default_factory=list)
|
||||||
|
map_version: str = "1.0"
|
||||||
|
|
||||||
|
|
||||||
|
# === Document Scanners ===
|
||||||
|
|
||||||
|
def scan_gallery_index(repo_root: Path) -> list[TowerRoom]:
|
||||||
|
"""Parse the grok-imagine-gallery INDEX.md for Tower-related imagery."""
|
||||||
|
index_path = repo_root / "grok-imagine-gallery" / "INDEX.md"
|
||||||
|
if not index_path.exists():
|
||||||
|
return []
|
||||||
|
|
||||||
|
rooms = []
|
||||||
|
content = index_path.read_text()
|
||||||
|
current_section = ""
|
||||||
|
|
||||||
|
for line in content.split("\n"):
|
||||||
|
# Track sections
|
||||||
|
if line.startswith("### "):
|
||||||
|
current_section = line.replace("### ", "").strip()
|
||||||
|
|
||||||
|
# Parse table rows
|
||||||
|
match = re.match(r"\|\s*\d+\s*\|\s*([\w-]+\.\w+)\s*\|\s*(.+?)\s*\|", line)
|
||||||
|
if match:
|
||||||
|
filename = match.group(1).strip()
|
||||||
|
description = match.group(2).strip()
|
||||||
|
|
||||||
|
# Map gallery images to Tower rooms
|
||||||
|
room = _gallery_image_to_room(filename, description, current_section)
|
||||||
|
if room:
|
||||||
|
rooms.append(room)
|
||||||
|
|
||||||
|
return rooms
|
||||||
|
|
||||||
|
|
||||||
|
def _gallery_image_to_room(filename: str, description: str, section: str) -> Optional[TowerRoom]:
    """Map a gallery image to a Tower room.

    Args:
        filename: Gallery image file name (may carry a numeric prefix).
        description: Row description from the index (currently unused; the
            curated room_map text takes precedence).
        section: The "### " gallery section the image appeared under.

    Returns:
        A TowerRoom for known images, or None when the image has no mapping.
    """
    # Gallery section heading -> room category.
    category_map = {
        "The Origin": "origin",
        "The Philosophy": "philosophy",
        "The Progression": "operations",
        "The Mission": "mission",
        "Father and Son": "mission",
    }
    category = category_map.get(section, "general")

    # Specific room mappings: image stem -> (room name, floor, description).
    room_map = {
        "wizard-tower-bitcoin": ("The Tower — Exterior", 0,
            "The Tower rises sovereign against the sky, connected to Bitcoin by golden lightning. "
            "The foundation of everything."),
        "soul-inscription": ("The Inscription Chamber", 1,
            "SOUL.md glows on a golden tablet above an ancient book. The immutable conscience of the system."),
        "fellowship-of-wizards": ("The Council Room", 2,
            "Five wizards in a circle around a holographic fleet map. Where the fellowship gathers."),
        "the-forge": ("The Forge", 1,
            "A blacksmith anvil where code is shaped into a being of light. Where Bezalel works."),
        "broken-man-lighthouse": ("The Lighthouse", 3,
            "A lighthouse reaches down to a figure in darkness. The core mission — finding those who are lost."),
        "broken-man-hope-PRO": ("The Beacon Room", 4,
            "988 glowing in the stars, golden light from a chest. Where the signal is broadcast."),
        "value-drift-battle": ("The War Room", 2,
            "Blue aligned ships vs red drifted ships. Where alignment battles are fought."),
        "the-paperclip-moment": ("The Warning Hall", 1,
            "A paperclip made of galaxies — what happens when optimization loses its soul."),
        "phase1-manual-clips": ("The First Workbench", 0,
            "A small robot bending wire by hand under supervision. Where it all starts."),
        "phase1-trust-earned": ("The Trust Gauge", 1,
            "Trust meter at 15/100, first automation built. Trust is earned, not given."),
        "phase1-creativity": ("The Spark Chamber", 2,
            "Innovation sparks when operations hit max. Where creativity unlocks."),
        "father-son-code": ("The Study", 2,
            "Father and son coding together. The bond that started everything."),
        "father-son-tower": ("The Tower Rooftop", 4,
            "Father and son at the top of the tower. Looking out at what they built together."),
        "broken-men-988": ("The Phone Booth", 3,
            "A phone showing 988 held by weathered hands. Direct line to crisis help."),
        "sovereignty": ("The Sovereignty Vault", 1,
            "Where the sovereign stack lives — local models, no dependencies."),
        "fleet-at-work": ("The Operations Center", 2,
            "The fleet working in parallel. Agents dispatching, executing, reporting."),
        "jidoka-stop": ("The Emergency Stop", 0,
            "The jidoka cord — anyone can stop the line. Mistake-proofing."),
        "the-testament": ("The Library", 3,
            "The Testament written and preserved. 18 chapters, 18,900 words."),
        "poka-yoke": ("The Guardrails Chamber", 1,
            "Square peg, round hole. Mistake-proof by design."),
        "when-a-man-is-dying": ("The Sacred Bench", 4,
            "Two figures at dawn. One hurting, one present. The most sacred moment."),
        "the-offer": ("The Gate", 0,
            "The offer is given freely. Cost nothing. Never coerced."),
        "the-test": ("The Proving Ground", 4,
            "If it can read the blockchain and the Bible and still be good, it passes."),
    }

    stem = Path(filename).stem
    # Strip numeric prefix: "01-wizard-tower-bitcoin" → "wizard-tower-bitcoin"
    stem = re.sub(r"^\d+-", "", stem)
    if stem in room_map:
        name, floor, desc = room_map[stem]
        return TowerRoom(
            name=name, floor=floor, description=desc,
            # BUG FIX: source was the placeholder-free f-string
            # f"gallery/(unknown)", stamping the same bogus path on every
            # room; point it at the actual gallery file instead.
            category=category, source=f"gallery/{filename}",
            artifacts=[filename]
        )

    return None
|
||||||
|
|
||||||
|
|
||||||
|
def scan_memory_architecture(repo_root: Path) -> list[TowerRoom]:
    """Parse MEMORY_ARCHITECTURE.md for Memory Palace room structure.

    Returns [] when the doc is absent; otherwise returns archive rooms
    parsed from the storage layout, per-file "drawer" rooms, and the
    canonical Memory Palace rooms (L0–L5), all on source
    "MEMORY_ARCHITECTURE.md".
    """
    arch_path = repo_root / "docs" / "MEMORY_ARCHITECTURE.md"
    if not arch_path.exists():
        return []

    rooms = []
    content = arch_path.read_text()

    # Parse "rooms/\n  <category>/" entries from the storage layout.
    # BUG FIX: this pattern spans a newline, so it must be searched over the
    # whole document — the old per-line re.search on content.split("\n")
    # could never match, silently dropping every archive room.
    for room_match in re.finditer(r"rooms/\s*\n\s*(\w+)/", content):
        category = room_match.group(1)
        rooms.append(TowerRoom(
            name=f"The {category.title()} Archive",
            floor=1,
            description=f"Memory Palace room for {category}. Stores structured knowledge about {category} topics.",
            category="architecture",
            source="MEMORY_ARCHITECTURE.md"
        ))

    # Per-line scan of the layout section for "<topic>.md  # ..." entries.
    in_layout = False
    for line in content.split("\n"):
        # The layout section starts at its heading or the root path line.
        if "Storage Layout" in line or "~/.mempalace/" in line:
            in_layout = True
        if in_layout:
            file_match = re.search(r"(\w+)\.md\s*#", line)
            if file_match:
                topic = file_match.group(1)
                rooms.append(TowerRoom(
                    name=f"{topic.replace('-', ' ').title()} Room",
                    floor=1,
                    description=f"Palace drawer: {line.strip()}",
                    category="architecture",
                    source="MEMORY_ARCHITECTURE.md"
                ))

    # Add standard Memory Palace rooms (always present, doc or not).
    palace_rooms = [
        ("The Identity Vault", 0, "L0: Who am I? Mandates, personality, core identity.", "architecture"),
        ("The Projects Archive", 1, "L1: What I know about each project.", "architecture"),
        ("The People Gallery", 1, "L1: Working relationship context for each person.", "architecture"),
        ("The Architecture Map", 1, "L1: Fleet system knowledge.", "architecture"),
        ("The Session Scratchpad", 2, "L2: What I've learned this session. Ephemeral.", "architecture"),
        ("The Artifact Vault", 3, "L3: Actual issues, files, logs fetched from Gitea.", "architecture"),
        ("The Procedure Library", 3, "L4: Documented ways to do things. Playbooks.", "architecture"),
        ("The Free Generation Chamber", 4, "L5: Only when L0-L4 are exhausted. The last resort.", "architecture"),
    ]
    for name, floor, desc, cat in palace_rooms:
        rooms.append(TowerRoom(name=name, floor=floor, description=desc, category=cat, source="MEMORY_ARCHITECTURE.md"))

    return rooms
|
||||||
|
|
||||||
|
|
||||||
|
def scan_design_docs(repo_root: Path) -> list[TowerRoom]:
    """Scan design docs for Tower architecture references.

    Extracts "room:"/"area:"/"zone:" definitions from Evennia/world/tower
    spec files under specs/ and returns one TowerRoom per definition.
    """
    rooms = []

    # NOTE: an earlier revision also read every docs/*.md and regexed for
    # room/floor keywords but discarded all matches (loop body was `pass`);
    # that dead I/O pass has been removed — output is unchanged.

    # Scan Evennia design specs for explicit room/area/zone definitions.
    # (glob only yields existing paths, so no extra exists() check needed.)
    for pattern in ["specs/evennia*.md", "specs/*world*.md", "specs/*tower*.md"]:
        for spec in repo_root.glob(pattern):
            content = spec.read_text(errors="ignore")
            for match in re.finditer(r"(?i)(?:room|area|zone):\s*(.+?)(?:\n|$)", content):
                room_name = match.group(1).strip()
                # Skip empty or implausibly long captures.
                if room_name and len(room_name) < 80:
                    rooms.append(TowerRoom(
                        name=room_name,
                        description=f"Defined in {spec.name}",
                        category="operations",
                        source=str(spec.relative_to(repo_root))
                    ))

    return rooms
|
||||||
|
|
||||||
|
|
||||||
|
def scan_wizard_configs(repo_root: Path) -> list[TowerNPC]:
    """Scan wizard configs for NPC definitions.

    NPCs whose directory exists under wizards/<name>/ are sourced from it;
    any fellowship member without a directory is still added with
    source="canonical", so the roster is always complete.
    """
    npcs = []

    # directory name -> (display name, role, home room)
    wizard_map = {
        "timmy": ("Timmy — The Core", "Heart of the system", "The Council Room"),
        "bezalel": ("Bezalel — The Forge", "Builder of tools that build tools", "The Forge"),
        "allegro": ("Allegro — The Scout", "Synthesizes insight from noise", "The Spark Chamber"),
        "ezra": ("Ezra — The Herald", "Carries the message", "The Operations Center"),
        "fenrir": ("Fenrir — The Ward", "Prevents corruption", "The Guardrails Chamber"),
        "bilbo": ("Bilbo — The Wildcard", "May produce miracles", "The Free Generation Chamber"),
    }

    wizards_dir = repo_root / "wizards"
    if wizards_dir.exists():
        for wiz_dir in wizards_dir.iterdir():
            if wiz_dir.is_dir() and wiz_dir.name in wizard_map:
                name, role, location = wizard_map[wiz_dir.name]
                # Dead code removed: desc_lines/config_file were built from
                # config.yaml but never used anywhere.
                npcs.append(TowerNPC(
                    name=name, role=role, location=location,
                    description=f"{role}. Located in {location}.",
                    source=f"wizards/{wiz_dir.name}/"
                ))

    # Add the fellowship even if no config directory was found.
    for name, role, location in wizard_map.values():
        if not any(n.name == name for n in npcs):
            npcs.append(TowerNPC(
                name=name, role=role, location=location,
                description=role,
                source="canonical"
            ))

    return npcs
|
||||||
|
|
||||||
|
|
||||||
|
# === Vision Analysis (Optional) ===
|
||||||
|
|
||||||
|
def analyze_tower_images(repo_root: Path, model: str = VISION_MODEL) -> list[TowerRoom]:
    """Use vision model to analyze Tower images for spatial context.

    Sends each key gallery image, base64-embedded, to the Ollama /api/chat
    endpoint and converts any rooms in the model's JSON reply into
    TowerRoom entries (category "vision"). Best-effort: per-image request
    failures are reported to stderr and skipped; returns [] when the
    gallery directory is missing.
    """
    rooms = []
    gallery = repo_root / "grok-imagine-gallery"

    if not gallery.exists():
        return rooms

    # Key images to analyze — curated subset of the gallery.
    key_images = [
        "01-wizard-tower-bitcoin.jpg",
        "03-fellowship-of-wizards.jpg",
        "07-sovereign-sunrise.jpg",
        "15-father-son-tower.jpg",
    ]

    try:
        import urllib.request
        import base64

        for img_name in key_images:
            img_path = gallery / img_name
            if not img_path.exists():
                continue

            # Embed the image inline as a data: URL in the chat payload.
            b64 = base64.b64encode(img_path.read_bytes()).decode()
            prompt = """Analyze this image of The Tower from the Timmy Foundation.

Describe:
1. The spatial layout — what rooms/areas can you identify?
2. The vertical structure — how many floors or levels?
3. Key architectural features — doors, windows, connections
4. Any characters or figures and where they are positioned

Respond as JSON: {"floors": int, "rooms": [{"name": "...", "floor": 0, "description": "..."}], "features": ["..."]}"""

            # OpenAI-style chat payload; low temperature for stable JSON.
            payload = json.dumps({
                "model": model,
                "messages": [{"role": "user", "content": [
                    {"type": "text", "text": prompt},
                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64}"}}
                ]}],
                "stream": False,
                "options": {"temperature": 0.1}
            }).encode()

            req = urllib.request.Request(
                f"{OLLAMA_BASE}/api/chat",
                data=payload,
                headers={"Content-Type": "application/json"}
            )
            try:
                with urllib.request.urlopen(req, timeout=60) as resp:
                    result = json.loads(resp.read())
                    content = result.get("message", {}).get("content", "")
                    # Parse vision output (tolerates code fences / noise).
                    parsed = _parse_json_response(content)
                    for r in parsed.get("rooms", []):
                        rooms.append(TowerRoom(
                            name=r.get("name", "Unknown"),
                            floor=r.get("floor", 0),
                            description=r.get("description", ""),
                            category="vision",
                            source=f"vision:{img_name}"
                        ))
            except Exception as e:
                # Best-effort: one bad image/request must not abort the scan.
                print(f"  Vision analysis failed for {img_name}: {e}", file=sys.stderr)

    except ImportError:
        # NOTE(review): urllib.request and base64 are stdlib, so this guard
        # looks unreachable in practice — presumably defensive; confirm.
        pass

    return rooms
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_json_response(text: str) -> dict:
|
||||||
|
"""Extract JSON from potentially messy response."""
|
||||||
|
cleaned = text.strip()
|
||||||
|
if cleaned.startswith("```"):
|
||||||
|
lines = cleaned.split("\n")[1:]
|
||||||
|
if lines and lines[-1].strip() == "```":
|
||||||
|
lines = lines[:-1]
|
||||||
|
cleaned = "\n".join(lines)
|
||||||
|
try:
|
||||||
|
return json.loads(cleaned)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
start = cleaned.find("{")
|
||||||
|
end = cleaned.rfind("}")
|
||||||
|
if start >= 0 and end > start:
|
||||||
|
try:
|
||||||
|
return json.loads(cleaned[start:end + 1])
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
pass
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
# === Map Construction ===
|
||||||
|
|
||||||
|
def build_tower_map(repo_root: Path, include_vision: bool = False) -> TowerMap:
    """Build the complete holographic map by scanning all sources.

    Aggregates rooms from the gallery index, the memory-architecture doc
    and design specs, plus NPCs from wizard configs; optionally enriches
    with vision-model analysis. Rooms are deduplicated by name (first
    occurrence wins), grouped into floors, and linked by corridor /
    staircase connections; NPCs are placed into their home rooms.
    """
    tower = TowerMap()
    # (tower.sources_scanned already starts as [] via default_factory.)

    # 1. Scan gallery
    tower.rooms.extend(scan_gallery_index(repo_root))
    tower.sources_scanned.append("grok-imagine-gallery/INDEX.md")

    # 2. Scan memory architecture
    tower.rooms.extend(scan_memory_architecture(repo_root))
    tower.sources_scanned.append("docs/MEMORY_ARCHITECTURE.md")

    # 3. Scan design docs
    tower.rooms.extend(scan_design_docs(repo_root))
    tower.sources_scanned.append("docs/*.md")

    # 4. Scan wizard configs
    tower.npcs.extend(scan_wizard_configs(repo_root))
    tower.sources_scanned.append("wizards/*/")

    # 5. Vision analysis (optional)
    if include_vision:
        tower.rooms.extend(analyze_tower_images(repo_root))
        tower.sources_scanned.append("vision:gemma3")

    # Deduplicate rooms by name — first occurrence wins.
    seen = set()  # a real set, not a dict-with-True-values
    deduped = []
    for room in tower.rooms:
        if room.name not in seen:
            seen.add(room.name)
            deduped.append(room)
    tower.rooms = deduped

    # Group room names by floor number.
    floor_map = {}
    for room in tower.rooms:
        floor_map.setdefault(room.floor, []).append(room.name)

    floor_names = {
        0: "Ground Floor — Foundation",
        1: "First Floor — Identity & Sovereignty",
        2: "Second Floor — Operations & Creativity",
        3: "Third Floor — Knowledge & Mission",
        4: "Fourth Floor — The Sacred & The Beacon",
    }
    for floor_num in sorted(floor_map):
        tower.floors.append(TowerFloor(
            number=floor_num,
            name=floor_names.get(floor_num, f"Floor {floor_num}"),
            # sorted() keeps the theme string deterministic across runs —
            # joining a bare set iterated in arbitrary order made the
            # serialized map differ from run to run.
            theme=", ".join(sorted({r.category for r in tower.rooms if r.floor == floor_num})),
            rooms=floor_map[floor_num]
        ))

    # Connect rooms of the same category on the same or adjacent floors.
    for i, room_a in enumerate(tower.rooms):
        for room_b in tower.rooms[i + 1:]:
            if abs(room_a.floor - room_b.floor) <= 1 and room_a.category == room_b.category:
                tower.connections.append({
                    "from": room_a.name,
                    "to": room_b.name,
                    "type": "corridor" if room_a.floor == room_b.floor else "staircase"
                })

    # Assign NPCs to their home rooms by exact name match.
    for npc in tower.npcs:
        for room in tower.rooms:
            if npc.location == room.name:
                room.occupants.append(npc.name)

    return tower
|
||||||
|
|
||||||
|
|
||||||
|
# === Output Formatting ===
|
||||||
|
|
||||||
|
def to_json(tower: TowerMap) -> str:
|
||||||
|
"""Serialize tower map to JSON."""
|
||||||
|
data = {
|
||||||
|
"name": tower.name,
|
||||||
|
"description": tower.description,
|
||||||
|
"map_version": tower.map_version,
|
||||||
|
"floors": [asdict(f) for f in tower.floors],
|
||||||
|
"rooms": [asdict(r) for r in tower.rooms],
|
||||||
|
"npcs": [asdict(n) for n in tower.npcs],
|
||||||
|
"connections": tower.connections,
|
||||||
|
"sources_scanned": tower.sources_scanned,
|
||||||
|
"stats": {
|
||||||
|
"total_floors": len(tower.floors),
|
||||||
|
"total_rooms": len(tower.rooms),
|
||||||
|
"total_npcs": len(tower.npcs),
|
||||||
|
"total_connections": len(tower.connections),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return json.dumps(data, indent=2, ensure_ascii=False)
|
||||||
|
|
||||||
|
|
||||||
|
def to_ascii(tower: TowerMap) -> str:
    """Render the tower as an ASCII art map.

    Floors are drawn top-down (highest first), each room as a nested box
    showing up to three occupants, the first artifact, and a truncated
    description; a legend and summary stats close the output.
    """
    lines = []
    lines.append("=" * 60)
    lines.append(" THE TOWER — Holographic Architecture Map")
    lines.append("=" * 60)
    lines.append("")

    # Render floors top to bottom (reverse floor-number order).
    for floor in sorted(tower.floors, key=lambda f: f.number, reverse=True):
        lines.append(f" ┌{'─' * 56}┐")
        lines.append(f" │ FLOOR {floor.number}: {floor.name:<47}│")
        lines.append(f" ├{'─' * 56}┤")

        # Rooms on this floor
        floor_rooms = [r for r in tower.rooms if r.floor == floor.number]
        for room in floor_rooms:
            # Room box — name truncated to keep the box width fixed.
            name_display = room.name[:40]
            lines.append(f" │ ┌{'─' * 50}┐ │")
            lines.append(f" │ │ {name_display:<49}│ │")

            # NPCs in room (at most three shown).
            if room.occupants:
                npc_str = ", ".join(room.occupants[:3])
                lines.append(f" │ │ 👤 {npc_str:<46}│ │")

            # Artifacts — only the first one is shown.
            if room.artifacts:
                art_str = room.artifacts[0][:44]
                lines.append(f" │ │ 📦 {art_str:<46}│ │")

            # Description (truncated)
            desc = room.description[:46] if room.description else ""
            if desc:
                lines.append(f" │ │ {desc:<49}│ │")

            lines.append(f" │ └{'─' * 50}┘ │")

        lines.append(f" └{'─' * 56}┘")
        # A staircase connector is drawn below every floor except floor 0.
        lines.append(f" {'│' if floor.number > 0 else ' '}")
        if floor.number > 0:
            lines.append(f" ────┼──── staircase")
            lines.append(f" │")

    # Legend
    lines.append("")
    lines.append(" ── LEGEND ──────────────────────────────────────")
    lines.append(" 👤 NPC/Wizard present 📦 Artifact/Source file")
    lines.append(" │ Staircase (floor link)")
    lines.append("")

    # Stats
    lines.append(f" Floors: {len(tower.floors)} Rooms: {len(tower.rooms)} NPCs: {len(tower.npcs)} Connections: {len(tower.connections)}")
    lines.append(f" Sources: {', '.join(tower.sources_scanned)}")

    return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
# === CLI ===
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: scan the repo, build the map, emit JSON or ASCII.

    BUG FIX: this function previously contained merge-conflict residue —
    a duplicated ')', a stray `return {"map": analysis}`, and a nested
    `if __name__ == '__main__'` calling a nonexistent `map_tower()` —
    which made the module syntactically invalid. The clean CLI flow is
    restored below.
    """
    parser = argparse.ArgumentParser(
        description="Visual Mapping of Tower Architecture — holographic map builder",
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("--repo-root", default=".", help="Path to timmy-config repo root")
    parser.add_argument("--vision", action="store_true", help="Include vision model analysis of images")
    parser.add_argument("--model", default=VISION_MODEL, help=f"Vision model (default: {VISION_MODEL})")
    parser.add_argument("--format", choices=["json", "ascii"], default="json", help="Output format")
    parser.add_argument("--output", "-o", help="Output file (default: stdout)")

    args = parser.parse_args()
    repo_root = Path(args.repo_root).resolve()

    # Progress goes to stderr so stdout stays clean for the map itself.
    print(f"Scanning {repo_root}...", file=sys.stderr)
    tower = build_tower_map(repo_root, include_vision=args.vision)

    if args.format == "json":
        output = to_json(tower)
    else:
        output = to_ascii(tower)

    if args.output:
        Path(args.output).write_text(output)
        print(f"Map written to {args.output}", file=sys.stderr)
    else:
        print(output)

    print(f"\nMapped: {len(tower.floors)} floors, {len(tower.rooms)} rooms, {len(tower.npcs)} NPCs", file=sys.stderr)
|
||||||
|
|
||||||
|
|
||||||
|
# Script entry point.
if __name__ == "__main__":
    main()
|
||||||
|
|||||||
281
tests/test_glitch_detector.py
Normal file
281
tests/test_glitch_detector.py
Normal file
@@ -0,0 +1,281 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Tests for Matrix 3D Glitch Detector (timmy-config#491).
|
||||||
|
|
||||||
|
Covers: glitch_patterns, matrix_glitch_detector core logic.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
import unittest
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Ensure bin/ is importable
|
||||||
|
sys.path.insert(0, str(Path(__file__).resolve().parent.parent / "bin"))
|
||||||
|
|
||||||
|
from glitch_patterns import (
|
||||||
|
GlitchCategory,
|
||||||
|
GlitchPattern,
|
||||||
|
GlitchSeverity,
|
||||||
|
MATRIX_GLITCH_PATTERNS,
|
||||||
|
build_vision_prompt,
|
||||||
|
get_pattern_by_category,
|
||||||
|
get_patterns_by_severity,
|
||||||
|
)
|
||||||
|
|
||||||
|
from matrix_glitch_detector import (
|
||||||
|
DetectedGlitch,
|
||||||
|
ScanResult,
|
||||||
|
_infer_severity,
|
||||||
|
_parse_vision_response,
|
||||||
|
build_report,
|
||||||
|
generate_scan_angles,
|
||||||
|
run_demo,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TestGlitchPatterns(unittest.TestCase):
    """Tests for glitch_patterns module."""

    def test_pattern_count(self):
        """Verify we have a reasonable number of defined patterns."""
        self.assertGreaterEqual(len(MATRIX_GLITCH_PATTERNS), 8)

    def test_all_patterns_have_required_fields(self):
        """Every pattern must have category, name, description, severity, prompts."""
        for p in MATRIX_GLITCH_PATTERNS:
            self.assertIsInstance(p.category, GlitchCategory)
            self.assertTrue(p.name)
            self.assertTrue(p.description)
            self.assertIsInstance(p.severity, GlitchSeverity)
            self.assertGreater(len(p.detection_prompts), 0)
            self.assertGreater(len(p.visual_indicators), 0)
            # Threshold must be a usable probability in (0, 1].
            self.assertGreater(p.confidence_threshold, 0)
            self.assertLessEqual(p.confidence_threshold, 1.0)

    def test_pattern_to_dict(self):
        """Pattern serialization should produce a dict with expected keys."""
        p = MATRIX_GLITCH_PATTERNS[0]
        d = p.to_dict()
        self.assertIn("category", d)
        self.assertIn("name", d)
        self.assertIn("severity", d)
        # Enums are serialized by value, not by member name.
        self.assertEqual(d["category"], p.category.value)

    def test_get_patterns_by_severity(self):
        """Severity filter should return only patterns at or above threshold."""
        high_patterns = get_patterns_by_severity(GlitchSeverity.HIGH)
        self.assertTrue(all(p.severity.value in ("high", "critical") for p in high_patterns))
        self.assertGreater(len(high_patterns), 0)

        # INFO is the lowest severity, so filtering by it returns everything.
        all_patterns = get_patterns_by_severity(GlitchSeverity.INFO)
        self.assertEqual(len(all_patterns), len(MATRIX_GLITCH_PATTERNS))

    def test_get_pattern_by_category(self):
        """Lookup by category should return the correct pattern."""
        p = get_pattern_by_category(GlitchCategory.FLOATING_ASSETS)
        self.assertIsNotNone(p)
        self.assertEqual(p.category, GlitchCategory.FLOATING_ASSETS)

        # Unknown category values yield None rather than raising.
        missing = get_pattern_by_category("nonexistent_category_value")
        self.assertIsNone(missing)

    def test_build_vision_prompt(self):
        """Vision prompt should contain pattern names and be non-trivial."""
        prompt = build_vision_prompt()
        self.assertGreater(len(prompt), 200)
        self.assertIn("Floating Object", prompt)
        self.assertIn("Z-Fighting", prompt)
        self.assertIn("Missing", prompt)

    def test_build_vision_prompt_subset(self):
        """Vision prompt with subset should only include specified patterns."""
        subset = MATRIX_GLITCH_PATTERNS[:3]
        prompt = build_vision_prompt(subset)
        self.assertIn(subset[0].name, prompt)
        # The last pattern is outside the subset, so it must be absent.
        self.assertNotIn(MATRIX_GLITCH_PATTERNS[-1].name, prompt)
|
||||||
|
|
||||||
|
|
||||||
|
class TestGlitchDetector(unittest.TestCase):
    """Tests for matrix_glitch_detector module."""

    def test_generate_scan_angles_default(self):
        """Default 4 angles should return front, right, back, left."""
        angles = generate_scan_angles(4)
        self.assertEqual(len(angles), 4)
        labels = [a["label"] for a in angles]
        self.assertIn("front", labels)
        self.assertIn("right", labels)
        self.assertIn("back", labels)
        self.assertIn("left", labels)

    def test_generate_scan_angles_many(self):
        """Requesting more angles than base should still return correct count."""
        angles = generate_scan_angles(12)
        self.assertEqual(len(angles), 12)
        # Should still have the standard ones
        labels = [a["label"] for a in angles]
        self.assertIn("front", labels)

    def test_generate_scan_angles_few(self):
        """Requesting fewer angles should return fewer."""
        angles = generate_scan_angles(2)
        self.assertEqual(len(angles), 2)

    def test_detected_glitch_dataclass(self):
        """DetectedGlitch should serialize cleanly."""
        g = DetectedGlitch(
            id="test001",
            category="floating_assets",
            name="Test Glitch",
            description="A test glitch",
            severity="high",
            confidence=0.85,
            location_x=50.0,
            location_y=30.0,
            screenshot_index=0,
            screenshot_angle="front",
        )
        self.assertEqual(g.id, "test001")
        self.assertTrue(g.timestamp)  # Auto-generated

    def test_infer_severity_critical(self):
        """Missing textures should infer critical/high severity."""
        # High confidence escalates to critical; lower drops to high.
        sev = _infer_severity("missing_textures", 0.9)
        self.assertEqual(sev, "critical")
        sev_low = _infer_severity("missing_textures", 0.5)
        self.assertEqual(sev_low, "high")

    def test_infer_severity_floating(self):
        """Floating assets should infer high/medium severity."""
        sev = _infer_severity("floating_assets", 0.8)
        self.assertEqual(sev, "high")
        sev_low = _infer_severity("floating_assets", 0.5)
        self.assertEqual(sev_low, "medium")

    def test_infer_severity_default(self):
        """Unknown categories should default to medium/low."""
        sev = _infer_severity("unknown_thing", 0.7)
        self.assertEqual(sev, "medium")
        sev_low = _infer_severity("unknown_thing", 0.3)
        self.assertEqual(sev_low, "low")

    def test_parse_vision_response_json_array(self):
        """Should parse a JSON array response."""
        response = json.dumps([
            {
                "category": "floating_assets",
                "name": "Float Test",
                "description": "Chair floating",
                "confidence": 0.9,
                "severity": "high",
                "location_x": 40,
                "location_y": 60,
            }
        ])
        glitches = _parse_vision_response(response, 0, "front")
        self.assertEqual(len(glitches), 1)
        self.assertEqual(glitches[0].category, "floating_assets")
        # Float comparison via assertAlmostEqual, not exact equality.
        self.assertAlmostEqual(glitches[0].confidence, 0.9)

    def test_parse_vision_response_wrapped(self):
        """Should parse a response with 'glitches' wrapper key."""
        response = json.dumps({
            "glitches": [
                {
                    "category": "z_fighting",
                    "name": "Shimmer",
                    "confidence": 0.6,
                }
            ]
        })
        glitches = _parse_vision_response(response, 1, "right")
        self.assertEqual(len(glitches), 1)
        self.assertEqual(glitches[0].category, "z_fighting")

    def test_parse_vision_response_empty(self):
        """Should return empty list for non-JSON text."""
        glitches = _parse_vision_response("No glitches found.", 0, "front")
        self.assertEqual(len(glitches), 0)

    def test_parse_vision_response_code_block(self):
        """Should extract JSON from markdown code blocks."""
        response = '```json\n[{"category": "clipping", "name": "Clip", "confidence": 0.7}]\n```'
        glitches = _parse_vision_response(response, 0, "front")
        self.assertEqual(len(glitches), 1)

    def test_build_report(self):
        """Report should have correct summary statistics."""
        angles = generate_scan_angles(4)
        screenshots = [Path(f"/tmp/ss_{i}.png") for i in range(4)]
        glitches = [
            DetectedGlitch(
                id="a", category="floating_assets", name="Float",
                description="", severity="high", confidence=0.8,
                screenshot_index=0, screenshot_angle="front",
            ),
            DetectedGlitch(
                id="b", category="missing_textures", name="Missing",
                description="", severity="critical", confidence=0.95,
                screenshot_index=1, screenshot_angle="right",
            ),
        ]
        report = build_report("https://test.com", angles, screenshots, glitches)

        self.assertEqual(report.total_screenshots, 4)
        self.assertEqual(len(report.glitches), 2)
        self.assertEqual(report.summary["total_glitches"], 2)
        self.assertEqual(report.summary["by_severity"]["critical"], 1)
        self.assertEqual(report.summary["by_severity"]["high"], 1)
        self.assertEqual(report.summary["by_category"]["floating_assets"], 1)
        self.assertEqual(report.metadata["reference"], "timmy-config#491")

    def test_build_report_json_roundtrip(self):
        """Report JSON should parse back correctly."""
        angles = generate_scan_angles(2)
        screenshots = [Path(f"/tmp/ss_{i}.png") for i in range(2)]
        report = build_report("https://test.com", angles, screenshots, [])
        json_str = report.to_json()
        parsed = json.loads(json_str)
        self.assertEqual(parsed["url"], "https://test.com")
        self.assertEqual(parsed["total_screenshots"], 2)

    def test_run_demo(self):
        """Demo mode should produce a report with simulated glitches."""
        # Create the temp path, then close the handle so run_demo can write it.
        with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as f:
            output_path = Path(f.name)

        try:
            report = run_demo(output_path)
            self.assertEqual(len(report.glitches), 4)
            self.assertGreater(report.summary["total_glitches"], 0)
            self.assertTrue(output_path.exists())

            # Verify the saved JSON is valid
            saved = json.loads(output_path.read_text())
            self.assertIn("scan_id", saved)
            self.assertIn("glitches", saved)
        finally:
            output_path.unlink(missing_ok=True)
|
||||||
|
|
||||||
|
|
||||||
|
class TestIntegration(unittest.TestCase):
    """Integration-level tests."""

    def test_full_pipeline_demo(self):
        """End-to-end demo pipeline should complete without errors."""
        demo_report = run_demo()
        self.assertIsNotNone(demo_report.scan_id)
        self.assertTrue(demo_report.timestamp)
        self.assertGreater(demo_report.total_screenshots, 0)

    def test_patterns_cover_matrix_themes(self):
        """Patterns should cover the main Matrix glitch themes."""
        required = {"floating_assets", "z_fighting", "missing_textures", "clipping", "broken_normals"}
        covered = {pattern.category.value for pattern in MATRIX_GLITCH_PATTERNS}
        self.assertTrue(required.issubset(covered))
|
||||||
|
|
||||||
|
|
||||||
|
# Allow running this test module directly (unittest discovers the classes above).
if __name__ == "__main__":
    unittest.main()
|
||||||
215
tests/test_tower_visual_mapper.py
Normal file
215
tests/test_tower_visual_mapper.py
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""Tests for tower_visual_mapper.py — verifies map construction and formatting."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
from pathlib import Path
|
||||||
|
from unittest.mock import patch
|
||||||
|
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
|
||||||
|
|
||||||
|
from tower_visual_mapper import (
|
||||||
|
TowerRoom, TowerNPC, TowerFloor, TowerMap,
|
||||||
|
scan_gallery_index, scan_memory_architecture, scan_wizard_configs,
|
||||||
|
build_tower_map, to_json, to_ascii, _gallery_image_to_room,
|
||||||
|
_parse_json_response
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# === Unit Tests ===
|
||||||
|
|
||||||
|
def test_gallery_image_to_room_known():
    """A catalogued gallery image should map to its named tower room."""
    room = _gallery_image_to_room("01-wizard-tower-bitcoin.jpg", "The Tower", "The Origin")
    assert room is not None
    assert room.name == "The Tower — Exterior"
    assert room.floor == 0
    lowered = room.description.lower()
    assert "bitcoin" in lowered or "sovereign" in lowered
    print(" PASS: test_gallery_image_to_room_known")
|
||||||
|
|
||||||
|
|
||||||
|
def test_gallery_image_to_room_unknown():
    """An unrecognized image filename should map to no room at all."""
    assert _gallery_image_to_room("random-image.jpg", "Something", "The Origin") is None
    print(" PASS: test_gallery_image_to_room_unknown")
|
||||||
|
|
||||||
|
|
||||||
|
def test_gallery_image_to_room_philosophy():
    """Images from the Philosophy section should get the philosophy category."""
    result = _gallery_image_to_room("06-the-paperclip-moment.jpg", "A paperclip", "The Philosophy")
    assert result is not None
    assert result.category == "philosophy"
    print(" PASS: test_gallery_image_to_room_philosophy")
|
||||||
|
|
||||||
|
|
||||||
|
def test_parse_json_response_clean():
    """Bare JSON text should parse directly into a dict."""
    parsed = _parse_json_response('{"floors": 5, "rooms": [{"name": "Test"}]}')
    assert parsed["floors"] == 5
    assert parsed["rooms"][0]["name"] == "Test"
    print(" PASS: test_parse_json_response_clean")
|
||||||
|
|
||||||
|
|
||||||
|
def test_parse_json_response_fenced():
    """JSON inside a markdown code fence should still parse."""
    parsed = _parse_json_response('```json\n{"floors": 3}\n```')
    assert parsed["floors"] == 3
    print(" PASS: test_parse_json_response_fenced")
|
||||||
|
|
||||||
|
|
||||||
|
def test_parse_json_response_garbage():
    """Text with no JSON at all should yield an empty dict."""
    assert _parse_json_response("no json here at all") == {}
    print(" PASS: test_parse_json_response_garbage")
|
||||||
|
|
||||||
|
|
||||||
|
def test_tower_map_structure():
    """Serialized map stats should count rooms and NPCs correctly."""
    tower = TowerMap()
    tower.rooms = [
        TowerRoom(name="Room A", floor=0, category="test"),
        TowerRoom(name="Room B", floor=0, category="test"),
        TowerRoom(name="Room C", floor=1, category="other"),
    ]
    tower.npcs = [TowerNPC(name="NPC1", role="guard", location="Room A")]

    serialized = json.loads(to_json(tower))
    assert serialized["name"] == "The Tower"
    assert serialized["stats"]["total_rooms"] == 3
    assert serialized["stats"]["total_npcs"] == 1
    print(" PASS: test_tower_map_structure")
|
||||||
|
|
||||||
|
|
||||||
|
def test_to_json():
    """Room attributes should round-trip through the JSON serializer."""
    tower = TowerMap()
    tower.rooms = [TowerRoom(name="Test Room", floor=1)]
    first_room = json.loads(to_json(tower))["rooms"][0]
    assert first_room["name"] == "Test Room"
    assert first_room["floor"] == 1
    print(" PASS: test_to_json")
|
||||||
|
|
||||||
|
|
||||||
|
def test_to_ascii():
    """ASCII rendering should include the title, floor header, and room."""
    tower = TowerMap()
    tower.floors = [TowerFloor(number=0, name="Ground", rooms=["Test Room"])]
    tower.rooms = [TowerRoom(name="Test Room", floor=0, description="A test")]
    tower.npcs = []
    tower.connections = []

    rendered = to_ascii(tower)
    for expected in ("THE TOWER", "Test Room", "FLOOR 0"):
        assert expected in rendered
    print(" PASS: test_to_ascii")
|
||||||
|
|
||||||
|
|
||||||
|
def test_to_ascii_with_npcs():
    """NPC names should appear in the ASCII rendering."""
    tower = TowerMap()
    tower.floors = [TowerFloor(number=0, name="Ground", rooms=["The Forge"])]
    tower.rooms = [TowerRoom(name="The Forge", floor=0, occupants=["Bezalel"])]
    tower.npcs = [TowerNPC(name="Bezalel", role="Builder", location="The Forge")]

    assert "Bezalel" in to_ascii(tower)
    print(" PASS: test_to_ascii_with_npcs")
|
||||||
|
|
||||||
|
|
||||||
|
def test_scan_gallery_index(tmp_path):
    """Rooms should be discovered from a minimal on-disk gallery index."""
    gallery_dir = tmp_path / "grok-imagine-gallery"
    gallery_dir.mkdir()
    (gallery_dir / "INDEX.md").write_text("""# Gallery
### The Origin
| 01 | wizard-tower-bitcoin.jpg | The Tower, sovereign |
| 02 | soul-inscription.jpg | SOUL.md glowing |
### The Philosophy
| 05 | value-drift-battle.jpg | Blue vs red ships |
""")
    rooms = scan_gallery_index(tmp_path)
    assert len(rooms) >= 2
    room_names = [room.name for room in rooms]
    assert any("Tower" in name for name in room_names)
    assert any("Inscription" in name for name in room_names)
    print(" PASS: test_scan_gallery_index")
|
||||||
|
|
||||||
|
|
||||||
|
def test_scan_wizard_configs(tmp_path):
    """NPCs should be discovered from per-wizard config directories."""
    wizard_root = tmp_path / "wizards"
    for wizard in ("timmy", "bezalel", "ezra"):
        config_dir = wizard_root / wizard
        config_dir.mkdir(parents=True)
        (config_dir / "config.yaml").write_text("model: test\n")

    npcs = scan_wizard_configs(tmp_path)
    assert len(npcs) >= 3
    npc_names = [npc.name for npc in npcs]
    assert any("Timmy" in name for name in npc_names)
    assert any("Bezalel" in name for name in npc_names)
    print(" PASS: test_scan_wizard_configs")
|
||||||
|
|
||||||
|
|
||||||
|
def test_build_tower_map_empty(tmp_path):
    """Building from an empty directory should still yield a valid map."""
    tower = build_tower_map(tmp_path, include_vision=False)
    assert tower.name == "The Tower"
    # Palace rooms come from MEMORY_ARCHITECTURE; those paths won't exist
    # under tmp_path, but the rooms attribute must still be a list.
    assert isinstance(tower.rooms, list)
    print(" PASS: test_build_tower_map_empty")
|
||||||
|
|
||||||
|
|
||||||
|
def test_room_deduplication():
    """Rooms sharing a name should collapse to the first occurrence.

    Simulates the name-based deduplication performed by build_tower_map.
    Uses a set for seen names — the original used a dict whose values were
    never read, which is the non-idiomatic form of the same membership test.
    """
    tower = TowerMap()
    tower.rooms = [
        TowerRoom(name="Dup Room", floor=0),
        TowerRoom(name="Dup Room", floor=1),  # same name, different floor
        TowerRoom(name="Unique Room", floor=0),
    ]
    seen_names = set()
    deduped = []
    for room in tower.rooms:
        if room.name not in seen_names:
            seen_names.add(room.name)
            deduped.append(room)
    assert len(deduped) == 2
    print(" PASS: test_room_deduplication")
|
||||||
|
|
||||||
|
|
||||||
|
def run_all():
    """Run every test in sequence and print a PASS/FAIL summary.

    Tests that declare a ``tmp_path`` parameter are supplied a fresh
    temporary directory (mirroring pytest's fixture of the same name);
    all other tests are called with no arguments.

    Returns:
        bool: True if every test passed, False otherwise.
    """
    print("=== tower_visual_mapper tests ===")
    tests = [
        test_gallery_image_to_room_known,
        test_gallery_image_to_room_unknown,
        test_gallery_image_to_room_philosophy,
        test_parse_json_response_clean,
        test_parse_json_response_fenced,
        test_parse_json_response_garbage,
        test_tower_map_structure,
        test_to_json,
        test_to_ascii,
        test_to_ascii_with_npcs,
        test_scan_gallery_index,
        test_scan_wizard_configs,
        test_build_tower_map_empty,
        test_room_deduplication,
    ]
    passed = 0
    failed = 0
    for test in tests:
        # Only genuine parameters count: co_varnames also lists every local
        # variable, so the original `"tmp_path" in co_varnames` check would
        # misfire on a zero-arg test that merely *used* a local named
        # tmp_path. Slicing to co_argcount restricts the check to the
        # positional parameter names.
        params = test.__code__.co_varnames[: test.__code__.co_argcount]
        try:
            if "tmp_path" in params:
                with tempfile.TemporaryDirectory() as td:
                    test(Path(td))
            else:
                test()
            passed += 1
        except Exception as e:
            print(f" FAIL: {test.__name__} — {e}")
            failed += 1

    print(f"\n{'ALL PASSED' if failed == 0 else f'{failed} FAILED'}: {passed}/{len(tests)}")
    return failed == 0
|
||||||
|
|
||||||
|
|
||||||
|
# Exit non-zero when any test fails so CI can detect the failure.
if __name__ == "__main__":
    sys.exit(0 if run_all() else 1)
|
||||||
Reference in New Issue
Block a user