Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
bd78d71dfb feat: cross-session agent memory via MemPalace (#1124)
Some checks failed
Review Approval Gate / verify-review (pull_request) Failing after 9s
CI / test (pull_request) Failing after 53s
CI / validate (pull_request) Failing after 53s
Integrates MemPalace for persistent agent memory across sessions.
Agents recall context at session start, store important decisions,
and write diary entries at session end.

## What's added

agent/memory.py — AgentMemory class:
  - recall_context(): Load L0/L1 context (diaries, facts, relevant memories)
  - remember(): Store decisions and facts by room
  - write_diary(): Auto-generate session summary from transcript
  - start_session/end_session(): Session lifecycle management
  - Graceful degradation when MemPalace unavailable

agent/memory_hooks.py — Drop-in session lifecycle hooks:
  - on_session_start(): Load context, return prompt block
  - on_user_turn/on_agent_turn/on_tool_call(): Record transcript
  - on_important_decision(): Store key decisions for long-term memory
  - on_session_end(): Write diary, clean up

bin/memory_mine.py — Mine session transcripts into MemPalace:
  - Parse JSONL session files
  - Generate compact summaries
  - Batch mining with --days filter
  - Dry run mode

tests/test_agent_memory.py — 31 tests covering:
  - SessionTranscript (create, turns, truncation, summary)
  - MemoryContext (empty, loaded, prompt formatting)
  - AgentMemory (create, factory, graceful degradation, lifecycle)
  - MemoryHooks (full lifecycle, before/after session guards)
  - Session mining (parse, summarize, find files, dry run)
  - Full lifecycle integration test

## Usage
2026-04-13 20:36:39 -04:00
13 changed files with 1256 additions and 1539 deletions

21
agent/__init__.py Normal file
View File

@@ -0,0 +1,21 @@
"""
agent — Cross-session agent memory and lifecycle hooks.
Provides persistent memory for agents via MemPalace integration.
Agents recall context at session start and write diary entries at session end.
Modules:
memory.py — AgentMemory class (recall, remember, diary)
memory_hooks.py — Session lifecycle hooks (drop-in integration)
"""
from agent.memory import AgentMemory, MemoryContext, SessionTranscript, create_agent_memory
from agent.memory_hooks import MemoryHooks
__all__ = [
"AgentMemory",
"MemoryContext",
"MemoryHooks",
"SessionTranscript",
"create_agent_memory",
]

396
agent/memory.py Normal file
View File

@@ -0,0 +1,396 @@
"""
agent.memory — Cross-session agent memory via MemPalace.
Gives agents persistent memory across sessions. On wake-up, agents
recall relevant context from past sessions. On session end, they
write a diary entry summarizing what happened.
Architecture:
Session Start → memory.recall_context() → inject L0/L1 into prompt
During Session → memory.remember() → store important facts
Session End → memory.write_diary() → summarize session
All operations degrade gracefully — if MemPalace is unavailable,
the agent continues without memory and logs a warning.
Usage:
from agent.memory import AgentMemory
mem = AgentMemory(agent_name="bezalel", wing="wing_bezalel")
# Session start — load context
context = mem.recall_context("What was I working on last time?")
# During session — store important decisions
mem.remember("Switched CI runner from GitHub Actions to self-hosted", room="forge")
# Session end — write diary
mem.write_diary("Fixed PR #1386, reconciled fleet registry locations")
"""
from __future__ import annotations
import json
import logging
import os
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
logger = logging.getLogger("agent.memory")
@dataclass
class MemoryContext:
    """Context recalled from MemPalace when a session begins."""
    relevant_memories: list[dict] = field(default_factory=list)
    recent_diaries: list[dict] = field(default_factory=list)
    facts: list[dict] = field(default_factory=list)
    loaded: bool = False
    error: Optional[str] = None

    def to_prompt_block(self) -> str:
        """Render the loaded context as a text block for prompt injection.

        Returns an empty string when nothing was loaded or every section
        is empty; otherwise newline-joined sections (diaries capped at 3,
        facts at 10, relevant memories at 5, each entry truncated).
        """
        if not self.loaded:
            return ""
        lines: list[str] = []
        if self.recent_diaries:
            lines.append("=== Recent Session Summaries ===")
            lines.extend(
                f"[{entry.get('timestamp', '')}] {entry.get('text', '')[:500]}"
                for entry in self.recent_diaries[:3]
            )
        if self.facts:
            lines.append("\n=== Known Facts ===")
            lines.extend(f"- {fact.get('text', '')[:200]}" for fact in self.facts[:10])
        if self.relevant_memories:
            lines.append("\n=== Relevant Past Memories ===")
            lines.extend(
                f"[{mem.get('score', 0):.2f}] {mem.get('text', '')[:300]}"
                for mem in self.relevant_memories[:5]
            )
        return "\n".join(lines) if lines else ""
@dataclass
class SessionTranscript:
    """Running record of the current session, used for diary generation."""
    agent_name: str
    wing: str
    started_at: str = field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )
    entries: list[dict] = field(default_factory=list)

    def _append(self, entry: dict) -> None:
        # Stamp each entry with wall-clock time and record it.
        entry["ts"] = time.time()
        self.entries.append(entry)

    def add_user_turn(self, text: str):
        """Record a user message (truncated to 2000 chars)."""
        self._append({"role": "user", "text": text[:2000]})

    def add_agent_turn(self, text: str):
        """Record an agent response (truncated to 2000 chars)."""
        self._append({"role": "agent", "text": text[:2000]})

    def add_tool_call(self, tool: str, args: str, result_summary: str):
        """Record a tool invocation with truncated args/result."""
        self._append({
            "role": "tool",
            "tool": tool,
            "args": args[:500],
            "result": result_summary[:500],
        })

    def summary(self) -> str:
        """Generate a compact summary from the last 20 entries."""
        if not self.entries:
            return "Empty session."
        rendered = []
        for entry in self.entries[-20:]:  # last 20 entries
            kind = entry["role"]
            if kind == "user":
                rendered.append(f"USER: {entry['text'][:200]}")
            elif kind == "agent":
                rendered.append(f"AGENT: {entry['text'][:200]}")
            elif kind == "tool":
                rendered.append(
                    f"TOOL({entry.get('tool','')}): {entry.get('result','')[:150]}"
                )
        return "\n".join(rendered)
class AgentMemory:
    """
    Cross-session memory for an agent.

    Wraps MemPalace with agent-specific conventions:
    - Each agent has a wing (e.g., "wing_bezalel")
    - Session summaries go in the "hermes" room
    - Important decisions go in room-specific closets
    - Facts go in the "nexus" room

    All operations degrade gracefully: when MemPalace is unreachable,
    reads return empty results and writes return None, with a warning
    logged, so the agent keeps running without memory.
    """

    def __init__(
        self,
        agent_name: str,
        wing: Optional[str] = None,
        palace_path: Optional[Path] = None,
    ):
        """
        Args:
            agent_name: Agent identifier, used in diary attribution.
            wing: MemPalace wing name. Defaults to "wing_{agent_name}".
            palace_path: Optional override of the palace location.
        """
        self.agent_name = agent_name
        self.wing = wing or f"wing_{agent_name}"
        self.palace_path = palace_path
        # Transcript of the active session; set by start_session().
        self._transcript: Optional[SessionTranscript] = None
        # Cached availability probe result; None means "not yet checked".
        self._available: Optional[bool] = None

    def _check_available(self) -> bool:
        """Probe MemPalace once and cache the result for this instance."""
        if self._available is not None:
            return self._available
        try:
            # Only the client factory and the configured path are needed
            # for the probe; search/add helpers are imported where used.
            # (Previously search_memories/add_memory were imported here
            # and never used.)
            from nexus.mempalace.searcher import _get_client
            from nexus.mempalace.config import MEMPALACE_PATH
            path = self.palace_path or MEMPALACE_PATH
            _get_client(path)
            self._available = True
            logger.info("MemPalace available at %s", path)
        except Exception as e:
            self._available = False
            logger.warning("MemPalace unavailable: %s", e)
        return self._available

    def recall_context(
        self,
        query: Optional[str] = None,
        n_results: int = 5,
    ) -> MemoryContext:
        """
        Load relevant context from past sessions.

        Called at session start to inject L0/L1 memory into the prompt.

        Args:
            query: What to search for. If None, only diaries and facts
                are loaded.
            n_results: Max relevant memories to recall for the query.

        Returns:
            A MemoryContext. ``loaded`` is False and ``error`` is set
            when MemPalace is unreachable or any lookup fails.
        """
        ctx = MemoryContext()
        if not self._check_available():
            ctx.error = "MemPalace unavailable"
            return ctx
        try:
            from nexus.mempalace.searcher import search_memories
            # Recent diary entries (session summaries) live in "hermes".
            ctx.recent_diaries = [
                {"text": r.text, "score": r.score, "timestamp": r.metadata.get("timestamp", "")}
                for r in search_memories(
                    "session summary",
                    palace_path=self.palace_path,
                    wing=self.wing,
                    room="hermes",
                    n_results=3,
                )
            ]
            # Known facts live in "nexus".
            ctx.facts = [
                {"text": r.text, "score": r.score}
                for r in search_memories(
                    "important facts decisions",
                    palace_path=self.palace_path,
                    wing=self.wing,
                    room="nexus",
                    n_results=5,
                )
            ]
            # Wing-wide search for anything relevant to the caller's query.
            if query:
                ctx.relevant_memories = [
                    {"text": r.text, "score": r.score, "room": r.room}
                    for r in search_memories(
                        query,
                        palace_path=self.palace_path,
                        wing=self.wing,
                        n_results=n_results,
                    )
                ]
            ctx.loaded = True
        except Exception as e:
            ctx.error = str(e)
            logger.warning("Failed to recall context: %s", e)
        return ctx

    def remember(
        self,
        text: str,
        room: str = "nexus",
        source_file: str = "",
        metadata: Optional[dict] = None,
    ) -> Optional[str]:
        """
        Store a memory.

        Args:
            text: The memory content.
            room: Target room (forge, hermes, nexus, issues, experiments).
            source_file: Optional source attribution.
            metadata: Extra metadata merged into the stored document.

        Returns:
            Document ID if stored, None if MemPalace unavailable or the
            write failed.
        """
        if not self._check_available():
            logger.warning("Cannot store memory — MemPalace unavailable")
            return None
        try:
            from nexus.mempalace.searcher import add_memory
            doc_id = add_memory(
                text=text,
                room=room,
                wing=self.wing,
                palace_path=self.palace_path,
                source_file=source_file,
                extra_metadata=metadata or {},
            )
            logger.debug("Stored memory in %s: %s...", room, text[:80])
            return doc_id
        except Exception as e:
            logger.warning("Failed to store memory: %s", e)
            return None

    def write_diary(
        self,
        summary: Optional[str] = None,
    ) -> Optional[str]:
        """
        Write a session diary entry to MemPalace (room "hermes").

        Called at session end. If summary is None, auto-generates one
        from the session transcript.

        Args:
            summary: Override summary text. If None, generates from
                the current transcript.

        Returns:
            Document ID if stored, None if there is nothing to write or
            MemPalace is unavailable.
        """
        if summary is None and self._transcript:
            summary = self._transcript.summary()
        if not summary:
            # No transcript and no explicit summary — nothing to record.
            return None
        timestamp = datetime.now(timezone.utc).isoformat()
        diary_text = f"[{timestamp}] Session by {self.agent_name}:\n{summary}"
        return self.remember(
            diary_text,
            room="hermes",
            metadata={
                "type": "session_diary",
                "agent": self.agent_name,
                "timestamp": timestamp,
                "entry_count": len(self._transcript.entries) if self._transcript else 0,
            },
        )

    def start_session(self) -> SessionTranscript:
        """
        Begin a new session transcript (replacing any previous one).

        Returns:
            The transcript object for recording turns.
        """
        self._transcript = SessionTranscript(
            agent_name=self.agent_name,
            wing=self.wing,
        )
        logger.info("Session started for %s", self.agent_name)
        return self._transcript

    def end_session(self, diary_summary: Optional[str] = None) -> Optional[str]:
        """
        End the current session: write the diary, drop the transcript.

        Returns:
            The diary document ID, or None if nothing was written.
        """
        doc_id = self.write_diary(diary_summary)
        self._transcript = None
        logger.info("Session ended for %s", self.agent_name)
        return doc_id

    def search(
        self,
        query: str,
        room: Optional[str] = None,
        n_results: int = 5,
    ) -> list[dict]:
        """
        Search memories in this agent's wing. Useful mid-session recall.

        Args:
            query: Free-text search query.
            room: Optional room filter; None searches the whole wing.
            n_results: Max results to return.

        Returns:
            List of {text, room, wing, score} dicts; empty on failure
            or when MemPalace is unavailable.
        """
        if not self._check_available():
            return []
        try:
            from nexus.mempalace.searcher import search_memories
            results = search_memories(
                query,
                palace_path=self.palace_path,
                wing=self.wing,
                room=room,
                n_results=n_results,
            )
            return [
                {"text": r.text, "room": r.room, "wing": r.wing, "score": r.score}
                for r in results
            ]
        except Exception as e:
            logger.warning("Search failed: %s", e)
            return []
# --- Fleet-wide memory helpers ---
def create_agent_memory(
    agent_name: str,
    palace_path: Optional[Path] = None,
) -> AgentMemory:
    """
    Factory for creating AgentMemory with standard config.

    The wing comes from the MEMPALACE_WING environment variable when
    set, otherwise it defaults to "wing_{agent_name}".
    """
    default_wing = f"wing_{agent_name}"
    return AgentMemory(
        agent_name=agent_name,
        wing=os.environ.get("MEMPALACE_WING", default_wing),
        palace_path=palace_path,
    )

183
agent/memory_hooks.py Normal file
View File

@@ -0,0 +1,183 @@
"""
agent.memory_hooks — Session lifecycle hooks for agent memory.
Integrates AgentMemory into the agent session lifecycle:
- on_session_start: Load context, inject into prompt
- on_user_turn: Record user input
- on_agent_turn: Record agent output
- on_tool_call: Record tool usage
- on_session_end: Write diary, clean up
These hooks are designed to be called from the Hermes harness or
any agent framework. They're fire-and-forget — failures are logged
but never crash the session.
Usage:
from agent.memory_hooks import MemoryHooks
hooks = MemoryHooks(agent_name="bezalel")
hooks.on_session_start() # loads context
# In your agent loop:
hooks.on_user_turn("Check CI pipeline health")
hooks.on_agent_turn("Running CI check...")
hooks.on_tool_call("shell", "pytest tests/", "12 passed")
# End of session:
hooks.on_session_end() # writes diary
"""
from __future__ import annotations
import logging
from typing import Optional
from agent.memory import AgentMemory, MemoryContext, create_agent_memory
logger = logging.getLogger("agent.memory_hooks")
class MemoryHooks:
    """
    Drop-in session lifecycle hooks for agent memory.

    Wraps AgentMemory with error boundaries — every hook catches
    exceptions and logs warnings so memory failures never crash
    the agent session.
    """

    def __init__(
        self,
        agent_name: str,
        palace_path=None,
        auto_diary: bool = True,
    ):
        """
        Args:
            agent_name: Agent identifier (determines the default wing).
            palace_path: Optional MemPalace path override, forwarded to
                the lazily created AgentMemory.
            auto_diary: Whether a diary should be written automatically
                at session end.
        """
        self.agent_name = agent_name
        self.auto_diary = auto_diary
        # Bug fix: palace_path was previously dropped on the floor — the
        # `memory` property read a never-assigned `_palace_path`, so an
        # explicit palace path was silently ignored. Store it here.
        self._palace_path = palace_path
        self._memory: Optional[AgentMemory] = None
        self._context: Optional[MemoryContext] = None
        self._active = False

    @property
    def memory(self) -> AgentMemory:
        """Lazily create the underlying AgentMemory on first access."""
        if self._memory is None:
            self._memory = create_agent_memory(
                self.agent_name,
                palace_path=self._palace_path,
            )
        return self._memory

    def on_session_start(self, query: Optional[str] = None) -> str:
        """
        Called at session start. Loads context from MemPalace.

        Returns a prompt block to inject into the agent's context, or
        empty string if memory is unavailable.

        Args:
            query: Optional recall query (e.g., "What was I working on?")
        """
        try:
            self.memory.start_session()
            self._active = True
            self._context = self.memory.recall_context(query=query)
            block = self._context.to_prompt_block()
            if block:
                logger.info(
                    f"Loaded {len(self._context.recent_diaries)} diaries, "
                    f"{len(self._context.facts)} facts, "
                    f"{len(self._context.relevant_memories)} relevant memories "
                    f"for {self.agent_name}"
                )
            else:
                logger.info(f"No prior memory for {self.agent_name}")
            return block
        except Exception as e:
            logger.warning(f"Session start memory hook failed: {e}")
            return ""

    def on_user_turn(self, text: str):
        """Record a user message (no-op before on_session_start)."""
        if not self._active:
            return
        try:
            if self.memory._transcript:
                self.memory._transcript.add_user_turn(text)
        except Exception as e:
            logger.debug(f"Failed to record user turn: {e}")

    def on_agent_turn(self, text: str):
        """Record an agent response (no-op before on_session_start)."""
        if not self._active:
            return
        try:
            if self.memory._transcript:
                self.memory._transcript.add_agent_turn(text)
        except Exception as e:
            logger.debug(f"Failed to record agent turn: {e}")

    def on_tool_call(self, tool: str, args: str, result_summary: str):
        """Record a tool invocation (no-op before on_session_start)."""
        if not self._active:
            return
        try:
            if self.memory._transcript:
                self.memory._transcript.add_tool_call(tool, args, result_summary)
        except Exception as e:
            logger.debug(f"Failed to record tool call: {e}")

    def on_important_decision(self, text: str, room: str = "nexus"):
        """
        Record an important decision or fact for long-term memory.

        Use this when the agent makes a significant decision that
        should persist beyond the current session. Works even outside
        an active session.
        """
        try:
            self.memory.remember(text, room=room, metadata={"type": "decision"})
            logger.info(f"Remembered decision: {text[:80]}...")
        except Exception as e:
            logger.warning(f"Failed to remember decision: {e}")

    def on_session_end(self, summary: Optional[str] = None) -> Optional[str]:
        """
        Called at session end. Writes diary entry.

        Args:
            summary: Override diary text. If None, auto-generates.

        Returns:
            Diary document ID, or None (including when no session
            is active).
        """
        if not self._active:
            return None
        try:
            doc_id = self.memory.end_session(diary_summary=summary)
            self._active = False
            self._context = None
            return doc_id
        except Exception as e:
            logger.warning(f"Session end memory hook failed: {e}")
            # Deactivate even on failure so later hooks don't keep firing.
            self._active = False
            return None

    def search(self, query: str, room: Optional[str] = None) -> list[dict]:
        """
        Search memories during a session.

        Returns list of {text, room, wing, score}; empty on failure.
        """
        try:
            return self.memory.search(query, room=room)
        except Exception as e:
            logger.warning(f"Memory search failed: {e}")
            return []

    @property
    def is_active(self) -> bool:
        """True between a successful on_session_start and on_session_end."""
        return self._active

36
app.js
View File

@@ -8,7 +8,7 @@ import { SpatialAudio } from './nexus/components/spatial-audio.js';
import { MemoryBirth } from './nexus/components/memory-birth.js';
import { MemoryOptimizer } from './nexus/components/memory-optimizer.js';
import { MemoryInspect } from './nexus/components/memory-inspect.js';
import { MemoryPulse } from './nexus/components/memory-pulse.js';\nimport { performanceSystem } from './nexus/performance-integration.js';
import { MemoryPulse } from './nexus/components/memory-pulse.js';
// ═══════════════════════════════════════════
// NEXUS v1.1 — Portal System Update
@@ -757,7 +757,8 @@ async function init() {
SpatialAudio.init(camera, scene);
SpatialAudio.bindSpatialMemory(SpatialMemory);
MemoryInspect.init({ onNavigate: _navigateToMemory });
MemoryPulse.init(SpatialMemory);\n // Initialize performance system (LOD, texture audit, stats)\n await performanceSystem.init(camera, scene, renderer);\n updateLoad(90);
MemoryPulse.init(SpatialMemory);
updateLoad(90);
loadSession();
connectHermes();
@@ -1332,24 +1333,28 @@ function createAgentPresences() {
const color = new THREE.Color(data.color);
// Agent Orb with LOD
const orbLods = performanceSystem.lodManager.constructor.createSphereLODs(0.4, color, 2);
const orb = new THREE.Mesh(orbLods.high.geometry.clone(), orbLods.high.material.clone());
// Agent Orb
const orbGeo = new THREE.SphereGeometry(0.4, 32, 32);
const orbMat = new THREE.MeshPhysicalMaterial({
color: color,
emissive: color,
emissiveIntensity: 2,
roughness: 0,
metalness: 1,
transmission: 0.8,
thickness: 0.5,
});
const orb = new THREE.Mesh(orbGeo, orbMat);
orb.position.y = 3;
group.add(orb);
// Register orb for LOD management
performanceSystem.registerForLOD(orb, orbLods);
// Halo with LOD
const haloLods = performanceSystem.lodManager.constructor.createTorusLODs(0.6, 0.02, color);
const halo = new THREE.Mesh(haloLods.high.geometry.clone(), haloLods.high.material.clone());
// Halo
const haloGeo = new THREE.TorusGeometry(0.6, 0.02, 16, 64);
const haloMat = new THREE.MeshBasicMaterial({ color: color, transparent: true, opacity: 0.4 });
const halo = new THREE.Mesh(haloGeo, haloMat);
halo.position.y = 3;
halo.rotation.x = Math.PI / 2;
group.add(halo);
// Register halo for LOD management
performanceSystem.registerForLOD(halo, haloLods);
// Label
const canvas = document.createElement('canvas');
@@ -3313,7 +3318,8 @@ function gameLoop() {
SpatialMemory.update(delta);
SpatialAudio.update(delta);
MemoryBirth.update(delta);
MemoryPulse.update();\n performanceSystem.update(delta);\n animateMemoryOrbs(delta);
MemoryPulse.update();
animateMemoryOrbs(delta);
}

258
bin/memory_mine.py Normal file
View File

@@ -0,0 +1,258 @@
#!/usr/bin/env python3
"""
memory_mine.py — Mine session transcripts into MemPalace.
Reads Hermes session logs (JSONL format) and stores summaries
in the palace. Supports batch mining, single-file processing,
and live directory watching.
Usage:
# Mine a single session file
python3 bin/memory_mine.py ~/.hermes/sessions/2026-04-13.jsonl
# Mine all sessions from last 7 days
python3 bin/memory_mine.py --days 7
# Mine a specific wing's sessions
python3 bin/memory_mine.py --wing wing_bezalel --days 14
# Dry run — show what would be mined
python3 bin/memory_mine.py --dry-run --days 7
"""
from __future__ import annotations
import argparse
import json
import logging
import os
import sys
import time
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Optional
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger("memory-mine")
REPO_ROOT = Path(__file__).resolve().parent.parent
if str(REPO_ROOT) not in sys.path:
sys.path.insert(0, str(REPO_ROOT))
def parse_session_file(path: Path) -> list[dict]:
    """
    Parse a JSONL session file into turns.

    Each line is expected to be a JSON object with:
      - role: "user" | "assistant" | "system" | "tool"
      - content: text
      - timestamp: ISO string (optional)

    Blank lines are skipped; malformed lines are logged at DEBUG and
    dropped rather than aborting the whole file.

    Args:
        path: Path to the JSONL session file.

    Returns:
        List of turn dicts, in file order.
    """
    turns: list[dict] = []
    # Session logs are JSON, so decode explicitly as UTF-8 instead of
    # relying on the platform's default encoding.
    with open(path, encoding="utf-8") as f:
        for i, line in enumerate(f):
            line = line.strip()
            if not line:
                continue
            try:
                turns.append(json.loads(line))
            except json.JSONDecodeError:
                logger.debug("Skipping malformed line %d in %s", i + 1, path)
    return turns
def summarize_session(turns: list[dict], agent_name: str = "unknown") -> str:
    """
    Build a compact, human-readable summary of a session's turns.

    User messages and key agent responses are kept (truncated); tool
    calls are listed by name. Other roles are ignored.
    """
    if not turns:
        return "Empty session."

    buckets: dict[str, list[str]] = {"user": [], "assistant": [], "tool": []}
    for turn in turns:
        role = turn.get("role", "")
        content = str(turn.get("content", ""))[:300]
        if role in ("user", "assistant"):
            buckets[role].append(content)
        elif role == "tool":
            tool_name = turn.get("name", turn.get("tool", "unknown"))
            buckets["tool"].append(f"{tool_name}: {content[:150]}")

    user_msgs = buckets["user"]
    agent_msgs = buckets["assistant"]
    tool_calls = buckets["tool"]

    lines = [f"Session by {agent_name}:"]
    if user_msgs:
        lines.append(f"\nUser asked ({len(user_msgs)} messages):")
        lines.extend(f" - {msg[:200]}" for msg in user_msgs[:5])
        if len(user_msgs) > 5:
            lines.append(f" ... and {len(user_msgs) - 5} more")
    if agent_msgs:
        lines.append(f"\nAgent responded ({len(agent_msgs)} messages):")
        lines.extend(f" - {msg[:200]}" for msg in agent_msgs[:3])
    if tool_calls:
        lines.append(f"\nTools used ({len(tool_calls)} calls):")
        lines.extend(f" - {tc}" for tc in tool_calls[:5])
    return "\n".join(lines)
def mine_session(
    path: Path,
    wing: str,
    palace_path: Optional[Path] = None,
    dry_run: bool = False,
) -> Optional[str]:
    """
    Mine a single session file into MemPalace.

    Parses the JSONL transcript, summarizes it, and stores the summary
    in the "hermes" room attributed to the wing's agent.

    Args:
        path: Session JSONL file.
        wing: Wing name; the agent name is derived by stripping "wing_".
        palace_path: Optional palace location override.
        dry_run: If True, print the would-be summary and store nothing.

    Returns:
        The document ID if stored, None on failure, empty file, or dry run.
    """
    try:
        from agent.memory import AgentMemory
    except ImportError:
        logger.error("Cannot import agent.memory — is the repo in PYTHONPATH?")
        return None

    turns = parse_session_file(path)
    if not turns:
        logger.debug(f"Empty session file: {path}")
        return None

    # Wing names follow the "wing_<agent>" convention.
    agent_name = wing.replace("wing_", "")
    summary = summarize_session(turns, agent_name)

    if dry_run:
        print(f"\n--- {path.name} ---")
        print(summary[:500])
        print(f"({len(turns)} turns)")
        return None

    mem = AgentMemory(agent_name=agent_name, wing=wing, palace_path=palace_path)
    doc_id = mem.remember(
        summary,
        room="hermes",
        source_file=str(path),
        metadata={
            "type": "mined_session",
            "source": str(path),
            "turn_count": len(turns),
            "agent": agent_name,
            "timestamp": datetime.now(timezone.utc).isoformat(),
        },
    )
    if doc_id:
        # Fix: the log line previously ran the filename and doc ID
        # together with no separator.
        logger.info(f"Mined {path.name} → {doc_id} ({len(turns)} turns)")
    else:
        logger.warning(f"Failed to mine {path.name}")
    return doc_id
def find_session_files(
    sessions_dir: Path,
    days: int = 7,
    pattern: str = "*.jsonl",
) -> list[Path]:
    """
    Collect session files modified within the last ``days`` days.

    The file's modification time is used as a proxy for the session
    date. Returns paths sorted by name; an empty list (with a warning)
    if the directory does not exist.
    """
    if not sessions_dir.exists():
        logger.warning(f"Sessions directory not found: {sessions_dir}")
        return []
    cutoff = datetime.now() - timedelta(days=days)
    return [
        candidate
        for candidate in sorted(sessions_dir.glob(pattern))
        if datetime.fromtimestamp(candidate.stat().st_mtime) >= cutoff
    ]
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: resolve target files, then mine each session."""
    parser = argparse.ArgumentParser(
        description="Mine session transcripts into MemPalace"
    )
    parser.add_argument(
        "files", nargs="*", help="Session files to mine (JSONL format)"
    )
    parser.add_argument(
        "--days", type=int, default=7,
        help="Mine sessions from last N days (default: 7)"
    )
    parser.add_argument(
        "--sessions-dir",
        default=str(Path.home() / ".hermes" / "sessions"),
        help="Directory containing session JSONL files"
    )
    parser.add_argument(
        "--wing", default=None,
        help="Wing name (default: auto-detect from MEMPALACE_WING env or 'wing_timmy')"
    )
    parser.add_argument(
        "--palace-path", default=None,
        help="Override palace path"
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="Show what would be mined without storing"
    )
    args = parser.parse_args(argv)

    wing = args.wing or os.environ.get("MEMPALACE_WING", "wing_timmy")
    palace_path = Path(args.palace_path) if args.palace_path else None

    # Explicit positional files win; otherwise scan the sessions dir.
    if args.files:
        targets = [Path(f) for f in args.files]
    else:
        targets = find_session_files(Path(args.sessions_dir), days=args.days)

    if not targets:
        logger.info("No session files found to mine.")
        return 0

    logger.info(f"Mining {len(targets)} session files (wing={wing})")
    mined = 0
    failed = 0
    for target in targets:
        doc_id = mine_session(target, wing=wing, palace_path=palace_path, dry_run=args.dry_run)
        if doc_id:
            mined += 1
        elif doc_id is None and not args.dry_run:
            failed += 1

    if args.dry_run:
        logger.info(f"Dry run complete — {len(targets)} files would be mined")
    else:
        logger.info(f"Mining complete — {mined} mined, {failed} failed")
    return 0
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,154 +0,0 @@
# NEXUS Performance & Hardware Requirements
## Overview
This document outlines the minimum and recommended hardware requirements for running The Nexus 3D world, based on the LOD (Level of Detail) system, texture auditing, and performance monitoring.
## Performance System
The Nexus now includes:
1. **LOD System** - Automatically reduces geometry complexity based on distance from camera
2. **Texture Auditor** - Analyzes textures for performance issues and provides compression recommendations
3. **Performance Monitor** - Real-time stats.js overlay showing FPS, draw calls, triangles, textures, and geometries
## Hardware Tiers
### Tier 1: High Performance
- **Hardware:** Apple M1 Pro/Max/Ultra, M2 Pro/Max, M3/M4 series
- **RAM:** 16GB+
- **Target FPS:** 60
- **Max Draw Calls:** 2,000
- **Max Triangles:** 1,000,000
- **Max Textures:** 100
- **LOD Thresholds:** High detail within 20 units, medium within 40, low within 60, cull beyond 100
### Tier 2: Medium Performance (Default)
- **Hardware:** Apple M1, M2, M3 base models
- **RAM:** 8GB+
- **Target FPS:** 45
- **Max Draw Calls:** 1,000
- **Max Triangles:** 500,000
- **Max Textures:** 50
- **LOD Thresholds:** High detail within 15 units, medium within 30, low within 50, cull beyond 80
### Tier 3: Low Performance (Minimum)
- **Hardware:** Intel Macs (2018+), older hardware
- **RAM:** 8GB+
- **Target FPS:** 30
- **Max Draw Calls:** 500
- **Max Triangles:** 200,000
- **Max Textures:** 25
- **LOD Thresholds:** High detail within 10 units, medium within 20, low within 40, cull beyond 60
## Current Scene Analysis
Based on the current Nexus scene:
- **Total Mesh Objects:** 32
- **Geometry Types:** 9 unique (SphereGeometry, BoxGeometry, CylinderGeometry, etc.)
- **Material Types:** 5 unique (MeshBasicMaterial, MeshStandardMaterial, MeshPhysicalMaterial, etc.)
- **Texture Files:** 2 (icons only, all other textures are procedural)
- **LOD-Managed Objects:** 8 (4 agent orbs + 4 agent halos)
## Performance Optimization
### LOD System
The LOD system automatically manages detail levels for:
- Agent orbs (spheres): 32x32 → 16x16 → 8x8 segments
- Agent halos (torus): 16x64 → 12x32 → 8x16 segments
- Future: Pillars, portals, and other complex geometry
### Texture Optimization
Current texture audit shows:
- **Total VRAM:** ~0.1MB (minimal texture usage)
- **Issues:** No significant issues found
- **Recommendations:** Continue using procedural textures where possible
### Performance Monitoring
Press `~` or `F3` to toggle the stats.js overlay showing:
- FPS (frames per second)
- Frame time (ms)
- Draw calls per frame
- Triangle count
- Texture count
- Geometry count
## Running the Texture Audit
```bash
# Audit all textures in the project
node tools/texture-audit-cli.js .
# Save results to JSON
node tools/texture-audit-cli.js . audit-results.json
```
## Performance Recommendations
### For All Hardware:
1. **Enable LOD system** - Automatically reduces detail for distant objects
2. **Monitor with stats.js** - Use the overlay to identify bottlenecks
3. **Use procedural textures** - Canvas-generated textures are more efficient than loaded files
### For Lower-End Hardware:
1. **Reduce post-processing** - Bloom and SMAAPass are disabled on "low" tier
2. **Limit particle systems** - Ash storm disabled on "low" tier
3. **Reduce ambient structures** - Disabled on "low" tier
### For Developers:
1. **Register new geometry for LOD** - Use `performanceSystem.registerForLOD()`
2. **Audit new textures** - Run the texture audit before adding new assets
3. **Monitor performance** - Check stats.js during development
## Minimum Sovereign Hardware
Based on current analysis, the minimum hardware for a sovereign Nexus instance:
**Absolute Minimum:**
- **CPU:** Any modern processor (Intel i5/AMD Ryzen 5 or Apple M1)
- **RAM:** 8GB
- **GPU:** Integrated graphics (Intel Iris, AMD Radeon, Apple GPU)
- **Storage:** 1GB free space
- **Browser:** Chrome 90+, Firefox 88+, Safari 14+, Edge 90+
**Recommended for 60 FPS:**
- **CPU:** Apple M1 or better
- **RAM:** 16GB
- **GPU:** Apple M1 GPU or dedicated graphics
- **Storage:** 2GB free space
- **Browser:** Latest Chrome or Safari
## Future Optimizations
1. **Texture Atlasing** - Combine multiple textures into single atlases
2. **Instanced Rendering** - For repeated geometry (pillars, portals)
3. **Occlusion Culling** - Don't render objects behind other objects
4. **WebGL 2.0 Features** - Use compute shaders and transform feedback
5. **WebGPU Migration** - Future-proof for next-generation graphics
## Troubleshooting
### Low FPS
1. Check stats.js overlay for bottlenecks
2. Verify LOD system is active
3. Reduce browser zoom level
4. Close other browser tabs
5. Update graphics drivers
### High Memory Usage
1. Run texture audit to identify large textures
2. Reduce texture sizes or use compression
3. Limit particle counts
4. Check for memory leaks in browser console
### Visual Artifacts
1. Ensure textures are power-of-two dimensions
2. Check material settings for transparency issues
3. Verify LOD transitions are smooth
4. Test on different browsers
---
*Generated by NEXUS Performance System v1.0*
*Last updated: 2026-04-13*

View File

@@ -1,286 +0,0 @@
/**
* ═══════════════════════════════════════════
* NEXUS LOD SYSTEM — Level of Detail Management
* ═══════════════════════════════════════════
*
* Provides automatic LOD switching based on distance from camera.
* Optimizes performance for local hardware.
*/
import * as THREE from 'three';
export class LODManager {
/**
 * @param {THREE.Camera} camera - Camera used to measure distance for LOD decisions.
 * @param {THREE.Scene} scene - Scene that LOD meshes are added to.
 */
constructor(camera, scene) {
    this.camera = camera;
    this.scene = scene;
    this.lodObjects = new Map(); // object UUID → { levels[], currentLevel }
    this.updateInterval = 0.5; // seconds between LOD updates
    this.lastUpdate = 0;
    // Distance bands (in world units) that select the active LOD level.
    this.distanceThresholds = {
        high: 15, // High detail within 15 units
        medium: 30, // Medium detail within 30 units
        low: 50, // Low detail within 50 units
        cull: 100, // Cull beyond 100 units
    };
    // Running counters of how many managed objects sit at each level.
    this.stats = {
        totalObjects: 0,
        highDetail: 0,
        mediumDetail: 0,
        lowDetail: 0,
        culled: 0,
    };
}
/**
 * Register an object for LOD management
 * @param {THREE.Object3D} object - The object to manage
 * @param {Object} lodLevels - LOD level configurations
 * lodLevels = {
 *   high: { geometry: THREE.BufferGeometry, material: THREE.Material },
 *   medium: { geometry: THREE.BufferGeometry, material: THREE.Material },
 *   low: { geometry: THREE.BufferGeometry, material: THREE.Material },
 * }
 *
 * NOTE(review): assumes lodLevels always contains a `high` entry —
 * `levels.high.visible` below would throw otherwise; confirm callers.
 */
registerObject(object, lodLevels) {
    const uuid = object.uuid;
    // Store original object data so the object's transform can be
    // mirrored onto each LOD mesh.
    const original = {
        geometry: object.geometry,
        material: object.material,
        position: object.position.clone(),
        rotation: object.rotation.clone(),
        scale: object.scale.clone(),
    };
    // Create LOD meshes — one hidden mesh per level, added to the scene,
    // tagged so they can be traced back to the managed object.
    const levels = {};
    for (const [levelName, config] of Object.entries(lodLevels)) {
        const mesh = new THREE.Mesh(config.geometry, config.material);
        mesh.position.copy(original.position);
        mesh.rotation.copy(original.rotation);
        mesh.scale.copy(original.scale);
        mesh.visible = false;
        mesh.userData.lodLevel = levelName;
        mesh.userData.parentUUID = uuid;
        this.scene.add(mesh);
        levels[levelName] = mesh;
    }
    // Store LOD data keyed by the original object's UUID.
    this.lodObjects.set(uuid, {
        object,
        original,
        levels,
        currentLevel: 'high',
    });
    // Hide original, show high-detail — the original mesh stays in the
    // scene but is never rendered again while managed.
    object.visible = false;
    levels.high.visible = true;
    this.stats.totalObjects++;
    this.stats.highDetail++;
}
/**
* Create LOD levels for a sphere (agent orbs)
*/
static createSphereLODs(radius, color, emissiveIntensity = 2) {
return {
high: {
geometry: new THREE.SphereGeometry(radius, 32, 32),
material: new THREE.MeshPhysicalMaterial({
color: color,
emissive: color,
emissiveIntensity: emissiveIntensity,
roughness: 0,
metalness: 1,
transmission: 0.8,
thickness: 0.5,
}),
},
medium: {
geometry: new THREE.SphereGeometry(radius, 16, 16),
material: new THREE.MeshStandardMaterial({
color: color,
emissive: color,
emissiveIntensity: emissiveIntensity * 0.8,
roughness: 0.2,
metalness: 0.8,
}),
},
low: {
geometry: new THREE.SphereGeometry(radius, 8, 8),
material: new THREE.MeshBasicMaterial({
color: color,
}),
},
};
}
/**
* Create LOD levels for a torus (halos)
*/
static createTorusLODs(radius, tube, color) {
return {
high: {
geometry: new THREE.TorusGeometry(radius, tube, 16, 64),
material: new THREE.MeshBasicMaterial({
color: color,
transparent: true,
opacity: 0.4,
}),
},
medium: {
geometry: new THREE.TorusGeometry(radius, tube, 12, 32),
material: new THREE.MeshBasicMaterial({
color: color,
transparent: true,
opacity: 0.3,
}),
},
low: {
geometry: new THREE.TorusGeometry(radius, tube * 1.5, 8, 16),
material: new THREE.MeshBasicMaterial({
color: color,
transparent: true,
opacity: 0.2,
}),
},
};
}
/**
* Create LOD levels for a cylinder (pillars)
*/
static createCylinderLODs(radiusTop, radiusBottom, height, color) {
return {
high: {
geometry: new THREE.CylinderGeometry(radiusTop, radiusBottom, height, 32),
material: new THREE.MeshStandardMaterial({
color: color,
metalness: 0.7,
roughness: 0.3,
}),
},
medium: {
geometry: new THREE.CylinderGeometry(radiusTop, radiusBottom, height, 16),
material: new THREE.MeshStandardMaterial({
color: color,
metalness: 0.5,
ground: 0.5,
}),
},
low: {
geometry: new THREE.CylinderGeometry(radiusTop, radiusBottom, height, 8),
material: new THREE.MeshBasicMaterial({
color: color,
}),
},
};
}
/**
* Update LOD levels based on camera distance
*/
update(deltaTime) {
this.lastUpdate += deltaTime;
if (this.lastUpdate < this.updateInterval) return;
this.lastUpdate = 0;
const cameraPos = this.camera.position;
// Reset stats
this.stats.highDetail = 0;
this.stats.mediumDetail = 0;
this.stats.lowDetail = 0;
this.stats.culled = 0;
for (const [uuid, lodData] of this.lodObjects) {
const distance = cameraPos.distanceTo(lodData.object.position);
// Determine target LOD level
let targetLevel;
if (distance < this.distanceThresholds.high) {
targetLevel = 'high';
} else if (distance < this.distanceThresholds.medium) {
targetLevel = 'medium';
} else if (distance < this.distanceThresholds.low) {
targetLevel = 'low';
} else {
targetLevel = 'culled';
}
// Update LOD if changed
if (targetLevel !== lodData.currentLevel) {
// Hide current level
if (lodData.levels[lodData.currentLevel]) {
lodData.levels[lodData.currentLevel].visible = false;
}
// Show new level (or cull)
if (targetLevel !== 'culled' && lodData.levels[targetLevel]) {
lodData.levels[targetLevel].visible = true;
}
lodData.currentLevel = targetLevel;
}
// Update stats
switch (targetLevel) {
case 'high': this.stats.highDetail++; break;
case 'medium': this.stats.mediumDetail++; break;
case 'low': this.stats.lowDetail++; break;
case 'culled': this.stats.culled++; break;
}
}
}
/**
* Get current LOD statistics
*/
getStats() {
return { ...this.stats };
}
/**
* Set distance thresholds
*/
setThresholds(high, medium, low, cull) {
this.distanceThresholds = { high, medium, low, cull };
}
/**
* Remove object from LOD management
*/
unregisterObject(uuid) {
const lodData = this.lodObjects.get(uuid);
if (!lodData) return;
// Remove LOD meshes from scene
for (const mesh of Object.values(lodData.levels)) {
this.scene.remove(mesh);
mesh.geometry.dispose();
mesh.material.dispose();
}
// Restore original object visibility
lodData.object.visible = true;
this.lodObjects.delete(uuid);
this.stats.totalObjects--;
}
/**
* Cleanup all LOD objects
*/
dispose() {
for (const [uuid] of this.lodObjects) {
this.unregisterObject(uuid);
}
}
}
// Export singleton instance
// NOTE(review): constructed without the (camera, scene) arguments the
// constructor expects, so update() on this instance would throw. Other code
// (e.g. PerformanceSystem) builds its own LODManager — confirm whether this
// export is actually used before relying on it.
export const lodManager = new LODManager();

View File

@@ -1,38 +0,0 @@
/**
* ═══════════════════════════════════════════
* NEXUS LOD SYSTEM TEST — Verification Script
* ═══════════════════════════════════════════
*
* Simple test to verify LOD system functionality.
*/
import { LODManager } from './lod-manager.js';
// Smoke-test each LOD factory: build the high/medium/low configs and dump
// their geometry parameters so segment counts can be eyeballed.
console.log('Testing LOD system...');

const factories = [
  ['Sphere LODs:', () => LODManager.createSphereLODs(0.5, 0xff0000, 2)],
  ['Torus LODs:', () => LODManager.createTorusLODs(0.6, 0.02, 0x00ff00)],
  ['Cylinder LODs:', () => LODManager.createCylinderLODs(0.3, 0.3, 2, 0x0000ff)],
];

for (const [label, build] of factories) {
  const lods = build();
  console.log(label, {
    high: lods.high.geometry.parameters,
    medium: lods.medium.geometry.parameters,
    low: lods.low.geometry.parameters,
  });
}

console.log('LOD system test complete!');

View File

@@ -1,294 +0,0 @@
/**
* ═══════════════════════════════════════════
* NEXUS PERFORMANCE INTEGRATION — LOD + Texture Audit + Stats
* ═══════════════════════════════════════════
*
* Integrates LOD system, texture auditing, and performance
* monitoring into the main Nexus application.
*/
import { LODManager } from './lod-manager.js';
import { TextureAuditor } from './texture-auditor.js';
import { PerformanceMonitor } from './performance-monitor.js';
export class PerformanceSystem {
  /**
   * Facade that wires together LOD management, texture auditing, and the
   * stats.js-based performance monitor for one renderer/scene pair.
   *
   * @param {THREE.Camera} camera
   * @param {THREE.Scene} scene
   * @param {THREE.WebGLRenderer} renderer - Must expose getContext() and .info.
   */
  constructor(camera, scene, renderer) {
    this.camera = camera;
    this.scene = scene;
    this.renderer = renderer;
    // Initialize subsystems
    this.lodManager = new LODManager(camera, scene);
    this.textureAuditor = new TextureAuditor();
    this.performanceMonitor = new PerformanceMonitor();
    // State
    this.isEnabled = true;
    this.autoLOD = true;
    this.autoAudit = true;
    this.lastAuditTime = 0;
    this.auditInterval = 30; // seconds between audits
    // Performance tiers for local hardware; lodThresholds feed LODManager,
    // max* values are budgets used by the auditor / recommendations.
    this.hardwareTiers = {
      high: {
        name: 'High (M1 Pro/Max/Ultra)',
        description: 'M1 Pro or better, 16GB+ RAM',
        targetFPS: 60,
        maxDrawCalls: 2000,
        maxTriangles: 1000000,
        maxTextures: 100,
        lodThresholds: { high: 20, medium: 40, low: 60, cull: 100 },
      },
      medium: {
        name: 'Medium (M1/M2)',
        description: 'Base M1 or M2, 8GB+ RAM',
        targetFPS: 45,
        maxDrawCalls: 1000,
        maxTriangles: 500000,
        maxTextures: 50,
        lodThresholds: { high: 15, medium: 30, low: 50, cull: 80 },
      },
      low: {
        name: 'Low (Intel Mac / Older)',
        description: 'Intel Mac or older hardware',
        targetFPS: 30,
        maxDrawCalls: 500,
        maxTriangles: 200000,
        maxTextures: 25,
        lodThresholds: { high: 10, medium: 20, low: 40, cull: 60 },
      },
    };
    this.currentTier = 'medium'; // Default to medium
  }
  /**
   * Initialize the performance system: monitor init, tier detection,
   * tier settings, and an initial texture audit (when autoAudit is on).
   * @returns {Promise<PerformanceSystem>} this, for chaining.
   */
  async init() {
    console.log('[PerformanceSystem] Initializing...');
    // Initialize performance monitor
    await this.performanceMonitor.init();
    // Detect hardware tier
    await this.detectHardwareTier();
    // Apply tier settings
    this.applyTierSettings();
    // Run initial texture audit
    if (this.autoAudit) {
      this.runTextureAudit();
    }
    console.log(`[PerformanceSystem] Initialized with tier: ${this.currentTier}`);
    return this;
  }
  /**
   * Detect appropriate hardware tier from the WebGL renderer string.
   * Falls back to 'medium' when WEBGL_debug_renderer_info is unavailable.
   */
  async detectHardwareTier() {
    // Use WebGL renderer info for detection
    const gl = this.renderer.getContext();
    const debugInfo = gl.getExtension('WEBGL_debug_renderer_info');
    if (debugInfo) {
      const renderer = gl.getParameter(debugInfo.UNMASKED_RENDERER_WEBGL);
      console.log(`[PerformanceSystem] Detected GPU: ${renderer}`);
      // Simple heuristic based on renderer string
      if (renderer.includes('Apple M1 Max') || renderer.includes('Apple M1 Ultra') ||
          renderer.includes('Apple M2 Pro') || renderer.includes('Apple M2 Max') ||
          renderer.includes('Apple M3') || renderer.includes('Apple M4')) {
        this.currentTier = 'high';
      } else if (renderer.includes('Apple M1') || renderer.includes('Apple M2')) {
        this.currentTier = 'medium';
      } else {
        // NOTE(review): any non-Apple GPU (including desktop NVIDIA/AMD)
        // lands in 'low' — confirm this is intentional for the target hardware.
        this.currentTier = 'low';
      }
    } else {
      // Fallback: assume medium
      console.log('[PerformanceSystem] Could not detect GPU, assuming medium tier');
      this.currentTier = 'medium';
    }
  }
  /**
   * Apply settings for current hardware tier (LOD thresholds, texture budget).
   */
  applyTierSettings() {
    const tier = this.hardwareTiers[this.currentTier];
    if (!tier) return;
    // Set LOD thresholds
    this.lodManager.setThresholds(
      tier.lodThresholds.high,
      tier.lodThresholds.medium,
      tier.lodThresholds.low,
      tier.lodThresholds.cull
    );
    // Set texture auditor limits
    this.textureAuditor.maxTotalTextures = tier.maxTextures;
    console.log(`[PerformanceSystem] Applied ${tier.name} settings`);
    console.log(`  Target FPS: ${tier.targetFPS}`);
    console.log(`  Max draw calls: ${tier.maxDrawCalls}`);
    console.log(`  Max triangles: ${tier.maxTriangles}`);
    console.log(`  Max textures: ${tier.maxTextures}`);
  }
  /**
   * Per-frame update: LOD switching, monitor sampling, and a periodic
   * texture audit every auditInterval seconds.
   * @param {number} deltaTime - Seconds since the previous frame.
   */
  update(deltaTime) {
    if (!this.isEnabled) return;
    // Update LOD system
    if (this.autoLOD) {
      this.lodManager.update(deltaTime);
    }
    // Update performance monitor
    this.performanceMonitor.update(this.renderer, this.scene, deltaTime);
    // Periodic texture audit
    this.lastAuditTime += deltaTime;
    if (this.autoAudit && this.lastAuditTime > this.auditInterval) {
      this.lastAuditTime = 0;
      this.runTextureAudit();
    }
  }
  /**
   * Run texture audit over the scene and cache the result on this.lastAudit.
   * @returns {{sceneAudit: Object, compressionPlan: Object, timestamp: number}}
   */
  runTextureAudit() {
    console.log('[PerformanceSystem] Running texture audit...');
    this.textureAuditor.clear();
    const sceneAudit = this.textureAuditor.auditScene(this.scene);
    const compressionPlan = this.textureAuditor.generateCompressionPlan();
    // Store results
    this.lastAudit = {
      sceneAudit,
      compressionPlan,
      timestamp: Date.now(),
    };
    return this.lastAudit;
  }
  /**
   * Register an object for LOD management (delegates to LODManager).
   */
  registerForLOD(object, lodLevels) {
    this.lodManager.registerObject(object, lodLevels);
  }
  /**
   * Get a combined performance report: tier info, monitor metrics, LOD
   * stats, and the most recent texture audit (or null before the first one).
   */
  getPerformanceReport() {
    const monitorReport = this.performanceMonitor.getReport();
    const lodStats = this.lodManager.getStats();
    return {
      timestamp: Date.now(),
      tier: this.currentTier,
      tierInfo: this.hardwareTiers[this.currentTier],
      monitor: monitorReport,
      lod: lodStats,
      textureAudit: this.lastAudit || null,
    };
  }
  /**
   * Get minimum hardware requirements based on current scene complexity.
   * Notes are appended when draw calls / triangles exceed fixed budgets.
   */
  getMinimumHardwareRequirements() {
    const report = this.getPerformanceReport();
    const requirements = {
      recommended: {
        tier: report.tier,
        description: report.tierInfo.description,
        targetFPS: report.tierInfo.targetFPS,
        notes: [],
      },
      minimum: {
        tier: 'low',
        description: this.hardwareTiers.low.description,
        targetFPS: this.hardwareTiers.low.targetFPS,
        notes: [],
      },
    };
    // Generate notes based on current scene complexity
    if (report.monitor.metrics.drawCalls.current > 1000) {
      requirements.minimum.notes.push('Scene has high draw call count. LOD system required on lower-end hardware.');
    }
    if (report.monitor.metrics.triangles.current > 500000) {
      requirements.minimum.notes.push('High triangle count. Reduce geometry complexity or use LOD on lower-end hardware.');
    }
    if (report.lod.totalObjects > 10) {
      requirements.recommended.notes.push(`LOD system managing ${report.lod.totalObjects} objects.`);
    }
    return requirements;
  }
  /**
   * Set hardware tier manually ('high' | 'medium' | 'low'); unknown tiers
   * are silently ignored.
   */
  setHardwareTier(tier) {
    if (this.hardwareTiers[tier]) {
      this.currentTier = tier;
      this.applyTierSettings();
      console.log(`[PerformanceSystem] Manually set to ${this.hardwareTiers[tier].name}`);
    }
  }
  /**
   * Toggle the whole performance system on/off.
   * @returns {boolean} new enabled state.
   */
  toggle() {
    this.isEnabled = !this.isEnabled;
    console.log(`[PerformanceSystem] ${this.isEnabled ? 'Enabled' : 'Disabled'}`);
    return this.isEnabled;
  }
  /**
   * Toggle automatic LOD switching.
   * @returns {boolean} new autoLOD state.
   */
  toggleLOD() {
    this.autoLOD = !this.autoLOD;
    console.log(`[PerformanceSystem] LOD ${this.autoLOD ? 'Enabled' : 'Disabled'}`);
    return this.autoLOD;
  }
  /**
   * Toggle periodic texture auditing.
   * @returns {boolean} new autoAudit state.
   */
  toggleAudit() {
    this.autoAudit = !this.autoAudit;
    console.log(`[PerformanceSystem] Texture auditing ${this.autoAudit ? 'Enabled' : 'Disabled'}`);
    return this.autoAudit;
  }
  /**
   * Cleanup subsystems and disable the system.
   */
  dispose() {
    this.lodManager.dispose();
    this.performanceMonitor.dispose();
    this.isEnabled = false;
  }
}
// Export singleton instance
// NOTE(review): constructed without the required (camera, scene, renderer)
// arguments — init() would throw on this instance. Confirm whether any caller
// uses this export rather than constructing its own PerformanceSystem.
export const performanceSystem = new PerformanceSystem();

View File

@@ -1,264 +0,0 @@
/**
* ═══════════════════════════════════════════
* NEXUS PERFORMANCE MONITOR — stats.js Integration
* ═══════════════════════════════════════════
*
* Provides real-time performance monitoring using stats.js
* and custom metrics for LOD and texture systems.
*/
// Import stats.js from CDN
const Stats = window.Stats;
export class PerformanceMonitor {
  /**
   * Real-time performance HUD built on stats.js plus custom metric panels
   * (draw calls, triangles, textures, geometries).
   *
   * @param {HTMLElement} container - DOM node the panels are appended to.
   */
  constructor(container = document.body) {
    this.stats = null;
    this.customPanels = {};
    this.isInitialized = false;
    // Each metric tracks current/min/max/avg plus a rolling history window.
    this.metrics = {
      fps: { current: 0, min: Infinity, max: 0, avg: 0, history: [] },
      frameTime: { current: 0, min: Infinity, max: 0, avg: 0, history: [] },
      drawCalls: { current: 0, min: Infinity, max: 0, avg: 0, history: [] },
      triangles: { current: 0, min: Infinity, max: 0, avg: 0, history: [] },
      textures: { current: 0, min: Infinity, max: 0, avg: 0, history: [] },
      geometries: { current: 0, min: Infinity, max: 0, avg: 0, history: [] },
    };
    this.historyLength = 60; // Store 60 samples
    this.updateInterval = 0.5; // Update stats every 0.5s
    this.lastUpdate = 0;
    this.container = container;
  }
  /**
   * Initialize the performance monitor, loading stats.js from CDN if needed.
   *
   * BUGFIX: previously this used the module-level `const Stats = window.Stats`
   * binding, which is captured at import time. When stats.js had to be loaded
   * dynamically, that binding stayed undefined and `new Stats()` threw. We now
   * re-read `window.Stats` after the load completes.
   */
  async init() {
    if (this.isInitialized) return;
    // Dynamically load stats.js if not available
    if (typeof window.Stats === 'undefined') {
      await this.loadStatsJS();
    }
    const StatsLib = window.Stats; // re-read AFTER the dynamic load
    // Create stats.js instance
    this.stats = new StatsLib();
    this.stats.dom.style.position = 'absolute';
    this.stats.dom.style.top = '0px';
    this.stats.dom.style.left = '0px';
    this.stats.dom.style.zIndex = '10000';
    this.stats.dom.id = 'nexus-stats';
    // Create custom panels
    this.createCustomPanel('drawCalls', '#ff8c00', '#1a1a1a', 'Draw Calls');
    this.createCustomPanel('triangles', '#00ff8c', '#1a1a1a', 'Triangles');
    this.createCustomPanel('textures', '#ff008c', '#1a1a1a', 'Textures');
    this.createCustomPanel('geometries', '#008cff', '#1a1a1a', 'Geometries');
    // Add to container
    this.container.appendChild(this.stats.dom);
    // Stack the custom panels vertically below the FPS panel.
    let topOffset = 48;
    for (const panel of Object.values(this.customPanels)) {
      panel.dom.style.top = `${topOffset}px`;
      this.container.appendChild(panel.dom);
      topOffset += 48;
    }
    this.isInitialized = true;
    console.log('[PerformanceMonitor] Initialized with stats.js');
  }
  /**
   * Load stats.js from CDN. Resolves once the script tag has loaded,
   * rejects when the network fetch fails.
   */
  async loadStatsJS() {
    return new Promise((resolve, reject) => {
      const script = document.createElement('script');
      script.src = 'https://cdn.jsdelivr.net/npm/stats.js@0.17.0/build/stats.min.js';
      script.onload = () => {
        console.log('[PerformanceMonitor] stats.js loaded');
        resolve();
      };
      script.onerror = () => {
        console.error('[PerformanceMonitor] Failed to load stats.js');
        reject(new Error('Failed to load stats.js'));
      };
      document.head.appendChild(script);
    });
  }
  /**
   * Create a custom stats panel and wrap it in a positioned container.
   * Uses window.Stats (not the import-time binding) — see init().
   */
  createCustomPanel(name, fg, bg, label) {
    const panel = new window.Stats.Panel(label, fg, bg);
    const container = document.createElement('div');
    container.style.cssText = `
      position: absolute;
      top: 48px;
      left: 0px;
      cursor: pointer;
      opacity: 0.9;
      z-index: 10000;
    `;
    container.appendChild(panel.dom);
    this.customPanels[name] = {
      panel,
      dom: container,
      label,
    };
  }
  /**
   * Update performance metrics from the renderer; panel updates are
   * throttled to updateInterval while stats.js ticks every frame.
   *
   * @param {THREE.WebGLRenderer} renderer - Source of .info counters.
   * @param {THREE.Scene} scene - Unused directly; kept for API symmetry.
   * @param {number} deltaTime - Seconds since the previous frame.
   */
  update(renderer, scene, deltaTime) {
    if (!this.isInitialized) return;
    // Update stats.js FPS panel
    this.stats.update();
    // Update custom panels every interval
    this.lastUpdate += deltaTime;
    if (this.lastUpdate < this.updateInterval) return;
    this.lastUpdate = 0;
    // Get renderer info
    const info = renderer.info;
    // Update metrics
    this.updateMetric('drawCalls', info.render.calls);
    this.updateMetric('triangles', info.render.triangles);
    this.updateMetric('textures', info.memory.textures);
    this.updateMetric('geometries', info.memory.geometries);
    // Update custom panels
    this.updateCustomPanel('drawCalls', info.render.calls);
    this.updateCustomPanel('triangles', info.render.triangles);
    this.updateCustomPanel('textures', info.memory.textures);
    this.updateCustomPanel('geometries', info.memory.geometries);
    // Update FPS metric
    const fps = 1 / deltaTime;
    this.updateMetric('fps', fps);
    this.updateMetric('frameTime', deltaTime * 1000); // ms
  }
  /**
   * Fold a new sample into a metric: current/min/max, bounded history, avg.
   */
  updateMetric(name, value) {
    const metric = this.metrics[name];
    metric.current = value;
    metric.min = Math.min(metric.min, value);
    metric.max = Math.max(metric.max, value);
    // Add to history
    metric.history.push(value);
    if (metric.history.length > this.historyLength) {
      metric.history.shift();
    }
    // Calculate average
    metric.avg = metric.history.reduce((a, b) => a + b, 0) / metric.history.length;
  }
  /**
   * Push a value to a custom panel, if it exists.
   */
  updateCustomPanel(name, value) {
    const panel = this.customPanels[name];
    if (panel) {
      panel.panel.update(value, 1000); // Scale to 1000 for visibility
    }
  }
  /**
   * Build a report with metric snapshots, recommendations, and a 0-100 score
   * penalized by low FPS, high draw calls, triangles, and texture counts.
   */
  getReport() {
    const report = {
      timestamp: Date.now(),
      metrics: {},
      recommendations: [],
      score: 100,
    };
    for (const [name, metric] of Object.entries(this.metrics)) {
      report.metrics[name] = {
        current: metric.current,
        min: metric.min,
        max: metric.max,
        avg: metric.avg,
        history: [...metric.history],
      };
    }
    // Generate recommendations based on metrics
    if (this.metrics.fps.avg < 30) {
      report.recommendations.push('Average FPS below 30. Consider reducing scene complexity.');
      report.score -= 30;
    } else if (this.metrics.fps.avg < 45) {
      report.recommendations.push('Average FPS below 45. LOD system should help.');
      report.score -= 15;
    }
    if (this.metrics.drawCalls.avg > 1000) {
      report.recommendations.push('High draw call count. Consider merging geometries.');
      report.score -= 20;
    }
    if (this.metrics.triangles.avg > 500000) {
      report.recommendations.push('High triangle count. Use LOD for complex meshes.');
      report.score -= 15;
    }
    if (this.metrics.textures.avg > 50) {
      report.recommendations.push('Too many textures. Consider texture atlasing.');
      report.score -= 10;
    }
    report.score = Math.max(0, report.score);
    return report;
  }
  /**
   * Show/hide all panels.
   */
  setVisible(visible) {
    if (this.stats) {
      this.stats.dom.style.display = visible ? 'block' : 'none';
    }
    for (const panel of Object.values(this.customPanels)) {
      panel.dom.style.display = visible ? 'block' : 'none';
    }
  }
  /**
   * Toggle panel visibility.
   */
  toggle() {
    if (this.stats) {
      const current = this.stats.dom.style.display !== 'none';
      this.setVisible(!current);
    }
  }
  /**
   * Remove all panels from the DOM and reset initialization state.
   */
  dispose() {
    if (this.stats) {
      this.container.removeChild(this.stats.dom);
    }
    for (const panel of Object.values(this.customPanels)) {
      this.container.removeChild(panel.dom);
    }
    this.isInitialized = false;
  }
}
// Export singleton instance
// NOTE(review): the default container (document.body) is evaluated here at
// module-import time, so this export requires a DOM to exist when the module
// is first loaded — confirm load order in the host page.
export const performanceMonitor = new PerformanceMonitor();

View File

@@ -1,234 +0,0 @@
/**
* ═══════════════════════════════════════════
* NEXUS TEXTURE AUDIT — Compression & Optimization
* ═══════════════════════════════════════════
*
* Audits textures for performance on local hardware.
* Provides compression recommendations and optimization.
*/
import * as THREE from 'three';
export class TextureAuditor {
  /**
   * Audits textures against size/VRAM budgets and produces compression
   * recommendations. Budgets default to M1-Mac-friendly values.
   */
  constructor() {
    this.textureCache = new Map();
    this.compressionFormats = {
      webp: { extension: '.webp', mimeType: 'image/webp', quality: 0.8 },
      basis: { extension: '.basis', mimeType: 'application/octet-stream' },
      ktx2: { extension: '.ktx2', mimeType: 'image/ktx2' },
    };
    this.auditResults = [];
    this.maxTextureSize = 2048; // Max texture size for M1 Mac
    this.maxTotalTextures = 50; // Max textures in scene
    this.maxTotalVRAM = 256 * 1024 * 1024; // 256MB VRAM budget
  }
  /**
   * Audit a single texture for performance issues.
   *
   * @param {THREE.Texture} texture
   * @param {string} name - Label used in logs and results.
   * @returns {{name, texture, issues, recommendations, score, timestamp}}
   *   score is 0-100, reduced per detected issue.
   */
  auditTexture(texture, name = 'unknown') {
    const issues = [];
    const recommendations = [];
    let score = 100;
    // Check texture size
    if (texture.image) {
      const width = texture.image.width || 0;
      const height = texture.image.height || 0;
      const pixels = width * height;
      const estimatedVRAM = pixels * 4; // RGBA, 4 bytes/pixel (mips not counted)
      if (width > this.maxTextureSize || height > this.maxTextureSize) {
        issues.push(`Texture too large: ${width}x${height} (max: ${this.maxTextureSize}x${this.maxTextureSize})`);
        recommendations.push(`Resize to ${this.maxTextureSize}x${this.maxTextureSize} or smaller`);
        score -= 30;
      }
      if (estimatedVRAM > 16 * 1024 * 1024) { // >16MB
        issues.push(`High VRAM usage: ${(estimatedVRAM / 1024 / 1024).toFixed(1)}MB`);
        recommendations.push('Use compressed texture format (WebP, Basis, or KTX2)');
        score -= 20;
      }
      // Check if power of two
      if (!this.isPowerOfTwo(width) || !this.isPowerOfTwo(height)) {
        issues.push('Texture dimensions not power of two');
        recommendations.push('Resize to nearest power of two (e.g., 512x512, 1024x1024)');
        score -= 15;
      }
    }
    // Check format
    if (texture.format === THREE.RGBAFormat && texture.type === THREE.UnsignedByteType) {
      // Uncompressed RGBA
      recommendations.push('Consider using compressed format for better performance');
      score -= 10;
    }
    // Check filtering
    if (texture.minFilter === THREE.LinearFilter || texture.magFilter === THREE.LinearFilter) {
      // Linear filtering is more expensive
      if (texture.generateMipmaps) {
        recommendations.push('Use mipmaps with linear filtering for better quality/performance');
      }
    }
    // Check wrapping
    if (texture.wrapS === THREE.RepeatWrapping || texture.wrapT === THREE.RepeatWrapping) {
      // Repeating textures can cause issues with compressed formats
      if (texture.image && (!this.isPowerOfTwo(texture.image.width) || !this.isPowerOfTwo(texture.image.height))) {
        issues.push('Repeating texture with non-power-of-two dimensions');
        score -= 10;
      }
    }
    const result = {
      name,
      texture,
      issues,
      recommendations,
      score: Math.max(0, score),
      timestamp: Date.now(),
    };
    this.auditResults.push(result);
    return result;
  }
  /**
   * Audit all textures reachable from the materials in a scene.
   *
   * @param {THREE.Scene} scene
   * @returns {Object} scene-level audit summary (counts, VRAM, average score).
   */
  auditScene(scene) {
    const textures = new Set();
    scene.traverse((object) => {
      if (object.material) {
        const materials = Array.isArray(object.material) ? object.material : [object.material];
        for (const material of materials) {
          for (const key in material) {
            if (material[key] && material[key] instanceof THREE.Texture) {
              textures.add(material[key]);
            }
          }
        }
      }
    });
    console.log(`Found ${textures.size} textures in scene`);
    let totalVRAM = 0;
    const textureList = Array.from(textures);
    for (let i = 0; i < textureList.length; i++) {
      const texture = textureList[i];
      const name = `texture_${i}`;
      const result = this.auditTexture(texture, name);
      if (texture.image) {
        const width = texture.image.width || 0;
        const height = texture.image.height || 0;
        totalVRAM += width * height * 4;
      }
      console.log(`Texture ${name}: Score ${result.score}/100`);
      if (result.issues.length > 0) {
        console.log(`  Issues: ${result.issues.join(', ')}`);
      }
    }
    // Overall scene audit. BUGFIX: guard the average against an empty result
    // list — a texture-free scene previously produced NaN (0 / 0).
    const sceneAudit = {
      totalTextures: textures.size,
      totalVRAM: totalVRAM,
      totalVRAMMB: (totalVRAM / 1024 / 1024).toFixed(1),
      averageScore: this.auditResults.length > 0
        ? this.auditResults.reduce((sum, r) => sum + r.score, 0) / this.auditResults.length
        : 100,
      exceedsTextureLimit: textures.size > this.maxTotalTextures,
      exceedsVRAMLimit: totalVRAM > this.maxTotalVRAM,
    };
    console.log('\n=== Scene Texture Audit ===');
    console.log(`Total textures: ${sceneAudit.totalTextures}`);
    console.log(`Total VRAM: ${sceneAudit.totalVRAMMB}MB`);
    console.log(`Average score: ${sceneAudit.averageScore.toFixed(1)}/100`);
    console.log(`Texture limit exceeded: ${sceneAudit.exceedsTextureLimit}`);
    console.log(`VRAM limit exceeded: ${sceneAudit.exceedsVRAMLimit}`);
    return sceneAudit;
  }
  /**
   * Generate compression recommendations from the accumulated audit results.
   * Savings figures are rough estimates (uncompressed RGBA baseline).
   */
  generateCompressionPlan() {
    const plan = {
      webpCandidates: [],
      basisCandidates: [],
      resizeCandidates: [],
      totalSavings: 0,
    };
    for (const result of this.auditResults) {
      const texture = result.texture;
      if (!texture.image) continue;
      const width = texture.image.width || 0;
      const height = texture.image.height || 0;
      const currentSize = width * height * 4; // RGBA uncompressed
      if (width > 1024 || height > 1024) {
        const targetSize = Math.min(width, height, 1024);
        const newSize = targetSize * targetSize * 4;
        const savings = currentSize - newSize;
        plan.resizeCandidates.push({
          name: result.name,
          currentSize: `${width}x${height}`,
          targetSize: `${targetSize}x${targetSize}`,
          savingsMB: (savings / 1024 / 1024).toFixed(1),
        });
        plan.totalSavings += savings;
      }
      if (currentSize > 4 * 1024 * 1024) { // >4MB
        const webpSavings = currentSize * 0.7; // ~30% savings with WebP
        plan.webpCandidates.push({
          name: result.name,
          currentSizeMB: (currentSize / 1024 / 1024).toFixed(1),
          estimatedSavingsMB: (webpSavings / 1024 / 1024).toFixed(1),
        });
        plan.totalSavings += webpSavings;
      }
    }
    console.log('\n=== Compression Plan ===');
    console.log(`Textures to resize: ${plan.resizeCandidates.length}`);
    console.log(`Textures for WebP: ${plan.webpCandidates.length}`);
    console.log(`Estimated total savings: ${(plan.totalSavings / 1024 / 1024).toFixed(1)}MB`);
    return plan;
  }
  /**
   * Check if a number is a power of two (0 is not).
   */
  isPowerOfTwo(n) {
    return n !== 0 && (n & (n - 1)) === 0;
  }
  /**
   * Get accumulated audit results.
   */
  getResults() {
    return this.auditResults;
  }
  /**
   * Clear audit results.
   */
  clear() {
    this.auditResults = [];
  }
}
// Export singleton instance
// Shared auditor instance; stateless until auditTexture/auditScene is called.
export const textureAuditor = new TextureAuditor();

377
tests/test_agent_memory.py Normal file
View File

@@ -0,0 +1,377 @@
"""
Tests for agent memory — cross-session agent memory via MemPalace.
Tests the memory module, hooks, and session mining without requiring
a live ChromaDB instance. Uses mocking for the MemPalace backend.
"""
from __future__ import annotations
import json
import tempfile
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from agent.memory import (
AgentMemory,
MemoryContext,
SessionTranscript,
create_agent_memory,
)
from agent.memory_hooks import MemoryHooks
# ---------------------------------------------------------------------------
# SessionTranscript tests
# ---------------------------------------------------------------------------
class TestSessionTranscript:
    """Unit tests for SessionTranscript: creation, turn recording, summaries."""

    @staticmethod
    def _make():
        # Shared factory — every test here uses the same agent/wing pair.
        return SessionTranscript(agent_name="test", wing="wing_test")

    def test_create(self):
        transcript = self._make()
        assert transcript.agent_name == "test"
        assert transcript.wing == "wing_test"
        assert len(transcript.entries) == 0

    def test_add_user_turn(self):
        transcript = self._make()
        transcript.add_user_turn("Hello")
        assert len(transcript.entries) == 1
        first = transcript.entries[0]
        assert first["role"] == "user"
        assert first["text"] == "Hello"

    def test_add_agent_turn(self):
        transcript = self._make()
        transcript.add_agent_turn("Response")
        assert transcript.entries[0]["role"] == "agent"

    def test_add_tool_call(self):
        transcript = self._make()
        transcript.add_tool_call("shell", "ls", "file1 file2")
        entry = transcript.entries[0]
        assert entry["role"] == "tool"
        assert entry["tool"] == "shell"

    def test_summary_empty(self):
        assert self._make().summary() == "Empty session."

    def test_summary_with_entries(self):
        transcript = self._make()
        transcript.add_user_turn("Do something")
        transcript.add_agent_turn("Done")
        transcript.add_tool_call("shell", "ls", "ok")
        rendered = transcript.summary()
        for expected in ("USER: Do something", "AGENT: Done", "TOOL(shell): ok"):
            assert expected in rendered

    def test_text_truncation(self):
        transcript = self._make()
        transcript.add_user_turn("x" * 5000)
        # Stored text is clamped to 2000 characters.
        assert len(transcript.entries[0]["text"]) == 2000
# ---------------------------------------------------------------------------
# MemoryContext tests
# ---------------------------------------------------------------------------
class TestMemoryContext:
    """Tests for MemoryContext prompt-block rendering."""

    def test_empty_context(self):
        assert MemoryContext().to_prompt_block() == ""

    def test_unloaded_context(self):
        context = MemoryContext()
        context.loaded = False
        assert context.to_prompt_block() == ""

    def test_loaded_with_data(self):
        context = MemoryContext()
        context.loaded = True
        context.recent_diaries = [
            {"text": "Fixed PR #1386", "timestamp": "2026-04-13T10:00:00Z"}
        ]
        context.facts = [{"text": "Bezalel runs on VPS Beta", "score": 0.95}]
        context.relevant_memories = [{"text": "Changed CI runner", "score": 0.87}]
        rendered = context.to_prompt_block()
        # Every populated section header and its content must be rendered.
        for snippet in (
            "Recent Session Summaries",
            "Fixed PR #1386",
            "Known Facts",
            "Bezalel runs on VPS Beta",
            "Relevant Past Memories",
        ):
            assert snippet in rendered

    def test_loaded_empty(self):
        # loaded=True but no diaries/facts/memories -> still renders nothing.
        context = MemoryContext()
        context.loaded = True
        assert context.to_prompt_block() == ""
# ---------------------------------------------------------------------------
# AgentMemory tests (with mocked MemPalace)
# ---------------------------------------------------------------------------
class TestAgentMemory:
    """Tests for AgentMemory, exercising graceful-degradation paths."""

    @staticmethod
    def _offline(name="test"):
        # Build an AgentMemory with the MemPalace backend forced off.
        memory = AgentMemory(agent_name=name)
        memory._available = False
        return memory

    def test_create(self):
        memory = AgentMemory(agent_name="bezalel")
        assert memory.agent_name == "bezalel"
        assert memory.wing == "wing_bezalel"

    def test_custom_wing(self):
        memory = AgentMemory(agent_name="bezalel", wing="custom_wing")
        assert memory.wing == "custom_wing"

    def test_factory(self):
        memory = create_agent_memory("ezra")
        assert memory.agent_name == "ezra"
        assert memory.wing == "wing_ezra"

    def test_unavailable_graceful(self):
        """Every operation degrades quietly when MemPalace is unavailable."""
        memory = self._offline()
        # recall_context should not raise; it reports the failure instead.
        context = memory.recall_context("test query")
        assert context.loaded is False
        assert context.error == "MemPalace unavailable"
        # remember returns None
        assert memory.remember("test") is None
        # search returns empty
        assert memory.search("test") == []

    def test_start_end_session(self):
        memory = self._offline()
        transcript = memory.start_session()
        assert isinstance(transcript, SessionTranscript)
        assert memory._transcript is not None
        memory.end_session()
        assert memory._transcript is None

    def test_remember_graceful_when_unavailable(self):
        """remember() returns None instead of raising when offline."""
        assert self._offline().remember("some important fact") is None

    def test_write_diary_from_transcript(self):
        memory = self._offline()
        transcript = memory.start_session()
        transcript.add_user_turn("Hello")
        transcript.add_agent_turn("Hi there")
        # Diary writing is a no-op (None) without a MemPalace backend.
        assert memory.write_diary() is None
# ---------------------------------------------------------------------------
# MemoryHooks tests
# ---------------------------------------------------------------------------
class TestMemoryHooks:
    """Tests for MemoryHooks session-lifecycle guards."""

    @staticmethod
    def _offline_hooks():
        # Hooks wired to an AgentMemory whose MemPalace backend is forced off.
        hooks = MemoryHooks(agent_name="test")
        hooks._memory = AgentMemory(agent_name="test")
        hooks._memory._available = False
        return hooks

    def test_create(self):
        hooks = MemoryHooks(agent_name="bezalel")
        assert hooks.agent_name == "bezalel"
        assert hooks.is_active is False

    def test_session_lifecycle(self):
        hooks = self._offline_hooks()
        # Start session
        prompt_block = hooks.on_session_start()
        assert hooks.is_active is True
        assert prompt_block == ""  # no memory backend -> nothing to inject
        # Record turns and a decision
        hooks.on_user_turn("Hello")
        hooks.on_agent_turn("Hi")
        hooks.on_tool_call("shell", "ls", "ok")
        hooks.on_important_decision("Switched to self-hosted CI")
        # End session
        hooks.on_session_end()
        assert hooks.is_active is False

    def test_hooks_before_session(self):
        """Turn hooks fired before on_session_start are silent no-ops."""
        hooks = self._offline_hooks()
        # Should not raise
        hooks.on_user_turn("Hello")
        hooks.on_agent_turn("Response")

    def test_hooks_after_session_end(self):
        """Turn hooks fired after on_session_end are silent no-ops."""
        hooks = self._offline_hooks()
        hooks.on_session_start()
        hooks.on_session_end()
        # Should not raise; a second end returns nothing
        hooks.on_user_turn("Late message")
        assert hooks.on_session_end() is None

    def test_search_during_session(self):
        assert self._offline_hooks().search("some query") == []
# ---------------------------------------------------------------------------
# Session mining tests
# ---------------------------------------------------------------------------
class TestSessionMining:
    """Tests for the bin/memory_mine.py transcript-mining helpers."""

    def test_parse_session_file(self, tmp_path):
        """Valid JSONL turns are kept in order; blank/malformed lines skipped.

        Uses the pytest tmp_path fixture (like the other tests in this class)
        instead of NamedTemporaryFile(delete=False): the old approach leaked
        the temp file when an assertion failed, and re-reading a still-open
        NamedTemporaryFile is not portable to Windows.
        """
        from bin.memory_mine import parse_session_file

        session_file = tmp_path / "session.jsonl"
        session_file.write_text(
            '{"role": "user", "content": "Hello"}\n'
            '{"role": "assistant", "content": "Hi there"}\n'
            '{"role": "tool", "name": "shell", "content": "ls output"}\n'
            "\n"  # blank line must be skipped
            "not json\n"  # malformed line must be skipped
        )
        turns = parse_session_file(session_file)
        assert len(turns) == 3
        assert turns[0]["role"] == "user"
        assert turns[1]["role"] == "assistant"
        assert turns[2]["role"] == "tool"

    def test_summarize_session(self):
        """Summary mentions the agent, the opening ask, and tools used."""
        from bin.memory_mine import summarize_session
        turns = [
            {"role": "user", "content": "Check CI"},
            {"role": "assistant", "content": "Running CI check..."},
            {"role": "tool", "name": "shell", "content": "5 tests passed"},
            {"role": "assistant", "content": "CI is healthy"},
        ]
        summary = summarize_session(turns, "bezalel")
        assert "bezalel" in summary
        assert "Check CI" in summary
        assert "shell" in summary

    def test_summarize_empty(self):
        """An empty transcript yields the fixed 'Empty session.' sentinel."""
        from bin.memory_mine import summarize_session
        assert summarize_session([], "test") == "Empty session."

    def test_find_session_files(self, tmp_path):
        """Only *.jsonl files count as sessions; other files are ignored."""
        from bin.memory_mine import find_session_files
        (tmp_path / "session1.jsonl").write_text("{}\n")
        (tmp_path / "session2.jsonl").write_text("{}\n")
        (tmp_path / "notes.txt").write_text("not a session")
        files = find_session_files(tmp_path, days=365)
        assert len(files) == 2
        assert all(f.suffix == ".jsonl" for f in files)

    def test_find_session_files_missing_dir(self):
        """A nonexistent directory yields an empty list, not an error."""
        from bin.memory_mine import find_session_files
        files = find_session_files(Path("/nonexistent/path"), days=7)
        assert files == []

    def test_mine_session_dry_run(self, tmp_path):
        """Dry run parses the session but stores nothing (returns None)."""
        from bin.memory_mine import mine_session
        session_file = tmp_path / "test.jsonl"
        session_file.write_text(
            '{"role": "user", "content": "Hello"}\n'
            '{"role": "assistant", "content": "Hi"}\n'
        )
        result = mine_session(session_file, wing="wing_test", dry_run=True)
        assert result is None

    def test_mine_session_empty_file(self, tmp_path):
        """An empty session file produces nothing to store."""
        from bin.memory_mine import mine_session
        session_file = tmp_path / "empty.jsonl"
        session_file.write_text("")
        result = mine_session(session_file, wing="wing_test")
        assert result is None
# ---------------------------------------------------------------------------
# Integration test — full lifecycle
# ---------------------------------------------------------------------------
class TestFullLifecycle:
    """End-to-end session flow with no real MemPalace backend attached."""

    def test_full_session_flow(self):
        hooks = MemoryHooks(agent_name="bezalel")
        hooks._memory = AgentMemory(agent_name="bezalel")
        hooks._memory._available = False  # force graceful degradation

        # Session start returns a (possibly empty) string context block.
        context_block = hooks.on_session_start("What CI issues do I have?")
        assert isinstance(context_block, str)

        # Realistic exchange: question -> tool use -> answer -> decision -> follow-up.
        hooks.on_user_turn("Check CI pipeline health")
        hooks.on_tool_call("shell", "pytest tests/", "12 passed")
        hooks.on_agent_turn("CI pipeline is healthy. All 12 tests passing.")
        hooks.on_important_decision("Decided to keep current CI runner", room="forge")
        hooks.on_user_turn("Good, check memory integration next")
        hooks.on_agent_turn("Will test agent.memory module")

        # Session end deactivates the hooks even without a backend.
        hooks.on_session_end()
        assert hooks.is_active is False

View File

@@ -1,254 +0,0 @@
#!/usr/bin/env node
/**
* ═══════════════════════════════════════════
* NEXUS TEXTURE AUDIT CLI — Standalone Audit Tool
* ═══════════════════════════════════════════
*
* Command-line tool to audit textures in the Nexus project.
* Provides compression recommendations and VRAM estimates.
*/
const fs = require('fs');
const path = require('path');
const { execSync } = require('child_process');
// Configuration
// Audit thresholds and environment knobs; tweak per project budget.
const CONFIG = {
  maxTextureSize: 2048, // per-texture dimension cap in pixels (width or height)
  maxTotalTextures: 50, // project-wide texture count budget
  maxTotalVRAM: 256 * 1024 * 1024, // 256MB — estimated uncompressed RGBA budget
  textureExtensions: ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp', '.basis', '.ktx2'], // treated as textures when scanning
  imageMagickPath: 'convert', // Path to ImageMagick convert
};
/**
 * Scans a project tree for texture files, scores each one against the
 * CONFIG budgets (size, dimensions, format, estimated VRAM) and prints a
 * compression plan. Fixes vs. the previous version:
 *  - the ImageMagick probe was `convert -format ... "file"`, an invalid
 *    invocation (convert needs the input image first and an output target),
 *    so dimensions were never read and all dimension/POT/VRAM checks were
 *    dead; it now uses `convert "file" -format "%wx%h" info:`;
 *  - summary/average computations are guarded so an empty project prints
 *    sane values instead of NaN.
 */
class TextureAuditor {
  /**
   * @param {string} projectRoot Root directory to scan.
   */
  constructor(projectRoot) {
    this.projectRoot = projectRoot;
    this.textureFiles = []; // absolute paths of discovered texture files
    this.auditResults = []; // one result record per audited file
    this.totalVRAM = 0;     // running estimate in bytes (uncompressed RGBA)
  }
  /**
   * Recursively scan the project for texture files (by extension).
   * Skips node_modules and .git. Populates and returns this.textureFiles.
   */
  scanForTextures() {
    console.log(`Scanning ${this.projectRoot} for textures...`);
    const scanDir = (dir) => {
      const entries = fs.readdirSync(dir, { withFileTypes: true });
      for (const entry of entries) {
        const fullPath = path.join(dir, entry.name);
        if (entry.isDirectory()) {
          // Skip node_modules and .git
          if (entry.name !== 'node_modules' && entry.name !== '.git') {
            scanDir(fullPath);
          }
        } else if (entry.isFile()) {
          const ext = path.extname(entry.name).toLowerCase();
          if (CONFIG.textureExtensions.includes(ext)) {
            this.textureFiles.push(fullPath);
          }
        }
      }
    };
    scanDir(this.projectRoot);
    console.log(`Found ${this.textureFiles.length} texture files`);
    return this.textureFiles;
  }
  /**
   * Audit a single texture file: checks file size, dimensions,
   * power-of-two-ness, format, and estimated VRAM; accumulates a score
   * starting at 100. Appends and returns the result record.
   * @param {string} filePath Absolute path of the texture to audit.
   */
  auditTexture(filePath) {
    const result = {
      file: path.relative(this.projectRoot, filePath),
      issues: [],
      recommendations: [],
      score: 100,
    };
    try {
      const stats = fs.statSync(filePath);
      const fileSize = stats.size;
      const ext = path.extname(filePath).toLowerCase();
      // Get image dimensions if possible.
      let width = 0;
      let height = 0;
      try {
        // `convert <input> -format "%wx%h" info:` prints WxH to stdout.
        // NOTE(review): the path is shell-quoted but filenames containing
        // `"` or `$` would still break the command — acceptable for local
        // project scans, but worth hardening if inputs become untrusted.
        const identify = execSync(
          `${CONFIG.imageMagickPath} "${filePath}" -format "%wx%h" info:`,
          { encoding: 'utf8' }
        );
        const match = identify.match(/(\d+)x(\d+)/);
        if (match) {
          width = parseInt(match[1]);
          height = parseInt(match[2]);
        }
      } catch (e) {
        // ImageMagick not available, skip dimension check
      }
      // Calculate estimated VRAM (uncompressed RGBA: 4 bytes/pixel).
      const vram = width * height * 4;
      this.totalVRAM += vram;
      // Check file size
      if (fileSize > 10 * 1024 * 1024) { // >10MB
        result.issues.push(`Large file size: ${(fileSize / 1024 / 1024).toFixed(1)}MB`);
        result.recommendations.push('Consider compressing or using a different format');
        result.score -= 20;
      }
      // Check dimensions
      if (width > CONFIG.maxTextureSize || height > CONFIG.maxTextureSize) {
        result.issues.push(`Texture too large: ${width}x${height} (max: ${CONFIG.maxTextureSize}x${CONFIG.maxTextureSize})`);
        result.recommendations.push(`Resize to ${CONFIG.maxTextureSize}x${CONFIG.maxTextureSize} or smaller`);
        result.score -= 30;
      }
      // Check if power of two (mipmapping/compat friendly)
      if (width > 0 && height > 0) {
        if (!this.isPowerOfTwo(width) || !this.isPowerOfTwo(height)) {
          result.issues.push('Texture dimensions not power of two');
          result.recommendations.push('Resize to nearest power of two (e.g., 512x512, 1024x1024)');
          result.score -= 15;
        }
      }
      // Check format
      if (ext === '.png' || ext === '.jpg' || ext === '.jpeg') {
        result.recommendations.push('Consider using WebP for better compression');
        result.score -= 10;
      }
      // Check VRAM usage
      if (vram > 16 * 1024 * 1024) { // >16MB
        result.issues.push(`High VRAM usage: ${(vram / 1024 / 1024).toFixed(1)}MB`);
        result.recommendations.push('Use compressed texture format (WebP, Basis, or KTX2)');
        result.score -= 20;
      }
    } catch (error) {
      result.issues.push(`Error reading file: ${error.message}`);
      result.score = 0;
    }
    result.score = Math.max(0, result.score);
    this.auditResults.push(result);
    return result;
  }
  /**
   * Run the full audit: scan, audit each texture, print per-file issues,
   * a summary, and the compression plan.
   */
  audit() {
    this.scanForTextures();
    console.log('\n=== Texture Audit Results ===\n');
    let totalScore = 0;
    let issuesFound = 0;
    for (const file of this.textureFiles) {
      const result = this.auditTexture(file);
      totalScore += result.score;
      issuesFound += result.issues.length;
      if (result.issues.length > 0) {
        console.log(`\n${result.file}:`);
        console.log(`  Score: ${result.score}/100`);
        result.issues.forEach(issue => console.log(`  ⚠️  ${issue}`));
        result.recommendations.forEach(rec => console.log(`  💡 ${rec}`));
      }
    }
    // Summary — guard the average so an empty project prints 'n/a', not NaN.
    const count = this.textureFiles.length;
    const avgScore = count > 0 ? (totalScore / count).toFixed(1) : 'n/a';
    console.log('\n=== Audit Summary ===');
    console.log(`Total textures: ${count}`);
    console.log(`Total VRAM: ${(this.totalVRAM / 1024 / 1024).toFixed(1)}MB`);
    console.log(`Average score: ${avgScore}/100`);
    console.log(`Issues found: ${issuesFound}`);
    console.log(`Texture limit: ${count > CONFIG.maxTotalTextures ? 'EXCEEDED' : 'OK'}`);
    console.log(`VRAM limit: ${this.totalVRAM > CONFIG.maxTotalVRAM ? 'EXCEEDED' : 'OK'}`);
    // Generate compression plan
    this.generateCompressionPlan();
  }
  /**
   * Print a compression plan: which textures to convert to WebP and which
   * to resize, based on the audit results gathered so far.
   */
  generateCompressionPlan() {
    console.log('\n=== Compression Plan ===');
    const webpCandidates = [];
    const resizeCandidates = [];
    for (const result of this.auditResults) {
      if (result.score < 80) {
        const ext = path.extname(result.file).toLowerCase();
        if (ext === '.png' || ext === '.jpg' || ext === '.jpeg') {
          webpCandidates.push(result.file);
        }
        if (result.issues.some(i => i.includes('too large'))) {
          resizeCandidates.push(result.file);
        }
      }
    }
    console.log(`Textures to convert to WebP: ${webpCandidates.length}`);
    webpCandidates.forEach(f => console.log(`  📦 ${f}`));
    console.log(`Textures to resize: ${resizeCandidates.length}`);
    resizeCandidates.forEach(f => console.log(`  📐 ${f}`));
    if (webpCandidates.length > 0) {
      console.log('\nTo convert to WebP:');
      console.log('  for file in *.png; do cwebp -q 80 "$file" -o "${file%.png}.webp"; done');
    }
  }
  /**
   * Check if a number is a power of two (bit trick; 0 is not).
   * @param {number} n
   * @returns {boolean}
   */
  isPowerOfTwo(n) {
    return n !== 0 && (n & (n - 1)) === 0;
  }
  /**
   * Save the audit results to a JSON report file.
   * @param {string} outputPath Destination path for the JSON report.
   */
  saveResults(outputPath) {
    // Guard the average so an empty audit writes 0, not NaN (invalid JSON-ish).
    const n = this.auditResults.length;
    const report = {
      timestamp: new Date().toISOString(),
      projectRoot: this.projectRoot,
      totalTextures: this.textureFiles.length,
      totalVRAM: this.totalVRAM,
      averageScore: n > 0 ? this.auditResults.reduce((sum, r) => sum + r.score, 0) / n : 0,
      results: this.auditResults,
    };
    fs.writeFileSync(outputPath, JSON.stringify(report, null, 2));
    console.log(`\nAudit results saved to: ${outputPath}`);
  }
}
// CLI interface: `node <script> [projectRoot] [reportPath]`
if (require.main === module) {
  const rootDir = process.argv[2] || process.cwd();
  const reportPath = process.argv[3];
  const auditor = new TextureAuditor(rootDir);
  auditor.audit();
  // Optionally persist the JSON report when a path was given.
  if (reportPath) {
    auditor.saveResults(reportPath);
  }
}
module.exports = TextureAuditor;