Compare commits

..

1 Commit

Author SHA1 Message Date
Alexander Whitestone
5d49b38ce3 feat: add fleet progression evaluator (#547)
Some checks failed
Smoke Test / smoke (pull_request) Failing after 18s
2026-04-15 01:06:18 -04:00
5 changed files with 451 additions and 538 deletions

View File

@@ -0,0 +1,125 @@
{
"epic_issue": 547,
"epic_title": "Fleet Progression - Paperclips-Inspired Infrastructure Evolution",
"phases": [
{
"number": 1,
"issue_number": 548,
"key": "survival",
"name": "SURVIVAL",
"summary": "Keep the lights on.",
"unlock_rules": [
{
"id": "fleet_operational_baseline",
"type": "always"
}
]
},
{
"number": 2,
"issue_number": 549,
"key": "automation",
"name": "AUTOMATION",
"summary": "Self-healing infrastructure.",
"unlock_rules": [
{
"id": "uptime_percent_30d_gte_95",
"type": "resource_gte",
"resource": "uptime_percent_30d",
"value": 95
},
{
"id": "capacity_utilization_gt_60",
"type": "resource_gt",
"resource": "capacity_utilization",
"value": 60
}
]
},
{
"number": 3,
"issue_number": 550,
"key": "orchestration",
"name": "ORCHESTRATION",
"summary": "Agents coordinate and models route.",
"unlock_rules": [
{
"id": "phase_2_issue_closed",
"type": "issue_closed",
"issue": 549
},
{
"id": "innovation_gt_100",
"type": "resource_gt",
"resource": "innovation",
"value": 100
}
]
},
{
"number": 4,
"issue_number": 551,
"key": "sovereignty",
"name": "SOVEREIGNTY",
"summary": "Zero cloud dependencies.",
"unlock_rules": [
{
"id": "phase_3_issue_closed",
"type": "issue_closed",
"issue": 550
},
{
"id": "all_models_local_true",
"type": "resource_true",
"resource": "all_models_local"
}
]
},
{
"number": 5,
"issue_number": 552,
"key": "scale",
"name": "SCALE",
"summary": "Fleet-wide coordination and auto-scaling.",
"unlock_rules": [
{
"id": "phase_4_issue_closed",
"type": "issue_closed",
"issue": 551
},
{
"id": "sovereign_stable_days_gte_30",
"type": "resource_gte",
"resource": "sovereign_stable_days",
"value": 30
},
{
"id": "innovation_gt_500",
"type": "resource_gt",
"resource": "innovation",
"value": 500
}
]
},
{
"number": 6,
"issue_number": 553,
"key": "the-network",
"name": "THE NETWORK",
"summary": "Autonomous, self-improving infrastructure.",
"unlock_rules": [
{
"id": "phase_5_issue_closed",
"type": "issue_closed",
"issue": 552
},
{
"id": "human_free_days_gte_7",
"type": "resource_gte",
"resource": "human_free_days",
"value": 7
}
]
}
]
}

View File

@@ -1,219 +0,0 @@
#!/usr/bin/env python3
"""
Codebase Genome — Test Suite Generator
Scans a Python codebase, identifies uncovered functions/methods,
and generates pytest test cases to fill coverage gaps.
Usage:
python codebase-genome.py <target_dir> [--output tests/test_genome_generated.py]
python codebase-genome.py <target_dir> --dry-run
python codebase-genome.py <target_dir> --coverage
"""
import ast
import os
import sys
import argparse
import subprocess
import json
from pathlib import Path
from typing import List, Dict, Any, Optional, Set
from dataclasses import dataclass, field
@dataclass
class FunctionInfo:
    """Metadata about one function or method discovered during a codebase scan."""
    name: str  # bare function name (no module/class prefix)
    module: str  # dotted module path relative to the scan root
    file_path: str  # path of the defining file, as a string
    line_number: int  # 1-based line of the `def` statement
    is_method: bool = False  # True when defined directly inside a class body
    class_name: Optional[str] = None  # enclosing class name when is_method is True
    args: List[str] = field(default_factory=list)  # argument names, minus self/cls
    has_return: bool = False  # True if any `return <value>` appears in the body
    raises: List[str] = field(default_factory=list)  # exception names raised via `raise X(...)`
    docstring: Optional[str] = None  # the function's docstring, if any
    is_private: bool = False  # single leading underscore (dunders excluded)
    is_test: bool = False  # never set True by the scanner today; scan() still checks it
class CodebaseScanner:
    """Recursively scans a directory tree and records every non-test function.

    Results accumulate in ``self.functions`` (flat list) and ``self.modules``
    (module name -> functions defined there).
    """

    def __init__(self, target_dir: str):
        self.target_dir = Path(target_dir).resolve()
        self.functions: List[FunctionInfo] = []
        # module name -> functions defined in that module
        self.modules: Dict[str, List[FunctionInfo]] = {}

    def scan(self) -> List[FunctionInfo]:
        """Walk the tree and return all discovered (non-test) functions."""
        for py_file in self.target_dir.rglob("*.py"):
            if self._should_skip(py_file):
                continue
            try:
                self._scan_file(py_file)
            except SyntaxError:
                print(f"Warning: Syntax error in {py_file}, skipping", file=sys.stderr)
        return self.functions

    def _should_skip(self, path: Path) -> bool:
        """Return True for vendored/virtualenv dirs, test files, and tooling files."""
        skip_dirs = {"__pycache__", ".git", ".venv", "venv", "node_modules", ".tox"}
        if set(path.parts) & skip_dirs:
            return True
        if path.name.startswith("test_") or path.name.endswith("_test.py"):
            return True
        if path.name in ("conftest.py", "setup.py"):
            return True
        return False

    def _scan_file(self, file_path: Path):
        """Parse one file and record every function/method defined in it."""
        content = file_path.read_text(encoding="utf-8", errors="replace")
        tree = ast.parse(content)
        module_name = self._get_module_name(file_path)
        # Map each direct child of a ClassDef to its class name in one pass over
        # THIS tree.  (The previous implementation re-parsed the file inside
        # _extract and compared nodes with `is` against the fresh tree's nodes,
        # which can never match — so every method was misreported as a plain
        # function — and it also never closed the re-opened file handle.)
        class_of: Dict[ast.AST, str] = {}
        for parent in ast.walk(tree):
            if isinstance(parent, ast.ClassDef):
                for child in ast.iter_child_nodes(parent):
                    class_of[child] = parent.name
        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                func = self._extract(node, module_name, file_path, class_of.get(node))
                if func and not func.is_test:
                    self.functions.append(func)
                    self.modules.setdefault(module_name, []).append(func)

    def _get_module_name(self, file_path: Path) -> str:
        """Derive the dotted module path of *file_path* relative to the scan root."""
        rel = file_path.relative_to(self.target_dir)
        parts = list(rel.parts)
        if parts[-1] == "__init__.py":
            parts = parts[:-1]
        else:
            parts[-1] = parts[-1].replace(".py", "")
        return ".".join(parts)

    def _extract(self, node, module_name: str, file_path: Path,
                 class_name: Optional[str] = None) -> Optional[FunctionInfo]:
        """Build a FunctionInfo from an AST function node; None for test functions.

        *class_name* is the enclosing class name (or None for top-level
        functions), supplied by _scan_file so no re-parse is needed here.
        """
        if node.name.startswith("test_"):
            return None
        args = [a.arg for a in node.args.args if a.arg not in ("self", "cls")]
        has_return = any(isinstance(n, ast.Return) and n.value for n in ast.walk(node))
        raises = []
        for n in ast.walk(node):
            if isinstance(n, ast.Raise) and n.exc and isinstance(n.exc, ast.Call):
                if isinstance(n.exc.func, ast.Name):
                    raises.append(n.exc.func.id)
        docstring = ast.get_docstring(node)
        return FunctionInfo(
            name=node.name, module=module_name, file_path=str(file_path),
            line_number=node.lineno, is_method=class_name is not None,
            class_name=class_name,
            args=args, has_return=has_return, raises=raises, docstring=docstring,
            is_private=node.name.startswith("_") and not node.name.startswith("__"),
        )
class TestGenerator:
HEADER = '''# AUTO-GENERATED by codebase-genome.py — review before committing
import pytest
from unittest.mock import patch, MagicMock
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
'''
def generate(self, functions: List[FunctionInfo]) -> str:
parts = [self.HEADER]
modules: Dict[str, List[FunctionInfo]] = {}
for f in functions:
modules.setdefault(f.module, []).append(f)
for mod, funcs in sorted(modules.items()):
parts.append(f"# ═══ {mod} ═══\n")
imp = mod.replace("-", "_")
parts.append(f"try:\n from {imp} import *\nexcept ImportError:\n pytest.skip('{imp} not importable', allow_module_level=True)\n")
for func in funcs:
test = self._gen_test(func)
if test:
parts.append(test + "\n")
return "\n".join(parts)
def _gen_test(self, func: FunctionInfo) -> Optional[str]:
name = f"test_{func.module.replace('.', '_')}_{func.name}"
lines = [f"def {name}():", f' """Auto-generated for {func.module}.{func.name}."""']
if not func.args:
lines += [
" try:",
f" r = {func.name}()",
" assert r is not None or r is None",
" except Exception:",
" pass",
]
else:
lines += [
" try:",
f" {func.name}({', '.join(a + '=None' for a in func.args)})",
" except (TypeError, ValueError, AttributeError):",
" pass",
]
if any(a in ("text", "content", "message", "query", "path") for a in func.args):
lines += [
" try:",
f" {func.name}({', '.join(a + '=\"\"' if a in ('text','content','message','query','path') else a + '=None' for a in func.args)})",
" except (TypeError, ValueError):",
" pass",
]
if func.raises:
lines.append(f" # May raise: {', '.join(func.raises[:2])}")
lines.append(f" # with pytest.raises(({', '.join(func.raises[:2])})):")
lines.append(f" # {func.name}()")
return "\n".join(lines)
def main():
    """Command-line entry point: scan a codebase and emit generated pytest tests."""
    parser = argparse.ArgumentParser(description="Codebase Genome — Test Generator")
    parser.add_argument("target_dir")
    parser.add_argument("--output", "-o", default="tests/test_genome_generated.py")
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--max-tests", type=int, default=100)
    options = parser.parse_args()

    root = Path(options.target_dir).resolve()
    if not root.is_dir():
        print(f"Error: {root} not a directory", file=sys.stderr)
        return 1

    print(f"Scanning {root}...")
    scanner = CodebaseScanner(str(root))
    discovered = scanner.scan()
    print(f"Found {len(discovered)} functions in {len(scanner.modules)} modules")

    # Cap output size so huge codebases don't produce an unmanageable test file.
    if len(discovered) > options.max_tests:
        print(f"Limiting to {options.max_tests}")
        discovered = discovered[:options.max_tests]

    source = TestGenerator().generate(discovered)
    if options.dry_run:
        print(source)
        return 0

    destination = root / options.output
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(source)
    print(f"Generated {len(discovered)} tests → {destination}")
    return 0
# Script entry point: propagate main()'s return code as the process exit status.
if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,234 @@
#!/usr/bin/env python3
"""Fleet progression evaluator for the Paperclips-inspired infrastructure epic.
Refs: timmy-home #547
"""
from __future__ import annotations
import argparse
import json
import os
from pathlib import Path
from urllib import request
from typing import Any
# Gitea API location and repository coordinates for issue-state lookups.
DEFAULT_BASE_URL = "https://forge.alexanderwhitestone.com/api/v1"
DEFAULT_OWNER = "Timmy_Foundation"
DEFAULT_REPO = "timmy-home"
# Personal-access token is read from disk, never embedded in the script.
DEFAULT_TOKEN_FILE = Path.home() / ".config" / "gitea" / "token"
# Progression spec shipped alongside this script (../configs/fleet_progression.json).
DEFAULT_SPEC_FILE = Path(__file__).resolve().parent.parent / "configs" / "fleet_progression.json"
# Baseline resource readings used when no overrides are supplied; keys must
# match the "resource" fields referenced by the spec's unlock rules.
DEFAULT_RESOURCES = {
    "uptime_percent_30d": 0.0,
    "capacity_utilization": 0.0,
    "innovation": 0.0,
    "all_models_local": False,
    "sovereign_stable_days": 0,
    "human_free_days": 0,
}
class GiteaClient:
    """Minimal read-only Gitea API client used to fetch issue payloads."""

    def __init__(self, token: str, owner: str = DEFAULT_OWNER, repo: str = DEFAULT_REPO, base_url: str = DEFAULT_BASE_URL):
        self.token = token
        self.owner = owner
        self.repo = repo
        # Normalize so the URL join in get_issue never produces a double slash.
        self.base_url = base_url.rstrip("/")

    def get_issue(self, issue_number: int):
        """Fetch one issue from the Gitea API and return the decoded JSON dict."""
        url = f"{self.base_url}/repos/{self.owner}/{self.repo}/issues/{issue_number}"
        req = request.Request(url, headers={"Authorization": f"token {self.token}"})
        with request.urlopen(req, timeout=30) as resp:
            payload = resp.read()
        return json.loads(payload.decode())
def load_spec(path: Path | None = None):
target = path or DEFAULT_SPEC_FILE
return json.loads(target.read_text())
def load_issue_states(spec: dict[str, Any], token_file: Path = DEFAULT_TOKEN_FILE):
    """Query Gitea for the open/closed state of every phase issue in *spec*.

    Raises FileNotFoundError when the API token file is missing.
    """
    if not token_file.exists():
        raise FileNotFoundError(f"Token file not found: {token_file}")
    client = GiteaClient(token=token_file.read_text().strip())
    states = {}
    for phase in spec["phases"]:
        number = phase["issue_number"]
        states[number] = client.get_issue(number)["state"]
    return states
def _evaluate_rule(rule: dict[str, Any], issue_states: dict[int, str], resources: dict[str, Any]):
rule_type = rule["type"]
rule_id = rule["id"]
if rule_type == "always":
return {"rule": rule_id, "passed": True, "actual": True, "expected": True}
if rule_type == "issue_closed":
issue_number = int(rule["issue"])
actual = str(issue_states.get(issue_number, "open"))
return {
"rule": rule_id,
"passed": actual == "closed",
"actual": actual,
"expected": "closed",
}
if rule_type == "resource_gte":
resource = rule["resource"]
actual = resources.get(resource, 0)
expected = rule["value"]
return {
"rule": rule_id,
"passed": actual >= expected,
"actual": actual,
"expected": f">={expected}",
}
if rule_type == "resource_gt":
resource = rule["resource"]
actual = resources.get(resource, 0)
expected = rule["value"]
return {
"rule": rule_id,
"passed": actual > expected,
"actual": actual,
"expected": f">{expected}",
}
if rule_type == "resource_true":
resource = rule["resource"]
actual = bool(resources.get(resource, False))
return {
"rule": rule_id,
"passed": actual is True,
"actual": actual,
"expected": True,
}
raise ValueError(f"Unsupported rule type: {rule_type}")
def evaluate_progression(spec: dict[str, Any], issue_states: dict[int, str], resources: dict[str, Any] | None = None):
    """Evaluate every phase of *spec* against issue states and resource readings.

    Returns a report dict with per-phase unlock/completion results plus
    epic-level summary fields (current phase, next locked phase, completion).
    """
    merged = dict(DEFAULT_RESOURCES)
    merged.update(resources or {})
    report_phases = []
    for phase in spec["phases"]:
        number = phase["issue_number"]
        results = [
            _evaluate_rule(rule, issue_states, merged)
            for rule in phase.get("unlock_rules", [])
        ]
        failed = [entry for entry in results if not entry["passed"]]
        succeeded = [entry for entry in results if entry["passed"]]
        done = str(issue_states.get(number, "open")) == "closed"
        open_for_work = not failed
        report_phases.append(
            {
                "number": phase["number"],
                "issue_number": number,
                "key": phase["key"],
                "name": phase["name"],
                "summary": phase["summary"],
                "completed": done,
                "unlocked": open_for_work,
                "available_to_work": open_for_work and not done,
                "passed_requirements": succeeded,
                "blocking_requirements": failed,
            }
        )
    # "Current" is the deepest unlocked phase; fall back to phase 1 otherwise.
    reached = [entry for entry in report_phases if entry["unlocked"]]
    current = reached[-1] if reached else report_phases[0]
    next_locked = None
    for entry in report_phases:
        if not entry["unlocked"]:
            next_locked = entry
            break
    done_all = all(entry["completed"] for entry in report_phases) and report_phases[-1]["unlocked"]
    return {
        "epic_issue": spec["epic_issue"],
        "epic_title": spec["epic_title"],
        "resources": merged,
        "issue_states": {str(num): state for num, state in issue_states.items()},
        "phases": report_phases,
        "current_phase": current,
        "next_locked_phase": next_locked,
        "epic_complete": done_all,
    }
def parse_args():
    """Build and parse the evaluator's command-line options."""
    parser = argparse.ArgumentParser(description="Evaluate current fleet progression against the Paperclips-inspired epic.")
    # Input files.
    parser.add_argument("--spec-file", type=Path, default=DEFAULT_SPEC_FILE)
    parser.add_argument("--token-file", type=Path, default=DEFAULT_TOKEN_FILE)
    parser.add_argument("--issue-state-file", type=Path, help="Optional JSON file of issue_number -> state overrides")
    parser.add_argument("--resource-file", type=Path, help="Optional JSON file with resource values")
    # Per-resource overrides; these share a shape, so declare them in loops.
    for flag in ("--uptime-percent-30d", "--capacity-utilization", "--innovation"):
        parser.add_argument(flag, type=float)
    parser.add_argument("--all-models-local", action="store_true")
    for flag in ("--sovereign-stable-days", "--human-free-days"):
        parser.add_argument(flag, type=int)
    # Output format.
    parser.add_argument("--json", action="store_true")
    return parser.parse_args()
def _load_resources(args):
    """Merge defaults, the optional resource file, and per-flag CLI overrides."""
    resources = dict(DEFAULT_RESOURCES)
    # Middle precedence (above defaults): the optional resource JSON file.
    if args.resource_file:
        resources.update(json.loads(args.resource_file.read_text()))
    # Highest precedence: explicit CLI flags, applied only when actually given.
    cli_values = {
        "uptime_percent_30d": args.uptime_percent_30d,
        "capacity_utilization": args.capacity_utilization,
        "innovation": args.innovation,
        "sovereign_stable_days": args.sovereign_stable_days,
        "human_free_days": args.human_free_days,
    }
    resources.update({name: value for name, value in cli_values.items() if value is not None})
    # store_true flag: it can only force the value to True, never back to False.
    if args.all_models_local:
        resources["all_models_local"] = True
    return resources
def _load_issue_states(args, spec):
if args.issue_state_file:
raw = json.loads(args.issue_state_file.read_text())
return {int(k): v for k, v in raw.items()}
return load_issue_states(spec, token_file=args.token_file)
def main():
    """Entry point: load spec, issue states, and resources, then print the report."""
    args = parse_args()
    spec = load_spec(args.spec_file)
    issue_states = _load_issue_states(args, spec)
    resources = _load_resources(args)
    result = evaluate_progression(spec, issue_states, resources)
    if args.json:
        print(json.dumps(result, indent=2))
        return
    print("--- Fleet Progression Evaluator ---")
    print(f"Epic #{result['epic_issue']}: {result['epic_title']}")
    # Bug fix: phase number and name were concatenated with no separator
    # (printed e.g. "Current phase: 2AUTOMATION"); emit "<number> <NAME>".
    current = result["current_phase"]
    print(f"Current phase: {current['number']} {current['name']}")
    if result["next_locked_phase"]:
        locked = result["next_locked_phase"]
        print(f"Next locked phase: {locked['number']} {locked['name']}")
    print(f"Epic complete: {result['epic_complete']}")
    print()
    for phase in result["phases"]:
        state = "COMPLETE" if phase["completed"] else "ACTIVE" if phase["available_to_work"] else "LOCKED"
        print(f"Phase {phase['number']} [{state}] {phase['name']}")
        if phase["blocking_requirements"]:
            for blocker in phase["blocking_requirements"]:
                print(f" - blocked by {blocker['rule']}: actual={blocker['actual']} expected={blocker['expected']}")
# Script entry point.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,92 @@
from scripts.fleet_progression import evaluate_progression, load_spec
def test_load_spec_contains_epic_and_all_six_phases():
    """The bundled spec describes epic #547 with phases 1-6 mapped to issues 548-553."""
    spec = load_spec()
    assert spec["epic_issue"] == 547
    assert [phase["number"] for phase in spec["phases"]] == list(range(1, 7))
    assert [phase["issue_number"] for phase in spec["phases"]] == list(range(548, 554))
def test_phase_two_unlocks_when_uptime_and_capacity_thresholds_are_met():
    """Uptime >= 95 and capacity > 60 unlock AUTOMATION even with every issue open."""
    all_open = {issue: "open" for issue in range(548, 554)}
    readings = {
        "uptime_percent_30d": 95.0,
        "capacity_utilization": 61.0,
        "innovation": 0,
        "all_models_local": False,
        "sovereign_stable_days": 0,
        "human_free_days": 0,
    }
    result = evaluate_progression(load_spec(), issue_states=all_open, resources=readings)
    automation = result["phases"][1]
    assert automation["unlocked"] is True
    assert result["current_phase"]["number"] == 2
    assert result["next_locked_phase"]["number"] == 3
def test_phase_three_stays_locked_until_phase_two_issue_is_closed():
    """Resource thresholds alone are not enough: phase 3 also needs issue 549 closed."""
    all_open = dict.fromkeys(range(548, 554), "open")
    readings = {
        "uptime_percent_30d": 99.0,
        "capacity_utilization": 90.0,
        "innovation": 250,
        "all_models_local": False,
        "sovereign_stable_days": 0,
        "human_free_days": 0,
    }
    result = evaluate_progression(load_spec(), issue_states=all_open, resources=readings)
    orchestration = result["phases"][2]
    assert orchestration["unlocked"] is False
    blocking_ids = {entry["rule"] for entry in orchestration["blocking_requirements"]}
    assert blocking_ids == {"phase_2_issue_closed"}
def test_phase_five_requires_sovereign_stability_and_innovation():
    """With phases 1-4 closed, SCALE still blocks on stability days and innovation."""
    states = dict.fromkeys((548, 549, 550, 551), "closed")
    states.update({552: "open", 553: "open"})
    readings = {
        "uptime_percent_30d": 99.0,
        "capacity_utilization": 85.0,
        "innovation": 400,
        "all_models_local": True,
        "sovereign_stable_days": 12,
        "human_free_days": 0,
    }
    result = evaluate_progression(load_spec(), issue_states=states, resources=readings)
    scale = result["phases"][4]
    assert scale["unlocked"] is False
    blocking_ids = {entry["rule"] for entry in scale["blocking_requirements"]}
    assert blocking_ids == {"innovation_gt_500", "sovereign_stable_days_gte_30"}
def test_epic_complete_only_when_all_phase_issues_are_closed_and_phase_six_unlocked():
    """All issues closed plus all resource gates met -> epic complete, phase 6 current."""
    all_closed = dict.fromkeys(range(548, 554), "closed")
    readings = {
        "uptime_percent_30d": 99.5,
        "capacity_utilization": 85.0,
        "innovation": 900,
        "all_models_local": True,
        "sovereign_stable_days": 45,
        "human_free_days": 9,
    }
    result = evaluate_progression(load_spec(), issue_states=all_closed, resources=readings)
    assert result["epic_complete"] is True
    assert result["current_phase"]["number"] == 6
    assert result["next_locked_phase"] is None

View File

@@ -1,319 +0,0 @@
# GENOME.md — the-nexus
**Generated:** 2026-04-14
**Repo:** Timmy_Foundation/the-nexus
**Analysis:** Codebase Genome #672
---
## Project Overview
The Nexus is Timmy's canonical 3D home-world — a browser-based Three.js application that serves as:
1. **Local-first training ground** for Timmy (the sovereign AI)
2. **Wizardly visualization surface** for the fleet system
3. **Portal architecture** connecting to other worlds and services
The app is a real-time 3D environment with spatial memory, GOFAI reasoning, agent presence, and portal-based navigation.
---
## Architecture
```mermaid
graph TB
subgraph Browser["BROWSER LAYER"]
HTML[index.html]
APP[app.js - 4082 lines]
CSS[style.css]
Worker[gofai_worker.js]
end
subgraph ThreeJS["THREE.JS RENDERING"]
Scene[Scene Management]
Camera[Camera System]
Renderer[WebGL Renderer]
Post[Post-processing<br/>Bloom, SMAA]
Physics[Physics/Player]
end
subgraph Nexus["NEXUS COMPONENTS"]
SM[SpatialMemory]
SA[SpatialAudio]
MB[MemoryBirth]
MO[MemoryOptimizer]
MI[MemoryInspect]
MP[MemoryPulse]
RT[ReasoningTrace]
RV[ResonanceVisualizer]
end
subgraph GOFAI["GOFAI REASONING"]
Worker2[Web Worker]
Rules[Rule Engine]
Facts[Fact Store]
Inference[Inference Loop]
end
subgraph Backend["BACKEND SERVICES"]
Server[server.py<br/>WebSocket Bridge]
L402[L402 Cost API]
Portal[Portal Registry]
end
subgraph Data["DATA/PERSISTENCE"]
Local[localStorage]
IDB[IndexedDB]
JSON[portals.json]
Vision[vision.json]
end
HTML --> APP
APP --> ThreeJS
APP --> Nexus
APP --> GOFAI
APP --> Backend
APP --> Data
Worker2 --> APP
Server --> APP
```
---
## Entry Points
### Primary Entry
- **`index.html`** — Main HTML shell, loads app.js
- **`app.js`** — Main application (4082 lines), Three.js scene setup
### Secondary Entry Points
- **`boot.js`** — Bootstrap sequence
- **`bootstrap.mjs`** — ES module bootstrap
- **`server.py`** — WebSocket bridge server
### Configuration Entry Points
- **`portals.json`** — Portal definitions and destinations
- **`vision.json`** — Vision/agent configuration
- **`config/fleet_agents.json`** — Fleet agent definitions
---
## Data Flow
```
User Input
                  ↓
app.js (Event Loop)
                  ↓
┌─────────────────────────────────────┐
│ Three.js Scene                      │
│  - Player movement                  │
│  - Camera controls                  │
│  - Physics simulation               │
│  - Portal detection                 │
└─────────────────────────────────────┘
                  ↓
┌─────────────────────────────────────┐
│ Nexus Components                    │
│  - SpatialMemory (room/context)     │
│  - MemoryBirth (new memories)       │
│  - MemoryPulse (heartbeat)          │
│  - ReasoningTrace (GOFAI output)    │
└─────────────────────────────────────┘
                  ↓
┌─────────────────────────────────────┐
│ GOFAI Worker (off-thread)           │
│  - Rule evaluation                  │
│  - Fact inference                   │
│  - Decision making                  │
└─────────────────────────────────────┘
                  ↓
┌─────────────────────────────────────┐
│ Backend Services                    │
│  - WebSocket (server.py)            │
│  - L402 cost API                    │
│  - Portal registry                  │
└─────────────────────────────────────┘
                  ↓
Persistence (localStorage/IndexedDB)
```
---
## Key Abstractions
### 1. Nexus Object (`NEXUS`)
Central configuration and state object containing:
- Color palette
- Room definitions
- Portal configurations
- Agent settings
### 2. SpatialMemory
Manages room-based context for the AI agent:
- Room transitions trigger context switches
- Facts are stored per-room
- NPCs have location awareness
### 3. Portal System
Connects the 3D world to external services:
- Portals defined in `portals.json`
- Each portal links to a service/endpoint
- Visual indicators in 3D space
### 4. GOFAI Worker
Off-thread reasoning engine:
- Rule-based inference
- Fact store with persistence
- Decision making for agent behavior
### 5. Memory Components
- **MemoryBirth**: Creates new memories from interactions
- **MemoryOptimizer**: Compresses and deduplicates memories
- **MemoryPulse**: Heartbeat system for memory health
- **MemoryInspect**: Debug/inspection interface
---
## API Surface
### Internal APIs (JavaScript)
| Module | Export | Purpose |
|--------|--------|---------|
| `app.js` | `NEXUS` | Main config/state object |
| `SpatialMemory` | class | Room-based context management |
| `SpatialAudio` | class | 3D positional audio |
| `MemoryBirth` | class | Memory creation |
| `MemoryOptimizer` | class | Memory compression |
| `ReasoningTrace` | class | GOFAI reasoning visualization |
### External APIs (HTTP/WebSocket)
| Endpoint | Protocol | Purpose |
|----------|----------|---------|
| `ws://localhost:PORT` | WebSocket | Real-time bridge to backend |
| `http://localhost:8080/api/cost-estimate` | HTTP | L402 cost estimation |
| Portal endpoints | Various | External service connections |
---
## Dependencies
### Runtime Dependencies
- **Three.js** — 3D rendering engine
- **Three.js Addons** — Post-processing (Bloom, SMAA)
### Build Dependencies
- **ES Modules** — Native browser modules
- **No bundler** — Direct script loading
### Backend Dependencies
- **Python 3.x** — server.py
- **WebSocket** — Real-time communication
---
## Test Coverage
### Existing Tests
- `tests/boot.test.js` — Bootstrap sequence tests
### Test Gaps
1. **Three.js scene initialization** — No tests
2. **Portal system** — No tests
3. **Memory components** — No tests
4. **GOFAI worker** — No tests
5. **WebSocket communication** — No tests
6. **Spatial memory transitions** — No tests
7. **Physics/player movement** — No tests
### Recommended Test Priorities
1. Portal detection and activation
2. Spatial memory room transitions
3. GOFAI worker message passing
4. WebSocket connection handling
5. Memory persistence (localStorage/IndexedDB)
---
## Security Considerations
### Current Risks
1. **WebSocket without auth** — server.py has no authentication
2. **localStorage sensitive data** — Memories stored unencrypted
3. **CORS open** — No origin restrictions on WebSocket
4. **L402 endpoint** — Cost API may expose internal state
### Mitigations
1. Add WebSocket authentication
2. Encrypt sensitive memories
3. Restrict CORS origins
4. Rate limit L402 endpoint
---
## File Structure
```
the-nexus/
├── app.js # Main app (4082 lines)
├── index.html # HTML shell
├── style.css # Styles
├── server.py # WebSocket bridge
├── boot.js # Bootstrap
├── bootstrap.mjs # ES module bootstrap
├── gofai_worker.js # GOFAI web worker
├── portals.json # Portal definitions
├── vision.json # Vision config
├── nexus/ # Nexus components
│ └── components/
│ ├── spatial-memory.js
│ ├── spatial-audio.js
│ ├── memory-birth.js
│ ├── memory-optimizer.js
│ ├── memory-inspect.js
│ ├── memory-pulse.js
│ ├── reasoning-trace.js
│ └── resonance-visualizer.js
├── config/ # Configuration
├── docs/ # Documentation
├── tests/ # Tests
├── agent/ # Agent components
├── bin/ # Scripts
└── assets/ # Static assets
```
---
## Technical Debt
1. **Large app.js** (4082 lines) — Should be split into modules
2. **No TypeScript** — Pure JavaScript, no type safety
3. **Manual DOM manipulation** — Could use a framework
4. **No build system** — Direct ES modules, no optimization
5. **Limited error handling** — Minimal try/catch coverage
---
## Migration Notes
From CLAUDE.md:
- Current `main` does NOT ship the old root frontend files
- A clean checkout serves a directory listing
- The live browser shell exists in legacy form at `/Users/apayne/the-matrix`
- Migration priorities: #684 (docs), #685 (legacy audit), #686 (smoke tests), #687 (restore shell)
---
## Next Steps
1. **Restore browser shell** — Bring frontend back to main
2. **Add tests** — Cover critical paths (portals, memory, GOFAI)
3. **Split app.js** — Modularize the 4082-line file
4. **Add authentication** — Secure WebSocket and APIs
5. **TypeScript migration** — Add type safety
---
*Generated by Codebase Genome pipeline — Issue #672*