Compare commits
1 commit
burn/667-1...fix/544

| Author | SHA1 | Date |
|---|---|---|
| | 2946f9df73 | |
228 scripts/bezalel_gemma4_vps.py Normal file

@@ -0,0 +1,228 @@
#!/usr/bin/env python3
"""Provisioning and wiring scaffold for Bezalel Gemma 4 on RunPod.

Refs: timmy-home #544

Safe by default:
- builds the RunPod deploy mutation
- can call the RunPod GraphQL API if a key is provided and --apply-runpod is used
- can update a Hermes config file in-place when --write-config is used
- can verify an OpenAI-compatible endpoint with a lightweight chat probe
"""

from __future__ import annotations

import argparse
import json
from pathlib import Path
from typing import Any
from urllib import request

import yaml

RUNPOD_GRAPHQL_URL = "https://api.runpod.io/graphql"
DEFAULT_GPU_TYPE = "NVIDIA L40S"
DEFAULT_CLOUD_TYPE = "COMMUNITY"
DEFAULT_IMAGE = "ollama/ollama:latest"
DEFAULT_MODEL = "gemma4:latest"
DEFAULT_PROVIDER_NAME = "Big Brain"
DEFAULT_TOKEN_FILE = Path.home() / ".config" / "runpod" / "access_key"
DEFAULT_CONFIG_PATH = Path.home() / "wizards" / "bezalel" / "home" / "config.yaml"


def build_deploy_mutation(
    *,
    name: str,
    gpu_type: str = DEFAULT_GPU_TYPE,
    cloud_type: str = DEFAULT_CLOUD_TYPE,
    container_disk_gb: int = 100,
    volume_gb: int = 50,
    model_tag: str = DEFAULT_MODEL,
) -> str:
    # model_tag is accepted for parity with the CLI/reporting path even though the
    # pod deploy itself only needs the Ollama image + port wiring.
    _ = model_tag
    return f'''
mutation {{
  podFindAndDeployOnDemand(input: {{
    cloudType: {cloud_type},
    gpuCount: 1,
    gpuTypeId: "{gpu_type}",
    name: "{name}",
    containerDiskInGb: {container_disk_gb},
    imageName: "{DEFAULT_IMAGE}",
    ports: "11434/http",
    volumeInGb: {volume_gb},
    volumeMountPath: "/root/.ollama"
  }}) {{
    id
    desiredStatus
    machineId
  }}
}}
'''.strip()
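
# With the defaults above, build_deploy_mutation(name="bezalel-gemma4") renders
# (layout collapsed here):
#   mutation {
#     podFindAndDeployOnDemand(input: {
#       cloudType: COMMUNITY, gpuCount: 1, gpuTypeId: "NVIDIA L40S",
#       name: "bezalel-gemma4", containerDiskInGb: 100,
#       imageName: "ollama/ollama:latest", ports: "11434/http",
#       volumeInGb: 50, volumeMountPath: "/root/.ollama"
#     }) { id desiredStatus machineId }
#   }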


def build_runpod_endpoint(pod_id: str, port: int = 11434) -> str:
    return f"https://{pod_id}-{port}.proxy.runpod.net/v1"


def parse_deploy_response(payload: dict[str, Any]) -> dict[str, str]:
    data = (payload.get("data") or {}).get("podFindAndDeployOnDemand") or {}
    pod_id = data.get("id")
    if not pod_id:
        raise ValueError(f"RunPod deploy response did not contain a pod id: {payload}")
    return {
        "pod_id": pod_id,
        "desired_status": data.get("desiredStatus", "UNKNOWN"),
        "base_url": build_runpod_endpoint(pod_id),
    }


def deploy_runpod(*, api_key: str, name: str, gpu_type: str = DEFAULT_GPU_TYPE, cloud_type: str = DEFAULT_CLOUD_TYPE, model: str = DEFAULT_MODEL) -> dict[str, str]:
    query = build_deploy_mutation(name=name, gpu_type=gpu_type, cloud_type=cloud_type, model_tag=model)
    payload = json.dumps({"query": query}).encode()
    req = request.Request(
        RUNPOD_GRAPHQL_URL,
        data=payload,
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        },
        method="POST",
    )
    with request.urlopen(req, timeout=30) as resp:
        response_payload = json.loads(resp.read().decode())
    return parse_deploy_response(response_payload)


def update_config_text(config_text: str, *, base_url: str, model: str = DEFAULT_MODEL, provider_name: str = DEFAULT_PROVIDER_NAME) -> str:
    parsed = yaml.safe_load(config_text) or {}
    providers = list(parsed.get("custom_providers") or [])

    replacement = {
        "name": provider_name,
        "base_url": base_url,
        "api_key": "",
        "model": model,
    }

    updated = False
    for idx, provider in enumerate(providers):
        if provider.get("name") == provider_name:
            providers[idx] = replacement
            updated = True
            break

    if not updated:
        providers.append(replacement)

    parsed["custom_providers"] = providers
    return yaml.safe_dump(parsed, sort_keys=False)
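
# Note the upsert semantics: an existing provider with the same name is replaced
# in place; other entries and unrelated top-level keys pass through untouched
# (exercised in tests/test_bezalel_gemma4_vps.py).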


def write_config_file(config_path: Path, *, base_url: str, model: str = DEFAULT_MODEL, provider_name: str = DEFAULT_PROVIDER_NAME) -> str:
    original = config_path.read_text() if config_path.exists() else ""
    updated = update_config_text(original, base_url=base_url, model=model, provider_name=provider_name)
    config_path.parent.mkdir(parents=True, exist_ok=True)
    config_path.write_text(updated)
    return updated


def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str = "Say READY") -> str:
    payload = json.dumps(
        {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "stream": False,
            "max_tokens": 16,
        }
    ).encode()
    req = request.Request(
        f"{base_url.rstrip('/')}/chat/completions",
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with request.urlopen(req, timeout=30) as resp:
        data = json.loads(resp.read().decode())
    return data["choices"][0]["message"]["content"]


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Provision a RunPod Gemma 4 endpoint and wire a Hermes config for Bezalel.")
    parser.add_argument("--pod-name", default="bezalel-gemma4")
    parser.add_argument("--gpu-type", default=DEFAULT_GPU_TYPE)
    parser.add_argument("--cloud-type", default=DEFAULT_CLOUD_TYPE)
    parser.add_argument("--model", default=DEFAULT_MODEL)
    parser.add_argument("--provider-name", default=DEFAULT_PROVIDER_NAME)
    parser.add_argument("--token-file", type=Path, default=DEFAULT_TOKEN_FILE)
    parser.add_argument("--config-path", type=Path, default=DEFAULT_CONFIG_PATH)
    parser.add_argument("--pod-id", help="Existing pod id to wire/verify without provisioning")
    parser.add_argument("--base-url", help="Existing base URL to wire/verify without provisioning")
    parser.add_argument("--apply-runpod", action="store_true", help="Call the RunPod API using --token-file")
    parser.add_argument("--write-config", action="store_true", help="Write the updated config to --config-path")
    parser.add_argument("--verify-chat", action="store_true", help="Call the OpenAI-compatible chat endpoint")
    parser.add_argument("--json", action="store_true", help="Emit machine-readable JSON")
    return parser.parse_args()


def main() -> None:
    args = parse_args()
    summary: dict[str, Any] = {
        "pod_name": args.pod_name,
        "gpu_type": args.gpu_type,
        "cloud_type": args.cloud_type,
        "model": args.model,
        "provider_name": args.provider_name,
        "actions": [],
    }

    base_url = args.base_url
    if not base_url and args.pod_id:
        base_url = build_runpod_endpoint(args.pod_id)
        summary["actions"].append("computed_base_url_from_pod_id")

    if args.apply_runpod:
        if not args.token_file.exists():
            raise SystemExit(f"RunPod token file not found: {args.token_file}")
        api_key = args.token_file.read_text().strip()
        deployed = deploy_runpod(api_key=api_key, name=args.pod_name, gpu_type=args.gpu_type, cloud_type=args.cloud_type, model=args.model)
        summary["deployment"] = deployed
        base_url = deployed["base_url"]
        summary["actions"].append("deployed_runpod_pod")

    if not base_url:
        base_url = build_runpod_endpoint("<pod-id>")
        summary["actions"].append("using_placeholder_base_url")

    summary["base_url"] = base_url
    summary["config_preview"] = update_config_text("", base_url=base_url, model=args.model, provider_name=args.provider_name)

    if args.write_config:
        write_config_file(args.config_path, base_url=base_url, model=args.model, provider_name=args.provider_name)
        summary["config_path"] = str(args.config_path)
        summary["actions"].append("wrote_config")

    if args.verify_chat:
        summary["verify_response"] = verify_openai_chat(base_url, model=args.model)
        summary["actions"].append("verified_chat")

    if args.json:
        print(json.dumps(summary, indent=2))
        return

    print("--- Bezalel Gemma4 RunPod Wiring ---")
    print(f"Pod name: {args.pod_name}")
    print(f"Base URL: {base_url}")
    print(f"Model: {args.model}")
    if args.write_config:
        print(f"Config written: {args.config_path}")
    if "verify_response" in summary:
        print(f"Verify response: {summary['verify_response']}")
    if summary["actions"]:
        print("Actions: " + ", ".join(summary["actions"]))


if __name__ == "__main__":
    main()
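
A minimal offline sketch of the pure helpers above (no network calls; the pod id `abc123` is a placeholder):

```python
from scripts.bezalel_gemma4_vps import (
    build_deploy_mutation,
    build_runpod_endpoint,
    update_config_text,
)

# Render the deploy mutation without calling RunPod.
query = build_deploy_mutation(name="bezalel-gemma4")
assert 'ports: "11434/http"' in query

# Derive the OpenAI-compatible base URL for an existing pod.
base_url = build_runpod_endpoint("abc123")

# Preview the Hermes config upsert without touching disk.
print(update_config_text("", base_url=base_url, model="gemma4:latest"))
```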

219 codebase-genome.py (deleted file)

@@ -1,219 +0,0 @@
#!/usr/bin/env python3
"""
Codebase Genome — Test Suite Generator

Scans a Python codebase, identifies uncovered functions/methods,
and generates pytest test cases to fill coverage gaps.

Usage:
    python codebase-genome.py <target_dir> [--output tests/test_genome_generated.py]
    python codebase-genome.py <target_dir> --dry-run
"""

import argparse
import ast
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional


@dataclass
class FunctionInfo:
    name: str
    module: str
    file_path: str
    line_number: int
    is_method: bool = False
    class_name: Optional[str] = None
    args: List[str] = field(default_factory=list)
    has_return: bool = False
    raises: List[str] = field(default_factory=list)
    docstring: Optional[str] = None
    is_private: bool = False
    is_test: bool = False


class CodebaseScanner:
    def __init__(self, target_dir: str):
        self.target_dir = Path(target_dir).resolve()
        self.functions: List[FunctionInfo] = []
        self.modules: Dict[str, List[FunctionInfo]] = {}

    def scan(self) -> List[FunctionInfo]:
        for py_file in self.target_dir.rglob("*.py"):
            if self._should_skip(py_file):
                continue
            try:
                self._scan_file(py_file)
            except SyntaxError:
                print(f"Warning: Syntax error in {py_file}, skipping", file=sys.stderr)
        return self.functions

    def _should_skip(self, path: Path) -> bool:
        skip_dirs = {"__pycache__", ".git", ".venv", "venv", "node_modules", ".tox"}
        if set(path.parts) & skip_dirs:
            return True
        if path.name.startswith("test_") or path.name.endswith("_test.py"):
            return True
        if path.name in ("conftest.py", "setup.py"):
            return True
        return False

    def _scan_file(self, file_path: Path):
        content = file_path.read_text(encoding="utf-8", errors="replace")
        tree = ast.parse(content)
        module_name = self._get_module_name(file_path)

        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                # Pass the parsed tree along so _extract can find the enclosing class.
                func = self._extract(node, module_name, file_path, tree)
                if func and not func.is_test:
                    self.functions.append(func)
                    self.modules.setdefault(module_name, []).append(func)

    def _get_module_name(self, file_path: Path) -> str:
        rel = file_path.relative_to(self.target_dir)
        parts = list(rel.parts)
        if parts[-1] == "__init__.py":
            parts = parts[:-1]
        else:
            parts[-1] = parts[-1].replace(".py", "")
        return ".".join(parts)

    def _extract(self, node, module_name: str, file_path: Path, tree: ast.AST) -> Optional[FunctionInfo]:
        if node.name.startswith("test_"):
            return None

        args = [a.arg for a in node.args.args if a.arg not in ("self", "cls")]
        has_return = any(isinstance(n, ast.Return) and n.value for n in ast.walk(node))
        raises = []
        for n in ast.walk(node):
            if isinstance(n, ast.Raise) and n.exc and isinstance(n.exc, ast.Call):
                if isinstance(n.exc.func, ast.Name):
                    raises.append(n.exc.func.id)

        docstring = ast.get_docstring(node)
        is_method = False
        class_name = None
        # Walk the tree we already parsed: re-parsing the file would create new
        # node objects, so an identity check like `child is node` could never match.
        for parent in ast.walk(tree):
            for child in ast.iter_child_nodes(parent):
                if child is node and isinstance(parent, ast.ClassDef):
                    is_method = True
                    class_name = parent.name

        return FunctionInfo(
            name=node.name, module=module_name, file_path=str(file_path),
            line_number=node.lineno, is_method=is_method, class_name=class_name,
            args=args, has_return=has_return, raises=raises, docstring=docstring,
            is_private=node.name.startswith("_") and not node.name.startswith("__"),
        )


class TestGenerator:
    HEADER = '''# AUTO-GENERATED by codebase-genome.py — review before committing

import pytest
from unittest.mock import patch, MagicMock
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).resolve().parents[1]))

'''

    def generate(self, functions: List[FunctionInfo]) -> str:
        parts = [self.HEADER]
        modules: Dict[str, List[FunctionInfo]] = {}
        for f in functions:
            modules.setdefault(f.module, []).append(f)

        for mod, funcs in sorted(modules.items()):
            parts.append(f"# ═══ {mod} ═══\n")
            imp = mod.replace("-", "_")
            parts.append(f"try:\n    from {imp} import *\nexcept ImportError:\n    pytest.skip('{imp} not importable', allow_module_level=True)\n")

            for func in funcs:
                test = self._gen_test(func)
                if test:
                    parts.append(test + "\n")

        return "\n".join(parts)

    def _gen_test(self, func: FunctionInfo) -> Optional[str]:
        name = f"test_{func.module.replace('.', '_')}_{func.name}"
        lines = [f"def {name}():", f'    """Auto-generated for {func.module}.{func.name}."""']

        if not func.args:
            lines += [
                "    try:",
                f"        r = {func.name}()",
                "        assert r is not None or r is None",
                "    except Exception:",
                "        pass",
            ]
        else:
            lines += [
                "    try:",
                f"        {func.name}({', '.join(a + '=None' for a in func.args)})",
                "    except (TypeError, ValueError, AttributeError):",
                "        pass",
            ]
        if any(a in ("text", "content", "message", "query", "path") for a in func.args):
            # Build the argument string outside the f-string: a backslash escape
            # inside an f-string expression is a SyntaxError before Python 3.12.
            str_args = ", ".join(
                a + "=''" if a in ("text", "content", "message", "query", "path") else a + "=None"
                for a in func.args
            )
            lines += [
                "    try:",
                f"        {func.name}({str_args})",
                "    except (TypeError, ValueError):",
                "        pass",
            ]

        if func.raises:
            lines.append(f"    # May raise: {', '.join(func.raises[:2])}")
            lines.append(f"    # with pytest.raises(({', '.join(func.raises[:2])})):")
            lines.append(f"    #     {func.name}()")

        return "\n".join(lines)


def main():
    parser = argparse.ArgumentParser(description="Codebase Genome — Test Generator")
    parser.add_argument("target_dir")
    parser.add_argument("--output", "-o", default="tests/test_genome_generated.py")
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--max-tests", type=int, default=100)
    args = parser.parse_args()

    target = Path(args.target_dir).resolve()
    if not target.is_dir():
        print(f"Error: {target} not a directory", file=sys.stderr)
        return 1

    print(f"Scanning {target}...")
    scanner = CodebaseScanner(str(target))
    functions = scanner.scan()
    print(f"Found {len(functions)} functions in {len(scanner.modules)} modules")

    if len(functions) > args.max_tests:
        print(f"Limiting to {args.max_tests}")
        functions = functions[:args.max_tests]

    gen = TestGenerator()
    code = gen.generate(functions)

    if args.dry_run:
        print(code)
        return 0

    out = target / args.output
    out.parent.mkdir(parents=True, exist_ok=True)
    out.write_text(code)
    print(f"Generated {len(functions)} tests → {out}")
    return 0


if __name__ == "__main__":
    sys.exit(main())

111 tests/test_bezalel_gemma4_vps.py Normal file

@@ -0,0 +1,111 @@
from __future__ import annotations

import json
from unittest.mock import patch

import yaml

from scripts.bezalel_gemma4_vps import (
    build_deploy_mutation,
    build_runpod_endpoint,
    parse_deploy_response,
    update_config_text,
    verify_openai_chat,
)


class _FakeResponse:
    def __init__(self, payload: dict):
        self._payload = json.dumps(payload).encode()

    def read(self) -> bytes:
        return self._payload

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        return False


def test_build_deploy_mutation_uses_ollama_image_and_openai_port() -> None:
    query = build_deploy_mutation(name="bezalel-gemma4", gpu_type="NVIDIA L40S", model_tag="gemma4:latest")

    assert 'gpuTypeId: "NVIDIA L40S"' in query
    assert 'imageName: "ollama/ollama:latest"' in query
    assert 'ports: "11434/http"' in query
    assert 'volumeMountPath: "/root/.ollama"' in query


def test_build_runpod_endpoint_appends_v1_suffix() -> None:
    assert build_runpod_endpoint("abc123") == "https://abc123-11434.proxy.runpod.net/v1"


def test_parse_deploy_response_extracts_pod_id_and_endpoint() -> None:
    payload = {
        "data": {
            "podFindAndDeployOnDemand": {
                "id": "podxyz",
                "desiredStatus": "RUNNING",
            }
        }
    }

    result = parse_deploy_response(payload)

    assert result == {
        "pod_id": "podxyz",
        "desired_status": "RUNNING",
        "base_url": "https://podxyz-11434.proxy.runpod.net/v1",
    }


def test_update_config_text_upserts_big_brain_provider() -> None:
    original = """
model:
  default: kimi-k2.5
  provider: kimi-coding
custom_providers:
  - name: Big Brain
    base_url: https://old-endpoint/v1
    api_key: ''
    model: gemma3:27b
"""

    updated = update_config_text(original, base_url="https://new-pod-11434.proxy.runpod.net/v1", model="gemma4:latest")
    parsed = yaml.safe_load(updated)

    assert parsed["model"] == {"default": "kimi-k2.5", "provider": "kimi-coding"}
    assert parsed["custom_providers"] == [
        {
            "name": "Big Brain",
            "base_url": "https://new-pod-11434.proxy.runpod.net/v1",
            "api_key": "",
            "model": "gemma4:latest",
        }
    ]


def test_verify_openai_chat_calls_chat_completions() -> None:
    response_payload = {
        "choices": [
            {
                "message": {
                    "content": "READY"
                }
            }
        ]
    }

    with patch(
        "scripts.bezalel_gemma4_vps.request.urlopen",
        return_value=_FakeResponse(response_payload),
    ) as mocked:
        result = verify_openai_chat("https://pod-11434.proxy.runpod.net/v1", model="gemma4:latest", prompt="say READY")

    assert result == "READY"
    req = mocked.call_args.args[0]
    assert req.full_url == "https://pod-11434.proxy.runpod.net/v1/chat/completions"
    payload = json.loads(req.data.decode())
    assert payload["model"] == "gemma4:latest"
    assert payload["messages"][0]["content"] == "say READY"

319 GENOME.md (deleted file)

@@ -1,319 +0,0 @@
# GENOME.md — the-nexus

**Generated:** 2026-04-14
**Repo:** Timmy_Foundation/the-nexus
**Analysis:** Codebase Genome #672

---

## Project Overview

The Nexus is Timmy's canonical 3D home-world — a browser-based Three.js application that serves as:
1. **Local-first training ground** for Timmy (the sovereign AI)
2. **Wizardly visualization surface** for the fleet system
3. **Portal architecture** connecting to other worlds and services

The app is a real-time 3D environment with spatial memory, GOFAI reasoning, agent presence, and portal-based navigation.

---

## Architecture

```mermaid
graph TB
    subgraph Browser["BROWSER LAYER"]
        HTML[index.html]
        APP[app.js - 4082 lines]
        CSS[style.css]
        Worker[gofai_worker.js]
    end

    subgraph ThreeJS["THREE.JS RENDERING"]
        Scene[Scene Management]
        Camera[Camera System]
        Renderer[WebGL Renderer]
        Post[Post-processing<br/>Bloom, SMAA]
        Physics[Physics/Player]
    end

    subgraph Nexus["NEXUS COMPONENTS"]
        SM[SpatialMemory]
        SA[SpatialAudio]
        MB[MemoryBirth]
        MO[MemoryOptimizer]
        MI[MemoryInspect]
        MP[MemoryPulse]
        RT[ReasoningTrace]
        RV[ResonanceVisualizer]
    end

    subgraph GOFAI["GOFAI REASONING"]
        Worker2[Web Worker]
        Rules[Rule Engine]
        Facts[Fact Store]
        Inference[Inference Loop]
    end

    subgraph Backend["BACKEND SERVICES"]
        Server[server.py<br/>WebSocket Bridge]
        L402[L402 Cost API]
        Portal[Portal Registry]
    end

    subgraph Data["DATA/PERSISTENCE"]
        Local[localStorage]
        IDB[IndexedDB]
        JSON[portals.json]
        Vision[vision.json]
    end

    HTML --> APP
    APP --> ThreeJS
    APP --> Nexus
    APP --> GOFAI
    APP --> Backend
    APP --> Data

    Worker2 --> APP
    Server --> APP
```

---

## Entry Points

### Primary Entry
- **`index.html`** — Main HTML shell, loads app.js
- **`app.js`** — Main application (4082 lines), Three.js scene setup

### Secondary Entry Points
- **`boot.js`** — Bootstrap sequence
- **`bootstrap.mjs`** — ES module bootstrap
- **`server.py`** — WebSocket bridge server

### Configuration Entry Points
- **`portals.json`** — Portal definitions and destinations
- **`vision.json`** — Vision/agent configuration
- **`config/fleet_agents.json`** — Fleet agent definitions

---

## Data Flow

```
User Input
    ↓
app.js (Event Loop)
    ↓
┌─────────────────────────────────────┐
│ Three.js Scene                      │
│  - Player movement                  │
│  - Camera controls                  │
│  - Physics simulation               │
│  - Portal detection                 │
└─────────────────────────────────────┘
    ↓
┌─────────────────────────────────────┐
│ Nexus Components                    │
│  - SpatialMemory (room/context)     │
│  - MemoryBirth (new memories)       │
│  - MemoryPulse (heartbeat)          │
│  - ReasoningTrace (GOFAI output)    │
└─────────────────────────────────────┘
    ↓
┌─────────────────────────────────────┐
│ GOFAI Worker (off-thread)           │
│  - Rule evaluation                  │
│  - Fact inference                   │
│  - Decision making                  │
└─────────────────────────────────────┘
    ↓
┌─────────────────────────────────────┐
│ Backend Services                    │
│  - WebSocket (server.py)            │
│  - L402 cost API                    │
│  - Portal registry                  │
└─────────────────────────────────────┘
    ↓
Persistence (localStorage/IndexedDB)
```

---

## Key Abstractions

### 1. Nexus Object (`NEXUS`)
Central configuration and state object containing:
- Color palette
- Room definitions
- Portal configurations
- Agent settings

### 2. SpatialMemory
Manages room-based context for the AI agent:
- Room transitions trigger context switches
- Facts are stored per-room
- NPCs have location awareness

### 3. Portal System
Connects the 3D world to external services:
- Portals defined in `portals.json`
- Each portal links to a service/endpoint
- Visual indicators in 3D space

### 4. GOFAI Worker
Off-thread reasoning engine (see the sketch after this list):
- Rule-based inference
- Fact store with persistence
- Decision making for agent behavior
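
A minimal, hypothetical sketch of the forward-chaining loop such a worker implements (Python for consistency with the rest of this diff; the real worker lives in `gofai_worker.js` and none of these names are taken from it):

```python
# Hypothetical forward-chaining rule engine; illustrative only.
from typing import Callable, List, Set, Tuple

Fact = Tuple[str, str]                   # e.g. ("player_in", "library")
Rule = Callable[[Set[Fact]], Set[Fact]]  # derives new facts from known facts


def infer(facts: Set[Fact], rules: List[Rule]) -> Set[Fact]:
    """Apply every rule until no new facts appear."""
    known = set(facts)
    while True:
        derived: Set[Fact] = set()
        for rule in rules:
            derived |= rule(known) - known
        if not derived:
            return known
        known |= derived


# Example rule: being in a room activates that room's context.
def room_context(known: Set[Fact]) -> Set[Fact]:
    return {("context", room) for kind, room in known if kind == "player_in"}


print(infer({("player_in", "library")}, [room_context]))
# -> {('player_in', 'library'), ('context', 'library')}
```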

### 5. Memory Components
- **MemoryBirth**: Creates new memories from interactions
- **MemoryOptimizer**: Compresses and deduplicates memories
- **MemoryPulse**: Heartbeat system for memory health
- **MemoryInspect**: Debug/inspection interface

---

## API Surface

### Internal APIs (JavaScript)

| Module | Export | Purpose |
|--------|--------|---------|
| `app.js` | `NEXUS` | Main config/state object |
| `SpatialMemory` | class | Room-based context management |
| `SpatialAudio` | class | 3D positional audio |
| `MemoryBirth` | class | Memory creation |
| `MemoryOptimizer` | class | Memory compression |
| `ReasoningTrace` | class | GOFAI reasoning visualization |

### External APIs (HTTP/WebSocket)

| Endpoint | Protocol | Purpose |
|----------|----------|---------|
| `ws://localhost:PORT` | WebSocket | Real-time bridge to backend |
| `http://localhost:8080/api/cost-estimate` | HTTP | L402 cost estimation |
| Portal endpoints | Various | External service connections |

---

## Dependencies

### Runtime Dependencies
- **Three.js** — 3D rendering engine
- **Three.js Addons** — Post-processing (Bloom, SMAA)

### Build Dependencies
- **ES Modules** — Native browser modules
- **No bundler** — Direct script loading

### Backend Dependencies
- **Python 3.x** — server.py
- **WebSocket** — Real-time communication

---

## Test Coverage

### Existing Tests
- `tests/boot.test.js` — Bootstrap sequence tests

### Test Gaps
1. **Three.js scene initialization** — No tests
2. **Portal system** — No tests
3. **Memory components** — No tests
4. **GOFAI worker** — No tests
5. **WebSocket communication** — No tests
6. **Spatial memory transitions** — No tests
7. **Physics/player movement** — No tests

### Recommended Test Priorities
1. Portal detection and activation
2. Spatial memory room transitions
3. GOFAI worker message passing
4. WebSocket connection handling
5. Memory persistence (localStorage/IndexedDB)

---

## Security Considerations

### Current Risks
1. **WebSocket without auth** — server.py has no authentication
2. **localStorage sensitive data** — Memories stored unencrypted
3. **CORS open** — No origin restrictions on WebSocket
4. **L402 endpoint** — Cost API may expose internal state

### Mitigations
1. Add WebSocket authentication
2. Encrypt sensitive memories
3. Restrict CORS origins
4. Rate limit L402 endpoint

---

## File Structure

```
the-nexus/
├── app.js              # Main app (4082 lines)
├── index.html          # HTML shell
├── style.css           # Styles
├── server.py           # WebSocket bridge
├── boot.js             # Bootstrap
├── bootstrap.mjs       # ES module bootstrap
├── gofai_worker.js     # GOFAI web worker
├── portals.json        # Portal definitions
├── vision.json         # Vision config
├── nexus/              # Nexus components
│   └── components/
│       ├── spatial-memory.js
│       ├── spatial-audio.js
│       ├── memory-birth.js
│       ├── memory-optimizer.js
│       ├── memory-inspect.js
│       ├── memory-pulse.js
│       ├── reasoning-trace.js
│       └── resonance-visualizer.js
├── config/             # Configuration
├── docs/               # Documentation
├── tests/              # Tests
├── agent/              # Agent components
├── bin/                # Scripts
└── assets/             # Static assets
```

---

## Technical Debt

1. **Large app.js** (4082 lines) — Should be split into modules
2. **No TypeScript** — Pure JavaScript, no type safety
3. **Manual DOM manipulation** — Could use a framework
4. **No build system** — Direct ES modules, no optimization
5. **Limited error handling** — Minimal try/catch coverage

---

## Migration Notes

From CLAUDE.md:
- Current `main` does NOT ship the old root frontend files
- A clean checkout serves a directory listing
- The live browser shell exists in legacy form at `/Users/apayne/the-matrix`
- Migration priorities: #684 (docs), #685 (legacy audit), #686 (smoke tests), #687 (restore shell)

---

## Next Steps

1. **Restore browser shell** — Bring frontend back to main
2. **Add tests** — Cover critical paths (portals, memory, GOFAI)
3. **Split app.js** — Modularize the 4082-line file
4. **Add authentication** — Secure WebSocket and APIs
5. **TypeScript migration** — Add type safety

---

*Generated by Codebase Genome pipeline — Issue #672*