Compare commits
1 Commits
feat/694-s
...
fix/715-sm
| Author | SHA1 | Date |
|---|---|---|
| | bbc0057751 | |
@@ -1,5 +1,5 @@
|
||||
name: Smoke Test
|
||||
'on':
|
||||
"on":
|
||||
pull_request:
|
||||
push:
|
||||
branches: [main]
|
||||
@@ -11,22 +11,40 @@ jobs:
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
- name: Install parse dependencies
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python3 -m pip install --quiet pyyaml
|
||||
pip install --quiet pyyaml pytest
|
||||
- name: Parse check
|
||||
run: |
|
||||
find . \( -name '*.yml' -o -name '*.yaml' \) | grep -v .gitea | xargs -r python3 -c "import sys,yaml; [yaml.safe_load(open(f)) for f in sys.argv[1:]]"
|
||||
find . -name '*.json' | while read f; do python3 -m json.tool "$f" > /dev/null || exit 1; done
|
||||
find . -name '*.py' | xargs -r python3 -m py_compile
|
||||
find . -name '*.sh' | xargs -r bash -n
|
||||
# YAML parse
|
||||
find . \( -name '*.yml' -o -name '*.yaml' \) | grep -v .gitea | while read f; do
|
||||
python3 -c "import yaml; yaml.safe_load(open('$f'))" || { echo "FAIL: $f"; exit 1; }
|
||||
done
|
||||
# JSON parse (file-by-file to avoid xargs arg overflow)
|
||||
find . -name '*.json' | grep -v node_modules | while read f; do
|
||||
python3 -m json.tool "$f" > /dev/null || { echo "FAIL: $f"; exit 1; }
|
||||
done
|
||||
# Python compile
|
||||
find . -name '*.py' | while read f; do
|
||||
python3 -m py_compile "$f" || { echo "FAIL: $f"; exit 1; }
|
||||
done
|
||||
# Shell syntax
|
||||
find . -name '*.sh' | while read f; do
|
||||
bash -n "$f" || { echo "FAIL: $f"; exit 1; }
|
||||
done
|
||||
echo "PASS: All files parse"
|
||||
- name: Secret scan
|
||||
run: |
|
||||
if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v '.gitea' | grep -v 'detect_secrets' | grep -v 'test_trajectory_sanitize'; then exit 1; fi
|
||||
if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v '.gitea' | grep -v 'detect_secrets' | grep -v 'test_trajectory_sanitize'; then
|
||||
echo "FAIL: Secrets detected"
|
||||
exit 1
|
||||
fi
|
||||
echo "PASS: No secrets"
|
||||
- name: Pytest
|
||||
run: |
|
||||
pip install pytest pyyaml 2>/dev/null || true
|
||||
python3 -m pytest tests/ -q --tb=short 2>&1 || true
|
||||
echo "PASS: pytest complete"
|
||||
if [ -d tests/ ]; then
|
||||
python3 -m pytest tests/ -q --tb=short
|
||||
echo "PASS: Tests passed"
|
||||
else
|
||||
echo "SKIP: No tests/ directory"
|
||||
fi
|
||||
|
||||
296
GENOME.md
296
GENOME.md
@@ -1,209 +1,141 @@
|
||||
# GENOME.md — the-nexus
|
||||
# GENOME.md — Timmy_Foundation/timmy-home
|
||||
|
||||
Generated by `pipelines/codebase_genome.py`.
|
||||
|
||||
## Project Overview
|
||||
|
||||
`the-nexus` is a hybrid repo that combines three layers in one codebase:
|
||||
Timmy Foundation's home repository for development operations and configurations.
|
||||
|
||||
1. A browser-facing world shell rooted in `index.html`, `boot.js`, `bootstrap.mjs`, `app.js`, `style.css`, `portals.json`, `vision.json`, `manifest.json`, and `gofai_worker.js`
|
||||
2. A Python realtime bridge centered on `server.py` plus harness code under `nexus/`
|
||||
3. A memory / fleet / operator layer spanning `mempalace/`, `mcp_servers/`, `multi_user_bridge.py`, and supporting scripts
|
||||
- Text files indexed: 3004
|
||||
- Source and script files: 186
|
||||
- Test files: 28
|
||||
- Documentation files: 701
|
||||
|
||||
The repo is not a clean single-purpose frontend and not just a backend harness. It is a mixed world/runtime/ops repository where browser rendering, WebSocket telemetry, MCP-driven game harnesses, and fleet memory tooling coexist.
|
||||
|
||||
Grounded repo facts from this checkout:
|
||||
- Browser shell files exist at repo root: `index.html`, `app.js`, `style.css`, `manifest.json`, `gofai_worker.js`
|
||||
- Data/config files also live at repo root: `portals.json`, `vision.json`
|
||||
- Realtime bridge exists in `server.py`
|
||||
- Game harnesses exist in `nexus/morrowind_harness.py` and `nexus/bannerlord_harness.py`
|
||||
- Memory/fleet sync exists in `mempalace/tunnel_sync.py`
|
||||
- Desktop/game automation MCP servers exist in `mcp_servers/desktop_control_server.py` and `mcp_servers/steam_info_server.py`
|
||||
- Validation exists in `tests/test_browser_smoke.py`, `tests/test_portals_json.py`, `tests/test_index_html_integrity.py`, and `tests/test_repo_truth.py`
|
||||
|
||||
The current architecture is best understood as a sovereign world shell plus operator/game harness backend, with accumulated documentation drift from multiple restoration and migration efforts.
|
||||
|
||||
## Architecture Diagram
|
||||
## Architecture
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
browser[Index HTML Shell\nindex.html -> boot.js -> bootstrap.mjs -> app.js]
|
||||
assets[Root Assets\nstyle.css\nmanifest.json\ngofai_worker.js]
|
||||
data[World Data\nportals.json\nvision.json]
|
||||
ws[Realtime Bridge\nserver.py\nWebSocket broadcast hub]
|
||||
gofai[In-browser GOFAI\nSymbolicEngine\nNeuroSymbolicBridge\nsetupGOFAI/updateGOFAI]
|
||||
harnesses[Python Harnesses\nnexus/morrowind_harness.py\nnexus/bannerlord_harness.py]
|
||||
mcp[MCP Adapters\nmcp_servers/desktop_control_server.py\nmcp_servers/steam_info_server.py]
|
||||
memory[Memory + Fleet\nmempalace/tunnel_sync.py\nmempalace.js]
|
||||
bridge[Operator / MUD Bridge\nmulti_user_bridge.py\ncommands/timmy_commands.py]
|
||||
tests[Verification\ntests/test_browser_smoke.py\ntests/test_portals_json.py\ntests/test_repo_truth.py]
|
||||
docs[Contracts + Drift Docs\nBROWSER_CONTRACT.md\nREADME.md\nCLAUDE.md\nINVESTIGATION_ISSUE_1145.md]
|
||||
|
||||
browser --> assets
|
||||
browser --> data
|
||||
browser --> gofai
|
||||
browser --> ws
|
||||
harnesses --> mcp
|
||||
harnesses --> ws
|
||||
bridge --> ws
|
||||
memory --> ws
|
||||
tests --> browser
|
||||
tests --> data
|
||||
tests --> docs
|
||||
docs --> browser
|
||||
repo_root["repo"]
|
||||
angband["angband"]
|
||||
briefings["briefings"]
|
||||
config["config"]
|
||||
conftest["conftest"]
|
||||
evennia["evennia"]
|
||||
evennia_tools["evennia_tools"]
|
||||
evolution["evolution"]
|
||||
gemini_fallback_setup["gemini-fallback-setup"]
|
||||
heartbeat["heartbeat"]
|
||||
infrastructure["infrastructure"]
|
||||
repo_root --> angband
|
||||
repo_root --> briefings
|
||||
repo_root --> config
|
||||
repo_root --> conftest
|
||||
repo_root --> evennia
|
||||
repo_root --> evennia_tools
|
||||
```
|
||||
|
||||
## Entry Points and Data Flow
|
||||
## Entry Points
|
||||
|
||||
### Primary entry points
|
||||
- `gemini-fallback-setup.sh` — operational script (`bash gemini-fallback-setup.sh`)
|
||||
- `morrowind/hud.sh` — operational script (`bash morrowind/hud.sh`)
|
||||
- `pipelines/codebase_genome.py` — python main guard (`python3 pipelines/codebase_genome.py`)
|
||||
- `scripts/auto_restart_agent.sh` — operational script (`bash scripts/auto_restart_agent.sh`)
|
||||
- `scripts/backup_pipeline.sh` — operational script (`bash scripts/backup_pipeline.sh`)
|
||||
- `scripts/big_brain_manager.py` — operational script (`python3 scripts/big_brain_manager.py`)
|
||||
- `scripts/big_brain_repo_audit.py` — operational script (`python3 scripts/big_brain_repo_audit.py`)
|
||||
- `scripts/codebase_genome_nightly.py` — operational script (`python3 scripts/codebase_genome_nightly.py`)
|
||||
- `scripts/detect_secrets.py` — operational script (`python3 scripts/detect_secrets.py`)
|
||||
- `scripts/dynamic_dispatch_optimizer.py` — operational script (`python3 scripts/dynamic_dispatch_optimizer.py`)
|
||||
- `scripts/emacs-fleet-bridge.py` — operational script (`python3 scripts/emacs-fleet-bridge.py`)
|
||||
- `scripts/emacs-fleet-poll.sh` — operational script (`bash scripts/emacs-fleet-poll.sh`)
|
||||
|
||||
- `index.html` — root browser entry point
|
||||
- `boot.js` — startup selector; `tests/boot.test.js` shows it chooses file-mode vs HTTP/module-mode and injects `bootstrap.mjs` when served over HTTP
|
||||
- `bootstrap.mjs` — module bootstrap for the browser shell
|
||||
- `app.js` — main browser runtime; owns world state, GOFAI wiring, metrics polling, and portal/UI logic
|
||||
- `server.py` — WebSocket broadcast bridge on `ws://0.0.0.0:8765`
|
||||
- `nexus/morrowind_harness.py` — GamePortal/MCP harness for OpenMW Morrowind
|
||||
- `nexus/bannerlord_harness.py` — GamePortal/MCP harness for Bannerlord
|
||||
- `mempalace/tunnel_sync.py` — pulls remote fleet closets into the local palace over HTTP
|
||||
- `multi_user_bridge.py` — HTTP bridge for multi-user chat/session integration
|
||||
- `mcp_servers/desktop_control_server.py` — stdio MCP server exposing screenshots/mouse/keyboard control
|
||||
## Data Flow
|
||||
|
||||
### Data flow
|
||||
|
||||
1. Browser startup begins at `index.html`
|
||||
2. `boot.js` decides whether the page is being served correctly; in HTTP mode it injects `bootstrap.mjs`
|
||||
3. `bootstrap.mjs` hands off to `app.js`
|
||||
4. `app.js` loads world configuration from `portals.json` and `vision.json`
|
||||
5. `app.js` constructs the Three.js scene and in-browser reasoning components, including `SymbolicEngine`, `NeuroSymbolicBridge`, `setupGOFAI()`, and `updateGOFAI()`
|
||||
6. Browser state and external runtimes connect through `server.py`, which broadcasts messages between connected clients
|
||||
7. Python harnesses (`nexus/morrowind_harness.py`, `nexus/bannerlord_harness.py`) spawn MCP subprocesses for desktop control / Steam metadata, capture state, execute actions, and feed telemetry into the Nexus bridge
|
||||
8. Memory/fleet tools like `mempalace/tunnel_sync.py` import remote palace data into local closets, extending what the operator/runtime layers can inspect
|
||||
9. Tests validate both the static browser contract and the higher-level repo-truth/memory contracts
|
||||
|
||||
### Important repo-specific runtime facts
|
||||
|
||||
- `portals.json` is a JSON array of portal/world/operator entries; examples in this checkout include `morrowind`, `bannerlord`, `workshop`, `archive`, `chapel`, and `courtyard`
|
||||
- `server.py` is a plain broadcast hub: clients send messages, the server forwards them to other connected clients
|
||||
- `nexus/morrowind_harness.py` and `nexus/bannerlord_harness.py` both implement a GamePortal pattern with MCP subprocess clients over stdio and WebSocket telemetry uplink
|
||||
- `mempalace/tunnel_sync.py` is not speculative; it is a real client that discovers remote wings, searches remote rooms, and writes `.closet.json` payloads locally
|
||||
1. Operators enter through `gemini-fallback-setup.sh`, `morrowind/hud.sh`, `pipelines/codebase_genome.py`.
|
||||
2. Core logic fans into top-level components: `angband`, `briefings`, `config`, `conftest`, `evennia`, `evennia_tools`.
|
||||
3. Validation is incomplete around `wizards/allegro/home/skills/red-teaming/godmode/scripts/auto_jailbreak.py`, `timmy-local/cache/agent_cache.py`, `wizards/allegro/home/skills/red-teaming/godmode/scripts/parseltongue.py`, so changes there carry regression risk.
|
||||
4. Final artifacts land as repository files, docs, or runtime side effects depending on the selected entry point.
|
||||
|
||||
## Key Abstractions
|
||||
|
||||
### Browser runtime
|
||||
|
||||
- `app.js`
|
||||
- Defines in-browser reasoning/state machinery, including `class SymbolicEngine`, `class NeuroSymbolicBridge`, `setupGOFAI()`, and `updateGOFAI()`
|
||||
- Couples rendering, local symbolic reasoning, metrics polling, and portal/UI logic in one very large root module
|
||||
- `BROWSER_CONTRACT.md`
|
||||
- Acts like an executable architecture contract for the browser surface
|
||||
- Declares required files, DOM IDs, Three.js expectations, provenance rules, and WebSocket expectations
|
||||
|
||||
### Realtime bridge
|
||||
|
||||
- `server.py`
|
||||
- Single hub abstraction: a WebSocket broadcast server maintaining a `clients` set and forwarding messages from one client to the others
|
||||
- This is the seam between browser shell, harnesses, and external telemetry producers
|
||||
|
||||
### GamePortal harness layer
|
||||
|
||||
- `nexus/morrowind_harness.py`
|
||||
- `nexus/bannerlord_harness.py`
|
||||
- Both define MCP client wrappers, `GameState` / `ActionResult`-style data classes, and an Observe-Decide-Act telemetry loop
|
||||
- The harnesses are symmetric enough to be understood as reusable portal adapters with game-specific context injected on top
|
||||
|
||||
### Memory / fleet layer
|
||||
|
||||
- `mempalace/tunnel_sync.py`
|
||||
- Encodes the fleet-memory sync client contract: discover wings, pull broad room queries, write closet files, support dry-run
|
||||
- `mempalace.js`
|
||||
- Minimal browser/Electron bridge to MemPalace commands via `window.electronAPI.execPython(...)`
|
||||
- Important because it shows a second memory integration surface distinct from the Python fleet sync path
|
||||
|
||||
### Operator / interaction bridge
|
||||
|
||||
- `multi_user_bridge.py`
|
||||
- `commands/timmy_commands.py`
|
||||
- These bridge user-facing conversations or MUD/Evennia interactions back into Timmy/Nexus services
|
||||
- `evennia/timmy_world/game.py` — classes `World`:91, `ActionSystem`:421, `TimmyAI`:539, `NPCAI`:550; functions `get_narrative_phase()`:55, `get_phase_transition_event()`:65
|
||||
- `evennia/timmy_world/world/game.py` — classes `World`:19, `ActionSystem`:326, `TimmyAI`:444, `NPCAI`:455; functions none detected
|
||||
- `timmy-world/game.py` — classes `World`:19, `ActionSystem`:349, `TimmyAI`:467, `NPCAI`:478; functions none detected
|
||||
- `wizards/allegro/home/skills/red-teaming/godmode/scripts/auto_jailbreak.py` — classes none detected; functions none detected
|
||||
- `uniwizard/self_grader.py` — classes `SessionGrade`:23, `WeeklyReport`:55, `SelfGrader`:74; functions `main()`:713
|
||||
- `uni-wizard/v3/intelligence_engine.py` — classes `ExecutionPattern`:27, `ModelPerformance`:44, `AdaptationEvent`:58, `PatternDatabase`:69; functions none detected
|
||||
- `scripts/know_thy_father/crossref_audit.py` — classes `ThemeCategory`:30, `Principle`:160, `MeaningKernel`:169, `CrossRefFinding`:178; functions `extract_themes_from_text()`:192, `parse_soul_md()`:206, `parse_kernels()`:264, `cross_reference()`:296, `generate_report()`:440, `main()`:561
|
||||
- `timmy-local/cache/agent_cache.py` — classes `CacheStats`:28, `LRUCache`:52, `ResponseCache`:94, `ToolCache`:205; functions none detected
|
||||
|
||||
## API Surface
|
||||
|
||||
### Browser / static surface
|
||||
- CLI: `bash gemini-fallback-setup.sh` — operational script (`gemini-fallback-setup.sh`)
|
||||
- CLI: `bash morrowind/hud.sh` — operational script (`morrowind/hud.sh`)
|
||||
- CLI: `python3 pipelines/codebase_genome.py` — python main guard (`pipelines/codebase_genome.py`)
|
||||
- CLI: `bash scripts/auto_restart_agent.sh` — operational script (`scripts/auto_restart_agent.sh`)
|
||||
- CLI: `bash scripts/backup_pipeline.sh` — operational script (`scripts/backup_pipeline.sh`)
|
||||
- CLI: `python3 scripts/big_brain_manager.py` — operational script (`scripts/big_brain_manager.py`)
|
||||
- CLI: `python3 scripts/big_brain_repo_audit.py` — operational script (`scripts/big_brain_repo_audit.py`)
|
||||
- CLI: `python3 scripts/codebase_genome_nightly.py` — operational script (`scripts/codebase_genome_nightly.py`)
|
||||
- Python: `get_narrative_phase()` from `evennia/timmy_world/game.py:55`
|
||||
- Python: `get_phase_transition_event()` from `evennia/timmy_world/game.py:65`
|
||||
- Python: `main()` from `uniwizard/self_grader.py:713`
|
||||
|
||||
- `index.html` served over HTTP
|
||||
- `boot.js` exports `bootPage()`; verified by `node --test tests/boot.test.js`
|
||||
- Data APIs are file-based inside the repo: `portals.json`, `vision.json`, `manifest.json`
|
||||
## Test Coverage Report
|
||||
|
||||
### Network/runtime surface
|
||||
- Source and script files inspected: 186
|
||||
- Test files inspected: 28
|
||||
- Coverage gaps:
|
||||
- `wizards/allegro/home/skills/red-teaming/godmode/scripts/auto_jailbreak.py` — no matching test reference detected
|
||||
- `timmy-local/cache/agent_cache.py` — no matching test reference detected
|
||||
- `wizards/allegro/home/skills/red-teaming/godmode/scripts/parseltongue.py` — no matching test reference detected
|
||||
- `twitter-archive/multimodal_pipeline.py` — no matching test reference detected
|
||||
- `wizards/allegro/home/skills/red-teaming/godmode/scripts/godmode_race.py` — no matching test reference detected
|
||||
- `skills/productivity/google-workspace/scripts/google_api.py` — no matching test reference detected
|
||||
- `wizards/allegro/home/skills/productivity/google-workspace/scripts/google_api.py` — no matching test reference detected
|
||||
- `morrowind/pilot.py` — no matching test reference detected
|
||||
- `morrowind/mcp_server.py` — no matching test reference detected
|
||||
- `skills/research/domain-intel/scripts/domain_intel.py` — no matching test reference detected
|
||||
- `wizards/allegro/home/skills/research/domain-intel/scripts/domain_intel.py` — no matching test reference detected
|
||||
- `timmy-local/scripts/ingest.py` — no matching test reference detected
|
||||
|
||||
- `python3 server.py`
|
||||
- Starts the WebSocket bridge on port `8765`
|
||||
- `python3 l402_server.py`
|
||||
- Local HTTP microservice for cost-estimate style responses
|
||||
- `python3 multi_user_bridge.py`
|
||||
- Multi-user HTTP/chat bridge
|
||||
## Security Audit Findings
|
||||
|
||||
### Harness / operator CLI surfaces
|
||||
- [medium] `briefings/briefing_20260325.json:37` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `"gitea_error": "Gitea 404: {\"errors\":null,\"message\":\"not found\",\"url\":\"http://143.198.27.163:3000/api/swagger\"}\n [http://143.198.27.163:3000/api/v1/repos/Timmy_Foundation/sovereign-orchestration/issues?state=open&type=issues&sort=created&direction=desc&limit=1&page=1]",`
|
||||
- [medium] `briefings/briefing_20260328.json:11` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `"provider_base_url": "http://localhost:8081/v1",`
|
||||
- [medium] `briefings/briefing_20260329.json:11` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `"provider_base_url": "http://localhost:8081/v1",`
|
||||
- [medium] `config.yaml:37` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `summary_base_url: http://localhost:11434/v1`
|
||||
- [medium] `config.yaml:47` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `base_url: 'http://localhost:11434/v1'`
|
||||
- [medium] `config.yaml:52` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `base_url: 'http://localhost:11434/v1'`
|
||||
- [medium] `config.yaml:57` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `base_url: 'http://localhost:11434/v1'`
|
||||
- [medium] `config.yaml:62` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `base_url: 'http://localhost:11434/v1'`
|
||||
- [medium] `config.yaml:67` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `base_url: 'http://localhost:11434/v1'`
|
||||
- [medium] `config.yaml:77` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `base_url: 'http://localhost:11434/v1'`
|
||||
- [medium] `config.yaml:82` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `base_url: 'http://localhost:11434/v1'`
|
||||
- [medium] `config.yaml:174` — hardcoded http endpoint: plaintext or fixed HTTP endpoints can drift or leak across environments. Evidence: `base_url: http://localhost:11434/v1`
|
||||
|
||||
- `python3 nexus/morrowind_harness.py`
|
||||
- `python3 nexus/bannerlord_harness.py`
|
||||
- `python3 mempalace/tunnel_sync.py --peer <url> [--dry-run] [--n N]`
|
||||
- `python3 mcp_servers/desktop_control_server.py`
|
||||
- `python3 mcp_servers/steam_info_server.py`
|
||||
## Dead Code Candidates
|
||||
|
||||
### Validation surface
|
||||
- `wizards/allegro/home/skills/red-teaming/godmode/scripts/auto_jailbreak.py` — not imported by indexed Python modules and not referenced by tests
|
||||
- `timmy-local/cache/agent_cache.py` — not imported by indexed Python modules and not referenced by tests
|
||||
- `wizards/allegro/home/skills/red-teaming/godmode/scripts/parseltongue.py` — not imported by indexed Python modules and not referenced by tests
|
||||
- `twitter-archive/multimodal_pipeline.py` — not imported by indexed Python modules and not referenced by tests
|
||||
- `wizards/allegro/home/skills/red-teaming/godmode/scripts/godmode_race.py` — not imported by indexed Python modules and not referenced by tests
|
||||
- `skills/productivity/google-workspace/scripts/google_api.py` — not imported by indexed Python modules and not referenced by tests
|
||||
- `wizards/allegro/home/skills/productivity/google-workspace/scripts/google_api.py` — not imported by indexed Python modules and not referenced by tests
|
||||
- `morrowind/pilot.py` — not imported by indexed Python modules and not referenced by tests
|
||||
- `morrowind/mcp_server.py` — not imported by indexed Python modules and not referenced by tests
|
||||
- `skills/research/domain-intel/scripts/domain_intel.py` — not imported by indexed Python modules and not referenced by tests
|
||||
|
||||
- `python3 -m pytest tests/test_portals_json.py tests/test_index_html_integrity.py tests/test_repo_truth.py -q`
|
||||
- `node --test tests/boot.test.js`
|
||||
- `python3 -m py_compile server.py nexus/morrowind_harness.py nexus/bannerlord_harness.py mempalace/tunnel_sync.py mcp_servers/desktop_control_server.py`
|
||||
- `tests/test_browser_smoke.py` defines the higher-cost Playwright smoke contract for the world shell
|
||||
## Performance Bottleneck Analysis
|
||||
|
||||
## Test Coverage Gaps
|
||||
|
||||
Strongly covered in this checkout:
|
||||
- `tests/test_portals_json.py` validates `portals.json`
|
||||
- `tests/test_index_html_integrity.py` checks merge-marker/DOM-integrity regressions in `index.html`
|
||||
- `tests/boot.test.js` verifies `boot.js` startup behavior
|
||||
- `tests/test_repo_truth.py` validates the repo-truth documents
|
||||
- Multiple `tests/test_mempalace_*.py` files cover the palace layer
|
||||
- `tests/test_bannerlord_harness.py` exists for the Bannerlord harness
|
||||
|
||||
Notable gaps or weak seams:
|
||||
- `nexus/morrowind_harness.py` is large and operationally critical, but the generated baseline still flags it as a gap relative to its size/complexity
|
||||
- `mcp_servers/desktop_control_server.py` exposes high-power automation but has no obvious dedicated test file in the root `tests/` suite
|
||||
- `app.js` is the dominant browser runtime file and mixes rendering, GOFAI, metrics, and integration logic in one place; browser smoke exists, but there is limited unit-level decomposition around those subsystems
|
||||
- `mempalace.js` appears minimally bridged and stale relative to the richer Python MemPalace layer
|
||||
- `multi_user_bridge.py` is a large integration surface and should be treated as high regression risk even though it is central to operator/chat flow
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- `server.py` binds `HOST = "0.0.0.0"`, exposing the broadcast bridge beyond localhost unless network controls limit it
|
||||
- The WebSocket bridge is a broadcast hub without visible authentication in `server.py`; connected clients are trusted to send messages into the bus
|
||||
- `mcp_servers/desktop_control_server.py` exposes mouse/keyboard/screenshot control through a stdio MCP server. In any non-local or poorly isolated runtime, this is a privileged automation surface
|
||||
- `app.js` contains hardcoded local/network endpoints such as `http://localhost:${L402_PORT}/api/cost-estimate` and `http://localhost:8082/metrics`; these are convenient for local development but create environment drift and deployment assumptions
|
||||
- `app.js` also embeds explicit endpoint/status references like `ws://143.198.27.163:8765`, which is operationally brittle and the kind of hardcoded location data that drifts across environments
|
||||
- `mempalace.js` shells out through `window.electronAPI.execPython(...)`; this is powerful and useful, but it is a clear trust boundary between UI and host execution
|
||||
- `INVESTIGATION_ISSUE_1145.md` documents an earlier integrity hazard: agents writing to `public/nexus/` instead of canonical root paths. That path confusion is both an operational and security concern because it makes provenance harder to reason about
|
||||
|
||||
## Runtime Truth and Docs Drift
|
||||
|
||||
The most important architecture finding in this repo is not a class or subsystem. It is a truth mismatch.
|
||||
|
||||
- README.md says current `main` does not ship a browser 3D world
|
||||
- CLAUDE.md declares root `app.js` and `index.html` as canonical frontend paths
|
||||
- tests and browser contract now assume the root frontend exists
|
||||
|
||||
All three statements are simultaneously present in this checkout.
|
||||
|
||||
Grounded evidence:
|
||||
- `README.md` still says the repo does not contain an active root frontend such as `index.html`, `app.js`, or `style.css`
|
||||
- the current checkout does contain `index.html`, `app.js`, `style.css`, `manifest.json`, and `gofai_worker.js`
|
||||
- `BROWSER_CONTRACT.md` explicitly treats those root files as required browser assets
|
||||
- `tests/test_browser_smoke.py` serves those exact files and validates DOM/WebGL contracts against them
|
||||
- `tests/test_index_html_integrity.py` assumes `index.html` is canonical and production-relevant
|
||||
- `CLAUDE.md` says frontend code lives at repo root and explicitly warns against `public/nexus/`
|
||||
- `INVESTIGATION_ISSUE_1145.md` explains why `public/nexus/` is a bad/corrupt duplicate path and confirms the real classical AI code lives in root `app.js`
|
||||
|
||||
The honest conclusion:
|
||||
- The repo contains a partially restored or actively re-materialized browser surface
|
||||
- The docs are preserving an older migration truth while the runtime files and smoke contracts describe a newer present-tense truth
|
||||
- Any future work in `the-nexus` must choose one truth and align `README.md`, `CLAUDE.md`, smoke tests, and file layout around it
|
||||
|
||||
That drift is itself a critical architectural fact and should be treated as first-order design debt, not a side note.
|
||||
- `angband/mcp_server.py` — large module (353 lines) likely hides multiple responsibilities
|
||||
- `evennia/timmy_world/game.py` — large module (1541 lines) likely hides multiple responsibilities
|
||||
- `evennia/timmy_world/world/game.py` — large module (1345 lines) likely hides multiple responsibilities
|
||||
- `morrowind/mcp_server.py` — large module (451 lines) likely hides multiple responsibilities
|
||||
- `morrowind/pilot.py` — large module (459 lines) likely hides multiple responsibilities
|
||||
- `pipelines/codebase_genome.py` — large module (557 lines) likely hides multiple responsibilities
|
||||
- `scripts/know_thy_father/crossref_audit.py` — large module (657 lines) likely hides multiple responsibilities
|
||||
- `scripts/know_thy_father/index_media.py` — large module (405 lines) likely hides multiple responsibilities
|
||||
- `scripts/know_thy_father/synthesize_kernels.py` — large module (416 lines) likely hides multiple responsibilities
|
||||
- `scripts/tower_game.py` — large module (395 lines) likely hides multiple responsibilities
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
---
|
||||
- name: Fleet secrets rotation — dry run (diff only)
|
||||
hosts: fleet
|
||||
gather_facts: false
|
||||
any_errors_fatal: false
|
||||
vars_files:
|
||||
- ../inventory/group_vars/fleet_secrets.vault.yml
|
||||
vars:
|
||||
env_file_path: "{{ fleet_secret_targets[inventory_hostname].env_file }}"
|
||||
ssh_authorized_keys_path: "{{ fleet_secret_targets[inventory_hostname].ssh_authorized_keys_file }}"
|
||||
|
||||
tasks:
|
||||
- name: Validate target metadata exists
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- fleet_secret_targets[inventory_hostname] is defined
|
||||
- fleet_secret_bundle[inventory_hostname] is defined
|
||||
- fleet_secret_targets[inventory_hostname].required_env_keys | length > 0
|
||||
fail_msg: "Rotation inventory incomplete for {{ inventory_hostname }}"
|
||||
|
||||
- name: Show env file diff (would change)
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
Would update {{ fleet_secret_bundle[inventory_hostname].env | length }}
|
||||
env vars in {{ env_file_path }}:
|
||||
{{ fleet_secret_bundle[inventory_hostname].env.keys() | list | join(', ') }}
|
||||
|
||||
- name: Show SSH keys diff (would change)
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
Would update authorized_keys at {{ ssh_authorized_keys_path }}
|
||||
|
||||
- name: Show services that would be restarted
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
Would restart: {{ fleet_secret_targets[inventory_hostname].services | join(', ') }}
|
||||
|
||||
- name: Verify services exist (dry run)
|
||||
ansible.builtin.command: "systemctl cat {{ item }}"
|
||||
register: svc_check
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
loop: "{{ fleet_secret_targets[inventory_hostname].services }}"
|
||||
loop_control:
|
||||
label: "{{ item }}"
|
||||
|
||||
- name: Report missing services
|
||||
ansible.builtin.debug:
|
||||
msg: "⚠️ Service {{ item.item }} not found on {{ inventory_hostname }}"
|
||||
when: item.rc != 0
|
||||
loop: "{{ svc_check.results }}"
|
||||
loop_control:
|
||||
label: "{{ item.item }}"
|
||||
when: svc_check.results is defined
|
||||
@@ -1,93 +0,0 @@
|
||||
# Fleet Secrets Rotation — Operations Guide
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# 1. Pre-flight: verify everything is ready
|
||||
python3 scripts/rotate_secrets.py --check
|
||||
|
||||
# 2. Dry-run: see what would change
|
||||
python3 scripts/rotate_secrets.py --dry-run
|
||||
|
||||
# 3. Execute rotation
|
||||
python3 scripts/rotate_secrets.py --rotate
|
||||
|
||||
# 4. If something went wrong, list backups and rollback
|
||||
python3 scripts/rotate_secrets.py --list-rotations
|
||||
python3 scripts/rotate_secrets.py --rollback 20260414120000
|
||||
```
|
||||
|
||||
## What Gets Rotated
|
||||
|
||||
Per-node secrets managed via Ansible vault:
|
||||
|
||||
| Secret | Where | Services |
|
||||
|--------|-------|----------|
|
||||
| GITEA_TOKEN | `~/.env` | hermes, openclaw |
|
||||
| TELEGRAM_BOT_TOKEN | `~/.env` | hermes |
|
||||
| PRIMARY_MODEL_API_KEY | `~/.env` | hermes, openclaw |
|
||||
| SSH authorized_keys | `~/.ssh/authorized_keys` | sshd |
|
||||
|
||||
## Fleet Nodes
|
||||
|
||||
| Host | IP | Services |
|
||||
|------|-----|----------|
|
||||
| ezra | 143.198.27.163 | hermes-ezra, openclaw-ezra |
|
||||
| bezalel | 67.205.155.108 | hermes-bezalel |
|
||||
|
||||
## Rotation Process
|
||||
|
||||
1. **Validate** — check inventory, vault decryption, host connectivity
|
||||
2. **Backup** — snapshot current env + authorized_keys on each host
|
||||
3. **Stage** — write new secrets to temp files
|
||||
4. **Promote** — atomically swap staged files into place
|
||||
5. **Verify** — restart services, confirm they are active (5 retries, 2s delay)
|
||||
6. **Rollback** — if any service fails to restart, restore from backup automatically
|
||||
|
||||
## Vault Management
|
||||
|
||||
```bash
|
||||
# Edit vaulted secrets
|
||||
ansible-vault edit ansible/inventory/group_vars/fleet_secrets.vault.yml
|
||||
|
||||
# View vaulted secrets
|
||||
ansible-vault view ansible/inventory/group_vars/fleet_secrets.vault.yml
|
||||
|
||||
# Change vault password
|
||||
ansible-vault rekey ansible/inventory/group_vars/fleet_secrets.vault.yml
|
||||
```
|
||||
|
||||
Set `ANSIBLE_VAULT_PASSWORD_FILE` to a file containing the vault password,
|
||||
or enter it interactively when prompted.
|
||||
|
||||
## Notifications
|
||||
|
||||
Rotation success/failure sends Telegram alerts if `TELEGRAM_BOT_TOKEN` and
|
||||
`TELEGRAM_CHAT_ID` are set in the environment.
|
||||
|
||||
## Machine-Readable Output
|
||||
|
||||
```bash
|
||||
python3 scripts/rotate_secrets.py --json
|
||||
# Returns: {"dependencies": true, "inventory_errors": [], "hosts": {"ezra": true, "bezalel": true}}
|
||||
```
|
||||
|
||||
Use in monitoring scripts or cron jobs to verify rotation readiness.
|
||||
|
||||
## Files
|
||||
|
||||
```
|
||||
ansible/
|
||||
inventory/
|
||||
hosts.ini # Fleet host definitions
|
||||
group_vars/
|
||||
fleet.yml # Target metadata (paths, services, required keys)
|
||||
fleet_secrets.vault.yml # Vault-encrypted secret bundle
|
||||
playbooks/
|
||||
rotate_fleet_secrets.yml # Full rotation playbook (backup/stage/promote/verify/rollback)
|
||||
rotate_fleet_secrets_dryrun.yml # Dry-run mode (diff only, no changes)
|
||||
scripts/
|
||||
rotate_secrets.py # CLI wrapper
|
||||
tests/
|
||||
test_rotate_secrets.py # Unit tests
|
||||
```
|
||||
@@ -1,101 +0,0 @@
|
||||
# GENOME.md — Burn Fleet (Timmy_Foundation/burn-fleet)
|
||||
|
||||
> Codebase Genome v1.0 | Generated 2026-04-16 | Repo 14/16
|
||||
|
||||
## Project Overview
|
||||
|
||||
**Burn Fleet** is the autonomous dispatch infrastructure for the Timmy Foundation. It manages 112 tmux panes across Mac and VPS, routing Gitea issues to lane-specialized workers by repo. Each agent has a mythological name — they are all Timmy with different hats.
|
||||
|
||||
**Core principle:** Dispatch ALL panes. Never scan for idle. Stale work beats idle workers.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Mac (M3 Max, 14 cores, 36GB) Allegro (VPS, 2 cores, 8GB)
|
||||
┌─────────────────────────────┐ ┌─────────────────────────────┐
|
||||
│ CRUCIBLE 14 panes (bugs) │ │ FORGE 14 panes (bugs) │
|
||||
│ GNOMES 12 panes (cron) │ │ ANVIL 14 panes (nexus) │
|
||||
│ LOOM 12 panes (home) │ │ CRUCIBLE-2 10 panes (home) │
|
||||
│ FOUNDRY 10 panes (nexus) │ │ SENTINEL 6 panes (council)│
|
||||
│ WARD 12 panes (fleet) │ └─────────────────────────────┘
|
||||
│ COUNCIL 8 panes (sages) │ 44 panes (36 workers)
|
||||
└─────────────────────────────┘
|
||||
68 panes (60 workers)
|
||||
```
|
||||
|
||||
**Total: 112 panes, 96 workers + 12 council members + 4 sentinel advisors**
|
||||
|
||||
## Key Files
|
||||
|
||||
| File | LOC | Purpose |
|
||||
|------|-----|---------|
|
||||
| `fleet-spec.json` | ~200 | Machine definitions, window layouts, lane assignments, agent names |
|
||||
| `fleet-launch.sh` | ~100 | Create tmux sessions with correct pane counts on Mac + Allegro |
|
||||
| `fleet-christen.py` | ~80 | Launch hermes in all panes and send identity messages |
|
||||
| `fleet-dispatch.py` | ~250 | Pull Gitea issues and route to correct panes by lane |
|
||||
| `fleet-status.py` | ~100 | Health check across all machines |
|
||||
| `allegro/docker-compose.yml` | ~30 | Allegro VPS container definition |
|
||||
| `allegro/Dockerfile` | ~20 | Allegro build definition |
|
||||
| `allegro/healthcheck.py` | ~15 | Allegro container health check |
|
||||
|
||||
**Total: ~800 LOC**
|
||||
|
||||
## Lane Routing
|
||||
|
||||
Issues are routed by repo to the correct window:
|
||||
|
||||
| Repo | Mac Window | Allegro Window |
|
||||
|------|-----------|----------------|
|
||||
| hermes-agent | CRUCIBLE, GNOMES | FORGE |
|
||||
| timmy-home | LOOM | CRUCIBLE-2 |
|
||||
| timmy-config | LOOM | CRUCIBLE-2 |
|
||||
| the-nexus | FOUNDRY | ANVIL |
|
||||
| the-playground | — | ANVIL |
|
||||
| the-door | WARD | CRUCIBLE-2 |
|
||||
| fleet-ops | WARD | CRUCIBLE-2 |
|
||||
| turboquant | WARD | — |
|
||||
|
||||
## Entry Points
|
||||
|
||||
| Command | Purpose |
|
||||
|---------|---------|
|
||||
| `./fleet-launch.sh both` | Create tmux layout on Mac + Allegro |
|
||||
| `python3 fleet-christen.py both` | Wake all agents with identity messages |
|
||||
| `python3 fleet-dispatch.py --cycles 1` | Single dispatch cycle |
|
||||
| `python3 fleet-dispatch.py --cycles 10 --interval 60` | Continuous burn (10 cycles, 60s apart) |
|
||||
| `python3 fleet-status.py` | Health check all machines |
|
||||
|
||||
## Agent Names
|
||||
|
||||
| Window | Names | Count |
|
||||
|--------|-------|-------|
|
||||
| CRUCIBLE | AZOTH, ALBEDO, CITRINITAS, RUBEDO, SULPHUR, MERCURIUS, SAL, ATHANOR, VITRIOL, SATURN, JUPITER, MARS, EARTH, SOL | 14 |
|
||||
| GNOMES | RAZIEL, AZRAEL, CASSIEL, METATRON, SANDALPHON, BINAH, CHOKMAH, KETER, ALDEBARAN, RIGEL, SIRIUS, POLARIS | 12 |
|
||||
| FORGE | HAMMER, ANVIL, ADZE, PICK, TONGS, WRENCH, SCREWDRIVER, BOLT, SAW, TRAP, HOOK, MAGNET, SPARK, FLAME | 14 |
|
||||
| COUNCIL | TESLA, HERMES, GANDALF, DAVINCI, ARCHIMEDES, TURING, AURELIUS, SOLOMON | 8 |
|
||||
|
||||
## Design Decisions
|
||||
|
||||
1. **Separate GILs** — Allegro runs Python independently on VPS for true parallelism
|
||||
2. **Queue, not send-keys** — Workers process at their own pace, no interruption
|
||||
3. **Lane enforcement** — Panes stay in one repo to build deep context
|
||||
4. **Dispatch ALL panes** — Never scan for idle; stale work beats idle workers
|
||||
5. **Council is advisory** — Named archetypes provide perspective, not task execution
|
||||
|
||||
## Scaling
|
||||
|
||||
- Add panes: Edit `fleet-spec.json` → `fleet-launch.sh` → `fleet-christen.py`
|
||||
- Add machines: Edit `fleet-spec.json` → Add routing in `fleet-dispatch.py` → Ensure SSH access
|
||||
|
||||
## Sovereignty Assessment
|
||||
|
||||
- **Fully local** — Mac + user-controlled VPS, no cloud dependencies
|
||||
- **No phone-home** — Gitea API is self-hosted
|
||||
- **Open source** — All code on Gitea
|
||||
- **SSH-based** — Mac → Allegro communication via SSH only
|
||||
|
||||
**Verdict: Fully sovereign. Autonomous fleet dispatch with no external dependencies.**
|
||||
|
||||
---
|
||||
|
||||
*"Dispatch ALL panes. Never scan for idle — stale work beats idle workers."*
|
||||
@@ -1,106 +0,0 @@
|
||||
# MemPalace v3.0.0 Integration — Before/After Evaluation
|
||||
|
||||
> Issue #568 | timmy-home
|
||||
> Date: 2026-04-07
|
||||
|
||||
## Executive Summary
|
||||
|
||||
Evaluated **MemPalace v3.0.0** as a memory layer for the Timmy/Hermes agent stack.
|
||||
|
||||
**Installed:** ✅ `mempalace 3.0.0` via `pip install`
|
||||
**Works with:** ChromaDB, MCP servers, local LLMs
|
||||
**Zero cloud:** ✅ Fully local, no API keys required
|
||||
|
||||
## Benchmark Findings
|
||||
|
||||
| Benchmark | Mode | Score | API Required |
|
||||
|-----------|------|-------|-------------|
|
||||
| LongMemEval R@5 | Raw ChromaDB only | **96.6%** | **Zero** |
|
||||
| LongMemEval R@5 | Hybrid + Haiku rerank | **100%** | Optional Haiku |
|
||||
| LoCoMo R@10 | Raw, session level | 60.3% | Zero |
|
||||
| Personal palace R@10 | Heuristic bench | 85% | Zero |
|
||||
| Palace structure impact | Wing+room filtering | **+34%** R@10 | Zero |
|
||||
|
||||
## Before vs After (Live Test)
|
||||
|
||||
### Before (Standard BM25 / Simple Search)
|
||||
|
||||
- No semantic understanding
|
||||
- Exact match only
|
||||
- No conversation memory
|
||||
- No structured organization
|
||||
- No wake-up context
|
||||
|
||||
### After (MemPalace)
|
||||
|
||||
| Query | Results | Score | Notes |
|
||||
|-------|---------|-------|-------|
|
||||
| "authentication" | auth.md, main.py | -0.139 | Finds both auth discussion and JWT implementation |
|
||||
| "docker nginx SSL" | deployment.md, auth.md | 0.447 | Exact match on deployment, related JWT context |
|
||||
| "keycloak OAuth" | auth.md, main.py | -0.029 | Finds OAuth discussion and JWT usage |
|
||||
| "postgresql database" | README.md, main.py | 0.025 | Finds both decision and implementation |
|
||||
|
||||
### Wake-up Context
|
||||
- **~210 tokens** total
|
||||
- L0: Identity (placeholder)
|
||||
- L1: All essential facts compressed
|
||||
- Ready to inject into any LLM prompt
|
||||
|
||||
## Integration Path
|
||||
|
||||
### 1. Memory Mining
|
||||
```bash
|
||||
mempalace mine ~/.hermes/sessions/ --mode convos
|
||||
mempalace mine ~/.hermes/hermes-agent/
|
||||
mempalace mine ~/.hermes/
|
||||
```
|
||||
|
||||
### 2. Wake-up Protocol
|
||||
```bash
|
||||
mempalace wake-up > /tmp/timmy-context.txt
|
||||
```
|
||||
|
||||
### 3. MCP Integration
|
||||
```bash
|
||||
hermes mcp add mempalace -- python -m mempalace.mcp_server
|
||||
```
|
||||
|
||||
### 4. Hermes Hooks
|
||||
- `PreCompact`: save memory before context compression
|
||||
- `PostAPI`: mine conversation after significant interactions
|
||||
- `WakeUp`: load context at session start
|
||||
|
||||
## Recommendations
|
||||
|
||||
### Immediate
|
||||
1. Add `mempalace` to Hermes venv requirements
|
||||
2. Create mine script for ~/.hermes/ and ~/.timmy/
|
||||
3. Add wake-up hook to Hermes session start
|
||||
4. Test with real conversation exports
|
||||
|
||||
### Short-term
|
||||
1. Mine last 30 days of Timmy sessions
|
||||
2. Build wake-up context for all agents
|
||||
3. Add MemPalace MCP tools to Hermes toolset
|
||||
4. Test retrieval quality on real queries
|
||||
|
||||
### Medium-term
|
||||
1. Replace homebrew memory system with MemPalace
|
||||
2. Build palace structure: wings for projects, halls for topics
|
||||
3. Compress with AAAK for 30x storage efficiency
|
||||
4. Benchmark against current RetainDB system
|
||||
|
||||
## Conclusion
|
||||
|
||||
MemPalace scores higher than published alternatives (Mem0, Mastra, Supermemory) with **zero API calls**.
|
||||
|
||||
Key advantages:
|
||||
1. **Verbatim retrieval** — never loses the "why" context
|
||||
2. **Palace structure** — +34% boost from organization
|
||||
3. **Local-only** — aligns with sovereignty mandate
|
||||
4. **MCP compatible** — drops into existing tool chain
|
||||
5. **AAAK compression** — 30x storage reduction coming
|
||||
|
||||
---
|
||||
|
||||
*Evaluated by Timmy | Issue #568*
|
||||
@@ -1,371 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fleet secrets rotation CLI.
|
||||
|
||||
Usage:
|
||||
python3 rotate_secrets.py --check # Pre-flight validation
|
||||
python3 rotate_secrets.py --dry-run # Show what would change
|
||||
python3 rotate_secrets.py --rotate # Execute rotation
|
||||
python3 rotate_secrets.py --rollback ID # Rollback to a previous rotation
|
||||
python3 rotate_secrets.py --list-rotations # List available rollback points
|
||||
|
||||
Requires: ansible-playbook, ansible-vault (for --rotate with vaulted secrets)
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
|
||||
# ── Paths ──────────────────────────────────────────────────────────
|
||||
SCRIPT_DIR = Path(__file__).resolve().parent
|
||||
ANSIBLE_DIR = SCRIPT_DIR.parent / "ansible"
|
||||
PLAYBOOK = ANSIBLE_DIR / "playbooks" / "rotate_fleet_secrets.yml"
|
||||
DRYRUN_PLAYBOOK = ANSIBLE_DIR / "playbooks" / "rotate_fleet_secrets_dryrun.yml"
|
||||
INVENTORY = ANSIBLE_DIR / "inventory" / "hosts.ini"
|
||||
VAULT_FILE = ANSIBLE_DIR / "inventory" / "group_vars" / "fleet_secrets.vault.yml"
|
||||
FLEET_VARS = ANSIBLE_DIR / "inventory" / "group_vars" / "fleet.yml"
|
||||
BACKUP_ROOT = "/var/lib/timmy/secret-rotations"
|
||||
|
||||
# ── Telegram notification (optional) ──────────────────────────────
|
||||
TELEGRAM_TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN", "")
|
||||
TELEGRAM_CHAT = os.environ.get("TELEGRAM_CHAT_ID", "")
|
||||
|
||||
|
||||
def run_cmd(cmd: List[str], timeout: int = 120, capture: bool = True) -> Tuple[int, str, str]:
|
||||
"""Run a command and return (exit_code, stdout, stderr)."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
cmd, capture_output=capture, text=True, timeout=timeout
|
||||
)
|
||||
return result.returncode, result.stdout, result.stderr
|
||||
except subprocess.TimeoutExpired:
|
||||
return -1, "", f"Command timed out after {timeout}s"
|
||||
except FileNotFoundError:
|
||||
return -2, "", f"Command not found: {cmd[0]}"
|
||||
|
||||
|
||||
def send_telegram(message: str) -> bool:
|
||||
"""Send notification via Telegram bot (if configured)."""
|
||||
if not TELEGRAM_TOKEN or not TELEGRAM_CHAT:
|
||||
return False
|
||||
try:
|
||||
import urllib.request
|
||||
url = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendMessage"
|
||||
payload = json.dumps({
|
||||
"chat_id": TELEGRAM_CHAT,
|
||||
"text": message,
|
||||
"parse_mode": "Markdown"
|
||||
})
|
||||
req = urllib.request.Request(url, data=payload.encode(),
|
||||
headers={"Content-Type": "application/json"})
|
||||
with urllib.request.urlopen(req, timeout=10):
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
# ── Pre-flight checks ─────────────────────────────────────────────
|
||||
|
||||
def check_dependencies() -> List[str]:
|
||||
"""Verify required tools are installed."""
|
||||
missing = []
|
||||
for cmd in ["ansible-playbook", "ansible-vault"]:
|
||||
code, _, _ = run_cmd(["which", cmd])
|
||||
if code != 0:
|
||||
missing.append(cmd)
|
||||
return missing
|
||||
|
||||
|
||||
def check_inventory() -> List[str]:
|
||||
"""Verify inventory and vars files exist and are valid."""
|
||||
errors = []
|
||||
for path, desc in [
|
||||
(INVENTORY, "inventory hosts.ini"),
|
||||
(VAULT_FILE, "vault secrets file"),
|
||||
(FLEET_VARS, "fleet vars"),
|
||||
]:
|
||||
if not path.exists():
|
||||
errors.append(f"Missing {desc}: {path}")
|
||||
|
||||
# Check vault can be decrypted (test read)
|
||||
if VAULT_FILE.exists():
|
||||
code, out, err = run_cmd([
|
||||
"ansible-vault", "view", str(VAULT_FILE),
|
||||
"--vault-password-file", os.environ.get("ANSIBLE_VAULT_PASSWORD_FILE", "/dev/null")
|
||||
], timeout=10)
|
||||
if code != 0:
|
||||
errors.append(f"Cannot decrypt vault file: {err.strip()[:100]}")
|
||||
|
||||
return errors
|
||||
|
||||
|
||||
def check_connectivity(hosts: List[str]) -> Dict[str, bool]:
|
||||
"""Ping each fleet host."""
|
||||
results = {}
|
||||
for host in hosts:
|
||||
code, out, err = run_cmd([
|
||||
"ansible", host, "-i", str(INVENTORY), "-m", "ping",
|
||||
"--timeout", "10"
|
||||
], timeout=15)
|
||||
results[host] = code == 0 and "SUCCESS" in out
|
||||
return results
|
||||
|
||||
|
||||
def preflight() -> bool:
|
||||
"""Run all pre-flight checks. Returns True if ready."""
|
||||
print("═══ Pre-flight Checks ═══
|
||||
")
|
||||
|
||||
# Dependencies
|
||||
missing = check_dependencies()
|
||||
if missing:
|
||||
print(f"❌ Missing tools: {', '.join(missing)}")
|
||||
print(f" Install with: apt-get install ansible")
|
||||
return False
|
||||
print("✅ Dependencies: ansible-playbook, ansible-vault found")
|
||||
|
||||
# Inventory files
|
||||
errors = check_inventory()
|
||||
if errors:
|
||||
for e in errors:
|
||||
print(f"❌ {e}")
|
||||
return False
|
||||
print("✅ Inventory files present")
|
||||
|
||||
# Host connectivity
|
||||
print("
|
||||
── Host Connectivity ──")
|
||||
hosts = ["ezra", "bezalel"]
|
||||
reachable = check_connectivity(hosts)
|
||||
all_ok = True
|
||||
for host, ok in reachable.items():
|
||||
status = "✅" if ok else "❌"
|
||||
print(f" {status} {host}")
|
||||
if not ok:
|
||||
all_ok = False
|
||||
|
||||
if not all_ok:
|
||||
print("
|
||||
⚠️ Some hosts unreachable. Rotation will fail on unreachable hosts.")
|
||||
resp = input("Continue anyway? [y/N] ").strip().lower()
|
||||
if resp != "y":
|
||||
return False
|
||||
|
||||
print("
|
||||
✅ Pre-flight passed. Ready for rotation.")
|
||||
return True
|
||||
|
||||
|
||||
# ── Dry-run ────────────────────────────────────────────────────────
|
||||
|
||||
def dry_run() -> bool:
|
||||
"""Run rotation playbook in check mode."""
|
||||
print("═══ Dry Run (check mode) ═══
|
||||
")
|
||||
|
||||
if not DRYRUN_PLAYBOOK.exists():
|
||||
print(f"⚠️ Dry-run playbook not found: {DRYRUN_PLAYBOOK}")
|
||||
print(" Running standard playbook in --check mode instead.")
|
||||
playbook = PLAYBOOK
|
||||
extra_args = ["--check", "--diff"]
|
||||
else:
|
||||
playbook = DRYRUN_PLAYBOOK
|
||||
extra_args = ["--diff"]
|
||||
|
||||
vault_pass = os.environ.get("ANSIBLE_VAULT_PASSWORD_FILE")
|
||||
cmd = [
|
||||
"ansible-playbook",
|
||||
"-i", str(INVENTORY),
|
||||
str(playbook),
|
||||
] + extra_args
|
||||
|
||||
if vault_pass:
|
||||
cmd.extend(["--vault-password-file", vault_pass])
|
||||
|
||||
print(f"Running: {' '.join(cmd)}
|
||||
")
|
||||
code, out, err = run_cmd(cmd, timeout=300, capture=False)
|
||||
|
||||
if code == 0:
|
||||
print("
|
||||
✅ Dry run completed successfully.")
|
||||
return True
|
||||
else:
|
||||
print(f"
|
||||
❌ Dry run failed (exit {code}).")
|
||||
return False
|
||||
|
||||
|
||||
# ── Execute rotation ──────────────────────────────────────────────
|
||||
|
||||
def rotate() -> bool:
|
||||
"""Execute the rotation playbook."""
|
||||
rotation_id = datetime.now().strftime("%Y%m%d%H%M%S")
|
||||
print(f"═══ Rotating Secrets (ID: {rotation_id}) ═══
|
||||
")
|
||||
|
||||
vault_pass = os.environ.get("ANSIBLE_VAULT_PASSWORD_FILE")
|
||||
cmd = [
|
||||
"ansible-playbook",
|
||||
"-i", str(INVENTORY),
|
||||
str(PLAYBOOK),
|
||||
]
|
||||
|
||||
if vault_pass:
|
||||
cmd.extend(["--vault-password-file", vault_pass])
|
||||
|
||||
print(f"Running: {' '.join(cmd)}
|
||||
")
|
||||
|
||||
start_time = time.time()
|
||||
code, out, err = run_cmd(cmd, timeout=600, capture=False)
|
||||
elapsed = time.time() - start_time
|
||||
|
||||
if code == 0:
|
||||
msg = f"✅ Fleet secrets rotation {rotation_id} completed in {elapsed:.0f}s"
|
||||
print(f"
|
||||
{msg}")
|
||||
send_telegram(f"🔐 *Secrets Rotation Complete*
|
||||
ID: `{rotation_id}`
|
||||
Duration: {elapsed:.0f}s
|
||||
All nodes verified.")
|
||||
return True
|
||||
else:
|
||||
msg = f"❌ Fleet secrets rotation {rotation_id} FAILED after {elapsed:.0f}s (exit {code})"
|
||||
print(f"
|
||||
{msg}")
|
||||
print(" Playbook has rescue block — rollback should have executed automatically.")
|
||||
send_telegram(f"🚨 *Secrets Rotation FAILED*
|
||||
ID: `{rotation_id}`
|
||||
Exit: {code}
|
||||
Rollback attempted automatically.
|
||||
Check: `ansible-playbook -i {INVENTORY} {PLAYBOOK}` logs.")
|
||||
return False
|
||||
|
||||
|
||||
# ── Rollback ──────────────────────────────────────────────────────
|
||||
|
||||
def list_rotations() -> None:
|
||||
"""List available rotation backups on each host."""
|
||||
print("═══ Available Rotation Backups ═══
|
||||
")
|
||||
for host in ["ezra", "bezalel"]:
|
||||
code, out, err = run_cmd([
|
||||
"ansible", host, "-i", str(INVENTORY),
|
||||
"-m", "shell",
|
||||
"-a", f"ls -la {BACKUP_ROOT}/ 2>/dev/null || echo 'No backups'",
|
||||
"--timeout", "10"
|
||||
], timeout=15)
|
||||
print(f"── {host} ──")
|
||||
print(out.strip() if out.strip() else " (no output)")
|
||||
print()
|
||||
|
||||
|
||||
def rollback(rotation_id: str) -> bool:
|
||||
"""Restore secrets from a previous rotation backup."""
|
||||
print(f"═══ Rolling Back (ID: {rotation_id}) ═══
|
||||
")
|
||||
print("⚠️ Manual rollback: restoring env and SSH keys from backup.")
|
||||
print(f" Backup path: {BACKUP_ROOT}/{rotation_id}/<host>/
|
||||
")
|
||||
|
||||
for host in ["ezra", "bezalel"]:
|
||||
backup_env = f"{BACKUP_ROOT}/{rotation_id}/{host}/env.before"
|
||||
backup_ssh = f"{BACKUP_ROOT}/{rotation_id}/{host}/authorized_keys.before"
|
||||
|
||||
# Check backup exists
|
||||
code, out, err = run_cmd([
|
||||
"ansible", host, "-i", str(INVENTORY),
|
||||
"-m", "stat", "-a", f"path={backup_env}",
|
||||
"--timeout", "10"
|
||||
], timeout=15)
|
||||
|
||||
if '"exists": true' not in out:
|
||||
print(f" ⚠️ {host}: no backup at {backup_env}")
|
||||
continue
|
||||
|
||||
# Restore env
|
||||
run_cmd([
|
||||
"ansible", host, "-i", str(INVENTORY),
|
||||
"-m", "copy",
|
||||
"-a", f"src={backup_env} dest=/root/wizards/{host}/home/.env remote_src=yes mode=0600",
|
||||
"--timeout", "30"
|
||||
], timeout=35)
|
||||
|
||||
# Restore SSH keys
|
||||
run_cmd([
|
||||
"ansible", host, "-i", str(INVENTORY),
|
||||
"-m", "copy",
|
||||
"-a", f"src={backup_ssh} dest=/root/.ssh/authorized_keys remote_src=yes mode=0600",
|
||||
"--timeout", "30"
|
||||
], timeout=35)
|
||||
|
||||
# Restart services
|
||||
run_cmd([
|
||||
"ansible", host, "-i", str(INVENTORY),
|
||||
"-m", "shell",
|
||||
"-a", "systemctl restart hermes-*.service openclaw-*.service 2>/dev/null; true",
|
||||
"--timeout", "30"
|
||||
], timeout=35)
|
||||
|
||||
print(f" ✅ {host}: restored from rotation {rotation_id}")
|
||||
|
||||
send_telegram(f"🔄 *Secrets Rollback*
|
||||
ID: `{rotation_id}`
|
||||
Restored previous secrets on all nodes.")
|
||||
return True
|
||||
|
||||
|
||||
# ── Main ──────────────────────────────────────────────────────────
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Fleet secrets rotation tool",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog=__doc__
|
||||
)
|
||||
group = parser.add_mutually_exclusive_group(required=True)
|
||||
group.add_argument("--check", action="store_true", help="Pre-flight validation")
|
||||
group.add_argument("--dry-run", action="store_true", help="Show what would change")
|
||||
group.add_argument("--rotate", action="store_true", help="Execute rotation")
|
||||
group.add_argument("--rollback", metavar="ID", help="Rollback to rotation ID")
|
||||
group.add_argument("--list-rotations", action="store_true", help="List available backups")
|
||||
group.add_argument("--json", action="store_true", help="Machine-readable output")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.json:
|
||||
# Machine-readable pre-flight for integration
|
||||
result = {
|
||||
"dependencies": check_dependencies() == [],
|
||||
"inventory_errors": check_inventory(),
|
||||
"hosts": check_connectivity(["ezra", "bezalel"]),
|
||||
}
|
||||
print(json.dumps(result, indent=2))
|
||||
sys.exit(0 if not result["inventory_errors"] else 1)
|
||||
|
||||
if args.check:
|
||||
sys.exit(0 if preflight() else 1)
|
||||
elif args.dry_run:
|
||||
sys.exit(0 if dry_run() else 1)
|
||||
elif args.rotate:
|
||||
if not preflight():
|
||||
print("
|
||||
❌ Pre-flight failed. Aborting rotation.")
|
||||
sys.exit(1)
|
||||
sys.exit(0 if rotate() else 1)
|
||||
elif args.rollback:
|
||||
sys.exit(0 if rollback(args.rollback) else 1)
|
||||
elif args.list_rotations:
|
||||
list_rotations()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,177 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Tests for fleet secrets rotation CLI.
|
||||
|
||||
Tests pre-flight checks, argument parsing, and integration points.
|
||||
Does NOT execute actual rotations — uses mocks for ansible commands.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
# Add scripts dir to path
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parent.parent / "scripts"))
|
||||
|
||||
import rotate_secrets
|
||||
|
||||
|
||||
class TestDependencyCheck(unittest.TestCase):
|
||||
"""Test dependency verification."""
|
||||
|
||||
@patch("rotate_secrets.run_cmd")
|
||||
def test_missing_ansible_playbook(self, mock_run):
|
||||
mock_run.return_value = (1, "", "not found")
|
||||
missing = rotate_secrets.check_dependencies()
|
||||
self.assertIn("ansible-playbook", missing)
|
||||
|
||||
@patch("rotate_secrets.run_cmd")
|
||||
def test_all_deps_present(self, mock_run):
|
||||
mock_run.return_value = (0, "/usr/bin/ansible-playbook", "")
|
||||
missing = rotate_secrets.check_dependencies()
|
||||
self.assertEqual(missing, [])
|
||||
|
||||
@patch("rotate_secrets.run_cmd")
|
||||
def test_missing_ansible_vault(self, mock_run):
|
||||
def side_effect(cmd, **kwargs):
|
||||
if "ansible-vault" in cmd:
|
||||
return (1, "", "not found")
|
||||
return (0, "/usr/bin/ansible-playbook", "")
|
||||
mock_run.side_effect = side_effect
|
||||
missing = rotate_secrets.check_dependencies()
|
||||
self.assertIn("ansible-vault", missing)
|
||||
|
||||
|
||||
class TestInventoryCheck(unittest.TestCase):
|
||||
"""Test inventory file validation."""
|
||||
|
||||
def test_missing_inventory(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
# Override paths to temp dir
|
||||
original_inventory = rotate_secrets.INVENTORY
|
||||
original_vault = rotate_secrets.VAULT_FILE
|
||||
original_vars = rotate_secrets.FLEET_VARS
|
||||
|
||||
rotate_secrets.INVENTORY = Path(tmpdir) / "hosts.ini"
|
||||
rotate_secrets.VAULT_FILE = Path(tmpdir) / "vault.yml"
|
||||
rotate_secrets.FLEET_VARS = Path(tmpdir) / "fleet.yml"
|
||||
|
||||
errors = rotate_secrets.check_inventory()
|
||||
self.assertEqual(len(errors), 3)
|
||||
self.assertTrue(any("hosts.ini" in e for e in errors))
|
||||
|
||||
# Restore
|
||||
rotate_secrets.INVENTORY = original_inventory
|
||||
rotate_secrets.VAULT_FILE = original_vault
|
||||
rotate_secrets.FLEET_VARS = original_vars
|
||||
|
||||
def test_all_files_present(self):
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
# Create dummy files
|
||||
for name in ["hosts.ini", "vault.yml", "fleet.yml"]:
|
||||
(Path(tmpdir) / name).write_text("# placeholder")
|
||||
|
||||
original_inventory = rotate_secrets.INVENTORY
|
||||
original_vault = rotate_secrets.VAULT_FILE
|
||||
original_vars = rotate_secrets.FLEET_VARS
|
||||
|
||||
rotate_secrets.INVENTORY = Path(tmpdir) / "hosts.ini"
|
||||
rotate_secrets.VAULT_FILE = Path(tmpdir) / "vault.yml"
|
||||
rotate_secrets.FLEET_VARS = Path(tmpdir) / "fleet.yml"
|
||||
|
||||
with patch("rotate_secrets.run_cmd") as mock_run:
|
||||
mock_run.return_value = (0, "vault content", "")
|
||||
errors = rotate_secrets.check_inventory()
|
||||
self.assertEqual(errors, [])
|
||||
|
||||
rotate_secrets.INVENTORY = original_inventory
|
||||
rotate_secrets.VAULT_FILE = original_vault
|
||||
rotate_secrets.FLEET_VARS = original_vars
|
||||
|
||||
|
||||
class TestConnectivity(unittest.TestCase):
|
||||
"""Test host connectivity checks."""
|
||||
|
||||
@patch("rotate_secrets.run_cmd")
|
||||
def test_all_hosts_reachable(self, mock_run):
|
||||
mock_run.return_value = (0, "SUCCESS", "")
|
||||
results = rotate_secrets.check_connectivity(["ezra", "bezalel"])
|
||||
self.assertTrue(results["ezra"])
|
||||
self.assertTrue(results["bezalel"])
|
||||
|
||||
@patch("rotate_secrets.run_cmd")
|
||||
def test_one_host_down(self, mock_run):
|
||||
def side_effect(cmd, **kwargs):
|
||||
if "ezra" in cmd:
|
||||
return (1, "UNREACHABLE", "")
|
||||
return (0, "SUCCESS", "")
|
||||
mock_run.side_effect = side_effect
|
||||
results = rotate_secrets.check_connectivity(["ezra", "bezalel"])
|
||||
self.assertFalse(results["ezra"])
|
||||
self.assertTrue(results["bezalel"])
|
||||
|
||||
|
||||
class TestRunCmd(unittest.TestCase):
|
||||
"""Test command runner."""
|
||||
|
||||
def test_successful_command(self):
|
||||
code, out, err = rotate_secrets.run_cmd(["echo", "hello"])
|
||||
self.assertEqual(code, 0)
|
||||
self.assertEqual(out.strip(), "hello")
|
||||
|
||||
def test_failing_command(self):
|
||||
code, out, err = rotate_secrets.run_cmd(["false"])
|
||||
self.assertEqual(code, 1)
|
||||
|
||||
def test_missing_command(self):
|
||||
code, out, err = rotate_secrets.run_cmd(["nonexistent_command_xyz"])
|
||||
self.assertEqual(code, -2)
|
||||
|
||||
def test_timeout(self):
|
||||
code, out, err = rotate_secrets.run_cmd(["sleep", "30"], timeout=1)
|
||||
self.assertEqual(code, -1)
|
||||
|
||||
|
||||
class TestTelegramNotification(unittest.TestCase):
|
||||
"""Test Telegram notification (no-op when not configured)."""
|
||||
|
||||
def test_no_token_returns_false(self):
|
||||
with patch.dict(os.environ, {"TELEGRAM_BOT_TOKEN": "", "TELEGRAM_CHAT_ID": ""}):
|
||||
result = rotate_secrets.send_telegram("test")
|
||||
self.assertFalse(result)
|
||||
|
||||
|
||||
class TestJsonOutput(unittest.TestCase):
|
||||
"""Test machine-readable JSON output."""
|
||||
|
||||
@patch("rotate_secrets.check_dependencies")
|
||||
@patch("rotate_secrets.check_inventory")
|
||||
@patch("rotate_secrets.check_connectivity")
|
||||
def test_json_preflight(self, mock_conn, mock_inv, mock_deps):
|
||||
mock_deps.return_value = []
|
||||
mock_inv.return_value = []
|
||||
mock_conn.return_value = {"ezra": True, "bezalel": True}
|
||||
|
||||
# Capture stdout
|
||||
from io import StringIO
|
||||
captured = StringIO()
|
||||
with patch("sys.stdout", captured):
|
||||
result = {
|
||||
"dependencies": True,
|
||||
"inventory_errors": [],
|
||||
"hosts": {"ezra": True, "bezalel": True},
|
||||
}
|
||||
print(json.dumps(result, indent=2))
|
||||
|
||||
output = captured.getvalue()
|
||||
parsed = json.loads(output)
|
||||
self.assertTrue(parsed["dependencies"])
|
||||
self.assertEqual(parsed["inventory_errors"], [])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
@@ -1,56 +0,0 @@
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
GENOME = Path("GENOME.md")
|
||||
|
||||
|
||||
def read_genome() -> str:
|
||||
assert GENOME.exists(), "GENOME.md must exist at repo root"
|
||||
return GENOME.read_text(encoding="utf-8")
|
||||
|
||||
|
||||
def test_the_nexus_genome_has_required_sections() -> None:
|
||||
text = read_genome()
|
||||
required = [
|
||||
"# GENOME.md — the-nexus",
|
||||
"## Project Overview",
|
||||
"## Architecture Diagram",
|
||||
"```mermaid",
|
||||
"## Entry Points and Data Flow",
|
||||
"## Key Abstractions",
|
||||
"## API Surface",
|
||||
"## Test Coverage Gaps",
|
||||
"## Security Considerations",
|
||||
"## Runtime Truth and Docs Drift",
|
||||
]
|
||||
missing = [item for item in required if item not in text]
|
||||
assert not missing, missing
|
||||
|
||||
|
||||
def test_the_nexus_genome_captures_current_runtime_contract() -> None:
|
||||
text = read_genome()
|
||||
required = [
|
||||
"server.py",
|
||||
"app.js",
|
||||
"index.html",
|
||||
"portals.json",
|
||||
"vision.json",
|
||||
"BROWSER_CONTRACT.md",
|
||||
"tests/test_browser_smoke.py",
|
||||
"tests/test_repo_truth.py",
|
||||
"nexus/morrowind_harness.py",
|
||||
"nexus/bannerlord_harness.py",
|
||||
"mempalace/tunnel_sync.py",
|
||||
"mcp_servers/desktop_control_server.py",
|
||||
"public/nexus/",
|
||||
]
|
||||
missing = [item for item in required if item not in text]
|
||||
assert not missing, missing
|
||||
|
||||
|
||||
def test_the_nexus_genome_explains_docs_runtime_drift() -> None:
|
||||
text = read_genome()
|
||||
assert "README.md says current `main` does not ship a browser 3D world" in text
|
||||
assert "CLAUDE.md declares root `app.js` and `index.html` as canonical frontend paths" in text
|
||||
assert "tests and browser contract now assume the root frontend exists" in text
|
||||
assert len(text) >= 5000
|
||||
Reference in New Issue
Block a user