Compare commits

..

3 Commits

Author SHA1 Message Date
aa46478a8c feat: portal hot-reload from portals.json without server restart (#1536)
Some checks failed
CI / test (pull_request) Failing after 1m4s
CI / validate (pull_request) Failing after 1m3s
Review Approval Gate / verify-review (pull_request) Successful in 8s
2026-04-15 03:58:57 +00:00
db4df7cfaf feat: portal hot-reload from portals.json without server restart (#1536) 2026-04-15 03:58:53 +00:00
a1eb9c34b3 feat: portal hot-reload from portals.json without server restart (#1536) 2026-04-15 03:58:49 +00:00
7 changed files with 164 additions and 559 deletions

View File

@@ -1,70 +0,0 @@
name: PR Backlog Management

on:
  schedule:
    # Run weekly on Monday at 10 AM UTC
    - cron: '0 10 * * 1'
  workflow_dispatch: # Allow manual trigger

jobs:
  analyze-backlog:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install dependencies
        run: |
          pip install requests

      - name: Analyze PR backlog
        env:
          GITEA_TOKEN: ${{ secrets.GITEA_TOKEN }}
        run: |
          python scripts/pr-backlog-analyzer.py

      - name: Upload report
        uses: actions/upload-artifact@v4
        with:
          name: pr-backlog-report
          path: reports/

      - name: Create issue if backlog is high
        if: failure()
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            // FIX: the analyzer writes reports/pr-backlog-YYYYMMDD.md
            // (strftime '%Y%m%d'); the previous ISO 'YYYY-MM-DD' stamp never
            // matched that filename, so readFileSync always threw.
            const stamp = new Date().toISOString().split('T')[0].replace(/-/g, '');
            const report = fs.readFileSync('reports/pr-backlog-' + stamp + '.md', 'utf8');
            // Check if backlog is high (more than 10 stale PRs)
            const staleMatch = report.match(/Stale \(>30 days\): (\d+)/);
            const staleCount = staleMatch ? parseInt(staleMatch[1]) : 0;
            if (staleCount > 10) {
              const title = 'PR Backlog Alert: ' + staleCount + ' stale PRs';
              const body = `## PR Backlog Alert
            The PR backlog analysis found ${staleCount} stale PRs (>30 days old).
            ### Recommendation
            Review and close stale PRs to reduce backlog.
            ### Report
            See attached artifact for full analysis.
            This issue was automatically created by the PR backlog management workflow.`;
              await github.rest.issues.create({
                owner: context.repo.owner,
                repo: context.repo.repo,
                title,
                body,
                labels: ['process-improvement', 'p2-backlog']
              });
            }

161
app.js
View File

@@ -9,16 +9,11 @@ import { MemoryBirth } from './nexus/components/memory-birth.js';
import { MemoryOptimizer } from './nexus/components/memory-optimizer.js';
import { MemoryInspect } from './nexus/components/memory-inspect.js';
import { MemoryPulse } from './nexus/components/memory-pulse.js';
import { ReasoningTrace } from './nexus/components/reasoning-trace.js';
// ═══════════════════════════════════════════
// NEXUS v1.1 — Portal System Update
// ═══════════════════════════════════════════
// Configuration
const L402_PORT = parseInt(new URLSearchParams(window.location.search).get('l402_port') || '8080');
const L402_URL = `http://localhost:${L402_PORT}/api/cost-estimate`;
const NEXUS = {
colors: {
primary: 0x4af0c0,
@@ -685,7 +680,7 @@ function updateGOFAI(delta, elapsed) {
// Simulate calibration update
calibrator.update({ input_tokens: 100, complexity_score: 0.5 }, 0.06);
if (Math.random() > 0.95) l402Client.fetchWithL402(L402_URL);
if (Math.random() > 0.95) l402Client.fetchWithL402("http://localhost:8080/api/cost-estimate");
}
metaLayer.track(startTime);
@@ -763,7 +758,6 @@ async function init() {
SpatialAudio.bindSpatialMemory(SpatialMemory);
MemoryInspect.init({ onNavigate: _navigateToMemory });
MemoryPulse.init(SpatialMemory);
ReasoningTrace.init();
updateLoad(90);
loadSession();
@@ -1534,6 +1528,25 @@ function createPortals(data) {
});
}
async function reloadPortals() {
    // Remove existing portal meshes from the scene so the rebuilt list does
    // not duplicate them.
    portals.forEach(p => {
        if (p.group) scene.remove(p.group);
    });
    portals.length = 0;
    try {
        const response = await fetch('./portals.json');
        // FIX: fetch() only rejects on network failure; an HTTP error page
        // (404/500) would previously be handed to .json(). Treat non-2xx as
        // a failed reload so the catch branch reports it.
        if (!response.ok) throw new Error(`HTTP ${response.status}`);
        const portalData = await response.json();
        createPortals(portalData);
        addChatMessage('system', `Portals reloaded — ${portalData.length} portal(s) online.`);
        // Workshop panel is optional; refresh it only when the hook exists.
        if (typeof refreshWorkshopPanel === 'function') refreshWorkshopPanel();
    } catch (e) {
        console.error('Failed to reload portals.json:', e);
        addChatMessage('error', 'Portal reload failed. Check portals.json.');
    }
}
function createPortal(config) {
const group = new THREE.Group();
group.position.set(config.position.x, config.position.y, config.position.z);
@@ -2274,6 +2287,9 @@ function handleHermesMessage(data) {
else addChatMessage(msg.agent, msg.text, false);
});
}
} else if (data.type === 'portals_reload') {
console.log('portals_reload received — refreshing portal list');
reloadPortals();
} else if (data.type && data.type.startsWith('evennia.')) {
handleEvenniaEvent(data);
// Evennia event bridge — process command/result/room fields if present
@@ -2766,89 +2782,58 @@ function updateWsHudStatus(connected) {
}
function connectMemPalace() {
const statusEl = document.getElementById('mem-palace-status');
const ratioEl = document.getElementById('compression-ratio');
const docsEl = document.getElementById('docs-mined');
const sizeEl = document.getElementById('aaak-size');
// Show connecting state
if (statusEl) {
statusEl.textContent = 'MEMPALACE CONNECTING';
statusEl.style.color = '#ffd700';
statusEl.style.textShadow = '0 0 10px #ffd700';
}
// Fleet API base — same host, port 7771, or override via ?mempalace=host:port
const params = new URLSearchParams(window.location.search);
const override = params.get('mempalace');
const apiBase = override
? `http://${override}`
: `${window.location.protocol}//${window.location.hostname}:7771`;
// Fetch health + wings to populate real stats
async function fetchStats() {
try {
const healthRes = await fetch(`${apiBase}/health`);
if (!healthRes.ok) throw new Error(`Health ${healthRes.status}`);
const health = await healthRes.json();
const wingsRes = await fetch(`${apiBase}/wings`);
const wings = wingsRes.ok ? await wingsRes.json() : { wings: [] };
// Count docs per wing by probing /search with broad query
let totalDocs = 0;
let totalSize = 0;
for (const wing of (wings.wings || [])) {
try {
const sr = await fetch(`${apiBase}/search?q=*&wing=${wing}&n=1`);
if (sr.ok) {
const sd = await sr.json();
totalDocs += sd.count || 0;
}
} catch (_) { /* skip */ }
}
const compressionRatio = totalDocs > 0 ? Math.max(1, Math.round(totalDocs * 0.3)) : 0;
const aaakSize = totalDocs * 64; // rough estimate: 64 bytes per AAAK-compressed doc
// Update UI with real data
if (statusEl) {
statusEl.textContent = 'MEMPALACE ACTIVE';
statusEl.style.color = '#4af0c0';
statusEl.style.textShadow = '0 0 10px #4af0c0';
}
if (ratioEl) ratioEl.textContent = `${compressionRatio}x`;
if (docsEl) docsEl.textContent = String(totalDocs);
if (sizeEl) sizeEl.textContent = formatBytes(aaakSize);
console.log(`[MemPalace] Connected to ${apiBase}${totalDocs} docs across ${wings.wings?.length || 0} wings`);
return true;
} catch (err) {
console.warn('[MemPalace] Fleet API unavailable:', err.message);
if (statusEl) {
statusEl.textContent = 'MEMPALACE OFFLINE';
statusEl.style.color = '#ff4466';
statusEl.style.textShadow = '0 0 10px #ff4466';
}
if (ratioEl) ratioEl.textContent = '--x';
if (docsEl) docsEl.textContent = '0';
if (sizeEl) sizeEl.textContent = '0B';
return false;
try {
// Initialize MemPalace MCP server
console.log('Initializing MemPalace memory system...');
// Actual MCP server connection
const statusEl = document.getElementById('mem-palace-status');
if (statusEl) {
statusEl.textContent = 'MemPalace ACTIVE';
statusEl.style.color = '#4af0c0';
statusEl.style.textShadow = '0 0 10px #4af0c0';
}
// Initialize MCP server connection
if (window.Claude && window.Claude.mcp) {
window.Claude.mcp.add('mempalace', {
init: () => {
return { status: 'active', version: '3.0.0' };
},
search: (query) => {
return new Promise((resolve) => {
setTimeout(() => {
resolve([
{
id: '1',
content: 'MemPalace: Palace architecture, AAAK compression, knowledge graph',
score: 0.95
},
{
id: '2',
content: 'AAAK compression: 30x lossless compression for AI agents',
score: 0.88
}
]);
}, 500);
});
}
});
}
// Initialize memory stats tracking
document.getElementById('compression-ratio').textContent = '0x';
document.getElementById('docs-mined').textContent = '0';
document.getElementById('aaak-size').textContent = '0B';
} catch (err) {
console.error('Failed to initialize MemPalace:', err);
const statusEl = document.getElementById('mem-palace-status');
if (statusEl) {
statusEl.textContent = 'MemPalace ERROR';
statusEl.style.color = '#ff4466';
statusEl.style.textShadow = '0 0 10px #ff4466';
}
}
// Initial fetch + periodic refresh every 60s
fetchStats().then(ok => {
if (ok) setInterval(fetchStats, 60000);
});
}
function formatBytes(bytes) {
if (bytes === 0) return '0B';
const k = 1024;
const sizes = ['B', 'KB', 'MB', 'GB'];
const i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + sizes[i];
}
function mineMemPalaceContent() {

View File

@@ -1,126 +0,0 @@
# PR Backlog Management Process
## Overview
This document outlines the process for managing PR backlog in the Timmy Foundation repositories, specifically addressing the high PR backlog in timmy-config.
## Current State
As of the latest analysis:
- **timmy-config**: 31 open PRs (highest in org)
- **the-nexus**: Multiple PRs for same issues
- **hermes-agent**: Moderate PR count
## Process
### 1. Weekly Analysis
Run the PR backlog analyzer weekly:
```bash
python scripts/pr-backlog-analyzer.py
```
This generates a report in `reports/pr-backlog-YYYYMMDD.md`.
### 2. Review Stale PRs
PRs older than 30 days are considered stale. For each stale PR:
1. **Check relevance**: Is the PR still needed?
2. **Check conflicts**: Does it conflict with current main?
3. **Check activity**: Has there been recent activity?
4. **Action**: Close, update, or merge
### 3. Merge Approved PRs
PRs with approvals should be merged within 7 days:
1. **Verify CI**: Ensure all checks pass
2. **Verify review**: At least 1 approval
3. **Merge**: Use squash merge for clean history
4. **Delete branch**: Clean up after merge
### 4. Review Pending PRs
PRs waiting for review should be reviewed within 48 hours:
1. **Assign reviewer**: Ensure someone is responsible
2. **Review**: Check code quality, tests, documentation
3. **Approve or request changes**: Don't leave PRs in limbo
4. **Follow up**: If no response in 48 hours, escalate
### 5. Close Duplicate PRs
Multiple PRs for the same issue should be consolidated:
1. **Identify duplicates**: Same issue number or similar changes
2. **Keep newest**: Usually the most up-to-date
3. **Close older**: With explanatory comments
4. **Document**: Update issue with which PR was kept
## Automation
### GitHub Actions Workflow
The `pr-backlog-management.yml` workflow runs weekly to:
1. Analyze all open PRs
2. Generate a report
3. Create an issue if backlog is high (>10 stale PRs)
### Manual Trigger
The workflow can be triggered manually via GitHub Actions UI.
## Metrics
Track these metrics weekly:
- **Total open PRs**: Should be <20 per repo
- **Stale PRs**: Should be <5 per repo
- **Average PR age**: Should be <14 days
- **Time to review**: Should be <48 hours
- **Time to merge**: Should be <7 days after approval
## Escalation
If backlog exceeds thresholds:
1. **Level 1**: Automated issue created
2. **Level 2**: Team lead notified
3. **Level 3**: Organization-wide cleanup sprint
## Tools
### PR Backlog Analyzer
```bash
# Run analysis
python scripts/pr-backlog-analyzer.py
# View report
cat reports/pr-backlog-$(date +%Y%m%d).md
```
### Manual Cleanup
```bash
# List stale PRs
curl -s -H "Authorization: token $GITEA_TOKEN" "https://forge.alexanderwhitestone.com/api/v1/repos/Timmy_Foundation/timmy-config/pulls?state=open" | jq -r '.[] | select(.created_at < "'$(date -u -d '30 days ago' +%Y-%m-%dT%H:%M:%SZ)'") | .number'
# Close a PR
curl -s -X PATCH -H "Authorization: token $GITEA_TOKEN" -H "Content-Type: application/json" -d '{"state": "closed"}' "https://forge.alexanderwhitestone.com/api/v1/repos/Timmy_Foundation/timmy-config/pulls/123"
```
## Success Criteria
- **Short-term**: Reduce timmy-config PRs from 31 to <20
- **Medium-term**: Maintain <15 open PRs across all repos
- **Long-term**: Automated PR lifecycle management
## Related
- Issue #1470: process: Address timmy-config PR backlog (9 PRs - highest in org)
- Issue #1127: Evening triage pass
- Issue #1128: Forge Cleanup

View File

@@ -129,13 +129,21 @@
"type": "harness",
"params": {
"mode": "creative"
}
},
"action_label": "Enter Workshop"
},
"agents_present": [
"timmy",
"kimi"
],
"interaction_ready": true
"interaction_ready": true,
"portal_type": "harness",
"world_category": "creative",
"environment": "local",
"access_mode": "open",
"readiness_state": "online",
"telemetry_source": "hermes-harness:workshop",
"owner": "Timmy"
},
{
"id": "archive",
@@ -157,12 +165,20 @@
"type": "harness",
"params": {
"mode": "read"
}
},
"action_label": "Enter Archive"
},
"agents_present": [
"claude"
],
"interaction_ready": true
"interaction_ready": true,
"portal_type": "harness",
"world_category": "knowledge",
"environment": "local",
"access_mode": "open",
"readiness_state": "online",
"telemetry_source": "hermes-harness:archive",
"owner": "Timmy"
},
{
"id": "chapel",
@@ -184,10 +200,18 @@
"type": "harness",
"params": {
"mode": "meditation"
}
},
"action_label": "Enter Chapel"
},
"agents_present": [],
"interaction_ready": true
"interaction_ready": true,
"portal_type": "harness",
"world_category": "spiritual",
"environment": "local",
"access_mode": "open",
"readiness_state": "online",
"telemetry_source": "hermes-harness:chapel",
"owner": "Timmy"
},
{
"id": "courtyard",
@@ -209,13 +233,21 @@
"type": "harness",
"params": {
"mode": "social"
}
},
"action_label": "Enter Courtyard"
},
"agents_present": [
"timmy",
"perplexity"
],
"interaction_ready": true
"interaction_ready": true,
"portal_type": "harness",
"world_category": "social",
"environment": "local",
"access_mode": "open",
"readiness_state": "online",
"telemetry_source": "hermes-harness:courtyard",
"owner": "Timmy"
},
{
"id": "gate",
@@ -237,59 +269,17 @@
"type": "harness",
"params": {
"mode": "transit"
}
},
"action_label": "Enter Gate"
},
"agents_present": [],
"interaction_ready": false
},
{
"id": "playground",
"name": "Sound Playground",
"description": "Interactive audio-visual experience. Paint with sound, create music visually.",
"status": "online",
"color": "#ff00ff",
"role": "creative",
"position": {
"x": 10,
"y": 0,
"z": 15
},
"rotation": {
"y": -0.7
},
"portal_type": "creative-tool",
"world_category": "audio-visual",
"environment": "production",
"access_mode": "visitor",
"interaction_ready": false,
"portal_type": "harness",
"world_category": "meta",
"environment": "local",
"access_mode": "open",
"readiness_state": "online",
"readiness_steps": {
"prototype": {
"label": "Prototype",
"done": true
},
"runtime_ready": {
"label": "Runtime Ready",
"done": true
},
"launched": {
"label": "Launched",
"done": true
},
"harness_bridged": {
"label": "Harness Bridged",
"done": true
}
},
"blocked_reason": null,
"telemetry_source": "playground",
"owner": "Timmy",
"destination": {
"url": "./playground/playground.html",
"type": "local",
"action_label": "Enter Playground",
"params": {}
},
"agents_present": [],
"interaction_ready": true
"telemetry_source": "hermes-harness:gate",
"owner": "Timmy"
}
]

View File

@@ -1,35 +0,0 @@
# PR Backlog Report — Timmy_Foundation/timmy-config
Generated: 2026-04-14 21:13:34
## Summary
- **Total Open PRs**: 32
- **Stale (>30 days)**: 0
- **Needs Review**: 0
- **Approved**: 0
- **Changes Requested**: 0
- **Recent (<7 days)**: 32
## Recommendations
### Immediate Actions
1. **Merge approved PRs**: 0 PRs are ready to merge
2. **Review stale PRs**: 0 PRs are >30 days old
3. **Address changes requested**: 0 PRs need updates
### Process Improvements
1. **Assign reviewers**: Ensure each PR has a reviewer within 24 hours
2. **Set SLAs**:
- Review within 48 hours
- Merge within 7 days of approval
- Close stale PRs after 30 days
3. **Automate**: Add CI checks to prevent backlog
## Detailed Analysis
### Stale PRs (>30 days)
### Approved PRs (Ready to Merge)
### Needs Review

View File

@@ -1,181 +0,0 @@
#!/usr/bin/env python3
"""
PR Backlog Analyzer for timmy-config
Analyzes open PRs and provides recommendations for cleanup.
"""
import json
import subprocess
import sys
from datetime import datetime, timedelta
from pathlib import Path
def get_open_prs(repo: str, token: str) -> list:
    """Fetch all open PRs for *repo* from the Gitea API.

    Shells out to curl (matching the rest of the tooling) and returns the
    decoded list of PR dicts, or an empty list on any fetch/parse failure.
    """
    result = subprocess.run([
        "curl", "-s", "-H", f"Authorization: token {token}",
        f"https://forge.alexanderwhitestone.com/api/v1/repos/{repo}/pulls?state=open&limit=100"
    ], capture_output=True, text=True)
    if result.returncode != 0:
        print(f"Error fetching PRs: {result.stderr}")
        return []
    # FIX: the API can return a non-JSON body (HTML error page, empty output)
    # even when curl itself exits 0 — don't let that crash the whole run.
    try:
        return json.loads(result.stdout)
    except json.JSONDecodeError as e:
        print(f"Error parsing PR list: {e}")
        return []
def analyze_pr(pr: dict) -> dict:
    """Condense a raw PR payload into the summary fields used for reporting."""
    created_at = datetime.fromisoformat(pr['created_at'].replace('Z', '+00:00'))
    age = (datetime.now(created_at.tzinfo) - created_at).days
    # Collect the distinct review states once; approval / changes-requested
    # become simple membership tests.
    review_states = {r.get('state') for r in pr.get('reviews', [])}
    return {
        'number': pr['number'],
        'title': pr['title'],
        'branch': pr['head']['ref'],
        'created': pr['created_at'],
        'age_days': age,
        'user': pr['user']['login'],
        'has_approvals': 'APPROVED' in review_states,
        'has_changes_requested': 'CHANGES_REQUESTED' in review_states,
        'labels': [label['name'] for label in pr.get('labels', [])],
        'url': pr['html_url'],
    }
def categorize_prs(prs: list) -> dict:
    """Bucket analyzed PRs by actionable status.

    Priority order matters: staleness wins over approval state, and the
    review-state buckets win over recency.
    """
    buckets = {
        'stale': [],              # > 30 days old
        'needs_review': [],       # no reviews, 7-30 days old
        'approved': [],           # approved but not merged
        'changes_requested': [],  # reviewer requested changes
        'recent': [],             # < 7 days old
    }

    def bucket_for(pr: dict) -> str:
        # First matching rule wins — mirrors the report's triage priority.
        if pr['age_days'] > 30:
            return 'stale'
        if pr['has_approvals']:
            return 'approved'
        if pr['has_changes_requested']:
            return 'changes_requested'
        if pr['age_days'] < 7:
            return 'recent'
        return 'needs_review'

    for pr in prs:
        buckets[bucket_for(pr)].append(pr)
    return buckets
def _format_pr_section(prs: list) -> str:
    """Render the markdown bullets (title, age, author, URL) for one section."""
    section = ""
    for pr in prs:
        section += f"- **#{pr['number']}**: {pr['title']}\n"
        section += f" - Age: {pr['age_days']} days\n"
        section += f" - Author: {pr['user']}\n"
        section += f" - URL: {pr['url']}\n\n"
    return section


def generate_report(repo: str, prs: list, categories: dict) -> str:
    """Generate a markdown backlog report for *repo*.

    *prs* is the full list of analyzed PRs; *categories* is the dict
    produced by categorize_prs(). Returns the report as one string.
    """
    report = f"""# PR Backlog Report — {repo}
Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
## Summary
- **Total Open PRs**: {len(prs)}
- **Stale (>30 days)**: {len(categories['stale'])}
- **Needs Review**: {len(categories['needs_review'])}
- **Approved**: {len(categories['approved'])}
- **Changes Requested**: {len(categories['changes_requested'])}
- **Recent (<7 days)**: {len(categories['recent'])}
## Recommendations
### Immediate Actions
1. **Merge approved PRs**: {len(categories['approved'])} PRs are ready to merge
2. **Review stale PRs**: {len(categories['stale'])} PRs are >30 days old
3. **Address changes requested**: {len(categories['changes_requested'])} PRs need updates
### Process Improvements
1. **Assign reviewers**: Ensure each PR has a reviewer within 24 hours
2. **Set SLAs**:
 - Review within 48 hours
 - Merge within 7 days of approval
 - Close stale PRs after 30 days
3. **Automate**: Add CI checks to prevent backlog
## Detailed Analysis
### Stale PRs (>30 days)
"""
    # The three detail sections previously repeated identical formatting
    # loops; factored into _format_pr_section.
    report += _format_pr_section(categories['stale'])
    report += "\n### Approved PRs (Ready to Merge)\n"
    report += _format_pr_section(categories['approved'])
    report += "\n### Needs Review\n"
    report += _format_pr_section(categories['needs_review'])
    return report
def main():
    """Fetch open PRs, analyze them, and write a markdown backlog report."""
    # The Gitea token lives in the user's config dir; bail out early if absent.
    token_path = Path.home() / '.config' / 'gitea' / 'token'
    if not token_path.exists():
        print("Error: Gitea token not found")
        sys.exit(1)

    repo = "Timmy_Foundation/timmy-config"
    print(f"Fetching PRs for {repo}...")
    prs = get_open_prs(repo, token_path.read_text().strip())
    if not prs:
        print("No open PRs found")
        return
    print(f"Found {len(prs)} open PRs")

    # Analyze and bucket every PR, then render the markdown report.
    analyzed = [analyze_pr(pr) for pr in prs]
    categories = categorize_prs(analyzed)
    report = generate_report(repo, analyzed, categories)

    # Persist under reports/pr-backlog-YYYYMMDD.md (dir created on demand).
    output_dir = Path("reports")
    output_dir.mkdir(exist_ok=True)
    report_file = output_dir / f"pr-backlog-{datetime.now().strftime('%Y%m%d')}.md"
    report_file.write_text(report)

    print(f"\nReport saved to: {report_file}")
    print("\nSummary:")
    print(f" Total PRs: {len(prs)}")
    print(f" Stale: {len(categories['stale'])}")
    print(f" Approved: {len(categories['approved'])}")
    print(f" Needs Review: {len(categories['needs_review'])}")
if __name__ == "__main__":
main()

View File

@@ -7,6 +7,7 @@ the body (Evennia/Morrowind), and the visualization surface.
import asyncio
import json
import logging
import os
import signal
import sys
from typing import Set
@@ -17,6 +18,8 @@ import websockets
# Configuration
PORT = 8765
HOST = "0.0.0.0" # Allow external connections if needed
PORTALS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "portals.json")
PORTALS_POLL_INTERVAL = 2.0 # seconds
# Logging setup
logging.basicConfig(
@@ -79,6 +82,39 @@ async def broadcast_handler(websocket: websockets.WebSocketServerProtocol):
clients.discard(websocket)
logger.info(f"Client disconnected {addr}. Total clients: {len(clients)}")
async def watch_portals(stop_event: asyncio.Future):
    """Poll portals.json for changes and broadcast reload to all clients.

    Runs until *stop_event* resolves. Uses mtime polling (every
    PORTALS_POLL_INTERVAL seconds) rather than OS file watching to stay
    dependency-free.
    """
    last_mtime = 0.0
    try:
        last_mtime = os.path.getmtime(PORTALS_FILE)
    except OSError:
        logger.warning(f"portals.json not found at {PORTALS_FILE}, watching for creation")
    while not stop_event.done():
        await asyncio.sleep(PORTALS_POLL_INTERVAL)
        if stop_event.done():
            break
        try:
            current_mtime = os.path.getmtime(PORTALS_FILE)
        except OSError:
            # File missing/unreadable this tick — keep watching.
            continue
        if current_mtime != last_mtime:
            last_mtime = current_mtime
            logger.info("portals.json changed — broadcasting reload")
            msg = json.dumps({"type": "portals_reload", "timestamp": current_mtime})
            disconnected = set()
            for client in list(clients):
                # FIX: don't gate the send on `client.open` — that attribute
                # was removed in newer `websockets` releases, and the
                # check-then-send was racy anyway. Attempt the send and treat
                # any failure as a dead client to be pruned.
                try:
                    await client.send(msg)
                except Exception:
                    disconnected.add(client)
            if disconnected:
                clients.difference_update(disconnected)
                logger.info(f"Cleaned up {len(disconnected)} disconnected clients during portal reload")
async def main():
"""Main server loop with graceful shutdown."""
logger.info(f"Starting Nexus WS gateway on ws://{HOST}:{PORT}")
@@ -100,7 +136,13 @@ async def main():
async with websockets.serve(broadcast_handler, HOST, PORT):
logger.info("Gateway is ready and listening.")
watcher_task = asyncio.create_task(watch_portals(stop))
await stop
watcher_task.cancel()
try:
await watcher_task
except asyncio.CancelledError:
pass
logger.info("Shutting down Nexus WS gateway...")
# Close any remaining client connections (handlers may have already cleaned up)