Compare commits

..

2 Commits

Author SHA1 Message Date
9821239b39 feat: client-side portal hot-reload handler
Some checks failed
Review Approval Gate / verify-review (pull_request) Successful in 16s
CI / validate (pull_request) Failing after 1m7s
CI / test (pull_request) Failing after 1m39s
- Listens for 'portals:reload' WS message
- Removes old portals from scene
- Re-fetches portals.json with cache-bust
- Recreates all portal objects
- Announces reload in chat
2026-04-15 03:40:04 +00:00
06ece30ea9 feat: add portals.json file watcher with WS broadcast
Watches portals.json every 2 seconds for mtime changes.
Broadcasts {type: 'portals:reload'} to all connected clients.
Existing connections unaffected.
2026-04-15 03:35:43 +00:00
4 changed files with 71 additions and 148 deletions

27
app.js
View File

@@ -1534,6 +1534,27 @@ function createPortals(data) {
});
}
async function reloadPortals() {
    // Tear down the existing portal objects before re-reading config.
    portals.forEach(portal => {
        scene.remove(portal.group);
    });
    portals.length = 0;
    activePortal = null;
    // Re-fetch portals.json and rebuild every portal.
    try {
        const response = await fetch('./portals.json?' + Date.now()); // cache-bust
        if (!response.ok) {
            // fetch() only rejects on network failure; surface HTTP errors
            // (404/500) explicitly instead of failing inside .json().
            throw new Error(`HTTP ${response.status} fetching portals.json`);
        }
        const portalData = await response.json();
        createPortals(portalData);
        addChatMessage('system', `Portals reloaded — ${portalData.length} worlds online.`);
        console.log(`[portals] Reloaded ${portalData.length} portals`);
    } catch (e) {
        console.error('[portals] Reload failed:', e);
        addChatMessage('error', 'Portal reload failed. Check portals.json.');
    }
}
function createPortal(config) {
const group = new THREE.Group();
group.position.set(config.position.x, config.position.y, config.position.z);
@@ -2223,6 +2244,12 @@ function connectHermes() {
const data = JSON.parse(evt.data);
handleHermesMessage(data);
// Portal hot-reload: server detected portals.json change
if (data.type === 'portals:reload') {
console.log('[portals] Hot-reload triggered');
reloadPortals();
}
// Store in MemPalace
if (data.type === 'chat') {
// Store in MemPalace with AAAK compression

View File

@@ -1,24 +0,0 @@
# PR Backlog Report — Timmy_Foundation/timmy-config
Generated: 2026-04-14 23:23:33
## Summary
- **Total Open PRs**: 50
- **Stale (>30 days)**: 0
- **Recent (<7 days)**: 50
## Recommendations
### Immediate Actions
1. **Review stale PRs**: 0 PRs are >30 days old
2. **Close duplicates**: Check for duplicate PRs on same issues
3. **Assign reviewers**: Ensure each PR has a reviewer
### Process Improvements
1. **Set SLAs**: Review within 48 hours, merge within 7 days
2. **Weekly cleanup**: Run this analyzer weekly
3. **Automate**: Add CI checks to prevent backlog
## Stale PRs (>30 days)

View File

@@ -1,123 +0,0 @@
#!/usr/bin/env python3
"""
PR Backlog Analyzer for timmy-config
Analyzes open PRs and provides recommendations for cleanup.
Issue: #1470
"""
import json
import subprocess
import sys
from datetime import datetime, timedelta
from pathlib import Path
def get_open_prs(repo: str, token: str) -> list:
    """Fetch all open pull requests for *repo* from the Gitea API.

    Returns the decoded list of PR dicts, or [] on any transport,
    decode, or API error (errors are printed, never raised).
    """
    result = subprocess.run([
        "curl", "-s", "-H", f"Authorization: token {token}",
        f"https://forge.alexanderwhitestone.com/api/v1/repos/{repo}/pulls?state=open&limit=100"
    ], capture_output=True, text=True)
    if result.returncode != 0:
        print(f"Error fetching PRs: {result.stderr}")
        return []
    # curl exits 0 even on HTTP 4xx/5xx, so the body may be an HTML error
    # page or empty — don't let json.loads crash the whole run.
    try:
        prs = json.loads(result.stdout)
    except json.JSONDecodeError as e:
        print(f"Error decoding PR response: {e}")
        return []
    # The pulls endpoint returns a JSON array; an object here is an error
    # payload (e.g. {"message": "..."}).
    if not isinstance(prs, list):
        print(f"Unexpected PR response: {prs!r}")
        return []
    return prs
def analyze_pr(pr: dict) -> dict:
    """Summarize one raw PR API payload into a flat dict for reporting."""
    opened_at = datetime.fromisoformat(pr['created_at'].replace('Z', '+00:00'))
    days_open = (datetime.now(opened_at.tzinfo) - opened_at).days
    return {
        'number': pr['number'],
        'title': pr['title'],
        'branch': pr['head']['ref'],
        'created': pr['created_at'],
        'age_days': days_open,
        'user': pr['user']['login'],
        'labels': [label['name'] for label in pr.get('labels', [])],
        'url': pr['html_url'],
    }
def generate_report(repo: str, prs: list) -> str:
    """Build the markdown backlog report for *repo* from analyzed PR dicts."""
    overdue = [pr for pr in prs if pr['age_days'] > 30]
    fresh = [pr for pr in prs if pr['age_days'] <= 7]
    header = f"""# PR Backlog Report — {repo}
Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
## Summary
- **Total Open PRs**: {len(prs)}
- **Stale (>30 days)**: {len(overdue)}
- **Recent (<7 days)**: {len(fresh)}
## Recommendations
### Immediate Actions
1. **Review stale PRs**: {len(overdue)} PRs are >30 days old
2. **Close duplicates**: Check for duplicate PRs on same issues
3. **Assign reviewers**: Ensure each PR has a reviewer
### Process Improvements
1. **Set SLAs**: Review within 48 hours, merge within 7 days
2. **Weekly cleanup**: Run this analyzer weekly
3. **Automate**: Add CI checks to prevent backlog
## Stale PRs (>30 days)
"""
    # Oldest first: sort descending by age, then append one entry per PR.
    sections = [header]
    for pr in sorted(overdue, key=lambda item: item['age_days'], reverse=True):
        sections.append(
            f"- **#{pr['number']}**: {pr['title']}\n"
            f" - Age: {pr['age_days']} days\n"
            f" - Author: {pr['user']}\n"
            f" - URL: {pr['url']}\n\n"
        )
    return "".join(sections)
def main():
    """Entry point: fetch open PRs, analyze them, and write a markdown report."""
    token_file = Path.home() / '.config' / 'gitea' / 'token'
    if not token_file.exists():
        print("Error: Gitea token not found")
        sys.exit(1)
    api_token = token_file.read_text().strip()

    repo = "Timmy_Foundation/timmy-config"
    print(f"Fetching PRs for {repo}...")
    open_prs = get_open_prs(repo, api_token)
    if not open_prs:
        print("No open PRs found")
        return
    print(f"Found {len(open_prs)} open PRs")

    summaries = [analyze_pr(p) for p in open_prs]
    report_text = generate_report(repo, summaries)

    # Write the dated report under ./reports/ (created on first run).
    reports_dir = Path("reports")
    reports_dir.mkdir(exist_ok=True)
    target = reports_dir / f"pr-backlog-{datetime.now().strftime('%Y%m%d')}.md"
    target.write_text(report_text)

    print(f"Report saved to: {target}")
    print(f"Total PRs: {len(open_prs)}")
    stale_count = sum(1 for s in summaries if s['age_days'] > 30)
    print(f"Stale (>30 days): {stale_count}")
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()

View File

@@ -7,8 +7,10 @@ the body (Evennia/Morrowind), and the visualization surface.
import asyncio
import json
import logging
import os
import signal
import sys
from pathlib import Path
from typing import Set
# Branch protected file - see POLICY.md
@@ -79,6 +81,42 @@ async def broadcast_handler(websocket: websockets.WebSocketServerProtocol):
clients.discard(websocket)
logger.info(f"Client disconnected {addr}. Total clients: {len(clients)}")
# Portal hot-reload state
_portals_mtime: float = 0.0  # last observed mtime of portals.json
_portals_path: Path = Path(__file__).parent / "portals.json"


async def _watch_portals():
    """Watch portals.json for changes and broadcast reload to all clients.

    Polls the file's mtime every 2 seconds; on a change, sends a
    {"type": "portals:reload"} message to every connected client and
    prunes clients that are closed or whose send fails.
    """
    global _portals_mtime
    # Seed the baseline mtime so a pre-existing file does not trigger a
    # spurious reload broadcast at startup.
    try:
        if _portals_path.exists():
            _portals_mtime = _portals_path.stat().st_mtime
    except OSError:
        pass
    while True:
        await asyncio.sleep(2)  # Check every 2 seconds
        try:
            if not _portals_path.exists():
                continue
            current_mtime = _portals_path.stat().st_mtime
            if current_mtime == _portals_mtime:
                continue
            _portals_mtime = current_mtime
            logger.info("portals.json changed — broadcasting reload")
            msg = json.dumps({"type": "portals:reload", "ts": current_mtime})
            disconnected = set()
            # Iterate a snapshot: `clients` is mutated by connection
            # handlers, and awaiting send() yields control mid-loop —
            # iterating the live set risks "set changed size during
            # iteration".
            for client in list(clients):
                if not client.open:
                    # Already closed — schedule removal instead of leaking
                    # the dead connection in the set forever.
                    disconnected.add(client)
                    continue
                try:
                    await client.send(msg)
                except Exception:
                    disconnected.add(client)
            if disconnected:
                clients.difference_update(disconnected)
        except OSError as e:
            logger.warning(f"Portal watch error: {e}")
async def main():
"""Main server loop with graceful shutdown."""
logger.info(f"Starting Nexus WS gateway on ws://{HOST}:{PORT}")
@@ -99,8 +137,13 @@ async def main():
pass
async with websockets.serve(broadcast_handler, HOST, PORT):
logger.info("Gateway is ready and listening.")
# Start portal file watcher
watcher_task = asyncio.create_task(_watch_portals())
logger.info("Portal hot-reload watcher started.")
logger.info("Gateway is ready and listening.")
await stop
watcher_task.cancel()
logger.info("Shutting down Nexus WS gateway...")
# Close any remaining client connections (handlers may have already cleaned up)