Compare commits
2 Commits
fix/534-v2
...
fix/543
| Author | SHA1 | Date |
|---|---|---|
| (unknown) | 24985a29db | (unknown) |
| (unknown) | d6c90df391 | (unknown) |
@@ -1,87 +0,0 @@
|
||||
# Bezalel World Server Configuration
|
||||
|
||||
This directory contains the Evennia server configuration for Bezalel, the forge-and-testbed wizard house.
|
||||
|
||||
## Quick Start
|
||||
|
||||
To fix the Evennia settings on the Bezalel VPS (104.131.15.18):
|
||||
|
||||
```bash
|
||||
# SSH to Bezalel and run the fix script
|
||||
ssh root@104.131.15.18 'bash -s' < scripts/fix_evennia_settings.sh
|
||||
```
|
||||
|
||||
Or manually:
|
||||
|
||||
```bash
|
||||
cd /root/wizards/bezalel/evennia/bezalel_world/server/conf
|
||||
|
||||
# Copy the fixed settings
|
||||
cp ~/timmy-home/evennia/bezalel_world/server/conf/settings.py ./settings.py
|
||||
|
||||
# Clean and reinitialize DB
|
||||
cd /root/wizards/bezalel/evennia/bezalel_world
|
||||
rm -f server/evennia.db3
|
||||
/root/wizards/bezalel/evennia/venv/bin/evennia migrate
|
||||
|
||||
# Create superuser
|
||||
/root/wizards/bezalel/evennia/venv/bin/python3 -c "
|
||||
import sys, os
|
||||
sys.setrecursionlimit(5000)
|
||||
os.environ['DJANGO_SETTINGS_MODULE'] = 'server.conf.settings'
|
||||
import django
|
||||
django.setup()
|
||||
from evennia.accounts.accounts import AccountDB
|
||||
AccountDB.objects.create_superuser('Timmy', 'timmy@tower.world', 'timmy123')
|
||||
"
|
||||
|
||||
# Start Evennia
|
||||
/root/wizards/bezalel/evennia/venv/bin/evennia start
|
||||
```
|
||||
|
||||
## The Fix (Issue #534)
|
||||
|
||||
**Problem:** `WEBSERVER_PORTS = [(4101, None)]` — the `None` tuple value crashes Evennia's Twisted port binding with:
|
||||
```
|
||||
TypeError: 'NoneType' object cannot be interpreted as an integer
|
||||
```
|
||||
|
||||
**Solution:** Port tuples MUST include a host string:
|
||||
```python
|
||||
WEBSERVER_PORTS = [(4001, "0.0.0.0")]
|
||||
TELNET_PORTS = [(4000, "0.0.0.0")]
|
||||
WEBSOCKET_PORTS = [(4002, "0.0.0.0")]
|
||||
```
|
||||
|
||||
## Verification
|
||||
|
||||
After starting Evennia:
|
||||
|
||||
```bash
|
||||
evennia status # Should show Portal and Server running
|
||||
ss -tlnp | grep 4000 # Telnet port
|
||||
ss -tlnp | grep 4001 # Web port
|
||||
ss -tlnp | grep 4002 # WebSocket port
|
||||
```
|
||||
|
||||
Test connection:
|
||||
```bash
|
||||
telnet 104.131.15.18 4000
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
server/
|
||||
├── conf/
|
||||
│ ├── __init__.py
|
||||
│ └── settings.py # Main settings file (FIXED for #534)
|
||||
├── logs/ # Evennia logs
|
||||
└── evennia.db3 # SQLite database (created at runtime)
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
- Gitea Issue: [timmy-home#534](https://forge.alexanderwhitestone.com/Timmy_Foundation/timmy-home/issues/534)
|
||||
- Evennia Docs: https://www.evennia.com/docs/latest/Setup/Settings-Default.html
|
||||
- World Plan: docs/BEZALEL_EVENNIA_WORLD.md
|
||||
@@ -1,87 +0,0 @@
|
||||
r"""
|
||||
Evennia settings file for Bezalel World.
|
||||
|
||||
This is the sovereign Evennia configuration for the Bezalel forge-and-testbed wizard.
|
||||
Reference: timmy-home#534
|
||||
|
||||
The available options are found in the default settings file found here:
|
||||
https://www.evennia.com/docs/latest/Setup/Settings-Default.html
|
||||
"""
|
||||
|
||||
# Use the defaults from Evennia unless explicitly overridden
|
||||
from evennia.settings_default import *
|
||||
|
||||
######################################################################
|
||||
# Evennia base server config
|
||||
######################################################################
|
||||
|
||||
# Server name
|
||||
SERVERNAME = "bezalel_world"
|
||||
|
||||
######################################################################
|
||||
# Network ports - FIXED for #534
|
||||
# Port tuples MUST include a host string, not None
|
||||
######################################################################
|
||||
|
||||
# Web server port (HTTP)
|
||||
WEBSERVER_PORTS = [(4001, "0.0.0.0")]
|
||||
|
||||
# Telnet server port
|
||||
TELNET_PORTS = [(4000, "0.0.0.0")]
|
||||
|
||||
# WebSocket port for webclient
|
||||
WEBSOCKET_PORTS = [(4002, "0.0.0.0")]
|
||||
|
||||
######################################################################
|
||||
# Database configuration
|
||||
# Using SQLite for sovereign local deployment
|
||||
######################################################################
|
||||
|
||||
DATABASES = {
|
||||
'default': {
|
||||
'ENGINE': 'django.db.backends.sqlite3',
|
||||
'NAME': os.path.join(GAME_DIR, 'server', 'evennia.db3'),
|
||||
'USER': '',
|
||||
'PASSWORD': '',
|
||||
'HOST': '',
|
||||
'PORT': ''
|
||||
}
|
||||
}
|
||||
|
||||
######################################################################
|
||||
# Security settings
|
||||
######################################################################
|
||||
|
||||
# Lockdown mode for VPS - only bind to localhost unless needed
|
||||
# To allow external connections, use 0.0.0.0 in port tuples above
|
||||
ALLOWED_HOSTS = ['*'] # VPS needs this for external access
|
||||
|
||||
######################################################################
|
||||
# Game world defaults
|
||||
######################################################################
|
||||
|
||||
# Start location for new characters
|
||||
DEFAULT_HOME = "#2" # Limbo
|
||||
|
||||
# Start location for guests
|
||||
GUEST_HOME = "#2"
|
||||
|
||||
######################################################################
|
||||
# Telnet settings
|
||||
######################################################################
|
||||
|
||||
TELNET_INTERFACES = ['0.0.0.0']
|
||||
|
||||
######################################################################
|
||||
# Web server settings
|
||||
######################################################################
|
||||
|
||||
WEBSERVER_INTERFACES = ['0.0.0.0']
|
||||
|
||||
######################################################################
|
||||
# Settings given in secret_settings.py override those in this file.
|
||||
######################################################################
|
||||
try:
|
||||
from server.conf.secret_settings import *
|
||||
except ImportError:
|
||||
print("secret_settings.py file not found or failed to import.")
|
||||
@@ -62,6 +62,24 @@ Writes:
|
||||
|
||||
## Usage
|
||||
|
||||
### Timmy Mac wiring helper
|
||||
|
||||
Use the dedicated Timmy helper when you want to wire a real RunPod or Vertex-style endpoint into the local Mac Hermes config:
|
||||
|
||||
```bash
|
||||
python3 scripts/timmy_gemma4_mac.py --base-url https://your-openai-bridge.example/v1 --write-config
|
||||
python3 scripts/timmy_gemma4_mac.py --vertex-base-url https://your-vertex-bridge.example --write-config
|
||||
python3 scripts/timmy_gemma4_mac.py --pod-id <runpod-id> --write-config --verify-chat
|
||||
```
|
||||
|
||||
The helper writes to `~/.hermes/config.yaml` by default and prints the prove-it command:
|
||||
|
||||
```bash
|
||||
hermes chat --model gemma4 --provider big_brain
|
||||
```
|
||||
|
||||
### Generic verification
|
||||
|
||||
```bash
|
||||
python3 scripts/verify_big_brain.py
|
||||
python3 scripts/big_brain_manager.py
|
||||
|
||||
@@ -15,20 +15,13 @@ EVENNIA_DIR="/root/wizards/bezalel/evennia/bezalel_world"
|
||||
SETTINGS="${EVENNIA_DIR}/server/conf/settings.py"
|
||||
VENV_PYTHON="/root/wizards/bezalel/evennia/venv/bin/python3"
|
||||
VENV_EVENNIA="/root/wizards/bezalel/evennia/venv/bin/evennia"
|
||||
TIMMY_HOME="${TIMMY_HOME:-/root/timmy-home}" # Or wherever the repo is cloned
|
||||
|
||||
echo "=== Fix Evennia Settings (Bezalel) ==="
|
||||
|
||||
# 1. Fix settings.py — prefer the repo version; fall back to removing the bad port tuples and patching in place
|
||||
echo "Fixing settings.py..."
|
||||
if [ -f "${TIMMY_HOME}/evennia/bezalel_world/server/conf/settings.py" ]; then
|
||||
# Use the fixed settings from the repo
|
||||
mkdir -p "$(dirname "$SETTINGS")"
|
||||
cp "${TIMMY_HOME}/evennia/bezalel_world/server/conf/settings.py" "$SETTINGS"
|
||||
echo "Copied fixed settings from timmy-home repo."
|
||||
elif [ -f "$SETTINGS" ]; then
|
||||
# Fallback: patch in place
|
||||
echo "Patching existing settings..."
|
||||
if [ -f "$SETTINGS" ]; then
|
||||
# Remove broken port lines
|
||||
sed -i '/WEBSERVER_PORTS/d' "$SETTINGS"
|
||||
sed -i '/TELNET_PORTS/d' "$SETTINGS"
|
||||
sed -i '/WEBSOCKET_PORTS/d' "$SETTINGS"
|
||||
@@ -42,7 +35,7 @@ elif [ -f "$SETTINGS" ]; then
|
||||
echo 'TELNET_PORTS = [(4000, "0.0.0.0")]' >> "$SETTINGS"
|
||||
echo 'WEBSOCKET_PORTS = [(4002, "0.0.0.0")]' >> "$SETTINGS"
|
||||
|
||||
echo "Patched existing settings file."
|
||||
echo "Settings fixed."
|
||||
else
|
||||
echo "ERROR: Settings file not found at $SETTINGS"
|
||||
exit 1
|
||||
|
||||
164
scripts/timmy_gemma4_mac.py
Normal file
164
scripts/timmy_gemma4_mac.py
Normal file
@@ -0,0 +1,164 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Timmy Mac Gemma 4 wiring helper for RunPod / Vertex-style Big Brain providers.
|
||||
|
||||
Refs: timmy-home #543
|
||||
|
||||
Safe by default:
|
||||
- computes a Big Brain base URL from an explicit URL, Vertex bridge URL, or RunPod pod id
|
||||
- can provision a RunPod pod when --apply-runpod is used and a token is available
|
||||
- can write the resolved endpoint into a Hermes config when --write-config is used
|
||||
- can verify an OpenAI-compatible chat endpoint when --verify-chat is used
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from urllib import request
|
||||
|
||||
from scripts.bezalel_gemma4_vps import (
|
||||
DEFAULT_CLOUD_TYPE,
|
||||
DEFAULT_GPU_TYPE,
|
||||
DEFAULT_MODEL,
|
||||
DEFAULT_PROVIDER_NAME,
|
||||
build_runpod_endpoint,
|
||||
deploy_runpod,
|
||||
update_config_text,
|
||||
)
|
||||
|
||||
# Default locations on Timmy's Mac for the RunPod access token and the
# Hermes config the helper writes into.
DEFAULT_TOKEN_FILE = Path.home().joinpath(".config", "runpod", "access_key")
DEFAULT_CONFIG_PATH = Path.home().joinpath(".hermes", "config.yaml")
|
||||
|
||||
|
||||
def _normalize_openai_base(base_url: str | None) -> str:
|
||||
if not base_url:
|
||||
return ""
|
||||
cleaned = str(base_url).strip().rstrip("/")
|
||||
return cleaned if cleaned.endswith("/v1") else f"{cleaned}/v1"
|
||||
|
||||
|
||||
def choose_base_url(*, vertex_base_url: str | None = None, base_url: str | None = None, pod_id: str | None = None) -> str:
    """Resolve the Big Brain base URL.

    Precedence: Vertex bridge URL, then an explicit base URL, then a
    RunPod pod id; otherwise a placeholder the user must replace.
    """
    for candidate in (vertex_base_url, base_url):
        if candidate:
            return _normalize_openai_base(candidate)
    if pod_id:
        return build_runpod_endpoint(pod_id)
    return "https://YOUR_BIG_BRAIN_HOST/v1"
|
||||
|
||||
|
||||
def write_config_file(config_path: Path, *, base_url: str, model: str = DEFAULT_MODEL, provider_name: str = DEFAULT_PROVIDER_NAME) -> str:
    """Merge *base_url*/*model*/*provider_name* into the Hermes config at *config_path*.

    Creates parent directories as needed and returns the updated config text.
    """
    # Explicit UTF-8 so the config round-trips identically on every platform
    # (the default encoding is locale-dependent); matches the UTF-8 reads in
    # the test suite.
    original = config_path.read_text(encoding="utf-8") if config_path.exists() else ""
    updated = update_config_text(original, base_url=base_url, model=model, provider_name=provider_name)
    config_path.parent.mkdir(parents=True, exist_ok=True)
    config_path.write_text(updated, encoding="utf-8")
    return updated
|
||||
|
||||
|
||||
def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str = "Say READY") -> str:
    """POST a one-shot, non-streaming chat completion and return the reply text.

    Targets the OpenAI-compatible ``/chat/completions`` route under *base_url*.
    """
    body = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "stream": False,
        "max_tokens": 16,
    }
    endpoint = f"{base_url.rstrip('/')}/chat/completions"
    req = request.Request(
        endpoint,
        data=json.dumps(body).encode(),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with request.urlopen(req, timeout=30) as resp:
        parsed = json.loads(resp.read().decode())
    return parsed["choices"][0]["message"]["content"]
|
||||
|
||||
|
||||
def build_summary(*, base_url: str, model: str, provider_name: str = DEFAULT_PROVIDER_NAME, config_path: Path = DEFAULT_CONFIG_PATH) -> dict[str, Any]:
    """Assemble the wiring summary dict, including the prove-it commands."""
    prove_it = [
        "python3 scripts/verify_big_brain.py",
        f"python3 scripts/timmy_gemma4_mac.py --base-url {base_url} --write-config --verify-chat",
        "hermes chat --model gemma4 --provider big_brain",
    ]
    return {
        "provider_name": provider_name,
        "base_url": base_url,
        "model": model,
        "config_path": str(config_path),
        "verification_commands": prove_it,
    }
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Build and parse the CLI for the Mac wiring helper."""
    parser = argparse.ArgumentParser(description="Wire a RunPod/Vertex Gemma 4 endpoint into Timmy's Mac Hermes config.")
    add = parser.add_argument
    # Deployment knobs (only consulted with --apply-runpod).
    add("--pod-name", default="timmy-gemma4")
    add("--gpu-type", default=DEFAULT_GPU_TYPE)
    add("--cloud-type", default=DEFAULT_CLOUD_TYPE)
    add("--model", default=DEFAULT_MODEL)
    add("--provider-name", default=DEFAULT_PROVIDER_NAME)
    add("--token-file", type=Path, default=DEFAULT_TOKEN_FILE)
    add("--config-path", type=Path, default=DEFAULT_CONFIG_PATH)
    # Endpoint sources; resolution precedence is vertex > explicit > pod id.
    add("--pod-id", help="Existing RunPod pod id to convert into an OpenAI-compatible base URL")
    add("--base-url", help="Explicit OpenAI-compatible base URL")
    add("--vertex-base-url", help="Vertex AI OpenAI-compatible bridge base URL")
    # Actions (all off by default — the helper is safe/dry by default).
    add("--apply-runpod", action="store_true", help="Provision a RunPod pod using the RunPod GraphQL API")
    add("--write-config", action="store_true", help="Write the resolved endpoint into --config-path")
    add("--verify-chat", action="store_true", help="Run a lightweight OpenAI-compatible chat probe")
    add("--json", action="store_true", help="Emit machine-readable JSON")
    return parser.parse_args()
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: resolve the endpoint, then optionally deploy, write, verify."""
    args = parse_args()
    actions: list[str] = []
    summary: dict[str, Any] = {
        "pod_name": args.pod_name,
        "gpu_type": args.gpu_type,
        "cloud_type": args.cloud_type,
        "model": args.model,
        "provider_name": args.provider_name,
        "actions": actions,
    }

    base_url = choose_base_url(vertex_base_url=args.vertex_base_url, base_url=args.base_url, pod_id=args.pod_id)

    if args.apply_runpod:
        # Provisioning needs a real API key on disk; fail loudly if absent.
        if not args.token_file.exists():
            raise SystemExit(f"RunPod token file not found: {args.token_file}")
        api_key = args.token_file.read_text().strip()
        deployment = deploy_runpod(api_key=api_key, name=args.pod_name, gpu_type=args.gpu_type, cloud_type=args.cloud_type, model=args.model)
        summary["deployment"] = deployment
        # A freshly deployed pod supersedes any previously resolved URL.
        base_url = deployment["base_url"]
        actions.append("deployed_runpod_pod")

    summary.update(build_summary(base_url=base_url, model=args.model, provider_name=args.provider_name, config_path=args.config_path))

    if args.write_config:
        write_config_file(args.config_path, base_url=base_url, model=args.model, provider_name=args.provider_name)
        actions.append("wrote_config")

    if args.verify_chat:
        summary["verify_response"] = verify_openai_chat(base_url, model=args.model)
        actions.append("verified_chat")

    if args.json:
        print(json.dumps(summary, indent=2))
        return

    # Human-readable report.
    print("--- Timmy Gemma4 Mac Wiring ---")
    print(f"Provider: {args.provider_name}")
    print(f"Base URL: {base_url}")
    print(f"Model: {args.model}")
    print(f"Config path: {args.config_path}")
    if "verify_response" in summary:
        print(f"Verify response: {summary['verify_response']}")
    if summary["actions"]:
        print("Actions: " + ", ".join(summary["actions"]))
    print("Verification commands:")
    for command in summary["verification_commands"]:
        print(f"  - {command}")


if __name__ == "__main__":
    main()
|
||||
85
tests/test_timmy_gemma4_mac.py
Normal file
85
tests/test_timmy_gemma4_mac.py
Normal file
@@ -0,0 +1,85 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib.util
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
|
||||
# Repo root plus the two artifacts exercised by these tests.
ROOT = Path(__file__).resolve().parents[1]
SCRIPT = ROOT.joinpath("scripts", "timmy_gemma4_mac.py")
README = ROOT.joinpath("scripts", "README_big_brain.md")
|
||||
|
||||
|
||||
def load_module():
    """Import scripts/timmy_gemma4_mac.py under the stable name ``timmy_gemma4_mac``."""
    spec = importlib.util.spec_from_file_location("timmy_gemma4_mac", str(SCRIPT))
    module = importlib.util.module_from_spec(spec)
    # Register before executing so patch("timmy_gemma4_mac....") resolves.
    sys.modules["timmy_gemma4_mac"] = module
    spec.loader.exec_module(module)
    return module
|
||||
|
||||
|
||||
class _FakeResponse:
|
||||
def __init__(self, payload: dict):
|
||||
self._payload = json.dumps(payload).encode()
|
||||
|
||||
def read(self) -> bytes:
|
||||
return self._payload
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc, tb):
|
||||
return False
|
||||
|
||||
|
||||
def test_script_exists() -> None:
    """The wiring helper script must be present in the repo."""
    assert SCRIPT.exists(), "scripts/timmy_gemma4_mac.py must exist"
|
||||
|
||||
|
||||
def test_default_paths_target_timmy_mac_hermes() -> None:
    """Defaults must point at the Mac Hermes config and RunPod token locations."""
    mod = load_module()
    home = Path.home()
    assert mod.DEFAULT_CONFIG_PATH == home / ".hermes" / "config.yaml"
    assert mod.DEFAULT_TOKEN_FILE == home / ".config" / "runpod" / "access_key"
|
||||
|
||||
|
||||
def test_choose_base_url_prefers_vertex_then_explicit_then_runpod() -> None:
    """Resolution precedence: Vertex bridge, explicit URL, RunPod pod id."""
    mod = load_module()
    cases = [
        ({"vertex_base_url": "https://vertex-proxy.example/v1"}, "https://vertex-proxy.example/v1"),
        ({"base_url": "https://custom-endpoint/v1"}, "https://custom-endpoint/v1"),
        ({"pod_id": "abc123"}, "https://abc123-11434.proxy.runpod.net/v1"),
    ]
    for kwargs, expected in cases:
        assert mod.choose_base_url(**kwargs) == expected
|
||||
|
||||
|
||||
def test_build_summary_includes_prove_it_commands() -> None:
    """The summary must lead with the verify script and include the hermes prove-it command."""
    mod = load_module()
    summary = mod.build_summary(base_url="https://vertex-proxy.example/v1", model="gemma4:latest")
    commands = summary["verification_commands"]
    assert commands[0] == "python3 scripts/verify_big_brain.py"
    assert any("hermes chat --model gemma4 --provider big_brain" in cmd for cmd in commands)
|
||||
|
||||
|
||||
def test_verify_openai_chat_targets_chat_completions() -> None:
    """verify_openai_chat must POST to <base>/chat/completions and unwrap the reply text."""
    mod = load_module()
    canned = _FakeResponse({"choices": [{"message": {"content": "READY"}}]})

    with patch("timmy_gemma4_mac.request.urlopen", return_value=canned) as mocked:
        result = mod.verify_openai_chat("https://vertex-proxy.example/v1", model="gemma4:latest", prompt="say READY")

    assert result == "READY"
    sent = mocked.call_args.args[0]
    assert sent.full_url == "https://vertex-proxy.example/v1/chat/completions"
|
||||
|
||||
|
||||
def test_readme_mentions_timmy_mac_wiring_flow() -> None:
    """The Big Brain README must document the Mac wiring helper end to end."""
    text = README.read_text(encoding="utf-8")
    required = [
        "scripts/timmy_gemma4_mac.py",
        "--vertex-base-url",
        "--write-config",
        "python3 scripts/verify_big_brain.py",
        "hermes chat --model gemma4 --provider big_brain",
    ]
    missing = [item for item in required if item not in text]
    assert not missing, missing
|
||||
Reference in New Issue
Block a user