Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
7c99058b0b fix(#544): harden Bezalel Gemma4 VPS wiring proof
Some checks failed
Self-Healing Smoke / self-healing-smoke (pull_request) Failing after 27s
Smoke Test / smoke (pull_request) Failing after 28s
Agent PR Gate / gate (pull_request) Failing after 37s
Agent PR Gate / report (pull_request) Successful in 7s
2026-04-22 10:33:28 -04:00
4 changed files with 259 additions and 275 deletions

View File

@@ -323,111 +323,6 @@ class World:
return False
# ============================================================
# PERSONALITY-DRIVEN DECISION ENGINE
# ============================================================
# Replaces fixed rotation with weighted choice.
# Each character has:
# - home_room: preferred location
# - room_weights: base probabilities for each room
# - explore_chance: probability to explore randomly (10%)
# - social_weight: bonus when others are present
# - goal_weights: adjustments based on active_goal
# Per-character behavior profiles consumed by the weighted decision engine.
# NOTE: each character's base room_weights sum to 1.0; goal_weights are
# additive per-room bonuses applied only while that goal is active.
PERSONALITY_DICT = {
# Marcus: garden-rooted and the most social profile (social_weight 0.3).
"Marcus": {
"home_room": "Garden",
"room_weights": {"Garden": 0.4, "Bridge": 0.2, "Threshold": 0.2, "Tower": 0.1, "Forge": 0.1},
"explore_chance": 0.1,
"social_weight": 0.3,
"goal_weights": {
"sit": {"Garden": +0.3},
"speak_truth": {"Tower": +0.2, "Bridge": +0.2},
"remember": {"Garden": +0.2, "Threshold": +0.1},
},
},
# Bezalel: strongly forge-bound; craft goals pull even harder toward the Forge.
"Bezalel": {
"home_room": "Forge",
"room_weights": {"Forge": 0.5, "Threshold": 0.2, "Garden": 0.1, "Bridge": 0.1, "Tower": 0.1},
"explore_chance": 0.1,
"social_weight": 0.15,
"goal_weights": {
"forge": {"Forge": +0.4},
"tend_fire": {"Forge": +0.5},
"create_key": {"Forge": +0.3},
},
},
# Allegro: threshold overseer who splits time with the Tower.
"Allegro": {
"home_room": "Threshold",
"room_weights": {"Threshold": 0.35, "Tower": 0.25, "Forge": 0.15, "Garden": 0.15, "Bridge": 0.1},
"explore_chance": 0.1,
"social_weight": 0.25,
"goal_weights": {
"oversee": {"Threshold": +0.3},
"keep_time": {"Tower": +0.3},
"check_tunnel": {"Bridge": +0.2, "Threshold": +0.1},
},
},
# Ezra: tower scholar; study goals reinforce staying in the Tower.
"Ezra": {
"home_room": "Tower",
"room_weights": {"Tower": 0.45, "Threshold": 0.2, "Garden": 0.15, "Forge": 0.1, "Bridge": 0.1},
"explore_chance": 0.1,
"social_weight": 0.15,
"goal_weights": {
"study": {"Tower": +0.4},
"read_whiteboard": {"Tower": +0.4},
"find_pattern": {"Garden": +0.2, "Bridge": +0.1},
},
},
# Gemini: garden observer/listener.
"Gemini": {
"home_room": "Garden",
"room_weights": {"Garden": 0.45, "Threshold": 0.2, "Bridge": 0.15, "Tower": 0.1, "Forge": 0.1},
"explore_chance": 0.1,
"social_weight": 0.25,
"goal_weights": {
"observe": {"Garden": +0.2, "Tower": +0.2},
"tend_garden": {"Garden": +0.5},
"listen": {"Bridge": +0.1, "Threshold": +0.1},
},
},
# Claude: order-keeper spread across Threshold/Tower/Forge.
"Claude": {
"home_room": "Threshold",
"room_weights": {"Threshold": 0.3, "Tower": 0.25, "Forge": 0.2, "Garden": 0.15, "Bridge": 0.1},
"explore_chance": 0.1,
"social_weight": 0.2,
"goal_weights": {
"inspect": {"Threshold": +0.2, "Tower": +0.2},
"organize": {"Tower": +0.2, "Forge": +0.1},
"enforce_order": {"Threshold": +0.3, "Bridge": +0.1},
},
},
# ClawCode: forge-bound like Bezalel but the least social profile (0.1).
"ClawCode": {
"home_room": "Forge",
"room_weights": {"Forge": 0.5, "Threshold": 0.2, "Garden": 0.1, "Bridge": 0.1, "Tower": 0.1},
"explore_chance": 0.1,
"social_weight": 0.1,
"goal_weights": {
"forge": {"Forge": +0.4},
"test_edge": {"Forge": +0.4},
"build_weapon": {"Forge": +0.5},
},
},
# Kimi: contemplative garden-dweller who reads in the Tower.
"Kimi": {
"home_room": "Garden",
"room_weights": {"Garden": 0.4, "Threshold": 0.2, "Tower": 0.15, "Bridge": 0.15, "Forge": 0.1},
"explore_chance": 0.1,
"social_weight": 0.2,
"goal_weights": {
"contemplate": {"Garden": +0.3, "Tower": +0.1},
"read": {"Tower": +0.3},
"remember": {"Bridge": +0.2, "Threshold": +0.1},
},
},
}
# All available rooms — the five locations referenced by the
# room_weights entries in PERSONALITY_DICT above.
ALL_ROOMS = ["Threshold", "Tower", "Forge", "Garden", "Bridge"]
class ActionSystem:
"""Defines what actions are possible and what they cost."""
@@ -558,167 +453,100 @@ class TimmyAI:
class NPCAI:
"""AI for non-player characters. Weighted decision engine — agents choose, do not rotate."""
"""AI for non-player characters. They make choices based on goals."""
def __init__(self, world):
# World reference used to look up rooms, characters, and co-location.
self.world = world
# Per-character reasoning trace from the most recent decision,
# read back via get_reasoning() for tick logging.
self._last_reasoning = {} # Store reasoning per char for tick logging
def get_reasoning(self, char_name):
    """Return the reasoning dict recorded for *char_name*'s last decision ({} if none)."""
    stored = self._last_reasoning.get(char_name)
    return {} if stored is None else stored
def make_choice(self, char_name):
"""Make a weighted choice for this NPC. Returns (action, reasoning_dict)."""
"""Make a choice for this NPC this tick."""
char = self.world.characters[char_name]
room = char["room"]
available = ActionSystem.get_available_actions(char_name, self.world)
goal = char["active_goal"]
# Low energy → immediate rest
# If low energy, rest
if char["energy"] <= 1:
self._last_reasoning[char_name] = {"trigger": "low_energy", "reason": "Energy ≤ 1, resting"}
return "rest"
# Find personality profile
personality = PERSONALITY_DICT.get(char_name)
if not personality:
# Fallback: move toward home room if not there
if room != char.get("home", "Tower"):
action = f"move:{self._direction_to_home(room, char.get('home', 'Tower'))}"
self._last_reasoning[char_name] = {"trigger": "fallback_no_personality", "action": action}
return action
action = random.choice(["rest", "examine"])
self._last_reasoning[char_name] = {"trigger": "fallback_no_personality", "action": action}
return action
# Goal-driven behavior
goal = char["active_goal"]
# Build weighted action list
weights = self._compute_weights(char_name, char, room, available, personality, goal)
if char_name == "Marcus":
return self._marcus_choice(char, room, available)
elif char_name == "Bezalel":
return self._bezalel_choice(char, room, available)
elif char_name == "Allegro":
return self._allegro_choice(char, room, available)
elif char_name == "Ezra":
return self._ezra_choice(char, room, available)
elif char_name == "Gemini":
return self._gemini_choice(char, room, available)
elif char_name == "Claude":
return self._claude_choice(char, room, available)
elif char_name == "ClawCode":
return self._clawcode_choice(char, room, available)
elif char_name == "Kimi":
return self._kimi_choice(char, room, available)
if not weights:
action = "rest"
self._last_reasoning[char_name] = {"trigger": "fallback", "reason": "No weighted actions available"}
return action
# Sample action
actions, probs = zip(*weights)
action = random.choices(actions, weights=probs)[0]
# Store reasoning
reasoning = self._build_reasoning(char_name, char, room, weights, action, personality, goal)
self._last_reasoning[char_name] = reasoning
return action
return "rest"
def _direction_to_home(self, current_room, home_room):
    """Pick the exit direction leading from *current_room* toward *home_room*.

    Uses the room's "connections" mapping directly (no pathfinding). When no
    exit reaches home in one step, wanders through a random exit; returns
    "north" only for a room with no exits at all (should not happen).
    """
    exits = self.world.rooms[current_room].get("connections", {})
    for direction, destination in exits.items():
        if destination == home_room:
            return direction
    if exits:
        return random.choice(list(exits.keys()))
    return "north"  # should not happen
def _marcus_choice(self, char, room, available):
    """Marcus: rests in the Garden most of the time, heads west when away, occasionally chats."""
    in_garden = room == "Garden"
    if in_garden and random.random() < 0.7:
        return "rest"
    if not in_garden:
        return "move:west"
    # In the Garden but not resting: maybe strike up a conversation.
    speak_targets = [entry.split(":")[1] for entry in available if entry.startswith("speak:")]
    if speak_targets and random.random() < 0.4:
        return f"speak:{random.choice(speak_targets)}"
    return "rest"
def _compute_weights(self, char_name, char, room, available, personality, goal):
"""Compute weighted list of (action, prob) tuples.

Every available action gets a small floor weight (0.05) plus
category-specific bonuses (movement, social, craft, rest, examine);
the final list is normalized so probabilities sum to 1.
"""
weights = []
room_weights = personality["room_weights"]
social_weight = personality["social_weight"]
# Bonuses for the active goal only; {} when the goal has no entry.
goal_bonus = personality["goal_weights"].get(goal, {})
# Count others in the room
others_in_room = [n for n in self.world.characters
if self.world.characters[n]["room"] == room and n != char_name]
social_present = len(others_in_room) > 0
for action in available:
base_w = 0.05 # small floor for every action
# Movement-specific
if action.startswith("move:"):
direction = action.split(":")[1]
# assumes movement actions are formatted "move:<dir> -> <dest>" — TODO confirm against ActionSystem
dest = action.split(" -> ")[1] if " -> " in action else None
if dest:
# Room probability
base_w += room_weights.get(dest, 0.05)
# Home room bonus
if dest == personality["home_room"]:
base_w += 0.2
# Social bonus
if social_present:
base_w += social_weight
# Goal bonus
if dest in goal_bonus:
base_w += goal_bonus[dest]
# Exploration penalty for home room (sometimes leave)
# NOTE(review): the home room gets both a +0.2 bonus above and this
# multiplicative discount — net effect still favors home.
if dest == personality["home_room"]:
base_w *= (1 - personality.get("explore_chance", 0.1))
# Social actions
elif action.startswith("speak:") or action.startswith("listen:") or action.startswith("help:"):
person = action.split(":")[1]
base_w += 0.2 # base social interest
# Goal bonus
# NOTE(review): goal_bonus is keyed by room/action names in
# PERSONALITY_DICT, so this person lookup is likely always 0 — confirm intent.
base_w += goal_bonus.get(person, 0)
# Other in same room bonus
if any(n == person for n in others_in_room):
base_w += 0.3
# Social weight
base_w += social_weight * 0.5
elif action.startswith("confront:"):
person = action.split(":")[1]
base_w += 0.1 # lower baseline
if any(n == person for n in others_in_room):
base_w += 0.2
# Room-specific craft/production actions
elif action in ["forge", "tend_fire", "study", "write_rule", "carve", "plant"]:
# These are location-bound; should only be available in correct room
if (action == "forge" and room != "Forge") or (action == "tend_fire" and room != "Forge") or (action == "study" and room != "Tower") or (action == "write_rule" and room != "Tower") or (action == "carve" and room != "Bridge") or (action == "plant" and room != "Garden"):
continue # skip (shouldn't be available but guard)
base_w += room_weights.get(room, 0.1) * 1.5 # being in the right room = high weight
# Goal bonus
if action in goal_bonus:
base_w += goal_bonus[action]
# Rest
elif action == "rest":
# NOTE(review): this term GROWS with energy, so higher energy → MORE
# rest weight, contradicting the original "higher energy → less rest"
# comment; the energy<3 branch below partially compensates. Confirm intent.
base_w += char["energy"] * 0.1
if char["energy"] < 3:
base_w += 0.4
else:
base_w += 0.05
# Examine
elif action == "examine":
base_w += 0.1
weights.append((action, base_w))
# Normalize probabilities to sum to 1
if not weights:
return []
total = sum(w for _, w in weights)
normalized = [(a, w/total) for a, w in weights]
return normalized
def _bezalel_choice(self, char, room, available):
    """Bezalel: forges at a glowing fire (resting when drained), else returns west or tends the fire."""
    at_forge = room == "Forge"
    if at_forge and self.world.rooms["Forge"]["fire"] == "glowing":
        options = ["forge", "rest"] if char["energy"] > 2 else ["rest"]
        return random.choice(options)
    if not at_forge:
        return "move:west"
    return "tend_fire" if random.random() < 0.3 else "forge"
def _build_reasoning(self, char_name, char, room, weights, action, personality, goal):
    """Summarize the decision: context fields plus the three highest-weight contenders."""
    ranked = sorted(weights, key=lambda pair: pair[1], reverse=True)
    return {
        "char": char_name,
        "room": room,
        "goal": goal,
        "energy": char["energy"],
        "chosen": action,
        "top_contenders": ranked[:3],
    }
def _kimi_choice(self, char, room, available):
    """Kimi: sometimes chats in the Garden, studies in the Tower, otherwise drifts east."""
    partners = [act.split(":")[1] for act in available if act.startswith("speak:")]
    if room == "Garden" and partners and random.random() < 0.3:
        return f"speak:{random.choice(partners)}"
    if room == "Tower":
        if char["energy"] > 2:
            return "study"
        return "rest"
    return "move:east"  # head back toward the Garden
def _gemini_choice(self, char, room, available):
    """Gemini: listens to garden company when possible; otherwise plants/rests at home or heads west."""
    listeners = [act.split(":")[1] for act in available if act.startswith("listen:")]
    if room == "Garden" and listeners and random.random() < 0.4:
        return f"listen:{random.choice(listeners)}"
    fallback = ["plant", "rest"] if room == "Garden" else ["move:west"]
    return random.choice(fallback)
def _ezra_choice(self, char, room, available):
    """Ezra: returns south when away from the Tower; studies/writes/helps when energetic, else rests."""
    if room != "Tower":
        return "move:south"
    if char["energy"] > 2:
        return random.choice(["study", "write_rule", "help:Timmy"])
    return "rest"
def _claude_choice(self, char, room, available):
    """Claude: occasionally confronts someone available, otherwise examines or rests."""
    targets = [act.split(":")[1] for act in available if act.startswith("confront:")]
    should_confront = bool(targets) and random.random() < 0.2
    if should_confront:
        return f"confront:{random.choice(targets)}"
    return random.choice(["examine", "rest"])
def _clawcode_choice(self, char, room, available):
    """ClawCode: forges whenever energized at the Forge; otherwise wanders east, forges, or rests."""
    ready_to_forge = room == "Forge" and char["energy"] > 2
    if ready_to_forge:
        return "forge"
    return random.choice(["move:east", "forge", "rest"])
def _allegro_choice(self, char, room, available):
    """Allegro: sometimes speaks to whoever is available, otherwise patrols north/south or examines."""
    partners = [act.split(":")[1] for act in available if act.startswith("speak:")]
    if partners and random.random() < 0.3:
        return f"speak:{random.choice(partners)}"
    return random.choice(["move:north", "move:south", "examine"])
class DialogueSystem:
@@ -1396,16 +1224,7 @@ class GameEngine:
self.world.characters[char_name]["room"] = dest
self.world.characters[char_name]["energy"] -= 1
scene["npc_actions"].append(f"{char_name} moves from The {old_room} to The {dest}")
# Collect NPC reasoning for debugging (Decision Engine trace)
scene["npc_reasoning"] = {}
for npc_name in self.world.characters:
if npc_name == "Timmy":
continue
reasoning = self.npc_ai.get_reasoning(npc_name)
if reasoning:
scene["npc_reasoning"][npc_name] = reasoning
# Random NPC events
room_name = self.world.characters["Timmy"]["room"]
for char_name in self.world.characters:

View File

@@ -0,0 +1,51 @@
# Bezalel Gemma 4 VPS Wiring
Issue: timmy-home #544
This helper is the repo-side operator bundle for wiring a live Gemma 4 endpoint into Bezalel's VPS config without permanently hardcoding a single (possibly dead) pod endpoint.
What `scripts/bezalel_gemma4_vps.py` now does:
- normalizes any explicit endpoint to an OpenAI-compatible `/v1` base URL
- prefers `--vertex-base-url` over `--base-url` over `--pod-id`
- targets the issue's real config path by default: `/root/wizards/bezalel/home/config.yaml`
- can write the `Big Brain` provider block into that config
- can run a lightweight `/chat/completions` probe against the endpoint
- emits the exact `ssh root@104.131.15.18 ... curl ...` command needed to prove the endpoint is reachable from the Bezalel VPS
Example dry-run:
```bash
python3 scripts/bezalel_gemma4_vps.py \
--base-url https://<pod-id>-11434.proxy.runpod.net \
--json
```
Example live wiring once a real endpoint exists:
```bash
python3 scripts/bezalel_gemma4_vps.py \
--base-url https://<pod-id>-11434.proxy.runpod.net \
--config-path /root/wizards/bezalel/home/config.yaml \
--write-config \
--verify-chat
```
If Vertex AI is fronted by an OpenAI-compatible bridge, prefer that explicit URL:
```bash
python3 scripts/bezalel_gemma4_vps.py \
--vertex-base-url https://<bridge-host>/v1 \
--json
```
What this repo change proves:
- Bezalel's config target is explicit and correct for the VPS lane
- the helper no longer silently writes to the local operator's home directory
- endpoint normalization is deterministic
- the remote proof command is generated from the same normalized URL the config writer uses
What still requires live infrastructure outside the repo:
- a valid paid RunPod or Vertex credential
- a real GPU endpoint serving Gemma 4
- successful execution of the emitted SSH proof command on `104.131.15.18`
- successful Bezalel Hermes chat against that live endpoint

View File

@@ -8,12 +8,14 @@ Safe by default:
- can call the RunPod GraphQL API if a key is provided and --apply-runpod is used
- can update a Hermes config file in-place when --write-config is used
- can verify an OpenAI-compatible endpoint with a lightweight chat probe
- emits the exact Bezalel VPS curl proof command for remote verification
"""
from __future__ import annotations
import argparse
import json
import shlex
from pathlib import Path
from typing import Any
from urllib import request
@@ -27,7 +29,9 @@ DEFAULT_IMAGE = "ollama/ollama:latest"
DEFAULT_MODEL = "gemma4:latest"
DEFAULT_PROVIDER_NAME = "Big Brain"
DEFAULT_TOKEN_FILE = Path.home() / ".config" / "runpod" / "access_key"
DEFAULT_CONFIG_PATH = Path.home() / "wizards" / "bezalel" / "home" / "config.yaml"
DEFAULT_CONFIG_PATH = Path("/root/wizards/bezalel/home/config.yaml")
DEFAULT_BEZALEL_VPS_HOST = "104.131.15.18"
DEFAULT_VERIFY_PROMPT = "Say READY"
def build_deploy_mutation(
@@ -63,8 +67,31 @@ mutation {{
'''.strip()
def normalize_openai_base_url(base_url: str) -> str:
    """Coerce *base_url* into an OpenAI-compatible ``/v1`` base URL.

    Strips surrounding whitespace and trailing slashes, removes a trailing
    ``/chat/completions`` or ``/models`` path, and appends ``/v1`` when it
    is not already the suffix. An empty/None input yields "".
    """
    cleaned = (base_url or "").strip().rstrip("/")
    if not cleaned:
        return cleaned
    trailing = next(
        (s for s in ("/chat/completions", "/models") if cleaned.endswith(s)),
        None,
    )
    if trailing is not None:
        cleaned = cleaned[: -len(trailing)]
    return cleaned if cleaned.endswith("/v1") else f"{cleaned}/v1"
def build_runpod_endpoint(pod_id: str, port: int = 11434) -> str:
# NOTE(review): this diff hunk shows both versions of the body with no
# +/- markers — the first return is the removed line, the second is the
# added one (same result; the new form routes through the normalizer).
return f"https://{pod_id}-{port}.proxy.runpod.net/v1"
return normalize_openai_base_url(f"https://{pod_id}-{port}.proxy.runpod.net")
def resolve_base_url(*, vertex_base_url: str | None = None, base_url: str | None = None, pod_id: str | None = None) -> tuple[str | None, str | None]:
    """Resolve the effective endpoint, preferring vertex_base_url > base_url > pod_id.

    Returns (normalized_url, source_label), or (None, None) when no
    candidate was supplied.
    """
    explicit_candidates = (
        (vertex_base_url, "vertex_base_url"),
        (base_url, "base_url"),
    )
    for candidate, label in explicit_candidates:
        if candidate:
            return normalize_openai_base_url(candidate), label
    if pod_id:
        return build_runpod_endpoint(pod_id), "pod_id"
    return None, None
def parse_deploy_response(payload: dict[str, Any]) -> dict[str, str]:
@@ -102,7 +129,7 @@ def update_config_text(config_text: str, *, base_url: str, model: str = DEFAULT_
replacement = {
"name": provider_name,
"base_url": base_url,
"base_url": normalize_openai_base_url(base_url),
"api_key": "",
"model": model,
}
@@ -129,7 +156,8 @@ def write_config_file(config_path: Path, *, base_url: str, model: str = DEFAULT_
return updated
def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str = "Say READY") -> str:
def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str = DEFAULT_VERIFY_PROMPT) -> str:
base_url = normalize_openai_base_url(base_url)
payload = json.dumps(
{
"model": model,
@@ -139,7 +167,7 @@ def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str
}
).encode()
req = request.Request(
f"{base_url.rstrip('/')}/chat/completions",
f"{base_url}/chat/completions",
data=payload,
headers={"Content-Type": "application/json"},
method="POST",
@@ -149,6 +177,30 @@ def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str
return data["choices"][0]["message"]["content"]
def build_vps_verify_command(
    *,
    base_url: str,
    model: str = DEFAULT_MODEL,
    prompt: str = DEFAULT_VERIFY_PROMPT,
    vps_host: str = DEFAULT_BEZALEL_VPS_HOST,
) -> str:
    """Build the ``ssh root@<vps> 'curl …'`` command proving the endpoint is
    reachable from the Bezalel VPS.

    The curl target is derived from the same normalized URL the config
    writer uses, and both the payload and the remote command are
    shell-quoted so the whole string is copy-paste safe.
    """
    body = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "stream": False,
        "max_tokens": 16,
    }
    compact_payload = json.dumps(body, separators=(",", ":"))
    endpoint = normalize_openai_base_url(base_url) + "/chat/completions"
    remote_parts = [
        f"curl -sS {shlex.quote(endpoint)}",
        "-H 'Content-Type: application/json'",
        f"-d {shlex.quote(compact_payload)}",
    ]
    remote_command = " ".join(remote_parts)
    return f"ssh root@{vps_host} {shlex.quote(remote_command)}"
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Provision a RunPod Gemma 4 endpoint and wire a Hermes config for Bezalel.")
parser.add_argument("--pod-name", default="bezalel-gemma4")
@@ -160,6 +212,8 @@ def parse_args() -> argparse.Namespace:
parser.add_argument("--config-path", type=Path, default=DEFAULT_CONFIG_PATH)
parser.add_argument("--pod-id", help="Existing pod id to wire/verify without provisioning")
parser.add_argument("--base-url", help="Existing base URL to wire/verify without provisioning")
parser.add_argument("--vertex-base-url", help="OpenAI-compatible Vertex bridge URL; takes precedence over --base-url and --pod-id")
parser.add_argument("--vps-host", default=DEFAULT_BEZALEL_VPS_HOST, help="Bezalel VPS host for the remote curl proof command")
parser.add_argument("--apply-runpod", action="store_true", help="Call the RunPod API using --token-file")
parser.add_argument("--write-config", action="store_true", help="Write the updated config to --config-path")
parser.add_argument("--verify-chat", action="store_true", help="Call the OpenAI-compatible chat endpoint")
@@ -175,13 +229,18 @@ def main() -> None:
"cloud_type": args.cloud_type,
"model": args.model,
"provider_name": args.provider_name,
"config_path": str(args.config_path),
"vps_host": args.vps_host,
"actions": [],
}
base_url = args.base_url
if not base_url and args.pod_id:
base_url = build_runpod_endpoint(args.pod_id)
summary["actions"].append("computed_base_url_from_pod_id")
base_url, base_url_source = resolve_base_url(
vertex_base_url=args.vertex_base_url,
base_url=args.base_url,
pod_id=args.pod_id,
)
if base_url_source:
summary["actions"].append(f"resolved_base_url_from_{base_url_source}")
if args.apply_runpod:
if not args.token_file.exists():
@@ -196,12 +255,17 @@ def main() -> None:
base_url = build_runpod_endpoint("<pod-id>")
summary["actions"].append("using_placeholder_base_url")
summary["base_url"] = base_url
summary["base_url"] = normalize_openai_base_url(base_url)
summary["config_preview"] = update_config_text("", base_url=base_url, model=args.model, provider_name=args.provider_name)
summary["vps_verify_command"] = build_vps_verify_command(
base_url=base_url,
model=args.model,
prompt=DEFAULT_VERIFY_PROMPT,
vps_host=args.vps_host,
)
if args.write_config:
write_config_file(args.config_path, base_url=base_url, model=args.model, provider_name=args.provider_name)
summary["config_path"] = str(args.config_path)
summary["actions"].append("wrote_config")
if args.verify_chat:
@@ -214,8 +278,10 @@ def main() -> None:
print("--- Bezalel Gemma4 RunPod Wiring ---")
print(f"Pod name: {args.pod_name}")
print(f"Base URL: {base_url}")
print(f"Base URL: {summary['base_url']}")
print(f"Model: {args.model}")
print(f"Config target: {args.config_path}")
print(f"Bezalel VPS proof: {summary['vps_verify_command']}")
if args.write_config:
print(f"Config written: {args.config_path}")
if "verify_response" in summary:

View File

@@ -1,14 +1,20 @@
from __future__ import annotations
import json
from pathlib import Path
from unittest.mock import patch
import yaml
from scripts.bezalel_gemma4_vps import (
DEFAULT_CONFIG_PATH,
DEFAULT_BEZALEL_VPS_HOST,
build_deploy_mutation,
build_runpod_endpoint,
build_vps_verify_command,
normalize_openai_base_url,
parse_deploy_response,
resolve_base_url,
update_config_text,
verify_openai_chat,
)
@@ -28,6 +34,10 @@ class _FakeResponse:
return False
def test_default_config_path_targets_bezalel_vps_root_config() -> None:
    """The default config path must point at the VPS root config, not the operator's home."""
    expected = Path("/root/wizards/bezalel/home/config.yaml")
    assert DEFAULT_CONFIG_PATH == expected
def test_build_deploy_mutation_uses_ollama_image_and_openai_port() -> None:
query = build_deploy_mutation(name="bezalel-gemma4", gpu_type="NVIDIA L40S", model_tag="gemma4:latest")
@@ -37,6 +47,30 @@ def test_build_deploy_mutation_uses_ollama_image_and_openai_port() -> None:
assert 'volumeMountPath: "/root/.ollama"' in query
def test_normalize_openai_base_url_adds_v1_suffix() -> None:
    """A bare host URL gains the /v1 suffix."""
    normalized = normalize_openai_base_url("https://pod-11434.proxy.runpod.net")
    assert normalized == "https://pod-11434.proxy.runpod.net/v1"
def test_normalize_openai_base_url_trims_chat_completions_suffix() -> None:
    """A full chat-completions URL is trimmed back to its /v1 base."""
    normalized = normalize_openai_base_url("https://pod-11434.proxy.runpod.net/v1/chat/completions")
    assert normalized == "https://pod-11434.proxy.runpod.net/v1"
def test_resolve_base_url_prefers_vertex_over_base_and_pod_id() -> None:
    """When all three candidates are given, the Vertex bridge URL wins."""
    resolved, source = resolve_base_url(
        vertex_base_url="https://vertex.example.com/openai",
        base_url="https://plain.example.com",
        pod_id="abc123",
    )
    assert source == "vertex_base_url"
    assert resolved == "https://vertex.example.com/openai/v1"
def test_resolve_base_url_falls_back_to_base_url_before_pod_id() -> None:
    """Without a Vertex URL, an explicit base URL beats a pod id."""
    resolved, source = resolve_base_url(base_url="https://plain.example.com", pod_id="abc123")
    assert (resolved, source) == ("https://plain.example.com/v1", "base_url")
def test_build_runpod_endpoint_appends_v1_suffix() -> None:
    """A pod id expands to the RunPod proxy URL with the /v1 suffix."""
    endpoint = build_runpod_endpoint("abc123")
    assert endpoint == "https://abc123-11434.proxy.runpod.net/v1"
@@ -60,7 +94,7 @@ def test_parse_deploy_response_extracts_pod_id_and_endpoint() -> None:
}
def test_update_config_text_upserts_big_brain_provider() -> None:
def test_update_config_text_upserts_big_brain_provider_and_normalizes_base_url() -> None:
original = """
model:
default: kimi-k2.5
@@ -72,7 +106,7 @@ custom_providers:
model: gemma3:27b
"""
updated = update_config_text(original, base_url="https://new-pod-11434.proxy.runpod.net/v1", model="gemma4:latest")
updated = update_config_text(original, base_url="https://new-pod-11434.proxy.runpod.net", model="gemma4:latest")
parsed = yaml.safe_load(updated)
assert parsed["model"] == {"default": "kimi-k2.5", "provider": "kimi-coding"}
@@ -86,7 +120,14 @@ custom_providers:
]
def test_verify_openai_chat_calls_chat_completions() -> None:
def test_build_vps_verify_command_targets_bezalel_host_and_chat_completions() -> None:
    """The proof command SSHes into the default VPS host and curls /v1/chat/completions."""
    command = build_vps_verify_command(base_url="https://pod-11434.proxy.runpod.net", model="gemma4:latest")
    assert command.startswith(f"ssh root@{DEFAULT_BEZALEL_VPS_HOST} ")
    for fragment in ("/v1/chat/completions", "gemma4:latest"):
        assert fragment in command
def test_verify_openai_chat_calls_chat_completions_with_normalized_base_url() -> None:
response_payload = {
"choices": [
{
@@ -101,7 +142,7 @@ def test_verify_openai_chat_calls_chat_completions() -> None:
"scripts.bezalel_gemma4_vps.request.urlopen",
return_value=_FakeResponse(response_payload),
) as mocked:
result = verify_openai_chat("https://pod-11434.proxy.runpod.net/v1", model="gemma4:latest", prompt="say READY")
result = verify_openai_chat("https://pod-11434.proxy.runpod.net", model="gemma4:latest", prompt="say READY")
assert result == "READY"
req = mocked.call_args.args[0]
@@ -109,3 +150,10 @@ def test_verify_openai_chat_calls_chat_completions() -> None:
payload = json.loads(req.data.decode())
assert payload["model"] == "gemma4:latest"
assert payload["messages"][0]["content"] == "say READY"
def test_readme_documents_root_config_path_and_vps_proof_command() -> None:
    """The README must mention the VPS config path, SSH proof host, and the Vertex flag."""
    readme_text = Path("scripts/README_bezalel_gemma4_vps.md").read_text()
    required_fragments = (
        "/root/wizards/bezalel/home/config.yaml",
        "ssh root@104.131.15.18",
        "--vertex-base-url",
    )
    for needle in required_fragments:
        assert needle in readme_text