Compare commits: step35/669...fix/544 (1 commit)

Commit: 7c99058b0b

scripts/README_bezalel_gemma4_vps.md (new file)

@@ -0,0 +1,51 @@
# Bezalel Gemma 4 VPS Wiring

Issue: timmy-home #544

This helper is the repo-side operator bundle for wiring a live Gemma 4 endpoint into Bezalel's VPS config without hardcoding one dead pod forever.

What `scripts/bezalel_gemma4_vps.py` now does:

- normalizes any explicit endpoint to an OpenAI-compatible `/v1` base URL (see the sketch after this list)
- prefers `--vertex-base-url` over `--base-url` over `--pod-id`
- targets the issue's real config path by default: `/root/wizards/bezalel/home/config.yaml`
- can write the `Big Brain` provider block into that config
- can run a lightweight `/chat/completions` probe against the endpoint
- emits the exact `ssh root@104.131.15.18 ... curl ...` command needed to prove the endpoint is reachable from the Bezalel VPS
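The normalization rule is deterministic. A minimal illustration of the expected mapping, assuming the module is importable as `scripts.bezalel_gemma4_vps` (the import path the test suite uses):

```python
# Illustration only: expected outcomes of the normalization rule
# described above, mirroring the assertions in the test suite.
from scripts.bezalel_gemma4_vps import normalize_openai_base_url

# A bare host gains the /v1 suffix.
assert (
    normalize_openai_base_url("https://pod-11434.proxy.runpod.net")
    == "https://pod-11434.proxy.runpod.net/v1"
)

# A full /chat/completions URL is trimmed back to its /v1 base.
assert (
    normalize_openai_base_url("https://pod-11434.proxy.runpod.net/v1/chat/completions")
    == "https://pod-11434.proxy.runpod.net/v1"
)
```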
Example dry-run:

```bash
python3 scripts/bezalel_gemma4_vps.py \
  --base-url https://<pod-id>-11434.proxy.runpod.net \
  --json
```

Example live wiring once a real endpoint exists:

```bash
python3 scripts/bezalel_gemma4_vps.py \
  --base-url https://<pod-id>-11434.proxy.runpod.net \
  --config-path /root/wizards/bezalel/home/config.yaml \
  --write-config \
  --verify-chat
```

If Vertex AI is fronted by an OpenAI-compatible bridge, prefer that explicit URL:

```bash
python3 scripts/bezalel_gemma4_vps.py \
  --vertex-base-url https://<bridge-host>/v1 \
  --json
```
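The precedence among `--vertex-base-url`, `--base-url`, and `--pod-id` is implemented by `resolve_base_url`. A minimal sketch of the selection order, again assuming the import path the tests use:

```python
# Illustration only: flag precedence as implemented by resolve_base_url.
from scripts.bezalel_gemma4_vps import resolve_base_url

# With all three inputs present, the Vertex bridge URL wins.
base_url, source = resolve_base_url(
    vertex_base_url="https://vertex.example.com/openai",
    base_url="https://plain.example.com",
    pod_id="abc123",
)
assert source == "vertex_base_url"
assert base_url == "https://vertex.example.com/openai/v1"

# Without a Vertex URL, an explicit base URL beats a pod id.
base_url, source = resolve_base_url(base_url="https://plain.example.com", pod_id="abc123")
assert source == "base_url"
assert base_url == "https://plain.example.com/v1"
```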

What this repo change proves:

- Bezalel's config target is explicit and correct for the VPS lane
- the helper no longer silently writes to the local operator's home directory
- endpoint normalization is deterministic
- the remote proof command is generated from the same normalized URL the config writer uses (see the check after this list)
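A quick way to check the last two points together, assuming the same import path as above; the proof command embeds the same `/v1` base the normalizer produces:

```python
# Illustration only: the emitted ssh/curl proof command targets the
# normalized /v1 endpoint, not the raw URL it was given.
from scripts.bezalel_gemma4_vps import (
    DEFAULT_BEZALEL_VPS_HOST,
    build_vps_verify_command,
    normalize_openai_base_url,
)

raw = "https://pod-11434.proxy.runpod.net"
command = build_vps_verify_command(base_url=raw, model="gemma4:latest")

assert command.startswith(f"ssh root@{DEFAULT_BEZALEL_VPS_HOST} ")
assert normalize_openai_base_url(raw) + "/chat/completions" in command
```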

What still requires live infrastructure outside the repo:

- a valid paid RunPod or Vertex credential
- a real GPU endpoint serving Gemma 4
- successful execution of the emitted SSH proof command on `104.131.15.18`
- successful Bezalel Hermes chat against that live endpoint

scripts/bezalel_gemma4_vps.py
@@ -8,12 +8,14 @@ Safe by default:
 - can call the RunPod GraphQL API if a key is provided and --apply-runpod is used
 - can update a Hermes config file in-place when --write-config is used
 - can verify an OpenAI-compatible endpoint with a lightweight chat probe
+- emits the exact Bezalel VPS curl proof command for remote verification
 """

 from __future__ import annotations

 import argparse
 import json
+import shlex
 from pathlib import Path
 from typing import Any
 from urllib import request
@@ -27,7 +29,9 @@ DEFAULT_IMAGE = "ollama/ollama:latest"
 DEFAULT_MODEL = "gemma4:latest"
 DEFAULT_PROVIDER_NAME = "Big Brain"
 DEFAULT_TOKEN_FILE = Path.home() / ".config" / "runpod" / "access_key"
-DEFAULT_CONFIG_PATH = Path.home() / "wizards" / "bezalel" / "home" / "config.yaml"
+DEFAULT_CONFIG_PATH = Path("/root/wizards/bezalel/home/config.yaml")
+DEFAULT_BEZALEL_VPS_HOST = "104.131.15.18"
+DEFAULT_VERIFY_PROMPT = "Say READY"


 def build_deploy_mutation(
@@ -63,8 +67,31 @@ mutation {{
 '''.strip()


+def normalize_openai_base_url(base_url: str) -> str:
+    normalized = (base_url or "").strip().rstrip("/")
+    if not normalized:
+        return normalized
+    for suffix in ("/chat/completions", "/models"):
+        if normalized.endswith(suffix):
+            normalized = normalized[: -len(suffix)]
+            break
+    if not normalized.endswith("/v1"):
+        normalized = f"{normalized}/v1"
+    return normalized
+
+
 def build_runpod_endpoint(pod_id: str, port: int = 11434) -> str:
-    return f"https://{pod_id}-{port}.proxy.runpod.net/v1"
+    return normalize_openai_base_url(f"https://{pod_id}-{port}.proxy.runpod.net")
+
+
+def resolve_base_url(*, vertex_base_url: str | None = None, base_url: str | None = None, pod_id: str | None = None) -> tuple[str | None, str | None]:
+    if vertex_base_url:
+        return normalize_openai_base_url(vertex_base_url), "vertex_base_url"
+    if base_url:
+        return normalize_openai_base_url(base_url), "base_url"
+    if pod_id:
+        return build_runpod_endpoint(pod_id), "pod_id"
+    return None, None


 def parse_deploy_response(payload: dict[str, Any]) -> dict[str, str]:
@@ -102,7 +129,7 @@ def update_config_text(config_text: str, *, base_url: str, model: str = DEFAULT_
 
     replacement = {
         "name": provider_name,
-        "base_url": base_url,
+        "base_url": normalize_openai_base_url(base_url),
         "api_key": "",
         "model": model,
     }
@@ -129,7 +156,8 @@ def write_config_file(config_path: Path, *, base_url: str, model: str = DEFAULT_
     return updated


-def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str = "Say READY") -> str:
+def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str = DEFAULT_VERIFY_PROMPT) -> str:
+    base_url = normalize_openai_base_url(base_url)
     payload = json.dumps(
         {
             "model": model,
@@ -139,7 +167,7 @@ def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str
         }
     ).encode()
     req = request.Request(
-        f"{base_url.rstrip('/')}/chat/completions",
+        f"{base_url}/chat/completions",
         data=payload,
         headers={"Content-Type": "application/json"},
         method="POST",
@@ -149,6 +177,30 @@ def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str
     return data["choices"][0]["message"]["content"]


+def build_vps_verify_command(
+    *,
+    base_url: str,
+    model: str = DEFAULT_MODEL,
+    prompt: str = DEFAULT_VERIFY_PROMPT,
+    vps_host: str = DEFAULT_BEZALEL_VPS_HOST,
+) -> str:
+    payload = json.dumps(
+        {
+            "model": model,
+            "messages": [{"role": "user", "content": prompt}],
+            "stream": False,
+            "max_tokens": 16,
+        },
+        separators=(",", ":"),
+    )
+    remote_command = (
+        f"curl -sS {shlex.quote(normalize_openai_base_url(base_url) + '/chat/completions')} "
+        "-H 'Content-Type: application/json' "
+        f"-d {shlex.quote(payload)}"
+    )
+    return f"ssh root@{vps_host} {shlex.quote(remote_command)}"


 def parse_args() -> argparse.Namespace:
     parser = argparse.ArgumentParser(description="Provision a RunPod Gemma 4 endpoint and wire a Hermes config for Bezalel.")
     parser.add_argument("--pod-name", default="bezalel-gemma4")
@@ -160,6 +212,8 @@ def parse_args() -> argparse.Namespace:
     parser.add_argument("--config-path", type=Path, default=DEFAULT_CONFIG_PATH)
     parser.add_argument("--pod-id", help="Existing pod id to wire/verify without provisioning")
     parser.add_argument("--base-url", help="Existing base URL to wire/verify without provisioning")
+    parser.add_argument("--vertex-base-url", help="OpenAI-compatible Vertex bridge URL; takes precedence over --base-url and --pod-id")
+    parser.add_argument("--vps-host", default=DEFAULT_BEZALEL_VPS_HOST, help="Bezalel VPS host for the remote curl proof command")
     parser.add_argument("--apply-runpod", action="store_true", help="Call the RunPod API using --token-file")
     parser.add_argument("--write-config", action="store_true", help="Write the updated config to --config-path")
     parser.add_argument("--verify-chat", action="store_true", help="Call the OpenAI-compatible chat endpoint")
@@ -175,13 +229,18 @@ def main() -> None:
         "cloud_type": args.cloud_type,
         "model": args.model,
         "provider_name": args.provider_name,
         "config_path": str(args.config_path),
+        "vps_host": args.vps_host,
         "actions": [],
     }

-    base_url = args.base_url
-    if not base_url and args.pod_id:
-        base_url = build_runpod_endpoint(args.pod_id)
-        summary["actions"].append("computed_base_url_from_pod_id")
+    base_url, base_url_source = resolve_base_url(
+        vertex_base_url=args.vertex_base_url,
+        base_url=args.base_url,
+        pod_id=args.pod_id,
+    )
+    if base_url_source:
+        summary["actions"].append(f"resolved_base_url_from_{base_url_source}")

     if args.apply_runpod:
         if not args.token_file.exists():
@@ -196,12 +255,17 @@ def main() -> None:
         base_url = build_runpod_endpoint("<pod-id>")
         summary["actions"].append("using_placeholder_base_url")

-    summary["base_url"] = base_url
+    summary["base_url"] = normalize_openai_base_url(base_url)
     summary["config_preview"] = update_config_text("", base_url=base_url, model=args.model, provider_name=args.provider_name)
+    summary["vps_verify_command"] = build_vps_verify_command(
+        base_url=base_url,
+        model=args.model,
+        prompt=DEFAULT_VERIFY_PROMPT,
+        vps_host=args.vps_host,
+    )

     if args.write_config:
         write_config_file(args.config_path, base_url=base_url, model=args.model, provider_name=args.provider_name)
         summary["config_path"] = str(args.config_path)
         summary["actions"].append("wrote_config")

     if args.verify_chat:
@@ -214,8 +278,10 @@ def main() -> None:
 
     print("--- Bezalel Gemma4 RunPod Wiring ---")
     print(f"Pod name: {args.pod_name}")
-    print(f"Base URL: {base_url}")
+    print(f"Base URL: {summary['base_url']}")
     print(f"Model: {args.model}")
     print(f"Config target: {args.config_path}")
+    print(f"Bezalel VPS proof: {summary['vps_verify_command']}")
     if args.write_config:
         print(f"Config written: {args.config_path}")
     if "verify_response" in summary:

Test module for scripts/bezalel_gemma4_vps.py
@@ -1,14 +1,20 @@
 from __future__ import annotations

 import json
+from pathlib import Path
 from unittest.mock import patch

 import yaml

 from scripts.bezalel_gemma4_vps import (
+    DEFAULT_CONFIG_PATH,
+    DEFAULT_BEZALEL_VPS_HOST,
     build_deploy_mutation,
     build_runpod_endpoint,
+    build_vps_verify_command,
+    normalize_openai_base_url,
     parse_deploy_response,
+    resolve_base_url,
     update_config_text,
     verify_openai_chat,
 )
@@ -28,6 +34,10 @@ class _FakeResponse:
         return False


+def test_default_config_path_targets_bezalel_vps_root_config() -> None:
+    assert DEFAULT_CONFIG_PATH == Path("/root/wizards/bezalel/home/config.yaml")
+
+
 def test_build_deploy_mutation_uses_ollama_image_and_openai_port() -> None:
     query = build_deploy_mutation(name="bezalel-gemma4", gpu_type="NVIDIA L40S", model_tag="gemma4:latest")

@@ -37,6 +47,30 @@ def test_build_deploy_mutation_uses_ollama_image_and_openai_port() -> None:
     assert 'volumeMountPath: "/root/.ollama"' in query


+def test_normalize_openai_base_url_adds_v1_suffix() -> None:
+    assert normalize_openai_base_url("https://pod-11434.proxy.runpod.net") == "https://pod-11434.proxy.runpod.net/v1"
+
+
+def test_normalize_openai_base_url_trims_chat_completions_suffix() -> None:
+    assert normalize_openai_base_url("https://pod-11434.proxy.runpod.net/v1/chat/completions") == "https://pod-11434.proxy.runpod.net/v1"
+
+
+def test_resolve_base_url_prefers_vertex_over_base_and_pod_id() -> None:
+    base_url, source = resolve_base_url(
+        vertex_base_url="https://vertex.example.com/openai",
+        base_url="https://plain.example.com",
+        pod_id="abc123",
+    )
+    assert source == "vertex_base_url"
+    assert base_url == "https://vertex.example.com/openai/v1"
+
+
+def test_resolve_base_url_falls_back_to_base_url_before_pod_id() -> None:
+    base_url, source = resolve_base_url(base_url="https://plain.example.com", pod_id="abc123")
+    assert source == "base_url"
+    assert base_url == "https://plain.example.com/v1"
+
+
 def test_build_runpod_endpoint_appends_v1_suffix() -> None:
     assert build_runpod_endpoint("abc123") == "https://abc123-11434.proxy.runpod.net/v1"

@@ -60,7 +94,7 @@ def test_parse_deploy_response_extracts_pod_id_and_endpoint() -> None:
     }


-def test_update_config_text_upserts_big_brain_provider() -> None:
+def test_update_config_text_upserts_big_brain_provider_and_normalizes_base_url() -> None:
     original = """
 model:
   default: kimi-k2.5
@@ -72,7 +106,7 @@ custom_providers:
     model: gemma3:27b
 """

-    updated = update_config_text(original, base_url="https://new-pod-11434.proxy.runpod.net/v1", model="gemma4:latest")
+    updated = update_config_text(original, base_url="https://new-pod-11434.proxy.runpod.net", model="gemma4:latest")
     parsed = yaml.safe_load(updated)

     assert parsed["model"] == {"default": "kimi-k2.5", "provider": "kimi-coding"}
@@ -86,7 +120,14 @@ custom_providers:
     ]


-def test_verify_openai_chat_calls_chat_completions() -> None:
+def test_build_vps_verify_command_targets_bezalel_host_and_chat_completions() -> None:
+    command = build_vps_verify_command(base_url="https://pod-11434.proxy.runpod.net", model="gemma4:latest")
+    assert command.startswith(f"ssh root@{DEFAULT_BEZALEL_VPS_HOST} ")
+    assert "/v1/chat/completions" in command
+    assert "gemma4:latest" in command
+
+
+def test_verify_openai_chat_calls_chat_completions_with_normalized_base_url() -> None:
     response_payload = {
         "choices": [
             {
@@ -101,7 +142,7 @@ def test_verify_openai_chat_calls_chat_completions() -> None:
         "scripts.bezalel_gemma4_vps.request.urlopen",
         return_value=_FakeResponse(response_payload),
     ) as mocked:
-        result = verify_openai_chat("https://pod-11434.proxy.runpod.net/v1", model="gemma4:latest", prompt="say READY")
+        result = verify_openai_chat("https://pod-11434.proxy.runpod.net", model="gemma4:latest", prompt="say READY")

     assert result == "READY"
     req = mocked.call_args.args[0]
@@ -109,3 +150,10 @@ def test_verify_openai_chat_calls_chat_completions() -> None:
     payload = json.loads(req.data.decode())
     assert payload["model"] == "gemma4:latest"
     assert payload["messages"][0]["content"] == "say READY"
+
+
+def test_readme_documents_root_config_path_and_vps_proof_command() -> None:
+    readme = Path("scripts/README_bezalel_gemma4_vps.md").read_text()
+    assert "/root/wizards/bezalel/home/config.yaml" in readme
+    assert "ssh root@104.131.15.18" in readme
+    assert "--vertex-base-url" in readme