Compare commits

...

1 Commit

Author: Alexander Whitestone
SHA1: 2946f9df73
Message: feat: add Bezalel Gemma4 wiring scaffold (#544)
Date: 2026-04-15 01:17:32 -04:00
2 changed files with 339 additions and 0 deletions

scripts/bezalel_gemma4_vps.py

@@ -0,0 +1,228 @@
#!/usr/bin/env python3
"""Provisioning and wiring scaffold for Bezalel Gemma 4 on RunPod.
Refs: timmy-home #544
Safe by default:
- builds the RunPod deploy mutation
- can call the RunPod GraphQL API if a key is provided and --apply-runpod is used
- can update a Hermes config file in-place when --write-config is used
- can verify an OpenAI-compatible endpoint with a lightweight chat probe
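
Example invocations (illustrative; the pod id is a placeholder, all flags are
defined in parse_args below):

    # Dry-run wiring for an existing pod, machine-readable output:
    python scripts/bezalel_gemma4_vps.py --pod-id abc123 --json

    # Provision a pod, write the Hermes config, then probe the endpoint:
    python scripts/bezalel_gemma4_vps.py --apply-runpod --write-config --verify-chat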
"""
from __future__ import annotations
import argparse
import json
from pathlib import Path
from typing import Any
from urllib import request
import yaml
RUNPOD_GRAPHQL_URL = "https://api.runpod.io/graphql"
DEFAULT_GPU_TYPE = "NVIDIA L40S"
DEFAULT_CLOUD_TYPE = "COMMUNITY"
DEFAULT_IMAGE = "ollama/ollama:latest"
DEFAULT_MODEL = "gemma4:latest"
DEFAULT_PROVIDER_NAME = "Big Brain"
DEFAULT_TOKEN_FILE = Path.home() / ".config" / "runpod" / "access_key"
DEFAULT_CONFIG_PATH = Path.home() / "wizards" / "bezalel" / "home" / "config.yaml"
def build_deploy_mutation(
*,
name: str,
gpu_type: str = DEFAULT_GPU_TYPE,
cloud_type: str = DEFAULT_CLOUD_TYPE,
container_disk_gb: int = 100,
volume_gb: int = 50,
model_tag: str = DEFAULT_MODEL,
) -> str:
# model_tag is accepted for parity with the CLI/reporting path even though the
# pod deploy itself only needs the Ollama image + port wiring.
_ = model_tag
return f'''
mutation {{
podFindAndDeployOnDemand(input: {{
cloudType: {cloud_type},
gpuCount: 1,
gpuTypeId: "{gpu_type}",
name: "{name}",
containerDiskInGb: {container_disk_gb},
imageName: "{DEFAULT_IMAGE}",
ports: "11434/http",
volumeInGb: {volume_gb},
volumeMountPath: "/root/.ollama"
}}) {{
id
desiredStatus
machineId
}}
}}
'''.strip()
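# Note: cloudType is rendered unquoted in the mutation above because it is a
# GraphQL enum value, while gpuTypeId and name are quoted string literals.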
def build_runpod_endpoint(pod_id: str, port: int = 11434) -> str:
return f"https://{pod_id}-{port}.proxy.runpod.net/v1"
def parse_deploy_response(payload: dict[str, Any]) -> dict[str, str]:
data = (payload.get("data") or {}).get("podFindAndDeployOnDemand") or {}
pod_id = data.get("id")
if not pod_id:
raise ValueError(f"RunPod deploy response did not contain a pod id: {payload}")
return {
"pod_id": pod_id,
"desired_status": data.get("desiredStatus", "UNKNOWN"),
"base_url": build_runpod_endpoint(pod_id),
}
def deploy_runpod(*, api_key: str, name: str, gpu_type: str = DEFAULT_GPU_TYPE, cloud_type: str = DEFAULT_CLOUD_TYPE, model: str = DEFAULT_MODEL) -> dict[str, str]:
query = build_deploy_mutation(name=name, gpu_type=gpu_type, cloud_type=cloud_type, model_tag=model)
payload = json.dumps({"query": query}).encode()
req = request.Request(
RUNPOD_GRAPHQL_URL,
data=payload,
headers={
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
},
method="POST",
)
with request.urlopen(req, timeout=30) as resp:
response_payload = json.loads(resp.read().decode())
return parse_deploy_response(response_payload)
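# deploy_runpod makes a single synchronous POST. GraphQL-level failures
# typically come back as HTTP 200 with an "errors" key and no pod id, which
# parse_deploy_response surfaces as a ValueError carrying the full payload.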
def update_config_text(config_text: str, *, base_url: str, model: str = DEFAULT_MODEL, provider_name: str = DEFAULT_PROVIDER_NAME) -> str:
parsed = yaml.safe_load(config_text) or {}
providers = list(parsed.get("custom_providers") or [])
replacement = {
"name": provider_name,
"base_url": base_url,
"api_key": "",
"model": model,
}
updated = False
for idx, provider in enumerate(providers):
if provider.get("name") == provider_name:
providers[idx] = replacement
updated = True
break
if not updated:
providers.append(replacement)
parsed["custom_providers"] = providers
return yaml.safe_dump(parsed, sort_keys=False)
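# update_config_text upserts by provider name: an existing entry matching
# provider_name is replaced in place, otherwise the provider is appended.
# write_config_file (below) applies the same semantics to the file on disk.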
def write_config_file(config_path: Path, *, base_url: str, model: str = DEFAULT_MODEL, provider_name: str = DEFAULT_PROVIDER_NAME) -> str:
original = config_path.read_text() if config_path.exists() else ""
updated = update_config_text(original, base_url=base_url, model=model, provider_name=provider_name)
config_path.parent.mkdir(parents=True, exist_ok=True)
config_path.write_text(updated)
return updated
def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str = "Say READY") -> str:
payload = json.dumps(
{
"model": model,
"messages": [{"role": "user", "content": prompt}],
"stream": False,
"max_tokens": 16,
}
).encode()
req = request.Request(
f"{base_url.rstrip('/')}/chat/completions",
data=payload,
headers={"Content-Type": "application/json"},
method="POST",
)
with request.urlopen(req, timeout=30) as resp:
data = json.loads(resp.read().decode())
return data["choices"][0]["message"]["content"]
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Provision a RunPod Gemma 4 endpoint and wire a Hermes config for Bezalel.")
parser.add_argument("--pod-name", default="bezalel-gemma4")
parser.add_argument("--gpu-type", default=DEFAULT_GPU_TYPE)
parser.add_argument("--cloud-type", default=DEFAULT_CLOUD_TYPE)
parser.add_argument("--model", default=DEFAULT_MODEL)
parser.add_argument("--provider-name", default=DEFAULT_PROVIDER_NAME)
parser.add_argument("--token-file", type=Path, default=DEFAULT_TOKEN_FILE)
parser.add_argument("--config-path", type=Path, default=DEFAULT_CONFIG_PATH)
parser.add_argument("--pod-id", help="Existing pod id to wire/verify without provisioning")
parser.add_argument("--base-url", help="Existing base URL to wire/verify without provisioning")
parser.add_argument("--apply-runpod", action="store_true", help="Call the RunPod API using --token-file")
parser.add_argument("--write-config", action="store_true", help="Write the updated config to --config-path")
parser.add_argument("--verify-chat", action="store_true", help="Call the OpenAI-compatible chat endpoint")
parser.add_argument("--json", action="store_true", help="Emit machine-readable JSON")
return parser.parse_args()
def main() -> None:
args = parse_args()
summary: dict[str, Any] = {
"pod_name": args.pod_name,
"gpu_type": args.gpu_type,
"cloud_type": args.cloud_type,
"model": args.model,
"provider_name": args.provider_name,
"actions": [],
}
base_url = args.base_url
if not base_url and args.pod_id:
base_url = build_runpod_endpoint(args.pod_id)
summary["actions"].append("computed_base_url_from_pod_id")
if args.apply_runpod:
if not args.token_file.exists():
raise SystemExit(f"RunPod token file not found: {args.token_file}")
api_key = args.token_file.read_text().strip()
deployed = deploy_runpod(api_key=api_key, name=args.pod_name, gpu_type=args.gpu_type, cloud_type=args.cloud_type, model=args.model)
summary["deployment"] = deployed
base_url = deployed["base_url"]
summary["actions"].append("deployed_runpod_pod")
if not base_url:
base_url = build_runpod_endpoint("<pod-id>")
summary["actions"].append("using_placeholder_base_url")
summary["base_url"] = base_url
summary["config_preview"] = update_config_text("", base_url=base_url, model=args.model, provider_name=args.provider_name)
if args.write_config:
write_config_file(args.config_path, base_url=base_url, model=args.model, provider_name=args.provider_name)
summary["config_path"] = str(args.config_path)
summary["actions"].append("wrote_config")
if args.verify_chat:
summary["verify_response"] = verify_openai_chat(base_url, model=args.model)
summary["actions"].append("verified_chat")
if args.json:
print(json.dumps(summary, indent=2))
return
print("--- Bezalel Gemma4 RunPod Wiring ---")
print(f"Pod name: {args.pod_name}")
print(f"Base URL: {base_url}")
print(f"Model: {args.model}")
if args.write_config:
print(f"Config written: {args.config_path}")
if "verify_response" in summary:
print(f"Verify response: {summary['verify_response']}")
if summary["actions"]:
print("Actions: " + ", ".join(summary["actions"]))
if __name__ == "__main__":
main()


@@ -0,0 +1,111 @@
from __future__ import annotations
import json
from unittest.mock import patch
import yaml
from scripts.bezalel_gemma4_vps import (
build_deploy_mutation,
build_runpod_endpoint,
parse_deploy_response,
update_config_text,
verify_openai_chat,
)
class _FakeResponse:
def __init__(self, payload: dict):
self._payload = json.dumps(payload).encode()
def read(self) -> bytes:
return self._payload
def __enter__(self):
return self
def __exit__(self, exc_type, exc, tb):
return False
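# _FakeResponse mimics just enough of urlopen's response object (read() plus
# the context-manager protocol) to stand in for it under mock.patch.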
def test_build_deploy_mutation_uses_ollama_image_and_openai_port() -> None:
query = build_deploy_mutation(name="bezalel-gemma4", gpu_type="NVIDIA L40S", model_tag="gemma4:latest")
assert 'gpuTypeId: "NVIDIA L40S"' in query
assert 'imageName: "ollama/ollama:latest"' in query
assert 'ports: "11434/http"' in query
assert 'volumeMountPath: "/root/.ollama"' in query
def test_build_runpod_endpoint_appends_v1_suffix() -> None:
assert build_runpod_endpoint("abc123") == "https://abc123-11434.proxy.runpod.net/v1"
def test_parse_deploy_response_extracts_pod_id_and_endpoint() -> None:
payload = {
"data": {
"podFindAndDeployOnDemand": {
"id": "podxyz",
"desiredStatus": "RUNNING",
}
}
}
result = parse_deploy_response(payload)
assert result == {
"pod_id": "podxyz",
"desired_status": "RUNNING",
"base_url": "https://podxyz-11434.proxy.runpod.net/v1",
}
def test_update_config_text_upserts_big_brain_provider() -> None:
original = """
model:
default: kimi-k2.5
provider: kimi-coding
custom_providers:
- name: Big Brain
base_url: https://old-endpoint/v1
api_key: ''
model: gemma3:27b
"""
updated = update_config_text(original, base_url="https://new-pod-11434.proxy.runpod.net/v1", model="gemma4:latest")
parsed = yaml.safe_load(updated)
assert parsed["model"] == {"default": "kimi-k2.5", "provider": "kimi-coding"}
assert parsed["custom_providers"] == [
{
"name": "Big Brain",
"base_url": "https://new-pod-11434.proxy.runpod.net/v1",
"api_key": "",
"model": "gemma4:latest",
}
]
def test_verify_openai_chat_calls_chat_completions() -> None:
response_payload = {
"choices": [
{
"message": {
"content": "READY"
}
}
]
}
with patch(
"scripts.bezalel_gemma4_vps.request.urlopen",
return_value=_FakeResponse(response_payload),
) as mocked:
result = verify_openai_chat("https://pod-11434.proxy.runpod.net/v1", model="gemma4:latest", prompt="say READY")
assert result == "READY"
req = mocked.call_args.args[0]
assert req.full_url == "https://pod-11434.proxy.runpod.net/v1/chat/completions"
payload = json.loads(req.data.decode())
assert payload["model"] == "gemma4:latest"
assert payload["messages"][0]["content"] == "say READY"