Compare commits

1 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 2946f9df73 |  |
@@ -1,61 +0,0 @@
# [PHASE-1] Survival - Keep the Lights On

Phase 1 is the manual-clicker stage of the fleet. The machines exist. The services exist. The human is still the automation loop.

## Phase Definition

- Current state: fleet exists, agents run, everything important still depends on human vigilance.
- Resources tracked here: Capacity, Uptime.
- Next phase: [PHASE-2] Automation - Self-Healing Infrastructure

## Current Buildings

- VPS hosts: Ezra, Allegro, Bezalel
- Agents: Timmy harness, Code Claw heartbeat, Gemini AI Studio worker
- Gitea forge
- Evennia worlds

## Current Resource Snapshot

- Fleet operational: yes
- Uptime baseline: 0.0%
- Days at or above 95% uptime: 0
- Capacity utilization: 0.0%

## Next Phase Trigger

To unlock [PHASE-2] Automation - Self-Healing Infrastructure, the fleet must hold both of these conditions at once:
- Uptime >= 95% for 30 consecutive days
- Capacity utilization > 60%
- Current trigger state: NOT READY

## Missing Requirements

- Uptime 0.0% / 95.0%
- Days at or above 95% uptime: 0/30
- Capacity utilization 0.0% / >60.0%

## Manual Clicker Interpretation

Paperclips analogy: Phase 1 = Manual clicker. You ARE the automation.
Every restart, every SSH, every check is a manual click.

## Manual Clicks Still Required

- Restart agents and services by hand when a node goes dark.
- SSH into machines to verify health, disk, and memory.
- Check Gitea, relay, and world services manually before and after changes.
- Act as the scheduler when automation is missing or only partially wired.

## Repo Signals Already Present

- `scripts/fleet_health_probe.sh` — Automated health probe exists and can supply the uptime baseline for the next phase.
- `scripts/fleet_milestones.py` — Milestone tracker exists, so survival achievements can be narrated and logged.
- `scripts/auto_restart_agent.sh` — Auto-restart tooling already exists as phase-2 groundwork.
- `scripts/backup_pipeline.sh` — Backup pipeline scaffold exists for post-survival automation work.
- `infrastructure/timmy-bridge/reports/generate_report.py` — Bridge reporting exists and can summarize heartbeat-driven uptime.

## Notes

- The fleet is alive, but the human is still the control loop.
- Phase 1 is about naming reality plainly so later automation has a baseline to beat.
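The trigger above is evaluated by `compute_phase_status` later in this diff; as a minimal, illustrative sketch (not part of the repo), the gate reduces to:

# Illustrative sketch of the phase-2 unlock gate stated above (not repo code).
def phase_2_ready(uptime_percent: float, days_at_or_above_95: int, capacity_percent: float) -> bool:
    return (
        uptime_percent >= 95.0            # uptime baseline at or above 95%
        and days_at_or_above_95 >= 30     # held for 30 consecutive days
        and capacity_percent > 60.0       # capacity utilization strictly above 60%
    )

assert phase_2_ready(0.0, 0, 0.0) is False  # matches the NOT READY snapshot above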
@@ -12,7 +12,6 @@ Quick-reference index for common operational tasks across the Timmy Foundation i
| Check fleet health | fleet-ops | `python3 scripts/fleet_readiness.py` |
| Agent scorecard | fleet-ops | `python3 scripts/agent_scorecard.py` |
| View fleet manifest | fleet-ops | `cat manifest.yaml` |
| Render Phase-1 survival report | timmy-home | `python3 scripts/fleet_phase_status.py --output docs/FLEET_PHASE_1_SURVIVAL.md` |

## the-nexus (Frontend + Brain)
228 scripts/bezalel_gemma4_vps.py (Normal file)
@@ -0,0 +1,228 @@
#!/usr/bin/env python3
"""Provisioning and wiring scaffold for Bezalel Gemma 4 on RunPod.

Refs: timmy-home #544

Safe by default:
- builds the RunPod deploy mutation
- can call the RunPod GraphQL API if a key is provided and --apply-runpod is used
- can update a Hermes config file in-place when --write-config is used
- can verify an OpenAI-compatible endpoint with a lightweight chat probe
"""

from __future__ import annotations

import argparse
import json
from pathlib import Path
from typing import Any
from urllib import request

import yaml

RUNPOD_GRAPHQL_URL = "https://api.runpod.io/graphql"
DEFAULT_GPU_TYPE = "NVIDIA L40S"
DEFAULT_CLOUD_TYPE = "COMMUNITY"
DEFAULT_IMAGE = "ollama/ollama:latest"
DEFAULT_MODEL = "gemma4:latest"
DEFAULT_PROVIDER_NAME = "Big Brain"
DEFAULT_TOKEN_FILE = Path.home() / ".config" / "runpod" / "access_key"
DEFAULT_CONFIG_PATH = Path.home() / "wizards" / "bezalel" / "home" / "config.yaml"


def build_deploy_mutation(
    *,
    name: str,
    gpu_type: str = DEFAULT_GPU_TYPE,
    cloud_type: str = DEFAULT_CLOUD_TYPE,
    container_disk_gb: int = 100,
    volume_gb: int = 50,
    model_tag: str = DEFAULT_MODEL,
) -> str:
    # model_tag is accepted for parity with the CLI/reporting path even though the
    # pod deploy itself only needs the Ollama image + port wiring.
    _ = model_tag
    return f'''
mutation {{
  podFindAndDeployOnDemand(input: {{
    cloudType: {cloud_type},
    gpuCount: 1,
    gpuTypeId: "{gpu_type}",
    name: "{name}",
    containerDiskInGb: {container_disk_gb},
    imageName: "{DEFAULT_IMAGE}",
    ports: "11434/http",
    volumeInGb: {volume_gb},
    volumeMountPath: "/root/.ollama"
  }}) {{
    id
    desiredStatus
    machineId
  }}
}}
'''.strip()


def build_runpod_endpoint(pod_id: str, port: int = 11434) -> str:
    return f"https://{pod_id}-{port}.proxy.runpod.net/v1"


def parse_deploy_response(payload: dict[str, Any]) -> dict[str, str]:
    data = (payload.get("data") or {}).get("podFindAndDeployOnDemand") or {}
    pod_id = data.get("id")
    if not pod_id:
        raise ValueError(f"RunPod deploy response did not contain a pod id: {payload}")
    return {
        "pod_id": pod_id,
        "desired_status": data.get("desiredStatus", "UNKNOWN"),
        "base_url": build_runpod_endpoint(pod_id),
    }


def deploy_runpod(*, api_key: str, name: str, gpu_type: str = DEFAULT_GPU_TYPE, cloud_type: str = DEFAULT_CLOUD_TYPE, model: str = DEFAULT_MODEL) -> dict[str, str]:
    query = build_deploy_mutation(name=name, gpu_type=gpu_type, cloud_type=cloud_type, model_tag=model)
    payload = json.dumps({"query": query}).encode()
    req = request.Request(
        RUNPOD_GRAPHQL_URL,
        data=payload,
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        },
        method="POST",
    )
    with request.urlopen(req, timeout=30) as resp:
        response_payload = json.loads(resp.read().decode())
    return parse_deploy_response(response_payload)


def update_config_text(config_text: str, *, base_url: str, model: str = DEFAULT_MODEL, provider_name: str = DEFAULT_PROVIDER_NAME) -> str:
    parsed = yaml.safe_load(config_text) or {}
    providers = list(parsed.get("custom_providers") or [])

    replacement = {
        "name": provider_name,
        "base_url": base_url,
        "api_key": "",
        "model": model,
    }

    updated = False
    for idx, provider in enumerate(providers):
        if provider.get("name") == provider_name:
            providers[idx] = replacement
            updated = True
            break

    if not updated:
        providers.append(replacement)

    parsed["custom_providers"] = providers
    return yaml.safe_dump(parsed, sort_keys=False)


def write_config_file(config_path: Path, *, base_url: str, model: str = DEFAULT_MODEL, provider_name: str = DEFAULT_PROVIDER_NAME) -> str:
    original = config_path.read_text() if config_path.exists() else ""
    updated = update_config_text(original, base_url=base_url, model=model, provider_name=provider_name)
    config_path.parent.mkdir(parents=True, exist_ok=True)
    config_path.write_text(updated)
    return updated


def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str = "Say READY") -> str:
    payload = json.dumps(
        {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "stream": False,
            "max_tokens": 16,
        }
    ).encode()
    req = request.Request(
        f"{base_url.rstrip('/')}/chat/completions",
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with request.urlopen(req, timeout=30) as resp:
        data = json.loads(resp.read().decode())
    return data["choices"][0]["message"]["content"]


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Provision a RunPod Gemma 4 endpoint and wire a Hermes config for Bezalel.")
    parser.add_argument("--pod-name", default="bezalel-gemma4")
    parser.add_argument("--gpu-type", default=DEFAULT_GPU_TYPE)
    parser.add_argument("--cloud-type", default=DEFAULT_CLOUD_TYPE)
    parser.add_argument("--model", default=DEFAULT_MODEL)
    parser.add_argument("--provider-name", default=DEFAULT_PROVIDER_NAME)
    parser.add_argument("--token-file", type=Path, default=DEFAULT_TOKEN_FILE)
    parser.add_argument("--config-path", type=Path, default=DEFAULT_CONFIG_PATH)
    parser.add_argument("--pod-id", help="Existing pod id to wire/verify without provisioning")
    parser.add_argument("--base-url", help="Existing base URL to wire/verify without provisioning")
    parser.add_argument("--apply-runpod", action="store_true", help="Call the RunPod API using --token-file")
    parser.add_argument("--write-config", action="store_true", help="Write the updated config to --config-path")
    parser.add_argument("--verify-chat", action="store_true", help="Call the OpenAI-compatible chat endpoint")
    parser.add_argument("--json", action="store_true", help="Emit machine-readable JSON")
    return parser.parse_args()


def main() -> None:
    args = parse_args()
    summary: dict[str, Any] = {
        "pod_name": args.pod_name,
        "gpu_type": args.gpu_type,
        "cloud_type": args.cloud_type,
        "model": args.model,
        "provider_name": args.provider_name,
        "actions": [],
    }

    base_url = args.base_url
    if not base_url and args.pod_id:
        base_url = build_runpod_endpoint(args.pod_id)
        summary["actions"].append("computed_base_url_from_pod_id")

    if args.apply_runpod:
        if not args.token_file.exists():
            raise SystemExit(f"RunPod token file not found: {args.token_file}")
        api_key = args.token_file.read_text().strip()
        deployed = deploy_runpod(api_key=api_key, name=args.pod_name, gpu_type=args.gpu_type, cloud_type=args.cloud_type, model=args.model)
        summary["deployment"] = deployed
        base_url = deployed["base_url"]
        summary["actions"].append("deployed_runpod_pod")

    if not base_url:
        base_url = build_runpod_endpoint("<pod-id>")
        summary["actions"].append("using_placeholder_base_url")

    summary["base_url"] = base_url
    summary["config_preview"] = update_config_text("", base_url=base_url, model=args.model, provider_name=args.provider_name)

    if args.write_config:
        write_config_file(args.config_path, base_url=base_url, model=args.model, provider_name=args.provider_name)
        summary["config_path"] = str(args.config_path)
        summary["actions"].append("wrote_config")

    if args.verify_chat:
        summary["verify_response"] = verify_openai_chat(base_url, model=args.model)
        summary["actions"].append("verified_chat")

    if args.json:
        print(json.dumps(summary, indent=2))
        return

    print("--- Bezalel Gemma4 RunPod Wiring ---")
    print(f"Pod name: {args.pod_name}")
    print(f"Base URL: {base_url}")
    print(f"Model: {args.model}")
    if args.write_config:
        print(f"Config written: {args.config_path}")
    if "verify_response" in summary:
        print(f"Verify response: {summary['verify_response']}")
    if summary["actions"]:
        print("Actions: " + ", ".join(summary["actions"]))


if __name__ == "__main__":
    main()
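As a quick orientation to the new module, here is a minimal dry-run sketch using only the helpers above; it builds the mutation and previews the Hermes provider block without calling RunPod or writing files. The pod id "abc123" is a placeholder, not a real deployment.

# Dry-run sketch (assumes the repo root is importable, as in the tests below).
from scripts.bezalel_gemma4_vps import (
    build_deploy_mutation,
    build_runpod_endpoint,
    update_config_text,
)

mutation = build_deploy_mutation(name="bezalel-gemma4")   # GraphQL text only, nothing is sent
base_url = build_runpod_endpoint("abc123")                # https://abc123-11434.proxy.runpod.net/v1
print(update_config_text("", base_url=base_url, model="gemma4:latest"))  # "Big Brain" provider upserted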
@@ -1,224 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Render the current fleet survival phase as a durable report."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
from copy import deepcopy
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
|
||||
PHASE_NAME = "[PHASE-1] Survival - Keep the Lights On"
|
||||
NEXT_PHASE_NAME = "[PHASE-2] Automation - Self-Healing Infrastructure"
|
||||
TARGET_UPTIME_PERCENT = 95.0
|
||||
TARGET_UPTIME_DAYS = 30
|
||||
TARGET_CAPACITY_PERCENT = 60.0
|
||||
|
||||
DEFAULT_BUILDINGS = [
|
||||
"VPS hosts: Ezra, Allegro, Bezalel",
|
||||
"Agents: Timmy harness, Code Claw heartbeat, Gemini AI Studio worker",
|
||||
"Gitea forge",
|
||||
"Evennia worlds",
|
||||
]
|
||||
|
||||
DEFAULT_MANUAL_CLICKS = [
|
||||
"Restart agents and services by hand when a node goes dark.",
|
||||
"SSH into machines to verify health, disk, and memory.",
|
||||
"Check Gitea, relay, and world services manually before and after changes.",
|
||||
"Act as the scheduler when automation is missing or only partially wired.",
|
||||
]
|
||||
|
||||
REPO_SIGNAL_FILES = {
|
||||
"scripts/fleet_health_probe.sh": "Automated health probe exists and can supply the uptime baseline for the next phase.",
|
||||
"scripts/fleet_milestones.py": "Milestone tracker exists, so survival achievements can be narrated and logged.",
|
||||
"scripts/auto_restart_agent.sh": "Auto-restart tooling already exists as phase-2 groundwork.",
|
||||
"scripts/backup_pipeline.sh": "Backup pipeline scaffold exists for post-survival automation work.",
|
||||
"infrastructure/timmy-bridge/reports/generate_report.py": "Bridge reporting exists and can summarize heartbeat-driven uptime.",
|
||||
}
|
||||
|
||||
DEFAULT_SNAPSHOT = {
|
||||
"fleet_operational": True,
|
||||
"resources": {
|
||||
"uptime_percent": 0.0,
|
||||
"days_at_or_above_95_percent": 0,
|
||||
"capacity_utilization_percent": 0.0,
|
||||
},
|
||||
"current_buildings": DEFAULT_BUILDINGS,
|
||||
"manual_clicks": DEFAULT_MANUAL_CLICKS,
|
||||
"notes": [
|
||||
"The fleet is alive, but the human is still the control loop.",
|
||||
"Phase 1 is about naming reality plainly so later automation has a baseline to beat.",
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
def default_snapshot() -> dict[str, Any]:
|
||||
return deepcopy(DEFAULT_SNAPSHOT)
|
||||
|
||||
|
||||
def _deep_merge(base: dict[str, Any], override: dict[str, Any]) -> dict[str, Any]:
|
||||
result = deepcopy(base)
|
||||
for key, value in override.items():
|
||||
if isinstance(value, dict) and isinstance(result.get(key), dict):
|
||||
result[key] = _deep_merge(result[key], value)
|
||||
else:
|
||||
result[key] = value
|
||||
return result
|
||||
|
||||
|
||||
def load_snapshot(snapshot_path: Path | None = None) -> dict[str, Any]:
|
||||
snapshot = default_snapshot()
|
||||
if snapshot_path is None:
|
||||
return snapshot
|
||||
override = json.loads(snapshot_path.read_text(encoding="utf-8"))
|
||||
return _deep_merge(snapshot, override)
|
||||
|
||||
|
||||
def collect_repo_signals(repo_root: Path) -> list[str]:
|
||||
signals: list[str] = []
|
||||
for rel_path, description in REPO_SIGNAL_FILES.items():
|
||||
if (repo_root / rel_path).exists():
|
||||
signals.append(f"`{rel_path}` — {description}")
|
||||
return signals
|
||||
|
||||
|
||||
def compute_phase_status(snapshot: dict[str, Any], repo_root: Path | None = None) -> dict[str, Any]:
|
||||
repo_root = repo_root or Path(__file__).resolve().parents[1]
|
||||
resources = snapshot.get("resources", {})
|
||||
uptime_percent = float(resources.get("uptime_percent", 0.0))
|
||||
uptime_days = int(resources.get("days_at_or_above_95_percent", 0))
|
||||
capacity_percent = float(resources.get("capacity_utilization_percent", 0.0))
|
||||
fleet_operational = bool(snapshot.get("fleet_operational", False))
|
||||
|
||||
missing: list[str] = []
|
||||
if not fleet_operational:
|
||||
missing.append("Fleet operational flag is false.")
|
||||
if uptime_percent < TARGET_UPTIME_PERCENT:
|
||||
missing.append(f"Uptime {uptime_percent:.1f}% / {TARGET_UPTIME_PERCENT:.1f}%")
|
||||
if uptime_days < TARGET_UPTIME_DAYS:
|
||||
missing.append(f"Days at or above 95% uptime: {uptime_days}/{TARGET_UPTIME_DAYS}")
|
||||
if capacity_percent <= TARGET_CAPACITY_PERCENT:
|
||||
missing.append(f"Capacity utilization {capacity_percent:.1f}% / >{TARGET_CAPACITY_PERCENT:.1f}%")
|
||||
|
||||
return {
|
||||
"title": PHASE_NAME,
|
||||
"current_phase": "PHASE-1 Survival",
|
||||
"fleet_operational": fleet_operational,
|
||||
"resources": {
|
||||
"uptime_percent": uptime_percent,
|
||||
"days_at_or_above_95_percent": uptime_days,
|
||||
"capacity_utilization_percent": capacity_percent,
|
||||
},
|
||||
"current_buildings": list(snapshot.get("current_buildings", DEFAULT_BUILDINGS)),
|
||||
"manual_clicks": list(snapshot.get("manual_clicks", DEFAULT_MANUAL_CLICKS)),
|
||||
"notes": list(snapshot.get("notes", [])),
|
||||
"repo_signals": collect_repo_signals(repo_root),
|
||||
"next_phase": NEXT_PHASE_NAME,
|
||||
"next_phase_ready": fleet_operational and not missing,
|
||||
"missing_requirements": missing,
|
||||
}
|
||||
|
||||
|
||||
def render_markdown(status: dict[str, Any]) -> str:
|
||||
resources = status["resources"]
|
||||
missing = status["missing_requirements"]
|
||||
ready_line = "READY" if status["next_phase_ready"] else "NOT READY"
|
||||
|
||||
lines = [
|
||||
f"# {status['title']}",
|
||||
"",
|
||||
"Phase 1 is the manual-clicker stage of the fleet. The machines exist. The services exist. The human is still the automation loop.",
|
||||
"",
|
||||
"## Phase Definition",
|
||||
"",
|
||||
"- Current state: fleet exists, agents run, everything important still depends on human vigilance.",
|
||||
"- Resources tracked here: Capacity, Uptime.",
|
||||
f"- Next phase: {status['next_phase']}",
|
||||
"",
|
||||
"## Current Buildings",
|
||||
"",
|
||||
]
|
||||
lines.extend(f"- {item}" for item in status["current_buildings"])
|
||||
|
||||
lines.extend([
|
||||
"",
|
||||
"## Current Resource Snapshot",
|
||||
"",
|
||||
f"- Fleet operational: {'yes' if status['fleet_operational'] else 'no'}",
|
||||
f"- Uptime baseline: {resources['uptime_percent']:.1f}%",
|
||||
f"- Days at or above 95% uptime: {resources['days_at_or_above_95_percent']}",
|
||||
f"- Capacity utilization: {resources['capacity_utilization_percent']:.1f}%",
|
||||
"",
|
||||
"## Next Phase Trigger",
|
||||
"",
|
||||
f"To unlock {status['next_phase']}, the fleet must hold both of these conditions at once:",
|
||||
f"- Uptime >= {TARGET_UPTIME_PERCENT:.0f}% for {TARGET_UPTIME_DAYS} consecutive days",
|
||||
f"- Capacity utilization > {TARGET_CAPACITY_PERCENT:.0f}%",
|
||||
f"- Current trigger state: {ready_line}",
|
||||
"",
|
||||
"## Missing Requirements",
|
||||
"",
|
||||
])
|
||||
if missing:
|
||||
lines.extend(f"- {item}" for item in missing)
|
||||
else:
|
||||
lines.append("- None. Phase 2 can unlock now.")
|
||||
|
||||
lines.extend([
|
||||
"",
|
||||
"## Manual Clicker Interpretation",
|
||||
"",
|
||||
"Paperclips analogy: Phase 1 = Manual clicker. You ARE the automation.",
|
||||
"Every restart, every SSH, every check is a manual click.",
|
||||
"",
|
||||
"## Manual Clicks Still Required",
|
||||
"",
|
||||
])
|
||||
lines.extend(f"- {item}" for item in status["manual_clicks"])
|
||||
|
||||
lines.extend([
|
||||
"",
|
||||
"## Repo Signals Already Present",
|
||||
"",
|
||||
])
|
||||
if status["repo_signals"]:
|
||||
lines.extend(f"- {item}" for item in status["repo_signals"])
|
||||
else:
|
||||
lines.append("- No survival-adjacent repo signals detected.")
|
||||
|
||||
if status["notes"]:
|
||||
lines.extend(["", "## Notes", ""])
|
||||
lines.extend(f"- {item}" for item in status["notes"])
|
||||
|
||||
return "\n".join(lines).rstrip() + "\n"
|
||||
|
||||
|
||||
def main() -> None:
|
||||
parser = argparse.ArgumentParser(description="Render the fleet phase-1 survival report")
|
||||
parser.add_argument("--snapshot", help="Optional JSON snapshot overriding the default phase-1 baseline")
|
||||
parser.add_argument("--output", help="Write markdown report to this path")
|
||||
parser.add_argument("--json", action="store_true", help="Print computed status as JSON instead of markdown")
|
||||
args = parser.parse_args()
|
||||
|
||||
snapshot = load_snapshot(Path(args.snapshot).expanduser() if args.snapshot else None)
|
||||
repo_root = Path(__file__).resolve().parents[1]
|
||||
status = compute_phase_status(snapshot, repo_root=repo_root)
|
||||
|
||||
if args.json:
|
||||
rendered = json.dumps(status, indent=2)
|
||||
else:
|
||||
rendered = render_markdown(status)
|
||||
|
||||
if args.output:
|
||||
output_path = Path(args.output).expanduser()
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
output_path.write_text(rendered, encoding="utf-8")
|
||||
print(f"Phase status written to {output_path}")
|
||||
else:
|
||||
print(rendered)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
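The `--snapshot` flag of the script above accepts a JSON override that is deep-merged into the default baseline. A small hypothetical example (values invented for illustration):

# Hypothetical override snapshot for --snapshot; only changed keys are needed,
# since load_snapshot() deep-merges them into DEFAULT_SNAPSHOT.
import json
from pathlib import Path

override = {
    "resources": {
        "uptime_percent": 96.2,
        "days_at_or_above_95_percent": 14,
        "capacity_utilization_percent": 48.0,
    }
}
Path("phase1_snapshot.json").write_text(json.dumps(override, indent=2), encoding="utf-8")
# Then: python3 scripts/fleet_phase_status.py --snapshot phase1_snapshot.json --json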
111 tests/test_bezalel_gemma4_vps.py (Normal file)
@@ -0,0 +1,111 @@
from __future__ import annotations

import json
from unittest.mock import patch

import yaml

from scripts.bezalel_gemma4_vps import (
    build_deploy_mutation,
    build_runpod_endpoint,
    parse_deploy_response,
    update_config_text,
    verify_openai_chat,
)


class _FakeResponse:
    def __init__(self, payload: dict):
        self._payload = json.dumps(payload).encode()

    def read(self) -> bytes:
        return self._payload

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        return False


def test_build_deploy_mutation_uses_ollama_image_and_openai_port() -> None:
    query = build_deploy_mutation(name="bezalel-gemma4", gpu_type="NVIDIA L40S", model_tag="gemma4:latest")

    assert 'gpuTypeId: "NVIDIA L40S"' in query
    assert 'imageName: "ollama/ollama:latest"' in query
    assert 'ports: "11434/http"' in query
    assert 'volumeMountPath: "/root/.ollama"' in query


def test_build_runpod_endpoint_appends_v1_suffix() -> None:
    assert build_runpod_endpoint("abc123") == "https://abc123-11434.proxy.runpod.net/v1"


def test_parse_deploy_response_extracts_pod_id_and_endpoint() -> None:
    payload = {
        "data": {
            "podFindAndDeployOnDemand": {
                "id": "podxyz",
                "desiredStatus": "RUNNING",
            }
        }
    }

    result = parse_deploy_response(payload)

    assert result == {
        "pod_id": "podxyz",
        "desired_status": "RUNNING",
        "base_url": "https://podxyz-11434.proxy.runpod.net/v1",
    }


def test_update_config_text_upserts_big_brain_provider() -> None:
    original = """
model:
  default: kimi-k2.5
  provider: kimi-coding
custom_providers:
  - name: Big Brain
    base_url: https://old-endpoint/v1
    api_key: ''
    model: gemma3:27b
"""

    updated = update_config_text(original, base_url="https://new-pod-11434.proxy.runpod.net/v1", model="gemma4:latest")
    parsed = yaml.safe_load(updated)

    assert parsed["model"] == {"default": "kimi-k2.5", "provider": "kimi-coding"}
    assert parsed["custom_providers"] == [
        {
            "name": "Big Brain",
            "base_url": "https://new-pod-11434.proxy.runpod.net/v1",
            "api_key": "",
            "model": "gemma4:latest",
        }
    ]


def test_verify_openai_chat_calls_chat_completions() -> None:
    response_payload = {
        "choices": [
            {
                "message": {
                    "content": "READY"
                }
            }
        ]
    }

    with patch(
        "scripts.bezalel_gemma4_vps.request.urlopen",
        return_value=_FakeResponse(response_payload),
    ) as mocked:
        result = verify_openai_chat("https://pod-11434.proxy.runpod.net/v1", model="gemma4:latest", prompt="say READY")

    assert result == "READY"
    req = mocked.call_args.args[0]
    assert req.full_url == "https://pod-11434.proxy.runpod.net/v1/chat/completions"
    payload = json.loads(req.data.decode())
    assert payload["model"] == "gemma4:latest"
    assert payload["messages"][0]["content"] == "say READY"
@@ -1,67 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib.util
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
ROOT = Path(__file__).resolve().parents[1]
|
||||
SCRIPT_PATH = ROOT / "scripts" / "fleet_phase_status.py"
|
||||
DOC_PATH = ROOT / "docs" / "FLEET_PHASE_1_SURVIVAL.md"
|
||||
|
||||
|
||||
def _load_module(path: Path, name: str):
|
||||
assert path.exists(), f"missing {path.relative_to(ROOT)}"
|
||||
spec = importlib.util.spec_from_file_location(name, path)
|
||||
assert spec and spec.loader
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(module)
|
||||
return module
|
||||
|
||||
|
||||
def test_compute_phase_status_tracks_survival_gate_requirements() -> None:
|
||||
mod = _load_module(SCRIPT_PATH, "fleet_phase_status")
|
||||
|
||||
status = mod.compute_phase_status(
|
||||
{
|
||||
"fleet_operational": True,
|
||||
"resources": {
|
||||
"uptime_percent": 94.5,
|
||||
"days_at_or_above_95_percent": 12,
|
||||
"capacity_utilization_percent": 45.0,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
assert status["current_phase"] == "PHASE-1 Survival"
|
||||
assert status["next_phase_ready"] is False
|
||||
assert any("94.5% / 95.0%" in item for item in status["missing_requirements"])
|
||||
assert any("12/30" in item for item in status["missing_requirements"])
|
||||
assert any("45.0% / >60.0%" in item for item in status["missing_requirements"])
|
||||
|
||||
|
||||
def test_render_markdown_preserves_phase_buildings_and_manual_clicker_language() -> None:
|
||||
mod = _load_module(SCRIPT_PATH, "fleet_phase_status")
|
||||
status = mod.compute_phase_status(mod.default_snapshot())
|
||||
report = mod.render_markdown(status)
|
||||
|
||||
for snippet in (
|
||||
"# [PHASE-1] Survival - Keep the Lights On",
|
||||
"VPS hosts: Ezra, Allegro, Bezalel",
|
||||
"Timmy harness",
|
||||
"Gitea forge",
|
||||
"Evennia worlds",
|
||||
"Every restart, every SSH, every check is a manual click.",
|
||||
):
|
||||
assert snippet in report
|
||||
|
||||
|
||||
def test_repo_contains_generated_phase_1_doc() -> None:
|
||||
assert DOC_PATH.exists(), "missing committed phase-1 survival doc"
|
||||
text = DOC_PATH.read_text(encoding="utf-8")
|
||||
for snippet in (
|
||||
"# [PHASE-1] Survival - Keep the Lights On",
|
||||
"## Current Buildings",
|
||||
"## Next Phase Trigger",
|
||||
"## Manual Clicker Interpretation",
|
||||
):
|
||||
assert snippet in text
|
||||