Files
timmy-home/scripts/big_brain_manager.py
Alexander Whitestone 3efee347bd
Some checks failed
Smoke Test / smoke (push) Has been cancelled
Fix #573: Add Big Brain pod verification scripts (#619)
Merge PR #619
2026-04-14 22:14:31 +00:00

215 lines
7.7 KiB
Python
Executable File

#!/usr/bin/env python3
"""
Big Brain Pod Management and Verification
Comprehensive script for managing and verifying Big Brain pod.
"""
import requests
import time
import json
import os
import sys
from datetime import datetime
# Configuration
# Default pod settings; used by PodVerifier when no explicit config is passed.
CONFIG = {
    "pod_id": "8lfr3j47a5r3gn",
    "endpoint": "https://8lfr3j47a5r3gn-11434.proxy.runpod.net",  # RunPod HTTP proxy for the Ollama port (11434)
    "cost_per_hour": 0.79,  # USD; echoed in the verification summary
    "model": "gemma3:27b",  # target Ollama model tag
    "max_response_time": 30, # seconds — generation must finish within this
    "timeout": 10  # seconds — per-request HTTP timeout for quick probes
}
class PodVerifier:
    """Verification harness for the Big Brain Ollama pod.

    Each ``check_*``/``test_*`` method prints its progress and returns its
    findings; :meth:`run_verification` orchestrates the full suite, prints a
    summary, and writes the results to ``pod_verification_results.json``.
    """

    def __init__(self, config=None):
        # Fall back to the module-level CONFIG when no override is supplied.
        self.config = config or CONFIG
        # Populated by run_verification(); empty until then.
        self.results = {}

    def check_connectivity(self):
        """Check basic connectivity to the pod.

        Returns:
            The HTTP status code (int) if the endpoint answered, or ``None``
            when the request failed (pod down, DNS/timeout error, ...).
        """
        print(f"[{datetime.now().isoformat()}] Checking connectivity to {self.config['endpoint']}...")
        try:
            response = requests.get(self.config['endpoint'], timeout=self.config['timeout'])
            print(f" Status: {response.status_code}")
            print(f" Headers: {dict(response.headers)}")
            return response.status_code
        except requests.exceptions.ConnectionError:
            print(" ✗ Connection failed - pod might be down or unreachable")
            return None
        except Exception as e:
            # Any other failure (timeout, SSL, ...) is reported but not fatal.
            print(f" ✗ Error: {e}")
            return None

    def check_ollama_api(self):
        """Probe candidate Ollama endpoints until one responds with 200.

        Returns:
            Tuple ``(ok, endpoint, response)``; ``ok`` is True when some
            endpoint answered 200, otherwise ``(False, None, None)``.
        """
        print(f"[{datetime.now().isoformat()}] Checking Ollama API...")
        endpoints_to_try = [
            "/api/tags",     # preferred: also yields the installed model list
            "/api/version",
            "/"
        ]
        for endpoint in endpoints_to_try:
            url = f"{self.config['endpoint']}{endpoint}"
            try:
                print(f" Trying {url}...")
                response = requests.get(url, timeout=self.config['timeout'])
                print(f" Status: {response.status_code}")
                if response.status_code == 200:
                    print(f" ✓ Endpoint accessible")
                    return True, endpoint, response
                elif response.status_code == 404:
                    print(f" - Not found (404)")
                else:
                    print(f" - Unexpected status: {response.status_code}")
            except Exception as e:
                print(f" ✗ Error: {e}")
        return False, None, None

    def pull_model(self, model_name=None):
        """Ask the pod to pull a model via the Ollama ``/api/pull`` endpoint.

        Args:
            model_name: Model tag to pull; defaults to the configured model.

        Returns:
            True when the pull request was accepted (HTTP 200), else False.
            NOTE(review): the 60s timeout only covers initiating the pull;
            the download itself continues server-side.
        """
        model = model_name or self.config['model']
        print(f"[{datetime.now().isoformat()}] Pulling model {model}...")
        try:
            payload = {"name": model}
            response = requests.post(
                f"{self.config['endpoint']}/api/pull",
                json=payload,
                timeout=60
            )
            if response.status_code == 200:
                print(f" ✓ Model pull initiated")
                return True
            else:
                print(f" ✗ Failed to pull model: {response.status_code}")
                return False
        except Exception as e:
            print(f" ✗ Error pulling model: {e}")
            return False

    def test_generation(self, prompt="Say hello in one word."):
        """Run a short, non-streaming generation and time it.

        Args:
            prompt: Prompt to send; default elicits a minimal reply.

        Returns:
            Tuple ``(ok, elapsed_seconds, response_text)``; ``ok`` requires
            both HTTP 200 and elapsed time within ``max_response_time``.
        """
        print(f"[{datetime.now().isoformat()}] Testing generation...")
        try:
            payload = {
                "model": self.config['model'],
                "prompt": prompt,
                "stream": False,
                # Cap output tokens so the probe stays fast and cheap.
                "options": {"num_predict": 10}
            }
            start_time = time.time()
            response = requests.post(
                f"{self.config['endpoint']}/api/generate",
                json=payload,
                timeout=self.config['max_response_time']
            )
            elapsed = time.time() - start_time
            if response.status_code == 200:
                data = response.json()
                response_text = data.get("response", "").strip()
                print(f" ✓ Generation successful in {elapsed:.2f}s")
                print(f" Response: {response_text[:100]}...")
                if elapsed <= self.config['max_response_time']:
                    print(f" ✓ Response time within limit ({self.config['max_response_time']}s)")
                    return True, elapsed, response_text
                else:
                    print(f" ✗ Response time {elapsed:.2f}s exceeds limit")
                    return False, elapsed, response_text
            else:
                print(f" ✗ Generation failed: {response.status_code}")
                return False, 0, ""
        except Exception as e:
            print(f" ✗ Error during generation: {e}")
            return False, 0, ""

    def run_verification(self):
        """Run the full verification suite and persist the results.

        Runs connectivity, API, model, and generation checks, prints a
        summary, stores the findings in ``self.results``, and writes them to
        ``pod_verification_results.json``.

        Returns:
            True when both the API check and the generation test passed.
        """
        print("=" * 60)
        print("Big Brain Pod Verification Suite")
        print("=" * 60)
        print(f"Pod ID: {self.config['pod_id']}")
        print(f"Endpoint: {self.config['endpoint']}")
        print(f"Model: {self.config['model']}")
        print(f"Cost: ${self.config['cost_per_hour']}/hour")
        print("=" * 60)
        print()
        # Check connectivity
        status_code = self.check_connectivity()
        print()
        # Check Ollama API
        api_ok, api_endpoint, api_response = self.check_ollama_api()
        print()
        # If the tags endpoint answered, inspect the installed model list.
        models = []
        if api_ok and api_endpoint == "/api/tags":
            try:
                data = api_response.json()
                models = [m.get("name", "") for m in data.get("models", [])]
                print(f"Available models: {models}")
                # Case-insensitive substring match against installed tags.
                has_model = any(self.config['model'].lower() in m.lower() for m in models)
                if not has_model:
                    print(f"Model {self.config['model']} not found. Attempting to pull...")
                    self.pull_model()
                else:
                    print(f"✓ Model {self.config['model']} found")
            except ValueError:
                # response.json() raises ValueError on a non-JSON body; the
                # original bare `except:` hid every error, including bugs.
                print("Could not parse model list")
        print()
        # Test generation
        gen_ok, gen_time, gen_response = self.test_generation()
        print()
        # Summary — the pass/fail marks here had been lost (empty strings);
        # restored to the ✓/✗ convention used throughout this script.
        print("=" * 60)
        print("VERIFICATION SUMMARY")
        print("=" * 60)
        print(f"Connectivity: {'✓' if status_code else '✗'}")
        print(f"Ollama API: {'✓' if api_ok else '✗'}")
        print(f"Generation: {'✓' if gen_ok else '✗'}")
        print(f"Response time: {gen_time:.2f}s (limit: {self.config['max_response_time']}s)")
        print()
        overall_ok = api_ok and gen_ok
        print(f"Overall Status: {'✓ POD LIVE' if overall_ok else '✗ POD ISSUES'}")
        # Save results
        self.results = {
            "timestamp": datetime.now().isoformat(),
            "pod_id": self.config['pod_id'],
            "endpoint": self.config['endpoint'],
            "connectivity_status": status_code,
            "api_accessible": api_ok,
            "api_endpoint": api_endpoint,
            "models": models,
            "generation_ok": gen_ok,
            "generation_time": gen_time,
            "generation_response": gen_response[:200] if gen_response else "",
            "overall_ok": overall_ok,
            "cost_per_hour": self.config['cost_per_hour']
        }
        with open("pod_verification_results.json", "w") as f:
            json.dump(self.results, f, indent=2)
        print("Results saved to pod_verification_results.json")
        return overall_ok
def main():
    """Entry point: run the verification suite and exit 0 on success, 1 otherwise."""
    ok = PodVerifier().run_verification()
    sys.exit(0 if ok else 1)


if __name__ == "__main__":
    main()