#!/usr/bin/env python3
"""
Big Brain provider management and verification.

Uses the repo's Big Brain provider config rather than a stale hardcoded
pod id. Supports both OpenAI-compatible and raw Ollama backends.
"""

from __future__ import annotations

import json
import sys
from datetime import datetime

import requests

from scripts.big_brain_provider import (
    build_generate_payload,
    resolve_big_brain_provider,
    resolve_generate_url,
    resolve_models_url,
)


class ProviderVerifier:
    def __init__(
        self,
        provider: dict | None = None,
        timeout: int = 10,
        max_response_time: int = 30,
    ):
        self.provider = provider or resolve_big_brain_provider()
        self.timeout = timeout
        self.max_response_time = max_response_time
        self.results: dict[str, object] = {}

    def _headers(self) -> dict[str, str]:
        headers = {"Content-Type": "application/json"}
        api_key = self.provider.get("api_key", "")
        if api_key:
            headers["Authorization"] = f"Bearer {api_key}"
        return headers

    def check_models(self) -> tuple[bool, list[str], int | None]:
        """Hit the models endpoint; return (ok, model names, HTTP status)."""
        url = resolve_models_url(self.provider)
        print(f"[{datetime.now().isoformat()}] Checking models endpoint: {url}")
        try:
            response = requests.get(url, headers=self._headers(), timeout=self.timeout)
            models = []
            if response.status_code == 200:
                data = response.json()
                if self.provider["backend"] == "openai":
                    # OpenAI-compatible shape: {"data": [{"id": ...}, ...]}
                    models = [m.get("id", "") for m in data.get("data", [])]
                else:
                    # Raw Ollama shape: {"models": [{"name": ...}, ...]}
                    models = [m.get("name", "") for m in data.get("models", [])]
                print(f" ✓ Models endpoint OK ({response.status_code})")
            else:
                print(f" ✗ Models endpoint failed ({response.status_code})")
            return response.status_code == 200, models, response.status_code
        except Exception as e:
            print(f" ✗ Models endpoint error: {e}")
            return False, [], None

    def test_generation(self, prompt: str = "Say READY") -> tuple[bool, str, int | None]:
        """Send a short prompt; return (ok, response text, HTTP status)."""
        url = resolve_generate_url(self.provider)
        payload = build_generate_payload(self.provider, prompt=prompt)
        print(f"[{datetime.now().isoformat()}] Testing generation endpoint: {url}")
        try:
            response = requests.post(
                url,
                headers=self._headers(),
                json=payload,
                timeout=self.max_response_time,
            )
            text = ""
            if response.status_code == 200:
                data = response.json()
                if self.provider["backend"] == "openai":
                    # OpenAI-compatible chat completion shape.
                    text = (
                        data.get("choices", [{}])[0]
                        .get("message", {})
                        .get("content", "")
                        .strip()
                    )
                else:
                    # Raw Ollama generate shape.
                    text = data.get("response", "").strip()
                print(f" ✓ Generation OK ({response.status_code})")
            else:
                print(f" ✗ Generation failed ({response.status_code})")
            return response.status_code == 200, text, response.status_code
        except Exception as e:
            print(f" ✗ Generation error: {e}")
            return False, "", None

    def run_verification(self) -> bool:
        print("=" * 60)
        print("Big Brain Provider Verification Suite")
        print("=" * 60)
        print(f"Provider: {self.provider['name']}")
        print(f"Backend: {self.provider['backend']}")
        print(f"Base URL: {self.provider['base_url']}")
        print(f"Model: {self.provider['model']}")
        print("=" * 60)
        print()

        models_ok, models, models_status = self.check_models()
        print()
        gen_ok, gen_response, gen_status = self.test_generation()
        print()

        overall_ok = models_ok and gen_ok
        # Redact the API key so it never lands in the results file on disk.
        provider_public = {k: v for k, v in self.provider.items() if k != "api_key"}
        self.results = {
            "timestamp": datetime.now().isoformat(),
            "provider": provider_public,
            "models_ok": models_ok,
            "models_status": models_status,
            "models": models,
            "generation_ok": gen_ok,
            "generation_status": gen_status,
            "generation_response": gen_response[:200],
            "overall_ok": overall_ok,
        }
        with open("pod_verification_results.json", "w") as f:
            json.dump(self.results, f, indent=2)

        print("=" * 60)
        print(f"Overall Status: {'✓ PROVIDER LIVE' if overall_ok else '✗ PROVIDER ISSUES'}")
        print("Results saved to pod_verification_results.json")
        return overall_ok
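
# Example: verifying an explicit provider instead of the repo default.
# A minimal sketch; the dict keys mirror what this module reads
# (name/backend/base_url/model plus optional api_key), but the exact schema
# is owned by scripts.big_brain_provider.resolve_big_brain_provider, and the
# values below are hypothetical.
#
#   provider = {
#       "name": "local-ollama",
#       "backend": "ollama",
#       "base_url": "http://localhost:11434",
#       "model": "llama3",
#       "api_key": "",
#   }
#   ProviderVerifier(provider=provider, timeout=5).run_verification()
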
def main() -> None:
    verifier = ProviderVerifier()
    success = verifier.run_verification()
    sys.exit(0 if success else 1)


if __name__ == "__main__":
    main()
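
# Usage note: run this file directly (e.g. `python path/to/this_script.py`).
# The process exits 0 only when both the models check and the generation
# check pass, and 1 otherwise, so it can gate CI or deployment steps.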