Compare commits: fix/680-py ... issue-514- (1 commit: 0f680d70b8)

bin/model-fallback-verify.py (new file, +443 lines)
@@ -0,0 +1,443 @@
#!/usr/bin/env python3
"""
Model Fallback Verification Script
Issue #514: [Robustness] Model fallback verification — test before trusting

Tests model switches with verification prompts, validates context windows,
and ensures at least one viable model is available before starting loops.

Usage:
    python3 model-fallback-verify.py                   # Run full verification
    python3 model-fallback-verify.py check <model>     # Test specific model
    python3 model-fallback-verify.py context <model>   # Check context window
    python3 model-fallback-verify.py list              # List available models
"""

import json
import os
import sys
import urllib.error
import urllib.request
from datetime import datetime, timezone
from pathlib import Path

import yaml

# Configuration
HERMES_HOME = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes"))
CONFIG_FILE = HERMES_HOME / "config.yaml"
LOG_DIR = HERMES_HOME / "logs"
LOG_FILE = LOG_DIR / "model-verify.log"
MIN_CONTEXT_WINDOW = 64 * 1024  # 64K tokens minimum

# Provider endpoints
PROVIDER_CONFIGS = {
    "openrouter": {
        "base_url": "https://openrouter.ai/api/v1",
        "headers": lambda api_key: {"Authorization": "Bearer " + api_key},
        "chat_url": "/chat/completions",
    },
    "anthropic": {
        "base_url": "https://api.anthropic.com/v1",
        "headers": lambda api_key: {"x-api-key": api_key, "anthropic-version": "2023-06-01"},
        "chat_url": "/messages",
    },
    "nous": {
        "base_url": "https://inference.nousresearch.com/v1",
        "headers": lambda api_key: {"Authorization": "Bearer " + api_key},
        "chat_url": "/chat/completions",
    },
    "kimi-coding": {
        "base_url": "https://api.kimi.com/coding/v1",
        "headers": lambda api_key: {"x-api-key": api_key, "x-api-provider": "kimi-coding"},
        "chat_url": "/chat/completions",
    },
    "custom": {
        "base_url": None,
        "headers": lambda api_key: {"Authorization": "Bearer " + api_key},
        "chat_url": "/chat/completions",
    },
}

# Known context windows for common models
KNOWN_CONTEXT_WINDOWS = {
    "claude-opus-4-6": 200000,
    "claude-sonnet-4": 200000,
    "claude-3.5-sonnet": 200000,
    "gpt-4o": 128000,
    "gpt-4": 128000,
    "gpt-3.5-turbo": 16385,
    "qwen3:30b": 32768,
    "qwen2.5:7b": 32768,
    "hermes4:14b": 32768,
    "gemma3:1b": 8192,
    "gemma4": 32768,
    "phi3:3.8b": 128000,
    "kimi-k2.5": 128000,
    "google/gemini-2.5-pro": 1048576,
    "xiaomi/mimo-v2-pro": 131072,
    "deepseek/deepseek-r1": 131072,
    "deepseek/deepseek-chat-v3-0324": 131072,
}

def log(msg):
    """Log message to file and optionally to console."""
    timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
    log_entry = "[" + timestamp + "] " + msg

    LOG_DIR.mkdir(parents=True, exist_ok=True)
    with open(LOG_FILE, "a") as f:
        f.write(log_entry + "\n")

    if "--quiet" not in sys.argv:
        print(log_entry)

def load_config():
    """Load Hermes config.yaml."""
    if not CONFIG_FILE.exists():
        return None

    with open(CONFIG_FILE) as f:
        return yaml.safe_load(f)

def get_provider_api_key(provider):
    """Get API key for a provider from .env or environment."""
    env_file = HERMES_HOME / ".env"
    if env_file.exists():
        with open(env_file) as f:
            for line in f:
                line = line.strip()
                if line.startswith(provider.upper() + "_API_KEY="):
                    return line.split("=", 1)[1].strip().strip("'\"")

    return os.environ.get(provider.upper() + "_API_KEY")

def get_ollama_models():
    """Get list of available Ollama models."""
    ollama_host = os.environ.get("OLLAMA_HOST", "localhost:11434")
    try:
        resp = urllib.request.urlopen("http://" + ollama_host + "/api/tags", timeout=5)
        data = json.loads(resp.read())
        return [m["name"] for m in data.get("models", [])]
    except Exception:
        return []

def test_model(model, provider, api_key=None, base_url=None):
    """
    Test a model with a verification prompt.
    Returns (success, response, error_message)
    """
    if provider == "ollama" or ":" in model:
        # Local Ollama model
        ollama_host = os.environ.get("OLLAMA_HOST", "localhost:11434")
        try:
            body = json.dumps({
                "model": model,
                "prompt": "Say exactly VERIFIED and nothing else.",
                "stream": False,
                "options": {"num_predict": 10}
            }).encode()
            req = urllib.request.Request(
                "http://" + ollama_host + "/api/generate",
                data=body,
                headers={"Content-Type": "application/json"}
            )
            resp = urllib.request.urlopen(req, timeout=30)
            result = json.loads(resp.read())
            response_text = result.get("response", "").strip()
            if "VERIFIED" in response_text.upper():
                return True, response_text, None
            return False, response_text, "Unexpected response: " + response_text[:100]
        except Exception as e:
            return False, "", "Ollama error: " + str(e)[:200]

    # Cloud provider
    config = PROVIDER_CONFIGS.get(provider)
    if not config:
        return False, "", "Unknown provider: " + provider

    url = base_url or config["base_url"]
    if not url:
        return False, "", "No base URL for provider: " + provider

    headers = config["headers"](api_key or "")
    headers["Content-Type"] = "application/json"

    try:
        body = json.dumps({
            "model": model,
            "max_tokens": 20,
            "messages": [{"role": "user", "content": "Say exactly VERIFIED and nothing else."}]
        }).encode()

        req = urllib.request.Request(
            url + config["chat_url"],
            data=body,
            headers=headers
        )
        resp = urllib.request.urlopen(req, timeout=30)
        result = json.loads(resp.read())

        if provider == "anthropic":
            content = result.get("content", [{}])[0].get("text", "")
        else:
            choices = result.get("choices", [{}])
            content = choices[0].get("message", {}).get("content", "") if choices else ""

        if "VERIFIED" in content.upper():
            return True, content, None
        return False, content, "Unexpected response: " + content[:100]

    except urllib.error.HTTPError as e:
        error_body = e.read().decode() if e.fp else str(e)
        if e.code == 404:
            return False, "", "Model not found (404): " + error_body[:200]
        elif e.code == 429:
            return True, "", "Rate limited but model exists"
        elif e.code >= 500:
            return False, "", "Server error (" + str(e.code) + "): " + error_body[:200]
        else:
            return False, "", "HTTP " + str(e.code) + ": " + error_body[:200]
    except Exception as e:
        return False, "", "Request error: " + str(e)[:200]
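
# Illustrative call (not executed here): for a local model,
# test_model("qwen3:30b", "ollama") is expected to return (True, "VERIFIED", None)
# when the model echoes the token, or (False, "", "Ollama error: ...") when the
# Ollama API at OLLAMA_HOST cannot be reached.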

def get_context_window(model, provider):
    """
    Get the context window size for a model.
    Returns (window_size, source)
    """
    if model in KNOWN_CONTEXT_WINDOWS:
        return KNOWN_CONTEXT_WINDOWS[model], "known"

    model_lower = model.lower()
    if "claude" in model_lower:
        return 200000, "inferred (claude)"
    elif "gpt-4" in model_lower:
        return 128000, "inferred (gpt-4)"
    elif "gemini" in model_lower:
        return 1048576, "inferred (gemini)"
    elif "qwen" in model_lower:
        return 32768, "inferred (qwen)"
    elif "gemma" in model_lower:
        return 8192, "inferred (gemma)"
    elif "phi" in model_lower:
        return 128000, "inferred (phi)"

    return 32768, "default"

def verify_model(model, provider, api_key=None, base_url=None):
    """
    Full verification of a model: test prompt + context window.
    Returns dict with verification results.
    """
    result = {
        "model": model,
        "provider": provider,
        "tested": False,
        "responded": False,
        "response": "",
        "error": None,
        "context_window": 0,
        "context_source": "unknown",
        "meets_minimum": False,
        "viable": False,
    }

    success, response, error = test_model(model, provider, api_key, base_url)
    result["tested"] = True
    result["responded"] = success
    result["response"] = response[:200] if response else ""
    result["error"] = error

    window, source = get_context_window(model, provider)
    result["context_window"] = window
    result["context_source"] = source
    result["meets_minimum"] = window >= MIN_CONTEXT_WINDOW

    result["viable"] = success and result["meets_minimum"]

    return result

def get_fallback_chain(config):
    """Get the fallback chain from config or defaults."""
    chain = []

    model_config = config.get("model", {})
    if isinstance(model_config, dict):
        primary = model_config.get("default", "")
        provider = model_config.get("provider", "")
        if primary and provider:
            chain.append({"model": primary, "provider": provider, "role": "primary"})
    elif model_config:
        chain.append({"model": str(model_config), "provider": "unknown", "role": "primary"})

    auxiliary = config.get("auxiliary", {})
    for aux_name, aux_config in auxiliary.items():
        if isinstance(aux_config, dict):
            aux_model = aux_config.get("model", "")
            aux_provider = aux_config.get("provider", "")
            if aux_model and aux_provider and aux_provider != "auto":
                chain.append({"model": aux_model, "provider": aux_provider, "role": "auxiliary:" + aux_name})

    ollama_models = get_ollama_models()
    for model in ollama_models[:3]:
        if not any(c["model"] == model for c in chain):
            chain.append({"model": model, "provider": "ollama", "role": "local-fallback"})

    return chain

def run_verification():
    """Run full model fallback verification."""
    log("=== Model Fallback Verification ===")

    config = load_config()
    if not config:
        log("ERROR: No config.yaml found")
        return {"success": False, "error": "No config file"}

    chain = get_fallback_chain(config)
    if not chain:
        log("ERROR: No models configured")
        return {"success": False, "error": "No models in chain"}

    results = []
    viable_models = []

    for entry in chain:
        model = entry["model"]
        provider = entry["provider"]
        role = entry["role"]

        api_key = get_provider_api_key(provider) if provider != "ollama" else None

        base_url = None
        if provider == "custom":
            provider_config = config.get("auxiliary", {}).get("vision", {})
            base_url = provider_config.get("base_url")

        log("Testing [" + role + "] " + model + " (" + provider + ")...")
        result = verify_model(model, provider, api_key, base_url)
        result["role"] = role
        results.append(result)

        status = "PASS" if result["viable"] else "FAIL"
        details = []
        if not result["responded"]:
            details.append("no response: " + str(result["error"]))
        if not result["meets_minimum"]:
            details.append("context " + str(result["context_window"]) + " < " + str(MIN_CONTEXT_WINDOW))

        log(" [" + status + "] " + model + " - " + (", ".join(details) if details else "verified"))

        if result["viable"]:
            viable_models.append(result)

    log("=== Results: " + str(len(viable_models)) + "/" + str(len(results)) + " models viable ===")

    if not viable_models:
        log("CRITICAL: No viable models found!")
        for r in results:
            log(" - " + r["model"] + " (" + r["provider"] + "): responded=" + str(r["responded"]) + ", context=" + str(r["context_window"]))
        return {"success": False, "results": results, "viable": []}

    log("Viable models (in priority order):")
    for i, r in enumerate(viable_models, 1):
        log(" " + str(i) + ". " + r["model"] + " (" + r["provider"] + ") - context: " + str(r["context_window"]) + " tokens [" + r["role"] + "]")

    return {
        "success": True,
        "results": results,
        "viable": viable_models,
        "primary": viable_models[0] if viable_models else None,
    }
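
# Illustrative shape of run_verification()'s return value (values are examples,
# not real output):
# {
#     "success": True,
#     "results": [...],   # one verify_model() dict per entry in the fallback chain
#     "viable": [...],    # the subset of results with viable == True
#     "primary": {...},   # first viable entry, e.g. {"model": "qwen3:30b", ...}
# }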

def check_single_model(model):
    """Check a specific model."""
    if ":" in model:
        provider = "ollama"
    elif "/" in model:
        provider = "openrouter"
    else:
        provider = "unknown"

    config = load_config() or {}
    api_key = get_provider_api_key(provider) if provider != "ollama" else None

    result = verify_model(model, provider, api_key)

    if result["viable"]:
        print("PASS: " + model)
        print(" Context window: " + str(result["context_window"]) + " tokens")
        print(" Response: " + result["response"][:100])
    else:
        print("FAIL: " + model)
        if result["error"]:
            print(" Error: " + str(result["error"]))
        if not result["meets_minimum"]:
            print(" Context window: " + str(result["context_window"]) + " < " + str(MIN_CONTEXT_WINDOW) + " minimum")

    return result["viable"]

def check_context_window(model):
    """Check context window for a model."""
    if ":" in model:
        provider = "ollama"
    elif "/" in model:
        provider = "openrouter"
    else:
        provider = "unknown"

    window, source = get_context_window(model, provider)
    meets = window >= MIN_CONTEXT_WINDOW

    print("Model: " + model)
    print("Provider: " + provider)
    print("Context window: " + str(window) + " tokens (" + source + ")")
    print("Minimum (" + str(MIN_CONTEXT_WINDOW) + "): " + ("PASS" if meets else "FAIL"))

    return meets

def list_models():
    """List all available models."""
    config = load_config() or {}
    chain = get_fallback_chain(config)

    print("Configured models:")
    for entry in chain:
        print(" " + entry["model"].ljust(30) + " " + entry["provider"].ljust(15) + " [" + entry["role"] + "]")

    ollama = get_ollama_models()
    if ollama:
        print("")
        print("Ollama models:")
        for m in ollama:
            print(" " + m)

def main():
    # "--quiet" is consumed by log(); strip it before dispatching on the command,
    # so "model-fallback-verify.py --quiet" still runs the full verification.
    args = [a for a in sys.argv[1:] if a != "--quiet"]

    if not args:
        result = run_verification()
        sys.exit(0 if result["success"] else 1)

    cmd = args[0]

    if cmd == "check" and len(args) > 1:
        model = args[1]
        success = check_single_model(model)
        sys.exit(0 if success else 1)

    elif cmd == "context" and len(args) > 1:
        model = args[1]
        meets = check_context_window(model)
        sys.exit(0 if meets else 1)

    elif cmd == "list":
        list_models()

    elif cmd == "test":
        result = run_verification()
        sys.exit(0 if result["success"] else 1)

    else:
        print("Usage:")
        print("  model-fallback-verify.py                  Run full verification")
        print("  model-fallback-verify.py check <model>    Test specific model")
        print("  model-fallback-verify.py context <model>  Check context window")
        print("  model-fallback-verify.py list             List available models")
        sys.exit(1)


if __name__ == "__main__":
    main()
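
A minimal sketch of how a launcher could gate on this script before starting a loop, relying only on its exit code (0 when at least one viable model is found, 1 otherwise). This is a hypothetical integration, not part of this commit; the file name launcher_preflight.py and the function ensure_viable_model are assumptions.

    # launcher_preflight.py (hypothetical) -- fail fast when no viable model exists
    import subprocess
    import sys

    def ensure_viable_model():
        """Run the fallback verification script; abort startup if it fails."""
        proc = subprocess.run([sys.executable, "bin/model-fallback-verify.py"])
        if proc.returncode != 0:
            raise SystemExit("model-fallback-verify: no viable model; refusing to start loop")

    if __name__ == "__main__":
        ensure_viable_model()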