Compare commits

..

6 Commits

Author SHA1 Message Date
0f680d70b8 feat: add model fallback verification script (#514)
- Tests model switches with verification prompts
- Validates context window meets 64K minimum
- Checks primary model and fallback chain
- Supports OpenRouter, Anthropic, Nous, Kimi, Ollama
- Exits with a nonzero status if no viable model is found (see the gate sketch after the commit list)

Closes #514
2026-04-15 03:20:49 +00:00
d120526244 fix: add python3 shebang to scripts/visual_pr_reviewer.py (#681) 2026-04-15 02:57:53 +00:00
8596ff761b fix: add python3 shebang to scripts/diagram_meaning_extractor.py (#681) 2026-04-15 02:57:40 +00:00
7553fd4f3e fix: add python3 shebang to scripts/captcha_bypass_handler.py (#681) 2026-04-15 02:57:25 +00:00
71082fe06f fix: add python3 shebang to bin/soul_eval_gate.py (#681) 2026-04-15 02:57:14 +00:00
6d678e938e fix: add python3 shebang to bin/nostr-agent-demo.py (#681) 2026-04-15 02:57:00 +00:00
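The first commit's script exits nonzero when verification finds no viable model, so a launcher can use it as a pre-flight gate. A minimal sketch, assuming the script name and working directory (hypothetical wrapper, not part of this diff):

# Abort loop startup unless at least one model in the chain verifies.
import subprocess, sys

if subprocess.call(["python3", "model-fallback-verify.py", "--quiet"]) != 0:
    sys.exit("model fallback verification failed; not starting agent loops")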
8 changed files with 448 additions and 221 deletions


@@ -0,0 +1,443 @@
#!/usr/bin/env python3
"""
Model Fallback Verification Script
Issue #514: [Robustness] Model fallback verification — test before trusting
Tests model switches with verification prompts, validates context windows,
and ensures at least one viable model is available before starting loops.
Usage:
python3 model-fallback-verify.py # Run full verification
python3 model-fallback-verify.py check <model> # Test specific model
python3 model-fallback-verify.py context <model> # Check context window
python3 model-fallback-verify.py list # List available models
"""
import os, sys, json, yaml, urllib.request, urllib.error
from datetime import datetime, timezone
from pathlib import Path
# Configuration
HERMES_HOME = Path(os.environ.get("HERMES_HOME", Path.home() / ".hermes"))
CONFIG_FILE = HERMES_HOME / "config.yaml"
LOG_DIR = HERMES_HOME / "logs"
LOG_FILE = LOG_DIR / "model-verify.log"
MIN_CONTEXT_WINDOW = 64 * 1024 # 64K tokens minimum
# Provider endpoints
PROVIDER_CONFIGS = {
"openrouter": {
"base_url": "https://openrouter.ai/api/v1",
"headers": lambda api_key: {"Authorization": "Bearer " + api_key},
"chat_url": "/chat/completions",
},
"anthropic": {
"base_url": "https://api.anthropic.com/v1",
"headers": lambda api_key: {"x-api-key": api_key, "anthropic-version": "2023-06-01"},
"chat_url": "/messages",
},
"nous": {
"base_url": "https://inference.nousresearch.com/v1",
"headers": lambda api_key: {"Authorization": "Bearer " + api_key},
"chat_url": "/chat/completions",
},
"kimi-coding": {
"base_url": "https://api.kimi.com/coding/v1",
"headers": lambda api_key: {"x-api-key": api_key, "x-api-provider": "kimi-coding"},
"chat_url": "/chat/completions",
},
"custom": {
"base_url": None,
"headers": lambda api_key: {"Authorization": "Bearer " + api_key},
"chat_url": "/chat/completions",
},
}
# Known context windows for common models
KNOWN_CONTEXT_WINDOWS = {
"claude-opus-4-6": 200000,
"claude-sonnet-4": 200000,
"claude-3.5-sonnet": 200000,
"gpt-4o": 128000,
"gpt-4": 128000,
"gpt-3.5-turbo": 16385,
"qwen3:30b": 32768,
"qwen2.5:7b": 32768,
"hermes4:14b": 32768,
"gemma3:1b": 8192,
"gemma4": 32768,
"phi3:3.8b": 128000,
"kimi-k2.5": 128000,
"google/gemini-2.5-pro": 1048576,
"xiaomi/mimo-v2-pro": 131072,
"deepseek/deepseek-r1": 131072,
"deepseek/deepseek-chat-v3-0324": 131072,
}
def log(msg):
"""Log message to file and optionally to console."""
timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
log_entry = "[" + timestamp + "] " + msg
LOG_DIR.mkdir(parents=True, exist_ok=True)
with open(LOG_FILE, "a") as f:
f.write(log_entry + "\n")
if "--quiet" not in sys.argv:
print(log_entry)
def load_config():
"""Load Hermes config.yaml."""
if not CONFIG_FILE.exists():
return None
with open(CONFIG_FILE) as f:
return yaml.safe_load(f)
def get_provider_api_key(provider):
    """Get API key for a provider from .env or environment."""
    # Hyphenated provider names (e.g. kimi-coding) map to underscored env keys.
    key_name = provider.upper().replace("-", "_") + "_API_KEY"
    env_file = HERMES_HOME / ".env"
    if env_file.exists():
        with open(env_file) as f:
            for line in f:
                line = line.strip()
                if line.startswith(key_name + "="):
                    return line.split("=", 1)[1].strip().strip("'\"")
    return os.environ.get(key_name)
def get_ollama_models():
"""Get list of available Ollama models."""
ollama_host = os.environ.get("OLLAMA_HOST", "localhost:11434")
try:
resp = urllib.request.urlopen("http://" + ollama_host + "/api/tags", timeout=5)
data = json.loads(resp.read())
return [m["name"] for m in data.get("models", [])]
    except Exception:
        # Ollama daemon absent or unreachable; treat as no local models.
        return []
def test_model(model, provider, api_key=None, base_url=None):
"""
Test a model with a verification prompt.
Returns (success, response, error_message)
"""
if provider == "ollama" or ":" in model:
# Local Ollama model
ollama_host = os.environ.get("OLLAMA_HOST", "localhost:11434")
try:
body = json.dumps({
"model": model,
"prompt": "Say exactly VERIFIED and nothing else.",
"stream": False,
"options": {"num_predict": 10}
}).encode()
req = urllib.request.Request(
"http://" + ollama_host + "/api/generate",
data=body,
headers={"Content-Type": "application/json"}
)
resp = urllib.request.urlopen(req, timeout=30)
result = json.loads(resp.read())
response_text = result.get("response", "").strip()
if "VERIFIED" in response_text.upper():
return True, response_text, None
return False, response_text, "Unexpected response: " + response_text[:100]
except Exception as e:
return False, "", "Ollama error: " + str(e)[:200]
# Cloud provider
config = PROVIDER_CONFIGS.get(provider)
if not config:
return False, "", "Unknown provider: " + provider
url = base_url or config["base_url"]
if not url:
return False, "", "No base URL for provider: " + provider
headers = config["headers"](api_key or "")
headers["Content-Type"] = "application/json"
try:
body = json.dumps({
"model": model,
"max_tokens": 20,
"messages": [{"role": "user", "content": "Say exactly VERIFIED and nothing else."}]
}).encode()
req = urllib.request.Request(
url + config["chat_url"],
data=body,
headers=headers
)
resp = urllib.request.urlopen(req, timeout=30)
result = json.loads(resp.read())
if provider == "anthropic":
content = result.get("content", [{}])[0].get("text", "")
else:
choices = result.get("choices", [{}])
content = choices[0].get("message", {}).get("content", "") if choices else ""
if "VERIFIED" in content.upper():
return True, content, None
return False, content, "Unexpected response: " + content[:100]
except urllib.error.HTTPError as e:
error_body = e.read().decode() if e.fp else str(e)
if e.code == 404:
return False, "", "Model not found (404): " + error_body[:200]
elif e.code == 429:
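            # A 429 still proves the endpoint recognizes the model; treat as viable.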
return True, "", "Rate limited but model exists"
elif e.code >= 500:
return False, "", "Server error (" + str(e.code) + "): " + error_body[:200]
else:
return False, "", "HTTP " + str(e.code) + ": " + error_body[:200]
except Exception as e:
return False, "", "Request error: " + str(e)[:200]
def get_context_window(model, provider):
"""
Get the context window size for a model.
Returns (window_size, source)
"""
if model in KNOWN_CONTEXT_WINDOWS:
return KNOWN_CONTEXT_WINDOWS[model], "known"
model_lower = model.lower()
if "claude" in model_lower:
return 200000, "inferred (claude)"
elif "gpt-4" in model_lower:
return 128000, "inferred (gpt-4)"
elif "gemini" in model_lower:
return 1048576, "inferred (gemini)"
elif "qwen" in model_lower:
return 32768, "inferred (qwen)"
elif "gemma" in model_lower:
return 8192, "inferred (gemma)"
elif "phi" in model_lower:
return 128000, "inferred (phi)"
return 32768, "default"
def verify_model(model, provider, api_key=None, base_url=None):
"""
Full verification of a model: test prompt + context window.
Returns dict with verification results.
"""
result = {
"model": model,
"provider": provider,
"tested": False,
"responded": False,
"response": "",
"error": None,
"context_window": 0,
"context_source": "unknown",
"meets_minimum": False,
"viable": False,
}
success, response, error = test_model(model, provider, api_key, base_url)
result["tested"] = True
result["responded"] = success
result["response"] = response[:200] if response else ""
result["error"] = error
window, source = get_context_window(model, provider)
result["context_window"] = window
result["context_source"] = source
result["meets_minimum"] = window >= MIN_CONTEXT_WINDOW
result["viable"] = success and result["meets_minimum"]
return result
def get_fallback_chain(config):
"""Get the fallback chain from config or defaults."""
chain = []
model_config = config.get("model", {})
if isinstance(model_config, dict):
primary = model_config.get("default", "")
provider = model_config.get("provider", "")
if primary and provider:
chain.append({"model": primary, "provider": provider, "role": "primary"})
elif model_config:
chain.append({"model": str(model_config), "provider": "unknown", "role": "primary"})
auxiliary = config.get("auxiliary", {})
for aux_name, aux_config in auxiliary.items():
if isinstance(aux_config, dict):
aux_model = aux_config.get("model", "")
aux_provider = aux_config.get("provider", "")
if aux_model and aux_provider and aux_provider != "auto":
chain.append({"model": aux_model, "provider": aux_provider, "role": "auxiliary:" + aux_name})
ollama_models = get_ollama_models()
for model in ollama_models[:3]:
if not any(c["model"] == model for c in chain):
chain.append({"model": model, "provider": "ollama", "role": "local-fallback"})
return chain
def run_verification():
"""Run full model fallback verification."""
log("=== Model Fallback Verification ===")
config = load_config()
if not config:
log("ERROR: No config.yaml found")
return {"success": False, "error": "No config file"}
chain = get_fallback_chain(config)
if not chain:
log("ERROR: No models configured")
return {"success": False, "error": "No models in chain"}
results = []
viable_models = []
for entry in chain:
model = entry["model"]
provider = entry["provider"]
role = entry["role"]
api_key = get_provider_api_key(provider) if provider != "ollama" else None
base_url = None
if provider == "custom":
provider_config = config.get("auxiliary", {}).get("vision", {})
base_url = provider_config.get("base_url")
log("Testing [" + role + "] " + model + " (" + provider + ")...")
result = verify_model(model, provider, api_key, base_url)
result["role"] = role
results.append(result)
status = "PASS" if result["viable"] else "FAIL"
details = []
if not result["responded"]:
details.append("no response: " + str(result["error"]))
if not result["meets_minimum"]:
details.append("context " + str(result["context_window"]) + " < " + str(MIN_CONTEXT_WINDOW))
log(" [" + status + "] " + model + " - " + (", ".join(details) if details else "verified"))
if result["viable"]:
viable_models.append(result)
log("=== Results: " + str(len(viable_models)) + "/" + str(len(results)) + " models viable ===")
if not viable_models:
log("CRITICAL: No viable models found!")
for r in results:
log(" - " + r["model"] + " (" + r["provider"] + "): responded=" + str(r["responded"]) + ", context=" + str(r["context_window"]))
return {"success": False, "results": results, "viable": []}
log("Viable models (in priority order):")
for i, r in enumerate(viable_models, 1):
log(" " + str(i) + ". " + r["model"] + " (" + r["provider"] + ") - context: " + str(r["context_window"]) + " tokens [" + r["role"] + "]")
return {
"success": True,
"results": results,
"viable": viable_models,
"primary": viable_models[0] if viable_models else None,
}
def check_single_model(model):
"""Check a specific model."""
if ":" in model:
provider = "ollama"
elif "/" in model:
provider = "openrouter"
else:
provider = "unknown"
api_key = get_provider_api_key(provider) if provider != "ollama" else None
result = verify_model(model, provider, api_key)
if result["viable"]:
print("PASS: " + model)
print(" Context window: " + str(result["context_window"]) + " tokens")
print(" Response: " + result["response"][:100])
else:
print("FAIL: " + model)
if result["error"]:
print(" Error: " + str(result["error"]))
if not result["meets_minimum"]:
print(" Context window: " + str(result["context_window"]) + " < " + str(MIN_CONTEXT_WINDOW) + " minimum")
return result["viable"]
def check_context_window(model):
"""Check context window for a model."""
if ":" in model:
provider = "ollama"
elif "/" in model:
provider = "openrouter"
else:
provider = "unknown"
window, source = get_context_window(model, provider)
meets = window >= MIN_CONTEXT_WINDOW
print("Model: " + model)
print("Provider: " + provider)
print("Context window: " + str(window) + " tokens (" + source + ")")
print("Minimum (" + str(MIN_CONTEXT_WINDOW) + "): " + ("PASS" if meets else "FAIL"))
return meets
def list_models():
"""List all available models."""
config = load_config() or {}
chain = get_fallback_chain(config)
print("Configured models:")
for entry in chain:
print(" " + entry["model"].ljust(30) + " " + entry["provider"].ljust(15) + " [" + entry["role"] + "]")
ollama = get_ollama_models()
if ollama:
print("")
print("Ollama models:")
for m in ollama:
print(" " + m)
def main():
    # Flags such as --quiet are handled in log(); strip them before dispatch.
    args = [a for a in sys.argv[1:] if not a.startswith("--")]
    if not args:
        result = run_verification()
        sys.exit(0 if result["success"] else 1)
    cmd = args[0]
    if cmd == "check" and len(args) > 1:
        success = check_single_model(args[1])
        sys.exit(0 if success else 1)
    elif cmd == "context" and len(args) > 1:
        meets = check_context_window(args[1])
        sys.exit(0 if meets else 1)
elif cmd == "list":
list_models()
elif cmd == "test":
result = run_verification()
sys.exit(0 if result["success"] else 1)
else:
print("Usage:")
print(" model-fallback-verify.py Run full verification")
print(" model-fallback-verify.py check <model> Test specific model")
print(" model-fallback-verify.py context <model> Check context window")
print(" model-fallback-verify.py list List available models")
sys.exit(1)
if __name__ == "__main__":
main()


@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Full Nostr agent-to-agent communication demo - FINAL WORKING
"""


@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Soul Eval Gate — The Conscience of the Training Pipeline


@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision


@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision


@@ -1,176 +0,0 @@
#!/usr/bin/env python3
"""PR Triage Automation -- Categorize, deduplicate, report (#659)."""
import argparse, json, os, re, sys, subprocess
from collections import Counter, defaultdict
from datetime import datetime
from urllib.request import Request, urlopen
from urllib.error import HTTPError
def _token():
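    # Prefer the GITEA_TOKEN env var, then fall back to ~/.config/gitea/token.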
t = os.environ.get("GITEA_TOKEN", "")
if not t:
p = os.path.expanduser("~/.config/gitea/token")
if os.path.exists(p):
t = open(p).read().strip()
return t
def _api(url, token, method="GET", data=None):
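    # Thin Gitea REST helper: returns decoded JSON, or None on HTTP errors.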
h = {"Authorization": "token " + token, "Accept": "application/json"}
body = json.dumps(data).encode() if data else None
if data:
h["Content-Type"] = "application/json"
req = Request(url, data=body, headers=h, method=method)
try:
return json.loads(urlopen(req, timeout=30).read())
except HTTPError:
return None
def fetch_prs(base, token, owner, repo):
prs, page = [], 1
while True:
b = _api(base + "/api/v1/repos/" + owner + "/" + repo + "/pulls?state=open&limit=50&page=" + str(page), token)
if not b:
break
prs.extend(b)
if len(b) < 50:
break
page += 1
return prs
def fetch_issues(base, token, owner, repo):
iss, page = {}, 1
while True:
b = _api(base + "/api/v1/repos/" + owner + "/" + repo + "/issues?state=open&limit=50&page=" + str(page), token)
if not b:
break
for i in b:
if "pull_request" not in i:
iss[i["number"]] = i
if len(b) < 50:
break
page += 1
return iss
def categorize(pr):
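    # Scan title, body, and labels; the first matching keyword sets the category.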
c = (pr.get("title", "") + " " + pr.get("body", "") + " " + " ".join(l.get("name", "") for l in pr.get("labels", []))).lower()
for kw, cat in [("training data", "training-data"), ("dpo", "training-data"), ("grpo", "training-data"),
("fix:", "bug-fix"), ("bug", "bug-fix"), ("hotfix", "bug-fix"),
("feat:", "feature"), ("feature", "feature"),
("refactor", "maintenance"), ("cleanup", "maintenance"),
("doc", "documentation"), ("test", "testing"), ("infra", "infrastructure")]:
if kw in c:
return cat
return "other"
def refs(pr):
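    # Collect every #NNN issue reference from the PR title and body.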
return [int(m) for m in re.findall(r"#(\d+)", pr.get("title", "") + " " + pr.get("body", ""))]
def find_duplicates(prs):
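    # Group PRs by referenced issue; issues with more than one PR are flagged.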
by = defaultdict(list)
for p in prs:
for r in refs(p):
by[r].append(p)
return [g for g in by.values() if len(g) > 1]
def health(pr, issues):
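    # Build a one-row summary: category, issue refs (open vs presumed closed),
    # age and staleness in days, mergeability, and author.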
r = refs(pr)
created = datetime.fromisoformat(pr["created_at"].replace("Z", "+00:00"))
updated = datetime.fromisoformat(pr["updated_at"].replace("Z", "+00:00"))
now = datetime.now(created.tzinfo)
return {
"pr": pr["number"], "title": pr["title"], "head": pr["head"]["ref"],
"category": categorize(pr), "refs": r,
"open": [x for x in r if x in issues], "closed": [x for x in r if x not in issues],
"age": (now - created).days, "stale": (now - updated).days,
"mergeable": pr.get("mergeable"), "author": pr.get("user", {}).get("login", ""),
}
def report(repo, checks, dups):
lines = ["# PR Triage -- " + repo,
"Generated: " + datetime.now().strftime("%Y-%m-%d %H:%M"),
"Open PRs: " + str(len(checks)), "", "## Summary", ""]
cats = Counter(h["category"] for h in checks)
lines.append("| Category | Count |")
lines.append("|----------|-------|")
for c, n in cats.most_common():
lines.append("| " + c + " | " + str(n) + " |")
stale = [h for h in checks if h["stale"] > 7]
lines.extend(["", "Stale (>7d): " + str(len(stale)),
"Duplicate groups: " + str(len(dups)), ""])
if dups:
lines.append("## Duplicates")
for g in dups:
rs = set()
for p in g:
rs.update(refs(p))
lines.append("Issues " + ", ".join("#" + str(r) for r in sorted(rs)) + ":")
for p in g:
lines.append(" - #" + str(p["number"]) + ": " + p["title"])
lines.append("")
if stale:
lines.append("## Stale (>7d)")
for h in sorted(stale, key=lambda x: x["stale"], reverse=True):
lines.append("- #" + str(h["pr"]) + ": " + h["title"] + " -- " + str(h["stale"]) + "d")
lines.append("")
lines.append("## All PRs")
lines.append("| # | Title | Category | Age | Stale | Merge |")
lines.append("|---|-------|----------|-----|-------|-------|")
for h in sorted(checks, key=lambda x: x["pr"]):
m = "Y" if h["mergeable"] else ("N" if h["mergeable"] is False else "?")
s = str(h["stale"]) + "d" if h["stale"] > 7 else "-"
lines.append("| " + str(h["pr"]) + " | " + h["title"][:50] + " | " + h["category"] +
" | " + str(h["age"]) + "d | " + s + " | " + m + " |")
    return "\n".join(lines)
def main():
p = argparse.ArgumentParser(description="PR Triage Automation")
p.add_argument("--base-url", default="https://forge.alexanderwhitestone.com")
p.add_argument("--owner", default="Timmy_Foundation")
p.add_argument("--repo", default="")
p.add_argument("--json", action="store_true", dest="js")
p.add_argument("--output", default="")
a = p.parse_args()
token = _token()
if not token:
print("No token"); sys.exit(1)
repo = a.repo
if not repo:
try:
remote = subprocess.check_output(["git", "remote", "get-url", "origin"], text=True).strip()
m = re.search(r"[/:](\w[\w-]*)/(\w[\w-]*?)(?:\.git)?$", remote)
if m:
a.owner, repo = m.group(1), m.group(2)
except Exception:
pass
if not repo:
print("No repo specified"); sys.exit(1)
print("Triaging " + a.owner + "/" + repo + "...", file=sys.stderr)
prs = fetch_prs(a.base_url, token, a.owner, repo)
issues = fetch_issues(a.base_url, token, a.owner, repo)
checks = [health(pr, issues) for pr in prs]
dups = find_duplicates(prs)
if a.js:
print(json.dumps({"repo": repo, "prs": checks,
"duplicates": [[{"number": p["number"], "title": p["title"]} for p in g] for g in dups]},
indent=2))
else:
r = report(repo, checks, dups)
print(r)
if a.output:
with open(a.output, "w") as f:
f.write(r)
print("\n" + str(len(checks)) + " PRs, " + str(len(dups)) + " duplicate groups", file=sys.stderr)
if __name__ == "__main__":
main()


@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision


@@ -1,45 +0,0 @@
"""Tests for PR triage automation (#659)."""
import pytest
class TestCategorize:
def _pr(self, title="", body=""):
return {"title": title, "body": body, "labels": []}
def test_training(self):
from scripts.pr_triage import categorize
assert categorize(self._pr("Add DPO pairs")) == "training-data"
def test_bug(self):
from scripts.pr_triage import categorize
assert categorize(self._pr("fix: crash")) == "bug-fix"
def test_feature(self):
from scripts.pr_triage import categorize
assert categorize(self._pr("feat: dark mode")) == "feature"
def test_other(self):
from scripts.pr_triage import categorize
assert categorize(self._pr("random")) == "other"
class TestRefs:
def test_simple(self):
from scripts.pr_triage import refs
assert 123 in refs({"title": "Fix #123", "body": ""})
def test_multiple(self):
from scripts.pr_triage import refs
r = refs({"title": "", "body": "Closes #100, Refs #200"})
assert 100 in r and 200 in r
class TestDuplicates:
def test_found(self):
from scripts.pr_triage import find_duplicates
prs = [{"title": "", "body": "Fix #1", "number": 1, "head": {"ref": "a"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}},
{"title": "", "body": "Refs #1", "number": 2, "head": {"ref": "b"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}}]
assert len(find_duplicates(prs)) == 1
def test_none(self):
from scripts.pr_triage import find_duplicates
prs = [{"title": "", "body": "Fix #1", "number": 1, "head": {"ref": "a"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}},
{"title": "", "body": "Fix #2", "number": 2, "head": {"ref": "b"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}}]
assert find_duplicates(prs) == []