Compare commits

1 Commit

Author SHA1 Message Date
Alexander Whitestone
f8f4678ee4 feat: benchmark local Ollama models against 50 tok/s threshold (#287)
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m24s
Add scripts/benchmark_local_models.py — tests all local Ollama models
against the 50 tok/s UX threshold (configurable via --threshold).

Features:
- Auto-discovers all pulled Ollama models or test specific ones
- Configurable rounds, max tokens, threshold
- Per-round timing with prompt_eval/eval token breakdown
- Human-readable table report with PASS/FAIL/ERROR status
- JSON output mode (--json) for CI integration
- Exit code 1 if any model fails threshold

Usage:
  python3 scripts/benchmark_local_models.py                 # all models, 3 rounds
  python3 scripts/benchmark_local_models.py --models qwen2.5:7b  # single model
  python3 scripts/benchmark_local_models.py --json          # CI output
  python3 scripts/benchmark_local_models.py --threshold 30  # custom threshold
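
A sketch of how a CI step could consume the --json payload (the field names
match the script below; the consumer itself is illustrative):

  import json
  import subprocess
  import sys

  proc = subprocess.run(
      [sys.executable, "scripts/benchmark_local_models.py", "--json"],
      capture_output=True, text=True,
  )
  report = json.loads(proc.stdout)
  slow = [r["model"] for r in report["results"] if not r["meets_threshold"]]
  if slow:
      print(f"Below {report['threshold_tok_s']} tok/s: {', '.join(slow)}")
  sys.exit(proc.returncode)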

Tested: gemma3:1b scores 141.8 tok/s (PASS).

Closes #287
2026-04-13 17:46:53 -04:00
3 changed files with 285 additions and 215 deletions

View File (cron.scheduler)

@@ -13,7 +13,6 @@ import concurrent.futures
import json
import logging
import os
import re
import subprocess
import sys
@@ -41,44 +40,6 @@ from hermes_time import now as _hermes_now
logger = logging.getLogger(__name__)

# Minimum context tokens for cron jobs — models with smaller context are rejected
# to prevent truncation of long prompts + tool outputs.
CRON_MIN_CONTEXT_TOKENS = 64_000


class ModelContextError(ValueError):
    """Raised when a model's context length is too small for cron execution."""
    pass


def _check_model_context_compat(model: str, base_url: str = None, config_context_length: int = None):
    """Check if a model's context length meets the minimum for cron jobs.

    Raises ModelContextError if the model's context is too small.
    Silently passes if detection fails (fail-open).
    """
    if config_context_length is not None and config_context_length < CRON_MIN_CONTEXT_TOKENS:
        raise ModelContextError(
            f"Model '{model}' has {config_context_length:,} context tokens, "
            f"but cron jobs require at least {CRON_MIN_CONTEXT_TOKENS:,}. "
            f"Set a larger model in config.yaml or override per-job."
        )
    try:
        from agent.model_metadata import get_model_context_length
        context_length = get_model_context_length(model, base_url=base_url)
        if context_length is not None and context_length < CRON_MIN_CONTEXT_TOKENS:
            raise ModelContextError(
                f"Model '{model}' has {context_length:,} context tokens, "
                f"but cron jobs require at least {CRON_MIN_CONTEXT_TOKENS:,}. "
                f"Set a larger model in config.yaml or override per-job."
            )
    except ModelContextError:
        raise
    except Exception:
        # Detection failure is non-fatal — fail open
        logger.debug("Context length detection failed for %s, skipping check", model)


# =====================================================================
# Deploy Sync Guard
@@ -681,73 +642,6 @@ def _build_job_prompt(job: dict) -> str:
return "\n".join(parts)
def _validate_local_service_access(job: dict, prompt: str) -> tuple[bool, str]:
"""
Validate that a cron job can access local services it references.
Detects prompts that reference localhost services (Ollama, etc.) and
ensures the job is configured with a local base_url or provider.
Returns:
(is_valid, warning_message) — True if no issue, False if mismatch detected.
"""
# Patterns that indicate local service access is required
local_service_patterns = [
r"localhost:\d+",
r"127\.0\.0\.1:\d+",
r"Check Ollama",
r"check.*ollama",
r"Ollama.*responding",
r"ollama.*responding",
r"local.*model.*health",
r"health.*local.*model",
r"ping.*localhost",
r"curl.*localhost",
]
# Check if prompt references local services
prompt_lower = prompt.lower()
references_local = any(
re.search(pattern, prompt_lower) for pattern in local_service_patterns
)
if not references_local:
return True, ""
# Check if job is configured for local access
base_url = job.get("base_url", "")
provider = job.get("provider", "")
model = job.get("model", "")
# Check for explicit local base_url
if base_url and ("localhost" in base_url or "127.0.0.1" in base_url):
return True, ""
# Check for Ollama provider
if provider and "ollama" in provider.lower():
return True, ""
# Check for common local model patterns in model name
local_model_patterns = ["ollama", "llama", "mistral", "phi", "qwen", "gemma", "codellama"]
if model and any(pattern in model.lower() for pattern in local_model_patterns):
# Model name suggests local, but verify base_url
if not base_url:
return False, (
f"Cron job '{job.get('name', job.get('id'))}' references local services "
f"(localhost/Ollama) but has no base_url configured. "
f"Set base_url='http://localhost:11434' for Ollama, or pin to a local provider."
)
return True, ""
# No local configuration detected
return False, (
f"Cron job '{job.get('name', job.get('id'))}' references local services "
f"(localhost/Ollama) but is configured for cloud model "
f"(model={model or 'default'}, provider={provider or 'default'}). "
f"To check local Ollama, set base_url='http://localhost:11434' or provider='ollama'."
)
def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
"""
Execute a single cron job.
@@ -773,18 +667,6 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
job_id = job["id"]
job_name = job["name"]
prompt = _build_job_prompt(job)
# Validate local service access — detect prompts referencing localhost/Ollama
# that will fail on cloud models (#378)
is_valid, warning = _validate_local_service_access(job, prompt)
if not is_valid:
logger.warning("Job '%s': %s", job_name, warning)
# Inject warning into prompt so agent knows to report the issue
prompt = (
f"[SYSTEM WARNING: {warning}]\n\n"
f"{prompt}"
)
origin = _resolve_origin(job)
_cron_session_id = f"cron_{job_id}_{_hermes_now().strftime('%Y%m%d_%H%M%S')}"

View File (scripts/benchmark_local_models.py)

@@ -0,0 +1,284 @@
#!/usr/bin/env python3
"""
Benchmark local Ollama models against the 50 tok/s UX threshold.
Usage:
python3 scripts/benchmark_local_models.py [--models MODEL1,MODEL2] [--prompt PROMPT] [--rounds N]
python3 scripts/benchmark_local_models.py --all # test all pulled models
python3 scripts/benchmark_local_models.py --json # JSON output for CI
"""
import argparse
import json
import os
import sys
import time
import urllib.request
import urllib.error
from dataclasses import dataclass, asdict
from typing import Optional
OLLAMA_BASE = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
THRESHOLD_TOK_S = 50.0
BENCHMARK_PROMPT = (
"Explain the difference between TCP and UDP protocols. "
"Cover reliability, ordering, speed, and use cases. "
"Be thorough but concise. Write at least 300 words."
)


@dataclass
class BenchmarkResult:
    model: str
    size_gb: float
    prompt_tokens: int
    eval_tokens: int
    eval_duration_s: float
    tokens_per_second: float
    total_duration_s: float
    rounds: int
    avg_tok_s: float
    meets_threshold: bool
    error: Optional[str] = None


def get_models() -> list[dict]:
    """List all pulled Ollama models."""
    url = f"{OLLAMA_BASE}/api/tags"
    try:
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req, timeout=10) as resp:
            data = json.loads(resp.read())
        return data.get("models", [])
    except Exception as e:
        print(f"Error connecting to Ollama at {OLLAMA_BASE}: {e}", file=sys.stderr)
        sys.exit(1)


def benchmark_model(model: str, prompt: str, num_predict: int = 512) -> dict:
    """Run a single benchmark generation, return timing stats."""
    url = f"{OLLAMA_BASE}/api/generate"
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {
            "num_predict": num_predict,
            "temperature": 0.1,  # low temp for consistent output
        },
    }).encode()
    req = urllib.request.Request(url, data=payload, method="POST")
    req.add_header("Content-Type", "application/json")
    start = time.monotonic()
    try:
        with urllib.request.urlopen(req, timeout=300) as resp:
            data = json.loads(resp.read())
    except urllib.error.HTTPError as e:
        body = e.read().decode() if e.fp else str(e)
        raise RuntimeError(f"HTTP {e.code}: {body[:200]}")
    except Exception as e:
        raise RuntimeError(str(e))
    elapsed = time.monotonic() - start
    prompt_tokens = data.get("prompt_eval_count", 0)
    eval_tokens = data.get("eval_count", 0)
    eval_duration_ns = data.get("eval_duration", 0)
    total_duration_ns = data.get("total_duration", 0)
    eval_duration_s = eval_duration_ns / 1e9 if eval_duration_ns else elapsed
    total_duration_s = total_duration_ns / 1e9 if total_duration_ns else elapsed
    tok_s = eval_tokens / eval_duration_s if eval_duration_s > 0 else 0.0
    return {
        "prompt_tokens": prompt_tokens,
        "eval_tokens": eval_tokens,
        "eval_duration_s": round(eval_duration_s, 2),
        "total_duration_s": round(total_duration_s, 2),
        "tokens_per_second": round(tok_s, 1),
    }


def run_benchmark(
    model_name: str,
    model_size: float,
    prompt: str,
    rounds: int,
    num_predict: int,
    threshold: float = 50.0,
) -> BenchmarkResult:
    """Run multiple rounds and compute average."""
    results = []
    errors = []
    for i in range(rounds):
        try:
            r = benchmark_model(model_name, prompt, num_predict)
            results.append(r)
            print(f" Round {i+1}/{rounds}: {r['tokens_per_second']} tok/s "
                  f"({r['eval_tokens']} tokens in {r['eval_duration_s']}s)")
        except Exception as e:
            errors.append(str(e))
            print(f" Round {i+1}/{rounds}: ERROR - {e}")
    if not results:
        return BenchmarkResult(
            model=model_name,
            size_gb=model_size,
            prompt_tokens=0, eval_tokens=0,
            eval_duration_s=0, tokens_per_second=0,
            total_duration_s=0, rounds=rounds,
            avg_tok_s=0, meets_threshold=False,
            error="; ".join(errors),
        )
    avg_tok_s = sum(r["tokens_per_second"] for r in results) / len(results)
    avg_tok_s = round(avg_tok_s, 1)
    return BenchmarkResult(
        model=model_name,
        size_gb=model_size,
        prompt_tokens=sum(r["prompt_tokens"] for r in results) // len(results),
        eval_tokens=sum(r["eval_tokens"] for r in results) // len(results),
        eval_duration_s=round(sum(r["eval_duration_s"] for r in results) / len(results), 2),
        tokens_per_second=avg_tok_s,
        total_duration_s=round(sum(r["total_duration_s"] for r in results) / len(results), 2),
        rounds=len(results),
        avg_tok_s=avg_tok_s,
        meets_threshold=avg_tok_s >= threshold,
    )


def format_report(results: list[BenchmarkResult], threshold: float = 50.0) -> str:
    """Format a human-readable benchmark report."""
    lines = []
    lines.append("")
    lines.append("=" * 72)
    lines.append(f" LOCAL MODEL BENCHMARK — {threshold:.0f} tok/s UX Threshold")
    lines.append("=" * 72)
    lines.append("")
    # Summary table
    header = f"{'Model':<25} {'Size':>6} {'tok/s':>8} {'Threshold':>10} {'Status':>8}"
    lines.append(header)
    lines.append("-" * 72)
    passed = 0
    failed = 0
    errors = 0
    for r in sorted(results, key=lambda x: x.avg_tok_s, reverse=True):
        size_str = f"{r.size_gb:.1f}GB"
        tok_s_str = f"{r.avg_tok_s:.1f}"
        if r.error:
            status = "ERROR"
            errors += 1
        elif r.meets_threshold:
            status = "PASS"
            passed += 1
        else:
            status = "FAIL"
            failed += 1
        marker = ">" if r.meets_threshold else "X" if r.error else "!"
        thresh_str = f">= {threshold:.0f}"
        lines.append(f" {marker} {r.model:<23} {size_str:>6} {tok_s_str:>8} {thresh_str:>10} {status:>8}")
    lines.append("-" * 72)
    lines.append(f" Passed: {passed} | Failed: {failed} | Errors: {errors} | Total: {len(results)}")
    lines.append("")
    # Detail section for failures
    failures = [r for r in results if not r.meets_threshold and not r.error]
    if failures:
        lines.append(" FAILED MODELS (below threshold):")
        for r in sorted(failures, key=lambda x: x.avg_tok_s):
            gap = threshold - r.avg_tok_s
            lines.append(f" - {r.model}: {r.avg_tok_s:.1f} tok/s "
                         f"({gap:.1f} tok/s short, {r.eval_tokens} avg tokens/round)")
        lines.append("")
    error_list = [r for r in results if r.error]
    if error_list:
        lines.append(" ERRORS:")
        for r in error_list:
            lines.append(f" - {r.model}: {r.error}")
        lines.append("")
    # Hardware info
    import platform
    lines.append(f" Host: {platform.node()} | {platform.system()} {platform.release()}")
    lines.append(f" Ollama: {OLLAMA_BASE}")
    lines.append("")
    return "\n".join(lines)


def main():
    parser = argparse.ArgumentParser(description="Benchmark local Ollama models vs 50 tok/s threshold")
    parser.add_argument("--models", help="Comma-separated model names (default: all)")
    parser.add_argument("--prompt", default=BENCHMARK_PROMPT, help="Benchmark prompt")
    parser.add_argument("--rounds", type=int, default=3, help="Rounds per model (default: 3)")
    parser.add_argument("--tokens", type=int, default=512, help="Max tokens to generate (default: 512)")
    parser.add_argument("--json", action="store_true", help="JSON output for CI")
    parser.add_argument("--all", action="store_true", help="Test all pulled models (the default when --models is omitted)")
    parser.add_argument("--threshold", type=float, default=THRESHOLD_TOK_S, help="tok/s threshold")
    args = parser.parse_args()
    threshold = args.threshold
    # Get model list
    available = get_models()
    if not available:
        print("No models found. Pull a model first: ollama pull <model>", file=sys.stderr)
        sys.exit(1)
    if args.models:
        names = [m.strip() for m in args.models.split(",")]
        models = [m for m in available if m["name"] in names]
        missing = set(names) - set(m["name"] for m in models)
        if missing:
            print(f"Models not found: {', '.join(missing)}", file=sys.stderr)
            print(f"Available: {', '.join(m['name'] for m in available)}", file=sys.stderr)
            sys.exit(1)
    else:
        models = available
    print(f"Benchmarking {len(models)} model(s) against {threshold} tok/s threshold")
    print(f"Ollama: {OLLAMA_BASE} | Rounds: {args.rounds} | Max tokens: {args.tokens}")
    print()
    results = []
    for m in models:
        name = m["name"]
        size_gb = m.get("size", 0) / (1024**3)
        print(f" {name} ({size_gb:.1f}GB):")
        result = run_benchmark(name, size_gb, args.prompt, args.rounds, args.tokens, threshold)
        results.append(result)
    # Output
    report = format_report(results, threshold)
    if args.json:
        output = {
            "threshold_tok_s": threshold,
            "ollama_base": OLLAMA_BASE,
            "rounds": args.rounds,
            "results": [asdict(r) for r in results],
            "passed": sum(1 for r in results if r.meets_threshold),
            "failed": sum(1 for r in results if not r.meets_threshold and not r.error),
            "errors": sum(1 for r in results if r.error),
        }
        print(json.dumps(output, indent=2))
    else:
        print(report)
    # Exit code: 0 if all pass, 1 if any fail/error
    if any(not r.meets_threshold or r.error for r in results):
        sys.exit(1)
    sys.exit(0)


if __name__ == "__main__":
    main()
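
Since the CLI lives behind the __main__ guard, the module is import-safe; a CI
smoke test could reuse run_benchmark() directly rather than shelling out. A
sketch, assuming scripts/ is on sys.path (the module name is inferred from the
file name):

  from benchmark_local_models import get_models, run_benchmark

  models = get_models()
  first = models[0]  # smoke-test just one model
  result = run_benchmark(
      first["name"], first.get("size", 0) / (1024**3),
      "Say hello.", rounds=1, num_predict=64, threshold=50.0,
  )
  assert result.error is None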

View File (tests for cron.scheduler)

@@ -7,7 +7,7 @@ from unittest.mock import AsyncMock, patch, MagicMock
import pytest
from cron.scheduler import _resolve_origin, _resolve_delivery_target, _deliver_result, run_job, SILENT_MARKER, _build_job_prompt, _check_model_context_compat, ModelContextError, CRON_MIN_CONTEXT_TOKENS, _validate_local_service_access
from cron.scheduler import _resolve_origin, _resolve_delivery_target, _deliver_result, run_job, SILENT_MARKER, _build_job_prompt, _check_model_context_compat, ModelContextError, CRON_MIN_CONTEXT_TOKENS
class TestResolveOrigin:
@@ -1001,99 +1001,3 @@ class TestCheckModelContextCompat:
    ):
        with pytest.raises(ModelContextError):
            _check_model_context_compat("borderline-model")


class TestValidateLocalServiceAccess:
    """Tests for _validate_local_service_access — detects local service mismatches (#378)."""

    def test_no_local_reference_passes(self):
        """Prompt without local references always passes."""
        job = {"name": "test", "model": "gpt-4"}
        is_valid, msg = _validate_local_service_access(job, "Check system health")
        assert is_valid is True
        assert msg == ""

    def test_localhost_reference_with_local_base_url(self):
        """Prompt references localhost but job has local base_url — passes."""
        job = {
            "name": "health-check",
            "model": "llama3",
            "base_url": "http://localhost:11434/v1",
        }
        is_valid, msg = _validate_local_service_access(job, "Check if Ollama is responding on localhost:11434")
        assert is_valid is True
        assert msg == ""

    def test_localhost_reference_with_cloud_model_fails(self):
        """Prompt references localhost but job uses cloud model — fails."""
        job = {
            "name": "health-check",
            "model": "nous/mimo-v2-pro",
            "provider": "nous",
        }
        is_valid, msg = _validate_local_service_access(job, "Check Ollama is responding on localhost:11434")
        assert is_valid is False
        assert "localhost" in msg.lower() or "ollama" in msg.lower()
        assert "cloud model" in msg.lower() or "base_url" in msg.lower()

    def test_ollama_check_with_ollama_provider(self):
        """Prompt references Ollama and job uses ollama provider — passes."""
        job = {
            "name": "ollama-health",
            "provider": "ollama",
            "base_url": "http://localhost:11434",
        }
        is_valid, msg = _validate_local_service_access(job, "Check Ollama is responding")
        assert is_valid is True
        assert msg == ""

    def test_case_insensitive_detection(self):
        """Detection is case-insensitive."""
        job = {"name": "test", "model": "gpt-4"}
        # Lowercase
        is_valid, _ = _validate_local_service_access(job, "check ollama is responding")
        assert is_valid is False
        # Uppercase
        is_valid, _ = _validate_local_service_access(job, "CHECK OLLAMA IS RESPONDING")
        assert is_valid is False
        # Mixed case
        is_valid, _ = _validate_local_service_access(job, "Check if Ollama Is Responding")
        assert is_valid is False

    def test_curl_localhost_detected(self):
        """curl localhost references are detected."""
        job = {"name": "test", "model": "gpt-4"}
        is_valid, _ = _validate_local_service_access(job, "Run curl localhost:8080/health")
        assert is_valid is False

    def test_127_0_0_1_detected(self):
        """127.0.0.1 references are detected."""
        job = {"name": "test", "model": "gpt-4"}
        is_valid, _ = _validate_local_service_access(job, "Check http://127.0.0.1:11434/api/tags")
        assert is_valid is False

    def test_local_model_name_without_base_url_fails(self):
        """Model name suggests local but no base_url — fails."""
        job = {"name": "test", "model": "llama3"}
        is_valid, msg = _validate_local_service_access(job, "Check Ollama is responding")
        assert is_valid is False
        assert "base_url" in msg

    def test_local_model_name_with_base_url_passes(self):
        """Model name suggests local and has base_url — passes."""
        job = {"name": "test", "model": "llama3", "base_url": "http://localhost:11434"}
        is_valid, msg = _validate_local_service_access(job, "Check Ollama is responding")
        assert is_valid is True
        assert msg == ""

    def test_nightwatch_health_monitor_scenario(self):
        """Reproduces the exact #378 scenario."""
        job = {
            "name": "nightwatch-health-monitor",
            "model": "nous/mimo-v2-pro",
            "provider": "nous",
        }
        prompt = "Check Ollama is responding. Run curl http://localhost:11434/api/tags and report status."
        is_valid, msg = _validate_local_service_access(job, prompt)
        assert is_valid is False
        assert "nightwatch-health-monitor" in msg or "localhost" in msg