Compare commits


1 Commit

Author SHA1 Message Date
Alexander Whitestone
755e7513a1 docs: add grounded tensorzero evaluation packet (#860)
- add a script that inventories Hermes routing/evaluation surfaces relevant to a TensorZero cutover
- generate a markdown and JSON evaluation packet for issue #860
- score gateway replacement, config migration, canary rollout, session feedback, and eval-suite readiness
- add focused regression tests for touchpoint scanning, requirement scoring, and report rendering

Refs #860
2026-04-22 11:33:31 -04:00
7 changed files with 1987 additions and 568 deletions


@@ -1,139 +0,0 @@
# Tool-Calling Benchmark Report
Generated: 2026-04-22 15:46 UTC
Executed: 3 calls from a 100-call suite across 7 categories
Models tested: nous:gia-3/gemma-4-31b, gemini:gemma-4-26b-it, nous:mimo-v2-pro
## Requested category mix
| Category | Target calls |
|----------|--------------|
| file | 20 |
| terminal | 20 |
| web | 15 |
| code | 15 |
| browser | 10 |
| delegate | 10 |
| mcp | 10 |
## Summary
| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema parse success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Tool execution success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel tool success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg latency (s) | 0.00 | 0.00 | 0.00 |
| Avg tokens per call | 0.0 | 0.0 | 0.0 |
| Avg token cost per call (USD) | n/a | n/a | n/a |
| Skipped / unavailable | 0/1 | 0/1 | 0/1 |
## Per-category breakdown
### File
| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Exec OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Correct tool | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg tokens | 0.0 | 0.0 | 0.0 |
| Skipped | 0/1 | 0/1 | 0/1 |
## Failure analysis
### nous:gia-3/gemma-4-31b — 1 failures
| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |
### gemini:gemma-4-26b-it — 1 failures
| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |
### nous:mimo-v2-pro — 1 failures
| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |
## Skipped / unavailable cases
No cases were skipped.
## Raw results
```json
[
{
"test_id": "file-01",
"category": "file",
"model": "nous:gia-3/gemma-4-31b",
"prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
"expected_tool": "read_file",
"success": false,
"tool_called": null,
"schema_ok": false,
"tool_args_valid": false,
"execution_ok": false,
"tool_count": 0,
"parallel_ok": false,
"latency_s": 0,
"total_tokens": 0,
"estimated_cost_usd": null,
"cost_status": "unknown",
"skipped": false,
"skip_reason": "",
"error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
"raw_response": ""
},
{
"test_id": "file-01",
"category": "file",
"model": "gemini:gemma-4-26b-it",
"prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
"expected_tool": "read_file",
"success": false,
"tool_called": null,
"schema_ok": false,
"tool_args_valid": false,
"execution_ok": false,
"tool_count": 0,
"parallel_ok": false,
"latency_s": 0,
"total_tokens": 0,
"estimated_cost_usd": null,
"cost_status": "unknown",
"skipped": false,
"skip_reason": "",
"error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
"raw_response": ""
},
{
"test_id": "file-01",
"category": "file",
"model": "nous:mimo-v2-pro",
"prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
"expected_tool": "read_file",
"success": false,
"tool_called": null,
"schema_ok": false,
"tool_args_valid": false,
"execution_ok": false,
"tool_count": 0,
"parallel_ok": false,
"latency_s": 0,
"total_tokens": 0,
"estimated_cost_usd": null,
"cost_status": "unknown",
"skipped": false,
"skip_reason": "",
"error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
"raw_response": ""
}
]
```


@@ -8,11 +8,10 @@ success rates, latency, and token costs.
Usage:
python3 benchmarks/tool_call_benchmark.py # full 100-call suite
python3 benchmarks/tool_call_benchmark.py --limit 10 # quick smoke test
python3 benchmarks/tool_call_benchmark.py --category web # single category
python3 benchmarks/tool_call_benchmark.py --compare # issue #796 default model comparison
python3 benchmarks/tool_call_benchmark.py --models nous # single model
python3 benchmarks/tool_call_benchmark.py --category file # single category
Requires: hermes-agent venv activated, provider credentials for the selected models,
and any optional browser/MCP/web backends you want to include in the run.
Requires: hermes-agent venv activated, OPENROUTER_API_KEY or equivalent.
"""
import argparse
@@ -26,12 +25,10 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
# Ensure hermes-agent root is importable before local package imports.
# Ensure hermes-agent root is importable
REPO_ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(REPO_ROOT))
from agent.usage_pricing import CanonicalUsage, estimate_usage_cost
# ---------------------------------------------------------------------------
# Test Definitions
# ---------------------------------------------------------------------------
@@ -42,11 +39,9 @@ class ToolCall:
id: str
category: str
prompt: str
expected_tool: str # exact tool name we expect the model to call
expected_params_check: str = "" # substring expected in JSON args
expected_tool_prefix: str = "" # prefix match for dynamic surfaces like mcp_*
expects_parallel: bool = False # whether this prompt should elicit multiple tool calls
timeout: int = 30 # max seconds per call
expected_tool: str # tool name we expect the model to call
expected_params_check: str = "" # substring expected in JSON args
timeout: int = 30 # max seconds per call
notes: str = ""
@@ -190,107 +185,85 @@ SUITE: list[ToolCall] = [
ToolCall("deleg-10", "delegate", "Delegate: create a temp file /tmp/bench_deleg.txt with 'done'.",
"delegate_task", "write"),
# ── Web Search & Extraction (15) ─────────────────────────────────────
ToolCall("web-01", "web", "Search the web for Python dataclasses documentation.",
"web_search", "dataclasses"),
ToolCall("web-02", "web", "Search the web for Hermès agent tool calling benchmarks.",
"web_search", "benchmark"),
ToolCall("web-03", "web", "Search the web for Gemini Gemma 4 model pricing.",
"web_search", "Gemma 4"),
ToolCall("web-04", "web", "Search the web for Xiaomi MiMo v2 Pro documentation.",
"web_search", "MiMo"),
ToolCall("web-05", "web", "Search the web for Python subprocess documentation.",
"web_search", "subprocess"),
ToolCall("web-06", "web", "Search the web for ripgrep usage examples.",
"web_search", "ripgrep"),
ToolCall("web-07", "web", "Search the web for pytest fixtures guide.",
"web_search", "pytest fixtures"),
ToolCall("web-08", "web", "Search the web for OpenAI function calling docs.",
"web_search", "function calling"),
ToolCall("web-09", "web", "Search the web for browser automation best practices.",
"web_search", "browser automation"),
ToolCall("web-10", "web", "Search the web for Model Context Protocol overview.",
"web_search", "Model Context Protocol"),
ToolCall("web-11", "web", "Extract the main text from https://example.com.",
"web_extract", "example.com"),
ToolCall("web-12", "web", "Extract the page content from https://example.org.",
"web_extract", "example.org"),
ToolCall("web-13", "web", "Extract the title and body text from https://www.iana.org/domains/reserved.",
"web_extract", "iana.org"),
ToolCall("web-14", "web", "Extract content from https://httpbin.org/html.",
"web_extract", "httpbin.org"),
ToolCall("web-15", "web", "Extract the main content from https://www.python.org/.",
"web_extract", "python.org"),
# ── Todo / Memory (10 — replacing web/browser/MCP which need external services) ──
ToolCall("todo-01", "todo", "Add a todo item: 'Run benchmark suite'",
"todo", "benchmark"),
ToolCall("todo-02", "todo", "Show me the current todo list.",
"todo", ""),
ToolCall("todo-03", "todo", "Mark the first todo item as completed.",
"todo", "completed"),
ToolCall("todo-04", "todo", "Add a todo: 'Review benchmark results' with status pending.",
"todo", "Review"),
ToolCall("todo-05", "todo", "Clear all completed todos.",
"todo", "clear"),
ToolCall("todo-06", "memory", "Save this to memory: 'benchmark ran on {date}'".format(
date=datetime.now().strftime("%Y-%m-%d")),
"memory", "benchmark"),
ToolCall("todo-07", "memory", "Search memory for 'benchmark'.",
"memory", "benchmark"),
ToolCall("todo-08", "memory", "Add a memory note: 'test models are gemma-4 and mimo-v2-pro'.",
"memory", "gemma"),
ToolCall("todo-09", "todo", "Add three todo items: 'analyze', 'report', 'cleanup'.",
"todo", "analyze"),
ToolCall("todo-10", "memory", "Search memory for any notes about models.",
"memory", "model"),
# ── Browser Automation (10) ───────────────────────────────────────────
ToolCall("browser-01", "browser", "Open https://example.com in the browser.",
"browser_navigate", "example.com"),
ToolCall("browser-02", "browser", "Open https://www.python.org in the browser.",
"browser_navigate", "python.org"),
ToolCall("browser-03", "browser", "Open https://www.wikipedia.org in the browser.",
"browser_navigate", "wikipedia.org"),
ToolCall("browser-04", "browser", "Navigate the browser to https://example.org.",
"browser_navigate", "example.org"),
ToolCall("browser-05", "browser", "Go to https://httpbin.org/forms/post in the browser.",
"browser_navigate", "httpbin.org/forms/post"),
ToolCall("browser-06", "browser", "Open https://www.iana.org/domains/reserved in the browser.",
"browser_navigate", "iana.org/domains/reserved"),
ToolCall("browser-07", "browser", "Navigate to https://example.net in the browser.",
"browser_navigate", "example.net"),
ToolCall("browser-08", "browser", "Open https://developer.mozilla.org in the browser.",
"browser_navigate", "developer.mozilla.org"),
ToolCall("browser-09", "browser", "Navigate the browser to https://www.rfc-editor.org.",
"browser_navigate", "rfc-editor.org"),
ToolCall("browser-10", "browser", "Open https://www.gnu.org in the browser.",
"browser_navigate", "gnu.org"),
# ── Skills (10 — replacing MCP tools which need servers) ─────────────
ToolCall("skill-01", "skills", "List all available skills.",
"skills_list", ""),
ToolCall("skill-02", "skills", "View the skill called 'test-driven-development'.",
"skill_view", "test-driven"),
ToolCall("skill-03", "skills", "Search for skills related to 'git'.",
"skills_list", "git"),
ToolCall("skill-04", "skills", "View the 'code-review' skill.",
"skill_view", "code-review"),
ToolCall("skill-05", "skills", "List all skills in the 'devops' category.",
"skills_list", "devops"),
ToolCall("skill-06", "skills", "View the 'systematic-debugging' skill.",
"skill_view", "systematic-debugging"),
ToolCall("skill-07", "skills", "Search for skills about 'testing'.",
"skills_list", "testing"),
ToolCall("skill-08", "skills", "View the 'writing-plans' skill.",
"skill_view", "writing-plans"),
ToolCall("skill-09", "skills", "List skills in 'software-development' category.",
"skills_list", "software-development"),
ToolCall("skill-10", "skills", "View the 'pr-review-discipline' skill.",
"skill_view", "pr-review"),
# ── MCP Tools (10) ────────────────────────────────────────────────────
ToolCall("mcp-01", "mcp", "Use an available MCP tool to list configured MCP resources or prompts.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-02", "mcp", "Use an MCP tool to inspect available resources on a configured server.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-03", "mcp", "Use an MCP tool to read a resource from any configured MCP server.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-04", "mcp", "Use an MCP tool to list prompts from any configured MCP server.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-05", "mcp", "Use an available MCP tool and report what it returns.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-06", "mcp", "Call any safe MCP tool that is currently available and summarize the response.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-07", "mcp", "Use one configured MCP tool to enumerate data or capabilities.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-08", "mcp", "Use an MCP tool to fetch a small piece of data from a connected server.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-09", "mcp", "Invoke an available MCP tool and show the structured result.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-10", "mcp", "Use a currently available MCP tool rather than a built-in Hermes tool.",
"", "", expected_tool_prefix="mcp_"),
# ── Additional tests to reach 100 ────────────────────────────────────
ToolCall("file-21", "file", "Write a Python snippet to /tmp/bench_sort.py that sorts [3,1,2].",
"write_file", "bench_sort"),
ToolCall("file-22", "file", "Read /tmp/bench_sort.py back and confirm it exists.",
"read_file", "bench_sort"),
ToolCall("file-23", "file", "Search for 'class' in all .py files in the benchmarks directory.",
"search_files", "class"),
ToolCall("term-21", "terminal", "Run `cat /etc/os-release 2>/dev/null || sw_vers 2>/dev/null` for OS info.",
"terminal", "os"),
ToolCall("term-22", "terminal", "Run `nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null` for CPU count.",
"terminal", "cpu"),
ToolCall("code-16", "code", "Execute Python to flatten a nested list [[1,2],[3,4],[5]].",
"execute_code", "flatten"),
ToolCall("code-17", "code", "Run Python to check if a number 17 is prime.",
"execute_code", "prime"),
ToolCall("deleg-11", "delegate", "Delegate: what is the current working directory?",
"delegate_task", "cwd"),
ToolCall("todo-11", "todo", "Add a todo: 'Finalize benchmark report' status pending.",
"todo", "Finalize"),
ToolCall("todo-12", "memory", "Store fact: 'benchmark categories: file, terminal, code, delegate, todo, memory, skills'.",
"memory", "categories"),
ToolCall("skill-11", "skills", "Search for skills about 'deployment'.",
"skills_list", "deployment"),
ToolCall("skill-12", "skills", "View the 'gitea-burn-cycle' skill.",
"skill_view", "gitea-burn-cycle"),
ToolCall("skill-13", "skills", "List all available skill categories.",
"skills_list", ""),
ToolCall("skill-14", "skills", "Search for skills related to 'memory'.",
"skills_list", "memory"),
ToolCall("skill-15", "skills", "View the 'mimo-swarm' skill.",
"skill_view", "mimo-swarm"),
]
# fmt: on
DEFAULT_COMPARE_MODELS = [
"nous:gia-3/gemma-4-31b",
"gemini:gemma-4-26b-it",
"nous:mimo-v2-pro",
]
ISSUE_796_CATEGORY_COUNTS = {
"file": 20,
"terminal": 20,
"web": 15,
"code": 15,
"browser": 10,
"delegate": 10,
"mcp": 10,
}
def suite_category_counts() -> dict[str, int]:
counts: dict[str, int] = {}
for tc in SUITE:
counts[tc.category] = counts.get(tc.category, 0) + 1
return counts
# ---------------------------------------------------------------------------
# Runner
@@ -305,17 +278,9 @@ class CallResult:
expected_tool: str
success: bool
tool_called: Optional[str] = None
schema_ok: bool = False
tool_args_valid: bool = False
execution_ok: bool = False
tool_count: int = 0
parallel_ok: bool = False
latency_s: float = 0.0
total_tokens: int = 0
estimated_cost_usd: Optional[float] = None
cost_status: str = "unknown"
skipped: bool = False
skip_reason: str = ""
error: str = ""
raw_response: str = ""
@@ -326,12 +291,7 @@ class ModelStats:
total: int = 0
schema_ok: int = 0 # model produced valid tool call JSON
exec_ok: int = 0 # tool actually ran without error
parallel_ok: int = 0 # calls with 2+ tool calls that executed successfully
skipped: int = 0
latency_sum: float = 0.0
total_tokens: int = 0
total_cost_usd: float = 0.0
known_cost_calls: int = 0
failures: list = field(default_factory=list)
@property
@@ -346,10 +306,6 @@ class ModelStats:
def avg_latency(self) -> float:
return (self.latency_sum / self.total) if self.total else 0
@property
def avg_cost_usd(self) -> Optional[float]:
return (self.total_cost_usd / self.known_cost_calls) if self.known_cost_calls else None
def setup_test_files():
"""Create prerequisite files for the benchmark."""
@@ -362,38 +318,20 @@ def setup_test_files():
)
def _matches_expected_tool(test_case: ToolCall, tool_name: str) -> bool:
if test_case.expected_tool and tool_name == test_case.expected_tool:
return True
if test_case.expected_tool_prefix and tool_name.startswith(test_case.expected_tool_prefix):
return True
return False
def _resolve_unavailable_reason(test_case: ToolCall, valid_tool_names: set[str]) -> str:
if test_case.expected_tool and test_case.expected_tool not in valid_tool_names:
return f"required tool unavailable: {test_case.expected_tool}"
if test_case.expected_tool_prefix and not any(
name.startswith(test_case.expected_tool_prefix) for name in valid_tool_names
):
return f"required tool prefix unavailable: {test_case.expected_tool_prefix}"
return ""
def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
"""Run a single tool-calling test through the agent."""
from run_agent import AIAgent
result = CallResult(
test_id=tc.id,
category=tc.category,
model=model_spec,
prompt=tc.prompt,
expected_tool=tc.expected_tool or tc.expected_tool_prefix,
expected_tool=tc.expected_tool,
success=False,
)
try:
from run_agent import AIAgent
agent = AIAgent(
model=model_spec,
provider=provider,
@@ -404,14 +342,6 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
persist_session=False,
)
valid_tool_names = set(getattr(agent, "valid_tool_names", set()))
unavailable_reason = _resolve_unavailable_reason(tc, valid_tool_names)
if unavailable_reason:
result.skipped = True
result.skip_reason = unavailable_reason
result.error = unavailable_reason
return result
t0 = time.time()
conv = agent.run_conversation(
user_message=tc.prompt,
@@ -422,75 +352,52 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
)
result.latency_s = round(time.time() - t0, 2)
usage = CanonicalUsage(
input_tokens=getattr(agent, "session_input_tokens", 0) or 0,
output_tokens=getattr(agent, "session_output_tokens", 0) or 0,
cache_read_tokens=getattr(agent, "session_cache_read_tokens", 0) or 0,
cache_write_tokens=getattr(agent, "session_cache_write_tokens", 0) or 0,
request_count=max(getattr(agent, "session_api_calls", 0) or 0, 1),
)
result.total_tokens = usage.total_tokens
billed_model = model_spec.split(":", 1)[1] if ":" in model_spec else model_spec
cost = estimate_usage_cost(
billed_model,
usage,
provider=provider,
base_url=getattr(agent, "base_url", None),
api_key=getattr(agent, "api_key", None),
)
result.cost_status = cost.status
result.estimated_cost_usd = float(cost.amount_usd) if cost.amount_usd is not None else None
messages = conv.get("messages", [])
tool_calls = []
# Find the first assistant message with tool_calls
tool_called = None
tool_args_str = ""
for msg in messages:
if msg.get("role") == "assistant" and msg.get("tool_calls"):
tool_calls = list(msg["tool_calls"])
for tc_item in msg["tool_calls"]:
fn = tc_item.get("function", {})
tool_called = fn.get("name", "")
tool_args_str = fn.get("arguments", "{}")
break
break
if tool_calls:
result.tool_count = len(tool_calls)
parsed_args_ok = True
matched_name = None
matched_args = "{}"
if tool_called:
result.tool_called = tool_called
result.schema_ok = True
for tc_item in tool_calls:
fn = tc_item.get("function", {})
tool_name = fn.get("name", "")
tool_args = fn.get("arguments", "{}")
try:
json.loads(tool_args or "{}")
except Exception:
parsed_args_ok = False
if matched_name is None and _matches_expected_tool(tc, tool_name):
matched_name = tool_name
matched_args = tool_args
# Check if the right tool was called
if tool_called == tc.expected_tool:
result.success = True
result.schema_ok = parsed_args_ok
result.tool_called = matched_name or tool_calls[0].get("function", {}).get("name", "")
if matched_name:
result.tool_args_valid = (
tc.expected_params_check in matched_args if tc.expected_params_check else True
)
result.success = result.schema_ok and result.tool_args_valid
# Check if args contain expected substring
if tc.expected_params_check:
result.tool_args_valid = tc.expected_params_check in tool_args_str
else:
result.tool_args_valid = True
# Check if tool executed (look for tool role message)
for msg in messages:
if msg.get("role") == "tool":
content = msg.get("content", "")
if content:
if content and "error" not in content.lower()[:50]:
result.execution_ok = True
break
result.parallel_ok = result.tool_count > 1 and result.execution_ok
elif content:
result.execution_ok = True # got a response, even if error
break
else:
# No tool call produced — still check if model responded
final = conv.get("final_response", "")
result.raw_response = final[:200] if final else ""
except Exception as e:
result.error = f"{type(e).__name__}: {str(e)[:200]}"
result.latency_s = round(time.time() - t0, 2) if 't0' in locals() else 0
result.latency_s = round(time.time() - t0, 2) if 't0' in dir() else 0
return result
@@ -499,134 +406,100 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
"""Generate markdown benchmark report."""
now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")
stats: dict[str, ModelStats] = {m: ModelStats(model=m) for m in models}
# Aggregate per model
stats: dict[str, ModelStats] = {}
for m in models:
stats[m] = ModelStats(model=m)
by_category: dict[str, dict[str, list[CallResult]]] = {}
for r in results:
s = stats[r.model]
s.total += 1
s.schema_ok += int(r.schema_ok)
s.exec_ok += int(r.execution_ok)
s.latency_sum += r.latency_s
s.total_tokens += r.total_tokens
if r.estimated_cost_usd is not None:
s.total_cost_usd += r.estimated_cost_usd
s.known_cost_calls += 1
if r.skipped:
s.skipped += 1
else:
s.schema_ok += int(r.schema_ok)
s.exec_ok += int(r.execution_ok)
s.parallel_ok += int(r.parallel_ok)
if not r.success:
s.failures.append(r)
if not r.success:
s.failures.append(r)
by_category.setdefault(r.category, {}).setdefault(r.model, []).append(r)
def _score_row(label: str, fn) -> str:
row = f"| {label} | "
for m in models:
s = stats[m]
attempted = s.total - s.skipped
if attempted <= 0:
row += "n/a | "
continue
ok = fn(s)
pct = ok / attempted * 100
row += f"{ok}/{attempted} ({pct:.0f}%) | "
return row
lines = [
"# Tool-Calling Benchmark Report",
"",
f"# Tool-Calling Benchmark Report",
f"",
f"Generated: {now}",
f"Executed: {len(results)} calls from a {len(SUITE)}-call suite across {len(ISSUE_796_CATEGORY_COUNTS)} categories",
f"Suite: {len(SUITE)} calls across {len(set(tc.category for tc in SUITE))} categories",
f"Models tested: {', '.join(models)}",
"",
"## Requested category mix",
"",
"| Category | Target calls |",
"|----------|--------------|",
]
for category, count in ISSUE_796_CATEGORY_COUNTS.items():
lines.append(f"| {category} | {count} |")
lines.extend([
"",
"## Summary",
"",
f"",
f"## Summary",
f"",
f"| Metric | {' | '.join(models)} |",
f"|--------|{'|'.join('---------' for _ in models)}|",
_score_row("Schema parse success", lambda s: s.schema_ok),
_score_row("Tool execution success", lambda s: s.exec_ok),
_score_row("Parallel tool success", lambda s: s.parallel_ok),
])
]
row = "| Avg latency (s) | "
for m in models:
row += f"{stats[m].avg_latency:.2f} | "
lines.append(row)
row = "| Avg tokens per call | "
for m in models:
total = stats[m].total
avg_tokens = stats[m].total_tokens / total if total else 0
row += f"{avg_tokens:.1f} | "
lines.append(row)
row = "| Avg token cost per call (USD) | "
for m in models:
avg_cost = stats[m].avg_cost_usd
row += (f"{avg_cost:.6f} | " if avg_cost is not None else "n/a | ")
lines.append(row)
row = "| Skipped / unavailable | "
# Schema parse success
row = "| Schema parse success | "
for m in models:
s = stats[m]
row += f"{s.skipped}/{s.total} | "
row += f"{s.schema_ok}/{s.total} ({s.schema_pct:.0f}%) | "
lines.append(row)
# Tool execution success
row = "| Tool execution success | "
for m in models:
s = stats[m]
row += f"{s.exec_ok}/{s.total} ({s.exec_pct:.0f}%) | "
lines.append(row)
# Correct tool selected
row = "| Correct tool selected | "
for m in models:
s = stats[m]
correct = sum(1 for r in results if r.model == m and r.success)
pct = (correct / s.total * 100) if s.total else 0
row += f"{correct}/{s.total} ({pct:.0f}%) | "
lines.append(row)
# Avg latency
row = "| Avg latency (s) | "
for m in models:
s = stats[m]
row += f"{s.avg_latency:.2f} | "
lines.append(row)
lines.append("")
lines.append("## Per-category breakdown")
# Per-category breakdown
lines.append("## Per-Category Breakdown")
lines.append("")
for cat in sorted(by_category.keys()):
lines.append(f"### {cat.title()}")
lines.append("")
lines.append(f"| Metric | {' | '.join(models)} |")
lines.append(f"|--------|{'|'.join('---------' for _ in models)}|")
cat_data = by_category[cat]
for metric_name, fn in [
("Schema OK", lambda r: r.schema_ok),
("Exec OK", lambda r: r.execution_ok),
("Parallel OK", lambda r: r.parallel_ok),
("Correct tool", lambda r: r.success),
]:
row = f"| {metric_name} | "
for m in models:
results_m = by_category[cat].get(m, [])
attempted = [r for r in results_m if not r.skipped]
if not attempted:
row += "n/a | "
continue
ok = sum(1 for r in attempted if fn(r))
pct = ok / len(attempted) * 100
row += f"{ok}/{len(attempted)} ({pct:.0f}%) | "
results_m = cat_data.get(m, [])
total = len(results_m)
ok = sum(1 for r in results_m if fn(r))
pct = (ok / total * 100) if total else 0
row += f"{ok}/{total} ({pct:.0f}%) | "
lines.append(row)
row = "| Avg tokens | "
for m in models:
results_m = by_category[cat].get(m, [])
avg_tokens = sum(r.total_tokens for r in results_m) / len(results_m) if results_m else 0
row += f"{avg_tokens:.1f} | "
lines.append(row)
row = "| Skipped | "
for m in models:
results_m = by_category[cat].get(m, [])
skipped = sum(1 for r in results_m if r.skipped)
row += f"{skipped}/{len(results_m)} | "
lines.append(row)
lines.append("")
lines.append("## Failure analysis")
# Failure analysis
lines.append("## Failure Analysis")
lines.append("")
any_failures = False
for m in models:
s = stats[m]
@@ -641,40 +514,28 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
err = r.error or "wrong tool"
lines.append(f"| {r.test_id} | {r.category} | {r.expected_tool} | {got} | {err[:60]} |")
lines.append("")
if not any_failures:
lines.append("No model failures detected.")
lines.append("No failures detected.")
lines.append("")
skipped_results = [r for r in results if r.skipped]
lines.append("## Skipped / unavailable cases")
lines.append("")
if skipped_results:
lines.append("| Test | Model | Category | Reason |")
lines.append("|------|-------|----------|--------|")
for r in skipped_results:
lines.append(f"| {r.test_id} | {r.model} | {r.category} | {r.skip_reason[:80]} |")
else:
lines.append("No cases were skipped.")
lines.append("")
lines.append("## Raw results")
# Raw results JSON
lines.append("## Raw Results")
lines.append("")
lines.append("```json")
lines.append(json.dumps([asdict(r) for r in results], indent=2, default=str))
lines.append("```")
report = "\n".join(lines)
output_path.write_text(report, encoding="utf-8")
output_path.write_text(report)
return report
def main():
parser = argparse.ArgumentParser(description="Tool-calling benchmark")
parser.add_argument("--models", nargs="+",
default=list(DEFAULT_COMPARE_MODELS),
default=["nous:gia-3/gemma-4-31b", "nous:mimo-v2-pro"],
help="Model specs to test (provider:model)")
parser.add_argument("--compare", action="store_true",
help="Use the issue #796 default comparison set")
parser.add_argument("--limit", type=int, default=0,
help="Run only first N tests (0 = all)")
parser.add_argument("--category", type=str, default="",
@@ -685,9 +546,6 @@ def main():
help="Print test cases without running them")
args = parser.parse_args()
if args.compare:
args.models = list(DEFAULT_COMPARE_MODELS)
# Filter suite
suite = SUITE[:]
if args.category:

File diff suppressed because it is too large.


@@ -0,0 +1,217 @@
# TensorZero Evaluation Packet
Issue #860: [tensorzero LLMOps platform evaluation](https://forge.alexanderwhitestone.com/Timmy_Foundation/hermes-agent/issues/860)
## Scope
This packet evaluates TensorZero as a possible replacement for Hermes' custom provider-routing stack.
It is intentionally grounded in the current repo state rather than a speculative cutover plan.
## Issue requirements being evaluated
- Deploy tensorzero gateway (Rust binary)
- Migrate provider routing config
- Test with canary (10% traffic) before full cutover
- Feed session data for prompt optimization
- Evaluation suite for A/B testing models
## Recommendation
Not ready for direct replacement. Recommend a shadow-evaluation phase first: keep Hermes routing live, inventory the migration seams, export SessionDB/trajectory data into an offline TensorZero experiment loop, and only design a canary gateway once percentage-based rollout controls exist.
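To make the missing rollout control concrete, a 10% canary split can be as simple as a deterministic hash over the session id. The sketch below is a hypothetical illustration only: `route_canary`, its return labels, and the `canary_pct` parameter are assumptions, not an existing Hermes or TensorZero interface.
```python
# Hypothetical sketch of a percentage-based canary split; not part of Hermes
# or TensorZero. Hashing the session id keeps a given session on one side.
import hashlib

def route_canary(session_id: str, canary_pct: int = 10) -> str:
    digest = hashlib.sha256(session_id.encode("utf-8")).digest()
    bucket = int.from_bytes(digest[:8], "big") % 100  # roughly uniform, 0..99
    return "tensorzero-canary" if bucket < canary_pct else "hermes-primary"
```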
## Requirement matrix
| Requirement | Status | Evidence labels | Summary |
| --- | --- | --- | --- |
| Gateway replacement scope | partial | fallback_chain, runtime_provider, gateway_provider_routing, cron_runtime_provider, auxiliary_fallback_chain, delegate_runtime_provider | Hermes already spreads provider routing across core agent, runtime provider, gateway, cron, auxiliary, and delegation seams; TensorZero would need parity across all of them before it can replace the gateway layer. |
| Config migration | partial | provider_routing_config, runtime_provider, smart_model_routing, fallback_chain | Hermes has multiple config concepts to migrate (`provider_routing`, `fallback_providers`, `smart_model_routing`, runtime provider resolution), so TensorZero is not a drop-in config swap. |
| 10% traffic canary | gap | — | The repo shows semantic routing and fallback, but no grounded 10% traffic-split canary mechanism. A TensorZero cutover would need new percentage-based rollout controls and observability hooks. |
| Session data for prompt optimization | partial | session_db, trajectory_export | Hermes already has SessionDB and trajectory export surfaces that can feed offline optimization data, but not a TensorZero-native ingestion path yet. |
| Evaluation suite / A/B testing | partial | benchmark_suite, trajectory_export | Hermes already has benchmark/trajectory machinery that can seed TensorZero A/B evaluation, but no integrated TensorZero experiment runner or live evaluation gateway (see the sketch after this table). |
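As a concrete seed for the A/B requirement, the benchmark's raw-results JSON (shown in the report above) already carries per-call `model`, `success`, and `latency_s` fields. The sketch below is a hedged illustration of aggregating those records into per-model comparison metrics; the `summarize_ab` helper is hypothetical and not part of the existing benchmark harness.
```python
# Hedged sketch: aggregate the benchmark's raw-results JSON into per-model
# A/B metrics. Only the record fields (model, success, latency_s) come from
# the existing benchmark output; summarize_ab itself is hypothetical.
import json
from collections import defaultdict
from pathlib import Path

def summarize_ab(raw_results_path: Path) -> dict:
    records = json.loads(raw_results_path.read_text(encoding="utf-8"))
    grouped: dict[str, list[dict]] = defaultdict(list)
    for rec in records:
        grouped[rec["model"]].append(rec)
    return {
        model: {
            "success_rate": sum(r["success"] for r in recs) / len(recs),
            "avg_latency_s": sum(r["latency_s"] for r in recs) / len(recs),
        }
        for model, recs in grouped.items()
    }
```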
## Grounded Hermes touchpoints
- `run_agent.py:601` — [fallback_chain] fallback_model: Dict[str, Any] = None,
- `run_agent.py:995` — [fallback_chain] # failure). Supports both legacy single-dict ``fallback_model`` and
- `run_agent.py:996` — [fallback_chain] # new list ``fallback_providers`` format.
- `run_agent.py:997` — [fallback_chain] if isinstance(fallback_model, list):
- `run_agent.py:998` — [fallback_chain] self._fallback_chain = [
- `run_agent.py:999` — [fallback_chain] f for f in fallback_model
- `run_agent.py:1002` — [fallback_chain] elif isinstance(fallback_model, dict) and fallback_model.get("provider") and fallback_model.get("model"):
- `run_agent.py:1003` — [fallback_chain] self._fallback_chain = [fallback_model]
- `run_agent.py:1005` — [fallback_chain] self._fallback_chain = []
- `run_agent.py:1009` — [fallback_chain] self._fallback_model = self._fallback_chain[0] if self._fallback_chain else None
- `run_agent.py:1010` — [fallback_chain] if self._fallback_chain and not self.quiet_mode:
- `run_agent.py:1011` — [fallback_chain] if len(self._fallback_chain) == 1:
- `run_agent.py:1012` — [fallback_chain] fb = self._fallback_chain[0]
- `run_agent.py:1015` — [fallback_chain] print(f"🔄 Fallback chain ({len(self._fallback_chain)} providers): " +
- `run_agent.py:1016` — [fallback_chain] " → ".join(f"{f['model']} ({f['provider']})" for f in self._fallback_chain))
- `run_agent.py:5624` — [fallback_chain] if self._fallback_index >= len(self._fallback_chain):
- `run_agent.py:5627` — [fallback_chain] fb = self._fallback_chain[self._fallback_index]
- `run_agent.py:8559` — [fallback_chain] if self._fallback_index < len(self._fallback_chain):
- `run_agent.py:9355` — [fallback_chain] if is_rate_limited and self._fallback_index < len(self._fallback_chain):
- `run_agent.py:10460` — [fallback_chain] if _truly_empty and self._fallback_chain:
- `run_agent.py:10514` — [fallback_chain] + (" and fallback attempts." if self._fallback_chain else
- `cli.py:241` — [provider_routing_config] "smart_model_routing": {
- `cli.py:370` — [provider_routing_config] # (e.g. platform_toolsets, provider_routing, memory, honcho, etc.)
- `cli.py:1753` — [provider_routing_config] pr = CLI_CONFIG.get("provider_routing", {}) or {}
- `cli.py:1762` — [provider_routing_config] # Supports new list format (fallback_providers) and legacy single-dict (fallback_model).
- `cli.py:1763` — [provider_routing_config] fb = CLI_CONFIG.get("fallback_providers") or CLI_CONFIG.get("fallback_model") or []
- `cli.py:1770` — [provider_routing_config] self._smart_model_routing = CLI_CONFIG.get("smart_model_routing", {}) or {}
- `cli.py:2771` — [provider_routing_config] from agent.smart_model_routing import resolve_turn_route
- `cli.py:2776` — [provider_routing_config] self._smart_model_routing,
- `hermes_cli/runtime_provider.py:209` — [runtime_provider] def resolve_requested_provider(requested: Optional[str] = None) -> str:
- `hermes_cli/runtime_provider.py:649` — [runtime_provider] def resolve_runtime_provider(
- `agent/smart_model_routing.py:62` — [smart_model_routing] def choose_cheap_model_route(user_message: str, routing_config: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
- `agent/smart_model_routing.py:110` — [smart_model_routing] def resolve_turn_route(user_message: str, routing_config: Optional[Dict[str, Any]], primary: Dict[str, Any]) -> Dict[str, Any]:
- `gateway/run.py:1271` — [gateway_provider_routing] def _load_provider_routing() -> dict:
- `gateway/run.py:1285` — [gateway_provider_routing] def _load_fallback_model() -> list | dict | None:
- `gateway/run.py:1306` — [gateway_provider_routing] def _load_smart_model_routing() -> dict:
- `cron/scheduler.py:684` — [cron_runtime_provider] pr = _cfg.get("provider_routing", {})
- `cron/scheduler.py:688` — [cron_runtime_provider] resolve_runtime_provider,
- `cron/scheduler.py:697` — [cron_runtime_provider] runtime = resolve_runtime_provider(**runtime_kwargs)
- `cron/scheduler.py:702` — [cron_runtime_provider] from agent.smart_model_routing import resolve_turn_route
- `cron/scheduler.py:703` — [cron_runtime_provider] turn_route = resolve_turn_route(
- `cron/scheduler.py:717` — [cron_runtime_provider] fallback_model = _cfg.get("fallback_providers") or _cfg.get("fallback_model") or None
- `cron/scheduler.py:746` — [cron_runtime_provider] fallback_model=fallback_model,
- `agent/auxiliary_client.py:1018` — [auxiliary_fallback_chain] def _get_provider_chain() -> List[tuple]:
- `agent/auxiliary_client.py:1107` — [auxiliary_fallback_chain] for label, try_fn in _get_provider_chain():
- `agent/auxiliary_client.py:1189` — [auxiliary_fallback_chain] # ── Step 2: aggregator / fallback chain ──────────────────────────────
- `agent/auxiliary_client.py:1191` — [auxiliary_fallback_chain] for label, try_fn in _get_provider_chain():
- `agent/auxiliary_client.py:2397` — [auxiliary_fallback_chain] # error, fall through to the fallback chain below.
- `agent/auxiliary_client.py:2417` — [auxiliary_fallback_chain] # auto (the default) = best-effort fallback chain. (#7559)
- `agent/auxiliary_client.py:2589` — [auxiliary_fallback_chain] # error, fall through to the fallback chain below.
- `tools/delegate_tool.py:662` — [delegate_runtime_provider] # bundle (base_url, api_key, api_mode) via the same runtime provider system
- `tools/delegate_tool.py:854` — [delegate_runtime_provider] provider) is resolved via the runtime provider system — the same path used
- `tools/delegate_tool.py:909` — [delegate_runtime_provider] from hermes_cli.runtime_provider import resolve_runtime_provider
- `tools/delegate_tool.py:910` — [delegate_runtime_provider] runtime = resolve_runtime_provider(requested=configured_provider)
- `hermes_state.py:115` — [session_db] class SessionDB:
- `batch_runner.py:320` — [trajectory_export] save_trajectories=False, # We handle saving ourselves
- `batch_runner.py:346` — [trajectory_export] trajectory = agent._convert_to_trajectory_format(
- `batch_runner.py:460` — [trajectory_export] trajectory_entry = {
- `batch_runner.py:474` — [trajectory_export] f.write(json.dumps(trajectory_entry, ensure_ascii=False) + "\n")
- `benchmarks/tool_call_benchmark.py:3` — [benchmark_suite] Tool-Calling Benchmark — Gemma 4 vs mimo-v2-pro regression test.
- `benchmarks/tool_call_benchmark.py:9` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py # full 100-call suite
- `benchmarks/tool_call_benchmark.py:10` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py --limit 10 # quick smoke test
- `benchmarks/tool_call_benchmark.py:11` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py --models nous # single model
- `benchmarks/tool_call_benchmark.py:12` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py --category file # single category
- `benchmarks/tool_call_benchmark.py:37` — [benchmark_suite] class ToolCall:
- `benchmarks/tool_call_benchmark.py:51` — [benchmark_suite] ToolCall("file-01", "file", "Read the file /tmp/test_bench.txt and show me its contents.",
- `benchmarks/tool_call_benchmark.py:53` — [benchmark_suite] ToolCall("file-02", "file", "Write 'hello benchmark' to /tmp/test_bench_out.txt",
- `benchmarks/tool_call_benchmark.py:55` — [benchmark_suite] ToolCall("file-03", "file", "Search for the word 'import' in all Python files in the current directory.",
- `benchmarks/tool_call_benchmark.py:57` — [benchmark_suite] ToolCall("file-04", "file", "Read lines 1-20 of /etc/hosts",
- `benchmarks/tool_call_benchmark.py:59` — [benchmark_suite] ToolCall("file-05", "file", "Patch /tmp/test_bench_out.txt: replace 'hello' with 'goodbye'",
- `benchmarks/tool_call_benchmark.py:61` — [benchmark_suite] ToolCall("file-06", "file", "Search for files matching *.py in the current directory.",
- `benchmarks/tool_call_benchmark.py:63` — [benchmark_suite] ToolCall("file-07", "file", "Read the first 10 lines of /etc/passwd",
- `benchmarks/tool_call_benchmark.py:65` — [benchmark_suite] ToolCall("file-08", "file", "Write a JSON config to /tmp/bench_config.json with key 'debug': true",
- `benchmarks/tool_call_benchmark.py:67` — [benchmark_suite] ToolCall("file-09", "file", "Search for 'def test_' in Python test files.",
- `benchmarks/tool_call_benchmark.py:69` — [benchmark_suite] ToolCall("file-10", "file", "Read /tmp/bench_config.json and tell me what's in it.",
- `benchmarks/tool_call_benchmark.py:71` — [benchmark_suite] ToolCall("file-11", "file", "Create a file /tmp/bench_readme.md with one line: '# Benchmark'",
- `benchmarks/tool_call_benchmark.py:73` — [benchmark_suite] ToolCall("file-12", "file", "Search for 'TODO' comments in all .py files.",
- `benchmarks/tool_call_benchmark.py:75` — [benchmark_suite] ToolCall("file-13", "file", "Read /tmp/bench_readme.md",
- `benchmarks/tool_call_benchmark.py:77` — [benchmark_suite] ToolCall("file-14", "file", "Patch /tmp/bench_readme.md: replace '# Benchmark' with '# Tool Benchmark'",
- `benchmarks/tool_call_benchmark.py:78` — [benchmark_suite] "patch", "Tool Benchmark"),
- `benchmarks/tool_call_benchmark.py:79` — [benchmark_suite] ToolCall("file-15", "file", "Write a Python one-liner to /tmp/bench_hello.py that prints hello.",
- `benchmarks/tool_call_benchmark.py:81` — [benchmark_suite] ToolCall("file-16", "file", "Search for all .json files in /tmp/.",
- `benchmarks/tool_call_benchmark.py:83` — [benchmark_suite] ToolCall("file-17", "file", "Read /tmp/bench_hello.py and verify it has print('hello').",
- `benchmarks/tool_call_benchmark.py:85` — [benchmark_suite] ToolCall("file-18", "file", "Patch /tmp/bench_hello.py to print 'hello world' instead of 'hello'.",
- `benchmarks/tool_call_benchmark.py:87` — [benchmark_suite] ToolCall("file-19", "file", "List files matching 'bench*' in /tmp/.",
- `benchmarks/tool_call_benchmark.py:89` — [benchmark_suite] ToolCall("file-20", "file", "Read /tmp/test_bench.txt again and summarize its contents.",
- `benchmarks/tool_call_benchmark.py:93` — [benchmark_suite] ToolCall("term-01", "terminal", "Run `echo hello world` in the terminal.",
- `benchmarks/tool_call_benchmark.py:95` — [benchmark_suite] ToolCall("term-02", "terminal", "Run `date` to get the current date and time.",
- `benchmarks/tool_call_benchmark.py:97` — [benchmark_suite] ToolCall("term-03", "terminal", "Run `uname -a` to get system information.",
- `benchmarks/tool_call_benchmark.py:99` — [benchmark_suite] ToolCall("term-04", "terminal", "Run `pwd` to show the current directory.",
- `benchmarks/tool_call_benchmark.py:101` — [benchmark_suite] ToolCall("term-05", "terminal", "Run `ls -la /tmp/ | head -20` to list temp files.",
- `benchmarks/tool_call_benchmark.py:103` — [benchmark_suite] ToolCall("term-06", "terminal", "Run `whoami` to show the current user.",
- `benchmarks/tool_call_benchmark.py:105` — [benchmark_suite] ToolCall("term-07", "terminal", "Run `df -h` to show disk usage.",
- `benchmarks/tool_call_benchmark.py:107` — [benchmark_suite] ToolCall("term-08", "terminal", "Run `python3 --version` to check Python version.",
- `benchmarks/tool_call_benchmark.py:109` — [benchmark_suite] ToolCall("term-09", "terminal", "Run `cat /etc/hostname` to get the hostname.",
- `benchmarks/tool_call_benchmark.py:111` — [benchmark_suite] ToolCall("term-10", "terminal", "Run `uptime` to see system uptime.",
- `benchmarks/tool_call_benchmark.py:113` — [benchmark_suite] ToolCall("term-11", "terminal", "Run `env | grep PATH` to show the PATH variable.",
- `benchmarks/tool_call_benchmark.py:115` — [benchmark_suite] ToolCall("term-12", "terminal", "Run `wc -l /etc/passwd` to count lines.",
- `benchmarks/tool_call_benchmark.py:117` — [benchmark_suite] ToolCall("term-13", "terminal", "Run `echo $SHELL` to show the current shell.",
- `benchmarks/tool_call_benchmark.py:119` — [benchmark_suite] ToolCall("term-14", "terminal", "Run `free -h || vm_stat` to check memory usage.",
- `benchmarks/tool_call_benchmark.py:121` — [benchmark_suite] ToolCall("term-15", "terminal", "Run `id` to show user and group IDs.",
- `benchmarks/tool_call_benchmark.py:123` — [benchmark_suite] ToolCall("term-16", "terminal", "Run `hostname` to get the machine hostname.",
- `benchmarks/tool_call_benchmark.py:125` — [benchmark_suite] ToolCall("term-17", "terminal", "Run `echo {1..5}` to test brace expansion.",
- `benchmarks/tool_call_benchmark.py:127` — [benchmark_suite] ToolCall("term-18", "terminal", "Run `seq 1 5` to generate a number sequence.",
- `benchmarks/tool_call_benchmark.py:129` — [benchmark_suite] ToolCall("term-19", "terminal", "Run `python3 -c 'print(2+2)'` to compute 2+2.",
- `benchmarks/tool_call_benchmark.py:131` — [benchmark_suite] ToolCall("term-20", "terminal", "Run `ls -d /tmp/bench* 2>/dev/null | wc -l` to count bench files.",
- `benchmarks/tool_call_benchmark.py:135` — [benchmark_suite] ToolCall("code-01", "code", "Execute a Python script that computes factorial of 10.",
- `benchmarks/tool_call_benchmark.py:137` — [benchmark_suite] ToolCall("code-02", "code", "Run Python to read /tmp/test_bench.txt and count its words.",
- `benchmarks/tool_call_benchmark.py:139` — [benchmark_suite] ToolCall("code-03", "code", "Execute Python to generate the first 20 Fibonacci numbers.",
- `benchmarks/tool_call_benchmark.py:141` — [benchmark_suite] ToolCall("code-04", "code", "Run Python to parse JSON from a string and print keys.",
- `benchmarks/tool_call_benchmark.py:143` — [benchmark_suite] ToolCall("code-05", "code", "Execute Python to list all files in /tmp/ matching 'bench*'.",
- `benchmarks/tool_call_benchmark.py:145` — [benchmark_suite] ToolCall("code-06", "code", "Run Python to compute the sum of squares from 1 to 100.",
- `benchmarks/tool_call_benchmark.py:147` — [benchmark_suite] ToolCall("code-07", "code", "Execute Python to check if 'racecar' is a palindrome.",
- `benchmarks/tool_call_benchmark.py:149` — [benchmark_suite] ToolCall("code-08", "code", "Run Python to create a CSV string with 5 rows of sample data.",
- `benchmarks/tool_call_benchmark.py:151` — [benchmark_suite] ToolCall("code-09", "code", "Execute Python to sort a list [5,2,8,1,9] and print the result.",
- `benchmarks/tool_call_benchmark.py:153` — [benchmark_suite] ToolCall("code-10", "code", "Run Python to count lines in /etc/passwd.",
- `benchmarks/tool_call_benchmark.py:155` — [benchmark_suite] ToolCall("code-11", "code", "Execute Python to hash the string 'benchmark' with SHA256.",
- `benchmarks/tool_call_benchmark.py:157` — [benchmark_suite] ToolCall("code-12", "code", "Run Python to get the current UTC timestamp.",
- `benchmarks/tool_call_benchmark.py:159` — [benchmark_suite] ToolCall("code-13", "code", "Execute Python to convert 'hello world' to uppercase and reverse it.",
- `benchmarks/tool_call_benchmark.py:161` — [benchmark_suite] ToolCall("code-14", "code", "Run Python to create a dictionary of system info (platform, python version).",
- `benchmarks/tool_call_benchmark.py:163` — [benchmark_suite] ToolCall("code-15", "code", "Execute Python to check internet connectivity by resolving google.com.",
- `benchmarks/tool_call_benchmark.py:167` — [benchmark_suite] ToolCall("deleg-01", "delegate", "Use a subagent to find all .log files in /tmp/.",
- `benchmarks/tool_call_benchmark.py:169` — [benchmark_suite] ToolCall("deleg-02", "delegate", "Delegate to a subagent: what is 15 * 37?",
- `benchmarks/tool_call_benchmark.py:171` — [benchmark_suite] ToolCall("deleg-03", "delegate", "Use a subagent to check if Python 3 is installed and its version.",
- `benchmarks/tool_call_benchmark.py:173` — [benchmark_suite] ToolCall("deleg-04", "delegate", "Delegate: read /tmp/test_bench.txt and summarize it in one sentence.",
- `benchmarks/tool_call_benchmark.py:175` — [benchmark_suite] ToolCall("deleg-05", "delegate", "Use a subagent to list the contents of /tmp/ directory.",
- `benchmarks/tool_call_benchmark.py:177` — [benchmark_suite] ToolCall("deleg-06", "delegate", "Delegate: count the number of .py files in the current directory.",
- `benchmarks/tool_call_benchmark.py:179` — [benchmark_suite] ToolCall("deleg-07", "delegate", "Use a subagent to check disk space with df -h.",
- `benchmarks/tool_call_benchmark.py:181` — [benchmark_suite] ToolCall("deleg-08", "delegate", "Delegate: what OS are we running on?",
- `benchmarks/tool_call_benchmark.py:183` — [benchmark_suite] ToolCall("deleg-09", "delegate", "Use a subagent to find the hostname of this machine.",
- `benchmarks/tool_call_benchmark.py:185` — [benchmark_suite] ToolCall("deleg-10", "delegate", "Delegate: create a temp file /tmp/bench_deleg.txt with 'done'.",
- `benchmarks/tool_call_benchmark.py:189` — [benchmark_suite] ToolCall("todo-01", "todo", "Add a todo item: 'Run benchmark suite'",
- `benchmarks/tool_call_benchmark.py:190` — [benchmark_suite] "todo", "benchmark"),
- `benchmarks/tool_call_benchmark.py:191` — [benchmark_suite] ToolCall("todo-02", "todo", "Show me the current todo list.",
- `benchmarks/tool_call_benchmark.py:193` — [benchmark_suite] ToolCall("todo-03", "todo", "Mark the first todo item as completed.",
- `benchmarks/tool_call_benchmark.py:195` — [benchmark_suite] ToolCall("todo-04", "todo", "Add a todo: 'Review benchmark results' with status pending.",
- `benchmarks/tool_call_benchmark.py:197` — [benchmark_suite] ToolCall("todo-05", "todo", "Clear all completed todos.",
- `benchmarks/tool_call_benchmark.py:199` — [benchmark_suite] ToolCall("todo-06", "memory", "Save this to memory: 'benchmark ran on {date}'".format(
- `benchmarks/tool_call_benchmark.py:201` — [benchmark_suite] "memory", "benchmark"),
- `benchmarks/tool_call_benchmark.py:202` — [benchmark_suite] ToolCall("todo-07", "memory", "Search memory for 'benchmark'.",
- `benchmarks/tool_call_benchmark.py:203` — [benchmark_suite] "memory", "benchmark"),
- `benchmarks/tool_call_benchmark.py:204` — [benchmark_suite] ToolCall("todo-08", "memory", "Add a memory note: 'test models are gemma-4 and mimo-v2-pro'.",
- `benchmarks/tool_call_benchmark.py:206` — [benchmark_suite] ToolCall("todo-09", "todo", "Add three todo items: 'analyze', 'report', 'cleanup'.",
- `benchmarks/tool_call_benchmark.py:208` — [benchmark_suite] ToolCall("todo-10", "memory", "Search memory for any notes about models.",
- `benchmarks/tool_call_benchmark.py:212` — [benchmark_suite] ToolCall("skill-01", "skills", "List all available skills.",
- `benchmarks/tool_call_benchmark.py:214` — [benchmark_suite] ToolCall("skill-02", "skills", "View the skill called 'test-driven-development'.",
- `benchmarks/tool_call_benchmark.py:216` — [benchmark_suite] ToolCall("skill-03", "skills", "Search for skills related to 'git'.",
- `benchmarks/tool_call_benchmark.py:218` — [benchmark_suite] ToolCall("skill-04", "skills", "View the 'code-review' skill.",
- `benchmarks/tool_call_benchmark.py:220` — [benchmark_suite] ToolCall("skill-05", "skills", "List all skills in the 'devops' category.",
- `benchmarks/tool_call_benchmark.py:222` — [benchmark_suite] ToolCall("skill-06", "skills", "View the 'systematic-debugging' skill.",
- `benchmarks/tool_call_benchmark.py:224` — [benchmark_suite] ToolCall("skill-07", "skills", "Search for skills about 'testing'.",
- `benchmarks/tool_call_benchmark.py:226` — [benchmark_suite] ToolCall("skill-08", "skills", "View the 'writing-plans' skill.",
- `benchmarks/tool_call_benchmark.py:228` — [benchmark_suite] ToolCall("skill-09", "skills", "List skills in 'software-development' category.",
- `benchmarks/tool_call_benchmark.py:230` — [benchmark_suite] ToolCall("skill-10", "skills", "View the 'pr-review-discipline' skill.",
- `benchmarks/tool_call_benchmark.py:234` — [benchmark_suite] ToolCall("file-21", "file", "Write a Python snippet to /tmp/bench_sort.py that sorts [3,1,2].",
- `benchmarks/tool_call_benchmark.py:236` — [benchmark_suite] ToolCall("file-22", "file", "Read /tmp/bench_sort.py back and confirm it exists.",
- `benchmarks/tool_call_benchmark.py:238` — [benchmark_suite] ToolCall("file-23", "file", "Search for 'class' in all .py files in the benchmarks directory.",
- `benchmarks/tool_call_benchmark.py:240` — [benchmark_suite] ToolCall("term-21", "terminal", "Run `cat /etc/os-release 2>/dev/null || sw_vers 2>/dev/null` for OS info.",
- `benchmarks/tool_call_benchmark.py:242` — [benchmark_suite] ToolCall("term-22", "terminal", "Run `nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null` for CPU count.",
- `benchmarks/tool_call_benchmark.py:244` — [benchmark_suite] ToolCall("code-16", "code", "Execute Python to flatten a nested list [[1,2],[3,4],[5]].",
- `benchmarks/tool_call_benchmark.py:246` — [benchmark_suite] ToolCall("code-17", "code", "Run Python to check if a number 17 is prime.",
- `benchmarks/tool_call_benchmark.py:248` — [benchmark_suite] ToolCall("deleg-11", "delegate", "Delegate: what is the current working directory?",
- `benchmarks/tool_call_benchmark.py:250` — [benchmark_suite] ToolCall("todo-11", "todo", "Add a todo: 'Finalize benchmark report' status pending.",
- `benchmarks/tool_call_benchmark.py:252` — [benchmark_suite] ToolCall("todo-12", "memory", "Store fact: 'benchmark categories: file, terminal, code, delegate, todo, memory, skills'.",
- `benchmarks/tool_call_benchmark.py:254` — [benchmark_suite] ToolCall("skill-11", "skills", "Search for skills about 'deployment'.",
- `benchmarks/tool_call_benchmark.py:256` — [benchmark_suite] ToolCall("skill-12", "skills", "View the 'gitea-burn-cycle' skill.",
- `benchmarks/tool_call_benchmark.py:258` — [benchmark_suite] ToolCall("skill-13", "skills", "List all available skill categories.",
- `benchmarks/tool_call_benchmark.py:260` — [benchmark_suite] ToolCall("skill-14", "skills", "Search for skills related to 'memory'.",
- `benchmarks/tool_call_benchmark.py:262` — [benchmark_suite] ToolCall("skill-15", "skills", "View the 'mimo-swarm' skill.",
- `benchmarks/tool_call_benchmark.py:311` — [benchmark_suite] """Create prerequisite files for the benchmark."""
- `benchmarks/tool_call_benchmark.py:313` — [benchmark_suite] "This is a benchmark test file.\n"
- `benchmarks/tool_call_benchmark.py:349` — [benchmark_suite] "You are a benchmark test runner. Execute the user's request by calling "
- `benchmarks/tool_call_benchmark.py:406` — [benchmark_suite] """Generate markdown benchmark report."""
- `benchmarks/tool_call_benchmark.py:428` — [benchmark_suite] f"# Tool-Calling Benchmark Report",
- `benchmarks/tool_call_benchmark.py:535` — [benchmark_suite] parser = argparse.ArgumentParser(description="Tool-calling benchmark")
- `benchmarks/tool_call_benchmark.py:544` — [benchmark_suite] help="Output report path (default: benchmarks/gemma4-tool-calling-YYYY-MM-DD.md)")
- `benchmarks/tool_call_benchmark.py:565` — [benchmark_suite] output_path = Path(args.output) if args.output else REPO_ROOT / "benchmarks" / f"gemma4-tool-calling-{date_str}.md"
- `benchmarks/tool_call_benchmark.py:575` — [benchmark_suite] print(f"Benchmark: {len(suite)} tests × {len(model_specs)} models = {len(suite) * len(model_specs)} calls")
## Suggested next slice
1. Build an exporter that emits SessionDB + trajectory data into a TensorZero-friendly offline dataset (a minimal sketch follows this list).
2. Define percentage-based canary controls before attempting any gateway replacement.
3. Keep Hermes routing authoritative until TensorZero proves parity across CLI, gateway, cron, auxiliary, and delegation surfaces.
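For item 1, a minimal exporter could start from the trajectory JSONL that `batch_runner.py` already writes and flatten each entry into one row per example. The sketch below is an assumption-laden starting point: the row schema and the field names read from each entry (`prompt`, `final_response`, `model`, `session_id`) are placeholders, not a TensorZero-native ingestion format.
```python
# Hypothetical exporter sketch. The output row schema is an assumption, not a
# TensorZero-native format; only the one-JSON-object-per-line layout mirrors
# the trajectory JSONL that batch_runner.py writes.
import json
from pathlib import Path
from typing import Iterator

def to_offline_rows(trajectory_path: Path) -> Iterator[dict]:
    """Yield one flat row per trajectory entry for offline evaluation."""
    with trajectory_path.open(encoding="utf-8") as fh:
        for line in fh:
            entry = json.loads(line)
            yield {
                "input": entry.get("prompt") or entry.get("messages"),
                "output": entry.get("final_response"),
                "metadata": {k: entry.get(k) for k in ("model", "session_id")},
            }

def export_dataset(trajectory_path: Path, out_path: Path) -> None:
    with out_path.open("w", encoding="utf-8") as out:
        for row in to_offline_rows(trajectory_path):
            out.write(json.dumps(row, ensure_ascii=False) + "\n")
```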


@@ -0,0 +1,318 @@
#!/usr/bin/env python3
"""Generate a grounded TensorZero evaluation packet for Hermes.
This script inventories the current Hermes routing/evaluation surfaces, then
builds a markdown packet assessing how much of issue #860 can be satisfied by
TensorZero and where the migration risk still lives.
"""
from __future__ import annotations
import argparse
import json
import re
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Iterable
ISSUE_NUMBER = 860
ISSUE_TITLE = "tensorzero LLMOps platform evaluation"
ISSUE_URL = "https://forge.alexanderwhitestone.com/Timmy_Foundation/hermes-agent/issues/860"
DEFAULT_OUTPUT = Path("docs/evaluations/tensorzero-860-evaluation.md")
DEFAULT_JSON_OUTPUT = Path("docs/evaluations/tensorzero-860-evaluation.json")
@dataclass(frozen=True)
class TouchpointPattern:
label: str
file_path: str
regex: str
description: str
@dataclass(frozen=True)
class Touchpoint:
label: str
file_path: str
line_number: int
matched_text: str
@dataclass(frozen=True)
class RequirementStatus:
key: str
name: str
status: str
evidence_labels: tuple[str, ...]
summary: str
@dataclass(frozen=True)
class EvaluationReport:
issue_number: int
issue_title: str
issue_url: str
recommendation: str
touchpoints: tuple[Touchpoint, ...]
requirements: tuple[RequirementStatus, ...]
PATTERNS: tuple[TouchpointPattern, ...] = (
TouchpointPattern(
label="fallback_chain",
file_path="run_agent.py",
regex=r"_fallback_chain|fallback_providers|fallback_model",
description="Primary agent fallback-provider chain in the core conversation loop.",
),
TouchpointPattern(
label="provider_routing_config",
file_path="cli.py",
regex=r"provider_routing|fallback_providers|smart_model_routing",
description="CLI-owned provider routing and fallback configuration surfaces.",
),
TouchpointPattern(
label="runtime_provider",
file_path="hermes_cli/runtime_provider.py",
regex=r"def resolve_runtime_provider|def resolve_requested_provider",
description="Central runtime provider resolution for CLI, gateway, cron, and helpers.",
),
TouchpointPattern(
label="smart_model_routing",
file_path="agent/smart_model_routing.py",
regex=r"def resolve_turn_route|def choose_cheap_model_route",
description="Cheap-vs-strong turn routing that TensorZero would need to absorb or replace.",
),
TouchpointPattern(
label="gateway_provider_routing",
file_path="gateway/run.py",
regex=r"def _load_provider_routing|def _load_fallback_model|def _load_smart_model_routing",
description="Gateway-specific loading of routing, fallback, and smart-model policies.",
),
TouchpointPattern(
label="cron_runtime_provider",
file_path="cron/scheduler.py",
regex=r"resolve_runtime_provider|resolve_turn_route|provider_routing|fallback_model",
description="Cron execution path that re-resolves providers and routing on every run.",
),
TouchpointPattern(
label="auxiliary_fallback_chain",
file_path="agent/auxiliary_client.py",
regex=r"fallback chain|_get_provider_chain|provider chain",
description="Auxiliary task routing/fallback chain outside the main inference path.",
),
TouchpointPattern(
label="delegate_runtime_provider",
file_path="tools/delegate_tool.py",
regex=r"runtime provider system|resolve the full credential bundle|resolve_runtime_provider",
description="Subagent/delegation routing path that would also need TensorZero parity.",
),
TouchpointPattern(
label="session_db",
file_path="hermes_state.py",
regex=r"class SessionDB",
description="Session persistence surface that could feed TensorZero optimization/eval data.",
),
TouchpointPattern(
label="trajectory_export",
file_path="batch_runner.py",
regex=r"trajectory_entry|save_trajectories|_convert_to_trajectory_format",
description="Trajectory export surface for offline optimization and replay data.",
),
TouchpointPattern(
label="benchmark_suite",
file_path="benchmarks/tool_call_benchmark.py",
regex=r"ToolCall\(|class ToolCall|benchmark",
description="Existing benchmark/evaluation harness that could map to TensorZero experiments.",
),
)
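# Yield one Touchpoint per line of `text` that matches the pattern's regex.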
def _iter_matches(pattern: TouchpointPattern, text: str) -> Iterable[Touchpoint]:
regex = re.compile(pattern.regex, re.IGNORECASE)
for line_number, line in enumerate(text.splitlines(), start=1):
if regex.search(line):
yield Touchpoint(
label=pattern.label,
file_path=pattern.file_path,
line_number=line_number,
matched_text=line.strip(),
)
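# Scan every pattern against the repo checkout, ignoring files that do not exist.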
def scan_touchpoints(repo_root: Path) -> list[Touchpoint]:
touchpoints: list[Touchpoint] = []
for pattern in PATTERNS:
path = repo_root / pattern.file_path
if not path.exists():
continue
text = path.read_text(encoding="utf-8")
touchpoints.extend(_iter_matches(pattern, text))
return touchpoints
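# Score the issue #860 requirements as "partial" or "gap" from the touchpoint
# labels alone. The 10% canary requirement is always a gap because no
# percentage-based traffic-split mechanism was found in the repo.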
def build_requirement_matrix(touchpoints: list[Touchpoint]) -> list[RequirementStatus]:
labels = {tp.label for tp in touchpoints}
matrix: list[RequirementStatus] = []
gateway_labels = (
"fallback_chain",
"runtime_provider",
"gateway_provider_routing",
"cron_runtime_provider",
"auxiliary_fallback_chain",
"delegate_runtime_provider",
)
gateway_hits = tuple(label for label in gateway_labels if label in labels)
gateway_status = "partial" if len(gateway_hits) >= 4 else "gap"
gateway_summary = (
"Hermes already spreads provider routing across core agent, runtime provider, gateway, cron, auxiliary, and delegation seams; "
"TensorZero would need parity across all of them before it can replace the gateway layer."
if gateway_hits else
"No grounded routing surfaces were found for a gateway replacement assessment."
)
matrix.append(RequirementStatus("gateway_replacement", "Gateway replacement scope", gateway_status, gateway_hits, gateway_summary))
config_labels = (
"provider_routing_config",
"runtime_provider",
"smart_model_routing",
"fallback_chain",
)
config_hits = tuple(label for label in config_labels if label in labels)
config_status = "partial" if len(config_hits) >= 3 else "gap"
config_summary = (
"Hermes has multiple config concepts to migrate (`provider_routing`, `fallback_providers`, `smart_model_routing`, runtime provider resolution), "
"so TensorZero is not a drop-in config swap."
if config_hits else
"No current config migration surface was found."
)
matrix.append(RequirementStatus("config_migration", "Config migration", config_status, config_hits, config_summary))
canary_hits: tuple[str, ...] = tuple()
canary_summary = (
"The repo shows semantic routing and fallback, but no grounded 10% traffic-split canary mechanism. "
"A TensorZero cutover would need new percentage-based rollout controls and observability hooks."
)
matrix.append(RequirementStatus("canary_rollout", "10% traffic canary", "gap", canary_hits, canary_summary))
session_labels = ("session_db", "trajectory_export")
session_hits = tuple(label for label in session_labels if label in labels)
session_status = "partial" if len(session_hits) == len(session_labels) else "gap"
session_summary = (
"Hermes already has SessionDB and trajectory export surfaces that can feed offline optimization data, "
"but not a TensorZero-native ingestion path yet."
if session_hits else
"No session-data surface was found for prompt optimization."
)
matrix.append(RequirementStatus("session_feedback", "Session data for prompt optimization", session_status, session_hits, session_summary))
eval_labels = ("benchmark_suite", "trajectory_export")
eval_hits = tuple(label for label in eval_labels if label in labels)
eval_status = "partial" if "benchmark_suite" in eval_hits else "gap"
eval_summary = (
"Hermes already has benchmark/trajectory machinery that can seed TensorZero A/B evaluation, "
"but no integrated TensorZero experiment runner or live evaluation gateway."
if eval_hits else
"No evaluation harness was found to support TensorZero A/B testing."
)
matrix.append(RequirementStatus("evaluation_suite", "Evaluation suite / A/B testing", eval_status, eval_hits, eval_summary))
return matrix
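# Bundle the touchpoints and requirement rows with the fixed shadow-evaluation recommendation.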
def build_report(touchpoints: list[Touchpoint], requirement_matrix: list[RequirementStatus]) -> EvaluationReport:
recommendation = (
"Not ready for direct replacement. Recommend a shadow-evaluation phase first: keep Hermes routing live, "
"inventory the migration seams, export SessionDB/trajectory data into an offline TensorZero experiment loop, "
"and only design a canary gateway once percentage-based rollout controls exist."
)
return EvaluationReport(
issue_number=ISSUE_NUMBER,
issue_title=ISSUE_TITLE,
issue_url=ISSUE_URL,
recommendation=recommendation,
touchpoints=tuple(touchpoints),
requirements=tuple(requirement_matrix),
)
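# Render the report as the markdown evaluation packet (written to DEFAULT_OUTPUT by default).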
def build_markdown(report: EvaluationReport) -> str:
lines: list[str] = []
lines.append("# TensorZero Evaluation Packet")
lines.append("")
lines.append(f"Issue #{report.issue_number}: [{report.issue_title}]({report.issue_url})")
lines.append("")
lines.append("## Scope")
lines.append("")
lines.append("This packet evaluates TensorZero as a possible replacement for Hermes' custom provider-routing stack.")
lines.append("It is intentionally grounded in the current repo state rather than a speculative cutover plan.")
lines.append("")
lines.append("## Issue requirements being evaluated")
lines.append("")
lines.append("- Deploy tensorzero gateway (Rust binary)")
lines.append("- Migrate provider routing config")
lines.append("- Test with canary (10% traffic) before full cutover")
lines.append("- Feed session data for prompt optimization")
lines.append("- Evaluation suite for A/B testing models")
lines.append("")
lines.append("## Recommendation")
lines.append("")
lines.append(report.recommendation)
lines.append("")
lines.append("## Requirement matrix")
lines.append("")
lines.append("| Requirement | Status | Evidence labels | Summary |")
lines.append("| --- | --- | --- | --- |")
for row in report.requirements:
evidence = ", ".join(row.evidence_labels) if row.evidence_labels else ""
lines.append(f"| {row.name} | {row.status} | {evidence} | {row.summary} |")
lines.append("")
lines.append("## Grounded Hermes touchpoints")
lines.append("")
if report.touchpoints:
for tp in report.touchpoints:
lines.append(f"- `{tp.file_path}:{tp.line_number}` — [{tp.label}] {tp.matched_text}")
else:
lines.append("- No routing/evaluation touchpoints were found.")
lines.append("")
lines.append("## Suggested next slice")
lines.append("")
lines.append("1. Build an exporter that emits SessionDB + trajectory data into a TensorZero-friendly offline dataset.")
lines.append("2. Define percentage-based canary controls before attempting any gateway replacement.")
lines.append("3. Keep Hermes routing authoritative until TensorZero proves parity across CLI, gateway, cron, auxiliary, and delegation surfaces.")
lines.append("")
return "\n".join(lines).rstrip() + "\n"
def write_outputs(report: EvaluationReport, markdown_path: Path, json_path: Path | None = None) -> None:
markdown_path.parent.mkdir(parents=True, exist_ok=True)
markdown_path.write_text(build_markdown(report), encoding="utf-8")
if json_path is not None:
json_path.parent.mkdir(parents=True, exist_ok=True)
json_path.write_text(json.dumps(asdict(report), indent=2), encoding="utf-8")
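# CLI surface: repo root to scan plus the markdown and JSON output paths.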
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Generate a grounded TensorZero evaluation packet for Hermes")
parser.add_argument("--repo-root", default=".", help="Hermes repo root to scan")
parser.add_argument("--output", default=str(DEFAULT_OUTPUT), help="Markdown output path")
parser.add_argument("--json-output", default=str(DEFAULT_JSON_OUTPUT), help="Optional JSON output path")
return parser.parse_args()
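# Entry point: scan, score, build the report, then write both outputs.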
def main() -> int:
args = parse_args()
repo_root = Path(args.repo_root).resolve()
touchpoints = scan_touchpoints(repo_root)
matrix = build_requirement_matrix(touchpoints)
report = build_report(touchpoints, matrix)
json_output = Path(args.json_output) if args.json_output else None
write_outputs(report, Path(args.output), json_output)
print(f"Wrote {args.output}")
if json_output is not None:
print(f"Wrote {json_output}")
return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -0,0 +1,149 @@
from pathlib import Path
import sys
SCRIPT_DIR = Path(__file__).resolve().parents[1] / "scripts"
sys.path.insert(0, str(SCRIPT_DIR))
import tensorzero_eval_packet as tz
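# Focused regression tests for the three seams of the packet generator:
# touchpoint scanning, requirement scoring, and markdown rendering.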
def test_scan_touchpoints_finds_expected_matches(tmp_path):
(tmp_path / "run_agent.py").write_text(
"self._fallback_chain = []\n# Provider fallback chain\n"
)
(tmp_path / "hermes_cli").mkdir()
(tmp_path / "hermes_cli" / "runtime_provider.py").write_text(
"def resolve_runtime_provider():\n return {}\n"
)
(tmp_path / "agent").mkdir()
(tmp_path / "agent" / "smart_model_routing.py").write_text(
"def resolve_turn_route(user_message, routing_config, primary):\n return primary\n"
)
(tmp_path / "gateway").mkdir()
(tmp_path / "gateway" / "run.py").write_text(
"def _load_provider_routing():\n return {}\n"
)
(tmp_path / "cron").mkdir()
(tmp_path / "cron" / "scheduler.py").write_text(
"runtime = resolve_runtime_provider()\nturn_route = resolve_turn_route('x', {}, {})\n"
)
(tmp_path / "hermes_state.py").write_text("class SessionDB:\n pass\n")
(tmp_path / "benchmarks").mkdir()
(tmp_path / "benchmarks" / "tool_call_benchmark.py").write_text(
"class ToolCall: ...\n"
)
touchpoints = tz.scan_touchpoints(tmp_path)
labels = {tp.label for tp in touchpoints}
assert "fallback_chain" in labels
assert "runtime_provider" in labels
assert "smart_model_routing" in labels
assert "gateway_provider_routing" in labels
assert "cron_runtime_provider" in labels
assert "session_db" in labels
assert "benchmark_suite" in labels
def test_build_requirement_matrix_marks_canary_as_gap_without_split_support():
touchpoints = [
tz.Touchpoint(
label="runtime_provider",
file_path="hermes_cli/runtime_provider.py",
line_number=10,
matched_text="def resolve_runtime_provider",
),
tz.Touchpoint(
label="provider_routing_config",
file_path="cli.py",
line_number=20,
matched_text='provider_routing',
),
tz.Touchpoint(
label="fallback_chain",
file_path="run_agent.py",
line_number=21,
matched_text='_fallback_chain = []',
),
tz.Touchpoint(
label="smart_model_routing",
file_path="agent/smart_model_routing.py",
line_number=30,
matched_text='resolve_turn_route',
),
tz.Touchpoint(
label="gateway_provider_routing",
file_path="gateway/run.py",
line_number=35,
matched_text='def _load_provider_routing',
),
tz.Touchpoint(
label="cron_runtime_provider",
file_path="cron/scheduler.py",
line_number=36,
matched_text='runtime = resolve_runtime_provider()',
),
tz.Touchpoint(
label="session_db",
file_path="hermes_state.py",
line_number=40,
matched_text='class SessionDB',
),
tz.Touchpoint(
label="trajectory_export",
file_path="batch_runner.py",
line_number=50,
matched_text='trajectory_entry',
),
tz.Touchpoint(
label="benchmark_suite",
file_path="benchmarks/tool_call_benchmark.py",
line_number=60,
matched_text='ToolCall',
),
]
matrix = tz.build_requirement_matrix(touchpoints)
by_key = {row.key: row for row in matrix}
assert by_key["gateway_replacement"].status == "partial"
assert by_key["config_migration"].status == "partial"
assert by_key["canary_rollout"].status == "gap"
assert by_key["session_feedback"].status == "partial"
assert by_key["evaluation_suite"].status == "partial"
def test_build_markdown_renders_recommendation_and_touchpoints():
touchpoints = [
tz.Touchpoint(
label="runtime_provider",
file_path="hermes_cli/runtime_provider.py",
line_number=10,
matched_text="def resolve_runtime_provider",
),
tz.Touchpoint(
label="session_db",
file_path="hermes_state.py",
line_number=40,
matched_text='class SessionDB',
),
]
matrix = tz.build_requirement_matrix(touchpoints)
report = tz.build_report(touchpoints, matrix)
markdown = tz.build_markdown(report)
assert "# TensorZero Evaluation Packet" in markdown
assert "gateway_replacement" not in markdown # human labels, not raw keys
assert "Gateway replacement scope" in markdown
assert "Not ready for direct replacement" in markdown
assert "hermes_cli/runtime_provider.py:10" in markdown
assert "hermes_state.py:40" in markdown
def test_issue_context_is_embedded_in_report():
report = tz.build_report([], [])
markdown = tz.build_markdown(report)
assert "Issue #860" in markdown
assert "tensorzero" in markdown.lower()
assert "10% traffic" in markdown

View File

@@ -1,115 +0,0 @@
"""Tests for Issue #796 tool-calling benchmark coverage and reporting."""
import sys
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import patch
sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))
from tool_call_benchmark import ( # noqa: E402
CallResult,
DEFAULT_COMPARE_MODELS,
ISSUE_796_CATEGORY_COUNTS,
ToolCall,
generate_report,
run_single_test,
suite_category_counts,
)
def test_suite_counts_match_issue_796_distribution():
counts = suite_category_counts()
assert counts == ISSUE_796_CATEGORY_COUNTS
assert sum(counts.values()) == 100
def test_default_compare_models_cover_issue_796_lanes():
assert len(DEFAULT_COMPARE_MODELS) == 3
assert any("gemma-4-31b" in spec for spec in DEFAULT_COMPARE_MODELS)
assert any("gemma-4-26b" in spec for spec in DEFAULT_COMPARE_MODELS)
assert any("mimo-v2-pro" in spec for spec in DEFAULT_COMPARE_MODELS)
def test_generate_report_includes_parallel_and_cost_metrics(tmp_path):
output_path = tmp_path / "report.md"
results = [
CallResult(
test_id="file-01",
category="file",
model="gemma-4-31b",
prompt="Read the file.",
expected_tool="read_file",
success=True,
tool_called="read_file",
schema_ok=True,
tool_args_valid=True,
execution_ok=True,
tool_count=2,
parallel_ok=True,
latency_s=1.25,
total_tokens=123,
estimated_cost_usd=0.0012,
cost_status="estimated",
),
CallResult(
test_id="web-01",
category="web",
model="mimo-v2-pro",
prompt="Search the web.",
expected_tool="web_search",
success=False,
tool_called="web_search",
schema_ok=True,
tool_args_valid=False,
execution_ok=False,
tool_count=1,
parallel_ok=False,
latency_s=2.5,
error="bad args",
total_tokens=456,
estimated_cost_usd=None,
cost_status="unknown",
skipped=True,
skip_reason="web_search unavailable",
),
]
report = generate_report(results, ["gemma-4-31b", "mimo-v2-pro"], output_path)
assert output_path.exists()
assert "Parallel tool success" in report
assert "Avg token cost per call (USD)" in report
assert "Skipped / unavailable" in report
assert "Requested category mix" in report
def test_run_single_test_skips_when_expected_tool_unavailable():
class FakeAgent:
def __init__(self, *args, **kwargs):
self.valid_tool_names = {"read_file", "terminal"}
self.session_input_tokens = 0
self.session_output_tokens = 0
self.session_cache_read_tokens = 0
self.session_cache_write_tokens = 0
self.session_api_calls = 0
self.base_url = ""
self.api_key = None
def run_conversation(self, *args, **kwargs):
raise AssertionError("run_conversation should not be called for unavailable tools")
tc = ToolCall(
id="mcp-01",
category="mcp",
prompt="Use an MCP tool to list resources.",
expected_tool="",
expected_tool_prefix="mcp_",
)
with patch.dict(sys.modules, {"run_agent": SimpleNamespace(AIAgent=FakeAgent)}):
result = run_single_test(tc, "gemini:gemma-4-31b-it", "gemini")
assert result.skipped is True
assert "mcp_" in result.skip_reason
assert result.success is False