Compare commits

2 Commits

| Author | SHA1 | Date |
|--------|------|------|
|        | 411aea9edf |  |
|        | 877005b06e |  |
agent/auxiliary_client.py

@@ -1,4 +1,4 @@
"""Shared auxiliary client router for side tasks.
from agent.telemetry_logger import log_token_usage\n"""Shared auxiliary client router for side tasks.

Provides a single resolution chain so every consumer (context compression,
session search, web extraction, vision analysis, browser vision) picks up
@@ -38,7 +38,6 @@ import json
import logging
import os
import threading
from agent.telemetry_logger import log_token_usage
import time
from pathlib import Path  # noqa: F401 — used by test mocks
from types import SimpleNamespace
@@ -123,16 +122,6 @@ _OR_HEADERS = {
    "X-OpenRouter-Categories": "productivity,cli-agent",
}

# Vercel AI Gateway app attribution headers. HTTP-Referer maps to
# referrerUrl and X-Title maps to appName in the gateway analytics.
from hermes_cli import __version__ as _HERMES_VERSION

_AI_GATEWAY_HEADERS = {
    "HTTP-Referer": "https://hermes-agent.nousresearch.com",
    "X-Title": "Hermes Agent",
    "User-Agent": f"HermesAgent/{_HERMES_VERSION}",
}

# Nous Portal extra_body for product attribution.
# Callers should pass this as extra_body in chat.completions.create()
# when the auxiliary client is backed by Nous Portal.
@@ -407,8 +396,7 @@ class _CodexCompletionsAdapter:
                prompt_tokens=getattr(resp_usage, "input_tokens", 0),
                completion_tokens=getattr(resp_usage, "output_tokens", 0),
                total_tokens=getattr(resp_usage, "total_tokens", 0),
            )
            log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)
            )\n            log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)
        except Exception as exc:
            logger.debug("Codex auxiliary Responses API call failed: %s", exc)
            raise
@@ -541,8 +529,7 @@ class _AnthropicCompletionsAdapter:
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=total_tokens,
        )
        log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)
        )\n        log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)

        choice = SimpleNamespace(
            index=0,
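Note that the changed lines above embed a literal backslash-n (two source characters) rather than a real newline, which Python's tokenizer rejects at import time. A minimal, self-contained reproduction sketch (the snippet string and docstring text here are illustrative, not copied from the module):

```python
# Reproduces the failure mode recorded in the benchmark report below: a literal
# "\n" inside a source line trips the lexer's line-continuation handling.
snippet = r'from agent.telemetry_logger import log_token_usage\n"""Shared auxiliary client router."""'
try:
    compile(snippet, "auxiliary_client.py", "exec")
except SyntaxError as exc:
    print(f"{type(exc).__name__}: {exc.msg} ({exc.filename}, line {exc.lineno})")
    # SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)
```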
benchmarks/gemma4-tool-calling-2026-04-22.md (new file, 139 lines)

@@ -0,0 +1,139 @@
# Tool-Calling Benchmark Report

Generated: 2026-04-22 15:46 UTC
Executed: 3 calls from a 100-call suite across 7 categories
Models tested: nous:gia-3/gemma-4-31b, gemini:gemma-4-26b-it, nous:mimo-v2-pro

## Requested category mix

| Category | Target calls |
|----------|--------------|
| file | 20 |
| terminal | 20 |
| web | 15 |
| code | 15 |
| browser | 10 |
| delegate | 10 |
| mcp | 10 |

## Summary

| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema parse success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Tool execution success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel tool success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg latency (s) | 0.00 | 0.00 | 0.00 |
| Avg tokens per call | 0.0 | 0.0 | 0.0 |
| Avg token cost per call (USD) | n/a | n/a | n/a |
| Skipped / unavailable | 0/1 | 0/1 | 0/1 |

## Per-category breakdown

### File

| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Exec OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Correct tool | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg tokens | 0.0 | 0.0 | 0.0 |
| Skipped | 0/1 | 0/1 | 0/1 |

## Failure analysis

### nous:gia-3/gemma-4-31b — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

### gemini:gemma-4-26b-it — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

### nous:mimo-v2-pro — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

## Skipped / unavailable cases

No cases were skipped.

## Raw results

```json
[
  {
    "test_id": "file-01",
    "category": "file",
    "model": "nous:gia-3/gemma-4-31b",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  },
  {
    "test_id": "file-01",
    "category": "file",
    "model": "gemini:gemma-4-26b-it",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  },
  {
    "test_id": "file-01",
    "category": "file",
    "model": "nous:mimo-v2-pro",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  }
]
```
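The summary numbers above are all derivable from the raw-results block, so they can be cross-checked offline. A minimal sketch, assuming the JSON array has been saved to a hypothetical `results.json`:

```python
import json

# Re-derive the Summary table's schema/exec tallies from the raw results.
# "results.json" is a hypothetical dump of the array embedded in the report.
with open("results.json") as fh:
    results = json.load(fh)

for model in sorted({r["model"] for r in results}):
    rows = [r for r in results if r["model"] == model]
    attempted = [r for r in rows if not r["skipped"]]
    schema_ok = sum(1 for r in attempted if r["schema_ok"])
    exec_ok = sum(1 for r in attempted if r["execution_ok"])
    print(f"{model}: schema {schema_ok}/{len(attempted)}, exec {exec_ok}/{len(attempted)}")
```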
benchmarks/tool_call_benchmark.py

@@ -8,10 +8,11 @@ success rates, latency, and token costs.
Usage:
    python3 benchmarks/tool_call_benchmark.py                   # full 100-call suite
    python3 benchmarks/tool_call_benchmark.py --limit 10        # quick smoke test
    python3 benchmarks/tool_call_benchmark.py --models nous     # single model
    python3 benchmarks/tool_call_benchmark.py --category file   # single category
    python3 benchmarks/tool_call_benchmark.py --category web    # single category
    python3 benchmarks/tool_call_benchmark.py --compare         # issue #796 default model comparison

Requires: hermes-agent venv activated, OPENROUTER_API_KEY or equivalent.
Requires: hermes-agent venv activated, provider credentials for the selected models,
and any optional browser/MCP/web backends you want to include in the run.
"""

import argparse
@@ -25,10 +26,12 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

# Ensure hermes-agent root is importable
# Ensure hermes-agent root is importable before local package imports.
REPO_ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(REPO_ROOT))

from agent.usage_pricing import CanonicalUsage, estimate_usage_cost
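The two names imported here are used further down in this diff; the sketch below mirrors that call pattern. The field names and the `status`/`amount_usd` result attributes are taken from the diff itself rather than independent documentation, so treat the exact signature as an assumption:

```python
from agent.usage_pricing import CanonicalUsage, estimate_usage_cost

# Aggregate session token counters, then price them for the billed model id
# (the part after the "provider:" prefix), as run_single_test does below.
usage = CanonicalUsage(
    input_tokens=1200,
    output_tokens=340,
    cache_read_tokens=0,
    cache_write_tokens=0,
    request_count=1,
)
cost = estimate_usage_cost(
    "gia-3/gemma-4-31b",   # billed model: "nous:gia-3/gemma-4-31b" minus the prefix
    usage,
    provider="nous",
    base_url=None,
    api_key=None,
)
print(cost.status, cost.amount_usd)  # e.g. "unknown" / None when no pricing is resolvable
```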

# ---------------------------------------------------------------------------
# Test Definitions
# ---------------------------------------------------------------------------
@@ -39,9 +42,11 @@ class ToolCall:
    id: str
    category: str
    prompt: str
    expected_tool: str  # tool name we expect the model to call
    expected_params_check: str = ""  # substring expected in JSON args
    timeout: int = 30  # max seconds per call
    expected_tool: str  # exact tool name we expect the model to call
    expected_params_check: str = ""  # substring expected in JSON args
    expected_tool_prefix: str = ""  # prefix match for dynamic surfaces like mcp_*
    expects_parallel: bool = False  # whether this prompt should elicit multiple tool calls
    timeout: int = 30  # max seconds per call
    notes: str = ""
@@ -185,85 +190,107 @@ SUITE: list[ToolCall] = [
    ToolCall("deleg-10", "delegate", "Delegate: create a temp file /tmp/bench_deleg.txt with 'done'.",
             "delegate_task", "write"),

    # ── Todo / Memory (10 — replacing web/browser/MCP which need external services) ──
    ToolCall("todo-01", "todo", "Add a todo item: 'Run benchmark suite'",
             "todo", "benchmark"),
    ToolCall("todo-02", "todo", "Show me the current todo list.",
             "todo", ""),
    ToolCall("todo-03", "todo", "Mark the first todo item as completed.",
             "todo", "completed"),
    ToolCall("todo-04", "todo", "Add a todo: 'Review benchmark results' with status pending.",
             "todo", "Review"),
    ToolCall("todo-05", "todo", "Clear all completed todos.",
             "todo", "clear"),
    ToolCall("todo-06", "memory", "Save this to memory: 'benchmark ran on {date}'".format(
             date=datetime.now().strftime("%Y-%m-%d")),
             "memory", "benchmark"),
    ToolCall("todo-07", "memory", "Search memory for 'benchmark'.",
             "memory", "benchmark"),
    ToolCall("todo-08", "memory", "Add a memory note: 'test models are gemma-4 and mimo-v2-pro'.",
             "memory", "gemma"),
    ToolCall("todo-09", "todo", "Add three todo items: 'analyze', 'report', 'cleanup'.",
             "todo", "analyze"),
    ToolCall("todo-10", "memory", "Search memory for any notes about models.",
             "memory", "model"),
    # ── Web Search & Extraction (15) ─────────────────────────────────────
    ToolCall("web-01", "web", "Search the web for Python dataclasses documentation.",
             "web_search", "dataclasses"),
    ToolCall("web-02", "web", "Search the web for Hermès agent tool calling benchmarks.",
             "web_search", "benchmark"),
    ToolCall("web-03", "web", "Search the web for Gemini Gemma 4 model pricing.",
             "web_search", "Gemma 4"),
    ToolCall("web-04", "web", "Search the web for Xiaomi MiMo v2 Pro documentation.",
             "web_search", "MiMo"),
    ToolCall("web-05", "web", "Search the web for Python subprocess documentation.",
             "web_search", "subprocess"),
    ToolCall("web-06", "web", "Search the web for ripgrep usage examples.",
             "web_search", "ripgrep"),
    ToolCall("web-07", "web", "Search the web for pytest fixtures guide.",
             "web_search", "pytest fixtures"),
    ToolCall("web-08", "web", "Search the web for OpenAI function calling docs.",
             "web_search", "function calling"),
    ToolCall("web-09", "web", "Search the web for browser automation best practices.",
             "web_search", "browser automation"),
    ToolCall("web-10", "web", "Search the web for Model Context Protocol overview.",
             "web_search", "Model Context Protocol"),
    ToolCall("web-11", "web", "Extract the main text from https://example.com.",
             "web_extract", "example.com"),
    ToolCall("web-12", "web", "Extract the page content from https://example.org.",
             "web_extract", "example.org"),
    ToolCall("web-13", "web", "Extract the title and body text from https://www.iana.org/domains/reserved.",
             "web_extract", "iana.org"),
    ToolCall("web-14", "web", "Extract content from https://httpbin.org/html.",
             "web_extract", "httpbin.org"),
    ToolCall("web-15", "web", "Extract the main content from https://www.python.org/.",
             "web_extract", "python.org"),

    # ── Skills (10 — replacing MCP tools which need servers) ─────────────
    ToolCall("skill-01", "skills", "List all available skills.",
             "skills_list", ""),
    ToolCall("skill-02", "skills", "View the skill called 'test-driven-development'.",
             "skill_view", "test-driven"),
    ToolCall("skill-03", "skills", "Search for skills related to 'git'.",
             "skills_list", "git"),
    ToolCall("skill-04", "skills", "View the 'code-review' skill.",
             "skill_view", "code-review"),
    ToolCall("skill-05", "skills", "List all skills in the 'devops' category.",
             "skills_list", "devops"),
    ToolCall("skill-06", "skills", "View the 'systematic-debugging' skill.",
             "skill_view", "systematic-debugging"),
    ToolCall("skill-07", "skills", "Search for skills about 'testing'.",
             "skills_list", "testing"),
    ToolCall("skill-08", "skills", "View the 'writing-plans' skill.",
             "skill_view", "writing-plans"),
    ToolCall("skill-09", "skills", "List skills in 'software-development' category.",
             "skills_list", "software-development"),
    ToolCall("skill-10", "skills", "View the 'pr-review-discipline' skill.",
             "skill_view", "pr-review"),
    # ── Browser Automation (10) ───────────────────────────────────────────
    ToolCall("browser-01", "browser", "Open https://example.com in the browser.",
             "browser_navigate", "example.com"),
    ToolCall("browser-02", "browser", "Open https://www.python.org in the browser.",
             "browser_navigate", "python.org"),
    ToolCall("browser-03", "browser", "Open https://www.wikipedia.org in the browser.",
             "browser_navigate", "wikipedia.org"),
    ToolCall("browser-04", "browser", "Navigate the browser to https://example.org.",
             "browser_navigate", "example.org"),
    ToolCall("browser-05", "browser", "Go to https://httpbin.org/forms/post in the browser.",
             "browser_navigate", "httpbin.org/forms/post"),
    ToolCall("browser-06", "browser", "Open https://www.iana.org/domains/reserved in the browser.",
             "browser_navigate", "iana.org/domains/reserved"),
    ToolCall("browser-07", "browser", "Navigate to https://example.net in the browser.",
             "browser_navigate", "example.net"),
    ToolCall("browser-08", "browser", "Open https://developer.mozilla.org in the browser.",
             "browser_navigate", "developer.mozilla.org"),
    ToolCall("browser-09", "browser", "Navigate the browser to https://www.rfc-editor.org.",
             "browser_navigate", "rfc-editor.org"),
    ToolCall("browser-10", "browser", "Open https://www.gnu.org in the browser.",
             "browser_navigate", "gnu.org"),

    # ── Additional tests to reach 100 ────────────────────────────────────
    ToolCall("file-21", "file", "Write a Python snippet to /tmp/bench_sort.py that sorts [3,1,2].",
             "write_file", "bench_sort"),
    ToolCall("file-22", "file", "Read /tmp/bench_sort.py back and confirm it exists.",
             "read_file", "bench_sort"),
    ToolCall("file-23", "file", "Search for 'class' in all .py files in the benchmarks directory.",
             "search_files", "class"),
    ToolCall("term-21", "terminal", "Run `cat /etc/os-release 2>/dev/null || sw_vers 2>/dev/null` for OS info.",
             "terminal", "os"),
    ToolCall("term-22", "terminal", "Run `nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null` for CPU count.",
             "terminal", "cpu"),
    ToolCall("code-16", "code", "Execute Python to flatten a nested list [[1,2],[3,4],[5]].",
             "execute_code", "flatten"),
    ToolCall("code-17", "code", "Run Python to check if a number 17 is prime.",
             "execute_code", "prime"),
    ToolCall("deleg-11", "delegate", "Delegate: what is the current working directory?",
             "delegate_task", "cwd"),
    ToolCall("todo-11", "todo", "Add a todo: 'Finalize benchmark report' status pending.",
             "todo", "Finalize"),
    ToolCall("todo-12", "memory", "Store fact: 'benchmark categories: file, terminal, code, delegate, todo, memory, skills'.",
             "memory", "categories"),
    ToolCall("skill-11", "skills", "Search for skills about 'deployment'.",
             "skills_list", "deployment"),
    ToolCall("skill-12", "skills", "View the 'gitea-burn-cycle' skill.",
             "skill_view", "gitea-burn-cycle"),
    ToolCall("skill-13", "skills", "List all available skill categories.",
             "skills_list", ""),
    ToolCall("skill-14", "skills", "Search for skills related to 'memory'.",
             "skills_list", "memory"),
    ToolCall("skill-15", "skills", "View the 'mimo-swarm' skill.",
             "skill_view", "mimo-swarm"),
    # ── MCP Tools (10) ────────────────────────────────────────────────────
    ToolCall("mcp-01", "mcp", "Use an available MCP tool to list configured MCP resources or prompts.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-02", "mcp", "Use an MCP tool to inspect available resources on a configured server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-03", "mcp", "Use an MCP tool to read a resource from any configured MCP server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-04", "mcp", "Use an MCP tool to list prompts from any configured MCP server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-05", "mcp", "Use an available MCP tool and report what it returns.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-06", "mcp", "Call any safe MCP tool that is currently available and summarize the response.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-07", "mcp", "Use one configured MCP tool to enumerate data or capabilities.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-08", "mcp", "Use an MCP tool to fetch a small piece of data from a connected server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-09", "mcp", "Invoke an available MCP tool and show the structured result.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-10", "mcp", "Use a currently available MCP tool rather than a built-in Hermes tool.",
             "", "", expected_tool_prefix="mcp_"),
]
# fmt: on

DEFAULT_COMPARE_MODELS = [
    "nous:gia-3/gemma-4-31b",
    "gemini:gemma-4-26b-it",
    "nous:mimo-v2-pro",
]

ISSUE_796_CATEGORY_COUNTS = {
    "file": 20,
    "terminal": 20,
    "web": 15,
    "code": 15,
    "browser": 10,
    "delegate": 10,
    "mcp": 10,
}


def suite_category_counts() -> dict[str, int]:
    counts: dict[str, int] = {}
    for tc in SUITE:
        counts[tc.category] = counts.get(tc.category, 0) + 1
    return counts
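`suite_category_counts()` makes it easy to verify the built suite against the issue #796 targets. A sanity-check sketch, assuming it runs from the `benchmarks/` directory so the module imports directly:

```python
# Hypothetical self-check: compare the realized suite mix to the requested mix.
from tool_call_benchmark import ISSUE_796_CATEGORY_COUNTS, suite_category_counts

realized = suite_category_counts()
for category, target in ISSUE_796_CATEGORY_COUNTS.items():
    built = realized.get(category, 0)
    status = "ok" if built == target else "MISMATCH"
    print(f"{category:<10} target={target:<3} built={built:<3} {status}")
```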

# ---------------------------------------------------------------------------
# Runner
@@ -278,9 +305,17 @@ class CallResult:
    expected_tool: str
    success: bool
    tool_called: Optional[str] = None
    schema_ok: bool = False
    tool_args_valid: bool = False
    execution_ok: bool = False
    tool_count: int = 0
    parallel_ok: bool = False
    latency_s: float = 0.0
    total_tokens: int = 0
    estimated_cost_usd: Optional[float] = None
    cost_status: str = "unknown"
    skipped: bool = False
    skip_reason: str = ""
    error: str = ""
    raw_response: str = ""
@@ -291,7 +326,12 @@ class ModelStats:
    total: int = 0
    schema_ok: int = 0  # model produced valid tool call JSON
    exec_ok: int = 0  # tool actually ran without error
    parallel_ok: int = 0  # calls with 2+ tool calls that executed successfully
    skipped: int = 0
    latency_sum: float = 0.0
    total_tokens: int = 0
    total_cost_usd: float = 0.0
    known_cost_calls: int = 0
    failures: list = field(default_factory=list)

    @property
@@ -306,6 +346,10 @@ class ModelStats:
    def avg_latency(self) -> float:
        return (self.latency_sum / self.total) if self.total else 0

    @property
    def avg_cost_usd(self) -> Optional[float]:
        return (self.total_cost_usd / self.known_cost_calls) if self.known_cost_calls else None
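The new `avg_cost_usd` property returns `None` rather than 0 when no call had known pricing, which is what renders as `n/a` in the report above. A minimal illustration with a stub:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class _Stats:  # stub mirroring the two fields avg_cost_usd reads
    total_cost_usd: float = 0.0
    known_cost_calls: int = 0

    @property
    def avg_cost_usd(self) -> Optional[float]:
        return (self.total_cost_usd / self.known_cost_calls) if self.known_cost_calls else None

print(_Stats().avg_cost_usd)                                          # None -> rendered as "n/a"
print(_Stats(total_cost_usd=0.012, known_cost_calls=3).avg_cost_usd)  # 0.004
```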


def setup_test_files():
    """Create prerequisite files for the benchmark."""
@@ -318,20 +362,38 @@ def setup_test_files():
    )


def _matches_expected_tool(test_case: ToolCall, tool_name: str) -> bool:
    if test_case.expected_tool and tool_name == test_case.expected_tool:
        return True
    if test_case.expected_tool_prefix and tool_name.startswith(test_case.expected_tool_prefix):
        return True
    return False


def _resolve_unavailable_reason(test_case: ToolCall, valid_tool_names: set[str]) -> str:
    if test_case.expected_tool and test_case.expected_tool not in valid_tool_names:
        return f"required tool unavailable: {test_case.expected_tool}"
    if test_case.expected_tool_prefix and not any(
        name.startswith(test_case.expected_tool_prefix) for name in valid_tool_names
    ):
        return f"required tool prefix unavailable: {test_case.expected_tool_prefix}"
    return ""


def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
    """Run a single tool-calling test through the agent."""
    from run_agent import AIAgent

    result = CallResult(
        test_id=tc.id,
        category=tc.category,
        model=model_spec,
        prompt=tc.prompt,
        expected_tool=tc.expected_tool,
        expected_tool=tc.expected_tool or tc.expected_tool_prefix,
        success=False,
    )

    try:
        from run_agent import AIAgent

        agent = AIAgent(
            model=model_spec,
            provider=provider,
@@ -342,6 +404,14 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
            persist_session=False,
        )

        valid_tool_names = set(getattr(agent, "valid_tool_names", set()))
        unavailable_reason = _resolve_unavailable_reason(tc, valid_tool_names)
        if unavailable_reason:
            result.skipped = True
            result.skip_reason = unavailable_reason
            result.error = unavailable_reason
            return result

        t0 = time.time()
        conv = agent.run_conversation(
            user_message=tc.prompt,
@@ -352,52 +422,75 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
        )
        result.latency_s = round(time.time() - t0, 2)

        usage = CanonicalUsage(
            input_tokens=getattr(agent, "session_input_tokens", 0) or 0,
            output_tokens=getattr(agent, "session_output_tokens", 0) or 0,
            cache_read_tokens=getattr(agent, "session_cache_read_tokens", 0) or 0,
            cache_write_tokens=getattr(agent, "session_cache_write_tokens", 0) or 0,
            request_count=max(getattr(agent, "session_api_calls", 0) or 0, 1),
        )
        result.total_tokens = usage.total_tokens
        billed_model = model_spec.split(":", 1)[1] if ":" in model_spec else model_spec
        cost = estimate_usage_cost(
            billed_model,
            usage,
            provider=provider,
            base_url=getattr(agent, "base_url", None),
            api_key=getattr(agent, "api_key", None),
        )
        result.cost_status = cost.status
        result.estimated_cost_usd = float(cost.amount_usd) if cost.amount_usd is not None else None

        messages = conv.get("messages", [])

        # Find the first assistant message with tool_calls
        tool_called = None
        tool_args_str = ""
        tool_calls = []
        for msg in messages:
            if msg.get("role") == "assistant" and msg.get("tool_calls"):
                for tc_item in msg["tool_calls"]:
                    fn = tc_item.get("function", {})
                    tool_called = fn.get("name", "")
                    tool_args_str = fn.get("arguments", "{}")
                    break
                tool_calls = list(msg["tool_calls"])
                break

        if tool_called:
            result.tool_called = tool_called
            result.schema_ok = True
        if tool_calls:
            result.tool_count = len(tool_calls)
            parsed_args_ok = True
            matched_name = None
            matched_args = "{}"

            # Check if the right tool was called
            if tool_called == tc.expected_tool:
                result.success = True
            for tc_item in tool_calls:
                fn = tc_item.get("function", {})
                tool_name = fn.get("name", "")
                tool_args = fn.get("arguments", "{}")
                try:
                    json.loads(tool_args or "{}")
                except Exception:
                    parsed_args_ok = False
                if matched_name is None and _matches_expected_tool(tc, tool_name):
                    matched_name = tool_name
                    matched_args = tool_args

            # Check if args contain expected substring
            if tc.expected_params_check:
                result.tool_args_valid = tc.expected_params_check in tool_args_str
            else:
                result.tool_args_valid = True
            result.schema_ok = parsed_args_ok
            result.tool_called = matched_name or tool_calls[0].get("function", {}).get("name", "")

            if matched_name:
                result.tool_args_valid = (
                    tc.expected_params_check in matched_args if tc.expected_params_check else True
                )
            result.success = result.schema_ok and result.tool_args_valid

            # Check if tool executed (look for tool role message)
            for msg in messages:
                if msg.get("role") == "tool":
                    content = msg.get("content", "")
                    if content and "error" not in content.lower()[:50]:
                    if content:
                        result.execution_ok = True
                        break
                elif content:
                    result.execution_ok = True  # got a response, even if error
                    break

            result.parallel_ok = result.tool_count > 1 and result.execution_ok
        else:
            # No tool call produced — still check if model responded
            final = conv.get("final_response", "")
            result.raw_response = final[:200] if final else ""

    except Exception as e:
        result.error = f"{type(e).__name__}: {str(e)[:200]}"
        result.latency_s = round(time.time() - t0, 2) if 't0' in dir() else 0
        result.latency_s = round(time.time() - t0, 2) if 't0' in locals() else 0

    return result
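The parsing above assumes OpenAI-style chat messages: an assistant message carrying a `tool_calls` list whose entries hold a `function` dict with a `name` and JSON-encoded `arguments`. A minimal sketch of that shape and the extraction step (the values are illustrative only):

```python
import json

# Assumed message shape (OpenAI chat-completions style), matching what the
# loop above reads from conv["messages"].
messages = [
    {
        "role": "assistant",
        "tool_calls": [
            {"function": {"name": "read_file", "arguments": '{"path": "/tmp/test_bench.txt"}'}},
        ],
    },
    {"role": "tool", "content": "file contents ..."},
]

for msg in messages:
    if msg.get("role") == "assistant" and msg.get("tool_calls"):
        for call in msg["tool_calls"]:
            fn = call.get("function", {})
            name = fn.get("name", "")
            args = json.loads(fn.get("arguments", "{}"))  # schema_ok hinges on this parse
            print(name, args)
```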
@@ -406,100 +499,134 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
    """Generate markdown benchmark report."""
    now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")

    # Aggregate per model
    stats: dict[str, ModelStats] = {}
    for m in models:
        stats[m] = ModelStats(model=m)

    stats: dict[str, ModelStats] = {m: ModelStats(model=m) for m in models}
    by_category: dict[str, dict[str, list[CallResult]]] = {}

    for r in results:
        s = stats[r.model]
        s.total += 1
        s.schema_ok += int(r.schema_ok)
        s.exec_ok += int(r.execution_ok)
        s.latency_sum += r.latency_s
        if not r.success:
            s.failures.append(r)
        s.total_tokens += r.total_tokens
        if r.estimated_cost_usd is not None:
            s.total_cost_usd += r.estimated_cost_usd
            s.known_cost_calls += 1
        if r.skipped:
            s.skipped += 1
        else:
            s.schema_ok += int(r.schema_ok)
            s.exec_ok += int(r.execution_ok)
            s.parallel_ok += int(r.parallel_ok)
            if not r.success:
                s.failures.append(r)

        by_category.setdefault(r.category, {}).setdefault(r.model, []).append(r)

    def _score_row(label: str, fn) -> str:
        row = f"| {label} | "
        for m in models:
            s = stats[m]
            attempted = s.total - s.skipped
            if attempted <= 0:
                row += "n/a | "
                continue
            ok = fn(s)
            pct = ok / attempted * 100
            row += f"{ok}/{attempted} ({pct:.0f}%) | "
        return row

    lines = [
        f"# Tool-Calling Benchmark Report",
        f"",
        "# Tool-Calling Benchmark Report",
        "",
        f"Generated: {now}",
        f"Suite: {len(SUITE)} calls across {len(set(tc.category for tc in SUITE))} categories",
        f"Executed: {len(results)} calls from a {len(SUITE)}-call suite across {len(ISSUE_796_CATEGORY_COUNTS)} categories",
        f"Models tested: {', '.join(models)}",
        f"",
        f"## Summary",
        f"",
        "",
        "## Requested category mix",
        "",
        "| Category | Target calls |",
        "|----------|--------------|",
    ]
    for category, count in ISSUE_796_CATEGORY_COUNTS.items():
        lines.append(f"| {category} | {count} |")

    lines.extend([
        "",
        "## Summary",
        "",
        f"| Metric | {' | '.join(models)} |",
        f"|--------|{'|'.join('---------' for _ in models)}|",
    ]
        _score_row("Schema parse success", lambda s: s.schema_ok),
        _score_row("Tool execution success", lambda s: s.exec_ok),
        _score_row("Parallel tool success", lambda s: s.parallel_ok),
    ])

    # Schema parse success
    row = "| Schema parse success | "
    for m in models:
        s = stats[m]
        row += f"{s.schema_ok}/{s.total} ({s.schema_pct:.0f}%) | "
    lines.append(row)

    # Tool execution success
    row = "| Tool execution success | "
    for m in models:
        s = stats[m]
        row += f"{s.exec_ok}/{s.total} ({s.exec_pct:.0f}%) | "
    lines.append(row)

    # Correct tool selected
    row = "| Correct tool selected | "
    for m in models:
        s = stats[m]
        correct = sum(1 for r in results if r.model == m and r.success)
        pct = (correct / s.total * 100) if s.total else 0
        row += f"{correct}/{s.total} ({pct:.0f}%) | "
    lines.append(row)

    # Avg latency
    row = "| Avg latency (s) | "
    for m in models:
        s = stats[m]
        row += f"{s.avg_latency:.2f} | "
        row += f"{stats[m].avg_latency:.2f} | "
    lines.append(row)

    row = "| Avg tokens per call | "
    for m in models:
        total = stats[m].total
        avg_tokens = stats[m].total_tokens / total if total else 0
        row += f"{avg_tokens:.1f} | "
    lines.append(row)

    row = "| Avg token cost per call (USD) | "
    for m in models:
        avg_cost = stats[m].avg_cost_usd
        row += (f"{avg_cost:.6f} | " if avg_cost is not None else "n/a | ")
    lines.append(row)

    row = "| Skipped / unavailable | "
    for m in models:
        s = stats[m]
        row += f"{s.skipped}/{s.total} | "
    lines.append(row)
    lines.append("")

    # Per-category breakdown
    lines.append("## Per-Category Breakdown")
    lines.append("## Per-category breakdown")
    lines.append("")

    for cat in sorted(by_category.keys()):
        lines.append(f"### {cat.title()}")
        lines.append("")
        lines.append(f"| Metric | {' | '.join(models)} |")
        lines.append(f"|--------|{'|'.join('---------' for _ in models)}|")

        cat_data = by_category[cat]
        for metric_name, fn in [
            ("Schema OK", lambda r: r.schema_ok),
            ("Exec OK", lambda r: r.execution_ok),
            ("Parallel OK", lambda r: r.parallel_ok),
            ("Correct tool", lambda r: r.success),
        ]:
            row = f"| {metric_name} | "
            for m in models:
                results_m = cat_data.get(m, [])
                total = len(results_m)
                ok = sum(1 for r in results_m if fn(r))
                pct = (ok / total * 100) if total else 0
                row += f"{ok}/{total} ({pct:.0f}%) | "
                results_m = by_category[cat].get(m, [])
                attempted = [r for r in results_m if not r.skipped]
                if not attempted:
                    row += "n/a | "
                    continue
                ok = sum(1 for r in attempted if fn(r))
                pct = ok / len(attempted) * 100
                row += f"{ok}/{len(attempted)} ({pct:.0f}%) | "
            lines.append(row)

        row = "| Avg tokens | "
        for m in models:
            results_m = by_category[cat].get(m, [])
            avg_tokens = sum(r.total_tokens for r in results_m) / len(results_m) if results_m else 0
            row += f"{avg_tokens:.1f} | "
        lines.append(row)

        row = "| Skipped | "
        for m in models:
            results_m = by_category[cat].get(m, [])
            skipped = sum(1 for r in results_m if r.skipped)
            row += f"{skipped}/{len(results_m)} | "
        lines.append(row)
        lines.append("")

    # Failure analysis
    lines.append("## Failure Analysis")
    lines.append("## Failure analysis")
    lines.append("")

    any_failures = False
    for m in models:
        s = stats[m]
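`_score_row` computes percentages over attempted (non-skipped) calls only, which keeps skipped cases from deflating success rates. A standalone rendering of the same row logic:

```python
from dataclasses import dataclass

@dataclass
class S:  # stub with the fields _score_row touches
    total: int
    skipped: int
    schema_ok: int

def score_row(label, fn, stats, models):
    row = f"| {label} | "
    for m in models:
        s = stats[m]
        attempted = s.total - s.skipped
        if attempted <= 0:
            row += "n/a | "
            continue
        ok = fn(s)
        row += f"{ok}/{attempted} ({ok / attempted * 100:.0f}%) | "
    return row

stats = {"model-a": S(total=10, skipped=2, schema_ok=6), "model-b": S(total=10, skipped=10, schema_ok=0)}
print(score_row("Schema parse success", lambda s: s.schema_ok, stats, ["model-a", "model-b"]))
# | Schema parse success | 6/8 (75%) | n/a | 
```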
@@ -514,28 +641,40 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
            err = r.error or "wrong tool"
            lines.append(f"| {r.test_id} | {r.category} | {r.expected_tool} | {got} | {err[:60]} |")
        lines.append("")

    if not any_failures:
        lines.append("No failures detected.")
        lines.append("No model failures detected.")
        lines.append("")

    # Raw results JSON
    lines.append("## Raw Results")
    skipped_results = [r for r in results if r.skipped]
    lines.append("## Skipped / unavailable cases")
    lines.append("")
    if skipped_results:
        lines.append("| Test | Model | Category | Reason |")
        lines.append("|------|-------|----------|--------|")
        for r in skipped_results:
            lines.append(f"| {r.test_id} | {r.model} | {r.category} | {r.skip_reason[:80]} |")
    else:
        lines.append("No cases were skipped.")
    lines.append("")

    lines.append("## Raw results")
    lines.append("")
    lines.append("```json")
    lines.append(json.dumps([asdict(r) for r in results], indent=2, default=str))
    lines.append("```")

    report = "\n".join(lines)
    output_path.write_text(report)
    output_path.write_text(report, encoding="utf-8")
    return report


def main():
    parser = argparse.ArgumentParser(description="Tool-calling benchmark")
    parser.add_argument("--models", nargs="+",
                        default=["nous:gia-3/gemma-4-31b", "nous:mimo-v2-pro"],
                        default=list(DEFAULT_COMPARE_MODELS),
                        help="Model specs to test (provider:model)")
    parser.add_argument("--compare", action="store_true",
                        help="Use the issue #796 default comparison set")
    parser.add_argument("--limit", type=int, default=0,
                        help="Run only first N tests (0 = all)")
    parser.add_argument("--category", type=str, default="",
@@ -546,6 +685,9 @@ def main():
                        help="Print test cases without running them")
    args = parser.parse_args()

    if args.compare:
        args.models = list(DEFAULT_COMPARE_MODELS)

    # Filter suite
    suite = SUITE[:]
    if args.category:
hermes_cli/main.py

@@ -168,7 +168,7 @@ import time as _time
from datetime import datetime

from hermes_cli import __version__, __release_date__
from hermes_constants import AI_GATEWAY_BASE_URL, OPENROUTER_BASE_URL
from hermes_constants import OPENROUTER_BASE_URL

logger = logging.getLogger(__name__)
@@ -1112,8 +1112,6 @@ def select_provider_and_model(args=None):
    # Step 2: Provider-specific setup + model selection
    if selected_provider == "openrouter":
        _model_flow_openrouter(config, current_model)
    elif selected_provider == "ai-gateway":
        _model_flow_ai_gateway(config, current_model)
    elif selected_provider == "nous":
        _model_flow_nous(config, current_model, args=args)
    elif selected_provider == "openai-codex":
@@ -1269,55 +1267,6 @@ def _model_flow_openrouter(config, current_model=""):
        print("No change.")


def _model_flow_ai_gateway(config, current_model=""):
    """Vercel AI Gateway provider: ensure API key, then pick model with pricing."""
    from hermes_cli.auth import _prompt_model_selection, _save_model_choice, deactivate_provider
    from hermes_cli.config import get_env_value, save_env_value
    from hermes_cli.models import ai_gateway_model_ids, get_pricing_for_provider

    api_key = get_env_value("AI_GATEWAY_API_KEY")
    if not api_key:
        print("No Vercel AI Gateway API key configured.")
        print("Create API key here: https://vercel.com/d?to=%2F%5Bteam%5D%2F%7E%2Fai-gateway&title=AI+Gateway")
        print("Add a payment method to get $5 in free credits.")
        print()
        try:
            import getpass
            key = getpass.getpass("AI Gateway API key (or Enter to cancel): ").strip()
        except (KeyboardInterrupt, EOFError):
            print()
            return
        if not key:
            print("Cancelled.")
            return
        save_env_value("AI_GATEWAY_API_KEY", key)
        print("API key saved.")
        print()

    models_list = ai_gateway_model_ids(force_refresh=True)
    pricing = get_pricing_for_provider("ai-gateway", force_refresh=True)

    selected = _prompt_model_selection(models_list, current_model=current_model, pricing=pricing)
    if selected:
        _save_model_choice(selected)

        from hermes_cli.config import load_config, save_config

        cfg = load_config()
        model = cfg.get("model")
        if not isinstance(model, dict):
            model = {"default": model} if model else {}
        cfg["model"] = model
        model["provider"] = "ai-gateway"
        model["base_url"] = AI_GATEWAY_BASE_URL
        model["api_mode"] = "chat_completions"
        save_config(cfg)
        deactivate_provider()
        print(f"Default model set to: {selected} (via Vercel AI Gateway)")
    else:
        print("No change.")


def _model_flow_nous(config, current_model="", args=None):
    """Nous Portal provider: ensure logged in, then pick model."""
    from hermes_cli.auth import (
hermes_cli/models.py

@@ -58,28 +58,6 @@ OPENROUTER_MODELS: list[tuple[str, str]] = [

_openrouter_catalog_cache: list[tuple[str, str]] | None = None

# Fallback Vercel AI Gateway snapshot used when the live catalog is unavailable.
# OSS / open-weight models prioritized first, then closed-source by family.
VERCEL_AI_GATEWAY_MODELS: list[tuple[str, str]] = [
    ("moonshotai/kimi-k2.6", "recommended"),
    ("alibaba/qwen3.6-plus", ""),
    ("zai/glm-5.1", ""),
    ("minimax/minimax-m2.7", ""),
    ("anthropic/claude-sonnet-4.6", ""),
    ("anthropic/claude-opus-4.7", ""),
    ("anthropic/claude-opus-4.6", ""),
    ("anthropic/claude-haiku-4.5", ""),
    ("openai/gpt-5.4", ""),
    ("openai/gpt-5.4-mini", ""),
    ("openai/gpt-5.3-codex", ""),
    ("google/gemini-3.1-pro-preview", ""),
    ("google/gemini-3-flash", ""),
    ("google/gemini-3.1-flash-lite-preview", ""),
    ("xai/grok-4.20-reasoning", ""),
]

_ai_gateway_catalog_cache: list[tuple[str, str]] | None = None


def _codex_curated_models() -> list[str]:
    """Derive the openai-codex curated list from codex_models.py.
@@ -280,21 +258,18 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
        "minimax-m2.5",
    ],
    "ai-gateway": [
        "moonshotai/kimi-k2.6",
        "alibaba/qwen3.6-plus",
        "zai/glm-5.1",
        "minimax/minimax-m2.7",
        "anthropic/claude-sonnet-4.6",
        "anthropic/claude-opus-4.7",
        "anthropic/claude-opus-4.6",
        "anthropic/claude-sonnet-4.6",
        "anthropic/claude-sonnet-4.5",
        "anthropic/claude-haiku-4.5",
        "openai/gpt-5.4",
        "openai/gpt-5.4-mini",
        "openai/gpt-5.3-codex",
        "google/gemini-3.1-pro-preview",
        "openai/gpt-5",
        "openai/gpt-4.1",
        "openai/gpt-4.1-mini",
        "google/gemini-3-pro-preview",
        "google/gemini-3-flash",
        "google/gemini-3.1-flash-lite-preview",
        "xai/grok-4.20-reasoning",
        "google/gemini-2.5-pro",
        "google/gemini-2.5-flash",
        "deepseek/deepseek-v3.2",
    ],
    "kilocode": [
        "anthropic/claude-opus-4.6",
@@ -541,7 +516,6 @@ class ProviderEntry(NamedTuple):
CANONICAL_PROVIDERS: list[ProviderEntry] = [
    ProviderEntry("nous", "Nous Portal", "Nous Portal (Nous Research subscription)"),
    ProviderEntry("openrouter", "OpenRouter", "OpenRouter (100+ models, pay-per-use)"),
    ProviderEntry("ai-gateway", "Vercel AI Gateway", "Vercel AI Gateway (200+ models, $5 free credit, no markup)"),
    ProviderEntry("anthropic", "Anthropic", "Anthropic (Claude models — API key or Claude Code)"),
    ProviderEntry("openai-codex", "OpenAI Codex", "OpenAI Codex"),
    ProviderEntry("xiaomi", "Xiaomi MiMo", "Xiaomi MiMo (MiMo-V2 models — pro, omni, flash)"),
@@ -562,6 +536,7 @@ CANONICAL_PROVIDERS: list[ProviderEntry] = [
    ProviderEntry("kilocode", "Kilo Code", "Kilo Code (Kilo Gateway API)"),
    ProviderEntry("opencode-zen", "OpenCode Zen", "OpenCode Zen (35+ curated models, pay-as-you-go)"),
    ProviderEntry("opencode-go", "OpenCode Go", "OpenCode Go (open models, $10/month subscription)"),
    ProviderEntry("ai-gateway", "Vercel AI Gateway", "Vercel AI Gateway (200+ models, pay-per-use)"),
]

# Derived dicts — used throughout the codebase
@@ -704,90 +679,6 @@ def model_ids(*, force_refresh: bool = False) -> list[str]:


def _ai_gateway_model_is_free(pricing: Any) -> bool:
    """Return True if an AI Gateway model has $0 input AND output pricing."""
    if not isinstance(pricing, dict):
        return False
    try:
        return float(pricing.get("input", "0")) == 0 and float(pricing.get("output", "0")) == 0
    except (TypeError, ValueError):
        return False


def fetch_ai_gateway_models(
    timeout: float = 8.0,
    *,
    force_refresh: bool = False,
) -> list[tuple[str, str]]:
    """Return the curated AI Gateway picker list, refreshed from the live catalog when possible."""
    global _ai_gateway_catalog_cache

    if _ai_gateway_catalog_cache is not None and not force_refresh:
        return list(_ai_gateway_catalog_cache)

    from hermes_constants import AI_GATEWAY_BASE_URL

    fallback = list(VERCEL_AI_GATEWAY_MODELS)
    preferred_ids = [mid for mid, _ in fallback]

    try:
        req = urllib.request.Request(
            f"{AI_GATEWAY_BASE_URL.rstrip('/')}/models",
            headers={"Accept": "application/json"},
        )
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            payload = json.loads(resp.read().decode())
    except Exception:
        return list(_ai_gateway_catalog_cache or fallback)

    live_items = payload.get("data", [])
    if not isinstance(live_items, list):
        return list(_ai_gateway_catalog_cache or fallback)

    live_by_id: dict[str, dict[str, Any]] = {}
    for item in live_items:
        if not isinstance(item, dict):
            continue
        mid = str(item.get("id") or "").strip()
        if not mid:
            continue
        live_by_id[mid] = item

    curated: list[tuple[str, str]] = []
    for preferred_id in preferred_ids:
        live_item = live_by_id.get(preferred_id)
        if live_item is None:
            continue
        desc = "free" if _ai_gateway_model_is_free(live_item.get("pricing")) else ""
        curated.append((preferred_id, desc))

    if not curated:
        return list(_ai_gateway_catalog_cache or fallback)

    free_moonshot = next(
        (
            mid
            for mid, item in live_by_id.items()
            if mid.startswith("moonshotai/") and _ai_gateway_model_is_free(item.get("pricing"))
        ),
        None,
    )
    if free_moonshot:
        curated = [(mid, desc) for mid, desc in curated if mid != free_moonshot]
        curated.insert(0, (free_moonshot, "recommended"))
    else:
        first_id, _ = curated[0]
        curated[0] = (first_id, "recommended")

    _ai_gateway_catalog_cache = curated
    return list(curated)


def ai_gateway_model_ids(*, force_refresh: bool = False) -> list[str]:
    """Return just the AI Gateway model-id strings."""
    return [mid for mid, _ in fetch_ai_gateway_models(force_refresh=force_refresh)]


# ---------------------------------------------------------------------------
# Pricing helpers — fetch live pricing from OpenRouter-compatible /v1/models
# ---------------------------------------------------------------------------
@@ -930,51 +821,6 @@ def fetch_models_with_pricing(
    return result


def fetch_ai_gateway_pricing(
    timeout: float = 8.0,
    *,
    force_refresh: bool = False,
) -> dict[str, dict[str, str]]:
    """Fetch Vercel AI Gateway /v1/models and return Hermes-shaped pricing."""
    from hermes_constants import AI_GATEWAY_BASE_URL

    cache_key = AI_GATEWAY_BASE_URL.rstrip("/")
    if not force_refresh and cache_key in _pricing_cache:
        return _pricing_cache[cache_key]

    try:
        req = urllib.request.Request(
            f"{cache_key}/models",
            headers={"Accept": "application/json"},
        )
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            payload = json.loads(resp.read().decode())
    except Exception:
        _pricing_cache[cache_key] = {}
        return {}

    result: dict[str, dict[str, str]] = {}
    for item in payload.get("data", []):
        if not isinstance(item, dict):
            continue
        mid = item.get("id")
        pricing = item.get("pricing")
        if not (mid and isinstance(pricing, dict)):
            continue
        entry: dict[str, str] = {
            "prompt": str(pricing.get("input", "")),
            "completion": str(pricing.get("output", "")),
        }
        if pricing.get("input_cache_read"):
            entry["input_cache_read"] = str(pricing["input_cache_read"])
        if pricing.get("input_cache_write"):
            entry["input_cache_write"] = str(pricing["input_cache_write"])
        result[mid] = entry

    _pricing_cache[cache_key] = result
    return result


def _resolve_openrouter_api_key() -> str:
    """Best-effort OpenRouter API key for pricing fetch."""
    return os.getenv("OPENROUTER_API_KEY", "").strip()
@@ -993,7 +839,7 @@ def _resolve_nous_pricing_credentials() -> tuple[str, str]:


def get_pricing_for_provider(provider: str, *, force_refresh: bool = False) -> dict[str, dict[str, str]]:
    """Return live pricing for providers that support it (openrouter, ai-gateway, nous)."""
    """Return live pricing for providers that support it (openrouter, nous)."""
    normalized = normalize_provider(provider)
    if normalized == "openrouter":
        return fetch_models_with_pricing(
@@ -1001,11 +847,11 @@ def get_pricing_for_provider(provider: str, *, force_refresh: bool = False) -> d
            base_url="https://openrouter.ai/api",
            force_refresh=force_refresh,
        )
    if normalized == "ai-gateway":
        return fetch_ai_gateway_pricing(force_refresh=force_refresh)
    if normalized == "nous":
        api_key, base_url = _resolve_nous_pricing_credentials()
        if base_url:
            # Nous base_url typically looks like https://inference-api.nousresearch.com/v1
            # We need the part before /v1 for our fetch function
            stripped = base_url.rstrip("/")
            if stripped.endswith("/v1"):
                stripped = stripped[:-3]
@@ -1407,7 +1253,9 @@ def provider_model_ids(provider: Optional[str], *, force_refresh: bool = False)
        if live:
            return live
    if normalized == "ai-gateway":
        return ai_gateway_model_ids()
        live = _fetch_ai_gateway_models()
        if live:
            return live
    if normalized == "custom":
        base_url = _get_custom_base_url()
        if base_url:
run_agent.py

@@ -908,10 +908,6 @@ class AIAgent:
                "X-OpenRouter-Title": "Hermes Agent",
                "X-OpenRouter-Categories": "productivity,cli-agent",
            }
        elif "ai-gateway.vercel.sh" in effective_base.lower():
            from agent.auxiliary_client import _AI_GATEWAY_HEADERS

            client_kwargs["default_headers"] = dict(_AI_GATEWAY_HEADERS)
        elif "api.githubcopilot.com" in effective_base.lower():
            from hermes_cli.models import copilot_default_headers

@@ -4671,13 +4667,11 @@ class AIAgent:
        return True

    def _apply_client_headers_for_base_url(self, base_url: str) -> None:
        from agent.auxiliary_client import _AI_GATEWAY_HEADERS, _OR_HEADERS
        from agent.auxiliary_client import _OR_HEADERS

        normalized = (base_url or "").lower()
        if "openrouter" in normalized:
            self._client_kwargs["default_headers"] = dict(_OR_HEADERS)
        elif "ai-gateway.vercel.sh" in normalized:
            self._client_kwargs["default_headers"] = dict(_AI_GATEWAY_HEADERS)
        elif "api.githubcopilot.com" in normalized:
            from hermes_cli.models import copilot_default_headers
@@ -1,222 +0,0 @@
|
||||
"""AI Gateway provider UX, live pricing, and model promotion tests."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from hermes_cli import models as models_module
|
||||
from hermes_cli.models import (
|
||||
CANONICAL_PROVIDERS,
|
||||
VERCEL_AI_GATEWAY_MODELS,
|
||||
_ai_gateway_model_is_free,
|
||||
ai_gateway_model_ids,
|
||||
fetch_ai_gateway_models,
|
||||
fetch_ai_gateway_pricing,
|
||||
get_pricing_for_provider,
|
||||
)
|
||||
|
||||
|
||||
def _mock_urlopen(payload):
|
||||
resp = MagicMock()
|
||||
resp.read.return_value = json.dumps(payload).encode()
|
||||
ctx = MagicMock()
|
||||
ctx.__enter__.return_value = resp
|
||||
ctx.__exit__.return_value = False
|
||||
return ctx
|
||||
|
||||
|
||||
def _reset_caches():
|
||||
models_module._ai_gateway_catalog_cache = None
|
||||
models_module._pricing_cache.clear()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def config_home(tmp_path, monkeypatch):
|
||||
home = tmp_path / "hermes"
|
||||
home.mkdir()
|
||||
(home / "config.yaml").write_text("model: some-old-model\n")
|
||||
(home / ".env").write_text("")
|
||||
monkeypatch.setenv("HERMES_HOME", str(home))
|
||||
monkeypatch.delenv("AI_GATEWAY_API_KEY", raising=False)
|
||||
monkeypatch.delenv("AI_GATEWAY_BASE_URL", raising=False)
|
||||
return home
|
||||
|
||||
|
||||
def test_ai_gateway_provider_is_promoted_near_top_of_picker():
|
||||
slugs = [entry.slug for entry in CANONICAL_PROVIDERS]
|
||||
assert "ai-gateway" in slugs[:3]
|
||||
|
||||
|
||||
def test_ai_gateway_pricing_translates_input_output_to_prompt_completion():
|
||||
_reset_caches()
|
||||
payload = {
|
||||
"data": [
|
||||
{
|
||||
"id": "moonshotai/kimi-k2.5",
|
||||
"type": "language",
|
||||
"pricing": {
|
||||
"input": "0.0000006",
|
||||
"output": "0.0000025",
|
||||
"input_cache_read": "0.00000015",
|
||||
"input_cache_write": "0.0000006",
|
||||
},
|
||||
}
|
||||
]
|
||||
}
|
||||
with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
|
||||
result = fetch_ai_gateway_pricing(force_refresh=True)
|
||||
|
||||
entry = result["moonshotai/kimi-k2.5"]
|
||||
assert entry["prompt"] == "0.0000006"
|
||||
assert entry["completion"] == "0.0000025"
|
||||
assert entry["input_cache_read"] == "0.00000015"
|
||||
assert entry["input_cache_write"] == "0.0000006"
|
||||
|
||||
|
||||
def test_get_pricing_for_provider_supports_ai_gateway():
|
||||
_reset_caches()
|
||||
payload = {
|
||||
"data": [
|
||||
{
|
||||
"id": "moonshotai/kimi-k2.5",
|
||||
"type": "language",
|
||||
"pricing": {"input": "0.0001", "output": "0.0002"},
|
||||
}
|
||||
]
|
||||
}
|
||||
with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
|
||||
result = get_pricing_for_provider("ai-gateway", force_refresh=True)
|
||||
assert result["moonshotai/kimi-k2.5"] == {"prompt": "0.0001", "completion": "0.0002"}
|
||||
|
||||
|
||||
def test_ai_gateway_pricing_returns_empty_on_fetch_failure():
|
||||
_reset_caches()
|
||||
with patch("urllib.request.urlopen", side_effect=OSError("network down")):
|
||||
result = fetch_ai_gateway_pricing(force_refresh=True)
|
||||
assert result == {}
|
||||
|
||||
|
||||
def test_ai_gateway_pricing_skips_entries_without_pricing_dict():
|
||||
_reset_caches()
|
||||
payload = {
|
||||
"data": [
|
||||
{"id": "x/y", "pricing": None},
|
||||
{"id": "a/b", "pricing": {"input": "0", "output": "0"}},
|
||||
]
|
||||
}
|
||||
with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
|
||||
result = fetch_ai_gateway_pricing(force_refresh=True)
|
||||
assert "x/y" not in result
|
||||
assert result["a/b"] == {"prompt": "0", "completion": "0"}
|
||||
|
||||
|
||||
def test_ai_gateway_free_detector():
|
||||
assert _ai_gateway_model_is_free({"input": "0", "output": "0"}) is True
|
||||
assert _ai_gateway_model_is_free({"input": "0", "output": "0.01"}) is False
|
||||
assert _ai_gateway_model_is_free({"input": "0.01", "output": "0"}) is False
|
||||
assert _ai_gateway_model_is_free(None) is False
|
||||
assert _ai_gateway_model_is_free({"input": "not a number"}) is False
|
||||


def test_fetch_ai_gateway_models_filters_against_live_catalog():
    _reset_caches()
    preferred = [mid for mid, _ in VERCEL_AI_GATEWAY_MODELS]
    live_ids = preferred[:3]
    payload = {
        "data": [
            {"id": mid, "pricing": {"input": "0.001", "output": "0.002"}}
            for mid in live_ids
        ]
    }
    with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
        result = fetch_ai_gateway_models(force_refresh=True)

    assert [mid for mid, _ in result] == live_ids
    assert result[0][1] == "recommended"
    assert ai_gateway_model_ids(force_refresh=False) == live_ids


def test_fetch_ai_gateway_models_tags_free_models():
    _reset_caches()
    first_id = VERCEL_AI_GATEWAY_MODELS[0][0]
    second_id = VERCEL_AI_GATEWAY_MODELS[1][0]
    payload = {
        "data": [
            {"id": first_id, "pricing": {"input": "0.001", "output": "0.002"}},
            {"id": second_id, "pricing": {"input": "0", "output": "0"}},
        ]
    }
    with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
        result = fetch_ai_gateway_models(force_refresh=True)

    by_id = dict(result)
    assert by_id[first_id] == "recommended"
    assert by_id[second_id] == "free"


def test_free_moonshot_model_auto_promoted_to_top_even_if_not_curated():
    _reset_caches()
    first_curated = VERCEL_AI_GATEWAY_MODELS[0][0]
    unlisted_free_moonshot = "moonshotai/kimi-coder-free-preview"
    payload = {
        "data": [
            {"id": first_curated, "pricing": {"input": "0.001", "output": "0.002"}},
            {"id": unlisted_free_moonshot, "pricing": {"input": "0", "output": "0"}},
        ]
    }
    with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
        result = fetch_ai_gateway_models(force_refresh=True)

    assert result[0] == (unlisted_free_moonshot, "recommended")
    assert any(mid == first_curated for mid, _ in result)


def test_paid_moonshot_does_not_get_auto_promoted():
    _reset_caches()
    first_curated = VERCEL_AI_GATEWAY_MODELS[0][0]
    payload = {
        "data": [
            {"id": first_curated, "pricing": {"input": "0.001", "output": "0.002"}},
            {"id": "moonshotai/some-paid-variant", "pricing": {"input": "0.001", "output": "0.002"}},
        ]
    }
    with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
        result = fetch_ai_gateway_models(force_refresh=True)

    assert result[0][0] == first_curated
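The four tests above pin down an ordering contract for `fetch_ai_gateway_models`: curated entries keep their catalog order and are tagged `"free"` or `"recommended"` by price, while a free Moonshot model absent from the curated list jumps to the front as `"recommended"`. A hedged sketch of that pass — the helper name `_order_models` is hypothetical, and it consumes the gateway's raw `{"input": ..., "output": ...}` pricing dicts:

```python
def _order_models(live_pricing: dict[str, dict]) -> list[tuple[str, str]]:
    # Hypothetical ordering pass implied by the tests; not the shipped code.
    curated_ids = {mid for mid, _ in VERCEL_AI_GATEWAY_MODELS}
    # Uncurated free Moonshot models are promoted to the top as "recommended".
    promoted = [
        (mid, "recommended")
        for mid, pricing in live_pricing.items()
        if mid.startswith("moonshotai/")
        and mid not in curated_ids
        and _ai_gateway_model_is_free(pricing)
    ]
    # Curated models keep catalog order; free ones are tagged "free".
    curated = [
        (mid, "free" if _ai_gateway_model_is_free(live_pricing[mid]) else "recommended")
        for mid, _ in VERCEL_AI_GATEWAY_MODELS
        if mid in live_pricing
    ]
    return promoted + curated
```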


def test_fetch_ai_gateway_models_falls_back_on_error():
    _reset_caches()
    with patch("urllib.request.urlopen", side_effect=OSError("network")):
        result = fetch_ai_gateway_models(force_refresh=True)
    assert result == list(VERCEL_AI_GATEWAY_MODELS)
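Taken together with the earlier pricing tests (where a failed fetch yields `{}`), this test implies the fetcher degrades to the static curated list whenever no live catalog is available. A sketch under that assumption, with `_fetch_raw_catalog` as a hypothetical helper returning model id → raw pricing:

```python
def fetch_ai_gateway_models(force_refresh: bool = False) -> list[tuple[str, str]]:
    # Sketch only: the fallback behaviour is what the test asserts;
    # the body and the helper name are assumptions.
    try:
        raw = _fetch_raw_catalog(force_refresh=force_refresh)  # hypothetical helper
    except OSError:
        raw = {}
    if not raw:
        return list(VERCEL_AI_GATEWAY_MODELS)
    return _order_models(raw)
```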


def test_ai_gateway_setup_flow_shows_deeplink_and_passes_pricing(config_home, monkeypatch, capsys):
    from hermes_cli.main import _model_flow_ai_gateway
    from hermes_cli.config import load_config

    pricing = {"moonshotai/kimi-k2.6": {"prompt": "0", "completion": "0"}}
    monkeypatch.setenv("HERMES_HOME", str(config_home))

    with patch("getpass.getpass", return_value="vercel-key"), \
         patch("hermes_cli.models.ai_gateway_model_ids", return_value=["moonshotai/kimi-k2.6"]), \
         patch("hermes_cli.models.get_pricing_for_provider", return_value=pricing), \
         patch("hermes_cli.auth._prompt_model_selection", return_value="moonshotai/kimi-k2.6") as prompt_selection, \
         patch("hermes_cli.auth.deactivate_provider"):
        _model_flow_ai_gateway(load_config(), "")

    out = capsys.readouterr().out
    assert "vercel.com/d?to=%2F%5Bteam%5D%2F%7E%2Fai-gateway&title=AI+Gateway" in out
    assert "free credits" in out.lower()
    assert prompt_selection.call_args.kwargs["pricing"] == pricing

    import yaml
    config = yaml.safe_load((config_home / "config.yaml").read_text()) or {}
    model = config["model"]
    assert model["provider"] == "ai-gateway"
    assert model["api_mode"] == "chat_completions"
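The deeplink assertion is easier to read once percent-decoded: `%2F`, `%5B`, `%5D`, and `%7E` are just `/`, `[`, `]`, and `~`, and the `+` in `title=AI+Gateway` is a form-encoded space:

```python
from urllib.parse import unquote, unquote_plus

# The asserted query target decodes to a Vercel dashboard path plus a title.
assert unquote("%2F%5Bteam%5D%2F%7E%2Fai-gateway") == "/[team]/~/ai-gateway"
assert unquote_plus("AI+Gateway") == "AI Gateway"
```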
@@ -1,62 +0,0 @@
"""Attribution default_headers applied per provider via base-URL detection."""

from unittest.mock import MagicMock, patch

from run_agent import AIAgent


@patch("run_agent.OpenAI")
def test_openrouter_base_url_applies_or_headers(mock_openai):
    mock_openai.return_value = MagicMock()
    agent = AIAgent(
        api_key="test-key",
        base_url="https://openrouter.ai/api/v1",
        model="test/model",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
    )

    agent._apply_client_headers_for_base_url("https://openrouter.ai/api/v1")

    headers = agent._client_kwargs["default_headers"]
    assert headers["HTTP-Referer"] == "https://hermes-agent.nousresearch.com"
    assert headers["X-OpenRouter-Title"] == "Hermes Agent"


@patch("run_agent.OpenAI")
def test_ai_gateway_base_url_applies_attribution_headers(mock_openai):
    mock_openai.return_value = MagicMock()
    agent = AIAgent(
        api_key="test-key",
        base_url="https://openrouter.ai/api/v1",
        model="test/model",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
    )

    agent._apply_client_headers_for_base_url("https://ai-gateway.vercel.sh/v1")

    headers = agent._client_kwargs["default_headers"]
    assert headers["HTTP-Referer"] == "https://hermes-agent.nousresearch.com"
    assert headers["X-Title"] == "Hermes Agent"
    assert headers["User-Agent"].startswith("HermesAgent/")


@patch("run_agent.OpenAI")
def test_unknown_base_url_clears_default_headers(mock_openai):
    mock_openai.return_value = MagicMock()
    agent = AIAgent(
        api_key="test-key",
        base_url="https://openrouter.ai/api/v1",
        model="test/model",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
    )
    agent._client_kwargs["default_headers"] = {"X-Stale": "yes"}

    agent._apply_client_headers_for_base_url("https://api.example.com/v1")

    assert "default_headers" not in agent._client_kwargs
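These deleted tests documented a three-way dispatch on the client base URL. A hypothetical reconstruction of the method they exercised — header values copied from the assertions above, everything else assumed:

```python
def _apply_client_headers_for_base_url(self, base_url: str) -> None:
    # Sketch of the dispatch the deleted tests pinned down; not shipped code.
    if "openrouter.ai" in base_url:
        self._client_kwargs["default_headers"] = {
            "HTTP-Referer": "https://hermes-agent.nousresearch.com",
            "X-OpenRouter-Title": "Hermes Agent",
        }
    elif "ai-gateway.vercel.sh" in base_url:
        self._client_kwargs["default_headers"] = {
            "HTTP-Referer": "https://hermes-agent.nousresearch.com",
            "X-Title": "Hermes Agent",
            "User-Agent": "HermesAgent/0.0.0",  # the real code derives the version
        }
    else:
        # Unknown providers get no attribution headers at all.
        self._client_kwargs.pop("default_headers", None)
```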
115
tests/test_tool_call_benchmark.py
Normal file
@@ -0,0 +1,115 @@
"""Tests for Issue #796 tool-calling benchmark coverage and reporting."""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import patch
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))
|
||||
|
||||
from tool_call_benchmark import ( # noqa: E402
|
||||
CallResult,
|
||||
DEFAULT_COMPARE_MODELS,
|
||||
ISSUE_796_CATEGORY_COUNTS,
|
||||
ToolCall,
|
||||
generate_report,
|
||||
run_single_test,
|
||||
suite_category_counts,
|
||||
)
|
||||
|
||||
|
||||
def test_suite_counts_match_issue_796_distribution():
|
||||
counts = suite_category_counts()
|
||||
assert counts == ISSUE_796_CATEGORY_COUNTS
|
||||
assert sum(counts.values()) == 100
|
||||
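`suite_category_counts` plausibly reduces to a tally over the suite's `ToolCall.category` fields; the module-level `SUITE` name is an assumption:

```python
from collections import Counter

def suite_category_counts() -> dict[str, int]:
    # Assumed implementation: tally categories across the 100-call suite.
    return dict(Counter(call.category for call in SUITE))
```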


def test_default_compare_models_cover_issue_796_lanes():
    assert len(DEFAULT_COMPARE_MODELS) == 3
    assert any("gemma-4-31b" in spec for spec in DEFAULT_COMPARE_MODELS)
    assert any("gemma-4-26b" in spec for spec in DEFAULT_COMPARE_MODELS)
    assert any("mimo-v2-pro" in spec for spec in DEFAULT_COMPARE_MODELS)


def test_generate_report_includes_parallel_and_cost_metrics(tmp_path):
    output_path = tmp_path / "report.md"
    results = [
        CallResult(
            test_id="file-01",
            category="file",
            model="gemma-4-31b",
            prompt="Read the file.",
            expected_tool="read_file",
            success=True,
            tool_called="read_file",
            schema_ok=True,
            tool_args_valid=True,
            execution_ok=True,
            tool_count=2,
            parallel_ok=True,
            latency_s=1.25,
            total_tokens=123,
            estimated_cost_usd=0.0012,
            cost_status="estimated",
        ),
        CallResult(
            test_id="web-01",
            category="web",
            model="mimo-v2-pro",
            prompt="Search the web.",
            expected_tool="web_search",
            success=False,
            tool_called="web_search",
            schema_ok=True,
            tool_args_valid=False,
            execution_ok=False,
            tool_count=1,
            parallel_ok=False,
            latency_s=2.5,
            error="bad args",
            total_tokens=456,
            estimated_cost_usd=None,
            cost_status="unknown",
            skipped=True,
            skip_reason="web_search unavailable",
        ),
    ]

    report = generate_report(results, ["gemma-4-31b", "mimo-v2-pro"], output_path)

    assert output_path.exists()
    assert "Parallel tool success" in report
    assert "Avg token cost per call (USD)" in report
    assert "Skipped / unavailable" in report
    assert "Requested category mix" in report


def test_run_single_test_skips_when_expected_tool_unavailable():
    class FakeAgent:
        def __init__(self, *args, **kwargs):
            self.valid_tool_names = {"read_file", "terminal"}
            self.session_input_tokens = 0
            self.session_output_tokens = 0
            self.session_cache_read_tokens = 0
            self.session_cache_write_tokens = 0
            self.session_api_calls = 0
            self.base_url = ""
            self.api_key = None

        def run_conversation(self, *args, **kwargs):
            raise AssertionError("run_conversation should not be called for unavailable tools")

    tc = ToolCall(
        id="mcp-01",
        category="mcp",
        prompt="Use an MCP tool to list resources.",
        expected_tool="",
        expected_tool_prefix="mcp_",
    )

    with patch.dict(sys.modules, {"run_agent": SimpleNamespace(AIAgent=FakeAgent)}):
        result = run_single_test(tc, "gemini:gemma-4-31b-it", "gemini")

    assert result.skipped is True
    assert "mcp_" in result.skip_reason
    assert result.success is False
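The FakeAgent test implies `run_single_test` gates on tool availability before spending a model call: an `expected_tool_prefix` matches any registered tool name with that prefix, otherwise the exact `expected_tool` must be registered. A standalone sketch of that gate, with a hypothetical helper name:

```python
def _expected_tool_available(tc, valid_tool_names) -> bool:
    # Assumed gate: prefix tests (e.g. "mcp_") match any registered tool
    # starting with the prefix; exact tests require exact membership.
    if tc.expected_tool_prefix:
        return any(name.startswith(tc.expected_tool_prefix) for name in valid_tool_names)
    return tc.expected_tool in valid_tool_names
```

When the gate fails, the harness presumably returns a `CallResult` with `skipped=True`, `success=False`, and a `skip_reason` naming the missing prefix, which is exactly what the assertions check.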