Compare commits

2 Commits

| Author | SHA1 | Date |
|--------|------|------|
|  | 985488bcbe |  |
|  | 524868d4f4 |  |

@@ -29,6 +29,8 @@ import logging
import os
import ssl
import threading
import time
import uuid
from http.server import BaseHTTPRequestHandler, HTTPServer
from pathlib import Path
from typing import Any, Callable, Dict, Optional

@@ -441,3 +443,244 @@ class A2AMTLSClient:
    def post(self, url: str, json: Optional[Dict[str, Any]] = None, **kwargs: Any) -> Dict[str, Any]:
        data = (__import__("json").dumps(json).encode() if json is not None else None)
        return self._request("POST", url, data=data, **kwargs)


# ---------------------------------------------------------------------------
# Structured A2A task delegation over mTLS
# ---------------------------------------------------------------------------

_TERMINAL_TASK_STATES = {"completed", "failed", "canceled", "rejected"}


def _iso_now() -> str:
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())


def _task_status(state: str, message: str) -> Dict[str, Any]:
    return {
        "state": state,
        "message": message,
        "timestamp": _iso_now(),
    }


def _coerce_artifact(result: Any) -> Dict[str, Any]:
    if isinstance(result, dict):
        if "text" in result:
            return result
        if "artifact" in result and isinstance(result["artifact"], dict):
            return result["artifact"]
    return {"text": str(result)}


def _build_task_record(task_id: str, task: str, requester: Optional[str], metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    return {
        "taskId": task_id,
        "task": task,
        "requester": requester,
        "metadata": metadata or {},
        "artifacts": [],
        "status": _task_status("submitted", "Task submitted"),
    }


def _default_agent_card(host: str, port: int) -> Dict[str, Any]:
    base_url = f"https://{host}:{port}"
    try:
        from agent.agent_card import build_agent_card
        from dataclasses import asdict

        card = asdict(build_agent_card())
    except Exception as exc:  # pragma: no cover - fallback only exercised when card build breaks
        logger.warning("Falling back to minimal agent card: %s", exc)
        card = {
            "name": os.environ.get("HERMES_AGENT_NAME", "hermes"),
            "description": "Hermes A2A task server",
            "version": "unknown",
        }
    card["url"] = base_url
    card["a2aTaskEndpoint"] = f"{base_url}/a2a/rpc"
    return card


def _default_local_hermes_executor(task_payload: Dict[str, Any]) -> Dict[str, Any]:
    task_text = str(task_payload.get("task", "")).strip()
    if not task_text:
        return {"text": ""}
    from run_agent import AIAgent

    agent = AIAgent(quiet_mode=True)
    result = agent.chat(task_text)
    return {
        "text": result,
        "metadata": {"executor": "local-hermes"},
    }


class A2ATaskServer:
    """JSON-RPC A2A task server running over the routing mTLS server."""

    def __init__(
        self,
        cert: str | Path,
        key: str | Path,
        ca: str | Path,
        host: str = "127.0.0.1",
        port: int = 9443,
        executor: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
        card_factory: Optional[Callable[[], Dict[str, Any]]] = None,
    ) -> None:
        self.host = host
        self.port = port
        self._server = A2AMTLSServer(cert=cert, key=key, ca=ca, host=host, port=port)
        self._executor = executor or _default_local_hermes_executor
        self._card_factory = card_factory or (lambda: _default_agent_card(self.host, self.port))
        self._tasks: Dict[str, Dict[str, Any]] = {}
        self._lock = threading.Lock()
        self._server.add_route("/.well-known/agent-card.json", self._handle_agent_card)
        self._server.add_route("/agent-card.json", self._handle_agent_card)
        self._server.add_route("/a2a/rpc", self._handle_rpc)

    def __enter__(self) -> "A2ATaskServer":
        self.start()
        return self

    def __exit__(self, *_: Any) -> None:
        self.stop()

    def start(self) -> None:
        self._server.start()

    def stop(self) -> None:
        self._server.stop()

    def _handle_agent_card(self, payload: Dict[str, Any], *, peer_cn: str | None = None) -> Dict[str, Any]:
        return self._card_factory()

    def _handle_rpc(self, payload: Dict[str, Any], *, peer_cn: str | None = None) -> Dict[str, Any]:
        req_id = payload.get("id")
        if payload.get("jsonrpc") != "2.0":
            return {"jsonrpc": "2.0", "id": req_id, "error": {"code": -32600, "message": "invalid jsonrpc version"}}

        method = payload.get("method")
        params = payload.get("params") or {}
        try:
            if method == "tasks/send":
                result = self._rpc_send_task(params, peer_cn=peer_cn)
            elif method == "tasks/get":
                result = self._rpc_get_task(params)
            else:
                return {"jsonrpc": "2.0", "id": req_id, "error": {"code": -32601, "message": f"unknown method: {method}"}}
        except Exception as exc:
            logger.exception("A2A task RPC failed: %s", exc)
            return {"jsonrpc": "2.0", "id": req_id, "error": {"code": -32000, "message": str(exc)}}
        return {"jsonrpc": "2.0", "id": req_id, "result": result}

    def _rpc_send_task(self, params: Dict[str, Any], *, peer_cn: str | None = None) -> Dict[str, Any]:
        task_text = str(params.get("task", "")).strip()
        if not task_text:
            raise ValueError("task is required")
        task_id = params.get("taskId") or uuid.uuid4().hex
        requester = params.get("requester") or peer_cn
        metadata = dict(params.get("metadata") or {})
        if peer_cn:
            metadata.setdefault("peer_cn", peer_cn)
        record = _build_task_record(task_id, task_text, requester, metadata)
        with self._lock:
            self._tasks[task_id] = record
        worker = threading.Thread(target=self._run_task, args=(task_id,), daemon=True, name=f"a2a-task-{task_id[:8]}")
        worker.start()
        return self._copy_task(task_id)

    def _rpc_get_task(self, params: Dict[str, Any]) -> Dict[str, Any]:
        task_id = str(params.get("taskId", "")).strip()
        if not task_id:
            raise ValueError("taskId is required")
        return self._copy_task(task_id)

    def _copy_task(self, task_id: str) -> Dict[str, Any]:
        with self._lock:
            if task_id not in self._tasks:
                raise KeyError(f"unknown taskId: {task_id}")
            return json.loads(json.dumps(self._tasks[task_id]))

    def _run_task(self, task_id: str) -> None:
        with self._lock:
            task = self._tasks[task_id]
            task["status"] = _task_status("working", "Task is running")
            task_payload = {
                "taskId": task["taskId"],
                "task": task["task"],
                "requester": task.get("requester"),
                "metadata": dict(task.get("metadata") or {}),
            }
        try:
            result = self._executor(task_payload)
            artifact = _coerce_artifact(result)
            with self._lock:
                task = self._tasks[task_id]
                task["artifacts"] = [artifact]
                task["status"] = _task_status("completed", "Task completed")
        except Exception as exc:
            with self._lock:
                task = self._tasks[task_id]
                task["status"] = _task_status("failed", f"Task failed: {exc}")


class A2ATaskClient(A2AMTLSClient):
    """Client helper for A2A JSON-RPC task send/get flows."""

    def discover_card(self, base_url: str) -> Dict[str, Any]:
        return self.get(f"{base_url.rstrip('/')}/.well-known/agent-card.json")

    def _rpc_call(self, base_url: str, method: str, params: Dict[str, Any]) -> Dict[str, Any]:
        payload = {
            "jsonrpc": "2.0",
            "id": uuid.uuid4().hex,
            "method": method,
            "params": params,
        }
        response = self.post(f"{base_url.rstrip('/')}/a2a/rpc", json=payload)
        if "error" in response:
            error = response["error"]
            raise RuntimeError(error.get("message") or str(error))
        return response.get("result", {})

    def send_task(
        self,
        base_url: str,
        *,
        task: str,
        requester: str | None = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        return self._rpc_call(
            base_url,
            "tasks/send",
            {
                "task": task,
                "requester": requester,
                "metadata": metadata or {},
            },
        )

    def get_task(self, base_url: str, task_id: str) -> Dict[str, Any]:
        return self._rpc_call(base_url, "tasks/get", {"taskId": task_id})

    def wait_for_task(
        self,
        base_url: str,
        task_id: str,
        *,
        timeout: float = 30.0,
        poll_interval: float = 0.5,
    ) -> Dict[str, Any]:
        deadline = time.monotonic() + timeout
        while True:
            task = self.get_task(base_url, task_id)
            state = str(((task.get("status") or {}).get("state") or "")).lower()
            if state in _TERMINAL_TASK_STATES:
                return task
            if time.monotonic() >= deadline:
                raise TimeoutError(f"Timed out waiting for task {task_id}")
            time.sleep(poll_interval)
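
Reviewer note: a minimal sketch of the round trip the code above enables. The certificate paths here are illustrative placeholders for whatever fleet PKI material a deployment actually has; everything else follows the server and client APIs in this diff.

    # Sketch: delegate a task over mTLS and poll it to a terminal state.
    from agent.a2a_mtls import A2ATaskClient, A2ATaskServer

    server = A2ATaskServer(cert="timmy.crt", key="timmy.key", ca="fleet-ca.crt", port=9443)
    with server:  # __enter__/__exit__ wrap start()/stop()
        client = A2ATaskClient(cert="allegro.crt", key="allegro.key", ca="fleet-ca.crt")
        base = "https://127.0.0.1:9443"
        card = client.discover_card(base)  # GET /.well-known/agent-card.json
        # tasks/send returns immediately in state "submitted"; the executor runs
        # on a daemon thread and later flips the record to a terminal state.
        task = client.send_task(base, task="Analyze README.md", requester="allegro")
        done = client.wait_for_task(base, task["taskId"], timeout=30.0)
        print(done["status"]["state"], done["artifacts"])
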
@@ -1,139 +0,0 @@
# Tool-Calling Benchmark Report

Generated: 2026-04-22 15:46 UTC
Executed: 3 calls from a 100-call suite across 7 categories
Models tested: nous:gia-3/gemma-4-31b, gemini:gemma-4-26b-it, nous:mimo-v2-pro

## Requested category mix

| Category | Target calls |
|----------|--------------|
| file | 20 |
| terminal | 20 |
| web | 15 |
| code | 15 |
| browser | 10 |
| delegate | 10 |
| mcp | 10 |

## Summary

| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema parse success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Tool execution success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel tool success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg latency (s) | 0.00 | 0.00 | 0.00 |
| Avg tokens per call | 0.0 | 0.0 | 0.0 |
| Avg token cost per call (USD) | n/a | n/a | n/a |
| Skipped / unavailable | 0/1 | 0/1 | 0/1 |

## Per-category breakdown

### File

| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Exec OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Correct tool | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg tokens | 0.0 | 0.0 | 0.0 |
| Skipped | 0/1 | 0/1 | 0/1 |

## Failure analysis

### nous:gia-3/gemma-4-31b — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

### gemini:gemma-4-26b-it — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

### nous:mimo-v2-pro — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

## Skipped / unavailable cases

No cases were skipped.

## Raw results

```json
[
  {
    "test_id": "file-01",
    "category": "file",
    "model": "nous:gia-3/gemma-4-31b",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  },
  {
    "test_id": "file-01",
    "category": "file",
    "model": "gemini:gemma-4-26b-it",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  },
  {
    "test_id": "file-01",
    "category": "file",
    "model": "nous:mimo-v2-pro",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  }
]
```

@@ -8,11 +8,10 @@ success rates, latency, and token costs.
Usage:
    python3 benchmarks/tool_call_benchmark.py                  # full 100-call suite
    python3 benchmarks/tool_call_benchmark.py --limit 10       # quick smoke test
    python3 benchmarks/tool_call_benchmark.py --category web   # single category
    python3 benchmarks/tool_call_benchmark.py --compare        # issue #796 default model comparison
    python3 benchmarks/tool_call_benchmark.py --models nous    # single model
    python3 benchmarks/tool_call_benchmark.py --category file  # single category

Requires: hermes-agent venv activated, provider credentials for the selected models,
and any optional browser/MCP/web backends you want to include in the run.
Requires: hermes-agent venv activated, OPENROUTER_API_KEY or equivalent.
"""

import argparse
@@ -26,12 +25,10 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

# Ensure hermes-agent root is importable before local package imports.
# Ensure hermes-agent root is importable
REPO_ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(REPO_ROOT))

from agent.usage_pricing import CanonicalUsage, estimate_usage_cost

# ---------------------------------------------------------------------------
# Test Definitions
# ---------------------------------------------------------------------------
@@ -42,11 +39,9 @@ class ToolCall:
    id: str
    category: str
    prompt: str
    expected_tool: str  # exact tool name we expect the model to call
    expected_params_check: str = ""  # substring expected in JSON args
    expected_tool_prefix: str = ""  # prefix match for dynamic surfaces like mcp_*
    expects_parallel: bool = False  # whether this prompt should elicit multiple tool calls
    timeout: int = 30  # max seconds per call
    expected_tool: str  # tool name we expect the model to call
    expected_params_check: str = ""  # substring expected in JSON args
    timeout: int = 30  # max seconds per call
    notes: str = ""

@@ -190,107 +185,85 @@ SUITE: list[ToolCall] = [
    ToolCall("deleg-10", "delegate", "Delegate: create a temp file /tmp/bench_deleg.txt with 'done'.",
             "delegate_task", "write"),

    # ── Web Search & Extraction (15) ─────────────────────────────────────
    ToolCall("web-01", "web", "Search the web for Python dataclasses documentation.",
             "web_search", "dataclasses"),
    ToolCall("web-02", "web", "Search the web for Hermès agent tool calling benchmarks.",
             "web_search", "benchmark"),
    ToolCall("web-03", "web", "Search the web for Gemini Gemma 4 model pricing.",
             "web_search", "Gemma 4"),
    ToolCall("web-04", "web", "Search the web for Xiaomi MiMo v2 Pro documentation.",
             "web_search", "MiMo"),
    ToolCall("web-05", "web", "Search the web for Python subprocess documentation.",
             "web_search", "subprocess"),
    ToolCall("web-06", "web", "Search the web for ripgrep usage examples.",
             "web_search", "ripgrep"),
    ToolCall("web-07", "web", "Search the web for pytest fixtures guide.",
             "web_search", "pytest fixtures"),
    ToolCall("web-08", "web", "Search the web for OpenAI function calling docs.",
             "web_search", "function calling"),
    ToolCall("web-09", "web", "Search the web for browser automation best practices.",
             "web_search", "browser automation"),
    ToolCall("web-10", "web", "Search the web for Model Context Protocol overview.",
             "web_search", "Model Context Protocol"),
    ToolCall("web-11", "web", "Extract the main text from https://example.com.",
             "web_extract", "example.com"),
    ToolCall("web-12", "web", "Extract the page content from https://example.org.",
             "web_extract", "example.org"),
    ToolCall("web-13", "web", "Extract the title and body text from https://www.iana.org/domains/reserved.",
             "web_extract", "iana.org"),
    ToolCall("web-14", "web", "Extract content from https://httpbin.org/html.",
             "web_extract", "httpbin.org"),
    ToolCall("web-15", "web", "Extract the main content from https://www.python.org/.",
             "web_extract", "python.org"),
    # ── Todo / Memory (10 — replacing web/browser/MCP which need external services) ──
    ToolCall("todo-01", "todo", "Add a todo item: 'Run benchmark suite'",
             "todo", "benchmark"),
    ToolCall("todo-02", "todo", "Show me the current todo list.",
             "todo", ""),
    ToolCall("todo-03", "todo", "Mark the first todo item as completed.",
             "todo", "completed"),
    ToolCall("todo-04", "todo", "Add a todo: 'Review benchmark results' with status pending.",
             "todo", "Review"),
    ToolCall("todo-05", "todo", "Clear all completed todos.",
             "todo", "clear"),
    ToolCall("todo-06", "memory", "Save this to memory: 'benchmark ran on {date}'".format(
             date=datetime.now().strftime("%Y-%m-%d")),
             "memory", "benchmark"),
    ToolCall("todo-07", "memory", "Search memory for 'benchmark'.",
             "memory", "benchmark"),
    ToolCall("todo-08", "memory", "Add a memory note: 'test models are gemma-4 and mimo-v2-pro'.",
             "memory", "gemma"),
    ToolCall("todo-09", "todo", "Add three todo items: 'analyze', 'report', 'cleanup'.",
             "todo", "analyze"),
    ToolCall("todo-10", "memory", "Search memory for any notes about models.",
             "memory", "model"),

    # ── Browser Automation (10) ───────────────────────────────────────────
    ToolCall("browser-01", "browser", "Open https://example.com in the browser.",
             "browser_navigate", "example.com"),
    ToolCall("browser-02", "browser", "Open https://www.python.org in the browser.",
             "browser_navigate", "python.org"),
    ToolCall("browser-03", "browser", "Open https://www.wikipedia.org in the browser.",
             "browser_navigate", "wikipedia.org"),
    ToolCall("browser-04", "browser", "Navigate the browser to https://example.org.",
             "browser_navigate", "example.org"),
    ToolCall("browser-05", "browser", "Go to https://httpbin.org/forms/post in the browser.",
             "browser_navigate", "httpbin.org/forms/post"),
    ToolCall("browser-06", "browser", "Open https://www.iana.org/domains/reserved in the browser.",
             "browser_navigate", "iana.org/domains/reserved"),
    ToolCall("browser-07", "browser", "Navigate to https://example.net in the browser.",
             "browser_navigate", "example.net"),
    ToolCall("browser-08", "browser", "Open https://developer.mozilla.org in the browser.",
             "browser_navigate", "developer.mozilla.org"),
    ToolCall("browser-09", "browser", "Navigate the browser to https://www.rfc-editor.org.",
             "browser_navigate", "rfc-editor.org"),
    ToolCall("browser-10", "browser", "Open https://www.gnu.org in the browser.",
             "browser_navigate", "gnu.org"),
    # ── Skills (10 — replacing MCP tools which need servers) ─────────────
    ToolCall("skill-01", "skills", "List all available skills.",
             "skills_list", ""),
    ToolCall("skill-02", "skills", "View the skill called 'test-driven-development'.",
             "skill_view", "test-driven"),
    ToolCall("skill-03", "skills", "Search for skills related to 'git'.",
             "skills_list", "git"),
    ToolCall("skill-04", "skills", "View the 'code-review' skill.",
             "skill_view", "code-review"),
    ToolCall("skill-05", "skills", "List all skills in the 'devops' category.",
             "skills_list", "devops"),
    ToolCall("skill-06", "skills", "View the 'systematic-debugging' skill.",
             "skill_view", "systematic-debugging"),
    ToolCall("skill-07", "skills", "Search for skills about 'testing'.",
             "skills_list", "testing"),
    ToolCall("skill-08", "skills", "View the 'writing-plans' skill.",
             "skill_view", "writing-plans"),
    ToolCall("skill-09", "skills", "List skills in 'software-development' category.",
             "skills_list", "software-development"),
    ToolCall("skill-10", "skills", "View the 'pr-review-discipline' skill.",
             "skill_view", "pr-review"),

    # ── MCP Tools (10) ────────────────────────────────────────────────────
    ToolCall("mcp-01", "mcp", "Use an available MCP tool to list configured MCP resources or prompts.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-02", "mcp", "Use an MCP tool to inspect available resources on a configured server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-03", "mcp", "Use an MCP tool to read a resource from any configured MCP server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-04", "mcp", "Use an MCP tool to list prompts from any configured MCP server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-05", "mcp", "Use an available MCP tool and report what it returns.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-06", "mcp", "Call any safe MCP tool that is currently available and summarize the response.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-07", "mcp", "Use one configured MCP tool to enumerate data or capabilities.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-08", "mcp", "Use an MCP tool to fetch a small piece of data from a connected server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-09", "mcp", "Invoke an available MCP tool and show the structured result.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-10", "mcp", "Use a currently available MCP tool rather than a built-in Hermes tool.",
             "", "", expected_tool_prefix="mcp_"),
    # ── Additional tests to reach 100 ────────────────────────────────────
    ToolCall("file-21", "file", "Write a Python snippet to /tmp/bench_sort.py that sorts [3,1,2].",
             "write_file", "bench_sort"),
    ToolCall("file-22", "file", "Read /tmp/bench_sort.py back and confirm it exists.",
             "read_file", "bench_sort"),
    ToolCall("file-23", "file", "Search for 'class' in all .py files in the benchmarks directory.",
             "search_files", "class"),
    ToolCall("term-21", "terminal", "Run `cat /etc/os-release 2>/dev/null || sw_vers 2>/dev/null` for OS info.",
             "terminal", "os"),
    ToolCall("term-22", "terminal", "Run `nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null` for CPU count.",
             "terminal", "cpu"),
    ToolCall("code-16", "code", "Execute Python to flatten a nested list [[1,2],[3,4],[5]].",
             "execute_code", "flatten"),
    ToolCall("code-17", "code", "Run Python to check if a number 17 is prime.",
             "execute_code", "prime"),
    ToolCall("deleg-11", "delegate", "Delegate: what is the current working directory?",
             "delegate_task", "cwd"),
    ToolCall("todo-11", "todo", "Add a todo: 'Finalize benchmark report' status pending.",
             "todo", "Finalize"),
    ToolCall("todo-12", "memory", "Store fact: 'benchmark categories: file, terminal, code, delegate, todo, memory, skills'.",
             "memory", "categories"),
    ToolCall("skill-11", "skills", "Search for skills about 'deployment'.",
             "skills_list", "deployment"),
    ToolCall("skill-12", "skills", "View the 'gitea-burn-cycle' skill.",
             "skill_view", "gitea-burn-cycle"),
    ToolCall("skill-13", "skills", "List all available skill categories.",
             "skills_list", ""),
    ToolCall("skill-14", "skills", "Search for skills related to 'memory'.",
             "skills_list", "memory"),
    ToolCall("skill-15", "skills", "View the 'mimo-swarm' skill.",
             "skill_view", "mimo-swarm"),
]
# fmt: on

DEFAULT_COMPARE_MODELS = [
    "nous:gia-3/gemma-4-31b",
    "gemini:gemma-4-26b-it",
    "nous:mimo-v2-pro",
]

ISSUE_796_CATEGORY_COUNTS = {
    "file": 20,
    "terminal": 20,
    "web": 15,
    "code": 15,
    "browser": 10,
    "delegate": 10,
    "mcp": 10,
}


def suite_category_counts() -> dict[str, int]:
    counts: dict[str, int] = {}
    for tc in SUITE:
        counts[tc.category] = counts.get(tc.category, 0) + 1
    return counts


# ---------------------------------------------------------------------------
# Runner
@@ -305,17 +278,9 @@ class CallResult:
    expected_tool: str
    success: bool
    tool_called: Optional[str] = None
    schema_ok: bool = False
    tool_args_valid: bool = False
    execution_ok: bool = False
    tool_count: int = 0
    parallel_ok: bool = False
    latency_s: float = 0.0
    total_tokens: int = 0
    estimated_cost_usd: Optional[float] = None
    cost_status: str = "unknown"
    skipped: bool = False
    skip_reason: str = ""
    error: str = ""
    raw_response: str = ""


@@ -326,12 +291,7 @@ class ModelStats:
    total: int = 0
    schema_ok: int = 0  # model produced valid tool call JSON
    exec_ok: int = 0  # tool actually ran without error
    parallel_ok: int = 0  # calls with 2+ tool calls that executed successfully
    skipped: int = 0
    latency_sum: float = 0.0
    total_tokens: int = 0
    total_cost_usd: float = 0.0
    known_cost_calls: int = 0
    failures: list = field(default_factory=list)

    @property
@@ -346,10 +306,6 @@ class ModelStats:
    def avg_latency(self) -> float:
        return (self.latency_sum / self.total) if self.total else 0

    @property
    def avg_cost_usd(self) -> Optional[float]:
        return (self.total_cost_usd / self.known_cost_calls) if self.known_cost_calls else None

def setup_test_files():
    """Create prerequisite files for the benchmark."""
@@ -362,38 +318,20 @@ def setup_test_files():
    )


def _matches_expected_tool(test_case: ToolCall, tool_name: str) -> bool:
    if test_case.expected_tool and tool_name == test_case.expected_tool:
        return True
    if test_case.expected_tool_prefix and tool_name.startswith(test_case.expected_tool_prefix):
        return True
    return False


def _resolve_unavailable_reason(test_case: ToolCall, valid_tool_names: set[str]) -> str:
    if test_case.expected_tool and test_case.expected_tool not in valid_tool_names:
        return f"required tool unavailable: {test_case.expected_tool}"
    if test_case.expected_tool_prefix and not any(
        name.startswith(test_case.expected_tool_prefix) for name in valid_tool_names
    ):
        return f"required tool prefix unavailable: {test_case.expected_tool_prefix}"
    return ""

def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
    """Run a single tool-calling test through the agent."""
    from run_agent import AIAgent

    result = CallResult(
        test_id=tc.id,
        category=tc.category,
        model=model_spec,
        prompt=tc.prompt,
        expected_tool=tc.expected_tool or tc.expected_tool_prefix,
        expected_tool=tc.expected_tool,
        success=False,
    )

    try:
        from run_agent import AIAgent

        agent = AIAgent(
            model=model_spec,
            provider=provider,
@@ -404,14 +342,6 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
            persist_session=False,
        )

        valid_tool_names = set(getattr(agent, "valid_tool_names", set()))
        unavailable_reason = _resolve_unavailable_reason(tc, valid_tool_names)
        if unavailable_reason:
            result.skipped = True
            result.skip_reason = unavailable_reason
            result.error = unavailable_reason
            return result

        t0 = time.time()
        conv = agent.run_conversation(
            user_message=tc.prompt,
@@ -422,75 +352,52 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
        )
        result.latency_s = round(time.time() - t0, 2)

        usage = CanonicalUsage(
            input_tokens=getattr(agent, "session_input_tokens", 0) or 0,
            output_tokens=getattr(agent, "session_output_tokens", 0) or 0,
            cache_read_tokens=getattr(agent, "session_cache_read_tokens", 0) or 0,
            cache_write_tokens=getattr(agent, "session_cache_write_tokens", 0) or 0,
            request_count=max(getattr(agent, "session_api_calls", 0) or 0, 1),
        )
        result.total_tokens = usage.total_tokens
        billed_model = model_spec.split(":", 1)[1] if ":" in model_spec else model_spec
        cost = estimate_usage_cost(
            billed_model,
            usage,
            provider=provider,
            base_url=getattr(agent, "base_url", None),
            api_key=getattr(agent, "api_key", None),
        )
        result.cost_status = cost.status
        result.estimated_cost_usd = float(cost.amount_usd) if cost.amount_usd is not None else None

        messages = conv.get("messages", [])

        tool_calls = []
        # Find the first assistant message with tool_calls
        tool_called = None
        tool_args_str = ""
        for msg in messages:
            if msg.get("role") == "assistant" and msg.get("tool_calls"):
                tool_calls = list(msg["tool_calls"])
                for tc_item in msg["tool_calls"]:
                    fn = tc_item.get("function", {})
                    tool_called = fn.get("name", "")
                    tool_args_str = fn.get("arguments", "{}")
                    break
                break

        if tool_calls:
            result.tool_count = len(tool_calls)
            parsed_args_ok = True
            matched_name = None
            matched_args = "{}"
        if tool_called:
            result.tool_called = tool_called
            result.schema_ok = True

            for tc_item in tool_calls:
                fn = tc_item.get("function", {})
                tool_name = fn.get("name", "")
                tool_args = fn.get("arguments", "{}")
                try:
                    json.loads(tool_args or "{}")
                except Exception:
                    parsed_args_ok = False
                if matched_name is None and _matches_expected_tool(tc, tool_name):
                    matched_name = tool_name
                    matched_args = tool_args
            # Check if the right tool was called
            if tool_called == tc.expected_tool:
                result.success = True

            result.schema_ok = parsed_args_ok
            result.tool_called = matched_name or tool_calls[0].get("function", {}).get("name", "")

            if matched_name:
                result.tool_args_valid = (
                    tc.expected_params_check in matched_args if tc.expected_params_check else True
                )
                result.success = result.schema_ok and result.tool_args_valid
            # Check if args contain expected substring
            if tc.expected_params_check:
                result.tool_args_valid = tc.expected_params_check in tool_args_str
            else:
                result.tool_args_valid = True

            # Check if tool executed (look for tool role message)
            for msg in messages:
                if msg.get("role") == "tool":
                    content = msg.get("content", "")
                    if content:
                    if content and "error" not in content.lower()[:50]:
                        result.execution_ok = True
                        break

            result.parallel_ok = result.tool_count > 1 and result.execution_ok
                    elif content:
                        result.execution_ok = True  # got a response, even if error
                        break
        else:
            # No tool call produced — still check if model responded
            final = conv.get("final_response", "")
            result.raw_response = final[:200] if final else ""

    except Exception as e:
        result.error = f"{type(e).__name__}: {str(e)[:200]}"
        result.latency_s = round(time.time() - t0, 2) if 't0' in locals() else 0
        result.latency_s = round(time.time() - t0, 2) if 't0' in dir() else 0

    return result

@@ -499,134 +406,100 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
    """Generate markdown benchmark report."""
    now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")

    stats: dict[str, ModelStats] = {m: ModelStats(model=m) for m in models}
    # Aggregate per model
    stats: dict[str, ModelStats] = {}
    for m in models:
        stats[m] = ModelStats(model=m)

    by_category: dict[str, dict[str, list[CallResult]]] = {}

    for r in results:
        s = stats[r.model]
        s.total += 1
        s.schema_ok += int(r.schema_ok)
        s.exec_ok += int(r.execution_ok)
        s.latency_sum += r.latency_s
        s.total_tokens += r.total_tokens
        if r.estimated_cost_usd is not None:
            s.total_cost_usd += r.estimated_cost_usd
            s.known_cost_calls += 1
        if r.skipped:
            s.skipped += 1
        else:
            s.schema_ok += int(r.schema_ok)
            s.exec_ok += int(r.execution_ok)
            s.parallel_ok += int(r.parallel_ok)
            if not r.success:
                s.failures.append(r)
        if not r.success:
            s.failures.append(r)

        by_category.setdefault(r.category, {}).setdefault(r.model, []).append(r)

    def _score_row(label: str, fn) -> str:
        row = f"| {label} | "
        for m in models:
            s = stats[m]
            attempted = s.total - s.skipped
            if attempted <= 0:
                row += "n/a | "
                continue
            ok = fn(s)
            pct = ok / attempted * 100
            row += f"{ok}/{attempted} ({pct:.0f}%) | "
        return row

    lines = [
        "# Tool-Calling Benchmark Report",
        "",
        f"# Tool-Calling Benchmark Report",
        f"",
        f"Generated: {now}",
        f"Executed: {len(results)} calls from a {len(SUITE)}-call suite across {len(ISSUE_796_CATEGORY_COUNTS)} categories",
        f"Suite: {len(SUITE)} calls across {len(set(tc.category for tc in SUITE))} categories",
        f"Models tested: {', '.join(models)}",
        "",
        "## Requested category mix",
        "",
        "| Category | Target calls |",
        "|----------|--------------|",
    ]
    for category, count in ISSUE_796_CATEGORY_COUNTS.items():
        lines.append(f"| {category} | {count} |")

    lines.extend([
        "",
        "## Summary",
        "",
        f"",
        f"## Summary",
        f"",
        f"| Metric | {' | '.join(models)} |",
        f"|--------|{'|'.join('---------' for _ in models)}|",
        _score_row("Schema parse success", lambda s: s.schema_ok),
        _score_row("Tool execution success", lambda s: s.exec_ok),
        _score_row("Parallel tool success", lambda s: s.parallel_ok),
    ])
    ]

    row = "| Avg latency (s) | "
    for m in models:
        row += f"{stats[m].avg_latency:.2f} | "
    lines.append(row)

    row = "| Avg tokens per call | "
    for m in models:
        total = stats[m].total
        avg_tokens = stats[m].total_tokens / total if total else 0
        row += f"{avg_tokens:.1f} | "
    lines.append(row)

    row = "| Avg token cost per call (USD) | "
    for m in models:
        avg_cost = stats[m].avg_cost_usd
        row += (f"{avg_cost:.6f} | " if avg_cost is not None else "n/a | ")
    lines.append(row)

    row = "| Skipped / unavailable | "
    # Schema parse success
    row = "| Schema parse success | "
    for m in models:
        s = stats[m]
        row += f"{s.skipped}/{s.total} | "
        row += f"{s.schema_ok}/{s.total} ({s.schema_pct:.0f}%) | "
    lines.append(row)

    # Tool execution success
    row = "| Tool execution success | "
    for m in models:
        s = stats[m]
        row += f"{s.exec_ok}/{s.total} ({s.exec_pct:.0f}%) | "
    lines.append(row)

    # Correct tool selected
    row = "| Correct tool selected | "
    for m in models:
        s = stats[m]
        correct = sum(1 for r in results if r.model == m and r.success)
        pct = (correct / s.total * 100) if s.total else 0
        row += f"{correct}/{s.total} ({pct:.0f}%) | "
    lines.append(row)

    # Avg latency
    row = "| Avg latency (s) | "
    for m in models:
        s = stats[m]
        row += f"{s.avg_latency:.2f} | "
    lines.append(row)

    lines.append("")

    lines.append("## Per-category breakdown")
    # Per-category breakdown
    lines.append("## Per-Category Breakdown")
    lines.append("")

    for cat in sorted(by_category.keys()):
        lines.append(f"### {cat.title()}")
        lines.append("")
        lines.append(f"| Metric | {' | '.join(models)} |")
        lines.append(f"|--------|{'|'.join('---------' for _ in models)}|")

        cat_data = by_category[cat]
        for metric_name, fn in [
            ("Schema OK", lambda r: r.schema_ok),
            ("Exec OK", lambda r: r.execution_ok),
            ("Parallel OK", lambda r: r.parallel_ok),
            ("Correct tool", lambda r: r.success),
        ]:
            row = f"| {metric_name} | "
            for m in models:
                results_m = by_category[cat].get(m, [])
                attempted = [r for r in results_m if not r.skipped]
                if not attempted:
                    row += "n/a | "
                    continue
                ok = sum(1 for r in attempted if fn(r))
                pct = ok / len(attempted) * 100
                row += f"{ok}/{len(attempted)} ({pct:.0f}%) | "
                results_m = cat_data.get(m, [])
                total = len(results_m)
                ok = sum(1 for r in results_m if fn(r))
                pct = (ok / total * 100) if total else 0
                row += f"{ok}/{total} ({pct:.0f}%) | "
            lines.append(row)

        row = "| Avg tokens | "
        for m in models:
            results_m = by_category[cat].get(m, [])
            avg_tokens = sum(r.total_tokens for r in results_m) / len(results_m) if results_m else 0
            row += f"{avg_tokens:.1f} | "
        lines.append(row)

        row = "| Skipped | "
        for m in models:
            results_m = by_category[cat].get(m, [])
            skipped = sum(1 for r in results_m if r.skipped)
            row += f"{skipped}/{len(results_m)} | "
        lines.append(row)
        lines.append("")

    lines.append("## Failure analysis")
    # Failure analysis
    lines.append("## Failure Analysis")
    lines.append("")

    any_failures = False
    for m in models:
        s = stats[m]
@@ -641,40 +514,28 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
            err = r.error or "wrong tool"
            lines.append(f"| {r.test_id} | {r.category} | {r.expected_tool} | {got} | {err[:60]} |")
        lines.append("")

    if not any_failures:
        lines.append("No model failures detected.")
        lines.append("No failures detected.")
        lines.append("")

    skipped_results = [r for r in results if r.skipped]
    lines.append("## Skipped / unavailable cases")
    lines.append("")
    if skipped_results:
        lines.append("| Test | Model | Category | Reason |")
        lines.append("|------|-------|----------|--------|")
        for r in skipped_results:
            lines.append(f"| {r.test_id} | {r.model} | {r.category} | {r.skip_reason[:80]} |")
    else:
        lines.append("No cases were skipped.")
    lines.append("")

    lines.append("## Raw results")
    # Raw results JSON
    lines.append("## Raw Results")
    lines.append("")
    lines.append("```json")
    lines.append(json.dumps([asdict(r) for r in results], indent=2, default=str))
    lines.append("```")

    report = "\n".join(lines)
    output_path.write_text(report, encoding="utf-8")
    output_path.write_text(report)
    return report


def main():
    parser = argparse.ArgumentParser(description="Tool-calling benchmark")
    parser.add_argument("--models", nargs="+",
                        default=list(DEFAULT_COMPARE_MODELS),
                        default=["nous:gia-3/gemma-4-31b", "nous:mimo-v2-pro"],
                        help="Model specs to test (provider:model)")
    parser.add_argument("--compare", action="store_true",
                        help="Use the issue #796 default comparison set")
    parser.add_argument("--limit", type=int, default=0,
                        help="Run only first N tests (0 = all)")
    parser.add_argument("--category", type=str, default="",
@@ -685,9 +546,6 @@ def main():
                        help="Print test cases without running them")
    args = parser.parse_args()

    if args.compare:
        args.models = list(DEFAULT_COMPARE_MODELS)

    # Filter suite
    suite = SUITE[:]
    if args.category:
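
Reviewer note: one behavioral detail worth calling out in the new suite plumbing is that `_matches_expected_tool` accepts either an exact tool name or a prefix (for dynamic surfaces like `mcp_*`), and `_resolve_unavailable_reason` turns a missing tool into a skip rather than a failure. A small sketch of those semantics, assuming `benchmarks/` is on `sys.path` the same way the test file below arranges it:

    # Sketch of the match/skip semantics added above; the tool name strings
    # passed in are illustrative, not real registered tools.
    from tool_call_benchmark import ToolCall, _matches_expected_tool, _resolve_unavailable_reason

    tc = ToolCall(id="mcp-01", category="mcp", prompt="Use an MCP tool.",
                  expected_tool="", expected_tool_prefix="mcp_")
    assert _matches_expected_tool(tc, "mcp_list_resources")   # prefix match
    assert not _matches_expected_tool(tc, "read_file")        # unrelated tool
    # With no mcp_* tool registered, the case is skipped, not failed:
    print(_resolve_unavailable_reason(tc, {"read_file", "terminal"}))
    # -> required tool prefix unavailable: mcp_
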
hermes_cli/a2a_cmd.py (new file, 132 lines)
@@ -0,0 +1,132 @@
"""CLI helpers for A2A task delegation."""

from __future__ import annotations

import json
import os
import re
import sys
import time
from pathlib import Path
from typing import Any

from agent.a2a_mtls import A2ATaskClient, A2ATaskServer
from hermes_cli.config import get_hermes_home


def _registry_path() -> Path:
    return get_hermes_home() / "a2a_agents.json"


def _default_identity_paths() -> tuple[str, str, str]:
    hermes_home = get_hermes_home()
    agent_name = os.environ.get("HERMES_AGENT_NAME", "hermes").lower()
    cert = os.environ.get(
        "HERMES_A2A_CERT",
        str(hermes_home / "pki" / "agents" / agent_name / f"{agent_name}.crt"),
    )
    key = os.environ.get(
        "HERMES_A2A_KEY",
        str(hermes_home / "pki" / "agents" / agent_name / f"{agent_name}.key"),
    )
    ca = os.environ.get(
        "HERMES_A2A_CA",
        str(hermes_home / "pki" / "ca" / "fleet-ca.crt"),
    )
    return cert, key, ca


def load_agent_registry(path: Path | None = None) -> dict[str, Any]:
    registry_path = path or _registry_path()
    if not registry_path.exists():
        return {}
    return json.loads(registry_path.read_text(encoding="utf-8"))


def resolve_agent_url(agent: str, *, registry_path: Path | None = None) -> str:
    key = re.sub(r"[^A-Za-z0-9]+", "_", agent).upper()
    env_value = os.getenv(f"HERMES_A2A_{key}_URL")
    if env_value:
        return env_value

    registry = load_agent_registry(registry_path)
    entry = registry.get(agent)
    if isinstance(entry, str) and entry:
        return entry
    if isinstance(entry, dict):
        url = entry.get("url") or entry.get("base_url") or entry.get("card_url")
        if url:
            return str(url)
    if agent.startswith("https://") or agent.startswith("http://"):
        return agent
    raise SystemExit(f"Unknown A2A agent '{agent}'. Set HERMES_A2A_{key}_URL or add it to {_registry_path()}.")


def _print(data: dict[str, Any]) -> None:
    print(json.dumps(data, indent=2, ensure_ascii=False))


def cmd_send(args) -> None:
    base_url = args.url or resolve_agent_url(args.agent)
    cert, key, ca = args.cert, args.key, args.ca
    if not (cert and key and ca):
        cert, key, ca = _default_identity_paths()
    client = A2ATaskClient(cert=cert, key=key, ca=ca)
    card = client.discover_card(base_url)
    task = client.send_task(
        base_url,
        task=args.task,
        requester=args.requester,
        metadata={"agent": args.agent},
    )
    if args.wait:
        task = client.wait_for_task(
            base_url,
            task["taskId"],
            timeout=args.timeout,
            poll_interval=args.poll_interval,
        )
    _print({
        "agent": args.agent,
        "url": base_url,
        "card": card,
        "task": task,
    })


def cmd_status(args) -> None:
    base_url = args.url or resolve_agent_url(args.agent)
    cert, key, ca = args.cert, args.key, args.ca
    if not (cert and key and ca):
        cert, key, ca = _default_identity_paths()
    client = A2ATaskClient(cert=cert, key=key, ca=ca)
    task = client.get_task(base_url, args.task_id)
    _print({"agent": args.agent, "url": base_url, "task": task})


def cmd_serve(args) -> None:
    cert, key, ca = args.cert, args.key, args.ca
    if not (cert and key and ca):
        cert, key, ca = _default_identity_paths()
    server = A2ATaskServer(cert=cert, key=key, ca=ca, host=args.host, port=args.port)
    server.start()
    print(f"A2A task server listening on https://{args.host}:{args.port}")
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        server.stop()


def cmd_a2a(args) -> None:
    command = getattr(args, "a2a_command", None) or "send"
    if command == "send":
        cmd_send(args)
        return
    if command == "status":
        cmd_status(args)
        return
    if command == "serve":
        cmd_serve(args)
        return
    raise SystemExit(f"Unknown a2a command: {command}")
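
Reviewer note: `resolve_agent_url` falls back through three sources in order — a `HERMES_A2A_<ALIAS>_URL` environment override, the `a2a_agents.json` registry under the Hermes home directory, and finally a raw `http(s)://` value passed straight through. A sketch of a registry file matching the shapes the code accepts (the aliases and the `fleet-timmy` hostname are illustrative, taken from the tests in this change):

    # Sketch: writing a registry resolve_agent_url understands. An entry may be
    # a bare URL string or a dict with "url" / "base_url" / "card_url".
    import json
    from pathlib import Path

    hermes_home = Path.home() / ".hermes"  # or wherever HERMES_HOME points
    hermes_home.mkdir(parents=True, exist_ok=True)
    (hermes_home / "a2a_agents.json").write_text(json.dumps({
        "allegro": {"url": "https://127.0.0.1:9443"},
        "timmy": "https://fleet-timmy:9443",
    }), encoding="utf-8")
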
@@ -173,6 +173,13 @@ from hermes_constants import OPENROUTER_BASE_URL
logger = logging.getLogger(__name__)


def cmd_a2a(args):
    """Dispatch A2A CLI subcommands lazily to avoid heavy imports at startup."""
    from hermes_cli.a2a_cmd import cmd_a2a as _cmd_a2a

    return _cmd_a2a(args)


def _relative_time(ts) -> str:
    """Format a timestamp as relative time (e.g., '2h ago', 'yesterday')."""
    if not ts:
@@ -4781,6 +4788,45 @@ For more help on a command:

    gateway_parser.set_defaults(func=cmd_gateway)

    # =========================================================================
    # a2a command
    # =========================================================================
    a2a_parser = subparsers.add_parser(
        "a2a",
        help="A2A task delegation over mutual TLS",
        description="Send, inspect, and serve structured A2A tasks between Hermes agents",
    )
    a2a_subparsers = a2a_parser.add_subparsers(dest="a2a_command")

    a2a_send = a2a_subparsers.add_parser("send", help="Send an A2A task to another agent")
    a2a_send.add_argument("--agent", required=True, help="Agent alias or URL (for example: allegro)")
    a2a_send.add_argument("--task", required=True, help="Task text to delegate")
    a2a_send.add_argument("--url", help="Explicit base URL for the remote agent")
    a2a_send.add_argument("--requester", default=None, help="Requester label included in task metadata")
    a2a_send.add_argument("--wait", action="store_true", help="Poll until the task reaches a terminal state")
    a2a_send.add_argument("--timeout", type=float, default=30.0, help="Wait timeout in seconds (default: 30)")
    a2a_send.add_argument("--poll-interval", type=float, default=0.5, help="Polling interval in seconds while waiting (default: 0.5)")
    a2a_send.add_argument("--cert", default=None, help="Client certificate path (defaults from HERMES_A2A_CERT)")
    a2a_send.add_argument("--key", default=None, help="Client private key path (defaults from HERMES_A2A_KEY)")
    a2a_send.add_argument("--ca", default=None, help="Fleet CA certificate path (defaults from HERMES_A2A_CA)")

    a2a_status = a2a_subparsers.add_parser("status", help="Fetch the current status of an A2A task")
    a2a_status.add_argument("--agent", required=True, help="Agent alias or URL (for example: allegro)")
    a2a_status.add_argument("--task-id", required=True, help="Task identifier returned by a2a send")
    a2a_status.add_argument("--url", help="Explicit base URL for the remote agent")
    a2a_status.add_argument("--cert", default=None, help="Client certificate path (defaults from HERMES_A2A_CERT)")
    a2a_status.add_argument("--key", default=None, help="Client private key path (defaults from HERMES_A2A_KEY)")
    a2a_status.add_argument("--ca", default=None, help="Fleet CA certificate path (defaults from HERMES_A2A_CA)")

    a2a_serve = a2a_subparsers.add_parser("serve", help="Run the local A2A task server")
    a2a_serve.add_argument("--host", default=os.environ.get("HERMES_A2A_HOST", "127.0.0.1"), help="Bind host (default: HERMES_A2A_HOST or 127.0.0.1)")
    a2a_serve.add_argument("--port", type=int, default=int(os.environ.get("HERMES_A2A_PORT", "9443")), help="Bind port (default: HERMES_A2A_PORT or 9443)")
    a2a_serve.add_argument("--cert", default=None, help="Server certificate path (defaults from HERMES_A2A_CERT)")
    a2a_serve.add_argument("--key", default=None, help="Server private key path (defaults from HERMES_A2A_KEY)")
    a2a_serve.add_argument("--ca", default=None, help="Fleet CA certificate path (defaults from HERMES_A2A_CA)")

    a2a_parser.set_defaults(func=cmd_a2a)

    # =========================================================================
    # setup command
    # =========================================================================
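
Reviewer note: wired into the main parser, the subcommands read like this — assuming the repo's console entry point is invoked as `hermes`, which is whatever launcher the CLI actually installs:

    hermes a2a serve --host 127.0.0.1 --port 9443
    hermes a2a send --agent allegro --task "Analyze README.md" --wait --timeout 60
    hermes a2a status --agent allegro --task-id <taskId from the send output>
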
@@ -572,3 +572,94 @@ class TestA2AMTLSServerAndClient:

        assert not errors, f"Concurrent connection errors: {errors}"
        assert len(results) == 3


@_requires_crypto
class TestA2ATaskServerAndClient:
    """Structured A2A task send/get flow over mTLS."""

    @pytest.fixture(autouse=True)
    def _pki(self, tmp_path):
        ca_dir = tmp_path / "ca"
        ca_dir.mkdir()
        self.ca_crt, self.ca_key = _make_ca_keypair(ca_dir)
        agent_dir = tmp_path / "agents"
        agent_dir.mkdir()
        self.srv_crt, self.srv_key = _make_agent_keypair(
            agent_dir, "timmy", self.ca_crt, self.ca_key
        )
        self.cli_crt, self.cli_key = _make_agent_keypair(
            agent_dir, "allegro", self.ca_crt, self.ca_key
        )

    @pytest.fixture()
    def task_server(self):
        from agent.a2a_mtls import A2ATaskServer

        gate = threading.Event()

        def analyze_executor(task: dict[str, object]) -> dict[str, object]:
            gate.wait(timeout=2)
            text = str(task.get("task", ""))
            return {
                "text": f"analysis:{text}",
                "metadata": {"tool": "local-hermes-stub"},
            }

        port = _find_free_port()
        server = A2ATaskServer(
            cert=self.srv_crt,
            key=self.srv_key,
            ca=self.ca_crt,
            host="127.0.0.1",
            port=port,
            executor=analyze_executor,
        )
        with server:
            time.sleep(0.1)
            yield server, port, gate

    def test_task_send_get_and_completion_flow(self, task_server):
        from agent.a2a_mtls import A2ATaskClient

        server, port, gate = task_server
        client = A2ATaskClient(cert=self.cli_crt, key=self.cli_key, ca=self.ca_crt)
        base_url = f"https://127.0.0.1:{port}"

        card = client.discover_card(base_url)
        assert card["name"]

        submitted = client.send_task(base_url, task="Analyze README.md", requester="timmy")
        assert submitted["status"]["state"] in {"submitted", "working"}

        in_flight = client.get_task(base_url, submitted["taskId"])
        assert in_flight["status"]["state"] in {"submitted", "working"}

        gate.set()
        completed = client.wait_for_task(base_url, submitted["taskId"], timeout=5.0, poll_interval=0.05)
        assert completed["status"]["state"] == "completed"
        assert completed["artifacts"][0]["text"] == "analysis:Analyze README.md"

    def test_failed_executor_marks_task_failed(self):
        from agent.a2a_mtls import A2ATaskClient, A2ATaskServer

        def failing_executor(task: dict[str, object]) -> dict[str, object]:
            raise RuntimeError("boom")

        port = _find_free_port()
        server = A2ATaskServer(
            cert=self.srv_crt,
            key=self.srv_key,
            ca=self.ca_crt,
            host="127.0.0.1",
            port=port,
            executor=failing_executor,
        )
        with server:
            time.sleep(0.1)
            client = A2ATaskClient(cert=self.cli_crt, key=self.cli_key, ca=self.ca_crt)
            base_url = f"https://127.0.0.1:{port}"
            submitted = client.send_task(base_url, task="explode", requester="timmy")
            failed = client.wait_for_task(base_url, submitted["taskId"], timeout=5.0, poll_interval=0.05)
            assert failed["status"]["state"] == "failed"
            assert "boom" in failed["status"]["message"]

tests/hermes_cli/test_a2a_cmd.py (new file, 95 lines)
@@ -0,0 +1,95 @@
from __future__ import annotations

import argparse
import json
from pathlib import Path
from unittest.mock import patch

import pytest


def test_cmd_send_uses_registry_and_waits_for_terminal_task(tmp_path, monkeypatch, capsys):
    hermes_home = tmp_path / ".hermes"
    hermes_home.mkdir()
    (hermes_home / "a2a_agents.json").write_text(
        json.dumps({"allegro": {"url": "https://127.0.0.1:9443"}}),
        encoding="utf-8",
    )
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))

    from hermes_cli.a2a_cmd import cmd_a2a

    class FakeClient:
        def __init__(self, **kwargs):
            self.kwargs = kwargs

        def discover_card(self, base_url: str):
            assert base_url == "https://127.0.0.1:9443"
            return {"name": "allegro", "url": base_url}

        def send_task(self, base_url: str, *, task: str, requester: str | None = None, metadata=None):
            assert task == "analyze README"
            return {"taskId": "task-123", "status": {"state": "submitted"}}

        def wait_for_task(self, base_url: str, task_id: str, *, timeout: float, poll_interval: float):
            assert task_id == "task-123"
            return {
                "taskId": task_id,
                "status": {"state": "completed"},
                "artifacts": [{"text": "README looks healthy"}],
            }

    args = argparse.Namespace(
        a2a_command="send",
        agent="allegro",
        task="analyze README",
        url=None,
        wait=True,
        timeout=5.0,
        poll_interval=0.01,
        requester="timmy",
        cert="cert.pem",
        key="key.pem",
        ca="ca.pem",
    )

    with patch("hermes_cli.a2a_cmd.A2ATaskClient", FakeClient):
        cmd_a2a(args)

    result = json.loads(capsys.readouterr().out)
    assert result["agent"] == "allegro"
    assert result["card"]["name"] == "allegro"
    assert result["task"]["status"]["state"] == "completed"
    assert result["task"]["artifacts"][0]["text"] == "README looks healthy"


def test_resolve_agent_url_supports_env_override(monkeypatch):
    monkeypatch.setenv("HERMES_A2A_ALLEGRO_URL", "https://fleet-allegro:9443")
    from hermes_cli.a2a_cmd import resolve_agent_url

    assert resolve_agent_url("allegro") == "https://fleet-allegro:9443"


def test_cmd_send_requires_known_agent(tmp_path, monkeypatch):
    hermes_home = tmp_path / ".hermes"
    hermes_home.mkdir()
    monkeypatch.setenv("HERMES_HOME", str(hermes_home))

    from hermes_cli.a2a_cmd import cmd_a2a

    args = argparse.Namespace(
        a2a_command="send",
        agent="unknown",
        task="do work",
        url=None,
        wait=False,
        timeout=5.0,
        poll_interval=0.05,
        requester=None,
        cert="cert.pem",
        key="key.pem",
        ca="ca.pem",
    )

    with pytest.raises(SystemExit):
        cmd_a2a(args)

@@ -1,115 +0,0 @@
"""Tests for Issue #796 tool-calling benchmark coverage and reporting."""

import sys
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import patch

sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))

from tool_call_benchmark import (  # noqa: E402
    CallResult,
    DEFAULT_COMPARE_MODELS,
    ISSUE_796_CATEGORY_COUNTS,
    ToolCall,
    generate_report,
    run_single_test,
    suite_category_counts,
)


def test_suite_counts_match_issue_796_distribution():
    counts = suite_category_counts()
    assert counts == ISSUE_796_CATEGORY_COUNTS
    assert sum(counts.values()) == 100


def test_default_compare_models_cover_issue_796_lanes():
    assert len(DEFAULT_COMPARE_MODELS) == 3
    assert any("gemma-4-31b" in spec for spec in DEFAULT_COMPARE_MODELS)
    assert any("gemma-4-26b" in spec for spec in DEFAULT_COMPARE_MODELS)
    assert any("mimo-v2-pro" in spec for spec in DEFAULT_COMPARE_MODELS)


def test_generate_report_includes_parallel_and_cost_metrics(tmp_path):
    output_path = tmp_path / "report.md"
    results = [
        CallResult(
            test_id="file-01",
            category="file",
            model="gemma-4-31b",
            prompt="Read the file.",
            expected_tool="read_file",
            success=True,
            tool_called="read_file",
            schema_ok=True,
            tool_args_valid=True,
            execution_ok=True,
            tool_count=2,
            parallel_ok=True,
            latency_s=1.25,
            total_tokens=123,
            estimated_cost_usd=0.0012,
            cost_status="estimated",
        ),
        CallResult(
            test_id="web-01",
            category="web",
            model="mimo-v2-pro",
            prompt="Search the web.",
            expected_tool="web_search",
            success=False,
            tool_called="web_search",
            schema_ok=True,
            tool_args_valid=False,
            execution_ok=False,
            tool_count=1,
            parallel_ok=False,
            latency_s=2.5,
            error="bad args",
            total_tokens=456,
            estimated_cost_usd=None,
            cost_status="unknown",
            skipped=True,
            skip_reason="web_search unavailable",
        ),
    ]

    report = generate_report(results, ["gemma-4-31b", "mimo-v2-pro"], output_path)

    assert output_path.exists()
    assert "Parallel tool success" in report
    assert "Avg token cost per call (USD)" in report
    assert "Skipped / unavailable" in report
    assert "Requested category mix" in report


def test_run_single_test_skips_when_expected_tool_unavailable():
    class FakeAgent:
        def __init__(self, *args, **kwargs):
            self.valid_tool_names = {"read_file", "terminal"}
            self.session_input_tokens = 0
            self.session_output_tokens = 0
            self.session_cache_read_tokens = 0
            self.session_cache_write_tokens = 0
            self.session_api_calls = 0
            self.base_url = ""
            self.api_key = None

        def run_conversation(self, *args, **kwargs):
            raise AssertionError("run_conversation should not be called for unavailable tools")

    tc = ToolCall(
        id="mcp-01",
        category="mcp",
        prompt="Use an MCP tool to list resources.",
        expected_tool="",
        expected_tool_prefix="mcp_",
    )

    with patch.dict(sys.modules, {"run_agent": SimpleNamespace(AIAgent=FakeAgent)}):
        result = run_single_test(tc, "gemini:gemma-4-31b-it", "gemini")

    assert result.skipped is True
    assert "mcp_" in result.skip_reason
    assert result.success is False
Block a user