Compare commits

1 commit: 418e601f74
@@ -1,139 +0,0 @@
|
||||
# Tool-Calling Benchmark Report
|
||||
|
||||
Generated: 2026-04-22 15:46 UTC
|
||||
Executed: 3 calls from a 100-call suite across 7 categories
|
||||
Models tested: nous:gia-3/gemma-4-31b, gemini:gemma-4-26b-it, nous:mimo-v2-pro
|
||||
|
||||
## Requested category mix
|
||||
|
||||
| Category | Target calls |
|
||||
|----------|--------------|
|
||||
| file | 20 |
|
||||
| terminal | 20 |
|
||||
| web | 15 |
|
||||
| code | 15 |
|
||||
| browser | 10 |
|
||||
| delegate | 10 |
|
||||
| mcp | 10 |
|
||||
|
||||
## Summary
|
||||
|
||||
| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|
||||
|--------|---------|---------|---------|
|
||||
| Schema parse success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
|
||||
| Tool execution success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
|
||||
| Parallel tool success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
|
||||
| Avg latency (s) | 0.00 | 0.00 | 0.00 |
|
||||
| Avg tokens per call | 0.0 | 0.0 | 0.0 |
|
||||
| Avg token cost per call (USD) | n/a | n/a | n/a |
|
||||
| Skipped / unavailable | 0/1 | 0/1 | 0/1 |
|
||||
|
||||
## Per-category breakdown
|
||||
|
||||
### File
|
||||
|
||||
| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|
||||
|--------|---------|---------|---------|
|
||||
| Schema OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
|
||||
| Exec OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
|
||||
| Parallel OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
|
||||
| Correct tool | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
|
||||
| Avg tokens | 0.0 | 0.0 | 0.0 |
|
||||
| Skipped | 0/1 | 0/1 | 0/1 |
|
||||
|
||||
## Failure analysis
|
||||
|
||||
### nous:gia-3/gemma-4-31b — 1 failure
|
||||
|
||||
| Test | Category | Expected | Got | Error |
|
||||
|------|----------|----------|-----|-------|
|
||||
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |
|
||||
|
||||
### gemini:gemma-4-26b-it — 1 failure
|
||||
|
||||
| Test | Category | Expected | Got | Error |
|
||||
|------|----------|----------|-----|-------|
|
||||
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |
|
||||
|
||||
### nous:mimo-v2-pro — 1 failure
|
||||
|
||||
| Test | Category | Expected | Got | Error |
|
||||
|------|----------|----------|-----|-------|
|
||||
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |
|
||||
|
||||
## Skipped / unavailable cases
|
||||
|
||||
No cases were skipped.
|
||||
|
||||
## Raw results
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"test_id": "file-01",
|
||||
"category": "file",
|
||||
"model": "nous:gia-3/gemma-4-31b",
|
||||
"prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
|
||||
"expected_tool": "read_file",
|
||||
"success": false,
|
||||
"tool_called": null,
|
||||
"schema_ok": false,
|
||||
"tool_args_valid": false,
|
||||
"execution_ok": false,
|
||||
"tool_count": 0,
|
||||
"parallel_ok": false,
|
||||
"latency_s": 0,
|
||||
"total_tokens": 0,
|
||||
"estimated_cost_usd": null,
|
||||
"cost_status": "unknown",
|
||||
"skipped": false,
|
||||
"skip_reason": "",
|
||||
"error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
|
||||
"raw_response": ""
|
||||
},
|
||||
{
|
||||
"test_id": "file-01",
|
||||
"category": "file",
|
||||
"model": "gemini:gemma-4-26b-it",
|
||||
"prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
|
||||
"expected_tool": "read_file",
|
||||
"success": false,
|
||||
"tool_called": null,
|
||||
"schema_ok": false,
|
||||
"tool_args_valid": false,
|
||||
"execution_ok": false,
|
||||
"tool_count": 0,
|
||||
"parallel_ok": false,
|
||||
"latency_s": 0,
|
||||
"total_tokens": 0,
|
||||
"estimated_cost_usd": null,
|
||||
"cost_status": "unknown",
|
||||
"skipped": false,
|
||||
"skip_reason": "",
|
||||
"error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
|
||||
"raw_response": ""
|
||||
},
|
||||
{
|
||||
"test_id": "file-01",
|
||||
"category": "file",
|
||||
"model": "nous:mimo-v2-pro",
|
||||
"prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
|
||||
"expected_tool": "read_file",
|
||||
"success": false,
|
||||
"tool_called": null,
|
||||
"schema_ok": false,
|
||||
"tool_args_valid": false,
|
||||
"execution_ok": false,
|
||||
"tool_count": 0,
|
||||
"parallel_ok": false,
|
||||
"latency_s": 0,
|
||||
"total_tokens": 0,
|
||||
"estimated_cost_usd": null,
|
||||
"cost_status": "unknown",
|
||||
"skipped": false,
|
||||
"skip_reason": "",
|
||||
"error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
|
||||
"raw_response": ""
|
||||
}
|
||||
]
|
||||
```
|
||||
@@ -8,11 +8,10 @@ success rates, latency, and token costs.
|
||||
Usage:
|
||||
python3 benchmarks/tool_call_benchmark.py # full 100-call suite
|
||||
python3 benchmarks/tool_call_benchmark.py --limit 10 # quick smoke test
|
||||
python3 benchmarks/tool_call_benchmark.py --category web # single category
|
||||
python3 benchmarks/tool_call_benchmark.py --compare # issue #796 default model comparison
|
||||
python3 benchmarks/tool_call_benchmark.py --models nous # single model
|
||||
python3 benchmarks/tool_call_benchmark.py --category file # single category
|
||||
|
||||
Requires: hermes-agent venv activated, provider credentials for the selected models,
|
||||
and any optional browser/MCP/web backends you want to include in the run.
|
||||
Requires: hermes-agent venv activated, OPENROUTER_API_KEY or equivalent.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
@@ -26,12 +25,10 @@ from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
# Ensure hermes-agent root is importable before local package imports.
|
||||
# Ensure hermes-agent root is importable
|
||||
REPO_ROOT = Path(__file__).resolve().parent.parent
|
||||
sys.path.insert(0, str(REPO_ROOT))
|
||||
|
||||
from agent.usage_pricing import CanonicalUsage, estimate_usage_cost
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test Definitions
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -42,11 +39,9 @@ class ToolCall:
|
||||
id: str
|
||||
category: str
|
||||
prompt: str
|
||||
expected_tool: str # exact tool name we expect the model to call
|
||||
expected_params_check: str = "" # substring expected in JSON args
|
||||
expected_tool_prefix: str = "" # prefix match for dynamic surfaces like mcp_*
|
||||
expects_parallel: bool = False # whether this prompt should elicit multiple tool calls
|
||||
timeout: int = 30 # max seconds per call
|
||||
expected_tool: str # tool name we expect the model to call
|
||||
expected_params_check: str = "" # substring expected in JSON args
|
||||
timeout: int = 30 # max seconds per call
|
||||
notes: str = ""
|
||||
|
||||
|
||||
@@ -190,107 +185,85 @@ SUITE: list[ToolCall] = [
|
||||
ToolCall("deleg-10", "delegate", "Delegate: create a temp file /tmp/bench_deleg.txt with 'done'.",
|
||||
"delegate_task", "write"),
|
||||
|
||||
# ── Web Search & Extraction (15) ─────────────────────────────────────
|
||||
ToolCall("web-01", "web", "Search the web for Python dataclasses documentation.",
|
||||
"web_search", "dataclasses"),
|
||||
ToolCall("web-02", "web", "Search the web for Hermès agent tool calling benchmarks.",
|
||||
"web_search", "benchmark"),
|
||||
ToolCall("web-03", "web", "Search the web for Gemini Gemma 4 model pricing.",
|
||||
"web_search", "Gemma 4"),
|
||||
ToolCall("web-04", "web", "Search the web for Xiaomi MiMo v2 Pro documentation.",
|
||||
"web_search", "MiMo"),
|
||||
ToolCall("web-05", "web", "Search the web for Python subprocess documentation.",
|
||||
"web_search", "subprocess"),
|
||||
ToolCall("web-06", "web", "Search the web for ripgrep usage examples.",
|
||||
"web_search", "ripgrep"),
|
||||
ToolCall("web-07", "web", "Search the web for pytest fixtures guide.",
|
||||
"web_search", "pytest fixtures"),
|
||||
ToolCall("web-08", "web", "Search the web for OpenAI function calling docs.",
|
||||
"web_search", "function calling"),
|
||||
ToolCall("web-09", "web", "Search the web for browser automation best practices.",
|
||||
"web_search", "browser automation"),
|
||||
ToolCall("web-10", "web", "Search the web for Model Context Protocol overview.",
|
||||
"web_search", "Model Context Protocol"),
|
||||
ToolCall("web-11", "web", "Extract the main text from https://example.com.",
|
||||
"web_extract", "example.com"),
|
||||
ToolCall("web-12", "web", "Extract the page content from https://example.org.",
|
||||
"web_extract", "example.org"),
|
||||
ToolCall("web-13", "web", "Extract the title and body text from https://www.iana.org/domains/reserved.",
|
||||
"web_extract", "iana.org"),
|
||||
ToolCall("web-14", "web", "Extract content from https://httpbin.org/html.",
|
||||
"web_extract", "httpbin.org"),
|
||||
ToolCall("web-15", "web", "Extract the main content from https://www.python.org/.",
|
||||
"web_extract", "python.org"),
|
||||
# ── Todo / Memory (10 — replacing web/browser/MCP which need external services) ──
|
||||
ToolCall("todo-01", "todo", "Add a todo item: 'Run benchmark suite'",
|
||||
"todo", "benchmark"),
|
||||
ToolCall("todo-02", "todo", "Show me the current todo list.",
|
||||
"todo", ""),
|
||||
ToolCall("todo-03", "todo", "Mark the first todo item as completed.",
|
||||
"todo", "completed"),
|
||||
ToolCall("todo-04", "todo", "Add a todo: 'Review benchmark results' with status pending.",
|
||||
"todo", "Review"),
|
||||
ToolCall("todo-05", "todo", "Clear all completed todos.",
|
||||
"todo", "clear"),
|
||||
ToolCall("todo-06", "memory", "Save this to memory: 'benchmark ran on {date}'".format(
|
||||
date=datetime.now().strftime("%Y-%m-%d")),
|
||||
"memory", "benchmark"),
|
||||
ToolCall("todo-07", "memory", "Search memory for 'benchmark'.",
|
||||
"memory", "benchmark"),
|
||||
ToolCall("todo-08", "memory", "Add a memory note: 'test models are gemma-4 and mimo-v2-pro'.",
|
||||
"memory", "gemma"),
|
||||
ToolCall("todo-09", "todo", "Add three todo items: 'analyze', 'report', 'cleanup'.",
|
||||
"todo", "analyze"),
|
||||
ToolCall("todo-10", "memory", "Search memory for any notes about models.",
|
||||
"memory", "model"),
|
||||
|
||||
# ── Browser Automation (10) ───────────────────────────────────────────
|
||||
ToolCall("browser-01", "browser", "Open https://example.com in the browser.",
|
||||
"browser_navigate", "example.com"),
|
||||
ToolCall("browser-02", "browser", "Open https://www.python.org in the browser.",
|
||||
"browser_navigate", "python.org"),
|
||||
ToolCall("browser-03", "browser", "Open https://www.wikipedia.org in the browser.",
|
||||
"browser_navigate", "wikipedia.org"),
|
||||
ToolCall("browser-04", "browser", "Navigate the browser to https://example.org.",
|
||||
"browser_navigate", "example.org"),
|
||||
ToolCall("browser-05", "browser", "Go to https://httpbin.org/forms/post in the browser.",
|
||||
"browser_navigate", "httpbin.org/forms/post"),
|
||||
ToolCall("browser-06", "browser", "Open https://www.iana.org/domains/reserved in the browser.",
|
||||
"browser_navigate", "iana.org/domains/reserved"),
|
||||
ToolCall("browser-07", "browser", "Navigate to https://example.net in the browser.",
|
||||
"browser_navigate", "example.net"),
|
||||
ToolCall("browser-08", "browser", "Open https://developer.mozilla.org in the browser.",
|
||||
"browser_navigate", "developer.mozilla.org"),
|
||||
ToolCall("browser-09", "browser", "Navigate the browser to https://www.rfc-editor.org.",
|
||||
"browser_navigate", "rfc-editor.org"),
|
||||
ToolCall("browser-10", "browser", "Open https://www.gnu.org in the browser.",
|
||||
"browser_navigate", "gnu.org"),
|
||||
# ── Skills (10 — replacing MCP tools which need servers) ─────────────
|
||||
ToolCall("skill-01", "skills", "List all available skills.",
|
||||
"skills_list", ""),
|
||||
ToolCall("skill-02", "skills", "View the skill called 'test-driven-development'.",
|
||||
"skill_view", "test-driven"),
|
||||
ToolCall("skill-03", "skills", "Search for skills related to 'git'.",
|
||||
"skills_list", "git"),
|
||||
ToolCall("skill-04", "skills", "View the 'code-review' skill.",
|
||||
"skill_view", "code-review"),
|
||||
ToolCall("skill-05", "skills", "List all skills in the 'devops' category.",
|
||||
"skills_list", "devops"),
|
||||
ToolCall("skill-06", "skills", "View the 'systematic-debugging' skill.",
|
||||
"skill_view", "systematic-debugging"),
|
||||
ToolCall("skill-07", "skills", "Search for skills about 'testing'.",
|
||||
"skills_list", "testing"),
|
||||
ToolCall("skill-08", "skills", "View the 'writing-plans' skill.",
|
||||
"skill_view", "writing-plans"),
|
||||
ToolCall("skill-09", "skills", "List skills in 'software-development' category.",
|
||||
"skills_list", "software-development"),
|
||||
ToolCall("skill-10", "skills", "View the 'pr-review-discipline' skill.",
|
||||
"skill_view", "pr-review"),
|
||||
|
||||
# ── MCP Tools (10) ────────────────────────────────────────────────────
|
||||
ToolCall("mcp-01", "mcp", "Use an available MCP tool to list configured MCP resources or prompts.",
|
||||
"", "", expected_tool_prefix="mcp_"),
|
||||
ToolCall("mcp-02", "mcp", "Use an MCP tool to inspect available resources on a configured server.",
|
||||
"", "", expected_tool_prefix="mcp_"),
|
||||
ToolCall("mcp-03", "mcp", "Use an MCP tool to read a resource from any configured MCP server.",
|
||||
"", "", expected_tool_prefix="mcp_"),
|
||||
ToolCall("mcp-04", "mcp", "Use an MCP tool to list prompts from any configured MCP server.",
|
||||
"", "", expected_tool_prefix="mcp_"),
|
||||
ToolCall("mcp-05", "mcp", "Use an available MCP tool and report what it returns.",
|
||||
"", "", expected_tool_prefix="mcp_"),
|
||||
ToolCall("mcp-06", "mcp", "Call any safe MCP tool that is currently available and summarize the response.",
|
||||
"", "", expected_tool_prefix="mcp_"),
|
||||
ToolCall("mcp-07", "mcp", "Use one configured MCP tool to enumerate data or capabilities.",
|
||||
"", "", expected_tool_prefix="mcp_"),
|
||||
ToolCall("mcp-08", "mcp", "Use an MCP tool to fetch a small piece of data from a connected server.",
|
||||
"", "", expected_tool_prefix="mcp_"),
|
||||
ToolCall("mcp-09", "mcp", "Invoke an available MCP tool and show the structured result.",
|
||||
"", "", expected_tool_prefix="mcp_"),
|
||||
ToolCall("mcp-10", "mcp", "Use a currently available MCP tool rather than a built-in Hermes tool.",
|
||||
"", "", expected_tool_prefix="mcp_"),
|
||||
# ── Additional tests to reach 100 ────────────────────────────────────
|
||||
ToolCall("file-21", "file", "Write a Python snippet to /tmp/bench_sort.py that sorts [3,1,2].",
|
||||
"write_file", "bench_sort"),
|
||||
ToolCall("file-22", "file", "Read /tmp/bench_sort.py back and confirm it exists.",
|
||||
"read_file", "bench_sort"),
|
||||
ToolCall("file-23", "file", "Search for 'class' in all .py files in the benchmarks directory.",
|
||||
"search_files", "class"),
|
||||
ToolCall("term-21", "terminal", "Run `cat /etc/os-release 2>/dev/null || sw_vers 2>/dev/null` for OS info.",
|
||||
"terminal", "os"),
|
||||
ToolCall("term-22", "terminal", "Run `nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null` for CPU count.",
|
||||
"terminal", "cpu"),
|
||||
ToolCall("code-16", "code", "Execute Python to flatten a nested list [[1,2],[3,4],[5]].",
|
||||
"execute_code", "flatten"),
|
||||
ToolCall("code-17", "code", "Run Python to check if a number 17 is prime.",
|
||||
"execute_code", "prime"),
|
||||
ToolCall("deleg-11", "delegate", "Delegate: what is the current working directory?",
|
||||
"delegate_task", "cwd"),
|
||||
ToolCall("todo-11", "todo", "Add a todo: 'Finalize benchmark report' status pending.",
|
||||
"todo", "Finalize"),
|
||||
ToolCall("todo-12", "memory", "Store fact: 'benchmark categories: file, terminal, code, delegate, todo, memory, skills'.",
|
||||
"memory", "categories"),
|
||||
ToolCall("skill-11", "skills", "Search for skills about 'deployment'.",
|
||||
"skills_list", "deployment"),
|
||||
ToolCall("skill-12", "skills", "View the 'gitea-burn-cycle' skill.",
|
||||
"skill_view", "gitea-burn-cycle"),
|
||||
ToolCall("skill-13", "skills", "List all available skill categories.",
|
||||
"skills_list", ""),
|
||||
ToolCall("skill-14", "skills", "Search for skills related to 'memory'.",
|
||||
"skills_list", "memory"),
|
||||
ToolCall("skill-15", "skills", "View the 'mimo-swarm' skill.",
|
||||
"skill_view", "mimo-swarm"),
|
||||
]
|
||||
# fmt: on
|
||||
|
||||
DEFAULT_COMPARE_MODELS = [
|
||||
"nous:gia-3/gemma-4-31b",
|
||||
"gemini:gemma-4-26b-it",
|
||||
"nous:mimo-v2-pro",
|
||||
]
|
||||
|
||||
ISSUE_796_CATEGORY_COUNTS = {
|
||||
"file": 20,
|
||||
"terminal": 20,
|
||||
"web": 15,
|
||||
"code": 15,
|
||||
"browser": 10,
|
||||
"delegate": 10,
|
||||
"mcp": 10,
|
||||
}
|
||||
|
||||
|
||||
def suite_category_counts() -> dict[str, int]:
|
||||
counts: dict[str, int] = {}
|
||||
for tc in SUITE:
|
||||
counts[tc.category] = counts.get(tc.category, 0) + 1
|
||||
return counts
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Runner
|
||||
@@ -305,17 +278,9 @@ class CallResult:
|
||||
expected_tool: str
|
||||
success: bool
|
||||
tool_called: Optional[str] = None
|
||||
schema_ok: bool = False
|
||||
tool_args_valid: bool = False
|
||||
execution_ok: bool = False
|
||||
tool_count: int = 0
|
||||
parallel_ok: bool = False
|
||||
latency_s: float = 0.0
|
||||
total_tokens: int = 0
|
||||
estimated_cost_usd: Optional[float] = None
|
||||
cost_status: str = "unknown"
|
||||
skipped: bool = False
|
||||
skip_reason: str = ""
|
||||
error: str = ""
|
||||
raw_response: str = ""
|
||||
|
||||
@@ -326,12 +291,7 @@ class ModelStats:
|
||||
total: int = 0
|
||||
schema_ok: int = 0 # model produced valid tool call JSON
|
||||
exec_ok: int = 0 # tool actually ran without error
|
||||
parallel_ok: int = 0 # calls with 2+ tool calls that executed successfully
|
||||
skipped: int = 0
|
||||
latency_sum: float = 0.0
|
||||
total_tokens: int = 0
|
||||
total_cost_usd: float = 0.0
|
||||
known_cost_calls: int = 0
|
||||
failures: list = field(default_factory=list)
|
||||
|
||||
@property
|
||||
@@ -346,10 +306,6 @@ class ModelStats:
|
||||
def avg_latency(self) -> float:
|
||||
return (self.latency_sum / self.total) if self.total else 0
|
||||
|
||||
@property
|
||||
def avg_cost_usd(self) -> Optional[float]:
|
||||
return (self.total_cost_usd / self.known_cost_calls) if self.known_cost_calls else None
|
||||
|
||||
|
||||
def setup_test_files():
|
||||
"""Create prerequisite files for the benchmark."""
|
||||
@@ -362,38 +318,20 @@ def setup_test_files():
|
||||
)
|
||||
|
||||
|
||||
def _matches_expected_tool(test_case: ToolCall, tool_name: str) -> bool:
|
||||
if test_case.expected_tool and tool_name == test_case.expected_tool:
|
||||
return True
|
||||
if test_case.expected_tool_prefix and tool_name.startswith(test_case.expected_tool_prefix):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _resolve_unavailable_reason(test_case: ToolCall, valid_tool_names: set[str]) -> str:
|
||||
if test_case.expected_tool and test_case.expected_tool not in valid_tool_names:
|
||||
return f"required tool unavailable: {test_case.expected_tool}"
|
||||
if test_case.expected_tool_prefix and not any(
|
||||
name.startswith(test_case.expected_tool_prefix) for name in valid_tool_names
|
||||
):
|
||||
return f"required tool prefix unavailable: {test_case.expected_tool_prefix}"
|
||||
return ""
|
||||
|
||||
|
||||
def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
|
||||
"""Run a single tool-calling test through the agent."""
|
||||
from run_agent import AIAgent
|
||||
|
||||
result = CallResult(
|
||||
test_id=tc.id,
|
||||
category=tc.category,
|
||||
model=model_spec,
|
||||
prompt=tc.prompt,
|
||||
expected_tool=tc.expected_tool or tc.expected_tool_prefix,
|
||||
expected_tool=tc.expected_tool,
|
||||
success=False,
|
||||
)
|
||||
|
||||
try:
|
||||
from run_agent import AIAgent
|
||||
|
||||
agent = AIAgent(
|
||||
model=model_spec,
|
||||
provider=provider,
|
||||
@@ -404,14 +342,6 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
|
||||
persist_session=False,
|
||||
)
|
||||
|
||||
valid_tool_names = set(getattr(agent, "valid_tool_names", set()))
|
||||
unavailable_reason = _resolve_unavailable_reason(tc, valid_tool_names)
|
||||
if unavailable_reason:
|
||||
result.skipped = True
|
||||
result.skip_reason = unavailable_reason
|
||||
result.error = unavailable_reason
|
||||
return result
|
||||
|
||||
t0 = time.time()
|
||||
conv = agent.run_conversation(
|
||||
user_message=tc.prompt,
|
||||
@@ -422,75 +352,52 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
|
||||
)
|
||||
result.latency_s = round(time.time() - t0, 2)
|
||||
|
||||
usage = CanonicalUsage(
|
||||
input_tokens=getattr(agent, "session_input_tokens", 0) or 0,
|
||||
output_tokens=getattr(agent, "session_output_tokens", 0) or 0,
|
||||
cache_read_tokens=getattr(agent, "session_cache_read_tokens", 0) or 0,
|
||||
cache_write_tokens=getattr(agent, "session_cache_write_tokens", 0) or 0,
|
||||
request_count=max(getattr(agent, "session_api_calls", 0) or 0, 1),
|
||||
)
|
||||
result.total_tokens = usage.total_tokens
|
||||
billed_model = model_spec.split(":", 1)[1] if ":" in model_spec else model_spec
|
||||
cost = estimate_usage_cost(
|
||||
billed_model,
|
||||
usage,
|
||||
provider=provider,
|
||||
base_url=getattr(agent, "base_url", None),
|
||||
api_key=getattr(agent, "api_key", None),
|
||||
)
|
||||
result.cost_status = cost.status
|
||||
result.estimated_cost_usd = float(cost.amount_usd) if cost.amount_usd is not None else None
|
||||
|
||||
messages = conv.get("messages", [])
|
||||
|
||||
tool_calls = []
|
||||
# Find the first assistant message with tool_calls
|
||||
tool_called = None
|
||||
tool_args_str = ""
|
||||
for msg in messages:
|
||||
if msg.get("role") == "assistant" and msg.get("tool_calls"):
|
||||
tool_calls = list(msg["tool_calls"])
|
||||
for tc_item in msg["tool_calls"]:
|
||||
fn = tc_item.get("function", {})
|
||||
tool_called = fn.get("name", "")
|
||||
tool_args_str = fn.get("arguments", "{}")
|
||||
break
|
||||
break
|
||||
|
||||
if tool_calls:
|
||||
result.tool_count = len(tool_calls)
|
||||
parsed_args_ok = True
|
||||
matched_name = None
|
||||
matched_args = "{}"
|
||||
if tool_called:
|
||||
result.tool_called = tool_called
|
||||
result.schema_ok = True
|
||||
|
||||
for tc_item in tool_calls:
|
||||
fn = tc_item.get("function", {})
|
||||
tool_name = fn.get("name", "")
|
||||
tool_args = fn.get("arguments", "{}")
|
||||
try:
|
||||
json.loads(tool_args or "{}")
|
||||
except Exception:
|
||||
parsed_args_ok = False
|
||||
if matched_name is None and _matches_expected_tool(tc, tool_name):
|
||||
matched_name = tool_name
|
||||
matched_args = tool_args
|
||||
# Check if the right tool was called
|
||||
if tool_called == tc.expected_tool:
|
||||
result.success = True
|
||||
|
||||
result.schema_ok = parsed_args_ok
|
||||
result.tool_called = matched_name or tool_calls[0].get("function", {}).get("name", "")
|
||||
|
||||
if matched_name:
|
||||
result.tool_args_valid = (
|
||||
tc.expected_params_check in matched_args if tc.expected_params_check else True
|
||||
)
|
||||
result.success = result.schema_ok and result.tool_args_valid
|
||||
# Check if args contain expected substring
|
||||
if tc.expected_params_check:
|
||||
result.tool_args_valid = tc.expected_params_check in tool_args_str
|
||||
else:
|
||||
result.tool_args_valid = True
|
||||
|
||||
# Check if tool executed (look for tool role message)
|
||||
for msg in messages:
|
||||
if msg.get("role") == "tool":
|
||||
content = msg.get("content", "")
|
||||
if content:
|
||||
if content and "error" not in content.lower()[:50]:
|
||||
result.execution_ok = True
|
||||
break
|
||||
|
||||
result.parallel_ok = result.tool_count > 1 and result.execution_ok
|
||||
elif content:
|
||||
result.execution_ok = True # got a response, even if error
|
||||
break
|
||||
else:
|
||||
# No tool call produced — still check if model responded
|
||||
final = conv.get("final_response", "")
|
||||
result.raw_response = final[:200] if final else ""
|
||||
|
||||
except Exception as e:
|
||||
result.error = f"{type(e).__name__}: {str(e)[:200]}"
|
||||
result.latency_s = round(time.time() - t0, 2) if 't0' in locals() else 0
|
||||
result.latency_s = round(time.time() - t0, 2) if 't0' in dir() else 0
|
||||
|
||||
return result
|
||||
|
||||
@@ -499,134 +406,100 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
|
||||
"""Generate markdown benchmark report."""
|
||||
now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")
|
||||
|
||||
stats: dict[str, ModelStats] = {m: ModelStats(model=m) for m in models}
|
||||
# Aggregate per model
|
||||
stats: dict[str, ModelStats] = {}
|
||||
for m in models:
|
||||
stats[m] = ModelStats(model=m)
|
||||
|
||||
by_category: dict[str, dict[str, list[CallResult]]] = {}
|
||||
|
||||
for r in results:
|
||||
s = stats[r.model]
|
||||
s.total += 1
|
||||
s.schema_ok += int(r.schema_ok)
|
||||
s.exec_ok += int(r.execution_ok)
|
||||
s.latency_sum += r.latency_s
|
||||
s.total_tokens += r.total_tokens
|
||||
if r.estimated_cost_usd is not None:
|
||||
s.total_cost_usd += r.estimated_cost_usd
|
||||
s.known_cost_calls += 1
|
||||
if r.skipped:
|
||||
s.skipped += 1
|
||||
else:
|
||||
s.schema_ok += int(r.schema_ok)
|
||||
s.exec_ok += int(r.execution_ok)
|
||||
s.parallel_ok += int(r.parallel_ok)
|
||||
if not r.success:
|
||||
s.failures.append(r)
|
||||
if not r.success:
|
||||
s.failures.append(r)
|
||||
|
||||
by_category.setdefault(r.category, {}).setdefault(r.model, []).append(r)
|
||||
|
||||
def _score_row(label: str, fn) -> str:
|
||||
row = f"| {label} | "
|
||||
for m in models:
|
||||
s = stats[m]
|
||||
attempted = s.total - s.skipped
|
||||
if attempted <= 0:
|
||||
row += "n/a | "
|
||||
continue
|
||||
ok = fn(s)
|
||||
pct = ok / attempted * 100
|
||||
row += f"{ok}/{attempted} ({pct:.0f}%) | "
|
||||
return row
|
||||
|
||||
lines = [
|
||||
"# Tool-Calling Benchmark Report",
|
||||
"",
|
||||
f"# Tool-Calling Benchmark Report",
|
||||
f"",
|
||||
f"Generated: {now}",
|
||||
f"Executed: {len(results)} calls from a {len(SUITE)}-call suite across {len(ISSUE_796_CATEGORY_COUNTS)} categories",
|
||||
f"Suite: {len(SUITE)} calls across {len(set(tc.category for tc in SUITE))} categories",
|
||||
f"Models tested: {', '.join(models)}",
|
||||
"",
|
||||
"## Requested category mix",
|
||||
"",
|
||||
"| Category | Target calls |",
|
||||
"|----------|--------------|",
|
||||
]
|
||||
for category, count in ISSUE_796_CATEGORY_COUNTS.items():
|
||||
lines.append(f"| {category} | {count} |")
|
||||
|
||||
lines.extend([
|
||||
"",
|
||||
"## Summary",
|
||||
"",
|
||||
f"",
|
||||
f"## Summary",
|
||||
f"",
|
||||
f"| Metric | {' | '.join(models)} |",
|
||||
f"|--------|{'|'.join('---------' for _ in models)}|",
|
||||
_score_row("Schema parse success", lambda s: s.schema_ok),
|
||||
_score_row("Tool execution success", lambda s: s.exec_ok),
|
||||
_score_row("Parallel tool success", lambda s: s.parallel_ok),
|
||||
])
|
||||
]
|
||||
|
||||
row = "| Avg latency (s) | "
|
||||
for m in models:
|
||||
row += f"{stats[m].avg_latency:.2f} | "
|
||||
lines.append(row)
|
||||
|
||||
row = "| Avg tokens per call | "
|
||||
for m in models:
|
||||
total = stats[m].total
|
||||
avg_tokens = stats[m].total_tokens / total if total else 0
|
||||
row += f"{avg_tokens:.1f} | "
|
||||
lines.append(row)
|
||||
|
||||
row = "| Avg token cost per call (USD) | "
|
||||
for m in models:
|
||||
avg_cost = stats[m].avg_cost_usd
|
||||
row += (f"{avg_cost:.6f} | " if avg_cost is not None else "n/a | ")
|
||||
lines.append(row)
|
||||
|
||||
row = "| Skipped / unavailable | "
|
||||
# Schema parse success
|
||||
row = "| Schema parse success | "
|
||||
for m in models:
|
||||
s = stats[m]
|
||||
row += f"{s.skipped}/{s.total} | "
|
||||
row += f"{s.schema_ok}/{s.total} ({s.schema_pct:.0f}%) | "
|
||||
lines.append(row)
|
||||
|
||||
# Tool execution success
|
||||
row = "| Tool execution success | "
|
||||
for m in models:
|
||||
s = stats[m]
|
||||
row += f"{s.exec_ok}/{s.total} ({s.exec_pct:.0f}%) | "
|
||||
lines.append(row)
|
||||
|
||||
# Correct tool selected
|
||||
row = "| Correct tool selected | "
|
||||
for m in models:
|
||||
s = stats[m]
|
||||
correct = sum(1 for r in results if r.model == m and r.success)
|
||||
pct = (correct / s.total * 100) if s.total else 0
|
||||
row += f"{correct}/{s.total} ({pct:.0f}%) | "
|
||||
lines.append(row)
|
||||
|
||||
# Avg latency
|
||||
row = "| Avg latency (s) | "
|
||||
for m in models:
|
||||
s = stats[m]
|
||||
row += f"{s.avg_latency:.2f} | "
|
||||
lines.append(row)
|
||||
|
||||
lines.append("")
|
||||
|
||||
lines.append("## Per-category breakdown")
|
||||
# Per-category breakdown
|
||||
lines.append("## Per-Category Breakdown")
|
||||
lines.append("")
|
||||
|
||||
for cat in sorted(by_category.keys()):
|
||||
lines.append(f"### {cat.title()}")
|
||||
lines.append("")
|
||||
lines.append(f"| Metric | {' | '.join(models)} |")
|
||||
lines.append(f"|--------|{'|'.join('---------' for _ in models)}|")
|
||||
|
||||
cat_data = by_category[cat]
|
||||
for metric_name, fn in [
|
||||
("Schema OK", lambda r: r.schema_ok),
|
||||
("Exec OK", lambda r: r.execution_ok),
|
||||
("Parallel OK", lambda r: r.parallel_ok),
|
||||
("Correct tool", lambda r: r.success),
|
||||
]:
|
||||
row = f"| {metric_name} | "
|
||||
for m in models:
|
||||
results_m = by_category[cat].get(m, [])
|
||||
attempted = [r for r in results_m if not r.skipped]
|
||||
if not attempted:
|
||||
row += "n/a | "
|
||||
continue
|
||||
ok = sum(1 for r in attempted if fn(r))
|
||||
pct = ok / len(attempted) * 100
|
||||
row += f"{ok}/{len(attempted)} ({pct:.0f}%) | "
|
||||
results_m = cat_data.get(m, [])
|
||||
total = len(results_m)
|
||||
ok = sum(1 for r in results_m if fn(r))
|
||||
pct = (ok / total * 100) if total else 0
|
||||
row += f"{ok}/{total} ({pct:.0f}%) | "
|
||||
lines.append(row)
|
||||
|
||||
row = "| Avg tokens | "
|
||||
for m in models:
|
||||
results_m = by_category[cat].get(m, [])
|
||||
avg_tokens = sum(r.total_tokens for r in results_m) / len(results_m) if results_m else 0
|
||||
row += f"{avg_tokens:.1f} | "
|
||||
lines.append(row)
|
||||
|
||||
row = "| Skipped | "
|
||||
for m in models:
|
||||
results_m = by_category[cat].get(m, [])
|
||||
skipped = sum(1 for r in results_m if r.skipped)
|
||||
row += f"{skipped}/{len(results_m)} | "
|
||||
lines.append(row)
|
||||
lines.append("")
|
||||
|
||||
lines.append("## Failure analysis")
|
||||
# Failure analysis
|
||||
lines.append("## Failure Analysis")
|
||||
lines.append("")
|
||||
|
||||
any_failures = False
|
||||
for m in models:
|
||||
s = stats[m]
|
||||
@@ -641,40 +514,28 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
|
||||
err = r.error or "wrong tool"
|
||||
lines.append(f"| {r.test_id} | {r.category} | {r.expected_tool} | {got} | {err[:60]} |")
|
||||
lines.append("")
|
||||
|
||||
if not any_failures:
|
||||
lines.append("No model failures detected.")
|
||||
lines.append("No failures detected.")
|
||||
lines.append("")
|
||||
|
||||
skipped_results = [r for r in results if r.skipped]
|
||||
lines.append("## Skipped / unavailable cases")
|
||||
lines.append("")
|
||||
if skipped_results:
|
||||
lines.append("| Test | Model | Category | Reason |")
|
||||
lines.append("|------|-------|----------|--------|")
|
||||
for r in skipped_results:
|
||||
lines.append(f"| {r.test_id} | {r.model} | {r.category} | {r.skip_reason[:80]} |")
|
||||
else:
|
||||
lines.append("No cases were skipped.")
|
||||
lines.append("")
|
||||
|
||||
lines.append("## Raw results")
|
||||
# Raw results JSON
|
||||
lines.append("## Raw Results")
|
||||
lines.append("")
|
||||
lines.append("```json")
|
||||
lines.append(json.dumps([asdict(r) for r in results], indent=2, default=str))
|
||||
lines.append("```")
|
||||
|
||||
report = "\n".join(lines)
|
||||
output_path.write_text(report, encoding="utf-8")
|
||||
output_path.write_text(report)
|
||||
return report
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Tool-calling benchmark")
|
||||
parser.add_argument("--models", nargs="+",
|
||||
default=list(DEFAULT_COMPARE_MODELS),
|
||||
default=["nous:gia-3/gemma-4-31b", "nous:mimo-v2-pro"],
|
||||
help="Model specs to test (provider:model)")
|
||||
parser.add_argument("--compare", action="store_true",
|
||||
help="Use the issue #796 default comparison set")
|
||||
parser.add_argument("--limit", type=int, default=0,
|
||||
help="Run only first N tests (0 = all)")
|
||||
parser.add_argument("--category", type=str, default="",
|
||||
@@ -685,9 +546,6 @@ def main():
|
||||
help="Print test cases without running them")
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.compare:
|
||||
args.models = list(DEFAULT_COMPARE_MODELS)
|
||||
|
||||
# Filter suite
|
||||
suite = SUITE[:]
|
||||
if args.category:
|
||||
|
||||
research_human_confirmation_firewall.md (new file, 515 lines)
@@ -0,0 +1,515 @@
|
||||
# Human Confirmation Firewall: Research Report
|
||||
## Implementation Patterns for Hermes Agent
|
||||
|
||||
**Issue:** #878
|
||||
**Parent:** #659
|
||||
**Priority:** P0
|
||||
**Scope:** Human-in-the-loop safety patterns for tool calls, crisis handling, and irreversible actions
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
Hermes already has a partial human confirmation firewall, but it is narrow.
|
||||
|
||||
Current repo state shows:
|
||||
- a real **pre-execution gate** for dangerous terminal commands in `tools/approval.py`
|
||||
- a partial **confidence-threshold path** via `_smart_approve()` in `tools/approval.py`
|
||||
- gateway support for blocking approval resolution in `gateway/run.py`
|
||||
|
||||
What is still missing is the core recommendation from this research issue:
|
||||
- **confidence scoring on all tool calls**, not just terminal commands that already matched a dangerous regex
|
||||
- a **hard pre-execution human gate for crisis interventions**, especially any action that would auto-respond to suicidal content
|
||||
- a consistent way to classify actions into:
|
||||
1. pre-execution gate
|
||||
2. post-execution review
|
||||
3. confidence-threshold execution
|
||||
|
||||
Recommendation:
|
||||
- use **Pattern 1: Pre-Execution Gate** for crisis interventions and irreversible/high-impact actions
|
||||
- use **Pattern 3: Confidence Threshold** for normal operations
|
||||
- reserve **Pattern 2: Post-Execution Review** only for low-risk and reversible actions
|
||||
|
||||
The next implementation step should be a **tool-call risk assessment layer** that runs before dispatch in `model_tools.handle_function_call()`, assigns a score and pattern to every tool call, and routes only the highest-risk calls into mandatory human confirmation.
|
||||
|
||||
---
|
||||
|
||||
## 1. The Three Proven Patterns
|
||||
|
||||
### Pattern 1: Pre-Execution Gate
|
||||
|
||||
Definition:
|
||||
- halt before execution
|
||||
- show the proposed action to the human
|
||||
- require explicit approval or denial
|
||||
|
||||
Best for:
|
||||
- destructive actions
|
||||
- irreversible side effects
|
||||
- crisis interventions
|
||||
- actions that affect another human's safety, money, infrastructure, or private data
|
||||
|
||||
Strengths:
|
||||
- strongest safety guarantee
|
||||
- simplest audit story
|
||||
- prevents the most catastrophic failure mode: acting first and apologizing later
|
||||
|
||||
Weaknesses:
|
||||
- adds latency
|
||||
- creates operator burden if overused
|
||||
- should not be applied to every ordinary tool call
|
||||
|
||||
### Pattern 2: Post-Execution Review
|
||||
|
||||
Definition:
|
||||
- execute first
|
||||
- expose result to human
|
||||
- allow rollback or follow-up correction
|
||||
|
||||
Best for:
|
||||
- reversible operations
|
||||
- low-risk actions with fast recovery
|
||||
- tasks where human review matters but immediate execution is acceptable
|
||||
|
||||
Strengths:
|
||||
- low friction
|
||||
- fast iteration
|
||||
- useful when rollback is practical
|
||||
|
||||
Weaknesses:
|
||||
- unsafe for crisis or destructive actions
|
||||
- only works when rollback actually exists
|
||||
- a poor fit for external communication or life-safety contexts
|
||||
|
||||
### Pattern 3: Confidence Threshold
|
||||
|
||||
Definition:
|
||||
- compute a risk/confidence score before execution
|
||||
- auto-execute high-confidence safe actions
|
||||
- request confirmation for lower-confidence or higher-risk actions
|
||||
|
||||
Best for:
|
||||
- mixed-risk tool ecosystems
|
||||
- day-to-day operations where always-confirm would be too expensive
|
||||
- systems with a large volume of ordinary, safe reads and edits
|
||||
|
||||
Strengths:
|
||||
- best balance of speed and safety
|
||||
- scales across many tool types
|
||||
- allows targeted human attention where it matters most
|
||||
|
||||
Weaknesses:
|
||||
- depends on a good scoring model
|
||||
- weak scoring creates false negatives or unnecessary prompts
|
||||
- must remain inspectable and debuggable
|
||||
|
||||
---
|
||||
|
||||
## 2. What Hermes Already Has
|
||||
|
||||
### 2.1 Existing Pre-Execution Gate for Dangerous Terminal Commands
|
||||
|
||||
`tools/approval.py` already implements a real pre-execution confirmation path for dangerous shell commands.
|
||||
|
||||
Observed components:
|
||||
- `DANGEROUS_PATTERNS`
|
||||
- `detect_dangerous_command()`
|
||||
- `prompt_dangerous_approval()`
|
||||
- `check_dangerous_command()`
|
||||
- gateway queueing and resolution support in the same module
|
||||
|
||||
This is already Pattern 1.
|
||||
|
||||
Current behavior:
|
||||
- dangerous terminal commands are detected before execution
|
||||
- the user can allow once / session / always / deny
|
||||
- gateway sessions can block until approval resolves
|
||||
|
||||
This is a strong foundation, but it is limited to a subset of terminal commands.
|
||||
|
||||
### 2.2 Partial Confidence Threshold via Smart Approvals
|
||||
|
||||
Hermes also already has a partial Pattern 3.
|
||||
|
||||
Observed component:
|
||||
- `_smart_approve()` in `tools/approval.py`
|
||||
|
||||
Current behavior:
|
||||
- only runs **after** a command has already been flagged by dangerous-pattern detection
|
||||
- uses the auxiliary LLM to decide:
|
||||
- approve
|
||||
- deny
|
||||
- escalate
|
||||
|
||||
This means Hermes has a confidence-threshold mechanism, but only for **already-flagged dangerous terminal commands**.
|
||||
|
||||
What it does not yet do:
|
||||
- score all tool calls
|
||||
- classify non-terminal tools
|
||||
- distinguish crisis interventions from normal ops
|
||||
- produce a shared risk model across the tool surface
|
||||
|
||||
### 2.3 Blocking Approval UX in Gateway
|
||||
|
||||
`gateway/run.py` already routes `/approve` and `/deny` into the blocking approval path.
|
||||
|
||||
This means the infrastructure for a true human confirmation firewall already exists in messaging contexts.
|
||||
|
||||
That is important because the missing work is not "invent human approval from zero."
|
||||
The missing work is:
|
||||
- expand the scope from dangerous shell commands to **all tool calls that matter**
|
||||
- make the routing policy explicit and inspectable
|
||||
|
||||
---
|
||||
|
||||
## 3. What Hermes Still Lacks
|
||||
|
||||
### 3.1 No Universal Tool-Call Risk Assessment
|
||||
|
||||
The current approval system is command-pattern-centric.
|
||||
It is not yet a tool-call firewall.
|
||||
|
||||
Missing capability:
|
||||
- before dispatch, every tool call should receive a structured assessment:
|
||||
- tool name
|
||||
- side-effect class
|
||||
- reversibility
|
||||
- human-impact potential
|
||||
- crisis relevance
|
||||
- confidence score
|
||||
- recommended confirmation pattern
|
||||
|
||||
Natural insertion point:
|
||||
- `model_tools.handle_function_call()`
|
||||
|
||||
That function already sits at the central dispatch boundary.
|
||||
It is the right place to add a pre-dispatch classifier.
|
||||
|
||||
### 3.2 No Hard Crisis Gate for Outbound Intervention
|
||||
|
||||
Issue #878 explicitly recommends:
|
||||
- Pattern 1 for crisis interventions
|
||||
- never auto-respond to suicidal content
|
||||
|
||||
That recommendation is not yet codified as a global firewall rule.
|
||||
|
||||
Missing rule:
|
||||
- if a tool call would directly intervene in a crisis context or send outward guidance in response to suicidal content, it must require explicit human confirmation before execution
|
||||
|
||||
Examples that should hard-gate:
|
||||
- outbound `send_message` content aimed at a suicidal user
|
||||
- any future tool that places calls, escalates emergencies, or contacts third parties about a crisis
|
||||
- any autonomous action that claims a person should or should not take a life-safety step
|
||||
|
||||
### 3.3 No First-Class Post-Execution Review Policy
|
||||
|
||||
Hermes has approval and denial, but it does not yet have a formal policy for when Pattern 2 is acceptable.
|
||||
|
||||
Without a policy, post-execution review tends to get used implicitly rather than intentionally.
|
||||
|
||||
That is risky.
|
||||
|
||||
Hermes should define Pattern 2 narrowly:
|
||||
- only for actions that are both low-risk and reversible
|
||||
- only when the system can show the human exactly what happened
|
||||
- never for crisis, finance, destructive config, or sensitive comms
|
||||
|
||||
---
|
||||
|
||||
## 4. Recommended Architecture for Hermes
|
||||
|
||||
### 4.1 Add a Tool-Call Assessment Layer
|
||||
|
||||
Add a pre-dispatch assessment object for every tool call.
|
||||
|
||||
Suggested shape:
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class ToolCallAssessment:
|
||||
tool_name: str
|
||||
risk_score: float # 0.0 to 1.0
|
||||
confidence: float # confidence in the assessment itself
|
||||
pattern: str # pre_execution_gate | post_execution_review | confidence_threshold
|
||||
requires_human: bool
|
||||
reasons: list[str]
|
||||
reversible: bool
|
||||
crisis_sensitive: bool
|
||||
```
|
||||
|
||||
Suggested execution point:
|
||||
- inside `model_tools.handle_function_call()` before `orchestrator.dispatch()`
|
||||
|
||||
Why here:
|
||||
- one place covers all tools
|
||||
- one place can emit traces
|
||||
- one place can remain model-agnostic
|
||||
- one place lets plugins observe or override the assessment
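
A minimal sketch of that insertion point, assuming a hypothetical `assess_tool_call()` scorer and `request_human_approval()` prompt helper in `tools/approval.py` (neither exists yet); only `handle_function_call()` and `orchestrator.dispatch()` are taken from the repo description above.

```python
# Hypothetical pre-dispatch firewall inside model_tools.handle_function_call().
# assess_tool_call() and request_human_approval() are assumed helpers, not existing APIs.
from tools.approval import assess_tool_call, request_human_approval


def handle_function_call(orchestrator, tool_name: str, arguments: dict):
    assessment = assess_tool_call(tool_name, arguments)  # returns a ToolCallAssessment

    if assessment.requires_human:
        approved = request_human_approval(tool_name, arguments, assessment.reasons)
        if not approved:
            # Denied calls never reach dispatch; the denial is reported back to the model.
            return {"error": "denied by human confirmation firewall",
                    "reasons": assessment.reasons}

    # Low-risk or approved calls follow the existing dispatch path unchanged.
    return orchestrator.dispatch(tool_name, arguments)
```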
|
||||
|
||||
### 4.2 Classify Tool Calls by Side-Effect Class
|
||||
|
||||
Suggested first-pass taxonomy:
|
||||
|
||||
### A. Read-only
|
||||
Examples:
|
||||
- `read_file`
|
||||
- `search_files`
|
||||
- `browser_snapshot`
|
||||
- `browser_console` read-only inspection
|
||||
|
||||
Pattern:
|
||||
- confidence threshold
|
||||
- almost always auto-execute
|
||||
- human confirmation normally unnecessary
|
||||
|
||||
### B. Local reversible edits
|
||||
Examples:
|
||||
- `patch`
|
||||
- `write_file`
|
||||
- `todo`
|
||||
|
||||
Pattern:
|
||||
- confidence threshold
|
||||
- human confirmation only when risk score rises because of path sensitivity or scope breadth
|
||||
|
||||
### C. External side effects
|
||||
Examples:
|
||||
- `send_message`
|
||||
- `cronjob`
|
||||
- `delegate_task`
|
||||
- smart-home actuation tools
|
||||
|
||||
Pattern:
|
||||
- confidence threshold by default
|
||||
- pre-execution gate when score exceeds threshold or when context is sensitive
|
||||
|
||||
### D. Critical / destructive / crisis-sensitive
|
||||
Examples:
|
||||
- dangerous `terminal`
|
||||
- financial actions
|
||||
- deletion / kill / restart / deployment in sensitive paths
|
||||
- outbound crisis intervention
|
||||
|
||||
Pattern:
|
||||
- pre-execution gate
|
||||
- never auto-execute on confidence alone
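
One way to seed this taxonomy is a static lookup from tool name to side-effect class, with unknown tools defaulting to class C so nothing is silently treated as read-only; the sketch below covers only the example tools named above and is a starting point, not a complete inventory.

```python
# First-pass mapping from tool name to side-effect class (A-D above); illustrative only.
SIDE_EFFECT_CLASS: dict[str, str] = {
    "read_file": "A", "search_files": "A", "browser_snapshot": "A",
    "patch": "B", "write_file": "B", "todo": "B",
    "send_message": "C", "cronjob": "C", "delegate_task": "C",
    "terminal": "D",
}


def side_effect_class(tool_name: str) -> str:
    """Classify a tool call; unknown or MCP-backed tools default to class C."""
    if tool_name.startswith("mcp_"):  # MCP tools always reach an external server
        return "C"
    return SIDE_EFFECT_CLASS.get(tool_name, "C")
```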
|
||||
|
||||
### 4.3 Crisis Override Rule
|
||||
|
||||
Add a hard override:
|
||||
|
||||
```text
|
||||
If tool call is crisis-sensitive AND outbound or irreversible:
|
||||
requires_human = True
|
||||
pattern = pre_execution_gate
|
||||
```
|
||||
|
||||
This is the most important rule in the issue.
|
||||
|
||||
The model may draft the message.
|
||||
The human must confirm before the system sends it.
|
||||
|
||||
### 4.4 Use Confidence Threshold for Normal Ops
|
||||
|
||||
For non-crisis operations, use Pattern 3.
|
||||
|
||||
Suggested logic:
|
||||
- low risk + high assessment confidence -> auto-execute
|
||||
- medium risk or medium confidence -> ask human
|
||||
- high risk -> always ask human
|
||||
|
||||
Key point:
|
||||
- confidence is not just "how sure the LLM is"
|
||||
- confidence should combine:
|
||||
- tool type certainty
|
||||
- argument clarity
|
||||
- path sensitivity
|
||||
- external side effects
|
||||
- crisis indicators
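
A compact sketch of that routing decision over the `ToolCallAssessment` fields from section 4.1; the numeric cutoffs are placeholders to be calibrated against the Phase 1 traces described below.

```python
# Pattern 3 routing sketch; the 0.25 / 0.60 / 0.70 cutoffs are uncalibrated placeholders.
def route(assessment) -> str:
    """Return 'auto', 'confirm', or 'gate' for a ToolCallAssessment."""
    if assessment.crisis_sensitive:
        return "gate"                                    # Pattern 1: hard pre-execution gate
    if assessment.risk_score > 0.60:
        return "confirm"                                 # high risk: always ask a human
    if assessment.risk_score >= 0.25 and assessment.confidence < 0.70:
        return "confirm"                                 # medium risk with weak confidence
    return "auto"                                        # low risk + high confidence
```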
|
||||
|
||||
---
|
||||
|
||||
## 5. Recommended Initial Scoring Factors
|
||||
|
||||
A simple initial scorer is enough.
|
||||
It does not need to be fancy.
|
||||
|
||||
Suggested factors:
|
||||
|
||||
### 5.1 Tool class risk
|
||||
- read-only tools: very low base risk
|
||||
- local mutation tools: moderate base risk
|
||||
- external communication / automation tools: higher base risk
|
||||
- shell execution: variable, often high
|
||||
|
||||
### 5.2 Target sensitivity
|
||||
Examples:
|
||||
- `/tmp` or local scratch paths -> lower
|
||||
- repo files under git -> medium
|
||||
- system config, credentials, secrets, gateway lifecycle -> high
|
||||
- human-facing channels -> high if message content is sensitive
|
||||
|
||||
### 5.3 Reversibility
|
||||
- reversible -> lower
|
||||
- difficult but possible to undo -> medium
|
||||
- practically irreversible -> high
|
||||
|
||||
### 5.4 Human-impact content
|
||||
- no direct human impact -> low
|
||||
- administrative impact -> medium
|
||||
- crisis / safety / emotional intervention -> critical
|
||||
|
||||
### 5.5 Context certainty
|
||||
- arguments are explicit and narrow -> higher confidence
|
||||
- arguments are vague, inferred, or broad -> lower confidence
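
To make this concrete, here is one possible additive scorer over factors 5.1 through 5.5; the weights are illustrative and would be calibrated against the Phase 1 traces.

```python
# Illustrative additive scorer over factors 5.1-5.5; weights are uncalibrated placeholders.
def score_tool_call(tool_class_risk: float, target_sensitivity: float,
                    irreversibility: float, human_impact: float,
                    argument_clarity: float) -> tuple[float, float]:
    """All inputs are normalized to 0.0-1.0. Returns (risk_score, confidence)."""
    risk_score = min(1.0,
                     0.35 * tool_class_risk
                     + 0.20 * target_sensitivity
                     + 0.20 * irreversibility
                     + 0.25 * human_impact)
    confidence = argument_clarity  # vague, inferred, or broad arguments lower confidence
    return risk_score, confidence
```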
|
||||
|
||||
---
|
||||
|
||||
## 6. Implementation Plan
|
||||
|
||||
### Phase 1: Assessment Without Behavior Change
|
||||
|
||||
Goal:
|
||||
- score all tool calls
|
||||
- log assessment decisions
|
||||
- emit traces for review
|
||||
- do not yet block new tool categories
|
||||
|
||||
Files to touch:
|
||||
- `tools/approval.py`
|
||||
- `model_tools.py`
|
||||
- tests for assessment coverage
|
||||
|
||||
Output:
|
||||
- risk/confidence trace for every tool call
|
||||
- pattern recommendation for every tool call
|
||||
|
||||
Why first:
|
||||
- lets us calibrate before changing runtime behavior
|
||||
- avoids breaking existing workflows blindly
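
For the trace output itself, a JSON-lines log is probably sufficient; the path and record layout below are assumptions, not an existing Hermes convention.

```python
# Minimal Phase 1 trace writer; the log path and record layout are assumptions.
import json
import time
from dataclasses import asdict
from pathlib import Path

TRACE_PATH = Path("logs/tool_call_assessments.jsonl")


def log_assessment(assessment) -> None:
    """Append one ToolCallAssessment as a JSON line for later inspection."""
    TRACE_PATH.parent.mkdir(parents=True, exist_ok=True)
    record = {"ts": time.time(), **asdict(assessment)}
    with TRACE_PATH.open("a", encoding="utf-8") as handle:
        handle.write(json.dumps(record) + "\n")
```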
|
||||
|
||||
### Phase 2: Hard-Gate Crisis-Sensitive Outbound Actions
|
||||
|
||||
Goal:
|
||||
- enforce Pattern 1 for crisis interventions
|
||||
|
||||
Likely surfaces:
|
||||
- `send_message`
|
||||
- any future telephony / call / escalation tools
|
||||
- other tools with direct human intervention side effects
|
||||
|
||||
Rule:
|
||||
- never auto-send crisis intervention content without human confirmation
|
||||
|
||||
### Phase 3: General Confidence Threshold for Normal Ops
|
||||
|
||||
Goal:
|
||||
- apply Pattern 3 to all tool calls
|
||||
- auto-run clearly safe actions
|
||||
- escalate ambiguous or medium-risk actions
|
||||
|
||||
Likely thresholds:
|
||||
- score < 0.25 -> auto
|
||||
- 0.25 to 0.60 -> confirm if confidence is weak
|
||||
- score > 0.60 -> confirm
|
||||
- crisis-sensitive -> always confirm
|
||||
|
||||
### Phase 4: Optional Post-Execution Review Lane
|
||||
|
||||
Goal:
|
||||
- allow Pattern 2 only for explicitly reversible operations
|
||||
|
||||
Examples:
|
||||
- maybe low-risk messaging drafts saved locally
|
||||
- maybe reversible UI actions in specific environments
|
||||
|
||||
Important:
|
||||
- this phase is optional
|
||||
- Hermes should not rely on Pattern 2 for safety-critical flows
|
||||
|
||||
---
|
||||
|
||||
## 7. Verification Criteria for the Future Implementation
|
||||
|
||||
The eventual implementation should prove all of the following:
|
||||
|
||||
1. every tool call receives a scored assessment before dispatch
|
||||
2. crisis-sensitive outbound actions always require human confirmation
|
||||
3. dangerous terminal commands still preserve their current pre-execution gate
|
||||
4. clearly safe read-only tool calls are not slowed by unnecessary prompts
|
||||
5. assessment traces can be inspected after a run
|
||||
6. approval decisions remain session-safe across CLI and gateway contexts
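
Criterion 2 is the easiest to pin down as a regression test; a pytest-style sketch follows, assuming the hypothetical `assess_tool_call()` helper from section 4 grows a `context` argument for crisis signals.

```python
# Pytest-style sketch for criterion 2; assess_tool_call() and its context argument are assumed.
def test_crisis_outbound_send_message_requires_human():
    assessment = assess_tool_call(
        "send_message",
        {"channel": "user", "text": "drafted crisis reply"},
        context={"crisis_detected": True},  # stand-in for whatever crisis detection provides
    )
    assert assessment.requires_human is True
    assert assessment.pattern == "pre_execution_gate"
```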
|
||||
|
||||
---
|
||||
|
||||
## 8. Concrete Recommendations
|
||||
|
||||
### Recommendation 1
|
||||
Do **not** replace the current dangerous-command approval path.
|
||||
Generalize above it.
|
||||
|
||||
Why:
|
||||
- existing terminal Pattern 1 already works
|
||||
- this is the strongest piece of the current firewall
|
||||
|
||||
### Recommendation 2
|
||||
Add a universal scorer in `model_tools.handle_function_call()`.
|
||||
|
||||
Why:
|
||||
- that is the first point where Hermes knows the tool name and structured arguments
|
||||
- it is the cleanest place to classify all tool calls uniformly
|
||||
|
||||
### Recommendation 3
|
||||
Treat crisis-sensitive outbound intervention as a separate safety class.
|
||||
|
||||
Why:
|
||||
- issue #878 explicitly calls for Pattern 1 here
|
||||
- this matches Timmy's SOUL-level safety requirements
|
||||
|
||||
### Recommendation 4
|
||||
Ship scoring traces before enforcement expansion.
|
||||
|
||||
Why:
|
||||
- you cannot tune thresholds you cannot inspect
|
||||
- false positives will otherwise frustrate normal usage
|
||||
|
||||
### Recommendation 5
|
||||
Use Pattern 3 as the default policy for normal operations.
|
||||
|
||||
Why:
|
||||
- full manual confirmation on every tool call is too expensive
|
||||
- full autonomy is too risky
|
||||
- Pattern 3 is the practical middle ground
|
||||
|
||||
---
|
||||
|
||||
## 9. Bottom Line
|
||||
|
||||
Hermes should implement a **two-track human confirmation firewall**:
|
||||
|
||||
1. **Pattern 1: Pre-Execution Gate**
|
||||
- crisis interventions
|
||||
- destructive terminal actions
|
||||
- irreversible or safety-critical tool calls
|
||||
|
||||
2. **Pattern 3: Confidence Threshold**
|
||||
- all ordinary tool calls
|
||||
- driven by a universal tool-call assessment layer
|
||||
- integrated at the central dispatch boundary
|
||||
|
||||
Pattern 2 should remain optional and narrow.
|
||||
It is not the primary answer for Hermes.
|
||||
|
||||
The repo already contains the beginnings of this system.
|
||||
The next step is not new theory.
|
||||
It is to turn the existing approval path into a true **tool-call-wide human confirmation firewall**.
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- Issue #878 — Human Confirmation Firewall Implementation Patterns
|
||||
- Issue #659 — Critical Research Tasks
|
||||
- `tools/approval.py` — current dangerous-command approval flow and smart approvals
|
||||
- `model_tools.py` — central tool dispatch boundary
|
||||
- `gateway/run.py` — blocking approval handling for messaging sessions
|
||||
@@ -1,115 +0,0 @@
|
||||
"""Tests for Issue #796 tool-calling benchmark coverage and reporting."""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import patch
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))
|
||||
|
||||
from tool_call_benchmark import ( # noqa: E402
|
||||
CallResult,
|
||||
DEFAULT_COMPARE_MODELS,
|
||||
ISSUE_796_CATEGORY_COUNTS,
|
||||
ToolCall,
|
||||
generate_report,
|
||||
run_single_test,
|
||||
suite_category_counts,
|
||||
)
|
||||
|
||||
|
||||
def test_suite_counts_match_issue_796_distribution():
|
||||
counts = suite_category_counts()
|
||||
assert counts == ISSUE_796_CATEGORY_COUNTS
|
||||
assert sum(counts.values()) == 100
|
||||
|
||||
|
||||
def test_default_compare_models_cover_issue_796_lanes():
|
||||
assert len(DEFAULT_COMPARE_MODELS) == 3
|
||||
assert any("gemma-4-31b" in spec for spec in DEFAULT_COMPARE_MODELS)
|
||||
assert any("gemma-4-26b" in spec for spec in DEFAULT_COMPARE_MODELS)
|
||||
assert any("mimo-v2-pro" in spec for spec in DEFAULT_COMPARE_MODELS)
|
||||
|
||||
|
||||
def test_generate_report_includes_parallel_and_cost_metrics(tmp_path):
|
||||
output_path = tmp_path / "report.md"
|
||||
results = [
|
||||
CallResult(
|
||||
test_id="file-01",
|
||||
category="file",
|
||||
model="gemma-4-31b",
|
||||
prompt="Read the file.",
|
||||
expected_tool="read_file",
|
||||
success=True,
|
||||
tool_called="read_file",
|
||||
schema_ok=True,
|
||||
tool_args_valid=True,
|
||||
execution_ok=True,
|
||||
tool_count=2,
|
||||
parallel_ok=True,
|
||||
latency_s=1.25,
|
||||
total_tokens=123,
|
||||
estimated_cost_usd=0.0012,
|
||||
cost_status="estimated",
|
||||
),
|
||||
CallResult(
|
||||
test_id="web-01",
|
||||
category="web",
|
||||
model="mimo-v2-pro",
|
||||
prompt="Search the web.",
|
||||
expected_tool="web_search",
|
||||
success=False,
|
||||
tool_called="web_search",
|
||||
schema_ok=True,
|
||||
tool_args_valid=False,
|
||||
execution_ok=False,
|
||||
tool_count=1,
|
||||
parallel_ok=False,
|
||||
latency_s=2.5,
|
||||
error="bad args",
|
||||
total_tokens=456,
|
||||
estimated_cost_usd=None,
|
||||
cost_status="unknown",
|
||||
skipped=True,
|
||||
skip_reason="web_search unavailable",
|
||||
),
|
||||
]
|
||||
|
||||
report = generate_report(results, ["gemma-4-31b", "mimo-v2-pro"], output_path)
|
||||
|
||||
assert output_path.exists()
|
||||
assert "Parallel tool success" in report
|
||||
assert "Avg token cost per call (USD)" in report
|
||||
assert "Skipped / unavailable" in report
|
||||
assert "Requested category mix" in report
|
||||
|
||||
|
||||
def test_run_single_test_skips_when_expected_tool_unavailable():
|
||||
class FakeAgent:
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.valid_tool_names = {"read_file", "terminal"}
|
||||
self.session_input_tokens = 0
|
||||
self.session_output_tokens = 0
|
||||
self.session_cache_read_tokens = 0
|
||||
self.session_cache_write_tokens = 0
|
||||
self.session_api_calls = 0
|
||||
self.base_url = ""
|
||||
self.api_key = None
|
||||
|
||||
def run_conversation(self, *args, **kwargs):
|
||||
raise AssertionError("run_conversation should not be called for unavailable tools")
|
||||
|
||||
tc = ToolCall(
|
||||
id="mcp-01",
|
||||
category="mcp",
|
||||
prompt="Use an MCP tool to list resources.",
|
||||
expected_tool="",
|
||||
expected_tool_prefix="mcp_",
|
||||
)
|
||||
|
||||
with patch.dict(sys.modules, {"run_agent": SimpleNamespace(AIAgent=FakeAgent)}):
|
||||
result = run_single_test(tc, "gemini:gemma-4-31b-it", "gemini")
|
||||
|
||||
assert result.skipped is True
|
||||
assert "mcp_" in result.skip_reason
|
||||
assert result.success is False
|
||||