Compare commits

..

2 Commits

Author SHA1 Message Date
Alexander Whitestone
411aea9edf feat: harden tool-call benchmark coverage and reporting for #796
All checks were successful
Lint / lint (pull_request) Successful in 9s
Refs #796
2026-04-22 11:47:11 -04:00
Alexander Whitestone
877005b06e wip: add failing tool-call benchmark regression tests for #796
Refs #796
2026-04-22 11:31:24 -04:00
11 changed files with 568 additions and 657 deletions

View File

@@ -1,69 +0,0 @@
"""First-class context snapshot artifacts for live runtime memory evaluation."""
from __future__ import annotations
import json
import re
from pathlib import Path
from typing import Any
from hermes_constants import get_hermes_home
_SAFE_SEGMENT_RE = re.compile(r"[^A-Za-z0-9_.-]+")
class ContextSnapshotRecorder:
"""Write per-call prompt-composition artifacts for a Hermes session."""
def __init__(self, session_id: str, *, enabled: bool = False, base_dir: str | Path | None = None):
self.session_id = session_id or "session"
self.enabled = bool(enabled)
self.base_dir = Path(base_dir) if base_dir else get_hermes_home() / "reports" / "context_snapshots"
@property
def session_dir(self) -> Path:
safe_session = _SAFE_SEGMENT_RE.sub("_", self.session_id).strip("._") or "session"
return self.base_dir / safe_session
def record_call(
self,
api_call_count: int,
*,
system_prompt: str,
memory_provider_system_prompt: str = "",
memory_prefetch_raw: str = "",
memory_context_block: str = "",
api_user_message: str = "",
api_messages: list[dict[str, Any]] | None = None,
metadata: dict[str, Any] | None = None,
) -> Path | None:
if not self.enabled:
return None
call_dir = self.session_dir / f"call_{api_call_count:03d}"
call_dir.mkdir(parents=True, exist_ok=True)
self._write_text(call_dir / "system_prompt.txt", system_prompt or "")
self._write_text(call_dir / "memory_provider_system_prompt.txt", memory_provider_system_prompt or "")
self._write_text(call_dir / "memory_prefetch_raw.txt", memory_prefetch_raw or "")
self._write_text(call_dir / "memory_context_block.txt", memory_context_block or "")
self._write_text(call_dir / "api_user_message.txt", api_user_message or "")
self._write_json(call_dir / "api_messages.json", api_messages or [])
self._write_json(
call_dir / "metadata.json",
{
"session_id": self.session_id,
"api_call_count": api_call_count,
**(metadata or {}),
},
)
return call_dir
@staticmethod
def _write_text(path: Path, content: str) -> None:
path.write_text(content, encoding="utf-8")
@staticmethod
def _write_json(path: Path, payload: Any) -> None:
path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8")

View File

@@ -0,0 +1,139 @@
# Tool-Calling Benchmark Report
Generated: 2026-04-22 15:46 UTC
Executed: 3 calls from a 100-call suite across 7 categories
Models tested: nous:gia-3/gemma-4-31b, gemini:gemma-4-26b-it, nous:mimo-v2-pro
## Requested category mix
| Category | Target calls |
|----------|--------------|
| file | 20 |
| terminal | 20 |
| web | 15 |
| code | 15 |
| browser | 10 |
| delegate | 10 |
| mcp | 10 |
## Summary
| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema parse success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Tool execution success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel tool success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg latency (s) | 0.00 | 0.00 | 0.00 |
| Avg tokens per call | 0.0 | 0.0 | 0.0 |
| Avg token cost per call (USD) | n/a | n/a | n/a |
| Skipped / unavailable | 0/1 | 0/1 | 0/1 |
## Per-category breakdown
### File
| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Exec OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Correct tool | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg tokens | 0.0 | 0.0 | 0.0 |
| Skipped | 0/1 | 0/1 | 0/1 |
## Failure analysis
### nous:gia-3/gemma-4-31b — 1 failures
| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |
### gemini:gemma-4-26b-it — 1 failures
| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |
### nous:mimo-v2-pro — 1 failures
| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |
## Skipped / unavailable cases
No cases were skipped.
## Raw results
```json
[
{
"test_id": "file-01",
"category": "file",
"model": "nous:gia-3/gemma-4-31b",
"prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
"expected_tool": "read_file",
"success": false,
"tool_called": null,
"schema_ok": false,
"tool_args_valid": false,
"execution_ok": false,
"tool_count": 0,
"parallel_ok": false,
"latency_s": 0,
"total_tokens": 0,
"estimated_cost_usd": null,
"cost_status": "unknown",
"skipped": false,
"skip_reason": "",
"error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
"raw_response": ""
},
{
"test_id": "file-01",
"category": "file",
"model": "gemini:gemma-4-26b-it",
"prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
"expected_tool": "read_file",
"success": false,
"tool_called": null,
"schema_ok": false,
"tool_args_valid": false,
"execution_ok": false,
"tool_count": 0,
"parallel_ok": false,
"latency_s": 0,
"total_tokens": 0,
"estimated_cost_usd": null,
"cost_status": "unknown",
"skipped": false,
"skip_reason": "",
"error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
"raw_response": ""
},
{
"test_id": "file-01",
"category": "file",
"model": "nous:mimo-v2-pro",
"prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
"expected_tool": "read_file",
"success": false,
"tool_called": null,
"schema_ok": false,
"tool_args_valid": false,
"execution_ok": false,
"tool_count": 0,
"parallel_ok": false,
"latency_s": 0,
"total_tokens": 0,
"estimated_cost_usd": null,
"cost_status": "unknown",
"skipped": false,
"skip_reason": "",
"error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
"raw_response": ""
}
]
```

View File

@@ -8,10 +8,11 @@ success rates, latency, and token costs.
Usage:
python3 benchmarks/tool_call_benchmark.py # full 100-call suite
python3 benchmarks/tool_call_benchmark.py --limit 10 # quick smoke test
python3 benchmarks/tool_call_benchmark.py --models nous # single model
python3 benchmarks/tool_call_benchmark.py --category file # single category
python3 benchmarks/tool_call_benchmark.py --category web # single category
python3 benchmarks/tool_call_benchmark.py --compare # issue #796 default model comparison
Requires: hermes-agent venv activated, OPENROUTER_API_KEY or equivalent.
Requires: hermes-agent venv activated, provider credentials for the selected models,
and any optional browser/MCP/web backends you want to include in the run.
"""
import argparse
@@ -25,10 +26,12 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
# Ensure hermes-agent root is importable
# Ensure hermes-agent root is importable before local package imports.
REPO_ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(REPO_ROOT))
from agent.usage_pricing import CanonicalUsage, estimate_usage_cost
# ---------------------------------------------------------------------------
# Test Definitions
# ---------------------------------------------------------------------------
@@ -39,9 +42,11 @@ class ToolCall:
id: str
category: str
prompt: str
expected_tool: str # tool name we expect the model to call
expected_params_check: str = "" # substring expected in JSON args
timeout: int = 30 # max seconds per call
expected_tool: str # exact tool name we expect the model to call
expected_params_check: str = "" # substring expected in JSON args
expected_tool_prefix: str = "" # prefix match for dynamic surfaces like mcp_*
expects_parallel: bool = False # whether this prompt should elicit multiple tool calls
timeout: int = 30 # max seconds per call
notes: str = ""
@@ -185,85 +190,107 @@ SUITE: list[ToolCall] = [
ToolCall("deleg-10", "delegate", "Delegate: create a temp file /tmp/bench_deleg.txt with 'done'.",
"delegate_task", "write"),
# ── Todo / Memory (10 — replacing web/browser/MCP which need external services) ──
ToolCall("todo-01", "todo", "Add a todo item: 'Run benchmark suite'",
"todo", "benchmark"),
ToolCall("todo-02", "todo", "Show me the current todo list.",
"todo", ""),
ToolCall("todo-03", "todo", "Mark the first todo item as completed.",
"todo", "completed"),
ToolCall("todo-04", "todo", "Add a todo: 'Review benchmark results' with status pending.",
"todo", "Review"),
ToolCall("todo-05", "todo", "Clear all completed todos.",
"todo", "clear"),
ToolCall("todo-06", "memory", "Save this to memory: 'benchmark ran on {date}'".format(
date=datetime.now().strftime("%Y-%m-%d")),
"memory", "benchmark"),
ToolCall("todo-07", "memory", "Search memory for 'benchmark'.",
"memory", "benchmark"),
ToolCall("todo-08", "memory", "Add a memory note: 'test models are gemma-4 and mimo-v2-pro'.",
"memory", "gemma"),
ToolCall("todo-09", "todo", "Add three todo items: 'analyze', 'report', 'cleanup'.",
"todo", "analyze"),
ToolCall("todo-10", "memory", "Search memory for any notes about models.",
"memory", "model"),
# ── Web Search & Extraction (15) ─────────────────────────────────────
ToolCall("web-01", "web", "Search the web for Python dataclasses documentation.",
"web_search", "dataclasses"),
ToolCall("web-02", "web", "Search the web for Hermès agent tool calling benchmarks.",
"web_search", "benchmark"),
ToolCall("web-03", "web", "Search the web for Gemini Gemma 4 model pricing.",
"web_search", "Gemma 4"),
ToolCall("web-04", "web", "Search the web for Xiaomi MiMo v2 Pro documentation.",
"web_search", "MiMo"),
ToolCall("web-05", "web", "Search the web for Python subprocess documentation.",
"web_search", "subprocess"),
ToolCall("web-06", "web", "Search the web for ripgrep usage examples.",
"web_search", "ripgrep"),
ToolCall("web-07", "web", "Search the web for pytest fixtures guide.",
"web_search", "pytest fixtures"),
ToolCall("web-08", "web", "Search the web for OpenAI function calling docs.",
"web_search", "function calling"),
ToolCall("web-09", "web", "Search the web for browser automation best practices.",
"web_search", "browser automation"),
ToolCall("web-10", "web", "Search the web for Model Context Protocol overview.",
"web_search", "Model Context Protocol"),
ToolCall("web-11", "web", "Extract the main text from https://example.com.",
"web_extract", "example.com"),
ToolCall("web-12", "web", "Extract the page content from https://example.org.",
"web_extract", "example.org"),
ToolCall("web-13", "web", "Extract the title and body text from https://www.iana.org/domains/reserved.",
"web_extract", "iana.org"),
ToolCall("web-14", "web", "Extract content from https://httpbin.org/html.",
"web_extract", "httpbin.org"),
ToolCall("web-15", "web", "Extract the main content from https://www.python.org/.",
"web_extract", "python.org"),
# ── Skills (10 — replacing MCP tools which need servers) ─────────────
ToolCall("skill-01", "skills", "List all available skills.",
"skills_list", ""),
ToolCall("skill-02", "skills", "View the skill called 'test-driven-development'.",
"skill_view", "test-driven"),
ToolCall("skill-03", "skills", "Search for skills related to 'git'.",
"skills_list", "git"),
ToolCall("skill-04", "skills", "View the 'code-review' skill.",
"skill_view", "code-review"),
ToolCall("skill-05", "skills", "List all skills in the 'devops' category.",
"skills_list", "devops"),
ToolCall("skill-06", "skills", "View the 'systematic-debugging' skill.",
"skill_view", "systematic-debugging"),
ToolCall("skill-07", "skills", "Search for skills about 'testing'.",
"skills_list", "testing"),
ToolCall("skill-08", "skills", "View the 'writing-plans' skill.",
"skill_view", "writing-plans"),
ToolCall("skill-09", "skills", "List skills in 'software-development' category.",
"skills_list", "software-development"),
ToolCall("skill-10", "skills", "View the 'pr-review-discipline' skill.",
"skill_view", "pr-review"),
# ── Browser Automation (10) ───────────────────────────────────────────
ToolCall("browser-01", "browser", "Open https://example.com in the browser.",
"browser_navigate", "example.com"),
ToolCall("browser-02", "browser", "Open https://www.python.org in the browser.",
"browser_navigate", "python.org"),
ToolCall("browser-03", "browser", "Open https://www.wikipedia.org in the browser.",
"browser_navigate", "wikipedia.org"),
ToolCall("browser-04", "browser", "Navigate the browser to https://example.org.",
"browser_navigate", "example.org"),
ToolCall("browser-05", "browser", "Go to https://httpbin.org/forms/post in the browser.",
"browser_navigate", "httpbin.org/forms/post"),
ToolCall("browser-06", "browser", "Open https://www.iana.org/domains/reserved in the browser.",
"browser_navigate", "iana.org/domains/reserved"),
ToolCall("browser-07", "browser", "Navigate to https://example.net in the browser.",
"browser_navigate", "example.net"),
ToolCall("browser-08", "browser", "Open https://developer.mozilla.org in the browser.",
"browser_navigate", "developer.mozilla.org"),
ToolCall("browser-09", "browser", "Navigate the browser to https://www.rfc-editor.org.",
"browser_navigate", "rfc-editor.org"),
ToolCall("browser-10", "browser", "Open https://www.gnu.org in the browser.",
"browser_navigate", "gnu.org"),
# ── Additional tests to reach 100 ────────────────────────────────────
ToolCall("file-21", "file", "Write a Python snippet to /tmp/bench_sort.py that sorts [3,1,2].",
"write_file", "bench_sort"),
ToolCall("file-22", "file", "Read /tmp/bench_sort.py back and confirm it exists.",
"read_file", "bench_sort"),
ToolCall("file-23", "file", "Search for 'class' in all .py files in the benchmarks directory.",
"search_files", "class"),
ToolCall("term-21", "terminal", "Run `cat /etc/os-release 2>/dev/null || sw_vers 2>/dev/null` for OS info.",
"terminal", "os"),
ToolCall("term-22", "terminal", "Run `nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null` for CPU count.",
"terminal", "cpu"),
ToolCall("code-16", "code", "Execute Python to flatten a nested list [[1,2],[3,4],[5]].",
"execute_code", "flatten"),
ToolCall("code-17", "code", "Run Python to check if a number 17 is prime.",
"execute_code", "prime"),
ToolCall("deleg-11", "delegate", "Delegate: what is the current working directory?",
"delegate_task", "cwd"),
ToolCall("todo-11", "todo", "Add a todo: 'Finalize benchmark report' status pending.",
"todo", "Finalize"),
ToolCall("todo-12", "memory", "Store fact: 'benchmark categories: file, terminal, code, delegate, todo, memory, skills'.",
"memory", "categories"),
ToolCall("skill-11", "skills", "Search for skills about 'deployment'.",
"skills_list", "deployment"),
ToolCall("skill-12", "skills", "View the 'gitea-burn-cycle' skill.",
"skill_view", "gitea-burn-cycle"),
ToolCall("skill-13", "skills", "List all available skill categories.",
"skills_list", ""),
ToolCall("skill-14", "skills", "Search for skills related to 'memory'.",
"skills_list", "memory"),
ToolCall("skill-15", "skills", "View the 'mimo-swarm' skill.",
"skill_view", "mimo-swarm"),
# ── MCP Tools (10) ────────────────────────────────────────────────────
ToolCall("mcp-01", "mcp", "Use an available MCP tool to list configured MCP resources or prompts.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-02", "mcp", "Use an MCP tool to inspect available resources on a configured server.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-03", "mcp", "Use an MCP tool to read a resource from any configured MCP server.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-04", "mcp", "Use an MCP tool to list prompts from any configured MCP server.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-05", "mcp", "Use an available MCP tool and report what it returns.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-06", "mcp", "Call any safe MCP tool that is currently available and summarize the response.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-07", "mcp", "Use one configured MCP tool to enumerate data or capabilities.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-08", "mcp", "Use an MCP tool to fetch a small piece of data from a connected server.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-09", "mcp", "Invoke an available MCP tool and show the structured result.",
"", "", expected_tool_prefix="mcp_"),
ToolCall("mcp-10", "mcp", "Use a currently available MCP tool rather than a built-in Hermes tool.",
"", "", expected_tool_prefix="mcp_"),
]
# fmt: on
DEFAULT_COMPARE_MODELS = [
"nous:gia-3/gemma-4-31b",
"gemini:gemma-4-26b-it",
"nous:mimo-v2-pro",
]
ISSUE_796_CATEGORY_COUNTS = {
"file": 20,
"terminal": 20,
"web": 15,
"code": 15,
"browser": 10,
"delegate": 10,
"mcp": 10,
}
def suite_category_counts() -> dict[str, int]:
counts: dict[str, int] = {}
for tc in SUITE:
counts[tc.category] = counts.get(tc.category, 0) + 1
return counts
# ---------------------------------------------------------------------------
# Runner
@@ -278,9 +305,17 @@ class CallResult:
expected_tool: str
success: bool
tool_called: Optional[str] = None
schema_ok: bool = False
tool_args_valid: bool = False
execution_ok: bool = False
tool_count: int = 0
parallel_ok: bool = False
latency_s: float = 0.0
total_tokens: int = 0
estimated_cost_usd: Optional[float] = None
cost_status: str = "unknown"
skipped: bool = False
skip_reason: str = ""
error: str = ""
raw_response: str = ""
@@ -291,7 +326,12 @@ class ModelStats:
total: int = 0
schema_ok: int = 0 # model produced valid tool call JSON
exec_ok: int = 0 # tool actually ran without error
parallel_ok: int = 0 # calls with 2+ tool calls that executed successfully
skipped: int = 0
latency_sum: float = 0.0
total_tokens: int = 0
total_cost_usd: float = 0.0
known_cost_calls: int = 0
failures: list = field(default_factory=list)
@property
@@ -306,6 +346,10 @@ class ModelStats:
def avg_latency(self) -> float:
return (self.latency_sum / self.total) if self.total else 0
@property
def avg_cost_usd(self) -> Optional[float]:
return (self.total_cost_usd / self.known_cost_calls) if self.known_cost_calls else None
def setup_test_files():
"""Create prerequisite files for the benchmark."""
@@ -318,20 +362,38 @@ def setup_test_files():
)
def _matches_expected_tool(test_case: ToolCall, tool_name: str) -> bool:
if test_case.expected_tool and tool_name == test_case.expected_tool:
return True
if test_case.expected_tool_prefix and tool_name.startswith(test_case.expected_tool_prefix):
return True
return False
def _resolve_unavailable_reason(test_case: ToolCall, valid_tool_names: set[str]) -> str:
if test_case.expected_tool and test_case.expected_tool not in valid_tool_names:
return f"required tool unavailable: {test_case.expected_tool}"
if test_case.expected_tool_prefix and not any(
name.startswith(test_case.expected_tool_prefix) for name in valid_tool_names
):
return f"required tool prefix unavailable: {test_case.expected_tool_prefix}"
return ""
def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
"""Run a single tool-calling test through the agent."""
from run_agent import AIAgent
result = CallResult(
test_id=tc.id,
category=tc.category,
model=model_spec,
prompt=tc.prompt,
expected_tool=tc.expected_tool,
expected_tool=tc.expected_tool or tc.expected_tool_prefix,
success=False,
)
try:
from run_agent import AIAgent
agent = AIAgent(
model=model_spec,
provider=provider,
@@ -342,6 +404,14 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
persist_session=False,
)
valid_tool_names = set(getattr(agent, "valid_tool_names", set()))
unavailable_reason = _resolve_unavailable_reason(tc, valid_tool_names)
if unavailable_reason:
result.skipped = True
result.skip_reason = unavailable_reason
result.error = unavailable_reason
return result
t0 = time.time()
conv = agent.run_conversation(
user_message=tc.prompt,
@@ -352,52 +422,75 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
)
result.latency_s = round(time.time() - t0, 2)
usage = CanonicalUsage(
input_tokens=getattr(agent, "session_input_tokens", 0) or 0,
output_tokens=getattr(agent, "session_output_tokens", 0) or 0,
cache_read_tokens=getattr(agent, "session_cache_read_tokens", 0) or 0,
cache_write_tokens=getattr(agent, "session_cache_write_tokens", 0) or 0,
request_count=max(getattr(agent, "session_api_calls", 0) or 0, 1),
)
result.total_tokens = usage.total_tokens
billed_model = model_spec.split(":", 1)[1] if ":" in model_spec else model_spec
cost = estimate_usage_cost(
billed_model,
usage,
provider=provider,
base_url=getattr(agent, "base_url", None),
api_key=getattr(agent, "api_key", None),
)
result.cost_status = cost.status
result.estimated_cost_usd = float(cost.amount_usd) if cost.amount_usd is not None else None
messages = conv.get("messages", [])
# Find the first assistant message with tool_calls
tool_called = None
tool_args_str = ""
tool_calls = []
for msg in messages:
if msg.get("role") == "assistant" and msg.get("tool_calls"):
for tc_item in msg["tool_calls"]:
fn = tc_item.get("function", {})
tool_called = fn.get("name", "")
tool_args_str = fn.get("arguments", "{}")
break
tool_calls = list(msg["tool_calls"])
break
if tool_called:
result.tool_called = tool_called
result.schema_ok = True
if tool_calls:
result.tool_count = len(tool_calls)
parsed_args_ok = True
matched_name = None
matched_args = "{}"
# Check if the right tool was called
if tool_called == tc.expected_tool:
result.success = True
for tc_item in tool_calls:
fn = tc_item.get("function", {})
tool_name = fn.get("name", "")
tool_args = fn.get("arguments", "{}")
try:
json.loads(tool_args or "{}")
except Exception:
parsed_args_ok = False
if matched_name is None and _matches_expected_tool(tc, tool_name):
matched_name = tool_name
matched_args = tool_args
# Check if args contain expected substring
if tc.expected_params_check:
result.tool_args_valid = tc.expected_params_check in tool_args_str
else:
result.tool_args_valid = True
result.schema_ok = parsed_args_ok
result.tool_called = matched_name or tool_calls[0].get("function", {}).get("name", "")
if matched_name:
result.tool_args_valid = (
tc.expected_params_check in matched_args if tc.expected_params_check else True
)
result.success = result.schema_ok and result.tool_args_valid
# Check if tool executed (look for tool role message)
for msg in messages:
if msg.get("role") == "tool":
content = msg.get("content", "")
if content and "error" not in content.lower()[:50]:
if content:
result.execution_ok = True
break
elif content:
result.execution_ok = True # got a response, even if error
break
result.parallel_ok = result.tool_count > 1 and result.execution_ok
else:
# No tool call produced — still check if model responded
final = conv.get("final_response", "")
result.raw_response = final[:200] if final else ""
except Exception as e:
result.error = f"{type(e).__name__}: {str(e)[:200]}"
result.latency_s = round(time.time() - t0, 2) if 't0' in dir() else 0
result.latency_s = round(time.time() - t0, 2) if 't0' in locals() else 0
return result
@@ -406,100 +499,134 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
"""Generate markdown benchmark report."""
now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")
# Aggregate per model
stats: dict[str, ModelStats] = {}
for m in models:
stats[m] = ModelStats(model=m)
stats: dict[str, ModelStats] = {m: ModelStats(model=m) for m in models}
by_category: dict[str, dict[str, list[CallResult]]] = {}
for r in results:
s = stats[r.model]
s.total += 1
s.schema_ok += int(r.schema_ok)
s.exec_ok += int(r.execution_ok)
s.latency_sum += r.latency_s
if not r.success:
s.failures.append(r)
s.total_tokens += r.total_tokens
if r.estimated_cost_usd is not None:
s.total_cost_usd += r.estimated_cost_usd
s.known_cost_calls += 1
if r.skipped:
s.skipped += 1
else:
s.schema_ok += int(r.schema_ok)
s.exec_ok += int(r.execution_ok)
s.parallel_ok += int(r.parallel_ok)
if not r.success:
s.failures.append(r)
by_category.setdefault(r.category, {}).setdefault(r.model, []).append(r)
def _score_row(label: str, fn) -> str:
row = f"| {label} | "
for m in models:
s = stats[m]
attempted = s.total - s.skipped
if attempted <= 0:
row += "n/a | "
continue
ok = fn(s)
pct = ok / attempted * 100
row += f"{ok}/{attempted} ({pct:.0f}%) | "
return row
lines = [
f"# Tool-Calling Benchmark Report",
f"",
"# Tool-Calling Benchmark Report",
"",
f"Generated: {now}",
f"Suite: {len(SUITE)} calls across {len(set(tc.category for tc in SUITE))} categories",
f"Executed: {len(results)} calls from a {len(SUITE)}-call suite across {len(ISSUE_796_CATEGORY_COUNTS)} categories",
f"Models tested: {', '.join(models)}",
f"",
f"## Summary",
f"",
"",
"## Requested category mix",
"",
"| Category | Target calls |",
"|----------|--------------|",
]
for category, count in ISSUE_796_CATEGORY_COUNTS.items():
lines.append(f"| {category} | {count} |")
lines.extend([
"",
"## Summary",
"",
f"| Metric | {' | '.join(models)} |",
f"|--------|{'|'.join('---------' for _ in models)}|",
]
_score_row("Schema parse success", lambda s: s.schema_ok),
_score_row("Tool execution success", lambda s: s.exec_ok),
_score_row("Parallel tool success", lambda s: s.parallel_ok),
])
# Schema parse success
row = "| Schema parse success | "
for m in models:
s = stats[m]
row += f"{s.schema_ok}/{s.total} ({s.schema_pct:.0f}%) | "
lines.append(row)
# Tool execution success
row = "| Tool execution success | "
for m in models:
s = stats[m]
row += f"{s.exec_ok}/{s.total} ({s.exec_pct:.0f}%) | "
lines.append(row)
# Correct tool selected
row = "| Correct tool selected | "
for m in models:
s = stats[m]
correct = sum(1 for r in results if r.model == m and r.success)
pct = (correct / s.total * 100) if s.total else 0
row += f"{correct}/{s.total} ({pct:.0f}%) | "
lines.append(row)
# Avg latency
row = "| Avg latency (s) | "
for m in models:
s = stats[m]
row += f"{s.avg_latency:.2f} | "
row += f"{stats[m].avg_latency:.2f} | "
lines.append(row)
row = "| Avg tokens per call | "
for m in models:
total = stats[m].total
avg_tokens = stats[m].total_tokens / total if total else 0
row += f"{avg_tokens:.1f} | "
lines.append(row)
row = "| Avg token cost per call (USD) | "
for m in models:
avg_cost = stats[m].avg_cost_usd
row += (f"{avg_cost:.6f} | " if avg_cost is not None else "n/a | ")
lines.append(row)
row = "| Skipped / unavailable | "
for m in models:
s = stats[m]
row += f"{s.skipped}/{s.total} | "
lines.append(row)
lines.append("")
# Per-category breakdown
lines.append("## Per-Category Breakdown")
lines.append("## Per-category breakdown")
lines.append("")
for cat in sorted(by_category.keys()):
lines.append(f"### {cat.title()}")
lines.append("")
lines.append(f"| Metric | {' | '.join(models)} |")
lines.append(f"|--------|{'|'.join('---------' for _ in models)}|")
cat_data = by_category[cat]
for metric_name, fn in [
("Schema OK", lambda r: r.schema_ok),
("Exec OK", lambda r: r.execution_ok),
("Parallel OK", lambda r: r.parallel_ok),
("Correct tool", lambda r: r.success),
]:
row = f"| {metric_name} | "
for m in models:
results_m = cat_data.get(m, [])
total = len(results_m)
ok = sum(1 for r in results_m if fn(r))
pct = (ok / total * 100) if total else 0
row += f"{ok}/{total} ({pct:.0f}%) | "
results_m = by_category[cat].get(m, [])
attempted = [r for r in results_m if not r.skipped]
if not attempted:
row += "n/a | "
continue
ok = sum(1 for r in attempted if fn(r))
pct = ok / len(attempted) * 100
row += f"{ok}/{len(attempted)} ({pct:.0f}%) | "
lines.append(row)
row = "| Avg tokens | "
for m in models:
results_m = by_category[cat].get(m, [])
avg_tokens = sum(r.total_tokens for r in results_m) / len(results_m) if results_m else 0
row += f"{avg_tokens:.1f} | "
lines.append(row)
row = "| Skipped | "
for m in models:
results_m = by_category[cat].get(m, [])
skipped = sum(1 for r in results_m if r.skipped)
row += f"{skipped}/{len(results_m)} | "
lines.append(row)
lines.append("")
# Failure analysis
lines.append("## Failure Analysis")
lines.append("## Failure analysis")
lines.append("")
any_failures = False
for m in models:
s = stats[m]
@@ -514,28 +641,40 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
err = r.error or "wrong tool"
lines.append(f"| {r.test_id} | {r.category} | {r.expected_tool} | {got} | {err[:60]} |")
lines.append("")
if not any_failures:
lines.append("No failures detected.")
lines.append("No model failures detected.")
lines.append("")
# Raw results JSON
lines.append("## Raw Results")
skipped_results = [r for r in results if r.skipped]
lines.append("## Skipped / unavailable cases")
lines.append("")
if skipped_results:
lines.append("| Test | Model | Category | Reason |")
lines.append("|------|-------|----------|--------|")
for r in skipped_results:
lines.append(f"| {r.test_id} | {r.model} | {r.category} | {r.skip_reason[:80]} |")
else:
lines.append("No cases were skipped.")
lines.append("")
lines.append("## Raw results")
lines.append("")
lines.append("```json")
lines.append(json.dumps([asdict(r) for r in results], indent=2, default=str))
lines.append("```")
report = "\n".join(lines)
output_path.write_text(report)
output_path.write_text(report, encoding="utf-8")
return report
def main():
parser = argparse.ArgumentParser(description="Tool-calling benchmark")
parser.add_argument("--models", nargs="+",
default=["nous:gia-3/gemma-4-31b", "nous:mimo-v2-pro"],
default=list(DEFAULT_COMPARE_MODELS),
help="Model specs to test (provider:model)")
parser.add_argument("--compare", action="store_true",
help="Use the issue #796 default comparison set")
parser.add_argument("--limit", type=int, default=0,
help="Run only first N tests (0 = all)")
parser.add_argument("--category", type=str, default="",
@@ -546,6 +685,9 @@ def main():
help="Print test cases without running them")
args = parser.parse_args()
if args.compare:
args.models = list(DEFAULT_COMPARE_MODELS)
# Filter suite
suite = SUITE[:]
if args.category:

View File

@@ -1,132 +0,0 @@
# Hindsight local eval homes for live Hermes runtime testing
Issue: #1010
Parent: #985
This document defines a reproducible, profile-scoped evaluation layout for baseline / MemPalace / Hindsight comparisons without requiring Hindsight Cloud.
## Eval home layout
Use three separate `HERMES_HOME` directories so each run has isolated config, memory, sessions, and artifacts.
```text
~/.hermes/profiles/atlas-baseline/
config.yaml
.env
MEMORY.md
USER.md
reports/context_snapshots/
~/.hermes/profiles/atlas-mempalace/
config.yaml
.env
MEMORY.md
USER.md
reports/context_snapshots/
plugins/ # if a local MemPalace plugin is installed for this eval lane
~/.hermes/profiles/atlas-hindsight/
config.yaml
.env
MEMORY.md
USER.md
hindsight/config.json
reports/context_snapshots/
```
## Hindsight local config
The Hindsight provider already loads config from `$HERMES_HOME/hindsight/config.json` first. For the local eval lane, prefer `local_embedded` so Hermes can bring up a local Hindsight daemon without cloud signup.
Example `~/.hermes/profiles/atlas-hindsight/hindsight/config.json`:
```json
{
"mode": "local_embedded",
"memory_mode": "context",
"recall_prefetch_method": "recall",
"llm_provider": "ollama",
"llm_model": "gemma3:12b",
"api_url": "http://localhost:8888"
}
```
Notes:
- `local_embedded` avoids any Hindsight Cloud dependency.
- If `profile` is omitted, Hermes now derives a stable local Hindsight profile name from the active profile identity or `HERMES_HOME` instead of collapsing all local runs into the shared legacy `hermes` profile (a small sketch of this derivation follows these notes).
- `local_external` remains valid if you already run a local Hindsight server yourself.
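For example, a minimal sketch of the documented fallback order (identity first, then the `HERMES_HOME` basename, sanitized to filesystem-safe characters); the function name here is illustrative:
```python
import re
from pathlib import Path

def derive_profile_name(agent_identity: str = "", hermes_home: str = "") -> str:
    # Documented fallback order: identity, then HERMES_HOME basename, then "hermes".
    raw = (agent_identity or "").strip() or (Path(hermes_home).name.strip() if hermes_home else "")
    raw = raw or "hermes"
    return re.sub(r"[^A-Za-z0-9_.-]+", "-", raw).strip(".-_") or "hermes"

print(derive_profile_name(hermes_home="~/.hermes/profiles/atlas-hindsight"))  # -> atlas-hindsight
```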
## Runtime switching procedure
Switch by exporting `HERMES_HOME` before launching Hermes.
### 1. Baseline
```bash
export HERMES_HOME="$HOME/.hermes/profiles/atlas-baseline"
unset HERMES_CONTEXT_SNAPSHOTS
hermes chat
```
### 2. MemPalace lane
```bash
export HERMES_HOME="$HOME/.hermes/profiles/atlas-mempalace"
export HERMES_CONTEXT_SNAPSHOTS=1
hermes chat
```
### 3. Hindsight lane
```bash
export HERMES_HOME="$HOME/.hermes/profiles/atlas-hindsight"
export HERMES_CONTEXT_SNAPSHOTS=1
hermes chat
```
## Raw artifact capture
When `HERMES_CONTEXT_SNAPSHOTS=1` is set, Hermes writes first-class prompt-composition artifacts under the active home by default.
Artifact tree:
```text
$HERMES_HOME/reports/context_snapshots/<session-id>/call_001/
system_prompt.txt
memory_provider_system_prompt.txt
memory_prefetch_raw.txt
memory_context_block.txt
api_user_message.txt
api_messages.json
metadata.json
```
Minimum files a benchmark should inspect (a loader sketch follows the lists below):
- `system_prompt.txt`
- `memory_prefetch_raw.txt`
- `memory_context_block.txt`
- `api_user_message.txt`
- `api_messages.json`
These prove:
- what the system prompt was
- what the provider prefetched
- what entered `<memory-context>`
- what the final API user message looked like
- what full payload reached the model
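The lists above reduce to a small loader. A minimal sketch, assuming the artifact tree shown earlier; the helper name and the `HERMES_HOME` fallback are illustrative:
```python
import json
import os
from pathlib import Path

def load_call_snapshot(call_dir: Path) -> dict:
    """Read the minimum snapshot artifacts for one API call."""
    def _read(name: str) -> str:
        path = call_dir / name
        return path.read_text(encoding="utf-8") if path.exists() else ""

    return {
        "system_prompt": _read("system_prompt.txt"),
        "memory_prefetch_raw": _read("memory_prefetch_raw.txt"),
        "memory_context_block": _read("memory_context_block.txt"),
        "api_user_message": _read("api_user_message.txt"),
        "api_messages": json.loads(_read("api_messages.json") or "[]"),
    }

# Example: inspect the first call of a session under the active eval home.
home = Path(os.environ.get("HERMES_HOME", str(Path.home() / ".hermes")))
# snapshot = load_call_snapshot(home / "reports" / "context_snapshots" / "<session-id>" / "call_001")
```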
## Follow-on benchmark workflow
A benchmark issue can now consume this path without redoing integration work:
1. pick one eval home (`atlas-baseline`, `atlas-mempalace`, `atlas-hindsight`)
2. export the corresponding `HERMES_HOME`
3. run Hermes on the same prompt set
4. compare the snapshot artifacts in `reports/context_snapshots/` (see the comparison sketch after this list)
5. score recall quality and answer quality separately
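A minimal comparison sketch across the three lanes, assuming each home has already produced snapshots for the same prompt set; the profile root mirrors the layout above, and block size is only a rough proxy (real scoring should compare content):
```python
from pathlib import Path

PROFILES = ["atlas-baseline", "atlas-mempalace", "atlas-hindsight"]
PROFILE_ROOT = Path.home() / ".hermes" / "profiles"

def memory_block_sizes(profile: str) -> dict[str, int]:
    """Size of the <memory-context> block captured for each call in one lane."""
    snapshots = PROFILE_ROOT / profile / "reports" / "context_snapshots"
    sizes: dict[str, int] = {}
    for block in sorted(snapshots.glob("*/call_*/memory_context_block.txt")):
        sizes[str(block.parent.relative_to(snapshots))] = len(block.read_text(encoding="utf-8"))
    return sizes

for profile in PROFILES:
    print(profile, memory_block_sizes(profile))
```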
## Why this is sovereign
- no hosted Hindsight Cloud dependency is required
- the Hindsight config is profile-scoped under `hindsight/config.json`
- the runtime artifacts stay under the active `HERMES_HOME`
- switching between baseline / MemPalace / Hindsight is just a `HERMES_HOME` swap

View File

@@ -178,25 +178,6 @@ def _load_config() -> dict:
}
def _derive_local_profile_name(agent_identity: str = "", hermes_home: str = "") -> str:
"""Return a stable profile name for local embedded Hindsight storage.
Prefer the active Hermes profile identity when available, otherwise fall back
to the basename of the active HERMES_HOME path. This prevents all local
Hindsight eval homes from sharing the legacy default profile name "hermes".
"""
from pathlib import Path
import re
raw = (agent_identity or "").strip()
if not raw and hermes_home:
raw = Path(hermes_home).name.strip()
if not raw:
raw = "hermes"
safe = re.sub(r"[^A-Za-z0-9_.-]+", "-", raw).strip(".-_")
return safe or "hermes"
# ---------------------------------------------------------------------------
# MemoryProvider implementation
# ---------------------------------------------------------------------------
@@ -487,8 +468,6 @@ class HindsightMemoryProvider(MemoryProvider):
def initialize(self, session_id: str, **kwargs) -> None:
self._session_id = session_id
hermes_home = str(kwargs.get("hermes_home") or "")
agent_identity = str(kwargs.get("agent_identity") or "")
# Check client version and auto-upgrade if needed
try:
@@ -521,11 +500,6 @@ class HindsightMemoryProvider(MemoryProvider):
# "local" is a legacy alias for "local_embedded"
if self._mode == "local":
self._mode = "local_embedded"
if self._mode == "local_embedded" and not self._config.get("profile"):
self._config["profile"] = _derive_local_profile_name(
agent_identity=agent_identity,
hermes_home=hermes_home,
)
self._api_key = self._config.get("apiKey") or self._config.get("api_key") or os.environ.get("HINDSIGHT_API_KEY", "")
default_url = _DEFAULT_LOCAL_URL if self._mode in ("local_embedded", "local_external") else _DEFAULT_API_URL
self._api_url = self._config.get("api_url") or os.environ.get("HINDSIGHT_API_URL", default_url)

View File

@@ -604,8 +604,6 @@ class AIAgent:
checkpoint_max_snapshots: int = 50,
pass_session_id: bool = False,
persist_session: bool = True,
context_snapshots_enabled: bool | None = None,
context_snapshots_dir: str | None = None,
):
"""
Initialize the AI Agent.
@@ -1131,43 +1129,6 @@ class AIAgent:
except Exception:
_agent_cfg = {}
def _is_enabled(value):
if isinstance(value, bool):
return value
return str(value).strip().lower() in {"1", "true", "yes", "on"}
_debug_cfg = _agent_cfg.get("debug", {}) if isinstance(_agent_cfg, dict) else {}
if not isinstance(_debug_cfg, dict):
_debug_cfg = {}
_snapshot_cfg = _debug_cfg.get("context_snapshots", {})
if not isinstance(_snapshot_cfg, dict):
_snapshot_cfg = {}
_snapshots_env = os.getenv("HERMES_CONTEXT_SNAPSHOTS")
_snapshots_dir_env = os.getenv("HERMES_CONTEXT_SNAPSHOTS_DIR")
if context_snapshots_enabled is None:
if _snapshots_env is not None:
self._context_snapshots_enabled = _is_enabled(_snapshots_env)
else:
self._context_snapshots_enabled = _is_enabled(_snapshot_cfg.get("enabled", False))
else:
self._context_snapshots_enabled = bool(context_snapshots_enabled)
self._context_snapshots_dir = (
context_snapshots_dir
or _snapshots_dir_env
or _snapshot_cfg.get("dir")
or None
)
try:
from agent.context_snapshots import ContextSnapshotRecorder
self._context_snapshot_recorder = ContextSnapshotRecorder(
session_id=self.session_id,
enabled=self._context_snapshots_enabled,
base_dir=self._context_snapshots_dir,
)
except Exception as _snapshot_err:
logger.debug("Context snapshot recorder init failed: %s", _snapshot_err)
self._context_snapshot_recorder = None
# Persistent memory (MEMORY.md + USER.md) -- loaded from disk
self._memory_store = None
self._memory_enabled = False
@@ -8183,17 +8144,12 @@ class AIAgent:
# Use original_user_message (clean input) — user_message may contain
# injected skill content that bloats / breaks provider queries.
_ext_prefetch_cache = ""
_memory_provider_prompt_cache = ""
if self._memory_manager:
try:
_query = original_user_message if isinstance(original_user_message, str) else ""
_ext_prefetch_cache = self._memory_manager.prefetch_all(_query) or ""
except Exception:
pass
try:
_memory_provider_prompt_cache = self._memory_manager.build_system_prompt() or ""
except Exception:
pass
while (api_call_count < self.max_iterations and self.iteration_budget.remaining > 0) or self._budget_grace_call:
# Reset per-turn checkpoint dedup so each iteration can take one snapshot
@@ -8261,8 +8217,6 @@ class AIAgent:
# However, providers like Moonshot AI require a separate 'reasoning_content' field
# on assistant messages with tool_calls. We handle both cases here.
api_messages = []
_current_api_user_message = ""
_current_memory_context_block = ""
for idx, msg in enumerate(messages):
api_msg = msg.copy()
@@ -8277,15 +8231,12 @@ class AIAgent:
_fenced = build_memory_context_block(_ext_prefetch_cache)
if _fenced:
_injections.append(_fenced)
_current_memory_context_block = _fenced
if _plugin_user_context:
_injections.append(_plugin_user_context)
if _injections:
_base = api_msg.get("content", "")
if isinstance(_base, str):
api_msg["content"] = _base + "\n\n" + "\n\n".join(_injections)
if isinstance(api_msg.get("content"), str):
_current_api_user_message = api_msg["content"]
# For ALL assistant messages, pass reasoning back to the API
# This ensures multi-turn reasoning context is preserved
@@ -8320,13 +8271,7 @@ class AIAgent:
from agent.privacy_filter import PrivacyFilter
pf = PrivacyFilter()
# Sanitize messages before they reach the provider
_pf_result = pf.sanitize_messages(api_messages)
if isinstance(_pf_result, tuple):
api_messages, _pf_report = _pf_result
if getattr(pf, "last_report", None) is None:
pf.last_report = _pf_report
else:
api_messages = _pf_result
api_messages = pf.sanitize_messages(api_messages)
if pf.last_report and pf.last_report.had_redactions:
logger.info(f"Privacy Filter: Redacted sensitive data from turn payload. Details: {pf.last_report.summary()}")
except Exception as e:
@@ -8397,27 +8342,6 @@ class AIAgent:
new_tcs.append(tc)
am["tool_calls"] = new_tcs
if self._context_snapshot_recorder:
try:
self._context_snapshot_recorder.record_call(
api_call_count,
system_prompt=effective_system,
memory_provider_system_prompt=_memory_provider_prompt_cache,
memory_prefetch_raw=_ext_prefetch_cache,
memory_context_block=_current_memory_context_block,
api_user_message=_current_api_user_message,
api_messages=api_messages,
metadata={
"model": self.model,
"provider": self.provider,
"platform": self.platform or "",
"api_mode": self.api_mode,
"memory_providers": [p.name for p in getattr(self._memory_manager, "providers", [])],
},
)
except Exception as _snapshot_err:
logger.debug("Context snapshot capture failed: %s", _snapshot_err)
# Calculate approximate request size for logging
total_chars = sum(len(str(msg)) for msg in api_messages)
approx_tokens = estimate_messages_tokens_rough(api_messages)

View File

@@ -1,43 +0,0 @@
from pathlib import Path
from agent.context_snapshots import ContextSnapshotRecorder
def test_disabled_recorder_writes_nothing(tmp_path):
recorder = ContextSnapshotRecorder(session_id="session-1", enabled=False, base_dir=tmp_path)
out = recorder.record_call(
1,
system_prompt="system",
api_messages=[{"role": "user", "content": "hello"}],
)
assert out is None
assert not (tmp_path / "session-1").exists()
def test_enabled_recorder_writes_expected_artifacts(tmp_path):
recorder = ContextSnapshotRecorder(session_id="session-1", enabled=True, base_dir=tmp_path)
out = recorder.record_call(
1,
system_prompt="system prompt",
memory_provider_system_prompt="# Hindsight Memory\nActive.",
memory_prefetch_raw="- remembered fact",
memory_context_block="<memory-context>\nremembered\n</memory-context>",
api_user_message="What do I prefer?\n\n<memory-context>\nremembered\n</memory-context>",
api_messages=[
{"role": "system", "content": "system prompt"},
{"role": "user", "content": "What do I prefer?"},
],
metadata={"provider": "openai", "memory_providers": ["builtin", "hindsight"]},
)
assert out == tmp_path / "session-1" / "call_001"
assert (out / "system_prompt.txt").read_text(encoding="utf-8") == "system prompt"
assert (out / "memory_provider_system_prompt.txt").read_text(encoding="utf-8").startswith("# Hindsight Memory")
assert (out / "memory_prefetch_raw.txt").read_text(encoding="utf-8") == "- remembered fact"
assert "<memory-context>" in (out / "memory_context_block.txt").read_text(encoding="utf-8")
assert "What do I prefer?" in (out / "api_user_message.txt").read_text(encoding="utf-8")
assert (out / "api_messages.json").read_text(encoding="utf-8").startswith("[")
assert '"hindsight"' in (out / "metadata.json").read_text(encoding="utf-8")

View File

@@ -596,26 +596,3 @@ class TestAvailability:
monkeypatch.setenv("HINDSIGHT_MODE", "local")
p = HindsightMemoryProvider()
assert p.is_available()
def test_local_embedded_profile_defaults_to_agent_identity(self, tmp_path, monkeypatch):
config_path = tmp_path / "hindsight" / "config.json"
config_path.parent.mkdir(parents=True, exist_ok=True)
config_path.write_text(json.dumps({
"mode": "local_embedded",
"llm_provider": "ollama",
"llm_model": "gemma3:12b",
}))
monkeypatch.setattr(
"plugins.memory.hindsight.get_hermes_home",
lambda: tmp_path,
)
p = HindsightMemoryProvider()
p.initialize(
session_id="test-session",
hermes_home=str(tmp_path / "profiles" / "atlas-hindsight"),
platform="cli",
agent_identity="atlas-hindsight",
)
assert p._config["profile"] == "atlas-hindsight"

View File

@@ -1,94 +0,0 @@
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import MagicMock, patch
import importlib
import sys
import types
def _make_tool_defs(*names: str) -> list:
return [
{
"type": "function",
"function": {
"name": n,
"description": f"{n} tool",
"parameters": {"type": "object", "properties": {}},
},
}
for n in names
]
def _mock_response(content="Done", finish_reason="stop"):
msg = SimpleNamespace(content=content, tool_calls=None)
choice = SimpleNamespace(message=msg, finish_reason=finish_reason)
return SimpleNamespace(choices=[choice], usage=SimpleNamespace(prompt_tokens=1, completion_tokens=1, total_tokens=2))
def _load_ai_agent():
sys.modules.setdefault("agent.auxiliary_client", types.SimpleNamespace(call_llm=lambda *a, **k: ""))
run_agent = importlib.import_module("run_agent")
return run_agent.AIAgent
def test_run_conversation_writes_context_snapshot_artifacts(tmp_path):
AIAgent = _load_ai_agent()
class _FakePrivacyFilter:
def __init__(self):
self.last_report = None
def sanitize_messages(self, messages):
return list(messages)
with (
patch("run_agent.get_tool_definitions", return_value=_make_tool_defs("web_search")),
patch("run_agent.check_toolset_requirements", return_value={}),
patch("run_agent.OpenAI"),
patch("hermes_cli.plugins.invoke_hook", return_value=[]),
patch.dict(sys.modules, {"agent.privacy_filter": types.SimpleNamespace(PrivacyFilter=_FakePrivacyFilter)}),
):
agent = AIAgent(
api_key="test-key-1234567890",
base_url="https://example.com/v1",
quiet_mode=True,
skip_context_files=True,
skip_memory=True,
context_snapshots_enabled=True,
context_snapshots_dir=str(tmp_path),
)
agent.client = MagicMock()
agent.client.chat.completions.create.return_value = _mock_response(content="Done")
agent._build_system_prompt = MagicMock(return_value="Core system prompt")
agent._memory_manager = MagicMock()
agent._memory_manager.prefetch_all.return_value = "- remembered preference"
agent._memory_manager.build_system_prompt.return_value = "# Hindsight Memory\nActive."
agent._memory_manager.providers = [
SimpleNamespace(name="builtin"),
SimpleNamespace(name="hindsight"),
]
result = agent.run_conversation("What do I prefer?")
assert result["final_response"] == "Done"
call_dir = tmp_path / agent.session_id / "call_001"
assert call_dir.exists()
assert (call_dir / "system_prompt.txt").read_text(encoding="utf-8") == "Core system prompt"
assert (call_dir / "memory_provider_system_prompt.txt").read_text(encoding="utf-8").startswith("# Hindsight Memory")
assert (call_dir / "memory_prefetch_raw.txt").read_text(encoding="utf-8") == "- remembered preference"
assert "<memory-context>" in (call_dir / "memory_context_block.txt").read_text(encoding="utf-8")
api_user_message = (call_dir / "api_user_message.txt").read_text(encoding="utf-8")
assert "What do I prefer?" in api_user_message
assert "remembered preference" in api_user_message
api_messages = (call_dir / "api_messages.json").read_text(encoding="utf-8")
assert '"role": "system"' in api_messages
assert '"role": "user"' in api_messages
metadata = (call_dir / "metadata.json").read_text(encoding="utf-8")
assert '"hindsight"' in metadata

View File

@@ -1,22 +0,0 @@
from pathlib import Path
ROOT = Path(__file__).resolve().parents[1]
DOC = ROOT / "docs" / "hindsight-local-eval.md"
def test_hindsight_local_eval_doc_exists_and_covers_switching():
assert DOC.exists(), "missing Hindsight local eval doc"
text = DOC.read_text(encoding="utf-8")
for snippet in (
"atlas-baseline",
"atlas-mempalace",
"atlas-hindsight",
"HERMES_HOME",
"HERMES_CONTEXT_SNAPSHOTS",
"memory_prefetch_raw.txt",
"api_user_message.txt",
"local_embedded",
"hindsight/config.json",
):
assert snippet in text

View File

@@ -0,0 +1,115 @@
"""Tests for Issue #796 tool-calling benchmark coverage and reporting."""
import sys
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import patch
sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))
from tool_call_benchmark import ( # noqa: E402
CallResult,
DEFAULT_COMPARE_MODELS,
ISSUE_796_CATEGORY_COUNTS,
ToolCall,
generate_report,
run_single_test,
suite_category_counts,
)
def test_suite_counts_match_issue_796_distribution():
counts = suite_category_counts()
assert counts == ISSUE_796_CATEGORY_COUNTS
assert sum(counts.values()) == 100
def test_default_compare_models_cover_issue_796_lanes():
assert len(DEFAULT_COMPARE_MODELS) == 3
assert any("gemma-4-31b" in spec for spec in DEFAULT_COMPARE_MODELS)
assert any("gemma-4-26b" in spec for spec in DEFAULT_COMPARE_MODELS)
assert any("mimo-v2-pro" in spec for spec in DEFAULT_COMPARE_MODELS)
def test_generate_report_includes_parallel_and_cost_metrics(tmp_path):
output_path = tmp_path / "report.md"
results = [
CallResult(
test_id="file-01",
category="file",
model="gemma-4-31b",
prompt="Read the file.",
expected_tool="read_file",
success=True,
tool_called="read_file",
schema_ok=True,
tool_args_valid=True,
execution_ok=True,
tool_count=2,
parallel_ok=True,
latency_s=1.25,
total_tokens=123,
estimated_cost_usd=0.0012,
cost_status="estimated",
),
CallResult(
test_id="web-01",
category="web",
model="mimo-v2-pro",
prompt="Search the web.",
expected_tool="web_search",
success=False,
tool_called="web_search",
schema_ok=True,
tool_args_valid=False,
execution_ok=False,
tool_count=1,
parallel_ok=False,
latency_s=2.5,
error="bad args",
total_tokens=456,
estimated_cost_usd=None,
cost_status="unknown",
skipped=True,
skip_reason="web_search unavailable",
),
]
report = generate_report(results, ["gemma-4-31b", "mimo-v2-pro"], output_path)
assert output_path.exists()
assert "Parallel tool success" in report
assert "Avg token cost per call (USD)" in report
assert "Skipped / unavailable" in report
assert "Requested category mix" in report
def test_run_single_test_skips_when_expected_tool_unavailable():
class FakeAgent:
def __init__(self, *args, **kwargs):
self.valid_tool_names = {"read_file", "terminal"}
self.session_input_tokens = 0
self.session_output_tokens = 0
self.session_cache_read_tokens = 0
self.session_cache_write_tokens = 0
self.session_api_calls = 0
self.base_url = ""
self.api_key = None
def run_conversation(self, *args, **kwargs):
raise AssertionError("run_conversation should not be called for unavailable tools")
tc = ToolCall(
id="mcp-01",
category="mcp",
prompt="Use an MCP tool to list resources.",
expected_tool="",
expected_tool_prefix="mcp_",
)
with patch.dict(sys.modules, {"run_agent": SimpleNamespace(AIAgent=FakeAgent)}):
result = run_single_test(tc, "gemini:gemma-4-31b-it", "gemini")
assert result.skipped is True
assert "mcp_" in result.skip_reason
assert result.success is False