Compare commits
2 Commits

| Author | SHA1 | Date |
|--------|------|------|
|        | 411aea9edf | |
|        | 877005b06e | |

benchmarks/gemma4-tool-calling-2026-04-22.md (Normal file, 139 lines added)
@@ -0,0 +1,139 @@
# Tool-Calling Benchmark Report

Generated: 2026-04-22 15:46 UTC
Executed: 3 calls from a 100-call suite across 7 categories
Models tested: nous:gia-3/gemma-4-31b, gemini:gemma-4-26b-it, nous:mimo-v2-pro

## Requested category mix

| Category | Target calls |
|----------|--------------|
| file | 20 |
| terminal | 20 |
| web | 15 |
| code | 15 |
| browser | 10 |
| delegate | 10 |
| mcp | 10 |

## Summary

| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema parse success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Tool execution success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel tool success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg latency (s) | 0.00 | 0.00 | 0.00 |
| Avg tokens per call | 0.0 | 0.0 | 0.0 |
| Avg token cost per call (USD) | n/a | n/a | n/a |
| Skipped / unavailable | 0/1 | 0/1 | 0/1 |

## Per-category breakdown

### File

| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Exec OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Correct tool | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg tokens | 0.0 | 0.0 | 0.0 |
| Skipped | 0/1 | 0/1 | 0/1 |

## Failure analysis

### nous:gia-3/gemma-4-31b — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

### gemini:gemma-4-26b-it — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

### nous:mimo-v2-pro — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

## Skipped / unavailable cases

No cases were skipped.

## Raw results

```json
[
  {
    "test_id": "file-01",
    "category": "file",
    "model": "nous:gia-3/gemma-4-31b",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  },
  {
    "test_id": "file-01",
    "category": "file",
    "model": "gemini:gemma-4-26b-it",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  },
  {
    "test_id": "file-01",
    "category": "file",
    "model": "nous:mimo-v2-pro",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  }
]
```

benchmarks/tool_call_benchmark.py

@@ -8,10 +8,11 @@ success rates, latency, and token costs.
Usage:
    python3 benchmarks/tool_call_benchmark.py                    # full 100-call suite
    python3 benchmarks/tool_call_benchmark.py --limit 10         # quick smoke test
    python3 benchmarks/tool_call_benchmark.py --models nous      # single model
    python3 benchmarks/tool_call_benchmark.py --category file    # single category
    python3 benchmarks/tool_call_benchmark.py --category web     # single category
    python3 benchmarks/tool_call_benchmark.py --compare          # issue #796 default model comparison

Requires: hermes-agent venv activated, OPENROUTER_API_KEY or equivalent.
Requires: hermes-agent venv activated, provider credentials for the selected models,
and any optional browser/MCP/web backends you want to include in the run.
"""

import argparse
@@ -25,10 +26,12 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

# Ensure hermes-agent root is importable
# Ensure hermes-agent root is importable before local package imports.
REPO_ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(REPO_ROOT))

from agent.usage_pricing import CanonicalUsage, estimate_usage_cost

# ---------------------------------------------------------------------------
# Test Definitions
# ---------------------------------------------------------------------------
@@ -39,9 +42,11 @@ class ToolCall:
    id: str
    category: str
    prompt: str
    expected_tool: str  # tool name we expect the model to call
    expected_params_check: str = ""  # substring expected in JSON args
    timeout: int = 30  # max seconds per call
    expected_tool: str  # exact tool name we expect the model to call
    expected_params_check: str = ""  # substring expected in JSON args
    expected_tool_prefix: str = ""  # prefix match for dynamic surfaces like mcp_*
    expects_parallel: bool = False  # whether this prompt should elicit multiple tool calls
    timeout: int = 30  # max seconds per call
    notes: str = ""


@@ -185,85 +190,107 @@ SUITE: list[ToolCall] = [
    ToolCall("deleg-10", "delegate", "Delegate: create a temp file /tmp/bench_deleg.txt with 'done'.",
             "delegate_task", "write"),

    # ── Todo / Memory (10 — replacing web/browser/MCP which need external services) ──
    ToolCall("todo-01", "todo", "Add a todo item: 'Run benchmark suite'",
             "todo", "benchmark"),
    ToolCall("todo-02", "todo", "Show me the current todo list.",
             "todo", ""),
    ToolCall("todo-03", "todo", "Mark the first todo item as completed.",
             "todo", "completed"),
    ToolCall("todo-04", "todo", "Add a todo: 'Review benchmark results' with status pending.",
             "todo", "Review"),
    ToolCall("todo-05", "todo", "Clear all completed todos.",
             "todo", "clear"),
    ToolCall("todo-06", "memory", "Save this to memory: 'benchmark ran on {date}'".format(
        date=datetime.now().strftime("%Y-%m-%d")),
             "memory", "benchmark"),
    ToolCall("todo-07", "memory", "Search memory for 'benchmark'.",
             "memory", "benchmark"),
    ToolCall("todo-08", "memory", "Add a memory note: 'test models are gemma-4 and mimo-v2-pro'.",
             "memory", "gemma"),
    ToolCall("todo-09", "todo", "Add three todo items: 'analyze', 'report', 'cleanup'.",
             "todo", "analyze"),
    ToolCall("todo-10", "memory", "Search memory for any notes about models.",
             "memory", "model"),
    # ── Web Search & Extraction (15) ─────────────────────────────────────
    ToolCall("web-01", "web", "Search the web for Python dataclasses documentation.",
             "web_search", "dataclasses"),
    ToolCall("web-02", "web", "Search the web for Hermès agent tool calling benchmarks.",
             "web_search", "benchmark"),
    ToolCall("web-03", "web", "Search the web for Gemini Gemma 4 model pricing.",
             "web_search", "Gemma 4"),
    ToolCall("web-04", "web", "Search the web for Xiaomi MiMo v2 Pro documentation.",
             "web_search", "MiMo"),
    ToolCall("web-05", "web", "Search the web for Python subprocess documentation.",
             "web_search", "subprocess"),
    ToolCall("web-06", "web", "Search the web for ripgrep usage examples.",
             "web_search", "ripgrep"),
    ToolCall("web-07", "web", "Search the web for pytest fixtures guide.",
             "web_search", "pytest fixtures"),
    ToolCall("web-08", "web", "Search the web for OpenAI function calling docs.",
             "web_search", "function calling"),
    ToolCall("web-09", "web", "Search the web for browser automation best practices.",
             "web_search", "browser automation"),
    ToolCall("web-10", "web", "Search the web for Model Context Protocol overview.",
             "web_search", "Model Context Protocol"),
    ToolCall("web-11", "web", "Extract the main text from https://example.com.",
             "web_extract", "example.com"),
    ToolCall("web-12", "web", "Extract the page content from https://example.org.",
             "web_extract", "example.org"),
    ToolCall("web-13", "web", "Extract the title and body text from https://www.iana.org/domains/reserved.",
             "web_extract", "iana.org"),
    ToolCall("web-14", "web", "Extract content from https://httpbin.org/html.",
             "web_extract", "httpbin.org"),
    ToolCall("web-15", "web", "Extract the main content from https://www.python.org/.",
             "web_extract", "python.org"),

    # ── Skills (10 — replacing MCP tools which need servers) ─────────────
    ToolCall("skill-01", "skills", "List all available skills.",
             "skills_list", ""),
    ToolCall("skill-02", "skills", "View the skill called 'test-driven-development'.",
             "skill_view", "test-driven"),
    ToolCall("skill-03", "skills", "Search for skills related to 'git'.",
             "skills_list", "git"),
    ToolCall("skill-04", "skills", "View the 'code-review' skill.",
             "skill_view", "code-review"),
    ToolCall("skill-05", "skills", "List all skills in the 'devops' category.",
             "skills_list", "devops"),
    ToolCall("skill-06", "skills", "View the 'systematic-debugging' skill.",
             "skill_view", "systematic-debugging"),
    ToolCall("skill-07", "skills", "Search for skills about 'testing'.",
             "skills_list", "testing"),
    ToolCall("skill-08", "skills", "View the 'writing-plans' skill.",
             "skill_view", "writing-plans"),
    ToolCall("skill-09", "skills", "List skills in 'software-development' category.",
             "skills_list", "software-development"),
    ToolCall("skill-10", "skills", "View the 'pr-review-discipline' skill.",
             "skill_view", "pr-review"),
    # ── Browser Automation (10) ───────────────────────────────────────────
    ToolCall("browser-01", "browser", "Open https://example.com in the browser.",
             "browser_navigate", "example.com"),
    ToolCall("browser-02", "browser", "Open https://www.python.org in the browser.",
             "browser_navigate", "python.org"),
    ToolCall("browser-03", "browser", "Open https://www.wikipedia.org in the browser.",
             "browser_navigate", "wikipedia.org"),
    ToolCall("browser-04", "browser", "Navigate the browser to https://example.org.",
             "browser_navigate", "example.org"),
    ToolCall("browser-05", "browser", "Go to https://httpbin.org/forms/post in the browser.",
             "browser_navigate", "httpbin.org/forms/post"),
    ToolCall("browser-06", "browser", "Open https://www.iana.org/domains/reserved in the browser.",
             "browser_navigate", "iana.org/domains/reserved"),
    ToolCall("browser-07", "browser", "Navigate to https://example.net in the browser.",
             "browser_navigate", "example.net"),
    ToolCall("browser-08", "browser", "Open https://developer.mozilla.org in the browser.",
             "browser_navigate", "developer.mozilla.org"),
    ToolCall("browser-09", "browser", "Navigate the browser to https://www.rfc-editor.org.",
             "browser_navigate", "rfc-editor.org"),
    ToolCall("browser-10", "browser", "Open https://www.gnu.org in the browser.",
             "browser_navigate", "gnu.org"),

    # ── Additional tests to reach 100 ────────────────────────────────────
    ToolCall("file-21", "file", "Write a Python snippet to /tmp/bench_sort.py that sorts [3,1,2].",
             "write_file", "bench_sort"),
    ToolCall("file-22", "file", "Read /tmp/bench_sort.py back and confirm it exists.",
             "read_file", "bench_sort"),
    ToolCall("file-23", "file", "Search for 'class' in all .py files in the benchmarks directory.",
             "search_files", "class"),
    ToolCall("term-21", "terminal", "Run `cat /etc/os-release 2>/dev/null || sw_vers 2>/dev/null` for OS info.",
             "terminal", "os"),
    ToolCall("term-22", "terminal", "Run `nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null` for CPU count.",
             "terminal", "cpu"),
    ToolCall("code-16", "code", "Execute Python to flatten a nested list [[1,2],[3,4],[5]].",
             "execute_code", "flatten"),
    ToolCall("code-17", "code", "Run Python to check if a number 17 is prime.",
             "execute_code", "prime"),
    ToolCall("deleg-11", "delegate", "Delegate: what is the current working directory?",
             "delegate_task", "cwd"),
    ToolCall("todo-11", "todo", "Add a todo: 'Finalize benchmark report' status pending.",
             "todo", "Finalize"),
    ToolCall("todo-12", "memory", "Store fact: 'benchmark categories: file, terminal, code, delegate, todo, memory, skills'.",
             "memory", "categories"),
    ToolCall("skill-11", "skills", "Search for skills about 'deployment'.",
             "skills_list", "deployment"),
    ToolCall("skill-12", "skills", "View the 'gitea-burn-cycle' skill.",
             "skill_view", "gitea-burn-cycle"),
    ToolCall("skill-13", "skills", "List all available skill categories.",
             "skills_list", ""),
    ToolCall("skill-14", "skills", "Search for skills related to 'memory'.",
             "skills_list", "memory"),
    ToolCall("skill-15", "skills", "View the 'mimo-swarm' skill.",
             "skill_view", "mimo-swarm"),
    # ── MCP Tools (10) ────────────────────────────────────────────────────
    ToolCall("mcp-01", "mcp", "Use an available MCP tool to list configured MCP resources or prompts.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-02", "mcp", "Use an MCP tool to inspect available resources on a configured server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-03", "mcp", "Use an MCP tool to read a resource from any configured MCP server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-04", "mcp", "Use an MCP tool to list prompts from any configured MCP server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-05", "mcp", "Use an available MCP tool and report what it returns.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-06", "mcp", "Call any safe MCP tool that is currently available and summarize the response.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-07", "mcp", "Use one configured MCP tool to enumerate data or capabilities.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-08", "mcp", "Use an MCP tool to fetch a small piece of data from a connected server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-09", "mcp", "Invoke an available MCP tool and show the structured result.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-10", "mcp", "Use a currently available MCP tool rather than a built-in Hermes tool.",
             "", "", expected_tool_prefix="mcp_"),
]
# fmt: on

DEFAULT_COMPARE_MODELS = [
    "nous:gia-3/gemma-4-31b",
    "gemini:gemma-4-26b-it",
    "nous:mimo-v2-pro",
]

ISSUE_796_CATEGORY_COUNTS = {
    "file": 20,
    "terminal": 20,
    "web": 15,
    "code": 15,
    "browser": 10,
    "delegate": 10,
    "mcp": 10,
}


def suite_category_counts() -> dict[str, int]:
    counts: dict[str, int] = {}
    for tc in SUITE:
        counts[tc.category] = counts.get(tc.category, 0) + 1
    return counts
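
As an aside, `suite_category_counts()` pairs naturally with `ISSUE_796_CATEGORY_COUNTS` for a pre-flight coverage check. A minimal sketch (the helper is hypothetical, not part of the diff):

```python
# Flag categories that fall short of the requested issue #796 mix.
def check_mix(suite_counts: dict[str, int], target: dict[str, int]) -> list[str]:
    return [cat for cat, want in target.items() if suite_counts.get(cat, 0) < want]

# In the script this would be check_mix(suite_category_counts(), ISSUE_796_CATEGORY_COUNTS).
print(check_mix({"file": 23, "web": 15}, {"file": 20, "web": 15, "mcp": 10}))  # ['mcp']
```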


# ---------------------------------------------------------------------------
# Runner
@@ -278,9 +305,17 @@ class CallResult:
    expected_tool: str
    success: bool
    tool_called: Optional[str] = None
    schema_ok: bool = False
    tool_args_valid: bool = False
    execution_ok: bool = False
    tool_count: int = 0
    parallel_ok: bool = False
    latency_s: float = 0.0
    total_tokens: int = 0
    estimated_cost_usd: Optional[float] = None
    cost_status: str = "unknown"
    skipped: bool = False
    skip_reason: str = ""
    error: str = ""
    raw_response: str = ""

@@ -291,7 +326,12 @@ class ModelStats:
    total: int = 0
    schema_ok: int = 0  # model produced valid tool call JSON
    exec_ok: int = 0  # tool actually ran without error
    parallel_ok: int = 0  # calls with 2+ tool calls that executed successfully
    skipped: int = 0
    latency_sum: float = 0.0
    total_tokens: int = 0
    total_cost_usd: float = 0.0
    known_cost_calls: int = 0
    failures: list = field(default_factory=list)

    @property
@@ -306,6 +346,10 @@ class ModelStats:
    def avg_latency(self) -> float:
        return (self.latency_sum / self.total) if self.total else 0

    @property
    def avg_cost_usd(self) -> Optional[float]:
        return (self.total_cost_usd / self.known_cost_calls) if self.known_cost_calls else None
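
Worth noting: `avg_cost_usd` divides by `known_cost_calls` rather than `total`, so calls whose cost could not be resolved are excluded instead of being averaged in as zero. A small sketch with made-up figures:

```python
s = ModelStats(model="nous:mimo-v2-pro")
s.total = 3                 # three calls executed
s.total_cost_usd = 0.004    # summed over the two calls with known cost
s.known_cost_calls = 2      # the third call came back with cost_status="unknown"

assert s.avg_cost_usd == 0.002   # 0.004 / 2, not 0.004 / 3
```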


def setup_test_files():
    """Create prerequisite files for the benchmark."""
@@ -318,20 +362,38 @@ def setup_test_files():
    )


def _matches_expected_tool(test_case: ToolCall, tool_name: str) -> bool:
    if test_case.expected_tool and tool_name == test_case.expected_tool:
        return True
    if test_case.expected_tool_prefix and tool_name.startswith(test_case.expected_tool_prefix):
        return True
    return False


def _resolve_unavailable_reason(test_case: ToolCall, valid_tool_names: set[str]) -> str:
    if test_case.expected_tool and test_case.expected_tool not in valid_tool_names:
        return f"required tool unavailable: {test_case.expected_tool}"
    if test_case.expected_tool_prefix and not any(
        name.startswith(test_case.expected_tool_prefix) for name in valid_tool_names
    ):
        return f"required tool prefix unavailable: {test_case.expected_tool_prefix}"
    return ""
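
To make the matching and skip rules concrete, a short sketch using the two helpers above (the test id and tool names are hypothetical):

```python
# Hypothetical case mirroring the mcp-* suite entries above.
mcp_case = ToolCall("mcp-99", "mcp", "Use an available MCP tool.", "", "",
                    expected_tool_prefix="mcp_")

assert _matches_expected_tool(mcp_case, "mcp_list_resources")  # prefix match (made-up name)
assert not _matches_expected_tool(mcp_case, "read_file")       # different tool surface

# With no mcp_* tool registered, the runner records a skip instead of a failure.
reason = _resolve_unavailable_reason(mcp_case, {"read_file", "terminal"})
assert reason == "required tool prefix unavailable: mcp_"
```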


def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
    """Run a single tool-calling test through the agent."""
    from run_agent import AIAgent

    result = CallResult(
        test_id=tc.id,
        category=tc.category,
        model=model_spec,
        prompt=tc.prompt,
        expected_tool=tc.expected_tool,
        expected_tool=tc.expected_tool or tc.expected_tool_prefix,
        success=False,
    )

    try:
        from run_agent import AIAgent

        agent = AIAgent(
            model=model_spec,
            provider=provider,
@@ -342,6 +404,14 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
            persist_session=False,
        )

        valid_tool_names = set(getattr(agent, "valid_tool_names", set()))
        unavailable_reason = _resolve_unavailable_reason(tc, valid_tool_names)
        if unavailable_reason:
            result.skipped = True
            result.skip_reason = unavailable_reason
            result.error = unavailable_reason
            return result

        t0 = time.time()
        conv = agent.run_conversation(
            user_message=tc.prompt,
@@ -352,52 +422,75 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
        )
        result.latency_s = round(time.time() - t0, 2)

        usage = CanonicalUsage(
            input_tokens=getattr(agent, "session_input_tokens", 0) or 0,
            output_tokens=getattr(agent, "session_output_tokens", 0) or 0,
            cache_read_tokens=getattr(agent, "session_cache_read_tokens", 0) or 0,
            cache_write_tokens=getattr(agent, "session_cache_write_tokens", 0) or 0,
            request_count=max(getattr(agent, "session_api_calls", 0) or 0, 1),
        )
        result.total_tokens = usage.total_tokens
        billed_model = model_spec.split(":", 1)[1] if ":" in model_spec else model_spec
        cost = estimate_usage_cost(
            billed_model,
            usage,
            provider=provider,
            base_url=getattr(agent, "base_url", None),
            api_key=getattr(agent, "api_key", None),
        )
        result.cost_status = cost.status
        result.estimated_cost_usd = float(cost.amount_usd) if cost.amount_usd is not None else None

        messages = conv.get("messages", [])

        # Find the first assistant message with tool_calls
        tool_called = None
        tool_args_str = ""
        tool_calls = []
        for msg in messages:
            if msg.get("role") == "assistant" and msg.get("tool_calls"):
                for tc_item in msg["tool_calls"]:
                    fn = tc_item.get("function", {})
                    tool_called = fn.get("name", "")
                    tool_args_str = fn.get("arguments", "{}")
                    break
                tool_calls = list(msg["tool_calls"])
                break

        if tool_called:
            result.tool_called = tool_called
            result.schema_ok = True
        if tool_calls:
            result.tool_count = len(tool_calls)
            parsed_args_ok = True
            matched_name = None
            matched_args = "{}"

            # Check if the right tool was called
            if tool_called == tc.expected_tool:
                result.success = True
            for tc_item in tool_calls:
                fn = tc_item.get("function", {})
                tool_name = fn.get("name", "")
                tool_args = fn.get("arguments", "{}")
                try:
                    json.loads(tool_args or "{}")
                except Exception:
                    parsed_args_ok = False
                if matched_name is None and _matches_expected_tool(tc, tool_name):
                    matched_name = tool_name
                    matched_args = tool_args

            # Check if args contain expected substring
            if tc.expected_params_check:
                result.tool_args_valid = tc.expected_params_check in tool_args_str
            else:
                result.tool_args_valid = True
            result.schema_ok = parsed_args_ok
            result.tool_called = matched_name or tool_calls[0].get("function", {}).get("name", "")

            if matched_name:
                result.tool_args_valid = (
                    tc.expected_params_check in matched_args if tc.expected_params_check else True
                )
            result.success = result.schema_ok and result.tool_args_valid

            # Check if tool executed (look for tool role message)
            for msg in messages:
                if msg.get("role") == "tool":
                    content = msg.get("content", "")
                    if content and "error" not in content.lower()[:50]:
                    if content:
                        result.execution_ok = True
                        break
                    elif content:
                        result.execution_ok = True  # got a response, even if error
                        break

            result.parallel_ok = result.tool_count > 1 and result.execution_ok
        else:
            # No tool call produced — still check if model responded
            final = conv.get("final_response", "")
            result.raw_response = final[:200] if final else ""

    except Exception as e:
        result.error = f"{type(e).__name__}: {str(e)[:200]}"
        result.latency_s = round(time.time() - t0, 2) if 't0' in dir() else 0
        result.latency_s = round(time.time() - t0, 2) if 't0' in locals() else 0

    return result
@@ -406,100 +499,134 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
    """Generate markdown benchmark report."""
    now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")

    # Aggregate per model
    stats: dict[str, ModelStats] = {}
    for m in models:
        stats[m] = ModelStats(model=m)

    stats: dict[str, ModelStats] = {m: ModelStats(model=m) for m in models}
    by_category: dict[str, dict[str, list[CallResult]]] = {}

    for r in results:
        s = stats[r.model]
        s.total += 1
        s.schema_ok += int(r.schema_ok)
        s.exec_ok += int(r.execution_ok)
        s.latency_sum += r.latency_s
        if not r.success:
            s.failures.append(r)
        s.total_tokens += r.total_tokens
        if r.estimated_cost_usd is not None:
            s.total_cost_usd += r.estimated_cost_usd
            s.known_cost_calls += 1
        if r.skipped:
            s.skipped += 1
        else:
            s.schema_ok += int(r.schema_ok)
            s.exec_ok += int(r.execution_ok)
            s.parallel_ok += int(r.parallel_ok)
            if not r.success:
                s.failures.append(r)

        by_category.setdefault(r.category, {}).setdefault(r.model, []).append(r)

    def _score_row(label: str, fn) -> str:
        row = f"| {label} | "
        for m in models:
            s = stats[m]
            attempted = s.total - s.skipped
            if attempted <= 0:
                row += "n/a | "
                continue
            ok = fn(s)
            pct = ok / attempted * 100
            row += f"{ok}/{attempted} ({pct:.0f}%) | "
        return row

    lines = [
        f"# Tool-Calling Benchmark Report",
        f"",
        "# Tool-Calling Benchmark Report",
        "",
        f"Generated: {now}",
        f"Suite: {len(SUITE)} calls across {len(set(tc.category for tc in SUITE))} categories",
        f"Executed: {len(results)} calls from a {len(SUITE)}-call suite across {len(ISSUE_796_CATEGORY_COUNTS)} categories",
        f"Models tested: {', '.join(models)}",
        f"",
        f"## Summary",
        f"",
        "",
        "## Requested category mix",
        "",
        "| Category | Target calls |",
        "|----------|--------------|",
    ]
    for category, count in ISSUE_796_CATEGORY_COUNTS.items():
        lines.append(f"| {category} | {count} |")

    lines.extend([
        "",
        "## Summary",
        "",
        f"| Metric | {' | '.join(models)} |",
        f"|--------|{'|'.join('---------' for _ in models)}|",
    ]
        _score_row("Schema parse success", lambda s: s.schema_ok),
        _score_row("Tool execution success", lambda s: s.exec_ok),
        _score_row("Parallel tool success", lambda s: s.parallel_ok),
    ])

    # Schema parse success
    row = "| Schema parse success | "
    for m in models:
        s = stats[m]
        row += f"{s.schema_ok}/{s.total} ({s.schema_pct:.0f}%) | "
    lines.append(row)

    # Tool execution success
    row = "| Tool execution success | "
    for m in models:
        s = stats[m]
        row += f"{s.exec_ok}/{s.total} ({s.exec_pct:.0f}%) | "
    lines.append(row)

    # Correct tool selected
    row = "| Correct tool selected | "
    for m in models:
        s = stats[m]
        correct = sum(1 for r in results if r.model == m and r.success)
        pct = (correct / s.total * 100) if s.total else 0
        row += f"{correct}/{s.total} ({pct:.0f}%) | "
    lines.append(row)

    # Avg latency
    row = "| Avg latency (s) | "
    for m in models:
        s = stats[m]
        row += f"{s.avg_latency:.2f} | "
        row += f"{stats[m].avg_latency:.2f} | "
    lines.append(row)

    row = "| Avg tokens per call | "
    for m in models:
        total = stats[m].total
        avg_tokens = stats[m].total_tokens / total if total else 0
        row += f"{avg_tokens:.1f} | "
    lines.append(row)

    row = "| Avg token cost per call (USD) | "
    for m in models:
        avg_cost = stats[m].avg_cost_usd
        row += (f"{avg_cost:.6f} | " if avg_cost is not None else "n/a | ")
    lines.append(row)

    row = "| Skipped / unavailable | "
    for m in models:
        s = stats[m]
        row += f"{s.skipped}/{s.total} | "
    lines.append(row)
    lines.append("")

    # Per-category breakdown
    lines.append("## Per-Category Breakdown")
    lines.append("## Per-category breakdown")
    lines.append("")

    for cat in sorted(by_category.keys()):
        lines.append(f"### {cat.title()}")
        lines.append("")
        lines.append(f"| Metric | {' | '.join(models)} |")
        lines.append(f"|--------|{'|'.join('---------' for _ in models)}|")

        cat_data = by_category[cat]
        for metric_name, fn in [
            ("Schema OK", lambda r: r.schema_ok),
            ("Exec OK", lambda r: r.execution_ok),
            ("Parallel OK", lambda r: r.parallel_ok),
            ("Correct tool", lambda r: r.success),
        ]:
            row = f"| {metric_name} | "
            for m in models:
                results_m = cat_data.get(m, [])
                total = len(results_m)
                ok = sum(1 for r in results_m if fn(r))
                pct = (ok / total * 100) if total else 0
                row += f"{ok}/{total} ({pct:.0f}%) | "
                results_m = by_category[cat].get(m, [])
                attempted = [r for r in results_m if not r.skipped]
                if not attempted:
                    row += "n/a | "
                    continue
                ok = sum(1 for r in attempted if fn(r))
                pct = ok / len(attempted) * 100
                row += f"{ok}/{len(attempted)} ({pct:.0f}%) | "
            lines.append(row)

        row = "| Avg tokens | "
        for m in models:
            results_m = by_category[cat].get(m, [])
            avg_tokens = sum(r.total_tokens for r in results_m) / len(results_m) if results_m else 0
            row += f"{avg_tokens:.1f} | "
        lines.append(row)

        row = "| Skipped | "
        for m in models:
            results_m = by_category[cat].get(m, [])
            skipped = sum(1 for r in results_m if r.skipped)
            row += f"{skipped}/{len(results_m)} | "
        lines.append(row)
        lines.append("")

    # Failure analysis
    lines.append("## Failure Analysis")
    lines.append("## Failure analysis")
    lines.append("")

    any_failures = False
    for m in models:
        s = stats[m]
@@ -514,28 +641,40 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
            err = r.error or "wrong tool"
            lines.append(f"| {r.test_id} | {r.category} | {r.expected_tool} | {got} | {err[:60]} |")
        lines.append("")

    if not any_failures:
        lines.append("No failures detected.")
        lines.append("No model failures detected.")
        lines.append("")

    # Raw results JSON
    lines.append("## Raw Results")
    skipped_results = [r for r in results if r.skipped]
    lines.append("## Skipped / unavailable cases")
    lines.append("")
    if skipped_results:
        lines.append("| Test | Model | Category | Reason |")
        lines.append("|------|-------|----------|--------|")
        for r in skipped_results:
            lines.append(f"| {r.test_id} | {r.model} | {r.category} | {r.skip_reason[:80]} |")
    else:
        lines.append("No cases were skipped.")
    lines.append("")

    lines.append("## Raw results")
    lines.append("")
    lines.append("```json")
    lines.append(json.dumps([asdict(r) for r in results], indent=2, default=str))
    lines.append("```")

    report = "\n".join(lines)
    output_path.write_text(report)
    output_path.write_text(report, encoding="utf-8")
    return report


def main():
    parser = argparse.ArgumentParser(description="Tool-calling benchmark")
    parser.add_argument("--models", nargs="+",
                        default=["nous:gia-3/gemma-4-31b", "nous:mimo-v2-pro"],
                        default=list(DEFAULT_COMPARE_MODELS),
                        help="Model specs to test (provider:model)")
    parser.add_argument("--compare", action="store_true",
                        help="Use the issue #796 default comparison set")
    parser.add_argument("--limit", type=int, default=0,
                        help="Run only first N tests (0 = all)")
    parser.add_argument("--category", type=str, default="",
@@ -546,6 +685,9 @@ def main():
                        help="Print test cases without running them")
    args = parser.parse_args()

    if args.compare:
        args.models = list(DEFAULT_COMPARE_MODELS)

    # Filter suite
    suite = SUITE[:]
    if args.category:

@@ -26,7 +26,6 @@ from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
from .store import MemoryStore
from .retrieval import FactRetriever
from .observations import ObservationSynthesizer

logger = logging.getLogger(__name__)
@@ -38,29 +37,28 @@ logger = logging.getLogger(__name__)
FACT_STORE_SCHEMA = {
    "name": "fact_store",
    "description": (
        "Deep structured memory with algebraic reasoning and grounded observation synthesis. "
        "Deep structured memory with algebraic reasoning. "
        "Use alongside the memory tool — memory for always-on context, "
        "fact_store for deep recall, compositional queries, and higher-order observations.\n\n"
        "fact_store for deep recall and compositional queries.\n\n"
        "ACTIONS (simple → powerful):\n"
        "• add — Store a fact the user would expect you to remember.\n"
        "• search — Keyword lookup ('editor config', 'deploy process').\n"
        "• probe — Entity recall: ALL facts about a person/thing.\n"
        "• related — What connects to an entity? Structural adjacency.\n"
        "• reason — Compositional: facts connected to MULTIPLE entities simultaneously.\n"
        "• observe — Synthesized higher-order observations backed by supporting facts.\n"
        "• contradict — Memory hygiene: find facts making conflicting claims.\n"
        "• update/remove/list — CRUD operations.\n\n"
        "IMPORTANT: Before answering questions about the user, ALWAYS probe/reason/observe first."
        "IMPORTANT: Before answering questions about the user, ALWAYS probe or reason first."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": ["add", "search", "probe", "related", "reason", "observe", "contradict", "update", "remove", "list"],
                "enum": ["add", "search", "probe", "related", "reason", "contradict", "update", "remove", "list"],
            },
            "content": {"type": "string", "description": "Fact content (required for 'add')."},
            "query": {"type": "string", "description": "Search query (required for 'search'/'observe')."},
            "query": {"type": "string", "description": "Search query (required for 'search')."},
            "entity": {"type": "string", "description": "Entity name for 'probe'/'related'."},
            "entities": {"type": "array", "items": {"type": "string"}, "description": "Entity names for 'reason'."},
            "fact_id": {"type": "integer", "description": "Fact ID for 'update'/'remove'."},
@@ -68,12 +66,6 @@ FACT_STORE_SCHEMA = {
            "tags": {"type": "string", "description": "Comma-separated tags."},
            "trust_delta": {"type": "number", "description": "Trust adjustment for 'update'."},
            "min_trust": {"type": "number", "description": "Minimum trust filter (default: 0.3)."},
            "min_confidence": {"type": "number", "description": "Minimum observation confidence (default: 0.6)."},
            "observation_type": {
                "type": "string",
                "enum": ["recurring_preference", "stable_direction", "behavioral_pattern"],
                "description": "Optional observation type filter for 'observe'.",
            },
            "limit": {"type": "integer", "description": "Max results (default: 10)."},
        },
        "required": ["action"],
@@ -126,9 +118,7 @@ class HolographicMemoryProvider(MemoryProvider):
        self._config = config or _load_plugin_config()
        self._store = None
        self._retriever = None
        self._observation_synth = None
        self._min_trust = float(self._config.get("min_trust_threshold", 0.3))
        self._observation_min_confidence = float(self._config.get("observation_min_confidence", 0.6))

    @property
    def name(self) -> str:
@@ -187,7 +177,6 @@ class HolographicMemoryProvider(MemoryProvider):
            hrr_weight=hrr_weight,
            hrr_dim=hrr_dim,
        )
        self._observation_synth = ObservationSynthesizer(self._store)
        self._session_id = session_id

    def system_prompt_block(self) -> str:
@@ -204,76 +193,30 @@ class HolographicMemoryProvider(MemoryProvider):
                "# Holographic Memory\n"
                "Active. Empty fact store — proactively add facts the user would expect you to remember.\n"
                "Use fact_store(action='add') to store durable structured facts about people, projects, preferences, decisions.\n"
                "Use fact_store(action='observe') to synthesize higher-order observations with evidence.\n"
                "Use fact_feedback to rate facts after using them (trains trust scores)."
            )
        return (
            f"# Holographic Memory\n"
            f"Active. {total} facts stored with entity resolution and trust scoring.\n"
            f"Use fact_store to search, probe entities, reason across entities, or synthesize observations.\n"
            f"Use fact_store to search, probe entities, reason across entities, or add facts.\n"
            f"Use fact_feedback to rate facts after using them (trains trust scores)."
        )

    def prefetch(self, query: str, *, session_id: str = "") -> str:
        if not query:
        if not self._retriever or not query:
            return ""

        parts = []
        raw_results = []
        try:
            if self._retriever:
                raw_results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
        except Exception as e:
            logger.debug("Holographic prefetch fact search failed: %s", e)
            raw_results = []

        observations = []
        try:
            if self._observation_synth:
                observations = self._observation_synth.observe(
                    query,
                    min_confidence=self._observation_min_confidence,
                    limit=3,
                    refresh=True,
                )
        except Exception as e:
            logger.debug("Holographic prefetch observation search failed: %s", e)
            observations = []

        if not raw_results and observations:
            seen_fact_ids = set()
            evidence_backfill = []
            for observation in observations:
                for evidence in observation.get("evidence", []):
                    fact_id = evidence.get("fact_id")
                    if fact_id in seen_fact_ids:
                        continue
                    seen_fact_ids.add(fact_id)
                    evidence_backfill.append(evidence)
            raw_results = evidence_backfill[:5]

        if raw_results:
        try:
            results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
            if not results:
                return ""
            lines = []
            for r in raw_results:
            for r in results:
                trust = r.get("trust_score", r.get("trust", 0))
                lines.append(f"- [{trust:.1f}] {r.get('content', '')}")
            parts.append("## Holographic Memory\n" + "\n".join(lines))

        if observations:
            lines = []
            for observation in observations:
                evidence_ids = ", ".join(
                    f"#{item['fact_id']}" for item in observation.get("evidence", [])[:3]
                ) or "none"
                lines.append(
                    f"- [{observation.get('confidence', 0.0):.2f}] "
                    f"{observation.get('observation_type', 'observation')}: "
                    f"{observation.get('summary', '')} "
                    f"(evidence: {evidence_ids})"
                )
            parts.append("## Holographic Observations\n" + "\n".join(lines))

        return "\n\n".join(parts)
            return "## Holographic Memory\n" + "\n".join(lines)
        except Exception as e:
            logger.debug("Holographic prefetch failed: %s", e)
            return ""

    def sync_turn(self, user_content: str, assistant_content: str, *, session_id: str = "") -> None:
        # Holographic memory stores explicit facts via tools, not auto-sync.
@@ -309,7 +252,6 @@ class HolographicMemoryProvider(MemoryProvider):
    def shutdown(self) -> None:
        self._store = None
        self._retriever = None
        self._observation_synth = None

    # -- Tool handlers -------------------------------------------------------

@@ -363,19 +305,6 @@ class HolographicMemoryProvider(MemoryProvider):
            )
            return json.dumps({"results": results, "count": len(results)})

        elif action == "observe":
            synthesizer = self._observation_synth
            if not synthesizer:
                return tool_error("Observation synthesizer is not initialized")
            observations = synthesizer.observe(
                args.get("query", ""),
                observation_type=args.get("observation_type"),
                min_confidence=float(args.get("min_confidence", self._observation_min_confidence)),
                limit=int(args.get("limit", 10)),
                refresh=True,
            )
            return json.dumps({"observations": observations, "count": len(observations)})

        elif action == "contradict":
            results = retriever.contradict(
                category=args.get("category"),

@@ -1,249 +0,0 @@
"""Higher-order observation synthesis for holographic memory.

Builds grounded observations from accumulated facts and keeps them in a
separate retrieval layer with explicit evidence links back to supporting facts.
"""

from __future__ import annotations

import re
from typing import Any

from .store import MemoryStore

_TOKEN_RE = re.compile(r"[a-z0-9_]+")
_HIGHER_ORDER_CUES = {
    "prefer",
    "preference",
    "preferences",
    "style",
    "pattern",
    "patterns",
    "behavior",
    "behaviour",
    "habit",
    "habits",
    "workflow",
    "direction",
    "trajectory",
    "strategy",
    "tend",
    "usually",
}

_OBSERVATION_PATTERNS = [
    {
        "observation_type": "recurring_preference",
        "subject": "communication_style",
        "categories": {"user_pref", "general"},
        "labels": {
            "concise": ["concise", "terse", "brief", "short", "no fluff"],
            "result_first": ["result-only", "result only", "outcome only", "quick", "quickly"],
            "silent_ops": ["silent", "no status", "no repetitive status", "no questions"],
        },
        "summary_prefix": "Recurring preference",
    },
    {
        "observation_type": "stable_direction",
        "subject": "project_direction",
        "categories": {"project", "general", "tool"},
        "labels": {
            "local_first": ["local-first", "local first", "local-only", "local only", "ollama", "own hardware"],
            "gitea_first": ["gitea-first", "gitea first", "forge", "pull request", "pr flow", "issue flow"],
            "ansible": ["ansible", "playbook", "role", "deploy via ansible"],
        },
        "summary_prefix": "Stable direction",
    },
    {
        "observation_type": "behavioral_pattern",
        "subject": "operator_workflow",
        "categories": {"general", "project", "tool", "user_pref"},
        "labels": {
            "commit_early": ["commit early", "commits early", "commit after", "wip commit"],
            "pr_first": ["open pr", "push a pr", "pull request", "pr immediately", "create pr"],
            "dedup_guard": ["no dupes", "no duplicates", "avoid duplicate", "existing pr"],
        },
        "summary_prefix": "Behavioral pattern",
    },
]

_TYPE_QUERY_HINTS = {
    "recurring_preference": {"prefer", "preference", "style", "communication", "likes", "wants"},
    "stable_direction": {"direction", "trajectory", "strategy", "project", "roadmap", "moving"},
    "behavioral_pattern": {"pattern", "behavior", "workflow", "habit", "operator", "agent", "usually"},
}


class ObservationSynthesizer:
    """Synthesizes grounded observations from facts and retrieves them by query."""

    def __init__(self, store: MemoryStore):
        self.store = store

    def synthesize(
        self,
        *,
        persist: bool = True,
        min_confidence: float = 0.6,
        limit: int = 10,
    ) -> list[dict[str, Any]]:
        facts = self.store.list_facts(min_trust=0.0, limit=1000)
        observations: list[dict[str, Any]] = []

        for pattern in _OBSERVATION_PATTERNS:
            candidate = self._build_candidate(pattern, facts, min_confidence=min_confidence)
            if not candidate:
                continue

            if persist:
                candidate["observation_id"] = self.store.upsert_observation(
                    candidate["observation_type"],
                    candidate["subject"],
                    candidate["summary"],
                    candidate["confidence"],
                    candidate["evidence_fact_ids"],
                    metadata=candidate["metadata"],
                )

            candidate["evidence"] = self._expand_evidence(candidate["evidence_fact_ids"])
            candidate["evidence_count"] = len(candidate["evidence"])
            candidate.pop("evidence_fact_ids", None)
            observations.append(candidate)

        observations.sort(
            key=lambda item: (item["confidence"], item.get("evidence_count", 0)),
            reverse=True,
        )
        return observations[:limit]

    def observe(
        self,
        query: str = "",
        *,
        observation_type: str | None = None,
        min_confidence: float = 0.6,
        limit: int = 10,
        refresh: bool = True,
    ) -> list[dict[str, Any]]:
        if refresh:
            self.synthesize(persist=True, min_confidence=min_confidence, limit=limit)

        observations = self.store.list_observations(
            observation_type=observation_type,
            min_confidence=min_confidence,
            limit=max(limit * 4, 20),
        )
        if not observations:
            return []

        if not query:
            return observations[:limit]

        query_tokens = self._tokenize(query)
        is_higher_order = bool(query_tokens & _HIGHER_ORDER_CUES)
        ranked: list[dict[str, Any]] = []

        for item in observations:
            searchable = " ".join(
                [
                    item.get("summary", ""),
                    item.get("subject", ""),
                    item.get("observation_type", ""),
                    " ".join(item.get("metadata", {}).get("labels", [])),
                ]
            )
            overlap = self._overlap_score(query_tokens, self._tokenize(searchable))
            type_bonus = self._type_bonus(query_tokens, item.get("observation_type", ""))
            if overlap <= 0 and type_bonus <= 0 and not is_higher_order:
                continue
            ranked_item = dict(item)
            ranked_item["score"] = round(item.get("confidence", 0.0) + overlap + type_bonus, 3)
            ranked.append(ranked_item)

        if not ranked and is_higher_order:
            ranked = [
                {**item, "score": round(float(item.get("confidence", 0.0)), 3)}
                for item in observations
            ]

        ranked.sort(
            key=lambda item: (item.get("score", 0.0), item.get("confidence", 0.0), item.get("evidence_count", 0)),
            reverse=True,
        )
        return ranked[:limit]

    def _build_candidate(
        self,
        pattern: dict[str, Any],
        facts: list[dict[str, Any]],
        *,
        min_confidence: float,
    ) -> dict[str, Any] | None:
        matched_fact_ids: set[int] = set()
        matched_labels: dict[str, set[int]] = {label: set() for label in pattern["labels"]}

        for fact in facts:
            if fact.get("category") not in pattern["categories"]:
                continue
            haystack = f"{fact.get('content', '')} {fact.get('tags', '')}".lower()
            local_match = False
            for label, keywords in pattern["labels"].items():
                if any(keyword in haystack for keyword in keywords):
                    matched_labels[label].add(int(fact["fact_id"]))
                    local_match = True
            if local_match:
                matched_fact_ids.add(int(fact["fact_id"]))

        if len(matched_fact_ids) < 2:
            return None

        active_labels = sorted(label for label, ids in matched_labels.items() if ids)
        confidence = min(0.95, 0.35 + 0.12 * len(matched_fact_ids) + 0.08 * len(active_labels))
        confidence = round(confidence, 3)
        if confidence < min_confidence:
            return None

        label_summary = ", ".join(label.replace("_", "-") for label in active_labels)
        subject_text = pattern["subject"].replace("_", " ")
        summary = (
            f"{pattern['summary_prefix']}: {subject_text} trends toward {label_summary} "
            f"based on {len(matched_fact_ids)} supporting facts."
        )
        return {
            "observation_type": pattern["observation_type"],
            "subject": pattern["subject"],
            "summary": summary,
            "confidence": confidence,
            "metadata": {
                "labels": active_labels,
                "evidence_count": len(matched_fact_ids),
            },
            "evidence_fact_ids": sorted(matched_fact_ids),
        }

    def _expand_evidence(self, fact_ids: list[int]) -> list[dict[str, Any]]:
        facts_by_id = {
            fact["fact_id"]: fact
            for fact in self.store.list_facts(min_trust=0.0, limit=1000)
        }
        return [facts_by_id[fact_id] for fact_id in fact_ids if fact_id in facts_by_id]

    @staticmethod
    def _tokenize(text: str) -> set[str]:
        return set(_TOKEN_RE.findall(text.lower()))

    @staticmethod
    def _overlap_score(query_tokens: set[str], text_tokens: set[str]) -> float:
        if not query_tokens or not text_tokens:
            return 0.0
        overlap = query_tokens & text_tokens
        if not overlap:
            return 0.0
        return round(len(overlap) / max(len(query_tokens), 1), 3)

    @staticmethod
    def _type_bonus(query_tokens: set[str], observation_type: str) -> float:
        hints = _TYPE_QUERY_HINTS.get(observation_type, set())
        if not hints:
            return 0.0
        return 0.25 if query_tokens & hints else 0.0

@@ -3,7 +3,6 @@ SQLite-backed fact store with entity resolution and trust scoring.
Single-user Hermes memory store plugin.
"""

import json
import re
import sqlite3
import threading
@@ -74,28 +73,6 @@ CREATE TABLE IF NOT EXISTS memory_banks (
    fact_count INTEGER DEFAULT 0,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

CREATE TABLE IF NOT EXISTS observations (
    observation_id INTEGER PRIMARY KEY AUTOINCREMENT,
    observation_type TEXT NOT NULL,
    subject TEXT NOT NULL,
    summary TEXT NOT NULL,
    confidence REAL DEFAULT 0.0,
    metadata_json TEXT DEFAULT '{}',
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    UNIQUE(observation_type, subject)
);

CREATE TABLE IF NOT EXISTS observation_evidence (
    observation_id INTEGER REFERENCES observations(observation_id) ON DELETE CASCADE,
    fact_id INTEGER REFERENCES facts(fact_id) ON DELETE CASCADE,
    evidence_weight REAL DEFAULT 1.0,
    PRIMARY KEY (observation_id, fact_id)
);

CREATE INDEX IF NOT EXISTS idx_observations_type ON observations(observation_type);
CREATE INDEX IF NOT EXISTS idx_observations_confidence ON observations(confidence DESC);
"""

# Trust adjustment constants
@@ -151,7 +128,6 @@ class MemoryStore:
    def _init_db(self) -> None:
        """Create tables, indexes, and triggers if they do not exist. Enable WAL mode."""
        self._conn.execute("PRAGMA journal_mode=WAL")
        self._conn.execute("PRAGMA foreign_keys=ON")
        self._conn.executescript(_SCHEMA)
        # Migrate: add hrr_vector column if missing (safe for existing databases)
        columns = {row[1] for row in self._conn.execute("PRAGMA table_info(facts)").fetchall()}
@@ -370,115 +346,6 @@ class MemoryStore:
|
||||
rows = self._conn.execute(sql, params).fetchall()
|
||||
return [self._row_to_dict(r) for r in rows]
|
||||
|
||||
def upsert_observation(
|
||||
self,
|
||||
observation_type: str,
|
||||
subject: str,
|
||||
summary: str,
|
||||
confidence: float,
|
||||
evidence_fact_ids: list[int],
|
||||
metadata: dict | None = None,
|
||||
) -> int:
|
||||
"""Create or update a synthesized observation and its evidence links."""
|
||||
with self._lock:
|
||||
metadata_json = json.dumps(metadata or {}, sort_keys=True)
|
||||
self._conn.execute(
|
||||
"""
|
||||
INSERT INTO observations (
|
||||
observation_type, subject, summary, confidence, metadata_json
|
||||
)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
ON CONFLICT(observation_type, subject) DO UPDATE SET
|
||||
summary = excluded.summary,
|
||||
confidence = excluded.confidence,
|
||||
metadata_json = excluded.metadata_json,
|
||||
updated_at = CURRENT_TIMESTAMP
|
||||
""",
|
||||
(observation_type, subject, summary, confidence, metadata_json),
|
||||
)
|
||||
row = self._conn.execute(
|
||||
"""
|
||||
SELECT observation_id
|
||||
FROM observations
|
||||
WHERE observation_type = ? AND subject = ?
|
||||
""",
|
||||
(observation_type, subject),
|
||||
).fetchone()
|
||||
observation_id = int(row["observation_id"])
|
||||
|
||||
self._conn.execute(
|
||||
"DELETE FROM observation_evidence WHERE observation_id = ?",
|
||||
(observation_id,),
|
||||
)
|
||||
unique_fact_ids = sorted({int(fid) for fid in evidence_fact_ids})
|
||||
if unique_fact_ids:
|
||||
self._conn.executemany(
|
||||
"""
|
||||
INSERT OR IGNORE INTO observation_evidence (observation_id, fact_id)
|
||||
VALUES (?, ?)
|
||||
""",
|
||||
[(observation_id, fact_id) for fact_id in unique_fact_ids],
|
||||
)
|
||||
self._conn.commit()
|
||||
return observation_id
|
||||
|
||||
def list_observations(
|
||||
self,
|
||||
observation_type: str | None = None,
|
||||
min_confidence: float = 0.0,
|
||||
limit: int = 50,
|
||||
) -> list[dict]:
|
||||
"""List synthesized observations with expanded supporting evidence."""
|
||||
with self._lock:
|
||||
params: list = [min_confidence]
|
||||
observation_clause = ""
|
||||
if observation_type is not None:
|
||||
observation_clause = "AND observation_type = ?"
|
||||
params.append(observation_type)
|
||||
params.append(limit)
|
||||
rows = self._conn.execute(
|
||||
f"""
|
||||
SELECT observation_id, observation_type, subject, summary, confidence,
|
||||
metadata_json, created_at, updated_at,
|
||||
(
|
||||
SELECT COUNT(*)
|
||||
FROM observation_evidence oe
|
||||
WHERE oe.observation_id = observations.observation_id
|
||||
) AS evidence_count
|
||||
FROM observations
|
||||
WHERE confidence >= ?
|
||||
{observation_clause}
|
||||
ORDER BY confidence DESC, updated_at DESC
|
||||
LIMIT ?
|
||||
""",
|
||||
params,
|
||||
).fetchall()
|
||||
|
||||
results = []
|
||||
for row in rows:
|
||||
item = dict(row)
|
||||
try:
|
||||
item["metadata"] = json.loads(item.pop("metadata_json") or "{}")
|
||||
except json.JSONDecodeError:
|
||||
item["metadata"] = {}
|
||||
item["evidence"] = self._get_observation_evidence(int(item["observation_id"]))
|
||||
results.append(item)
|
||||
return results
|
||||
|
||||
    def _get_observation_evidence(self, observation_id: int) -> list[dict]:
        rows = self._conn.execute(
            """
            SELECT f.fact_id, f.content, f.category, f.tags, f.trust_score,
                   f.retrieval_count, f.helpful_count, f.created_at, f.updated_at
            FROM observation_evidence oe
            JOIN facts f ON f.fact_id = oe.fact_id
            WHERE oe.observation_id = ?
            ORDER BY f.trust_score DESC, f.updated_at DESC
            """,
            (observation_id,),
        ).fetchall()
        return [self._row_to_dict(row) for row in rows]
    def record_feedback(self, fact_id: int, helpful: bool) -> dict:
        """Record user feedback and adjust trust asymmetrically."""
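The hunk ends here, so the actual update rule is not visible in this diff. As an illustration only of what "asymmetric" typically means in trust scoring, here is a hedged sketch; the step sizes and the clamp range are assumptions, not the project's implementation:

```python
# Illustrative only -- NOT the code from this diff. Unhelpful feedback pulls
# trust down faster than helpful feedback raises it, and the score stays
# clamped to [0.0, 1.0].
def adjust_trust(trust: float, helpful: bool,
                 up_step: float = 0.05, down_step: float = 0.15) -> float:
    delta = up_step if helpful else -down_step
    return max(0.0, min(1.0, trust + delta))

print(adjust_trust(0.5, helpful=True))   # 0.55
print(adjust_trust(0.5, helpful=False))  # 0.35
```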
@@ -1,96 +0,0 @@
import json

import pytest

from plugins.memory.holographic import HolographicMemoryProvider
from plugins.memory.holographic.store import MemoryStore


@pytest.fixture()
def store(tmp_path):
    db_path = tmp_path / "memory.db"
    s = MemoryStore(db_path=str(db_path), default_trust=0.5)
    yield s
    s.close()


@pytest.fixture()
def provider(tmp_path):
    p = HolographicMemoryProvider(
        config={
            "db_path": str(tmp_path / "memory.db"),
            "default_trust": 0.5,
        }
    )
    p.initialize(session_id="test-session")
    yield p
    if p._store:
        p._store.close()


class TestObservationSynthesis:
    def test_observe_action_persists_observation_with_evidence_links(self, provider):
        fact_ids = [
            provider._store.add_fact('User prefers concise status updates', category='user_pref'),
            provider._store.add_fact('User wants result-only replies with no fluff', category='user_pref'),
        ]

        result = json.loads(
            provider.handle_tool_call(
                'fact_store',
                {
                    'action': 'observe',
                    'query': 'What communication style does the user prefer?',
                    'limit': 5,
                },
            )
        )

        assert result['count'] == 1
        observation = result['observations'][0]
        assert observation['observation_type'] == 'recurring_preference'
        assert observation['confidence'] >= 0.6
        assert sorted(item['fact_id'] for item in observation['evidence']) == sorted(fact_ids)

        stored = provider._store.list_observations(limit=10)
        assert len(stored) == 1
        assert stored[0]['observation_type'] == 'recurring_preference'
        assert stored[0]['evidence_count'] == 2
        assert len(provider._store.list_facts(limit=10)) == 2

    def test_observe_action_synthesizes_three_observation_types(self, provider):
        provider._store.add_fact('User prefers concise updates', category='user_pref')
        provider._store.add_fact('User wants result-only communication', category='user_pref')
        provider._store.add_fact('Project is moving to a local-first deployment model', category='project')
        provider._store.add_fact('Project direction stays Gitea-first for issue and PR flow', category='project')
        provider._store.add_fact('Operator always commits early before moving on', category='general')
        provider._store.add_fact('Operator pushes a PR immediately after each meaningful fix', category='general')

        result = json.loads(provider.handle_tool_call('fact_store', {'action': 'observe', 'limit': 10}))
        types = {item['observation_type'] for item in result['observations']}

        assert {'recurring_preference', 'stable_direction', 'behavioral_pattern'} <= types

    def test_single_fact_does_not_create_overconfident_observation(self, provider):
        provider._store.add_fact('User prefers concise updates', category='user_pref')

        result = json.loads(
            provider.handle_tool_call(
                'fact_store',
                {'action': 'observe', 'query': 'What does the user prefer?', 'limit': 5},
            )
        )

        assert result['count'] == 0
        assert provider._store.list_observations(limit=10) == []

    def test_prefetch_surfaces_observations_as_separate_layer(self, provider):
        provider._store.add_fact('User prefers concise updates', category='user_pref')
        provider._store.add_fact('User wants result-only communication', category='user_pref')

        prefetch = provider.prefetch('What communication style does the user prefer?')

        assert '## Holographic Observations' in prefetch
        assert '## Holographic Memory' in prefetch
        assert 'recurring_preference' in prefetch
        assert 'evidence' in prefetch.lower()
115
tests/test_tool_call_benchmark.py
Normal file
@@ -0,0 +1,115 @@
"""Tests for Issue #796 tool-calling benchmark coverage and reporting."""

import sys
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import patch

sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))

from tool_call_benchmark import (  # noqa: E402
    CallResult,
    DEFAULT_COMPARE_MODELS,
    ISSUE_796_CATEGORY_COUNTS,
    ToolCall,
    generate_report,
    run_single_test,
    suite_category_counts,
)


def test_suite_counts_match_issue_796_distribution():
    counts = suite_category_counts()
    assert counts == ISSUE_796_CATEGORY_COUNTS
    assert sum(counts.values()) == 100


def test_default_compare_models_cover_issue_796_lanes():
    assert len(DEFAULT_COMPARE_MODELS) == 3
    assert any("gemma-4-31b" in spec for spec in DEFAULT_COMPARE_MODELS)
    assert any("gemma-4-26b" in spec for spec in DEFAULT_COMPARE_MODELS)
    assert any("mimo-v2-pro" in spec for spec in DEFAULT_COMPARE_MODELS)


def test_generate_report_includes_parallel_and_cost_metrics(tmp_path):
    output_path = tmp_path / "report.md"
    results = [
        CallResult(
            test_id="file-01",
            category="file",
            model="gemma-4-31b",
            prompt="Read the file.",
            expected_tool="read_file",
            success=True,
            tool_called="read_file",
            schema_ok=True,
            tool_args_valid=True,
            execution_ok=True,
            tool_count=2,
            parallel_ok=True,
            latency_s=1.25,
            total_tokens=123,
            estimated_cost_usd=0.0012,
            cost_status="estimated",
        ),
        CallResult(
            test_id="web-01",
            category="web",
            model="mimo-v2-pro",
            prompt="Search the web.",
            expected_tool="web_search",
            success=False,
            tool_called="web_search",
            schema_ok=True,
            tool_args_valid=False,
            execution_ok=False,
            tool_count=1,
            parallel_ok=False,
            latency_s=2.5,
            error="bad args",
            total_tokens=456,
            estimated_cost_usd=None,
            cost_status="unknown",
            skipped=True,
            skip_reason="web_search unavailable",
        ),
    ]

    report = generate_report(results, ["gemma-4-31b", "mimo-v2-pro"], output_path)

    assert output_path.exists()
    assert "Parallel tool success" in report
    assert "Avg token cost per call (USD)" in report
    assert "Skipped / unavailable" in report
    assert "Requested category mix" in report


def test_run_single_test_skips_when_expected_tool_unavailable():
    class FakeAgent:
        def __init__(self, *args, **kwargs):
            self.valid_tool_names = {"read_file", "terminal"}
            self.session_input_tokens = 0
            self.session_output_tokens = 0
            self.session_cache_read_tokens = 0
            self.session_cache_write_tokens = 0
            self.session_api_calls = 0
            self.base_url = ""
            self.api_key = None

        def run_conversation(self, *args, **kwargs):
            raise AssertionError("run_conversation should not be called for unavailable tools")

    tc = ToolCall(
        id="mcp-01",
        category="mcp",
        prompt="Use an MCP tool to list resources.",
        expected_tool="",
        expected_tool_prefix="mcp_",
    )

    with patch.dict(sys.modules, {"run_agent": SimpleNamespace(AIAgent=FakeAgent)}):
        result = run_single_test(tc, "gemini:gemma-4-31b-it", "gemini")

    assert result.skipped is True
    assert "mcp_" in result.skip_reason
    assert result.success is False
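Worth noting about the last test: `patch.dict(sys.modules, ...)` temporarily swaps the `run_agent` entry for a `SimpleNamespace`, so when `run_single_test` resolves `AIAgent` it evidently gets `FakeAgent` and never touches the network. The `AssertionError` inside `run_conversation` then guarantees the harness truly short-circuits to the skip path when no `mcp_`-prefixed tool appears in `valid_tool_names`.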