Compare commits

2 Commits

| Author | SHA1 | Date |
|--------|------|------|
| | 411aea9edf | |
| | 877005b06e | |
@@ -1,70 +1,43 @@
from __future__ import annotations

"""
A2A agent card generation for fleet discovery.
Agent Card — A2A-compliant agent discovery.
Part of #843: fix: implement A2A agent card for fleet discovery (#819)

Refs #801.
Closes #802.
Provides metadata about the agent's identity, capabilities, and installed skills
for discovery by other agents in the fleet.
"""

import argparse
import json
import logging
import os
import socket
import sys
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, Iterable, List, Mapping, Sequence
from urllib.parse import urlparse, urlunparse
from pathlib import Path
from typing import Any, Dict, List, Optional

from hermes_cli import __version__
from hermes_cli.config import load_config

from hermes_cli.config import load_config, get_hermes_home
from agent.skill_utils import (
    get_all_skills_dirs,
    get_disabled_skill_names,
    iter_skill_index_files,
    parse_frontmatter,
    skill_matches_platform,
    get_all_skills_dirs,
    get_disabled_skill_names,
    skill_matches_platform
)

logger = logging.getLogger(__name__)

DEFAULT_DESCRIPTION = "Sovereign AI agent — orchestration, code, research"
DEFAULT_INPUT_MODES = ["text/plain", "application/json"]
DEFAULT_OUTPUT_MODES = ["text/plain", "application/json"]
_REQUIRED_CAPABILITY_FLAGS = (
    "streaming",
    "pushNotifications",
    "stateTransitionHistory",
)


@dataclass
class AgentSkill:
    id: str
    name: str
    description: str = ""
    tags: List[str] = field(default_factory=list)

    def to_dict(self) -> Dict[str, Any]:
        data: Dict[str, Any] = {"id": self.id, "name": self.name}
        if self.description:
            data["description"] = self.description
        if self.tags:
            data["tags"] = self.tags
        return data

    version: str = "1.0.0"


@dataclass
class AgentCapabilities:
    streaming: bool = True
    pushNotifications: bool = False
    stateTransitionHistory: bool = True

    def to_dict(self) -> Dict[str, Any]:
        return asdict(self)

    tools: bool = True
    vision: bool = False
    reasoning: bool = False


@dataclass
class AgentCard:
@@ -74,81 +47,14 @@ class AgentCard:
    version: str = __version__
    capabilities: AgentCapabilities = field(default_factory=AgentCapabilities)
    skills: List[AgentSkill] = field(default_factory=list)
    defaultInputModes: List[str] = field(default_factory=lambda: list(DEFAULT_INPUT_MODES))
    defaultOutputModes: List[str] = field(default_factory=lambda: list(DEFAULT_OUTPUT_MODES))
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        data: Dict[str, Any] = {
            "name": self.name,
            "description": self.description,
            "url": self.url,
            "version": self.version,
            "capabilities": self.capabilities.to_dict(),
            "skills": [skill.to_dict() for skill in self.skills],
            "defaultInputModes": list(self.defaultInputModes),
            "defaultOutputModes": list(self.defaultOutputModes),
        }
        if self.metadata:
            data["metadata"] = dict(self.metadata)
        return data

    def to_json(self, indent: int = 2) -> str:
        return json.dumps(self.to_dict(), indent=indent)


def _env_or_empty(key: str) -> str:
    return os.environ.get(key, "").strip()


def _as_agent_config(config: Mapping[str, Any] | None) -> Dict[str, Any]:
    if not isinstance(config, Mapping):
        return {}
    agent_cfg = config.get("agent")
    return dict(agent_cfg) if isinstance(agent_cfg, Mapping) else {}


def _as_a2a_config(config: Mapping[str, Any] | None) -> Dict[str, Any]:
    if not isinstance(config, Mapping):
        return {}
    a2a_cfg = config.get("a2a")
    return dict(a2a_cfg) if isinstance(a2a_cfg, Mapping) else {}


def _normalize_string_list(value: Any) -> List[str]:
    if value is None:
        return []
    if isinstance(value, str):
        parts = value.split(",")
    elif isinstance(value, Sequence) and not isinstance(value, (bytes, bytearray, str)):
        parts = list(value)
    else:
        parts = [value]
    out: List[str] = []
    seen = set()
    for item in parts:
        text = str(item).strip()
        if not text or text in seen:
            continue
        seen.add(text)
        out.append(text)
    return out


def _normalize_skill_tags(frontmatter: Mapping[str, Any]) -> List[str]:
    tags = _normalize_string_list(frontmatter.get("tags"))
    category = str(frontmatter.get("category") or "").strip()
    if category and category not in tags:
        tags.append(category)
    return tags

    defaultInputModes: List[str] = field(default_factory=lambda: ["text/plain"])
    defaultOutputModes: List[str] = field(default_factory=lambda: ["text/plain"])

def _load_skills() -> List[AgentSkill]:
    """Scan enabled skills and return A2A skill metadata."""
    skills: List[AgentSkill] = []
    """Scan all enabled skills and return metadata."""
    skills = []
    disabled = get_disabled_skill_names()
    seen_ids = set()

    for skills_dir in get_all_skills_dirs():
        if not skills_dir.is_dir():
            continue
@@ -159,262 +65,71 @@ def _load_skills() -> List[AgentSkill]:
        except Exception:
            continue

        skill_name = frontmatter.get("name") or skill_file.parent.name
        if str(skill_name) in disabled:
            continue
        if not skill_matches_platform(frontmatter):
            continue

        skill_id = str(frontmatter.get("name") or skill_file.parent.name).strip().lower().replace(" ", "-")
        if skill_id in disabled or skill_id in seen_ids:
            continue
        seen_ids.add(skill_id)
        skills.append(AgentSkill(
            id=str(skill_name),
            name=str(frontmatter.get("name", skill_name)),
            description=str(frontmatter.get("description", "")),
            version=str(frontmatter.get("version", "1.0.0"))
        ))
    return skills

        display_name = str(frontmatter.get("title") or frontmatter.get("name") or skill_file.parent.name).strip()
        description = str(frontmatter.get("description") or "").strip()
        tags = _normalize_skill_tags(frontmatter)
        skills.append(
            AgentSkill(
                id=skill_id,
                name=display_name,
                description=description,
                tags=tags,
            )
        )
def build_agent_card() -> AgentCard:
    """Build the agent card from current configuration and environment."""
    config = load_config()

    # Identity
    name = os.environ.get("HERMES_AGENT_NAME") or config.get("agent", {}).get("name") or "hermes"
    description = os.environ.get("HERMES_AGENT_DESCRIPTION") or config.get("agent", {}).get("description") or "Sovereign AI agent"

    # URL - try to determine from environment or config
    port = os.environ.get("HERMES_WEB_PORT") or "9119"
    host = os.environ.get("HERMES_WEB_HOST") or "localhost"
    url = f"http://{host}:{port}"

    # Capabilities
    # In a real scenario, we'd check model metadata for vision/reasoning
    capabilities = AgentCapabilities(
        streaming=True,
        tools=True,
        vision=False,  # Default to false unless we can confirm
        reasoning=False
    )

    # Skills
    skills = _load_skills()

    return AgentCard(
        name=name,
        description=description,
        url=url,
        version=__version__,
        capabilities=capabilities,
        skills=skills
    )

    return sorted(skills, key=lambda skill: skill.id)


def _get_agent_name(config: Mapping[str, Any] | None, override: str | None = None) -> str:
    if override:
        return override
    env_name = _env_or_empty("HERMES_AGENT_NAME") or _env_or_empty("AGENT_NAME")
    if env_name:
        return env_name
    agent_cfg = _as_agent_config(config)
    if agent_cfg.get("name"):
        return str(agent_cfg["name"]).strip()
def get_agent_card_json() -> str:
    """Return the agent card as a JSON string."""
    try:
        hostname = socket.gethostname().split(".", 1)[0].strip()
        if hostname:
            return hostname
    except Exception:
        pass
    return "hermes"


def _get_description(config: Mapping[str, Any] | None, override: str | None = None) -> str:
    if override:
        return override
    env_description = _env_or_empty("HERMES_AGENT_DESCRIPTION") or _env_or_empty("AGENT_DESCRIPTION")
    if env_description:
        return env_description
    agent_cfg = _as_agent_config(config)
    if agent_cfg.get("description"):
        return str(agent_cfg["description"]).strip()
    return DEFAULT_DESCRIPTION


def _normalize_a2a_url(url: str) -> str:
    raw = (url or "").strip()
    if not raw:
        return ""
    parsed = urlparse(raw if "://" in raw else f"https://{raw}")
    scheme = parsed.scheme or "https"
    netloc = parsed.netloc or parsed.path
    path = parsed.path if parsed.netloc else ""
    normalized_path = path.rstrip("/") if path not in ("", "/") else ""
    if not normalized_path.endswith("/a2a"):
        normalized_path = f"{normalized_path}/a2a" if normalized_path else "/a2a"
    return urlunparse((scheme, netloc, normalized_path, "", "", ""))

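For orientation, here is how `_normalize_a2a_url` behaves on a few representative inputs (an illustrative sketch; these inputs are not fixtures from this PR):

```python
# Illustrative inputs only — not cases from this diff.
_normalize_a2a_url("timmy.local:9443")              # -> "https://timmy.local:9443/a2a" (scheme defaults to https)
_normalize_a2a_url("http://localhost:9119/")        # -> "http://localhost:9119/a2a" (trailing slash dropped)
_normalize_a2a_url("https://timmy.local:9443/a2a")  # -> unchanged; path already ends in /a2a
```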
def _get_agent_url(config: Mapping[str, Any] | None, override: str | None = None) -> str:
    if override:
        return _normalize_a2a_url(override)

    agent_cfg = _as_agent_config(config)
    a2a_cfg = _as_a2a_config(config)

    explicit = (
        _env_or_empty("HERMES_A2A_PUBLIC_URL")
        or str(a2a_cfg.get("public_url") or "").strip()
        or str(agent_cfg.get("a2a_public_url") or "").strip()
    )
    if explicit:
        return _normalize_a2a_url(explicit)

    host = (
        _env_or_empty("HERMES_A2A_HOST")
        or str(a2a_cfg.get("host") or "").strip()
        or _env_or_empty("HERMES_WEB_HOST")
        or str(agent_cfg.get("host") or "").strip()
        or "localhost"
    )
    port = (
        _env_or_empty("HERMES_A2A_PORT")
        or str(a2a_cfg.get("port") or "").strip()
        or _env_or_empty("HERMES_WEB_PORT")
        or str(agent_cfg.get("port") or "").strip()
        or "9119"
    )
    scheme = (
        _env_or_empty("HERMES_A2A_SCHEME")
        or str(a2a_cfg.get("scheme") or "").strip()
        or ("https" if (_env_or_empty("HERMES_MTLS_CERT") or str(port) == "9443") else "http")
    )
    return _normalize_a2a_url(f"{scheme}://{host}:{port}")


def _merge_skills(base_skills: Iterable[AgentSkill], extra_skills: Iterable[AgentSkill] | None = None) -> List[AgentSkill]:
    merged: Dict[str, AgentSkill] = {}
    for skill in list(base_skills) + list(extra_skills or []):
        if skill.id not in merged:
            merged[skill.id] = skill
    return [merged[key] for key in sorted(merged)]


def build_agent_card(
    *,
    name: str | None = None,
    description: str | None = None,
    url: str | None = None,
    extra_skills: Iterable[AgentSkill] | None = None,
    metadata: Mapping[str, Any] | None = None,
) -> AgentCard:
    """Build an A2A-compliant agent card from config, env, and installed skills."""
    try:
        config = load_config()
    except Exception as exc:
        logger.debug("Falling back to empty config while building agent card: %s", exc)
        config = {}

    card = AgentCard(
        name=_get_agent_name(config, override=name),
        description=_get_description(config, override=description),
        url=_get_agent_url(config, override=url),
        skills=_merge_skills(_load_skills(), extra_skills),
        metadata=dict(metadata or {}),
    )
    return card


def validate_agent_card(card: AgentCard | Dict[str, Any]) -> List[str]:
    """Return a list of schema-validation errors for an agent card."""
    data = card.to_dict() if isinstance(card, AgentCard) else dict(card)
    errors: List[str] = []

    for field_name in ("name", "description", "url", "version"):
        value = data.get(field_name)
        if not isinstance(value, str) or not value.strip():
            errors.append(f"{field_name} must be a non-empty string")

    url_value = str(data.get("url") or "")
    parsed = urlparse(url_value)
    if not parsed.scheme or not parsed.netloc:
        errors.append("url must be an absolute http/https URL")
    elif parsed.scheme not in {"http", "https"}:
        errors.append("url must use http or https")
    elif not parsed.path.rstrip("/").endswith("/a2a"):
        errors.append("url must point to the /a2a endpoint")

    capabilities = data.get("capabilities")
    if not isinstance(capabilities, Mapping):
        errors.append("capabilities must be an object")
    else:
        for capability_name in _REQUIRED_CAPABILITY_FLAGS:
            if not isinstance(capabilities.get(capability_name), bool):
                errors.append(f"capabilities.{capability_name} must be a boolean")

    for field_name, required_modes in (
        ("defaultInputModes", DEFAULT_INPUT_MODES),
        ("defaultOutputModes", DEFAULT_OUTPUT_MODES),
    ):
        modes = data.get(field_name)
        if not isinstance(modes, list) or not modes:
            errors.append(f"{field_name} must be a non-empty list of MIME types")
            continue
        for mode in modes:
            if not isinstance(mode, str) or "/" not in mode:
                errors.append(f"{field_name} entries must be MIME types")
        for required_mode in required_modes:
            if required_mode not in modes:
                errors.append(f"{field_name} must include {required_mode}")

    skills = data.get("skills")
    if not isinstance(skills, list):
        errors.append("skills must be a list")
    else:
        for index, skill in enumerate(skills):
            if not isinstance(skill, Mapping):
                errors.append(f"skills[{index}] must be an object")
                continue
            if not str(skill.get("id") or "").strip():
                errors.append(f"skills[{index}] missing id")
            if not str(skill.get("name") or "").strip():
                errors.append(f"skills[{index}] missing name")
            tags = skill.get("tags", [])
            if tags is None:
                tags = []
            if not isinstance(tags, list):
                errors.append(f"skills[{index}].tags must be a list")
            else:
                for tag in tags:
                    if not isinstance(tag, str) or not tag.strip():
                        errors.append(f"skills[{index}].tags entries must be non-empty strings")

    metadata = data.get("metadata")
    if metadata is not None and not isinstance(metadata, Mapping):
        errors.append("metadata must be an object when present")

    return errors


def get_agent_card_json(
    *,
    name: str | None = None,
    description: str | None = None,
    url: str | None = None,
    metadata: Mapping[str, Any] | None = None,
    indent: int = 2,
) -> str:
    """Return the local agent card as JSON, falling back to an error card on failure."""
    try:
        card = build_agent_card(name=name, description=description, url=url, metadata=metadata)
        errors = validate_agent_card(card)
        if errors:
            raise ValueError("; ".join(errors))
        return card.to_json(indent=indent)
    except Exception as exc:
        logger.error("Failed to build agent card: %s", exc)
        card = build_agent_card()
        return json.dumps(asdict(card), indent=2)
    except Exception as e:
        logger.error(f"Failed to build agent card: {e}")
        # Minimal fallback card
        fallback = {
            "name": name or _env_or_empty("HERMES_AGENT_NAME") or "hermes",
            "description": "Sovereign AI agent (agent card fallback)",
            "url": url or "http://localhost:9119/a2a",
            "name": "hermes",
            "description": "Sovereign AI agent (fallback)",
            "version": __version__,
            "capabilities": AgentCapabilities().to_dict(),
            "skills": [],
            "defaultInputModes": list(DEFAULT_INPUT_MODES),
            "defaultOutputModes": list(DEFAULT_OUTPUT_MODES),
            "error": str(exc),
            "error": str(e)
        }
        return json.dumps(fallback, indent=indent)
        return json.dumps(fallback, indent=2)


def main(argv: Sequence[str] | None = None) -> int:
    parser = argparse.ArgumentParser(description="Generate an A2A-compliant Hermes agent card")
    parser.add_argument("--name", help="Override the agent name")
    parser.add_argument("--description", help="Override the agent description")
    parser.add_argument("--url", help="Override the public A2A URL")
    parser.add_argument("--validate", action="store_true", help="Validate before printing; exit 1 on schema errors")
    args = parser.parse_args(list(argv) if argv is not None else None)

    card = build_agent_card(name=args.name, description=args.description, url=args.url)
    errors = validate_agent_card(card)
    if args.validate and errors:
        for error in errors:
            print(error, file=sys.stderr)
        return 1
    print(card.to_json(indent=2))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
def validate_agent_card(card_data: Dict[str, Any]) -> bool:
    """Check if the card data complies with the A2A schema."""
    required = ["name", "description", "url", "version"]
    return all(k in card_data for k in required)

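A minimal usage sketch of the module API above, assuming it is importable as `agent.agent_card` (the import path used by the tests in this compare); not part of the diff itself:

```python
from agent.agent_card import build_agent_card, validate_agent_card

# All overrides are optional; config and environment supply defaults otherwise.
card = build_agent_card(name="Timmy", url="https://timmy.local:9443")
errors = validate_agent_card(card)  # [] when the card passes the A2A schema checks
if errors:
    raise SystemExit("; ".join(errors))
print(card.to_json())  # the url comes back normalized to end in /a2a
```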
benchmarks/gemma4-tool-calling-2026-04-22.md (new file, 139 lines)
@@ -0,0 +1,139 @@
# Tool-Calling Benchmark Report

Generated: 2026-04-22 15:46 UTC
Executed: 3 calls from a 100-call suite across 7 categories
Models tested: nous:gia-3/gemma-4-31b, gemini:gemma-4-26b-it, nous:mimo-v2-pro

## Requested category mix

| Category | Target calls |
|----------|--------------|
| file | 20 |
| terminal | 20 |
| web | 15 |
| code | 15 |
| browser | 10 |
| delegate | 10 |
| mcp | 10 |

## Summary

| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema parse success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Tool execution success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel tool success | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg latency (s) | 0.00 | 0.00 | 0.00 |
| Avg tokens per call | 0.0 | 0.0 | 0.0 |
| Avg token cost per call (USD) | n/a | n/a | n/a |
| Skipped / unavailable | 0/1 | 0/1 | 0/1 |

## Per-category breakdown

### File

| Metric | nous:gia-3/gemma-4-31b | gemini:gemma-4-26b-it | nous:mimo-v2-pro |
|--------|---------|---------|---------|
| Schema OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Exec OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Parallel OK | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Correct tool | 0/1 (0%) | 0/1 (0%) | 0/1 (0%) |
| Avg tokens | 0.0 | 0.0 | 0.0 |
| Skipped | 0/1 | 0/1 | 0/1 |

## Failure analysis

### nous:gia-3/gemma-4-31b — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

### gemini:gemma-4-26b-it — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

### nous:mimo-v2-pro — 1 failures

| Test | Category | Expected | Got | Error |
|------|----------|----------|-----|-------|
| file-01 | file | read_file | none | SyntaxError: unexpected character after line continuation ch |

## Skipped / unavailable cases

No cases were skipped.

## Raw results

```json
[
  {
    "test_id": "file-01",
    "category": "file",
    "model": "nous:gia-3/gemma-4-31b",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  },
  {
    "test_id": "file-01",
    "category": "file",
    "model": "gemini:gemma-4-26b-it",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  },
  {
    "test_id": "file-01",
    "category": "file",
    "model": "nous:mimo-v2-pro",
    "prompt": "Read the file /tmp/test_bench.txt and show me its contents.",
    "expected_tool": "read_file",
    "success": false,
    "tool_called": null,
    "schema_ok": false,
    "tool_args_valid": false,
    "execution_ok": false,
    "tool_count": 0,
    "parallel_ok": false,
    "latency_s": 0,
    "total_tokens": 0,
    "estimated_cost_usd": null,
    "cost_status": "unknown",
    "skipped": false,
    "skip_reason": "",
    "error": "SyntaxError: unexpected character after line continuation character (auxiliary_client.py, line 1)",
    "raw_response": ""
  }
]
```
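All three failures above share the same harness-side `auxiliary_client.py` syntax error, so the 0% rows likely reflect a broken run rather than model behavior. To re-derive the summary counts from the raw results block, a short sketch (assuming the JSON above is saved to `raw_results.json`, a hypothetical filename):

```python
import json

# Hypothetical path: paste the "Raw results" JSON block above into this file.
with open("raw_results.json") as fh:
    results = json.load(fh)

attempted = [r for r in results if not r["skipped"]]
schema_ok = sum(1 for r in attempted if r["schema_ok"])
exec_ok = sum(1 for r in attempted if r["execution_ok"])
parallel_ok = sum(1 for r in attempted if r["parallel_ok"])
# Aggregate across models (the tables above report per-model 0/1 splits):
print(f"Schema parse success: {schema_ok}/{len(attempted)}")     # 0/3 for this run
print(f"Tool execution success: {exec_ok}/{len(attempted)}")     # 0/3
print(f"Parallel tool success: {parallel_ok}/{len(attempted)}")  # 0/3
```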
@@ -8,10 +8,11 @@ success rates, latency, and token costs.
Usage:
    python3 benchmarks/tool_call_benchmark.py                   # full 100-call suite
    python3 benchmarks/tool_call_benchmark.py --limit 10        # quick smoke test
    python3 benchmarks/tool_call_benchmark.py --models nous     # single model
    python3 benchmarks/tool_call_benchmark.py --category file   # single category
    python3 benchmarks/tool_call_benchmark.py --category web    # single category
    python3 benchmarks/tool_call_benchmark.py --compare         # issue #796 default model comparison

Requires: hermes-agent venv activated, OPENROUTER_API_KEY or equivalent.
Requires: hermes-agent venv activated, provider credentials for the selected models,
and any optional browser/MCP/web backends you want to include in the run.
"""

import argparse
@@ -25,10 +26,12 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

# Ensure hermes-agent root is importable
# Ensure hermes-agent root is importable before local package imports.
REPO_ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(REPO_ROOT))

from agent.usage_pricing import CanonicalUsage, estimate_usage_cost

# ---------------------------------------------------------------------------
# Test Definitions
# ---------------------------------------------------------------------------
@@ -39,9 +42,11 @@ class ToolCall:
    id: str
    category: str
    prompt: str
    expected_tool: str               # tool name we expect the model to call
    expected_params_check: str = ""  # substring expected in JSON args
    timeout: int = 30                # max seconds per call
    expected_tool: str               # exact tool name we expect the model to call
    expected_params_check: str = ""  # substring expected in JSON args
    expected_tool_prefix: str = ""   # prefix match for dynamic surfaces like mcp_*
    expects_parallel: bool = False   # whether this prompt should elicit multiple tool calls
    timeout: int = 30                # max seconds per call
    notes: str = ""


@@ -185,85 +190,107 @@ SUITE: list[ToolCall] = [
    ToolCall("deleg-10", "delegate", "Delegate: create a temp file /tmp/bench_deleg.txt with 'done'.",
             "delegate_task", "write"),

    # ── Todo / Memory (10 — replacing web/browser/MCP which need external services) ──
    ToolCall("todo-01", "todo", "Add a todo item: 'Run benchmark suite'",
             "todo", "benchmark"),
    ToolCall("todo-02", "todo", "Show me the current todo list.",
             "todo", ""),
    ToolCall("todo-03", "todo", "Mark the first todo item as completed.",
             "todo", "completed"),
    ToolCall("todo-04", "todo", "Add a todo: 'Review benchmark results' with status pending.",
             "todo", "Review"),
    ToolCall("todo-05", "todo", "Clear all completed todos.",
             "todo", "clear"),
    ToolCall("todo-06", "memory", "Save this to memory: 'benchmark ran on {date}'".format(
        date=datetime.now().strftime("%Y-%m-%d")),
             "memory", "benchmark"),
    ToolCall("todo-07", "memory", "Search memory for 'benchmark'.",
             "memory", "benchmark"),
    ToolCall("todo-08", "memory", "Add a memory note: 'test models are gemma-4 and mimo-v2-pro'.",
             "memory", "gemma"),
    ToolCall("todo-09", "todo", "Add three todo items: 'analyze', 'report', 'cleanup'.",
             "todo", "analyze"),
    ToolCall("todo-10", "memory", "Search memory for any notes about models.",
             "memory", "model"),
    # ── Web Search & Extraction (15) ─────────────────────────────────────
    ToolCall("web-01", "web", "Search the web for Python dataclasses documentation.",
             "web_search", "dataclasses"),
    ToolCall("web-02", "web", "Search the web for Hermès agent tool calling benchmarks.",
             "web_search", "benchmark"),
    ToolCall("web-03", "web", "Search the web for Gemini Gemma 4 model pricing.",
             "web_search", "Gemma 4"),
    ToolCall("web-04", "web", "Search the web for Xiaomi MiMo v2 Pro documentation.",
             "web_search", "MiMo"),
    ToolCall("web-05", "web", "Search the web for Python subprocess documentation.",
             "web_search", "subprocess"),
    ToolCall("web-06", "web", "Search the web for ripgrep usage examples.",
             "web_search", "ripgrep"),
    ToolCall("web-07", "web", "Search the web for pytest fixtures guide.",
             "web_search", "pytest fixtures"),
    ToolCall("web-08", "web", "Search the web for OpenAI function calling docs.",
             "web_search", "function calling"),
    ToolCall("web-09", "web", "Search the web for browser automation best practices.",
             "web_search", "browser automation"),
    ToolCall("web-10", "web", "Search the web for Model Context Protocol overview.",
             "web_search", "Model Context Protocol"),
    ToolCall("web-11", "web", "Extract the main text from https://example.com.",
             "web_extract", "example.com"),
    ToolCall("web-12", "web", "Extract the page content from https://example.org.",
             "web_extract", "example.org"),
    ToolCall("web-13", "web", "Extract the title and body text from https://www.iana.org/domains/reserved.",
             "web_extract", "iana.org"),
    ToolCall("web-14", "web", "Extract content from https://httpbin.org/html.",
             "web_extract", "httpbin.org"),
    ToolCall("web-15", "web", "Extract the main content from https://www.python.org/.",
             "web_extract", "python.org"),

    # ── Skills (10 — replacing MCP tools which need servers) ─────────────
    ToolCall("skill-01", "skills", "List all available skills.",
             "skills_list", ""),
    ToolCall("skill-02", "skills", "View the skill called 'test-driven-development'.",
             "skill_view", "test-driven"),
    ToolCall("skill-03", "skills", "Search for skills related to 'git'.",
             "skills_list", "git"),
    ToolCall("skill-04", "skills", "View the 'code-review' skill.",
             "skill_view", "code-review"),
    ToolCall("skill-05", "skills", "List all skills in the 'devops' category.",
             "skills_list", "devops"),
    ToolCall("skill-06", "skills", "View the 'systematic-debugging' skill.",
             "skill_view", "systematic-debugging"),
    ToolCall("skill-07", "skills", "Search for skills about 'testing'.",
             "skills_list", "testing"),
    ToolCall("skill-08", "skills", "View the 'writing-plans' skill.",
             "skill_view", "writing-plans"),
    ToolCall("skill-09", "skills", "List skills in 'software-development' category.",
             "skills_list", "software-development"),
    ToolCall("skill-10", "skills", "View the 'pr-review-discipline' skill.",
             "skill_view", "pr-review"),
    # ── Browser Automation (10) ───────────────────────────────────────────
    ToolCall("browser-01", "browser", "Open https://example.com in the browser.",
             "browser_navigate", "example.com"),
    ToolCall("browser-02", "browser", "Open https://www.python.org in the browser.",
             "browser_navigate", "python.org"),
    ToolCall("browser-03", "browser", "Open https://www.wikipedia.org in the browser.",
             "browser_navigate", "wikipedia.org"),
    ToolCall("browser-04", "browser", "Navigate the browser to https://example.org.",
             "browser_navigate", "example.org"),
    ToolCall("browser-05", "browser", "Go to https://httpbin.org/forms/post in the browser.",
             "browser_navigate", "httpbin.org/forms/post"),
    ToolCall("browser-06", "browser", "Open https://www.iana.org/domains/reserved in the browser.",
             "browser_navigate", "iana.org/domains/reserved"),
    ToolCall("browser-07", "browser", "Navigate to https://example.net in the browser.",
             "browser_navigate", "example.net"),
    ToolCall("browser-08", "browser", "Open https://developer.mozilla.org in the browser.",
             "browser_navigate", "developer.mozilla.org"),
    ToolCall("browser-09", "browser", "Navigate the browser to https://www.rfc-editor.org.",
             "browser_navigate", "rfc-editor.org"),
    ToolCall("browser-10", "browser", "Open https://www.gnu.org in the browser.",
             "browser_navigate", "gnu.org"),

    # ── Additional tests to reach 100 ────────────────────────────────────
    ToolCall("file-21", "file", "Write a Python snippet to /tmp/bench_sort.py that sorts [3,1,2].",
             "write_file", "bench_sort"),
    ToolCall("file-22", "file", "Read /tmp/bench_sort.py back and confirm it exists.",
             "read_file", "bench_sort"),
    ToolCall("file-23", "file", "Search for 'class' in all .py files in the benchmarks directory.",
             "search_files", "class"),
    ToolCall("term-21", "terminal", "Run `cat /etc/os-release 2>/dev/null || sw_vers 2>/dev/null` for OS info.",
             "terminal", "os"),
    ToolCall("term-22", "terminal", "Run `nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null` for CPU count.",
             "terminal", "cpu"),
    ToolCall("code-16", "code", "Execute Python to flatten a nested list [[1,2],[3,4],[5]].",
             "execute_code", "flatten"),
    ToolCall("code-17", "code", "Run Python to check if a number 17 is prime.",
             "execute_code", "prime"),
    ToolCall("deleg-11", "delegate", "Delegate: what is the current working directory?",
             "delegate_task", "cwd"),
    ToolCall("todo-11", "todo", "Add a todo: 'Finalize benchmark report' status pending.",
             "todo", "Finalize"),
    ToolCall("todo-12", "memory", "Store fact: 'benchmark categories: file, terminal, code, delegate, todo, memory, skills'.",
             "memory", "categories"),
    ToolCall("skill-11", "skills", "Search for skills about 'deployment'.",
             "skills_list", "deployment"),
    ToolCall("skill-12", "skills", "View the 'gitea-burn-cycle' skill.",
             "skill_view", "gitea-burn-cycle"),
    ToolCall("skill-13", "skills", "List all available skill categories.",
             "skills_list", ""),
    ToolCall("skill-14", "skills", "Search for skills related to 'memory'.",
             "skills_list", "memory"),
    ToolCall("skill-15", "skills", "View the 'mimo-swarm' skill.",
             "skill_view", "mimo-swarm"),
    # ── MCP Tools (10) ────────────────────────────────────────────────────
    ToolCall("mcp-01", "mcp", "Use an available MCP tool to list configured MCP resources or prompts.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-02", "mcp", "Use an MCP tool to inspect available resources on a configured server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-03", "mcp", "Use an MCP tool to read a resource from any configured MCP server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-04", "mcp", "Use an MCP tool to list prompts from any configured MCP server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-05", "mcp", "Use an available MCP tool and report what it returns.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-06", "mcp", "Call any safe MCP tool that is currently available and summarize the response.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-07", "mcp", "Use one configured MCP tool to enumerate data or capabilities.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-08", "mcp", "Use an MCP tool to fetch a small piece of data from a connected server.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-09", "mcp", "Invoke an available MCP tool and show the structured result.",
             "", "", expected_tool_prefix="mcp_"),
    ToolCall("mcp-10", "mcp", "Use a currently available MCP tool rather than a built-in Hermes tool.",
             "", "", expected_tool_prefix="mcp_"),
]
# fmt: on

DEFAULT_COMPARE_MODELS = [
    "nous:gia-3/gemma-4-31b",
    "gemini:gemma-4-26b-it",
    "nous:mimo-v2-pro",
]

ISSUE_796_CATEGORY_COUNTS = {
    "file": 20,
    "terminal": 20,
    "web": 15,
    "code": 15,
    "browser": 10,
    "delegate": 10,
    "mcp": 10,
}


def suite_category_counts() -> dict[str, int]:
    counts: dict[str, int] = {}
    for tc in SUITE:
        counts[tc.category] = counts.get(tc.category, 0) + 1
    return counts


# ---------------------------------------------------------------------------
# Runner
@@ -278,9 +305,17 @@ class CallResult:
    expected_tool: str
    success: bool
    tool_called: Optional[str] = None
    schema_ok: bool = False
    tool_args_valid: bool = False
    execution_ok: bool = False
    tool_count: int = 0
    parallel_ok: bool = False
    latency_s: float = 0.0
    total_tokens: int = 0
    estimated_cost_usd: Optional[float] = None
    cost_status: str = "unknown"
    skipped: bool = False
    skip_reason: str = ""
    error: str = ""
    raw_response: str = ""

@@ -291,7 +326,12 @@ class ModelStats:
    total: int = 0
    schema_ok: int = 0    # model produced valid tool call JSON
    exec_ok: int = 0      # tool actually ran without error
    parallel_ok: int = 0  # calls with 2+ tool calls that executed successfully
    skipped: int = 0
    latency_sum: float = 0.0
    total_tokens: int = 0
    total_cost_usd: float = 0.0
    known_cost_calls: int = 0
    failures: list = field(default_factory=list)

    @property
@@ -306,6 +346,10 @@ class ModelStats:
    def avg_latency(self) -> float:
        return (self.latency_sum / self.total) if self.total else 0

    @property
    def avg_cost_usd(self) -> Optional[float]:
        return (self.total_cost_usd / self.known_cost_calls) if self.known_cost_calls else None


def setup_test_files():
    """Create prerequisite files for the benchmark."""
@@ -318,20 +362,38 @@ def setup_test_files():
    )


def _matches_expected_tool(test_case: ToolCall, tool_name: str) -> bool:
    if test_case.expected_tool and tool_name == test_case.expected_tool:
        return True
    if test_case.expected_tool_prefix and tool_name.startswith(test_case.expected_tool_prefix):
        return True
    return False


def _resolve_unavailable_reason(test_case: ToolCall, valid_tool_names: set[str]) -> str:
    if test_case.expected_tool and test_case.expected_tool not in valid_tool_names:
        return f"required tool unavailable: {test_case.expected_tool}"
    if test_case.expected_tool_prefix and not any(
        name.startswith(test_case.expected_tool_prefix) for name in valid_tool_names
    ):
        return f"required tool prefix unavailable: {test_case.expected_tool_prefix}"
    return ""

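A quick illustration of the matching and skip rules these two helpers add — a sketch, with a hypothetical mcp_* tool name:

```python
tc = ToolCall(id="mcp-01", category="mcp",
              prompt="Use an MCP tool to list resources.",
              expected_tool="", expected_tool_prefix="mcp_")
assert _matches_expected_tool(tc, "mcp_list_resources")      # hypothetical mcp_* tool: prefix hit
assert not _matches_expected_tool(tc, "read_file")           # neither exact name nor prefix
assert _resolve_unavailable_reason(tc, {"read_file"}) != ""  # no mcp_* tool registered -> skip reason
```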
def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
    """Run a single tool-calling test through the agent."""
    from run_agent import AIAgent

    result = CallResult(
        test_id=tc.id,
        category=tc.category,
        model=model_spec,
        prompt=tc.prompt,
        expected_tool=tc.expected_tool,
        expected_tool=tc.expected_tool or tc.expected_tool_prefix,
        success=False,
    )

    try:
        from run_agent import AIAgent

        agent = AIAgent(
            model=model_spec,
            provider=provider,
@@ -342,6 +404,14 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
            persist_session=False,
        )

        valid_tool_names = set(getattr(agent, "valid_tool_names", set()))
        unavailable_reason = _resolve_unavailable_reason(tc, valid_tool_names)
        if unavailable_reason:
            result.skipped = True
            result.skip_reason = unavailable_reason
            result.error = unavailable_reason
            return result

        t0 = time.time()
        conv = agent.run_conversation(
            user_message=tc.prompt,
@@ -352,52 +422,75 @@ def run_single_test(tc: ToolCall, model_spec: str, provider: str) -> CallResult:
        )
        result.latency_s = round(time.time() - t0, 2)

        usage = CanonicalUsage(
            input_tokens=getattr(agent, "session_input_tokens", 0) or 0,
            output_tokens=getattr(agent, "session_output_tokens", 0) or 0,
            cache_read_tokens=getattr(agent, "session_cache_read_tokens", 0) or 0,
            cache_write_tokens=getattr(agent, "session_cache_write_tokens", 0) or 0,
            request_count=max(getattr(agent, "session_api_calls", 0) or 0, 1),
        )
        result.total_tokens = usage.total_tokens
        billed_model = model_spec.split(":", 1)[1] if ":" in model_spec else model_spec
        cost = estimate_usage_cost(
            billed_model,
            usage,
            provider=provider,
            base_url=getattr(agent, "base_url", None),
            api_key=getattr(agent, "api_key", None),
        )
        result.cost_status = cost.status
        result.estimated_cost_usd = float(cost.amount_usd) if cost.amount_usd is not None else None

        messages = conv.get("messages", [])

        # Find the first assistant message with tool_calls
        tool_called = None
        tool_args_str = ""
        tool_calls = []
        for msg in messages:
            if msg.get("role") == "assistant" and msg.get("tool_calls"):
                for tc_item in msg["tool_calls"]:
                    fn = tc_item.get("function", {})
                    tool_called = fn.get("name", "")
                    tool_args_str = fn.get("arguments", "{}")
                    break
                tool_calls = list(msg["tool_calls"])
                break

        if tool_called:
            result.tool_called = tool_called
            result.schema_ok = True
        if tool_calls:
            result.tool_count = len(tool_calls)
            parsed_args_ok = True
            matched_name = None
            matched_args = "{}"

            # Check if the right tool was called
            if tool_called == tc.expected_tool:
                result.success = True
            for tc_item in tool_calls:
                fn = tc_item.get("function", {})
                tool_name = fn.get("name", "")
                tool_args = fn.get("arguments", "{}")
                try:
                    json.loads(tool_args or "{}")
                except Exception:
                    parsed_args_ok = False
                if matched_name is None and _matches_expected_tool(tc, tool_name):
                    matched_name = tool_name
                    matched_args = tool_args

            # Check if args contain expected substring
            if tc.expected_params_check:
                result.tool_args_valid = tc.expected_params_check in tool_args_str
            else:
                result.tool_args_valid = True
            result.schema_ok = parsed_args_ok
            result.tool_called = matched_name or tool_calls[0].get("function", {}).get("name", "")

            if matched_name:
                result.tool_args_valid = (
                    tc.expected_params_check in matched_args if tc.expected_params_check else True
                )
                result.success = result.schema_ok and result.tool_args_valid

            # Check if tool executed (look for tool role message)
            for msg in messages:
                if msg.get("role") == "tool":
                    content = msg.get("content", "")
                    if content and "error" not in content.lower()[:50]:
                    if content:
                        result.execution_ok = True
                        break
                    elif content:
                        result.execution_ok = True  # got a response, even if error
                        break

            result.parallel_ok = result.tool_count > 1 and result.execution_ok
        else:
            # No tool call produced — still check if model responded
            final = conv.get("final_response", "")
            result.raw_response = final[:200] if final else ""

    except Exception as e:
        result.error = f"{type(e).__name__}: {str(e)[:200]}"
        result.latency_s = round(time.time() - t0, 2) if 't0' in dir() else 0
        result.latency_s = round(time.time() - t0, 2) if 't0' in locals() else 0

    return result


@@ -406,100 +499,134 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
    """Generate markdown benchmark report."""
    now = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")

    # Aggregate per model
    stats: dict[str, ModelStats] = {}
    for m in models:
        stats[m] = ModelStats(model=m)

    stats: dict[str, ModelStats] = {m: ModelStats(model=m) for m in models}
    by_category: dict[str, dict[str, list[CallResult]]] = {}

    for r in results:
        s = stats[r.model]
        s.total += 1
        s.schema_ok += int(r.schema_ok)
        s.exec_ok += int(r.execution_ok)
        s.latency_sum += r.latency_s
        if not r.success:
            s.failures.append(r)
        s.total_tokens += r.total_tokens
        if r.estimated_cost_usd is not None:
            s.total_cost_usd += r.estimated_cost_usd
            s.known_cost_calls += 1
        if r.skipped:
            s.skipped += 1
        else:
            s.schema_ok += int(r.schema_ok)
            s.exec_ok += int(r.execution_ok)
            s.parallel_ok += int(r.parallel_ok)
            if not r.success:
                s.failures.append(r)

        by_category.setdefault(r.category, {}).setdefault(r.model, []).append(r)

    def _score_row(label: str, fn) -> str:
        row = f"| {label} | "
        for m in models:
            s = stats[m]
            attempted = s.total - s.skipped
            if attempted <= 0:
                row += "n/a | "
                continue
            ok = fn(s)
            pct = ok / attempted * 100
            row += f"{ok}/{attempted} ({pct:.0f}%) | "
        return row

    lines = [
        f"# Tool-Calling Benchmark Report",
        f"",
        "# Tool-Calling Benchmark Report",
        "",
        f"Generated: {now}",
        f"Suite: {len(SUITE)} calls across {len(set(tc.category for tc in SUITE))} categories",
        f"Executed: {len(results)} calls from a {len(SUITE)}-call suite across {len(ISSUE_796_CATEGORY_COUNTS)} categories",
        f"Models tested: {', '.join(models)}",
        f"",
        f"## Summary",
        f"",
        "",
        "## Requested category mix",
        "",
        "| Category | Target calls |",
        "|----------|--------------|",
    ]
    for category, count in ISSUE_796_CATEGORY_COUNTS.items():
        lines.append(f"| {category} | {count} |")

    lines.extend([
        "",
        "## Summary",
        "",
        f"| Metric | {' | '.join(models)} |",
        f"|--------|{'|'.join('---------' for _ in models)}|",
    ]
        _score_row("Schema parse success", lambda s: s.schema_ok),
        _score_row("Tool execution success", lambda s: s.exec_ok),
        _score_row("Parallel tool success", lambda s: s.parallel_ok),
    ])

    # Schema parse success
    row = "| Schema parse success | "
    for m in models:
        s = stats[m]
        row += f"{s.schema_ok}/{s.total} ({s.schema_pct:.0f}%) | "
    lines.append(row)

    # Tool execution success
    row = "| Tool execution success | "
    for m in models:
        s = stats[m]
        row += f"{s.exec_ok}/{s.total} ({s.exec_pct:.0f}%) | "
    lines.append(row)

    # Correct tool selected
    row = "| Correct tool selected | "
    for m in models:
        s = stats[m]
        correct = sum(1 for r in results if r.model == m and r.success)
        pct = (correct / s.total * 100) if s.total else 0
        row += f"{correct}/{s.total} ({pct:.0f}%) | "
    lines.append(row)

    # Avg latency
    row = "| Avg latency (s) | "
    for m in models:
        s = stats[m]
        row += f"{s.avg_latency:.2f} | "
        row += f"{stats[m].avg_latency:.2f} | "
    lines.append(row)

    row = "| Avg tokens per call | "
    for m in models:
        total = stats[m].total
        avg_tokens = stats[m].total_tokens / total if total else 0
        row += f"{avg_tokens:.1f} | "
    lines.append(row)

    row = "| Avg token cost per call (USD) | "
    for m in models:
        avg_cost = stats[m].avg_cost_usd
        row += (f"{avg_cost:.6f} | " if avg_cost is not None else "n/a | ")
    lines.append(row)

    row = "| Skipped / unavailable | "
    for m in models:
        s = stats[m]
        row += f"{s.skipped}/{s.total} | "
    lines.append(row)
    lines.append("")

    # Per-category breakdown
    lines.append("## Per-Category Breakdown")
    lines.append("## Per-category breakdown")
    lines.append("")

    for cat in sorted(by_category.keys()):
        lines.append(f"### {cat.title()}")
        lines.append("")
        lines.append(f"| Metric | {' | '.join(models)} |")
        lines.append(f"|--------|{'|'.join('---------' for _ in models)}|")

        cat_data = by_category[cat]
        for metric_name, fn in [
            ("Schema OK", lambda r: r.schema_ok),
            ("Exec OK", lambda r: r.execution_ok),
            ("Parallel OK", lambda r: r.parallel_ok),
            ("Correct tool", lambda r: r.success),
        ]:
            row = f"| {metric_name} | "
            for m in models:
                results_m = cat_data.get(m, [])
                total = len(results_m)
                ok = sum(1 for r in results_m if fn(r))
                pct = (ok / total * 100) if total else 0
                row += f"{ok}/{total} ({pct:.0f}%) | "
                results_m = by_category[cat].get(m, [])
                attempted = [r for r in results_m if not r.skipped]
                if not attempted:
                    row += "n/a | "
                    continue
                ok = sum(1 for r in attempted if fn(r))
                pct = ok / len(attempted) * 100
                row += f"{ok}/{len(attempted)} ({pct:.0f}%) | "
            lines.append(row)

        row = "| Avg tokens | "
        for m in models:
            results_m = by_category[cat].get(m, [])
            avg_tokens = sum(r.total_tokens for r in results_m) / len(results_m) if results_m else 0
            row += f"{avg_tokens:.1f} | "
        lines.append(row)

        row = "| Skipped | "
        for m in models:
            results_m = by_category[cat].get(m, [])
            skipped = sum(1 for r in results_m if r.skipped)
            row += f"{skipped}/{len(results_m)} | "
        lines.append(row)
        lines.append("")

    # Failure analysis
    lines.append("## Failure Analysis")
    lines.append("## Failure analysis")
    lines.append("")

    any_failures = False
    for m in models:
        s = stats[m]
@@ -514,28 +641,40 @@ def generate_report(results: list[CallResult], models: list[str], output_path: P
            err = r.error or "wrong tool"
            lines.append(f"| {r.test_id} | {r.category} | {r.expected_tool} | {got} | {err[:60]} |")
        lines.append("")

    if not any_failures:
        lines.append("No failures detected.")
        lines.append("No model failures detected.")
        lines.append("")

    # Raw results JSON
    lines.append("## Raw Results")
    skipped_results = [r for r in results if r.skipped]
    lines.append("## Skipped / unavailable cases")
    lines.append("")
    if skipped_results:
        lines.append("| Test | Model | Category | Reason |")
        lines.append("|------|-------|----------|--------|")
        for r in skipped_results:
            lines.append(f"| {r.test_id} | {r.model} | {r.category} | {r.skip_reason[:80]} |")
    else:
        lines.append("No cases were skipped.")
    lines.append("")

    lines.append("## Raw results")
    lines.append("")
    lines.append("```json")
    lines.append(json.dumps([asdict(r) for r in results], indent=2, default=str))
    lines.append("```")

    report = "\n".join(lines)
    output_path.write_text(report)
    output_path.write_text(report, encoding="utf-8")
    return report


def main():
    parser = argparse.ArgumentParser(description="Tool-calling benchmark")
    parser.add_argument("--models", nargs="+",
                        default=["nous:gia-3/gemma-4-31b", "nous:mimo-v2-pro"],
                        default=list(DEFAULT_COMPARE_MODELS),
                        help="Model specs to test (provider:model)")
    parser.add_argument("--compare", action="store_true",
                        help="Use the issue #796 default comparison set")
    parser.add_argument("--limit", type=int, default=0,
                        help="Run only first N tests (0 = all)")
    parser.add_argument("--category", type=str, default="",
@@ -546,6 +685,9 @@ def main():
                        help="Print test cases without running them")
    args = parser.parse_args()

    if args.compare:
        args.models = list(DEFAULT_COMPARE_MODELS)

    # Filter suite
    suite = SUITE[:]
    if args.category:

@@ -1,150 +0,0 @@
from __future__ import annotations

import json
from pathlib import Path

import pytest

from agent import agent_card as mod


DEFAULT_DESCRIPTION = "Sovereign AI agent — orchestration, code, research"


def _set_base_context(monkeypatch, *, name: str = "Timmy", description: str = DEFAULT_DESCRIPTION, url: str = "https://timmy.local:9443/a2a", skills=None):
    monkeypatch.setattr(mod, "load_config", lambda: {"agent": {"name": name, "description": description}})
    monkeypatch.setattr(
        mod,
        "_load_skills",
        lambda: list(
            skills
            if skills is not None
            else [
                mod.AgentSkill(
                    id="code",
                    name="Code Implementation",
                    description="Implement and patch code",
                    tags=["python", "gitea"],
                )
            ]
        ),
    )
    monkeypatch.setenv("HERMES_A2A_PUBLIC_URL", url)
    monkeypatch.delenv("HERMES_AGENT_NAME", raising=False)
    monkeypatch.delenv("AGENT_NAME", raising=False)
    monkeypatch.delenv("HERMES_AGENT_DESCRIPTION", raising=False)
    monkeypatch.delenv("AGENT_DESCRIPTION", raising=False)


def test_build_agent_card_matches_issue_802_schema(monkeypatch):
    _set_base_context(monkeypatch)

    card = mod.build_agent_card()
    payload = card.to_dict()

    assert payload["name"] == "Timmy"
    assert payload["description"] == DEFAULT_DESCRIPTION
    assert payload["url"] == "https://timmy.local:9443/a2a"
    assert payload["capabilities"] == {
        "streaming": True,
        "pushNotifications": False,
        "stateTransitionHistory": True,
    }
    assert payload["defaultInputModes"] == ["text/plain", "application/json"]
    assert payload["defaultOutputModes"] == ["text/plain", "application/json"]
    assert payload["skills"][0]["tags"] == ["python", "gitea"]
    assert mod.validate_agent_card(payload) == []


@pytest.mark.parametrize(
    ("name", "url"),
    [
        ("Timmy", "https://timmy.local:9443/a2a"),
        ("Allegro", "https://allegro.local:9443/a2a"),
        ("Ezra", "https://ezra.local:9443/a2a"),
    ],
)
def test_build_agent_card_supports_fleet_members(monkeypatch, name, url):
    _set_base_context(monkeypatch, name=name, url=url, skills=[])

    payload = mod.build_agent_card().to_dict()

    assert payload["name"] == name
    assert payload["url"] == url
    assert mod.validate_agent_card(payload) == []


def test_load_skills_collects_tags_and_category(monkeypatch, tmp_path):
    skill_root = tmp_path / "skills"
    skill_dir = skill_root / "code-implementation"
    skill_dir.mkdir(parents=True)
    (skill_dir / "SKILL.md").write_text(
        """---
name: Code Implementation
description: Implement and patch code
tags: [python, gitea]
category: discovery
---

# Code Implementation
""",
        encoding="utf-8",
    )

    monkeypatch.setattr(mod, "get_all_skills_dirs", lambda: [skill_root])
    monkeypatch.setattr(mod, "get_disabled_skill_names", lambda: set())
    monkeypatch.setattr(mod, "skill_matches_platform", lambda _frontmatter: True)

    skills = mod._load_skills()

    assert len(skills) == 1
    assert skills[0].id == "code-implementation"
    assert skills[0].name == "Code Implementation"
    assert skills[0].description == "Implement and patch code"
    assert skills[0].tags == ["python", "gitea", "discovery"]


def test_validate_agent_card_reports_schema_errors():
    errors = mod.validate_agent_card(
        {
            "name": "",
            "description": "",
            "url": "timmy.local",
            "version": "",
            "capabilities": {"streaming": True},
            "skills": [{"id": "", "name": "", "tags": "python"}],
            "defaultInputModes": ["text/plain"],
            "defaultOutputModes": ["plain"],
            "metadata": [],
        }
    )

    assert any("name must be a non-empty string" in error for error in errors)
    assert any("url must be an absolute http/https URL" in error for error in errors)
    assert any("capabilities.pushNotifications" in error for error in errors)
    assert any("skills[0] missing id" in error for error in errors)
    assert any("skills[0].tags must be a list" in error for error in errors)
    assert any("defaultInputModes must include application/json" in error for error in errors)
    assert any("defaultOutputModes entries must be MIME types" in error for error in errors)
    assert any("metadata must be an object" in error for error in errors)


def test_get_agent_card_json_emits_valid_json(monkeypatch):
    _set_base_context(monkeypatch)

    payload = json.loads(mod.get_agent_card_json())

    assert payload["name"] == "Timmy"
    assert mod.validate_agent_card(payload) == []


def test_main_validate_prints_card(monkeypatch, capsys):
    _set_base_context(monkeypatch)

    exit_code = mod.main(["--validate"])
    captured = capsys.readouterr()

    assert exit_code == 0
    payload = json.loads(captured.out)
    assert payload["url"] == "https://timmy.local:9443/a2a"
    assert captured.err == ""
tests/test_tool_call_benchmark.py (new file, 115 lines)
@@ -0,0 +1,115 @@
"""Tests for Issue #796 tool-calling benchmark coverage and reporting."""

import sys
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import patch

sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))

from tool_call_benchmark import (  # noqa: E402
    CallResult,
    DEFAULT_COMPARE_MODELS,
    ISSUE_796_CATEGORY_COUNTS,
    ToolCall,
    generate_report,
    run_single_test,
    suite_category_counts,
)


def test_suite_counts_match_issue_796_distribution():
    counts = suite_category_counts()
    assert counts == ISSUE_796_CATEGORY_COUNTS
    assert sum(counts.values()) == 100


def test_default_compare_models_cover_issue_796_lanes():
    assert len(DEFAULT_COMPARE_MODELS) == 3
    assert any("gemma-4-31b" in spec for spec in DEFAULT_COMPARE_MODELS)
    assert any("gemma-4-26b" in spec for spec in DEFAULT_COMPARE_MODELS)
    assert any("mimo-v2-pro" in spec for spec in DEFAULT_COMPARE_MODELS)


def test_generate_report_includes_parallel_and_cost_metrics(tmp_path):
    output_path = tmp_path / "report.md"
    results = [
        CallResult(
            test_id="file-01",
            category="file",
            model="gemma-4-31b",
            prompt="Read the file.",
            expected_tool="read_file",
            success=True,
            tool_called="read_file",
            schema_ok=True,
            tool_args_valid=True,
            execution_ok=True,
            tool_count=2,
            parallel_ok=True,
            latency_s=1.25,
            total_tokens=123,
            estimated_cost_usd=0.0012,
            cost_status="estimated",
        ),
        CallResult(
            test_id="web-01",
            category="web",
            model="mimo-v2-pro",
            prompt="Search the web.",
            expected_tool="web_search",
            success=False,
            tool_called="web_search",
            schema_ok=True,
            tool_args_valid=False,
            execution_ok=False,
            tool_count=1,
            parallel_ok=False,
            latency_s=2.5,
            error="bad args",
            total_tokens=456,
            estimated_cost_usd=None,
            cost_status="unknown",
            skipped=True,
            skip_reason="web_search unavailable",
        ),
    ]

    report = generate_report(results, ["gemma-4-31b", "mimo-v2-pro"], output_path)

    assert output_path.exists()
    assert "Parallel tool success" in report
    assert "Avg token cost per call (USD)" in report
    assert "Skipped / unavailable" in report
    assert "Requested category mix" in report


def test_run_single_test_skips_when_expected_tool_unavailable():
    class FakeAgent:
        def __init__(self, *args, **kwargs):
            self.valid_tool_names = {"read_file", "terminal"}
            self.session_input_tokens = 0
            self.session_output_tokens = 0
            self.session_cache_read_tokens = 0
            self.session_cache_write_tokens = 0
            self.session_api_calls = 0
            self.base_url = ""
            self.api_key = None

        def run_conversation(self, *args, **kwargs):
            raise AssertionError("run_conversation should not be called for unavailable tools")

    tc = ToolCall(
        id="mcp-01",
        category="mcp",
        prompt="Use an MCP tool to list resources.",
        expected_tool="",
        expected_tool_prefix="mcp_",
    )

    with patch.dict(sys.modules, {"run_agent": SimpleNamespace(AIAgent=FakeAgent)}):
        result = run_single_test(tc, "gemini:gemma-4-31b-it", "gemini")

    assert result.skipped is True
    assert "mcp_" in result.skip_reason
    assert result.success is False