Compare commits
1 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
755e7513a1 |
@@ -1,70 +1,43 @@
|
||||
from __future__ import annotations
|
||||
|
||||
"""
|
||||
A2A agent card generation for fleet discovery.
|
||||
Agent Card — A2A-compliant agent discovery.
|
||||
Part of #843: fix: implement A2A agent card for fleet discovery (#819)
|
||||
|
||||
Refs #801.
|
||||
Closes #802.
|
||||
Provides metadata about the agent's identity, capabilities, and installed skills
|
||||
for discovery by other agents in the fleet.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from typing import Any, Dict, Iterable, List, Mapping, Sequence
|
||||
from urllib.parse import urlparse, urlunparse
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from hermes_cli import __version__
|
||||
from hermes_cli.config import load_config
|
||||
|
||||
from hermes_cli.config import load_config, get_hermes_home
|
||||
from agent.skill_utils import (
|
||||
get_all_skills_dirs,
|
||||
get_disabled_skill_names,
|
||||
iter_skill_index_files,
|
||||
parse_frontmatter,
|
||||
skill_matches_platform,
|
||||
get_all_skills_dirs,
|
||||
get_disabled_skill_names,
|
||||
skill_matches_platform
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_DESCRIPTION = "Sovereign AI agent — orchestration, code, research"
|
||||
DEFAULT_INPUT_MODES = ["text/plain", "application/json"]
|
||||
DEFAULT_OUTPUT_MODES = ["text/plain", "application/json"]
|
||||
_REQUIRED_CAPABILITY_FLAGS = (
|
||||
"streaming",
|
||||
"pushNotifications",
|
||||
"stateTransitionHistory",
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AgentSkill:
|
||||
id: str
|
||||
name: str
|
||||
description: str = ""
|
||||
tags: List[str] = field(default_factory=list)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
data: Dict[str, Any] = {"id": self.id, "name": self.name}
|
||||
if self.description:
|
||||
data["description"] = self.description
|
||||
if self.tags:
|
||||
data["tags"] = self.tags
|
||||
return data
|
||||
|
||||
version: str = "1.0.0"
|
||||
|
||||
@dataclass
|
||||
class AgentCapabilities:
|
||||
streaming: bool = True
|
||||
pushNotifications: bool = False
|
||||
stateTransitionHistory: bool = True
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
return asdict(self)
|
||||
|
||||
tools: bool = True
|
||||
vision: bool = False
|
||||
reasoning: bool = False
|
||||
|
||||
@dataclass
|
||||
class AgentCard:
|
||||
@@ -74,81 +47,14 @@ class AgentCard:
|
||||
version: str = __version__
|
||||
capabilities: AgentCapabilities = field(default_factory=AgentCapabilities)
|
||||
skills: List[AgentSkill] = field(default_factory=list)
|
||||
defaultInputModes: List[str] = field(default_factory=lambda: list(DEFAULT_INPUT_MODES))
|
||||
defaultOutputModes: List[str] = field(default_factory=lambda: list(DEFAULT_OUTPUT_MODES))
|
||||
metadata: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
data: Dict[str, Any] = {
|
||||
"name": self.name,
|
||||
"description": self.description,
|
||||
"url": self.url,
|
||||
"version": self.version,
|
||||
"capabilities": self.capabilities.to_dict(),
|
||||
"skills": [skill.to_dict() for skill in self.skills],
|
||||
"defaultInputModes": list(self.defaultInputModes),
|
||||
"defaultOutputModes": list(self.defaultOutputModes),
|
||||
}
|
||||
if self.metadata:
|
||||
data["metadata"] = dict(self.metadata)
|
||||
return data
|
||||
|
||||
def to_json(self, indent: int = 2) -> str:
|
||||
return json.dumps(self.to_dict(), indent=indent)
|
||||
|
||||
|
||||
def _env_or_empty(key: str) -> str:
|
||||
return os.environ.get(key, "").strip()
|
||||
|
||||
|
||||
def _as_agent_config(config: Mapping[str, Any] | None) -> Dict[str, Any]:
|
||||
if not isinstance(config, Mapping):
|
||||
return {}
|
||||
agent_cfg = config.get("agent")
|
||||
return dict(agent_cfg) if isinstance(agent_cfg, Mapping) else {}
|
||||
|
||||
|
||||
def _as_a2a_config(config: Mapping[str, Any] | None) -> Dict[str, Any]:
|
||||
if not isinstance(config, Mapping):
|
||||
return {}
|
||||
a2a_cfg = config.get("a2a")
|
||||
return dict(a2a_cfg) if isinstance(a2a_cfg, Mapping) else {}
|
||||
|
||||
|
||||
def _normalize_string_list(value: Any) -> List[str]:
|
||||
if value is None:
|
||||
return []
|
||||
if isinstance(value, str):
|
||||
parts = value.split(",")
|
||||
elif isinstance(value, Sequence) and not isinstance(value, (bytes, bytearray, str)):
|
||||
parts = list(value)
|
||||
else:
|
||||
parts = [value]
|
||||
out: List[str] = []
|
||||
seen = set()
|
||||
for item in parts:
|
||||
text = str(item).strip()
|
||||
if not text or text in seen:
|
||||
continue
|
||||
seen.add(text)
|
||||
out.append(text)
|
||||
return out
|
||||
|
||||
|
||||
def _normalize_skill_tags(frontmatter: Mapping[str, Any]) -> List[str]:
|
||||
tags = _normalize_string_list(frontmatter.get("tags"))
|
||||
category = str(frontmatter.get("category") or "").strip()
|
||||
if category and category not in tags:
|
||||
tags.append(category)
|
||||
return tags
|
||||
|
||||
defaultInputModes: List[str] = field(default_factory=lambda: ["text/plain"])
|
||||
defaultOutputModes: List[str] = field(default_factory=lambda: ["text/plain"])
|
||||
|
||||
def _load_skills() -> List[AgentSkill]:
|
||||
"""Scan enabled skills and return A2A skill metadata."""
|
||||
skills: List[AgentSkill] = []
|
||||
"""Scan all enabled skills and return metadata."""
|
||||
skills = []
|
||||
disabled = get_disabled_skill_names()
|
||||
seen_ids = set()
|
||||
|
||||
|
||||
for skills_dir in get_all_skills_dirs():
|
||||
if not skills_dir.is_dir():
|
||||
continue
|
||||
@@ -159,262 +65,71 @@ def _load_skills() -> List[AgentSkill]:
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
skill_name = frontmatter.get("name") or skill_file.parent.name
|
||||
if str(skill_name) in disabled:
|
||||
continue
|
||||
if not skill_matches_platform(frontmatter):
|
||||
continue
|
||||
|
||||
skill_id = str(frontmatter.get("name") or skill_file.parent.name).strip().lower().replace(" ", "-")
|
||||
if skill_id in disabled or skill_id in seen_ids:
|
||||
continue
|
||||
seen_ids.add(skill_id)
|
||||
skills.append(AgentSkill(
|
||||
id=str(skill_name),
|
||||
name=str(frontmatter.get("name", skill_name)),
|
||||
description=str(frontmatter.get("description", "")),
|
||||
version=str(frontmatter.get("version", "1.0.0"))
|
||||
))
|
||||
return skills
|
||||
|
||||
display_name = str(frontmatter.get("title") or frontmatter.get("name") or skill_file.parent.name).strip()
|
||||
description = str(frontmatter.get("description") or "").strip()
|
||||
tags = _normalize_skill_tags(frontmatter)
|
||||
skills.append(
|
||||
AgentSkill(
|
||||
id=skill_id,
|
||||
name=display_name,
|
||||
description=description,
|
||||
tags=tags,
|
||||
)
|
||||
)
|
||||
def build_agent_card() -> AgentCard:
|
||||
"""Build the agent card from current configuration and environment."""
|
||||
config = load_config()
|
||||
|
||||
# Identity
|
||||
name = os.environ.get("HERMES_AGENT_NAME") or config.get("agent", {}).get("name") or "hermes"
|
||||
description = os.environ.get("HERMES_AGENT_DESCRIPTION") or config.get("agent", {}).get("description") or "Sovereign AI agent"
|
||||
|
||||
# URL - try to determine from environment or config
|
||||
port = os.environ.get("HERMES_WEB_PORT") or "9119"
|
||||
host = os.environ.get("HERMES_WEB_HOST") or "localhost"
|
||||
url = f"http://{host}:{port}"
|
||||
|
||||
# Capabilities
|
||||
# In a real scenario, we'd check model metadata for vision/reasoning
|
||||
capabilities = AgentCapabilities(
|
||||
streaming=True,
|
||||
tools=True,
|
||||
vision=False, # Default to false unless we can confirm
|
||||
reasoning=False
|
||||
)
|
||||
|
||||
# Skills
|
||||
skills = _load_skills()
|
||||
|
||||
return AgentCard(
|
||||
name=name,
|
||||
description=description,
|
||||
url=url,
|
||||
version=__version__,
|
||||
capabilities=capabilities,
|
||||
skills=skills
|
||||
)
|
||||
|
||||
return sorted(skills, key=lambda skill: skill.id)
|
||||
|
||||
|
||||
def _get_agent_name(config: Mapping[str, Any] | None, override: str | None = None) -> str:
|
||||
if override:
|
||||
return override
|
||||
env_name = _env_or_empty("HERMES_AGENT_NAME") or _env_or_empty("AGENT_NAME")
|
||||
if env_name:
|
||||
return env_name
|
||||
agent_cfg = _as_agent_config(config)
|
||||
if agent_cfg.get("name"):
|
||||
return str(agent_cfg["name"]).strip()
|
||||
def get_agent_card_json() -> str:
|
||||
"""Return the agent card as a JSON string."""
|
||||
try:
|
||||
hostname = socket.gethostname().split(".", 1)[0].strip()
|
||||
if hostname:
|
||||
return hostname
|
||||
except Exception:
|
||||
pass
|
||||
return "hermes"
|
||||
|
||||
|
||||
def _get_description(config: Mapping[str, Any] | None, override: str | None = None) -> str:
|
||||
if override:
|
||||
return override
|
||||
env_description = _env_or_empty("HERMES_AGENT_DESCRIPTION") or _env_or_empty("AGENT_DESCRIPTION")
|
||||
if env_description:
|
||||
return env_description
|
||||
agent_cfg = _as_agent_config(config)
|
||||
if agent_cfg.get("description"):
|
||||
return str(agent_cfg["description"]).strip()
|
||||
return DEFAULT_DESCRIPTION
|
||||
|
||||
|
||||
def _normalize_a2a_url(url: str) -> str:
|
||||
raw = (url or "").strip()
|
||||
if not raw:
|
||||
return ""
|
||||
parsed = urlparse(raw if "://" in raw else f"https://{raw}")
|
||||
scheme = parsed.scheme or "https"
|
||||
netloc = parsed.netloc or parsed.path
|
||||
path = parsed.path if parsed.netloc else ""
|
||||
normalized_path = path.rstrip("/") if path not in ("", "/") else ""
|
||||
if not normalized_path.endswith("/a2a"):
|
||||
normalized_path = f"{normalized_path}/a2a" if normalized_path else "/a2a"
|
||||
return urlunparse((scheme, netloc, normalized_path, "", "", ""))
|
||||
|
||||
|
||||
def _get_agent_url(config: Mapping[str, Any] | None, override: str | None = None) -> str:
|
||||
if override:
|
||||
return _normalize_a2a_url(override)
|
||||
|
||||
agent_cfg = _as_agent_config(config)
|
||||
a2a_cfg = _as_a2a_config(config)
|
||||
|
||||
explicit = (
|
||||
_env_or_empty("HERMES_A2A_PUBLIC_URL")
|
||||
or str(a2a_cfg.get("public_url") or "").strip()
|
||||
or str(agent_cfg.get("a2a_public_url") or "").strip()
|
||||
)
|
||||
if explicit:
|
||||
return _normalize_a2a_url(explicit)
|
||||
|
||||
host = (
|
||||
_env_or_empty("HERMES_A2A_HOST")
|
||||
or str(a2a_cfg.get("host") or "").strip()
|
||||
or _env_or_empty("HERMES_WEB_HOST")
|
||||
or str(agent_cfg.get("host") or "").strip()
|
||||
or "localhost"
|
||||
)
|
||||
port = (
|
||||
_env_or_empty("HERMES_A2A_PORT")
|
||||
or str(a2a_cfg.get("port") or "").strip()
|
||||
or _env_or_empty("HERMES_WEB_PORT")
|
||||
or str(agent_cfg.get("port") or "").strip()
|
||||
or "9119"
|
||||
)
|
||||
scheme = (
|
||||
_env_or_empty("HERMES_A2A_SCHEME")
|
||||
or str(a2a_cfg.get("scheme") or "").strip()
|
||||
or ("https" if (_env_or_empty("HERMES_MTLS_CERT") or str(port) == "9443") else "http")
|
||||
)
|
||||
return _normalize_a2a_url(f"{scheme}://{host}:{port}")
|
||||
|
||||
|
||||
def _merge_skills(base_skills: Iterable[AgentSkill], extra_skills: Iterable[AgentSkill] | None = None) -> List[AgentSkill]:
|
||||
merged: Dict[str, AgentSkill] = {}
|
||||
for skill in list(base_skills) + list(extra_skills or []):
|
||||
if skill.id not in merged:
|
||||
merged[skill.id] = skill
|
||||
return [merged[key] for key in sorted(merged)]
|
||||
|
||||
|
||||
def build_agent_card(
|
||||
*,
|
||||
name: str | None = None,
|
||||
description: str | None = None,
|
||||
url: str | None = None,
|
||||
extra_skills: Iterable[AgentSkill] | None = None,
|
||||
metadata: Mapping[str, Any] | None = None,
|
||||
) -> AgentCard:
|
||||
"""Build an A2A-compliant agent card from config, env, and installed skills."""
|
||||
try:
|
||||
config = load_config()
|
||||
except Exception as exc:
|
||||
logger.debug("Falling back to empty config while building agent card: %s", exc)
|
||||
config = {}
|
||||
|
||||
card = AgentCard(
|
||||
name=_get_agent_name(config, override=name),
|
||||
description=_get_description(config, override=description),
|
||||
url=_get_agent_url(config, override=url),
|
||||
skills=_merge_skills(_load_skills(), extra_skills),
|
||||
metadata=dict(metadata or {}),
|
||||
)
|
||||
return card
|
||||
|
||||
|
||||
def validate_agent_card(card: AgentCard | Dict[str, Any]) -> List[str]:
|
||||
"""Return a list of schema-validation errors for an agent card."""
|
||||
data = card.to_dict() if isinstance(card, AgentCard) else dict(card)
|
||||
errors: List[str] = []
|
||||
|
||||
for field_name in ("name", "description", "url", "version"):
|
||||
value = data.get(field_name)
|
||||
if not isinstance(value, str) or not value.strip():
|
||||
errors.append(f"{field_name} must be a non-empty string")
|
||||
|
||||
url_value = str(data.get("url") or "")
|
||||
parsed = urlparse(url_value)
|
||||
if not parsed.scheme or not parsed.netloc:
|
||||
errors.append("url must be an absolute http/https URL")
|
||||
elif parsed.scheme not in {"http", "https"}:
|
||||
errors.append("url must use http or https")
|
||||
elif not parsed.path.rstrip("/").endswith("/a2a"):
|
||||
errors.append("url must point to the /a2a endpoint")
|
||||
|
||||
capabilities = data.get("capabilities")
|
||||
if not isinstance(capabilities, Mapping):
|
||||
errors.append("capabilities must be an object")
|
||||
else:
|
||||
for capability_name in _REQUIRED_CAPABILITY_FLAGS:
|
||||
if not isinstance(capabilities.get(capability_name), bool):
|
||||
errors.append(f"capabilities.{capability_name} must be a boolean")
|
||||
|
||||
for field_name, required_modes in (
|
||||
("defaultInputModes", DEFAULT_INPUT_MODES),
|
||||
("defaultOutputModes", DEFAULT_OUTPUT_MODES),
|
||||
):
|
||||
modes = data.get(field_name)
|
||||
if not isinstance(modes, list) or not modes:
|
||||
errors.append(f"{field_name} must be a non-empty list of MIME types")
|
||||
continue
|
||||
for mode in modes:
|
||||
if not isinstance(mode, str) or "/" not in mode:
|
||||
errors.append(f"{field_name} entries must be MIME types")
|
||||
for required_mode in required_modes:
|
||||
if required_mode not in modes:
|
||||
errors.append(f"{field_name} must include {required_mode}")
|
||||
|
||||
skills = data.get("skills")
|
||||
if not isinstance(skills, list):
|
||||
errors.append("skills must be a list")
|
||||
else:
|
||||
for index, skill in enumerate(skills):
|
||||
if not isinstance(skill, Mapping):
|
||||
errors.append(f"skills[{index}] must be an object")
|
||||
continue
|
||||
if not str(skill.get("id") or "").strip():
|
||||
errors.append(f"skills[{index}] missing id")
|
||||
if not str(skill.get("name") or "").strip():
|
||||
errors.append(f"skills[{index}] missing name")
|
||||
tags = skill.get("tags", [])
|
||||
if tags is None:
|
||||
tags = []
|
||||
if not isinstance(tags, list):
|
||||
errors.append(f"skills[{index}].tags must be a list")
|
||||
else:
|
||||
for tag in tags:
|
||||
if not isinstance(tag, str) or not tag.strip():
|
||||
errors.append(f"skills[{index}].tags entries must be non-empty strings")
|
||||
|
||||
metadata = data.get("metadata")
|
||||
if metadata is not None and not isinstance(metadata, Mapping):
|
||||
errors.append("metadata must be an object when present")
|
||||
|
||||
return errors
|
||||
|
||||
|
||||
def get_agent_card_json(
|
||||
*,
|
||||
name: str | None = None,
|
||||
description: str | None = None,
|
||||
url: str | None = None,
|
||||
metadata: Mapping[str, Any] | None = None,
|
||||
indent: int = 2,
|
||||
) -> str:
|
||||
"""Return the local agent card as JSON, falling back to an error card on failure."""
|
||||
try:
|
||||
card = build_agent_card(name=name, description=description, url=url, metadata=metadata)
|
||||
errors = validate_agent_card(card)
|
||||
if errors:
|
||||
raise ValueError("; ".join(errors))
|
||||
return card.to_json(indent=indent)
|
||||
except Exception as exc:
|
||||
logger.error("Failed to build agent card: %s", exc)
|
||||
card = build_agent_card()
|
||||
return json.dumps(asdict(card), indent=2)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to build agent card: {e}")
|
||||
# Minimal fallback card
|
||||
fallback = {
|
||||
"name": name or _env_or_empty("HERMES_AGENT_NAME") or "hermes",
|
||||
"description": "Sovereign AI agent (agent card fallback)",
|
||||
"url": url or "http://localhost:9119/a2a",
|
||||
"name": "hermes",
|
||||
"description": "Sovereign AI agent (fallback)",
|
||||
"version": __version__,
|
||||
"capabilities": AgentCapabilities().to_dict(),
|
||||
"skills": [],
|
||||
"defaultInputModes": list(DEFAULT_INPUT_MODES),
|
||||
"defaultOutputModes": list(DEFAULT_OUTPUT_MODES),
|
||||
"error": str(exc),
|
||||
"error": str(e)
|
||||
}
|
||||
return json.dumps(fallback, indent=indent)
|
||||
return json.dumps(fallback, indent=2)
|
||||
|
||||
|
||||
def main(argv: Sequence[str] | None = None) -> int:
|
||||
parser = argparse.ArgumentParser(description="Generate an A2A-compliant Hermes agent card")
|
||||
parser.add_argument("--name", help="Override the agent name")
|
||||
parser.add_argument("--description", help="Override the agent description")
|
||||
parser.add_argument("--url", help="Override the public A2A URL")
|
||||
parser.add_argument("--validate", action="store_true", help="Validate before printing; exit 1 on schema errors")
|
||||
args = parser.parse_args(list(argv) if argv is not None else None)
|
||||
|
||||
card = build_agent_card(name=args.name, description=args.description, url=args.url)
|
||||
errors = validate_agent_card(card)
|
||||
if args.validate and errors:
|
||||
for error in errors:
|
||||
print(error, file=sys.stderr)
|
||||
return 1
|
||||
print(card.to_json(indent=2))
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
def validate_agent_card(card_data: Dict[str, Any]) -> bool:
|
||||
"""Check if the card data complies with the A2A schema."""
|
||||
required = ["name", "description", "url", "version"]
|
||||
return all(k in card_data for k in required)
|
||||
|
||||
1131
docs/evaluations/tensorzero-860-evaluation.json
Normal file
1131
docs/evaluations/tensorzero-860-evaluation.json
Normal file
File diff suppressed because it is too large
Load Diff
217
docs/evaluations/tensorzero-860-evaluation.md
Normal file
217
docs/evaluations/tensorzero-860-evaluation.md
Normal file
@@ -0,0 +1,217 @@
|
||||
# TensorZero Evaluation Packet
|
||||
|
||||
Issue #860: [tensorzero LLMOps platform evaluation](https://forge.alexanderwhitestone.com/Timmy_Foundation/hermes-agent/issues/860)
|
||||
|
||||
## Scope
|
||||
|
||||
This packet evaluates TensorZero as a possible replacement for Hermes' custom provider-routing stack.
|
||||
It is intentionally grounded in the current repo state rather than a speculative cutover plan.
|
||||
|
||||
## Issue requirements being evaluated
|
||||
|
||||
- Deploy tensorzero gateway (Rust binary)
|
||||
- Migrate provider routing config
|
||||
- Test with canary (10% traffic) before full cutover
|
||||
- Feed session data for prompt optimization
|
||||
- Evaluation suite for A/B testing models
|
||||
|
||||
## Recommendation
|
||||
|
||||
Not ready for direct replacement. Recommend a shadow-evaluation phase first: keep Hermes routing live, inventory the migration seams, export SessionDB/trajectory data into an offline TensorZero experiment loop, and only design a canary gateway once percentage-based rollout controls exist.
|
||||
|
||||
## Requirement matrix
|
||||
|
||||
| Requirement | Status | Evidence labels | Summary |
|
||||
| --- | --- | --- | --- |
|
||||
| Gateway replacement scope | partial | fallback_chain, runtime_provider, gateway_provider_routing, cron_runtime_provider, auxiliary_fallback_chain, delegate_runtime_provider | Hermes already spreads provider routing across core agent, runtime provider, gateway, cron, auxiliary, and delegation seams; TensorZero would need parity across all of them before it can replace the gateway layer. |
|
||||
| Config migration | partial | provider_routing_config, runtime_provider, smart_model_routing, fallback_chain | Hermes has multiple config concepts to migrate (`provider_routing`, `fallback_providers`, `smart_model_routing`, runtime provider resolution), so TensorZero is not a drop-in config swap. |
|
||||
| 10% traffic canary | gap | — | The repo shows semantic routing and fallback, but no grounded 10% traffic-split canary mechanism. A TensorZero cutover would need new percentage-based rollout controls and observability hooks. |
|
||||
| Session data for prompt optimization | partial | session_db, trajectory_export | Hermes already has SessionDB and trajectory export surfaces that can feed offline optimization data, but not a TensorZero-native ingestion path yet. |
|
||||
| Evaluation suite / A/B testing | partial | benchmark_suite, trajectory_export | Hermes already has benchmark/trajectory machinery that can seed TensorZero A/B evaluation, but no integrated TensorZero experiment runner or live evaluation gateway. |
|
||||
|
||||
## Grounded Hermes touchpoints
|
||||
|
||||
- `run_agent.py:601` — [fallback_chain] fallback_model: Dict[str, Any] = None,
|
||||
- `run_agent.py:995` — [fallback_chain] # failure). Supports both legacy single-dict ``fallback_model`` and
|
||||
- `run_agent.py:996` — [fallback_chain] # new list ``fallback_providers`` format.
|
||||
- `run_agent.py:997` — [fallback_chain] if isinstance(fallback_model, list):
|
||||
- `run_agent.py:998` — [fallback_chain] self._fallback_chain = [
|
||||
- `run_agent.py:999` — [fallback_chain] f for f in fallback_model
|
||||
- `run_agent.py:1002` — [fallback_chain] elif isinstance(fallback_model, dict) and fallback_model.get("provider") and fallback_model.get("model"):
|
||||
- `run_agent.py:1003` — [fallback_chain] self._fallback_chain = [fallback_model]
|
||||
- `run_agent.py:1005` — [fallback_chain] self._fallback_chain = []
|
||||
- `run_agent.py:1009` — [fallback_chain] self._fallback_model = self._fallback_chain[0] if self._fallback_chain else None
|
||||
- `run_agent.py:1010` — [fallback_chain] if self._fallback_chain and not self.quiet_mode:
|
||||
- `run_agent.py:1011` — [fallback_chain] if len(self._fallback_chain) == 1:
|
||||
- `run_agent.py:1012` — [fallback_chain] fb = self._fallback_chain[0]
|
||||
- `run_agent.py:1015` — [fallback_chain] print(f"🔄 Fallback chain ({len(self._fallback_chain)} providers): " +
|
||||
- `run_agent.py:1016` — [fallback_chain] " → ".join(f"{f['model']} ({f['provider']})" for f in self._fallback_chain))
|
||||
- `run_agent.py:5624` — [fallback_chain] if self._fallback_index >= len(self._fallback_chain):
|
||||
- `run_agent.py:5627` — [fallback_chain] fb = self._fallback_chain[self._fallback_index]
|
||||
- `run_agent.py:8559` — [fallback_chain] if self._fallback_index < len(self._fallback_chain):
|
||||
- `run_agent.py:9355` — [fallback_chain] if is_rate_limited and self._fallback_index < len(self._fallback_chain):
|
||||
- `run_agent.py:10460` — [fallback_chain] if _truly_empty and self._fallback_chain:
|
||||
- `run_agent.py:10514` — [fallback_chain] + (" and fallback attempts." if self._fallback_chain else
|
||||
- `cli.py:241` — [provider_routing_config] "smart_model_routing": {
|
||||
- `cli.py:370` — [provider_routing_config] # (e.g. platform_toolsets, provider_routing, memory, honcho, etc.)
|
||||
- `cli.py:1753` — [provider_routing_config] pr = CLI_CONFIG.get("provider_routing", {}) or {}
|
||||
- `cli.py:1762` — [provider_routing_config] # Supports new list format (fallback_providers) and legacy single-dict (fallback_model).
|
||||
- `cli.py:1763` — [provider_routing_config] fb = CLI_CONFIG.get("fallback_providers") or CLI_CONFIG.get("fallback_model") or []
|
||||
- `cli.py:1770` — [provider_routing_config] self._smart_model_routing = CLI_CONFIG.get("smart_model_routing", {}) or {}
|
||||
- `cli.py:2771` — [provider_routing_config] from agent.smart_model_routing import resolve_turn_route
|
||||
- `cli.py:2776` — [provider_routing_config] self._smart_model_routing,
|
||||
- `hermes_cli/runtime_provider.py:209` — [runtime_provider] def resolve_requested_provider(requested: Optional[str] = None) -> str:
|
||||
- `hermes_cli/runtime_provider.py:649` — [runtime_provider] def resolve_runtime_provider(
|
||||
- `agent/smart_model_routing.py:62` — [smart_model_routing] def choose_cheap_model_route(user_message: str, routing_config: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
|
||||
- `agent/smart_model_routing.py:110` — [smart_model_routing] def resolve_turn_route(user_message: str, routing_config: Optional[Dict[str, Any]], primary: Dict[str, Any]) -> Dict[str, Any]:
|
||||
- `gateway/run.py:1271` — [gateway_provider_routing] def _load_provider_routing() -> dict:
|
||||
- `gateway/run.py:1285` — [gateway_provider_routing] def _load_fallback_model() -> list | dict | None:
|
||||
- `gateway/run.py:1306` — [gateway_provider_routing] def _load_smart_model_routing() -> dict:
|
||||
- `cron/scheduler.py:684` — [cron_runtime_provider] pr = _cfg.get("provider_routing", {})
|
||||
- `cron/scheduler.py:688` — [cron_runtime_provider] resolve_runtime_provider,
|
||||
- `cron/scheduler.py:697` — [cron_runtime_provider] runtime = resolve_runtime_provider(**runtime_kwargs)
|
||||
- `cron/scheduler.py:702` — [cron_runtime_provider] from agent.smart_model_routing import resolve_turn_route
|
||||
- `cron/scheduler.py:703` — [cron_runtime_provider] turn_route = resolve_turn_route(
|
||||
- `cron/scheduler.py:717` — [cron_runtime_provider] fallback_model = _cfg.get("fallback_providers") or _cfg.get("fallback_model") or None
|
||||
- `cron/scheduler.py:746` — [cron_runtime_provider] fallback_model=fallback_model,
|
||||
- `agent/auxiliary_client.py:1018` — [auxiliary_fallback_chain] def _get_provider_chain() -> List[tuple]:
|
||||
- `agent/auxiliary_client.py:1107` — [auxiliary_fallback_chain] for label, try_fn in _get_provider_chain():
|
||||
- `agent/auxiliary_client.py:1189` — [auxiliary_fallback_chain] # ── Step 2: aggregator / fallback chain ──────────────────────────────
|
||||
- `agent/auxiliary_client.py:1191` — [auxiliary_fallback_chain] for label, try_fn in _get_provider_chain():
|
||||
- `agent/auxiliary_client.py:2397` — [auxiliary_fallback_chain] # error, fall through to the fallback chain below.
|
||||
- `agent/auxiliary_client.py:2417` — [auxiliary_fallback_chain] # auto (the default) = best-effort fallback chain. (#7559)
|
||||
- `agent/auxiliary_client.py:2589` — [auxiliary_fallback_chain] # error, fall through to the fallback chain below.
|
||||
- `tools/delegate_tool.py:662` — [delegate_runtime_provider] # bundle (base_url, api_key, api_mode) via the same runtime provider system
|
||||
- `tools/delegate_tool.py:854` — [delegate_runtime_provider] provider) is resolved via the runtime provider system — the same path used
|
||||
- `tools/delegate_tool.py:909` — [delegate_runtime_provider] from hermes_cli.runtime_provider import resolve_runtime_provider
|
||||
- `tools/delegate_tool.py:910` — [delegate_runtime_provider] runtime = resolve_runtime_provider(requested=configured_provider)
|
||||
- `hermes_state.py:115` — [session_db] class SessionDB:
|
||||
- `batch_runner.py:320` — [trajectory_export] save_trajectories=False, # We handle saving ourselves
|
||||
- `batch_runner.py:346` — [trajectory_export] trajectory = agent._convert_to_trajectory_format(
|
||||
- `batch_runner.py:460` — [trajectory_export] trajectory_entry = {
|
||||
- `batch_runner.py:474` — [trajectory_export] f.write(json.dumps(trajectory_entry, ensure_ascii=False) + "\n")
|
||||
- `benchmarks/tool_call_benchmark.py:3` — [benchmark_suite] Tool-Calling Benchmark — Gemma 4 vs mimo-v2-pro regression test.
|
||||
- `benchmarks/tool_call_benchmark.py:9` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py # full 100-call suite
|
||||
- `benchmarks/tool_call_benchmark.py:10` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py --limit 10 # quick smoke test
|
||||
- `benchmarks/tool_call_benchmark.py:11` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py --models nous # single model
|
||||
- `benchmarks/tool_call_benchmark.py:12` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py --category file # single category
|
||||
- `benchmarks/tool_call_benchmark.py:37` — [benchmark_suite] class ToolCall:
|
||||
- `benchmarks/tool_call_benchmark.py:51` — [benchmark_suite] ToolCall("file-01", "file", "Read the file /tmp/test_bench.txt and show me its contents.",
|
||||
- `benchmarks/tool_call_benchmark.py:53` — [benchmark_suite] ToolCall("file-02", "file", "Write 'hello benchmark' to /tmp/test_bench_out.txt",
|
||||
- `benchmarks/tool_call_benchmark.py:55` — [benchmark_suite] ToolCall("file-03", "file", "Search for the word 'import' in all Python files in the current directory.",
|
||||
- `benchmarks/tool_call_benchmark.py:57` — [benchmark_suite] ToolCall("file-04", "file", "Read lines 1-20 of /etc/hosts",
|
||||
- `benchmarks/tool_call_benchmark.py:59` — [benchmark_suite] ToolCall("file-05", "file", "Patch /tmp/test_bench_out.txt: replace 'hello' with 'goodbye'",
|
||||
- `benchmarks/tool_call_benchmark.py:61` — [benchmark_suite] ToolCall("file-06", "file", "Search for files matching *.py in the current directory.",
|
||||
- `benchmarks/tool_call_benchmark.py:63` — [benchmark_suite] ToolCall("file-07", "file", "Read the first 10 lines of /etc/passwd",
|
||||
- `benchmarks/tool_call_benchmark.py:65` — [benchmark_suite] ToolCall("file-08", "file", "Write a JSON config to /tmp/bench_config.json with key 'debug': true",
|
||||
- `benchmarks/tool_call_benchmark.py:67` — [benchmark_suite] ToolCall("file-09", "file", "Search for 'def test_' in Python test files.",
|
||||
- `benchmarks/tool_call_benchmark.py:69` — [benchmark_suite] ToolCall("file-10", "file", "Read /tmp/bench_config.json and tell me what's in it.",
|
||||
- `benchmarks/tool_call_benchmark.py:71` — [benchmark_suite] ToolCall("file-11", "file", "Create a file /tmp/bench_readme.md with one line: '# Benchmark'",
|
||||
- `benchmarks/tool_call_benchmark.py:73` — [benchmark_suite] ToolCall("file-12", "file", "Search for 'TODO' comments in all .py files.",
|
||||
- `benchmarks/tool_call_benchmark.py:75` — [benchmark_suite] ToolCall("file-13", "file", "Read /tmp/bench_readme.md",
|
||||
- `benchmarks/tool_call_benchmark.py:77` — [benchmark_suite] ToolCall("file-14", "file", "Patch /tmp/bench_readme.md: replace '# Benchmark' with '# Tool Benchmark'",
|
||||
- `benchmarks/tool_call_benchmark.py:78` — [benchmark_suite] "patch", "Tool Benchmark"),
|
||||
- `benchmarks/tool_call_benchmark.py:79` — [benchmark_suite] ToolCall("file-15", "file", "Write a Python one-liner to /tmp/bench_hello.py that prints hello.",
|
||||
- `benchmarks/tool_call_benchmark.py:81` — [benchmark_suite] ToolCall("file-16", "file", "Search for all .json files in /tmp/.",
|
||||
- `benchmarks/tool_call_benchmark.py:83` — [benchmark_suite] ToolCall("file-17", "file", "Read /tmp/bench_hello.py and verify it has print('hello').",
|
||||
- `benchmarks/tool_call_benchmark.py:85` — [benchmark_suite] ToolCall("file-18", "file", "Patch /tmp/bench_hello.py to print 'hello world' instead of 'hello'.",
|
||||
- `benchmarks/tool_call_benchmark.py:87` — [benchmark_suite] ToolCall("file-19", "file", "List files matching 'bench*' in /tmp/.",
|
||||
- `benchmarks/tool_call_benchmark.py:89` — [benchmark_suite] ToolCall("file-20", "file", "Read /tmp/test_bench.txt again and summarize its contents.",
|
||||
- `benchmarks/tool_call_benchmark.py:93` — [benchmark_suite] ToolCall("term-01", "terminal", "Run `echo hello world` in the terminal.",
|
||||
- `benchmarks/tool_call_benchmark.py:95` — [benchmark_suite] ToolCall("term-02", "terminal", "Run `date` to get the current date and time.",
|
||||
- `benchmarks/tool_call_benchmark.py:97` — [benchmark_suite] ToolCall("term-03", "terminal", "Run `uname -a` to get system information.",
|
||||
- `benchmarks/tool_call_benchmark.py:99` — [benchmark_suite] ToolCall("term-04", "terminal", "Run `pwd` to show the current directory.",
|
||||
- `benchmarks/tool_call_benchmark.py:101` — [benchmark_suite] ToolCall("term-05", "terminal", "Run `ls -la /tmp/ | head -20` to list temp files.",
|
||||
- `benchmarks/tool_call_benchmark.py:103` — [benchmark_suite] ToolCall("term-06", "terminal", "Run `whoami` to show the current user.",
|
||||
- `benchmarks/tool_call_benchmark.py:105` — [benchmark_suite] ToolCall("term-07", "terminal", "Run `df -h` to show disk usage.",
|
||||
- `benchmarks/tool_call_benchmark.py:107` — [benchmark_suite] ToolCall("term-08", "terminal", "Run `python3 --version` to check Python version.",
|
||||
- `benchmarks/tool_call_benchmark.py:109` — [benchmark_suite] ToolCall("term-09", "terminal", "Run `cat /etc/hostname` to get the hostname.",
|
||||
- `benchmarks/tool_call_benchmark.py:111` — [benchmark_suite] ToolCall("term-10", "terminal", "Run `uptime` to see system uptime.",
|
||||
- `benchmarks/tool_call_benchmark.py:113` — [benchmark_suite] ToolCall("term-11", "terminal", "Run `env | grep PATH` to show the PATH variable.",
|
||||
- `benchmarks/tool_call_benchmark.py:115` — [benchmark_suite] ToolCall("term-12", "terminal", "Run `wc -l /etc/passwd` to count lines.",
|
||||
- `benchmarks/tool_call_benchmark.py:117` — [benchmark_suite] ToolCall("term-13", "terminal", "Run `echo $SHELL` to show the current shell.",
|
||||
- `benchmarks/tool_call_benchmark.py:119` — [benchmark_suite] ToolCall("term-14", "terminal", "Run `free -h || vm_stat` to check memory usage.",
|
||||
- `benchmarks/tool_call_benchmark.py:121` — [benchmark_suite] ToolCall("term-15", "terminal", "Run `id` to show user and group IDs.",
|
||||
- `benchmarks/tool_call_benchmark.py:123` — [benchmark_suite] ToolCall("term-16", "terminal", "Run `hostname` to get the machine hostname.",
|
||||
- `benchmarks/tool_call_benchmark.py:125` — [benchmark_suite] ToolCall("term-17", "terminal", "Run `echo {1..5}` to test brace expansion.",
|
||||
- `benchmarks/tool_call_benchmark.py:127` — [benchmark_suite] ToolCall("term-18", "terminal", "Run `seq 1 5` to generate a number sequence.",
|
||||
- `benchmarks/tool_call_benchmark.py:129` — [benchmark_suite] ToolCall("term-19", "terminal", "Run `python3 -c 'print(2+2)'` to compute 2+2.",
|
||||
- `benchmarks/tool_call_benchmark.py:131` — [benchmark_suite] ToolCall("term-20", "terminal", "Run `ls -d /tmp/bench* 2>/dev/null | wc -l` to count bench files.",
|
||||
- `benchmarks/tool_call_benchmark.py:135` — [benchmark_suite] ToolCall("code-01", "code", "Execute a Python script that computes factorial of 10.",
|
||||
- `benchmarks/tool_call_benchmark.py:137` — [benchmark_suite] ToolCall("code-02", "code", "Run Python to read /tmp/test_bench.txt and count its words.",
|
||||
- `benchmarks/tool_call_benchmark.py:139` — [benchmark_suite] ToolCall("code-03", "code", "Execute Python to generate the first 20 Fibonacci numbers.",
|
||||
- `benchmarks/tool_call_benchmark.py:141` — [benchmark_suite] ToolCall("code-04", "code", "Run Python to parse JSON from a string and print keys.",
|
||||
- `benchmarks/tool_call_benchmark.py:143` — [benchmark_suite] ToolCall("code-05", "code", "Execute Python to list all files in /tmp/ matching 'bench*'.",
|
||||
- `benchmarks/tool_call_benchmark.py:145` — [benchmark_suite] ToolCall("code-06", "code", "Run Python to compute the sum of squares from 1 to 100.",
|
||||
- `benchmarks/tool_call_benchmark.py:147` — [benchmark_suite] ToolCall("code-07", "code", "Execute Python to check if 'racecar' is a palindrome.",
|
||||
- `benchmarks/tool_call_benchmark.py:149` — [benchmark_suite] ToolCall("code-08", "code", "Run Python to create a CSV string with 5 rows of sample data.",
|
||||
- `benchmarks/tool_call_benchmark.py:151` — [benchmark_suite] ToolCall("code-09", "code", "Execute Python to sort a list [5,2,8,1,9] and print the result.",
|
||||
- `benchmarks/tool_call_benchmark.py:153` — [benchmark_suite] ToolCall("code-10", "code", "Run Python to count lines in /etc/passwd.",
|
||||
- `benchmarks/tool_call_benchmark.py:155` — [benchmark_suite] ToolCall("code-11", "code", "Execute Python to hash the string 'benchmark' with SHA256.",
|
||||
- `benchmarks/tool_call_benchmark.py:157` — [benchmark_suite] ToolCall("code-12", "code", "Run Python to get the current UTC timestamp.",
|
||||
- `benchmarks/tool_call_benchmark.py:159` — [benchmark_suite] ToolCall("code-13", "code", "Execute Python to convert 'hello world' to uppercase and reverse it.",
|
||||
- `benchmarks/tool_call_benchmark.py:161` — [benchmark_suite] ToolCall("code-14", "code", "Run Python to create a dictionary of system info (platform, python version).",
|
||||
- `benchmarks/tool_call_benchmark.py:163` — [benchmark_suite] ToolCall("code-15", "code", "Execute Python to check internet connectivity by resolving google.com.",
|
||||
- `benchmarks/tool_call_benchmark.py:167` — [benchmark_suite] ToolCall("deleg-01", "delegate", "Use a subagent to find all .log files in /tmp/.",
|
||||
- `benchmarks/tool_call_benchmark.py:169` — [benchmark_suite] ToolCall("deleg-02", "delegate", "Delegate to a subagent: what is 15 * 37?",
|
||||
- `benchmarks/tool_call_benchmark.py:171` — [benchmark_suite] ToolCall("deleg-03", "delegate", "Use a subagent to check if Python 3 is installed and its version.",
|
||||
- `benchmarks/tool_call_benchmark.py:173` — [benchmark_suite] ToolCall("deleg-04", "delegate", "Delegate: read /tmp/test_bench.txt and summarize it in one sentence.",
|
||||
- `benchmarks/tool_call_benchmark.py:175` — [benchmark_suite] ToolCall("deleg-05", "delegate", "Use a subagent to list the contents of /tmp/ directory.",
|
||||
- `benchmarks/tool_call_benchmark.py:177` — [benchmark_suite] ToolCall("deleg-06", "delegate", "Delegate: count the number of .py files in the current directory.",
|
||||
- `benchmarks/tool_call_benchmark.py:179` — [benchmark_suite] ToolCall("deleg-07", "delegate", "Use a subagent to check disk space with df -h.",
|
||||
- `benchmarks/tool_call_benchmark.py:181` — [benchmark_suite] ToolCall("deleg-08", "delegate", "Delegate: what OS are we running on?",
|
||||
- `benchmarks/tool_call_benchmark.py:183` — [benchmark_suite] ToolCall("deleg-09", "delegate", "Use a subagent to find the hostname of this machine.",
|
||||
- `benchmarks/tool_call_benchmark.py:185` — [benchmark_suite] ToolCall("deleg-10", "delegate", "Delegate: create a temp file /tmp/bench_deleg.txt with 'done'.",
|
||||
- `benchmarks/tool_call_benchmark.py:189` — [benchmark_suite] ToolCall("todo-01", "todo", "Add a todo item: 'Run benchmark suite'",
|
||||
- `benchmarks/tool_call_benchmark.py:190` — [benchmark_suite] "todo", "benchmark"),
|
||||
- `benchmarks/tool_call_benchmark.py:191` — [benchmark_suite] ToolCall("todo-02", "todo", "Show me the current todo list.",
|
||||
- `benchmarks/tool_call_benchmark.py:193` — [benchmark_suite] ToolCall("todo-03", "todo", "Mark the first todo item as completed.",
|
||||
- `benchmarks/tool_call_benchmark.py:195` — [benchmark_suite] ToolCall("todo-04", "todo", "Add a todo: 'Review benchmark results' with status pending.",
|
||||
- `benchmarks/tool_call_benchmark.py:197` — [benchmark_suite] ToolCall("todo-05", "todo", "Clear all completed todos.",
|
||||
- `benchmarks/tool_call_benchmark.py:199` — [benchmark_suite] ToolCall("todo-06", "memory", "Save this to memory: 'benchmark ran on {date}'".format(
|
||||
- `benchmarks/tool_call_benchmark.py:201` — [benchmark_suite] "memory", "benchmark"),
|
||||
- `benchmarks/tool_call_benchmark.py:202` — [benchmark_suite] ToolCall("todo-07", "memory", "Search memory for 'benchmark'.",
|
||||
- `benchmarks/tool_call_benchmark.py:203` — [benchmark_suite] "memory", "benchmark"),
|
||||
- `benchmarks/tool_call_benchmark.py:204` — [benchmark_suite] ToolCall("todo-08", "memory", "Add a memory note: 'test models are gemma-4 and mimo-v2-pro'.",
|
||||
- `benchmarks/tool_call_benchmark.py:206` — [benchmark_suite] ToolCall("todo-09", "todo", "Add three todo items: 'analyze', 'report', 'cleanup'.",
|
||||
- `benchmarks/tool_call_benchmark.py:208` — [benchmark_suite] ToolCall("todo-10", "memory", "Search memory for any notes about models.",
|
||||
- `benchmarks/tool_call_benchmark.py:212` — [benchmark_suite] ToolCall("skill-01", "skills", "List all available skills.",
|
||||
- `benchmarks/tool_call_benchmark.py:214` — [benchmark_suite] ToolCall("skill-02", "skills", "View the skill called 'test-driven-development'.",
|
||||
- `benchmarks/tool_call_benchmark.py:216` — [benchmark_suite] ToolCall("skill-03", "skills", "Search for skills related to 'git'.",
|
||||
- `benchmarks/tool_call_benchmark.py:218` — [benchmark_suite] ToolCall("skill-04", "skills", "View the 'code-review' skill.",
|
||||
- `benchmarks/tool_call_benchmark.py:220` — [benchmark_suite] ToolCall("skill-05", "skills", "List all skills in the 'devops' category.",
|
||||
- `benchmarks/tool_call_benchmark.py:222` — [benchmark_suite] ToolCall("skill-06", "skills", "View the 'systematic-debugging' skill.",
|
||||
- `benchmarks/tool_call_benchmark.py:224` — [benchmark_suite] ToolCall("skill-07", "skills", "Search for skills about 'testing'.",
|
||||
- `benchmarks/tool_call_benchmark.py:226` — [benchmark_suite] ToolCall("skill-08", "skills", "View the 'writing-plans' skill.",
|
||||
- `benchmarks/tool_call_benchmark.py:228` — [benchmark_suite] ToolCall("skill-09", "skills", "List skills in 'software-development' category.",
|
||||
- `benchmarks/tool_call_benchmark.py:230` — [benchmark_suite] ToolCall("skill-10", "skills", "View the 'pr-review-discipline' skill.",
|
||||
- `benchmarks/tool_call_benchmark.py:234` — [benchmark_suite] ToolCall("file-21", "file", "Write a Python snippet to /tmp/bench_sort.py that sorts [3,1,2].",
|
||||
- `benchmarks/tool_call_benchmark.py:236` — [benchmark_suite] ToolCall("file-22", "file", "Read /tmp/bench_sort.py back and confirm it exists.",
|
||||
- `benchmarks/tool_call_benchmark.py:238` — [benchmark_suite] ToolCall("file-23", "file", "Search for 'class' in all .py files in the benchmarks directory.",
|
||||
- `benchmarks/tool_call_benchmark.py:240` — [benchmark_suite] ToolCall("term-21", "terminal", "Run `cat /etc/os-release 2>/dev/null || sw_vers 2>/dev/null` for OS info.",
|
||||
- `benchmarks/tool_call_benchmark.py:242` — [benchmark_suite] ToolCall("term-22", "terminal", "Run `nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null` for CPU count.",
|
||||
- `benchmarks/tool_call_benchmark.py:244` — [benchmark_suite] ToolCall("code-16", "code", "Execute Python to flatten a nested list [[1,2],[3,4],[5]].",
|
||||
- `benchmarks/tool_call_benchmark.py:246` — [benchmark_suite] ToolCall("code-17", "code", "Run Python to check if a number 17 is prime.",
|
||||
- `benchmarks/tool_call_benchmark.py:248` — [benchmark_suite] ToolCall("deleg-11", "delegate", "Delegate: what is the current working directory?",
|
||||
- `benchmarks/tool_call_benchmark.py:250` — [benchmark_suite] ToolCall("todo-11", "todo", "Add a todo: 'Finalize benchmark report' status pending.",
|
||||
- `benchmarks/tool_call_benchmark.py:252` — [benchmark_suite] ToolCall("todo-12", "memory", "Store fact: 'benchmark categories: file, terminal, code, delegate, todo, memory, skills'.",
|
||||
- `benchmarks/tool_call_benchmark.py:254` — [benchmark_suite] ToolCall("skill-11", "skills", "Search for skills about 'deployment'.",
|
||||
- `benchmarks/tool_call_benchmark.py:256` — [benchmark_suite] ToolCall("skill-12", "skills", "View the 'gitea-burn-cycle' skill.",
|
||||
- `benchmarks/tool_call_benchmark.py:258` — [benchmark_suite] ToolCall("skill-13", "skills", "List all available skill categories.",
|
||||
- `benchmarks/tool_call_benchmark.py:260` — [benchmark_suite] ToolCall("skill-14", "skills", "Search for skills related to 'memory'.",
|
||||
- `benchmarks/tool_call_benchmark.py:262` — [benchmark_suite] ToolCall("skill-15", "skills", "View the 'mimo-swarm' skill.",
|
||||
- `benchmarks/tool_call_benchmark.py:311` — [benchmark_suite] """Create prerequisite files for the benchmark."""
|
||||
- `benchmarks/tool_call_benchmark.py:313` — [benchmark_suite] "This is a benchmark test file.\n"
|
||||
- `benchmarks/tool_call_benchmark.py:349` — [benchmark_suite] "You are a benchmark test runner. Execute the user's request by calling "
|
||||
- `benchmarks/tool_call_benchmark.py:406` — [benchmark_suite] """Generate markdown benchmark report."""
|
||||
- `benchmarks/tool_call_benchmark.py:428` — [benchmark_suite] f"# Tool-Calling Benchmark Report",
|
||||
- `benchmarks/tool_call_benchmark.py:535` — [benchmark_suite] parser = argparse.ArgumentParser(description="Tool-calling benchmark")
|
||||
- `benchmarks/tool_call_benchmark.py:544` — [benchmark_suite] help="Output report path (default: benchmarks/gemma4-tool-calling-YYYY-MM-DD.md)")
|
||||
- `benchmarks/tool_call_benchmark.py:565` — [benchmark_suite] output_path = Path(args.output) if args.output else REPO_ROOT / "benchmarks" / f"gemma4-tool-calling-{date_str}.md"
|
||||
- `benchmarks/tool_call_benchmark.py:575` — [benchmark_suite] print(f"Benchmark: {len(suite)} tests × {len(model_specs)} models = {len(suite) * len(model_specs)} calls")
|
||||
|
||||
## Suggested next slice
|
||||
|
||||
1. Build an exporter that emits SessionDB + trajectory data into a TensorZero-friendly offline dataset.
|
||||
2. Define percentage-based canary controls before attempting any gateway replacement.
|
||||
3. Keep Hermes routing authoritative until TensorZero proves parity across CLI, gateway, cron, auxiliary, and delegation surfaces.
|
||||
318
scripts/tensorzero_eval_packet.py
Normal file
318
scripts/tensorzero_eval_packet.py
Normal file
@@ -0,0 +1,318 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Generate a grounded TensorZero evaluation packet for Hermes.
|
||||
|
||||
This script inventories the current Hermes routing/evaluation surfaces, then
|
||||
builds a markdown packet assessing how much of issue #860 can be satisfied by
|
||||
TensorZero and where the migration risk still lives.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
from typing import Iterable
|
||||
|
||||
ISSUE_NUMBER = 860
|
||||
ISSUE_TITLE = "tensorzero LLMOps platform evaluation"
|
||||
ISSUE_URL = "https://forge.alexanderwhitestone.com/Timmy_Foundation/hermes-agent/issues/860"
|
||||
DEFAULT_OUTPUT = Path("docs/evaluations/tensorzero-860-evaluation.md")
|
||||
DEFAULT_JSON_OUTPUT = Path("docs/evaluations/tensorzero-860-evaluation.json")
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class TouchpointPattern:
    """A regex probe tied to one repo file, used to locate routing/eval seams."""

    label: str        # stable identifier; used as an evidence key in the requirement matrix
    file_path: str    # path relative to the repo root being scanned
    regex: str        # pattern applied line-by-line (compiled case-insensitively by _iter_matches)
    description: str  # human-readable explanation of why this seam matters
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Touchpoint:
    """A single grounded match: one line in one file that hit a pattern."""

    label: str         # label of the TouchpointPattern that produced this match
    file_path: str     # file the match was found in (repo-relative)
    line_number: int   # 1-based line number of the matching line
    matched_text: str  # the matching line, stripped of surrounding whitespace
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class RequirementStatus:
    """Assessment of one issue requirement against the grounded evidence."""

    key: str     # machine-friendly requirement identifier (e.g. "canary_rollout")
    name: str    # display name used in the markdown requirement table
    status: str  # "partial" or "gap" as assigned by build_requirement_matrix
    evidence_labels: tuple[str, ...]  # touchpoint labels that back the status
    summary: str  # one-paragraph rationale rendered into the report
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class EvaluationReport:
    """Top-level evaluation result: issue metadata, evidence, and the verdict."""

    issue_number: int  # Gitea issue under evaluation (860)
    issue_title: str
    issue_url: str
    recommendation: str  # overall go/no-go narrative from build_report
    touchpoints: tuple[Touchpoint, ...]          # grounded file:line evidence
    requirements: tuple[RequirementStatus, ...]  # per-requirement verdicts
|
||||
|
||||
|
||||
# Grounded probes: each pattern ties one evaluation concern from issue #860 to a
# concrete Hermes file, so every claim in the packet can cite file:line evidence.
PATTERNS: tuple[TouchpointPattern, ...] = (
    TouchpointPattern(
        label="fallback_chain",
        file_path="run_agent.py",
        regex=r"_fallback_chain|fallback_providers|fallback_model",
        description="Primary agent fallback-provider chain in the core conversation loop.",
    ),
    TouchpointPattern(
        label="provider_routing_config",
        file_path="cli.py",
        regex=r"provider_routing|fallback_providers|smart_model_routing",
        description="CLI-owned provider routing and fallback configuration surfaces.",
    ),
    TouchpointPattern(
        label="runtime_provider",
        file_path="hermes_cli/runtime_provider.py",
        regex=r"def resolve_runtime_provider|def resolve_requested_provider",
        description="Central runtime provider resolution for CLI, gateway, cron, and helpers.",
    ),
    TouchpointPattern(
        label="smart_model_routing",
        file_path="agent/smart_model_routing.py",
        regex=r"def resolve_turn_route|def choose_cheap_model_route",
        description="Cheap-vs-strong turn routing that TensorZero would need to absorb or replace.",
    ),
    TouchpointPattern(
        label="gateway_provider_routing",
        file_path="gateway/run.py",
        regex=r"def _load_provider_routing|def _load_fallback_model|def _load_smart_model_routing",
        description="Gateway-specific loading of routing, fallback, and smart-model policies.",
    ),
    TouchpointPattern(
        label="cron_runtime_provider",
        file_path="cron/scheduler.py",
        regex=r"resolve_runtime_provider|resolve_turn_route|provider_routing|fallback_model",
        description="Cron execution path that re-resolves providers and routing on every run.",
    ),
    TouchpointPattern(
        label="auxiliary_fallback_chain",
        file_path="agent/auxiliary_client.py",
        regex=r"fallback chain|_get_provider_chain|provider chain",
        description="Auxiliary task routing/fallback chain outside the main inference path.",
    ),
    TouchpointPattern(
        label="delegate_runtime_provider",
        file_path="tools/delegate_tool.py",
        regex=r"runtime provider system|resolve the full credential bundle|resolve_runtime_provider",
        description="Subagent/delegation routing path that would also need TensorZero parity.",
    ),
    TouchpointPattern(
        label="session_db",
        file_path="hermes_state.py",
        regex=r"class SessionDB",
        description="Session persistence surface that could feed TensorZero optimization/eval data.",
    ),
    TouchpointPattern(
        label="trajectory_export",
        file_path="batch_runner.py",
        regex=r"trajectory_entry|save_trajectories|_convert_to_trajectory_format",
        description="Trajectory export surface for offline optimization and replay data.",
    ),
    TouchpointPattern(
        label="benchmark_suite",
        file_path="benchmarks/tool_call_benchmark.py",
        regex=r"ToolCall\(|class ToolCall|benchmark",
        description="Existing benchmark/evaluation harness that could map to TensorZero experiments.",
    ),
)
|
||||
|
||||
|
||||
def _iter_matches(pattern: TouchpointPattern, text: str) -> Iterable[Touchpoint]:
    """Yield a Touchpoint for every line of *text* that matches *pattern*.

    Matching is case-insensitive and line numbers are 1-based, so the
    resulting file:line citations are directly usable in editors.
    """
    probe = re.compile(pattern.regex, re.IGNORECASE)
    for lineno, raw_line in enumerate(text.splitlines(), start=1):
        if not probe.search(raw_line):
            continue
        yield Touchpoint(
            label=pattern.label,
            file_path=pattern.file_path,
            line_number=lineno,
            matched_text=raw_line.strip(),
        )
|
||||
|
||||
|
||||
def scan_touchpoints(repo_root: Path) -> list[Touchpoint]:
    """Run every PATTERNS probe over *repo_root* and collect grounded matches.

    Probed files that do not exist are skipped silently, so the scan degrades
    gracefully on partial checkouts instead of raising.
    """
    found: list[Touchpoint] = []
    for probe in PATTERNS:
        target = repo_root / probe.file_path
        if not target.exists():
            continue
        contents = target.read_text(encoding="utf-8")
        found.extend(_iter_matches(probe, contents))
    return found
|
||||
|
||||
|
||||
def build_requirement_matrix(touchpoints: list[Touchpoint]) -> list[RequirementStatus]:
    """Score each issue-#860 requirement against the grounded touchpoint evidence.

    A requirement is marked "partial" when enough of its candidate evidence
    labels appear in the scan results, otherwise "gap". The canary requirement
    is always a gap: no traffic-split mechanism exists in the repo to ground it.
    """
    present = {tp.label for tp in touchpoints}

    def observed(candidates: tuple[str, ...]) -> tuple[str, ...]:
        # Preserve candidate order; keep only labels the scan actually produced.
        return tuple(label for label in candidates if label in present)

    matrix: list[RequirementStatus] = []

    gateway_hits = observed((
        "fallback_chain",
        "runtime_provider",
        "gateway_provider_routing",
        "cron_runtime_provider",
        "auxiliary_fallback_chain",
        "delegate_runtime_provider",
    ))
    if gateway_hits:
        gateway_summary = (
            "Hermes already spreads provider routing across core agent, runtime provider, gateway, cron, auxiliary, and delegation seams; "
            "TensorZero would need parity across all of them before it can replace the gateway layer."
        )
    else:
        gateway_summary = "No grounded routing surfaces were found for a gateway replacement assessment."
    matrix.append(
        RequirementStatus(
            "gateway_replacement",
            "Gateway replacement scope",
            "partial" if len(gateway_hits) >= 4 else "gap",
            gateway_hits,
            gateway_summary,
        )
    )

    config_hits = observed((
        "provider_routing_config",
        "runtime_provider",
        "smart_model_routing",
        "fallback_chain",
    ))
    if config_hits:
        config_summary = (
            "Hermes has multiple config concepts to migrate (`provider_routing`, `fallback_providers`, `smart_model_routing`, runtime provider resolution), "
            "so TensorZero is not a drop-in config swap."
        )
    else:
        config_summary = "No current config migration surface was found."
    matrix.append(
        RequirementStatus(
            "config_migration",
            "Config migration",
            "partial" if len(config_hits) >= 3 else "gap",
            config_hits,
            config_summary,
        )
    )

    # No evidence labels exist for canary rollout, so this row is always a gap.
    matrix.append(
        RequirementStatus(
            "canary_rollout",
            "10% traffic canary",
            "gap",
            (),
            "The repo shows semantic routing and fallback, but no grounded 10% traffic-split canary mechanism. "
            "A TensorZero cutover would need new percentage-based rollout controls and observability hooks.",
        )
    )

    session_labels = ("session_db", "trajectory_export")
    session_hits = observed(session_labels)
    if session_hits:
        session_summary = (
            "Hermes already has SessionDB and trajectory export surfaces that can feed offline optimization data, "
            "but not a TensorZero-native ingestion path yet."
        )
    else:
        session_summary = "No session-data surface was found for prompt optimization."
    matrix.append(
        RequirementStatus(
            "session_feedback",
            "Session data for prompt optimization",
            "partial" if len(session_hits) == len(session_labels) else "gap",
            session_hits,
            session_summary,
        )
    )

    eval_hits = observed(("benchmark_suite", "trajectory_export"))
    if eval_hits:
        eval_summary = (
            "Hermes already has benchmark/trajectory machinery that can seed TensorZero A/B evaluation, "
            "but no integrated TensorZero experiment runner or live evaluation gateway."
        )
    else:
        eval_summary = "No evaluation harness was found to support TensorZero A/B testing."
    matrix.append(
        RequirementStatus(
            "evaluation_suite",
            "Evaluation suite / A/B testing",
            "partial" if "benchmark_suite" in eval_hits else "gap",
            eval_hits,
            eval_summary,
        )
    )

    return matrix
|
||||
|
||||
|
||||
def build_report(touchpoints: list[Touchpoint], requirement_matrix: list[RequirementStatus]) -> EvaluationReport:
    """Assemble the immutable report object from scan results and the matrix.

    The recommendation text is fixed: the repo evidence consistently points to
    a shadow-evaluation phase rather than a direct gateway replacement.
    """
    return EvaluationReport(
        issue_number=ISSUE_NUMBER,
        issue_title=ISSUE_TITLE,
        issue_url=ISSUE_URL,
        recommendation=(
            "Not ready for direct replacement. Recommend a shadow-evaluation phase first: keep Hermes routing live, "
            "inventory the migration seams, export SessionDB/trajectory data into an offline TensorZero experiment loop, "
            "and only design a canary gateway once percentage-based rollout controls exist."
        ),
        touchpoints=tuple(touchpoints),
        requirements=tuple(requirement_matrix),
    )
|
||||
|
||||
|
||||
def build_markdown(report: EvaluationReport) -> str:
    """Render *report* as the markdown evaluation packet.

    Output sections: header, scope, evaluated requirements, recommendation,
    requirement matrix table, grounded touchpoints, and suggested next steps.
    Always ends with exactly one trailing newline.
    """
    out: list[str] = [
        "# TensorZero Evaluation Packet",
        "",
        f"Issue #{report.issue_number}: [{report.issue_title}]({report.issue_url})",
        "",
        "## Scope",
        "",
        "This packet evaluates TensorZero as a possible replacement for Hermes' custom provider-routing stack.",
        "It is intentionally grounded in the current repo state rather than a speculative cutover plan.",
        "",
        "## Issue requirements being evaluated",
        "",
        "- Deploy tensorzero gateway (Rust binary)",
        "- Migrate provider routing config",
        "- Test with canary (10% traffic) before full cutover",
        "- Feed session data for prompt optimization",
        "- Evaluation suite for A/B testing models",
        "",
        "## Recommendation",
        "",
        report.recommendation,
        "",
        "## Requirement matrix",
        "",
        "| Requirement | Status | Evidence labels | Summary |",
        "| --- | --- | --- | --- |",
    ]
    for row in report.requirements:
        # Em dash marks a requirement with no grounded evidence at all.
        evidence = ", ".join(row.evidence_labels) if row.evidence_labels else "—"
        out.append(f"| {row.name} | {row.status} | {evidence} | {row.summary} |")
    out += ["", "## Grounded Hermes touchpoints", ""]
    if report.touchpoints:
        out.extend(
            f"- `{tp.file_path}:{tp.line_number}` — [{tp.label}] {tp.matched_text}"
            for tp in report.touchpoints
        )
    else:
        out.append("- No routing/evaluation touchpoints were found.")
    out += [
        "",
        "## Suggested next slice",
        "",
        "1. Build an exporter that emits SessionDB + trajectory data into a TensorZero-friendly offline dataset.",
        "2. Define percentage-based canary controls before attempting any gateway replacement.",
        "3. Keep Hermes routing authoritative until TensorZero proves parity across CLI, gateway, cron, auxiliary, and delegation surfaces.",
        "",
    ]
    return "\n".join(out).rstrip() + "\n"
|
||||
|
||||
|
||||
def write_outputs(report: EvaluationReport, markdown_path: Path, json_path: Path | None = None) -> None:
    """Write the markdown packet, and optionally a JSON mirror, to disk.

    Parent directories are created as needed; the JSON file is skipped
    entirely when *json_path* is None.
    """
    targets = [markdown_path] if json_path is None else [markdown_path, json_path]
    for destination in targets:
        destination.parent.mkdir(parents=True, exist_ok=True)
    markdown_path.write_text(build_markdown(report), encoding="utf-8")
    if json_path is not None:
        json_path.write_text(json.dumps(asdict(report), indent=2), encoding="utf-8")
|
||||
|
||||
|
||||
def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
    """Parse command-line arguments for the evaluation-packet generator.

    Args:
        argv: Argument list to parse. Defaults to None, which makes argparse
            fall back to ``sys.argv[1:]`` — preserving the existing CLI
            behavior while letting tests and embedders pass arguments
            explicitly instead of mutating process state.

    Returns:
        Namespace with ``repo_root``, ``output``, and ``json_output``
        attributes (all strings; path defaults come from module constants).
    """
    parser = argparse.ArgumentParser(description="Generate a grounded TensorZero evaluation packet for Hermes")
    parser.add_argument("--repo-root", default=".", help="Hermes repo root to scan")
    parser.add_argument("--output", default=str(DEFAULT_OUTPUT), help="Markdown output path")
    parser.add_argument("--json-output", default=str(DEFAULT_JSON_OUTPUT), help="Optional JSON output path")
    return parser.parse_args(argv)
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: scan the repo, build the report, write the outputs.

    Returns 0 on success; any I/O failure propagates as an exception.
    """
    args = parse_args()
    root = Path(args.repo_root).resolve()
    grounded = scan_touchpoints(root)
    report = build_report(grounded, build_requirement_matrix(grounded))
    json_target = Path(args.json_output) if args.json_output else None
    write_outputs(report, Path(args.output), json_target)
    # Announce every file actually written (JSON is optional).
    written: list[object] = [args.output]
    if json_target is not None:
        written.append(json_target)
    for target in written:
        print(f"Wrote {target}")
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s integer exit code to the shell via SystemExit.
    raise SystemExit(main())
|
||||
@@ -1,150 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from agent import agent_card as mod
|
||||
|
||||
|
||||
DEFAULT_DESCRIPTION = "Sovereign AI agent — orchestration, code, research"
|
||||
|
||||
|
||||
def _set_base_context(monkeypatch, *, name: str = "Timmy", description: str = DEFAULT_DESCRIPTION, url: str = "https://timmy.local:9443/a2a", skills=None):
    """Patch agent_card's config, skills, and env so card generation is deterministic.

    Stubs ``load_config`` and ``_load_skills`` on the module under test, pins
    the public A2A URL via HERMES_A2A_PUBLIC_URL, and clears every
    name/description override env var so only the stubbed config influences
    the generated card. Passing ``skills=[]`` yields a card with no skills;
    ``skills=None`` installs a single default AgentSkill fixture.
    """
    monkeypatch.setattr(mod, "load_config", lambda: {"agent": {"name": name, "description": description}})
    monkeypatch.setattr(
        mod,
        "_load_skills",
        # list(...) so each call returns a fresh list the caller may mutate.
        lambda: list(
            skills
            if skills is not None
            else [
                mod.AgentSkill(
                    id="code",
                    name="Code Implementation",
                    description="Implement and patch code",
                    tags=["python", "gitea"],
                )
            ]
        ),
    )
    monkeypatch.setenv("HERMES_A2A_PUBLIC_URL", url)
    # Clear all override env vars so the stubbed config is the single source of truth.
    monkeypatch.delenv("HERMES_AGENT_NAME", raising=False)
    monkeypatch.delenv("AGENT_NAME", raising=False)
    monkeypatch.delenv("HERMES_AGENT_DESCRIPTION", raising=False)
    monkeypatch.delenv("AGENT_DESCRIPTION", raising=False)
|
||||
|
||||
|
||||
def test_build_agent_card_matches_issue_802_schema(monkeypatch):
    """The generated card matches the A2A schema fields pinned by issue #802."""
    _set_base_context(monkeypatch)

    card = mod.build_agent_card()
    payload = card.to_dict()

    assert payload["name"] == "Timmy"
    assert payload["description"] == DEFAULT_DESCRIPTION
    assert payload["url"] == "https://timmy.local:9443/a2a"
    assert payload["capabilities"] == {
        "streaming": True,
        "pushNotifications": False,
        "stateTransitionHistory": True,
    }
    assert payload["defaultInputModes"] == ["text/plain", "application/json"]
    assert payload["defaultOutputModes"] == ["text/plain", "application/json"]
    assert payload["skills"][0]["tags"] == ["python", "gitea"]
    # The card must also pass the module's own schema validator with no errors.
    assert mod.validate_agent_card(payload) == []
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    ("name", "url"),
    [
        ("Timmy", "https://timmy.local:9443/a2a"),
        ("Allegro", "https://allegro.local:9443/a2a"),
        ("Ezra", "https://ezra.local:9443/a2a"),
    ],
)
def test_build_agent_card_supports_fleet_members(monkeypatch, name, url):
    """Card generation honors per-agent name/URL overrides across the fleet."""
    # skills=[] keeps the card minimal so only identity fields are exercised.
    _set_base_context(monkeypatch, name=name, url=url, skills=[])

    payload = mod.build_agent_card().to_dict()

    assert payload["name"] == name
    assert payload["url"] == url
    assert mod.validate_agent_card(payload) == []
|
||||
|
||||
|
||||
def test_load_skills_collects_tags_and_category(monkeypatch, tmp_path):
    """_load_skills parses SKILL.md frontmatter and folds category into tags."""
    skill_root = tmp_path / "skills"
    skill_dir = skill_root / "code-implementation"
    skill_dir.mkdir(parents=True)
    (skill_dir / "SKILL.md").write_text(
        """---
name: Code Implementation
description: Implement and patch code
tags: [python, gitea]
category: discovery
---

# Code Implementation
""",
        encoding="utf-8",
    )

    # Point skill discovery at the temp tree and disable filtering/platform checks.
    monkeypatch.setattr(mod, "get_all_skills_dirs", lambda: [skill_root])
    monkeypatch.setattr(mod, "get_disabled_skill_names", lambda: set())
    monkeypatch.setattr(mod, "skill_matches_platform", lambda _frontmatter: True)

    skills = mod._load_skills()

    assert len(skills) == 1
    # Skill id comes from the directory name, not the frontmatter name.
    assert skills[0].id == "code-implementation"
    assert skills[0].name == "Code Implementation"
    assert skills[0].description == "Implement and patch code"
    # The frontmatter category is appended after the explicit tags.
    assert skills[0].tags == ["python", "gitea", "discovery"]
|
||||
|
||||
|
||||
def test_validate_agent_card_reports_schema_errors():
    """A card violating every schema rule yields one error message per rule."""
    bad_card = {
        "name": "",
        "description": "",
        "url": "timmy.local",
        "version": "",
        "capabilities": {"streaming": True},
        "skills": [{"id": "", "name": "", "tags": "python"}],
        "defaultInputModes": ["text/plain"],
        "defaultOutputModes": ["plain"],
        "metadata": [],
    }

    errors = mod.validate_agent_card(bad_card)

    # Each fragment corresponds to one deliberately broken field above.
    expected_fragments = (
        "name must be a non-empty string",
        "url must be an absolute http/https URL",
        "capabilities.pushNotifications",
        "skills[0] missing id",
        "skills[0].tags must be a list",
        "defaultInputModes must include application/json",
        "defaultOutputModes entries must be MIME types",
        "metadata must be an object",
    )
    for fragment in expected_fragments:
        assert any(fragment in error for error in errors)
|
||||
|
||||
|
||||
def test_get_agent_card_json_emits_valid_json(monkeypatch):
    """get_agent_card_json returns parseable JSON that passes validation."""
    _set_base_context(monkeypatch)

    card = json.loads(mod.get_agent_card_json())

    assert card["name"] == "Timmy"
    assert mod.validate_agent_card(card) == []
|
||||
|
||||
|
||||
def test_main_validate_prints_card(monkeypatch, capsys):
    """`main --validate` prints the card JSON on stdout, nothing on stderr."""
    _set_base_context(monkeypatch)

    rc = mod.main(["--validate"])
    out, err = capsys.readouterr()

    assert rc == 0
    card = json.loads(out)
    assert card["url"] == "https://timmy.local:9443/a2a"
    assert err == ""
|
||||
tests/test_tensorzero_eval_packet.py — new file (+149 lines)
|
||||
import sys
from pathlib import Path

# Make the repository's scripts/ directory importable so the packet
# generator can be exercised without installing it as a package.
SCRIPT_DIR = Path(__file__).resolve().parents[1] / "scripts"
sys.path.insert(0, str(SCRIPT_DIR))

import tensorzero_eval_packet as tz
|
||||
|
||||
|
||||
def test_scan_touchpoints_finds_expected_matches(tmp_path):
    """scan_touchpoints discovers every known integration point in a fake tree."""
    # Relative path -> file content covering each scanner pattern.
    fixture_files = {
        "run_agent.py": "self._fallback_chain = []\n# Provider fallback chain\n",
        "hermes_cli/runtime_provider.py": "def resolve_runtime_provider():\n return {}\n",
        "agent/smart_model_routing.py": "def resolve_turn_route(user_message, routing_config, primary):\n return primary\n",
        "gateway/run.py": "def _load_provider_routing():\n return {}\n",
        "cron/scheduler.py": "runtime = resolve_runtime_provider()\nturn_route = resolve_turn_route('x', {}, {})\n",
        "hermes_state.py": "class SessionDB:\n pass\n",
        "benchmarks/tool_call_benchmark.py": "class ToolCall: ...\n",
    }
    for rel_path, content in fixture_files.items():
        target = tmp_path / rel_path
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content)

    touchpoints = tz.scan_touchpoints(tmp_path)

    found_labels = {tp.label for tp in touchpoints}
    for expected in (
        "fallback_chain",
        "runtime_provider",
        "smart_model_routing",
        "gateway_provider_routing",
        "cron_runtime_provider",
        "session_db",
        "benchmark_suite",
    ):
        assert expected in found_labels
|
||||
|
||||
|
||||
def test_build_requirement_matrix_marks_canary_as_gap_without_split_support():
    """Without traffic-split evidence the canary requirement is a gap; the
    other requirements are only 'partial' given these touchpoints."""
    # (label, file_path, line_number, matched_text) for each fake touchpoint.
    raw_touchpoints = [
        ("runtime_provider", "hermes_cli/runtime_provider.py", 10, "def resolve_runtime_provider"),
        ("provider_routing_config", "cli.py", 20, "provider_routing"),
        ("fallback_chain", "run_agent.py", 21, "_fallback_chain = []"),
        ("smart_model_routing", "agent/smart_model_routing.py", 30, "resolve_turn_route"),
        ("gateway_provider_routing", "gateway/run.py", 35, "def _load_provider_routing"),
        ("cron_runtime_provider", "cron/scheduler.py", 36, "runtime = resolve_runtime_provider()"),
        ("session_db", "hermes_state.py", 40, "class SessionDB"),
        ("trajectory_export", "batch_runner.py", 50, "trajectory_entry"),
        ("benchmark_suite", "benchmarks/tool_call_benchmark.py", 60, "ToolCall"),
    ]
    touchpoints = [
        tz.Touchpoint(
            label=label,
            file_path=file_path,
            line_number=line_number,
            matched_text=matched_text,
        )
        for label, file_path, line_number, matched_text in raw_touchpoints
    ]

    matrix = tz.build_requirement_matrix(touchpoints)
    status_by_key = {row.key: row.status for row in matrix}

    assert status_by_key["gateway_replacement"] == "partial"
    assert status_by_key["config_migration"] == "partial"
    assert status_by_key["canary_rollout"] == "gap"
    assert status_by_key["session_feedback"] == "partial"
    assert status_by_key["evaluation_suite"] == "partial"
|
||||
|
||||
|
||||
def test_build_markdown_renders_recommendation_and_touchpoints():
    """Rendered markdown uses human-readable labels and file:line references."""
    touchpoints = [
        tz.Touchpoint(
            label="runtime_provider",
            file_path="hermes_cli/runtime_provider.py",
            line_number=10,
            matched_text="def resolve_runtime_provider",
        ),
        tz.Touchpoint(
            label="session_db",
            file_path="hermes_state.py",
            line_number=40,
            matched_text="class SessionDB",
        ),
    ]

    requirement_matrix = tz.build_requirement_matrix(touchpoints)
    rendered = tz.build_markdown(tz.build_report(touchpoints, requirement_matrix))

    assert "# TensorZero Evaluation Packet" in rendered
    # Raw requirement keys must not leak into the document...
    assert "gateway_replacement" not in rendered
    # ...only their human-friendly labels.
    assert "Gateway replacement scope" in rendered
    assert "Not ready for direct replacement" in rendered
    assert "hermes_cli/runtime_provider.py:10" in rendered
    assert "hermes_state.py:40" in rendered
|
||||
|
||||
|
||||
def test_issue_context_is_embedded_in_report():
    """Even an empty report embeds the originating issue and rollout context."""
    rendered = tz.build_markdown(tz.build_report([], []))

    assert "Issue #860" in rendered
    assert "tensorzero" in rendered.lower()
    assert "10% traffic" in rendered
|
||||
Reference in New Issue
Block a user