Compare commits
1 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 755e7513a1 |  |
326 agent/account_usage.py
@@ -1,326 +0,0 @@
from __future__ import annotations

from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Any, Optional

import httpx

from agent.anthropic_adapter import _is_oauth_token, resolve_anthropic_token
from hermes_cli.auth import _read_codex_tokens, resolve_codex_runtime_credentials
from hermes_cli.runtime_provider import resolve_runtime_provider


def _utc_now() -> datetime:
    return datetime.now(timezone.utc)


@dataclass(frozen=True)
class AccountUsageWindow:
    label: str
    used_percent: Optional[float] = None
    reset_at: Optional[datetime] = None
    detail: Optional[str] = None


@dataclass(frozen=True)
class AccountUsageSnapshot:
    provider: str
    source: str
    fetched_at: datetime
    title: str = "Account limits"
    plan: Optional[str] = None
    windows: tuple[AccountUsageWindow, ...] = ()
    details: tuple[str, ...] = ()
    unavailable_reason: Optional[str] = None

    @property
    def available(self) -> bool:
        return bool(self.windows or self.details) and not self.unavailable_reason


def _title_case_slug(value: Optional[str]) -> Optional[str]:
    cleaned = str(value or "").strip()
    if not cleaned:
        return None
    return cleaned.replace("_", " ").replace("-", " ").title()


def _parse_dt(value: Any) -> Optional[datetime]:
    if value in (None, ""):
        return None
    if isinstance(value, (int, float)):
        return datetime.fromtimestamp(float(value), tz=timezone.utc)
    if isinstance(value, str):
        text = value.strip()
        if not text:
            return None
        if text.endswith("Z"):
            text = text[:-1] + "+00:00"
        try:
            dt = datetime.fromisoformat(text)
            return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)
        except ValueError:
            return None
    return None


def _format_reset(dt: Optional[datetime]) -> str:
    if not dt:
        return "unknown"
    local_dt = dt.astimezone()
    delta = dt - _utc_now()
    total_seconds = int(delta.total_seconds())
    if total_seconds <= 0:
        return f"now ({local_dt.strftime('%Y-%m-%d %H:%M %Z')})"
    hours, rem = divmod(total_seconds, 3600)
    minutes = rem // 60
    if hours >= 24:
        days, hours = divmod(hours, 24)
        rel = f"in {days}d {hours}h"
    elif hours > 0:
        rel = f"in {hours}h {minutes}m"
    else:
        rel = f"in {minutes}m"
    return f"{rel} ({local_dt.strftime('%Y-%m-%d %H:%M %Z')})"


def render_account_usage_lines(snapshot: Optional[AccountUsageSnapshot], *, markdown: bool = False) -> list[str]:
    if not snapshot:
        return []
    header = f"📈 {'**' if markdown else ''}{snapshot.title}{'**' if markdown else ''}"
    lines = [header]
    if snapshot.plan:
        lines.append(f"Provider: {snapshot.provider} ({snapshot.plan})")
    else:
        lines.append(f"Provider: {snapshot.provider}")
    for window in snapshot.windows:
        if window.used_percent is None:
            base = f"{window.label}: unavailable"
        else:
            remaining = max(0, round(100 - float(window.used_percent)))
            used = max(0, round(float(window.used_percent)))
            base = f"{window.label}: {remaining}% remaining ({used}% used)"
        if window.reset_at:
            base += f" • resets {_format_reset(window.reset_at)}"
        elif window.detail:
            base += f" • {window.detail}"
        lines.append(base)
    for detail in snapshot.details:
        lines.append(detail)
    if snapshot.unavailable_reason:
        lines.append(f"Unavailable: {snapshot.unavailable_reason}")
    return lines


def _resolve_codex_usage_url(base_url: str) -> str:
    normalized = (base_url or "").strip().rstrip("/")
    if not normalized:
        normalized = "https://chatgpt.com/backend-api/codex"
    if normalized.endswith("/codex"):
        normalized = normalized[: -len("/codex")]
    if "/backend-api" in normalized:
        return normalized + "/wham/usage"
    return normalized + "/api/codex/usage"


def _fetch_codex_account_usage() -> Optional[AccountUsageSnapshot]:
    creds = resolve_codex_runtime_credentials(refresh_if_expiring=True)
    token_data = _read_codex_tokens()
    tokens = token_data.get("tokens") or {}
    account_id = str(tokens.get("account_id", "") or "").strip() or None
    headers = {
        "Authorization": f"Bearer {creds['api_key']}",
        "Accept": "application/json",
        "User-Agent": "codex-cli",
    }
    if account_id:
        headers["ChatGPT-Account-Id"] = account_id
    with httpx.Client(timeout=15.0) as client:
        response = client.get(_resolve_codex_usage_url(creds.get("base_url", "")), headers=headers)
        response.raise_for_status()
        payload = response.json() or {}
    rate_limit = payload.get("rate_limit") or {}
    windows: list[AccountUsageWindow] = []
    for key, label in (("primary_window", "Session"), ("secondary_window", "Weekly")):
        window = rate_limit.get(key) or {}
        used = window.get("used_percent")
        if used is None:
            continue
        windows.append(
            AccountUsageWindow(
                label=label,
                used_percent=float(used),
                reset_at=_parse_dt(window.get("reset_at")),
            )
        )
    details: list[str] = []
    credits = payload.get("credits") or {}
    if credits.get("has_credits"):
        balance = credits.get("balance")
        if isinstance(balance, (int, float)):
            details.append(f"Credits balance: ${float(balance):.2f}")
        elif credits.get("unlimited"):
            details.append("Credits balance: unlimited")
    return AccountUsageSnapshot(
        provider="openai-codex",
        source="usage_api",
        fetched_at=_utc_now(),
        plan=_title_case_slug(payload.get("plan_type")),
        windows=tuple(windows),
        details=tuple(details),
    )


def _fetch_anthropic_account_usage() -> Optional[AccountUsageSnapshot]:
    token = (resolve_anthropic_token() or "").strip()
    if not token:
        return None
    if not _is_oauth_token(token):
        return AccountUsageSnapshot(
            provider="anthropic",
            source="oauth_usage_api",
            fetched_at=_utc_now(),
            unavailable_reason="Anthropic account limits are only available for OAuth-backed Claude accounts.",
        )
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
        "Content-Type": "application/json",
        "anthropic-beta": "oauth-2025-04-20",
        "User-Agent": "claude-code/2.1.0",
    }
    with httpx.Client(timeout=15.0) as client:
        response = client.get("https://api.anthropic.com/api/oauth/usage", headers=headers)
        response.raise_for_status()
        payload = response.json() or {}
    windows: list[AccountUsageWindow] = []
    mapping = (
        ("five_hour", "Current session"),
        ("seven_day", "Current week"),
        ("seven_day_opus", "Opus week"),
        ("seven_day_sonnet", "Sonnet week"),
    )
    for key, label in mapping:
        window = payload.get(key) or {}
        util = window.get("utilization")
        if util is None:
            continue
        used = float(util) * 100 if float(util) <= 1 else float(util)
        windows.append(
            AccountUsageWindow(
                label=label,
                used_percent=used,
                reset_at=_parse_dt(window.get("resets_at")),
            )
        )
    details: list[str] = []
    extra = payload.get("extra_usage") or {}
    if extra.get("is_enabled"):
        used_credits = extra.get("used_credits")
        monthly_limit = extra.get("monthly_limit")
        currency = extra.get("currency") or "USD"
        if isinstance(used_credits, (int, float)) and isinstance(monthly_limit, (int, float)):
            details.append(
                f"Extra usage: {used_credits:.2f} / {monthly_limit:.2f} {currency}"
            )
    return AccountUsageSnapshot(
        provider="anthropic",
        source="oauth_usage_api",
        fetched_at=_utc_now(),
        windows=tuple(windows),
        details=tuple(details),
    )


def _fetch_openrouter_account_usage(base_url: Optional[str], api_key: Optional[str]) -> Optional[AccountUsageSnapshot]:
    runtime = resolve_runtime_provider(
        requested="openrouter",
        explicit_base_url=base_url,
        explicit_api_key=api_key,
    )
    token = str(runtime.get("api_key", "") or "").strip()
    if not token:
        return None
    normalized = str(runtime.get("base_url", "") or "").rstrip("/")
    credits_url = f"{normalized}/credits"
    key_url = f"{normalized}/key"
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
    }
    with httpx.Client(timeout=10.0) as client:
        credits_resp = client.get(credits_url, headers=headers)
        credits_resp.raise_for_status()
        credits = (credits_resp.json() or {}).get("data") or {}
        try:
            key_resp = client.get(key_url, headers=headers)
            key_resp.raise_for_status()
            key_data = (key_resp.json() or {}).get("data") or {}
        except Exception:
            key_data = {}
    total_credits = float(credits.get("total_credits") or 0.0)
    total_usage = float(credits.get("total_usage") or 0.0)
    details = [f"Credits balance: ${max(0.0, total_credits - total_usage):.2f}"]
    windows: list[AccountUsageWindow] = []
    limit = key_data.get("limit")
    limit_remaining = key_data.get("limit_remaining")
    limit_reset = str(key_data.get("limit_reset") or "").strip()
    usage = key_data.get("usage")
    if (
        isinstance(limit, (int, float))
        and float(limit) > 0
        and isinstance(limit_remaining, (int, float))
        and 0 <= float(limit_remaining) <= float(limit)
    ):
        limit_value = float(limit)
        remaining_value = float(limit_remaining)
        used_percent = ((limit_value - remaining_value) / limit_value) * 100
        detail_parts = [f"${remaining_value:.2f} of ${limit_value:.2f} remaining"]
        if limit_reset:
            detail_parts.append(f"resets {limit_reset}")
        windows.append(
            AccountUsageWindow(
                label="API key quota",
                used_percent=used_percent,
                detail=" • ".join(detail_parts),
            )
        )
    if isinstance(usage, (int, float)):
        usage_parts = [f"API key usage: ${float(usage):.2f} total"]
        for value, label in (
            (key_data.get("usage_daily"), "today"),
            (key_data.get("usage_weekly"), "this week"),
            (key_data.get("usage_monthly"), "this month"),
        ):
            if isinstance(value, (int, float)) and float(value) > 0:
                usage_parts.append(f"${float(value):.2f} {label}")
        details.append(" • ".join(usage_parts))
    return AccountUsageSnapshot(
        provider="openrouter",
        source="credits_api",
        fetched_at=_utc_now(),
        windows=tuple(windows),
        details=tuple(details),
    )


def fetch_account_usage(
    provider: Optional[str],
    *,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
) -> Optional[AccountUsageSnapshot]:
    normalized = str(provider or "").strip().lower()
    if normalized in {"", "auto", "custom"}:
        return None
    try:
        if normalized == "openai-codex":
            return _fetch_codex_account_usage()
        if normalized == "anthropic":
            return _fetch_anthropic_account_usage()
        if normalized == "openrouter":
            return _fetch_openrouter_account_usage(base_url, api_key)
    except Exception:
        return None
    return None
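For reference, a minimal sketch of how the pieces of the module removed above composed: build a snapshot by hand and run it through the renderer. The snapshot values are invented samples, and the import assumes the module lived at agent/account_usage.py as inferred from the cli.py import below.

# Illustrative sample only -- the snapshot values are made up.
from datetime import datetime, timedelta, timezone

from agent.account_usage import (
    AccountUsageSnapshot,
    AccountUsageWindow,
    render_account_usage_lines,
)

snapshot = AccountUsageSnapshot(
    provider="anthropic",
    source="oauth_usage_api",
    fetched_at=datetime.now(timezone.utc),
    plan="Pro",
    windows=(
        AccountUsageWindow(
            label="Current session",
            used_percent=15.0,
            reset_at=datetime.now(timezone.utc) + timedelta(hours=3),
        ),
    ),
)
for line in render_account_usage_lines(snapshot, markdown=True):
    print(line)
# 📈 **Account limits**
# Provider: anthropic (Pro)
# Current session: 85% remaining (15% used) • resets in 2h 59m (local timestamp)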
25 cli.py
@@ -13,7 +13,6 @@ Usage:
    python cli.py --list-tools  # List available tools and exit
"""

import concurrent.futures
import logging
import os
import shutil
@@ -64,7 +63,6 @@ from agent.usage_pricing import (
    format_duration_compact,
    format_token_count_compact,
)
from agent.account_usage import fetch_account_usage, render_account_usage_lines
from hermes_cli.banner import _format_context_length, format_banner_version_label

_COMMAND_SPINNER_FRAMES = ("⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏")
@@ -6473,29 +6471,6 @@ class HermesCLI:
        if cost_result.status == "unknown":
            print(f" Note: Pricing unknown for {agent.model}")

        # Account limits -- fetched off-thread with a hard timeout so slow
        # provider APIs don't hang the prompt.
        provider = getattr(agent, "provider", None) or getattr(self, "provider", None)
        base_url = getattr(agent, "base_url", None) or getattr(self, "base_url", None)
        api_key = getattr(agent, "api_key", None) or getattr(self, "api_key", None)
        account_snapshot = None
        if provider:
            with concurrent.futures.ThreadPoolExecutor(max_workers=1) as _pool:
                try:
                    account_snapshot = _pool.submit(
                        fetch_account_usage,
                        provider,
                        base_url=base_url,
                        api_key=api_key,
                    ).result(timeout=10.0)
                except (concurrent.futures.TimeoutError, Exception):
                    account_snapshot = None
        account_lines = [f" {line}" for line in render_account_usage_lines(account_snapshot)]
        if account_lines:
            print()
            for line in account_lines:
                print(line)

        if self.verbose:
            logging.getLogger().setLevel(logging.DEBUG)
            for noisy in ('openai', 'openai._base_client', 'httpx', 'httpcore', 'asyncio', 'hpack', 'grpc', 'modal'):
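The removed block above leaned on one pattern worth keeping in mind: submit the blocking fetch to a single-worker executor and bound the caller with `result(timeout=...)`. A minimal, self-contained sketch of that pattern follows; `slow_fetch` is a made-up stand-in for fetch_account_usage, and the explicit `shutdown(wait=False, cancel_futures=True)` is used here so the caller does not block waiting for the worker the way a plain `with` block would.

# Sketch only: hard-timeout a slow call on a worker thread.
import concurrent.futures
import time

def slow_fetch() -> str:
    time.sleep(30)  # simulates a hung provider API
    return "usage"

pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
try:
    result = pool.submit(slow_fetch).result(timeout=1.0)
except concurrent.futures.TimeoutError:
    result = None  # give up; the prompt must not hang
finally:
    # Do not wait for the worker; cancel anything still queued.
    pool.shutdown(wait=False, cancel_futures=True)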
1131 docs/evaluations/tensorzero-860-evaluation.json Normal file
File diff suppressed because it is too large
217 docs/evaluations/tensorzero-860-evaluation.md Normal file
@@ -0,0 +1,217 @@
# TensorZero Evaluation Packet

Issue #860: [tensorzero LLMOps platform evaluation](https://forge.alexanderwhitestone.com/Timmy_Foundation/hermes-agent/issues/860)

## Scope

This packet evaluates TensorZero as a possible replacement for Hermes' custom provider-routing stack.
It is intentionally grounded in the current repo state rather than a speculative cutover plan.

## Issue requirements being evaluated

- Deploy tensorzero gateway (Rust binary)
- Migrate provider routing config
- Test with canary (10% traffic) before full cutover
- Feed session data for prompt optimization
- Evaluation suite for A/B testing models

## Recommendation

Not ready for direct replacement. Recommend a shadow-evaluation phase first: keep Hermes routing live, inventory the migration seams, export SessionDB/trajectory data into an offline TensorZero experiment loop, and only design a canary gateway once percentage-based rollout controls exist.

## Requirement matrix

| Requirement | Status | Evidence labels | Summary |
| --- | --- | --- | --- |
| Gateway replacement scope | partial | fallback_chain, runtime_provider, gateway_provider_routing, cron_runtime_provider, auxiliary_fallback_chain, delegate_runtime_provider | Hermes already spreads provider routing across core agent, runtime provider, gateway, cron, auxiliary, and delegation seams; TensorZero would need parity across all of them before it can replace the gateway layer. |
| Config migration | partial | provider_routing_config, runtime_provider, smart_model_routing, fallback_chain | Hermes has multiple config concepts to migrate (`provider_routing`, `fallback_providers`, `smart_model_routing`, runtime provider resolution), so TensorZero is not a drop-in config swap. |
| 10% traffic canary | gap | — | The repo shows semantic routing and fallback, but no grounded 10% traffic-split canary mechanism. A TensorZero cutover would need new percentage-based rollout controls and observability hooks. |
| Session data for prompt optimization | partial | session_db, trajectory_export | Hermes already has SessionDB and trajectory export surfaces that can feed offline optimization data, but not a TensorZero-native ingestion path yet. |
| Evaluation suite / A/B testing | partial | benchmark_suite, trajectory_export | Hermes already has benchmark/trajectory machinery that can seed TensorZero A/B evaluation, but no integrated TensorZero experiment runner or live evaluation gateway. |

## Grounded Hermes touchpoints

- `run_agent.py:601` — [fallback_chain] fallback_model: Dict[str, Any] = None,
- `run_agent.py:995` — [fallback_chain] # failure). Supports both legacy single-dict ``fallback_model`` and
- `run_agent.py:996` — [fallback_chain] # new list ``fallback_providers`` format.
- `run_agent.py:997` — [fallback_chain] if isinstance(fallback_model, list):
- `run_agent.py:998` — [fallback_chain] self._fallback_chain = [
- `run_agent.py:999` — [fallback_chain] f for f in fallback_model
- `run_agent.py:1002` — [fallback_chain] elif isinstance(fallback_model, dict) and fallback_model.get("provider") and fallback_model.get("model"):
- `run_agent.py:1003` — [fallback_chain] self._fallback_chain = [fallback_model]
- `run_agent.py:1005` — [fallback_chain] self._fallback_chain = []
- `run_agent.py:1009` — [fallback_chain] self._fallback_model = self._fallback_chain[0] if self._fallback_chain else None
- `run_agent.py:1010` — [fallback_chain] if self._fallback_chain and not self.quiet_mode:
- `run_agent.py:1011` — [fallback_chain] if len(self._fallback_chain) == 1:
- `run_agent.py:1012` — [fallback_chain] fb = self._fallback_chain[0]
- `run_agent.py:1015` — [fallback_chain] print(f"🔄 Fallback chain ({len(self._fallback_chain)} providers): " +
- `run_agent.py:1016` — [fallback_chain] " → ".join(f"{f['model']} ({f['provider']})" for f in self._fallback_chain))
- `run_agent.py:5624` — [fallback_chain] if self._fallback_index >= len(self._fallback_chain):
- `run_agent.py:5627` — [fallback_chain] fb = self._fallback_chain[self._fallback_index]
- `run_agent.py:8559` — [fallback_chain] if self._fallback_index < len(self._fallback_chain):
- `run_agent.py:9355` — [fallback_chain] if is_rate_limited and self._fallback_index < len(self._fallback_chain):
- `run_agent.py:10460` — [fallback_chain] if _truly_empty and self._fallback_chain:
- `run_agent.py:10514` — [fallback_chain] + (" and fallback attempts." if self._fallback_chain else
- `cli.py:241` — [provider_routing_config] "smart_model_routing": {
- `cli.py:370` — [provider_routing_config] # (e.g. platform_toolsets, provider_routing, memory, honcho, etc.)
- `cli.py:1753` — [provider_routing_config] pr = CLI_CONFIG.get("provider_routing", {}) or {}
- `cli.py:1762` — [provider_routing_config] # Supports new list format (fallback_providers) and legacy single-dict (fallback_model).
- `cli.py:1763` — [provider_routing_config] fb = CLI_CONFIG.get("fallback_providers") or CLI_CONFIG.get("fallback_model") or []
- `cli.py:1770` — [provider_routing_config] self._smart_model_routing = CLI_CONFIG.get("smart_model_routing", {}) or {}
- `cli.py:2771` — [provider_routing_config] from agent.smart_model_routing import resolve_turn_route
- `cli.py:2776` — [provider_routing_config] self._smart_model_routing,
- `hermes_cli/runtime_provider.py:209` — [runtime_provider] def resolve_requested_provider(requested: Optional[str] = None) -> str:
- `hermes_cli/runtime_provider.py:649` — [runtime_provider] def resolve_runtime_provider(
- `agent/smart_model_routing.py:62` — [smart_model_routing] def choose_cheap_model_route(user_message: str, routing_config: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
- `agent/smart_model_routing.py:110` — [smart_model_routing] def resolve_turn_route(user_message: str, routing_config: Optional[Dict[str, Any]], primary: Dict[str, Any]) -> Dict[str, Any]:
- `gateway/run.py:1271` — [gateway_provider_routing] def _load_provider_routing() -> dict:
- `gateway/run.py:1285` — [gateway_provider_routing] def _load_fallback_model() -> list | dict | None:
- `gateway/run.py:1306` — [gateway_provider_routing] def _load_smart_model_routing() -> dict:
- `cron/scheduler.py:684` — [cron_runtime_provider] pr = _cfg.get("provider_routing", {})
- `cron/scheduler.py:688` — [cron_runtime_provider] resolve_runtime_provider,
- `cron/scheduler.py:697` — [cron_runtime_provider] runtime = resolve_runtime_provider(**runtime_kwargs)
- `cron/scheduler.py:702` — [cron_runtime_provider] from agent.smart_model_routing import resolve_turn_route
- `cron/scheduler.py:703` — [cron_runtime_provider] turn_route = resolve_turn_route(
- `cron/scheduler.py:717` — [cron_runtime_provider] fallback_model = _cfg.get("fallback_providers") or _cfg.get("fallback_model") or None
- `cron/scheduler.py:746` — [cron_runtime_provider] fallback_model=fallback_model,
- `agent/auxiliary_client.py:1018` — [auxiliary_fallback_chain] def _get_provider_chain() -> List[tuple]:
- `agent/auxiliary_client.py:1107` — [auxiliary_fallback_chain] for label, try_fn in _get_provider_chain():
- `agent/auxiliary_client.py:1189` — [auxiliary_fallback_chain] # ── Step 2: aggregator / fallback chain ──────────────────────────────
- `agent/auxiliary_client.py:1191` — [auxiliary_fallback_chain] for label, try_fn in _get_provider_chain():
- `agent/auxiliary_client.py:2397` — [auxiliary_fallback_chain] # error, fall through to the fallback chain below.
- `agent/auxiliary_client.py:2417` — [auxiliary_fallback_chain] # auto (the default) = best-effort fallback chain. (#7559)
- `agent/auxiliary_client.py:2589` — [auxiliary_fallback_chain] # error, fall through to the fallback chain below.
- `tools/delegate_tool.py:662` — [delegate_runtime_provider] # bundle (base_url, api_key, api_mode) via the same runtime provider system
- `tools/delegate_tool.py:854` — [delegate_runtime_provider] provider) is resolved via the runtime provider system — the same path used
- `tools/delegate_tool.py:909` — [delegate_runtime_provider] from hermes_cli.runtime_provider import resolve_runtime_provider
- `tools/delegate_tool.py:910` — [delegate_runtime_provider] runtime = resolve_runtime_provider(requested=configured_provider)
- `hermes_state.py:115` — [session_db] class SessionDB:
- `batch_runner.py:320` — [trajectory_export] save_trajectories=False, # We handle saving ourselves
- `batch_runner.py:346` — [trajectory_export] trajectory = agent._convert_to_trajectory_format(
- `batch_runner.py:460` — [trajectory_export] trajectory_entry = {
- `batch_runner.py:474` — [trajectory_export] f.write(json.dumps(trajectory_entry, ensure_ascii=False) + "\n")
- `benchmarks/tool_call_benchmark.py:3` — [benchmark_suite] Tool-Calling Benchmark — Gemma 4 vs mimo-v2-pro regression test.
- `benchmarks/tool_call_benchmark.py:9` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py # full 100-call suite
- `benchmarks/tool_call_benchmark.py:10` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py --limit 10 # quick smoke test
- `benchmarks/tool_call_benchmark.py:11` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py --models nous # single model
- `benchmarks/tool_call_benchmark.py:12` — [benchmark_suite] python3 benchmarks/tool_call_benchmark.py --category file # single category
- `benchmarks/tool_call_benchmark.py:37` — [benchmark_suite] class ToolCall:
- `benchmarks/tool_call_benchmark.py:51` — [benchmark_suite] ToolCall("file-01", "file", "Read the file /tmp/test_bench.txt and show me its contents.",
- `benchmarks/tool_call_benchmark.py:53` — [benchmark_suite] ToolCall("file-02", "file", "Write 'hello benchmark' to /tmp/test_bench_out.txt",
- `benchmarks/tool_call_benchmark.py:55` — [benchmark_suite] ToolCall("file-03", "file", "Search for the word 'import' in all Python files in the current directory.",
- `benchmarks/tool_call_benchmark.py:57` — [benchmark_suite] ToolCall("file-04", "file", "Read lines 1-20 of /etc/hosts",
- `benchmarks/tool_call_benchmark.py:59` — [benchmark_suite] ToolCall("file-05", "file", "Patch /tmp/test_bench_out.txt: replace 'hello' with 'goodbye'",
- `benchmarks/tool_call_benchmark.py:61` — [benchmark_suite] ToolCall("file-06", "file", "Search for files matching *.py in the current directory.",
- `benchmarks/tool_call_benchmark.py:63` — [benchmark_suite] ToolCall("file-07", "file", "Read the first 10 lines of /etc/passwd",
- `benchmarks/tool_call_benchmark.py:65` — [benchmark_suite] ToolCall("file-08", "file", "Write a JSON config to /tmp/bench_config.json with key 'debug': true",
- `benchmarks/tool_call_benchmark.py:67` — [benchmark_suite] ToolCall("file-09", "file", "Search for 'def test_' in Python test files.",
- `benchmarks/tool_call_benchmark.py:69` — [benchmark_suite] ToolCall("file-10", "file", "Read /tmp/bench_config.json and tell me what's in it.",
- `benchmarks/tool_call_benchmark.py:71` — [benchmark_suite] ToolCall("file-11", "file", "Create a file /tmp/bench_readme.md with one line: '# Benchmark'",
- `benchmarks/tool_call_benchmark.py:73` — [benchmark_suite] ToolCall("file-12", "file", "Search for 'TODO' comments in all .py files.",
- `benchmarks/tool_call_benchmark.py:75` — [benchmark_suite] ToolCall("file-13", "file", "Read /tmp/bench_readme.md",
- `benchmarks/tool_call_benchmark.py:77` — [benchmark_suite] ToolCall("file-14", "file", "Patch /tmp/bench_readme.md: replace '# Benchmark' with '# Tool Benchmark'",
- `benchmarks/tool_call_benchmark.py:78` — [benchmark_suite] "patch", "Tool Benchmark"),
- `benchmarks/tool_call_benchmark.py:79` — [benchmark_suite] ToolCall("file-15", "file", "Write a Python one-liner to /tmp/bench_hello.py that prints hello.",
- `benchmarks/tool_call_benchmark.py:81` — [benchmark_suite] ToolCall("file-16", "file", "Search for all .json files in /tmp/.",
- `benchmarks/tool_call_benchmark.py:83` — [benchmark_suite] ToolCall("file-17", "file", "Read /tmp/bench_hello.py and verify it has print('hello').",
- `benchmarks/tool_call_benchmark.py:85` — [benchmark_suite] ToolCall("file-18", "file", "Patch /tmp/bench_hello.py to print 'hello world' instead of 'hello'.",
- `benchmarks/tool_call_benchmark.py:87` — [benchmark_suite] ToolCall("file-19", "file", "List files matching 'bench*' in /tmp/.",
- `benchmarks/tool_call_benchmark.py:89` — [benchmark_suite] ToolCall("file-20", "file", "Read /tmp/test_bench.txt again and summarize its contents.",
- `benchmarks/tool_call_benchmark.py:93` — [benchmark_suite] ToolCall("term-01", "terminal", "Run `echo hello world` in the terminal.",
- `benchmarks/tool_call_benchmark.py:95` — [benchmark_suite] ToolCall("term-02", "terminal", "Run `date` to get the current date and time.",
- `benchmarks/tool_call_benchmark.py:97` — [benchmark_suite] ToolCall("term-03", "terminal", "Run `uname -a` to get system information.",
- `benchmarks/tool_call_benchmark.py:99` — [benchmark_suite] ToolCall("term-04", "terminal", "Run `pwd` to show the current directory.",
- `benchmarks/tool_call_benchmark.py:101` — [benchmark_suite] ToolCall("term-05", "terminal", "Run `ls -la /tmp/ | head -20` to list temp files.",
- `benchmarks/tool_call_benchmark.py:103` — [benchmark_suite] ToolCall("term-06", "terminal", "Run `whoami` to show the current user.",
- `benchmarks/tool_call_benchmark.py:105` — [benchmark_suite] ToolCall("term-07", "terminal", "Run `df -h` to show disk usage.",
- `benchmarks/tool_call_benchmark.py:107` — [benchmark_suite] ToolCall("term-08", "terminal", "Run `python3 --version` to check Python version.",
- `benchmarks/tool_call_benchmark.py:109` — [benchmark_suite] ToolCall("term-09", "terminal", "Run `cat /etc/hostname` to get the hostname.",
- `benchmarks/tool_call_benchmark.py:111` — [benchmark_suite] ToolCall("term-10", "terminal", "Run `uptime` to see system uptime.",
- `benchmarks/tool_call_benchmark.py:113` — [benchmark_suite] ToolCall("term-11", "terminal", "Run `env | grep PATH` to show the PATH variable.",
- `benchmarks/tool_call_benchmark.py:115` — [benchmark_suite] ToolCall("term-12", "terminal", "Run `wc -l /etc/passwd` to count lines.",
- `benchmarks/tool_call_benchmark.py:117` — [benchmark_suite] ToolCall("term-13", "terminal", "Run `echo $SHELL` to show the current shell.",
- `benchmarks/tool_call_benchmark.py:119` — [benchmark_suite] ToolCall("term-14", "terminal", "Run `free -h || vm_stat` to check memory usage.",
- `benchmarks/tool_call_benchmark.py:121` — [benchmark_suite] ToolCall("term-15", "terminal", "Run `id` to show user and group IDs.",
- `benchmarks/tool_call_benchmark.py:123` — [benchmark_suite] ToolCall("term-16", "terminal", "Run `hostname` to get the machine hostname.",
- `benchmarks/tool_call_benchmark.py:125` — [benchmark_suite] ToolCall("term-17", "terminal", "Run `echo {1..5}` to test brace expansion.",
- `benchmarks/tool_call_benchmark.py:127` — [benchmark_suite] ToolCall("term-18", "terminal", "Run `seq 1 5` to generate a number sequence.",
- `benchmarks/tool_call_benchmark.py:129` — [benchmark_suite] ToolCall("term-19", "terminal", "Run `python3 -c 'print(2+2)'` to compute 2+2.",
- `benchmarks/tool_call_benchmark.py:131` — [benchmark_suite] ToolCall("term-20", "terminal", "Run `ls -d /tmp/bench* 2>/dev/null | wc -l` to count bench files.",
- `benchmarks/tool_call_benchmark.py:135` — [benchmark_suite] ToolCall("code-01", "code", "Execute a Python script that computes factorial of 10.",
- `benchmarks/tool_call_benchmark.py:137` — [benchmark_suite] ToolCall("code-02", "code", "Run Python to read /tmp/test_bench.txt and count its words.",
- `benchmarks/tool_call_benchmark.py:139` — [benchmark_suite] ToolCall("code-03", "code", "Execute Python to generate the first 20 Fibonacci numbers.",
- `benchmarks/tool_call_benchmark.py:141` — [benchmark_suite] ToolCall("code-04", "code", "Run Python to parse JSON from a string and print keys.",
- `benchmarks/tool_call_benchmark.py:143` — [benchmark_suite] ToolCall("code-05", "code", "Execute Python to list all files in /tmp/ matching 'bench*'.",
- `benchmarks/tool_call_benchmark.py:145` — [benchmark_suite] ToolCall("code-06", "code", "Run Python to compute the sum of squares from 1 to 100.",
- `benchmarks/tool_call_benchmark.py:147` — [benchmark_suite] ToolCall("code-07", "code", "Execute Python to check if 'racecar' is a palindrome.",
- `benchmarks/tool_call_benchmark.py:149` — [benchmark_suite] ToolCall("code-08", "code", "Run Python to create a CSV string with 5 rows of sample data.",
- `benchmarks/tool_call_benchmark.py:151` — [benchmark_suite] ToolCall("code-09", "code", "Execute Python to sort a list [5,2,8,1,9] and print the result.",
- `benchmarks/tool_call_benchmark.py:153` — [benchmark_suite] ToolCall("code-10", "code", "Run Python to count lines in /etc/passwd.",
- `benchmarks/tool_call_benchmark.py:155` — [benchmark_suite] ToolCall("code-11", "code", "Execute Python to hash the string 'benchmark' with SHA256.",
- `benchmarks/tool_call_benchmark.py:157` — [benchmark_suite] ToolCall("code-12", "code", "Run Python to get the current UTC timestamp.",
- `benchmarks/tool_call_benchmark.py:159` — [benchmark_suite] ToolCall("code-13", "code", "Execute Python to convert 'hello world' to uppercase and reverse it.",
- `benchmarks/tool_call_benchmark.py:161` — [benchmark_suite] ToolCall("code-14", "code", "Run Python to create a dictionary of system info (platform, python version).",
- `benchmarks/tool_call_benchmark.py:163` — [benchmark_suite] ToolCall("code-15", "code", "Execute Python to check internet connectivity by resolving google.com.",
- `benchmarks/tool_call_benchmark.py:167` — [benchmark_suite] ToolCall("deleg-01", "delegate", "Use a subagent to find all .log files in /tmp/.",
- `benchmarks/tool_call_benchmark.py:169` — [benchmark_suite] ToolCall("deleg-02", "delegate", "Delegate to a subagent: what is 15 * 37?",
- `benchmarks/tool_call_benchmark.py:171` — [benchmark_suite] ToolCall("deleg-03", "delegate", "Use a subagent to check if Python 3 is installed and its version.",
- `benchmarks/tool_call_benchmark.py:173` — [benchmark_suite] ToolCall("deleg-04", "delegate", "Delegate: read /tmp/test_bench.txt and summarize it in one sentence.",
- `benchmarks/tool_call_benchmark.py:175` — [benchmark_suite] ToolCall("deleg-05", "delegate", "Use a subagent to list the contents of /tmp/ directory.",
- `benchmarks/tool_call_benchmark.py:177` — [benchmark_suite] ToolCall("deleg-06", "delegate", "Delegate: count the number of .py files in the current directory.",
- `benchmarks/tool_call_benchmark.py:179` — [benchmark_suite] ToolCall("deleg-07", "delegate", "Use a subagent to check disk space with df -h.",
- `benchmarks/tool_call_benchmark.py:181` — [benchmark_suite] ToolCall("deleg-08", "delegate", "Delegate: what OS are we running on?",
- `benchmarks/tool_call_benchmark.py:183` — [benchmark_suite] ToolCall("deleg-09", "delegate", "Use a subagent to find the hostname of this machine.",
- `benchmarks/tool_call_benchmark.py:185` — [benchmark_suite] ToolCall("deleg-10", "delegate", "Delegate: create a temp file /tmp/bench_deleg.txt with 'done'.",
- `benchmarks/tool_call_benchmark.py:189` — [benchmark_suite] ToolCall("todo-01", "todo", "Add a todo item: 'Run benchmark suite'",
- `benchmarks/tool_call_benchmark.py:190` — [benchmark_suite] "todo", "benchmark"),
- `benchmarks/tool_call_benchmark.py:191` — [benchmark_suite] ToolCall("todo-02", "todo", "Show me the current todo list.",
- `benchmarks/tool_call_benchmark.py:193` — [benchmark_suite] ToolCall("todo-03", "todo", "Mark the first todo item as completed.",
- `benchmarks/tool_call_benchmark.py:195` — [benchmark_suite] ToolCall("todo-04", "todo", "Add a todo: 'Review benchmark results' with status pending.",
- `benchmarks/tool_call_benchmark.py:197` — [benchmark_suite] ToolCall("todo-05", "todo", "Clear all completed todos.",
- `benchmarks/tool_call_benchmark.py:199` — [benchmark_suite] ToolCall("todo-06", "memory", "Save this to memory: 'benchmark ran on {date}'".format(
- `benchmarks/tool_call_benchmark.py:201` — [benchmark_suite] "memory", "benchmark"),
- `benchmarks/tool_call_benchmark.py:202` — [benchmark_suite] ToolCall("todo-07", "memory", "Search memory for 'benchmark'.",
- `benchmarks/tool_call_benchmark.py:203` — [benchmark_suite] "memory", "benchmark"),
- `benchmarks/tool_call_benchmark.py:204` — [benchmark_suite] ToolCall("todo-08", "memory", "Add a memory note: 'test models are gemma-4 and mimo-v2-pro'.",
- `benchmarks/tool_call_benchmark.py:206` — [benchmark_suite] ToolCall("todo-09", "todo", "Add three todo items: 'analyze', 'report', 'cleanup'.",
- `benchmarks/tool_call_benchmark.py:208` — [benchmark_suite] ToolCall("todo-10", "memory", "Search memory for any notes about models.",
- `benchmarks/tool_call_benchmark.py:212` — [benchmark_suite] ToolCall("skill-01", "skills", "List all available skills.",
- `benchmarks/tool_call_benchmark.py:214` — [benchmark_suite] ToolCall("skill-02", "skills", "View the skill called 'test-driven-development'.",
- `benchmarks/tool_call_benchmark.py:216` — [benchmark_suite] ToolCall("skill-03", "skills", "Search for skills related to 'git'.",
- `benchmarks/tool_call_benchmark.py:218` — [benchmark_suite] ToolCall("skill-04", "skills", "View the 'code-review' skill.",
- `benchmarks/tool_call_benchmark.py:220` — [benchmark_suite] ToolCall("skill-05", "skills", "List all skills in the 'devops' category.",
- `benchmarks/tool_call_benchmark.py:222` — [benchmark_suite] ToolCall("skill-06", "skills", "View the 'systematic-debugging' skill.",
- `benchmarks/tool_call_benchmark.py:224` — [benchmark_suite] ToolCall("skill-07", "skills", "Search for skills about 'testing'.",
- `benchmarks/tool_call_benchmark.py:226` — [benchmark_suite] ToolCall("skill-08", "skills", "View the 'writing-plans' skill.",
- `benchmarks/tool_call_benchmark.py:228` — [benchmark_suite] ToolCall("skill-09", "skills", "List skills in 'software-development' category.",
- `benchmarks/tool_call_benchmark.py:230` — [benchmark_suite] ToolCall("skill-10", "skills", "View the 'pr-review-discipline' skill.",
- `benchmarks/tool_call_benchmark.py:234` — [benchmark_suite] ToolCall("file-21", "file", "Write a Python snippet to /tmp/bench_sort.py that sorts [3,1,2].",
- `benchmarks/tool_call_benchmark.py:236` — [benchmark_suite] ToolCall("file-22", "file", "Read /tmp/bench_sort.py back and confirm it exists.",
- `benchmarks/tool_call_benchmark.py:238` — [benchmark_suite] ToolCall("file-23", "file", "Search for 'class' in all .py files in the benchmarks directory.",
- `benchmarks/tool_call_benchmark.py:240` — [benchmark_suite] ToolCall("term-21", "terminal", "Run `cat /etc/os-release 2>/dev/null || sw_vers 2>/dev/null` for OS info.",
- `benchmarks/tool_call_benchmark.py:242` — [benchmark_suite] ToolCall("term-22", "terminal", "Run `nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null` for CPU count.",
- `benchmarks/tool_call_benchmark.py:244` — [benchmark_suite] ToolCall("code-16", "code", "Execute Python to flatten a nested list [[1,2],[3,4],[5]].",
- `benchmarks/tool_call_benchmark.py:246` — [benchmark_suite] ToolCall("code-17", "code", "Run Python to check if a number 17 is prime.",
- `benchmarks/tool_call_benchmark.py:248` — [benchmark_suite] ToolCall("deleg-11", "delegate", "Delegate: what is the current working directory?",
- `benchmarks/tool_call_benchmark.py:250` — [benchmark_suite] ToolCall("todo-11", "todo", "Add a todo: 'Finalize benchmark report' status pending.",
- `benchmarks/tool_call_benchmark.py:252` — [benchmark_suite] ToolCall("todo-12", "memory", "Store fact: 'benchmark categories: file, terminal, code, delegate, todo, memory, skills'.",
- `benchmarks/tool_call_benchmark.py:254` — [benchmark_suite] ToolCall("skill-11", "skills", "Search for skills about 'deployment'.",
- `benchmarks/tool_call_benchmark.py:256` — [benchmark_suite] ToolCall("skill-12", "skills", "View the 'gitea-burn-cycle' skill.",
- `benchmarks/tool_call_benchmark.py:258` — [benchmark_suite] ToolCall("skill-13", "skills", "List all available skill categories.",
- `benchmarks/tool_call_benchmark.py:260` — [benchmark_suite] ToolCall("skill-14", "skills", "Search for skills related to 'memory'.",
- `benchmarks/tool_call_benchmark.py:262` — [benchmark_suite] ToolCall("skill-15", "skills", "View the 'mimo-swarm' skill.",
- `benchmarks/tool_call_benchmark.py:311` — [benchmark_suite] """Create prerequisite files for the benchmark."""
- `benchmarks/tool_call_benchmark.py:313` — [benchmark_suite] "This is a benchmark test file.\n"
- `benchmarks/tool_call_benchmark.py:349` — [benchmark_suite] "You are a benchmark test runner. Execute the user's request by calling "
- `benchmarks/tool_call_benchmark.py:406` — [benchmark_suite] """Generate markdown benchmark report."""
- `benchmarks/tool_call_benchmark.py:428` — [benchmark_suite] f"# Tool-Calling Benchmark Report",
- `benchmarks/tool_call_benchmark.py:535` — [benchmark_suite] parser = argparse.ArgumentParser(description="Tool-calling benchmark")
- `benchmarks/tool_call_benchmark.py:544` — [benchmark_suite] help="Output report path (default: benchmarks/gemma4-tool-calling-YYYY-MM-DD.md)")
- `benchmarks/tool_call_benchmark.py:565` — [benchmark_suite] output_path = Path(args.output) if args.output else REPO_ROOT / "benchmarks" / f"gemma4-tool-calling-{date_str}.md"
- `benchmarks/tool_call_benchmark.py:575` — [benchmark_suite] print(f"Benchmark: {len(suite)} tests × {len(model_specs)} models = {len(suite) * len(model_specs)} calls")

## Suggested next slice

1. Build an exporter that emits SessionDB + trajectory data into a TensorZero-friendly offline dataset (a rough sketch follows this document).
2. Define percentage-based canary controls before attempting any gateway replacement.
3. Keep Hermes routing authoritative until TensorZero proves parity across CLI, gateway, cron, auxiliary, and delegation surfaces.
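For step 1 of the suggested next slice, a hypothetical sketch of what the exporter could look like. The input and output field names here are assumptions for illustration only; the real SessionDB and trajectory schemas live in hermes_state.py and batch_runner.py, and the record shape an offline TensorZero loop expects would need to be confirmed against TensorZero's own docs.

# Hypothetical exporter sketch -- field names are assumed, not taken
# from the actual Hermes schemas.
import json
from pathlib import Path

def export_trajectories(src: Path, dst: Path) -> int:
    """Flatten JSONL trajectory entries into a dataset-style JSONL file."""
    count = 0
    with src.open(encoding="utf-8") as fin, dst.open("w", encoding="utf-8") as fout:
        for line in fin:
            entry = json.loads(line)  # one trajectory entry per line
            record = {
                "episode_id": entry.get("session_id"),   # assumed field
                "input": entry.get("messages"),          # assumed field
                "output": entry.get("final_response"),   # assumed field
                "tags": {"source": "hermes", "issue": "860"},
            }
            fout.write(json.dumps(record, ensure_ascii=False) + "\n")
            count += 1
    return count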
gateway/run.py
@@ -28,8 +28,6 @@ from pathlib import Path
from datetime import datetime
from typing import Dict, Optional, Any, List

from agent.account_usage import fetch_account_usage, render_account_usage_lines

# ---------------------------------------------------------------------------
# SSL certificate auto-detection for NixOS and other non-standard systems.
# Must run BEFORE any HTTP library (discord, aiohttp, etc.) is imported.
@@ -6483,38 +6481,6 @@ class GatewayRunner:
        if cached:
            agent = cached[0]

        # Resolve provider/base_url/api_key for the account-usage fetch.
        # Prefer the live agent; fall back to persisted billing data on the
        # SessionDB row so `/usage` still returns account info between turns
        # when no agent is resident.
        provider = getattr(agent, "provider", None) if agent and agent is not _AGENT_PENDING_SENTINEL else None
        base_url = getattr(agent, "base_url", None) if agent and agent is not _AGENT_PENDING_SENTINEL else None
        api_key = getattr(agent, "api_key", None) if agent and agent is not _AGENT_PENDING_SENTINEL else None
        if not provider and getattr(self, "_session_db", None) is not None:
            try:
                _entry_for_billing = self.session_store.get_or_create_session(source)
                persisted = self._session_db.get_session(_entry_for_billing.session_id) or {}
            except Exception:
                persisted = {}
            provider = provider or persisted.get("billing_provider")
            base_url = base_url or persisted.get("billing_base_url")

        # Fetch account usage off the event loop so slow provider APIs don't
        # block the gateway. Failures are non-fatal -- account_lines stays [].
        account_lines: list[str] = []
        if provider:
            try:
                account_snapshot = await asyncio.to_thread(
                    fetch_account_usage,
                    provider,
                    base_url=base_url,
                    api_key=api_key,
                )
            except Exception:
                account_snapshot = None
            if account_snapshot:
                account_lines = render_account_usage_lines(account_snapshot, markdown=True)

        if agent and hasattr(agent, "session_total_tokens") and agent.session_api_calls > 0:
            lines = []

@@ -6572,10 +6538,6 @@ class GatewayRunner:
            if ctx.compression_count:
                lines.append(f"Compressions: {ctx.compression_count}")

            if account_lines:
                lines.append("")
                lines.extend(account_lines)

            return "\n".join(lines)

        # No agent at all -- check session history for a rough count
@@ -6585,18 +6547,12 @@ class GatewayRunner:
            from agent.model_metadata import estimate_messages_tokens_rough
            msgs = [m for m in history if m.get("role") in ("user", "assistant") and m.get("content")]
            approx = estimate_messages_tokens_rough(msgs)
            lines = [
                "📊 **Session Info**",
                f"Messages: {len(msgs)}",
                f"Estimated context: ~{approx:,} tokens",
                "_(Detailed usage available after the first agent response)_",
            ]
            if account_lines:
                lines.append("")
                lines.extend(account_lines)
            return "\n".join(lines)
            if account_lines:
                return "\n".join(account_lines)
            return (
                f"📊 **Session Info**\n"
                f"Messages: {len(msgs)}\n"
                f"Estimated context: ~{approx:,} tokens\n"
                f"_(Detailed usage available after the first agent response)_"
            )
        return "No usage data available for this session."

    async def _handle_insights_command(self, event: MessageEvent) -> str:
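The removed gateway block used the async counterpart of the CLI's executor pattern: push the blocking fetch onto a worker thread with asyncio.to_thread so the event loop keeps serving other messages. A minimal, self-contained sketch under assumed names (`blocking_fetch` and `handle_usage` are stand-ins, not real gateway functions):

# Sketch only: keep a blocking call off the asyncio event loop.
import asyncio
import time

def blocking_fetch(provider: str) -> str:
    time.sleep(0.1)  # simulates a synchronous HTTP call
    return f"usage for {provider}"

async def handle_usage() -> str:
    try:
        snapshot = await asyncio.to_thread(blocking_fetch, "openrouter")
    except Exception:
        snapshot = None  # failures are non-fatal, as in the removed code
    return snapshot or "no account data"

print(asyncio.run(handle_usage()))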
318 scripts/tensorzero_eval_packet.py Normal file
@@ -0,0 +1,318 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Generate a grounded TensorZero evaluation packet for Hermes.
|
||||
|
||||
This script inventories the current Hermes routing/evaluation surfaces, then
|
||||
builds a markdown packet assessing how much of issue #860 can be satisfied by
|
||||
TensorZero and where the migration risk still lives.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
from typing import Iterable
|
||||
|
||||
ISSUE_NUMBER = 860
|
||||
ISSUE_TITLE = "tensorzero LLMOps platform evaluation"
|
||||
ISSUE_URL = "https://forge.alexanderwhitestone.com/Timmy_Foundation/hermes-agent/issues/860"
|
||||
DEFAULT_OUTPUT = Path("docs/evaluations/tensorzero-860-evaluation.md")
|
||||
DEFAULT_JSON_OUTPUT = Path("docs/evaluations/tensorzero-860-evaluation.json")
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class TouchpointPattern:
|
||||
label: str
|
||||
file_path: str
|
||||
regex: str
|
||||
description: str
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class Touchpoint:
|
||||
label: str
|
||||
file_path: str
|
||||
line_number: int
|
||||
matched_text: str
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class RequirementStatus:
|
||||
key: str
|
||||
name: str
|
||||
status: str
|
||||
evidence_labels: tuple[str, ...]
|
||||
summary: str
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class EvaluationReport:
|
||||
issue_number: int
|
||||
issue_title: str
|
||||
issue_url: str
|
||||
recommendation: str
|
||||
touchpoints: tuple[Touchpoint, ...]
|
||||
requirements: tuple[RequirementStatus, ...]
|
||||
|
||||
|
||||
PATTERNS: tuple[TouchpointPattern, ...] = (
|
||||
TouchpointPattern(
|
||||
label="fallback_chain",
|
||||
file_path="run_agent.py",
|
||||
regex=r"_fallback_chain|fallback_providers|fallback_model",
|
||||
description="Primary agent fallback-provider chain in the core conversation loop.",
|
||||
),
|
||||
TouchpointPattern(
|
||||
label="provider_routing_config",
|
||||
file_path="cli.py",
|
||||
regex=r"provider_routing|fallback_providers|smart_model_routing",
|
||||
description="CLI-owned provider routing and fallback configuration surfaces.",
|
||||
),
|
||||
TouchpointPattern(
|
||||
label="runtime_provider",
|
||||
file_path="hermes_cli/runtime_provider.py",
|
||||
regex=r"def resolve_runtime_provider|def resolve_requested_provider",
|
||||
description="Central runtime provider resolution for CLI, gateway, cron, and helpers.",
|
||||
),
|
||||
TouchpointPattern(
|
||||
label="smart_model_routing",
|
||||
file_path="agent/smart_model_routing.py",
|
||||
regex=r"def resolve_turn_route|def choose_cheap_model_route",
|
||||
description="Cheap-vs-strong turn routing that TensorZero would need to absorb or replace.",
|
||||
),
|
||||
TouchpointPattern(
|
||||
label="gateway_provider_routing",
|
||||
file_path="gateway/run.py",
|
||||
regex=r"def _load_provider_routing|def _load_fallback_model|def _load_smart_model_routing",
|
||||
description="Gateway-specific loading of routing, fallback, and smart-model policies.",
|
||||
),
|
||||
TouchpointPattern(
|
||||
label="cron_runtime_provider",
|
||||
file_path="cron/scheduler.py",
|
||||
regex=r"resolve_runtime_provider|resolve_turn_route|provider_routing|fallback_model",
|
||||
description="Cron execution path that re-resolves providers and routing on every run.",
|
||||
),
|
||||
TouchpointPattern(
|
||||
label="auxiliary_fallback_chain",
|
||||
file_path="agent/auxiliary_client.py",
|
||||
regex=r"fallback chain|_get_provider_chain|provider chain",
|
||||
description="Auxiliary task routing/fallback chain outside the main inference path.",
|
||||
),
|
||||
TouchpointPattern(
|
||||
label="delegate_runtime_provider",
|
||||
file_path="tools/delegate_tool.py",
|
||||
regex=r"runtime provider system|resolve the full credential bundle|resolve_runtime_provider",
|
||||
description="Subagent/delegation routing path that would also need TensorZero parity.",
|
||||
),
|
||||
TouchpointPattern(
|
||||
label="session_db",
|
||||
file_path="hermes_state.py",
|
||||
regex=r"class SessionDB",
|
||||
description="Session persistence surface that could feed TensorZero optimization/eval data.",
|
||||
),
|
||||
TouchpointPattern(
|
||||
label="trajectory_export",
|
||||
file_path="batch_runner.py",
|
||||
regex=r"trajectory_entry|save_trajectories|_convert_to_trajectory_format",
|
||||
description="Trajectory export surface for offline optimization and replay data.",
|
||||
),
|
||||
TouchpointPattern(
|
||||
label="benchmark_suite",
|
||||
file_path="benchmarks/tool_call_benchmark.py",
|
||||
regex=r"ToolCall\(|class ToolCall|benchmark",
|
||||
description="Existing benchmark/evaluation harness that could map to TensorZero experiments.",
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def _iter_matches(pattern: TouchpointPattern, text: str) -> Iterable[Touchpoint]:
|
||||
regex = re.compile(pattern.regex, re.IGNORECASE)
|
||||
for line_number, line in enumerate(text.splitlines(), start=1):
|
||||
if regex.search(line):
|
||||
yield Touchpoint(
|
||||
label=pattern.label,
|
||||
file_path=pattern.file_path,
|
||||
line_number=line_number,
|
||||
matched_text=line.strip(),
|
||||
)
|
||||
|
||||
|
||||
def scan_touchpoints(repo_root: Path) -> list[Touchpoint]:
|
||||
touchpoints: list[Touchpoint] = []
|
||||
for pattern in PATTERNS:
|
||||
path = repo_root / pattern.file_path
|
||||
if not path.exists():
|
||||
continue
|
||||
text = path.read_text(encoding="utf-8")
|
||||
touchpoints.extend(_iter_matches(pattern, text))
|
||||
return touchpoints
|
||||
|
||||
|
||||
def build_requirement_matrix(touchpoints: list[Touchpoint]) -> list[RequirementStatus]:
|
||||
labels = {tp.label for tp in touchpoints}
|
||||
|
||||
matrix: list[RequirementStatus] = []
|
||||
gateway_labels = (
|
||||
"fallback_chain",
|
||||
"runtime_provider",
|
||||
"gateway_provider_routing",
|
||||
"cron_runtime_provider",
|
||||
"auxiliary_fallback_chain",
|
||||
"delegate_runtime_provider",
|
||||
)
|
||||
gateway_hits = tuple(label for label in gateway_labels if label in labels)
|
||||
gateway_status = "partial" if len(gateway_hits) >= 4 else "gap"
|
||||
gateway_summary = (
|
||||
"Hermes already spreads provider routing across core agent, runtime provider, gateway, cron, auxiliary, and delegation seams; "
|
||||
"TensorZero would need parity across all of them before it can replace the gateway layer."
|
||||
if gateway_hits else
|
||||
"No grounded routing surfaces were found for a gateway replacement assessment."
|
||||
)
|
||||
matrix.append(RequirementStatus("gateway_replacement", "Gateway replacement scope", gateway_status, gateway_hits, gateway_summary))
|
||||
|
||||
config_labels = (
|
||||
"provider_routing_config",
|
||||
"runtime_provider",
|
||||
"smart_model_routing",
|
||||
"fallback_chain",
|
||||
)
|
||||
config_hits = tuple(label for label in config_labels if label in labels)
|
||||
config_status = "partial" if len(config_hits) >= 3 else "gap"
|
||||
config_summary = (
|
||||
"Hermes has multiple config concepts to migrate (`provider_routing`, `fallback_providers`, `smart_model_routing`, runtime provider resolution), "
|
||||
"so TensorZero is not a drop-in config swap."
|
||||
if config_hits else
|
||||
"No current config migration surface was found."
|
||||
)
|
||||
matrix.append(RequirementStatus("config_migration", "Config migration", config_status, config_hits, config_summary))
|
||||
|
||||
canary_hits: tuple[str, ...] = tuple()
|
||||
canary_summary = (
|
||||
"The repo shows semantic routing and fallback, but no grounded 10% traffic-split canary mechanism. "
|
||||
"A TensorZero cutover would need new percentage-based rollout controls and observability hooks."
|
||||
)
|
||||
matrix.append(RequirementStatus("canary_rollout", "10% traffic canary", "gap", canary_hits, canary_summary))
|
||||
|
||||
session_labels = ("session_db", "trajectory_export")
|
||||
session_hits = tuple(label for label in session_labels if label in labels)
|
||||
session_status = "partial" if len(session_hits) == len(session_labels) else "gap"
|
||||
session_summary = (
|
||||
"Hermes already has SessionDB and trajectory export surfaces that can feed offline optimization data, "
|
||||
"but not a TensorZero-native ingestion path yet."
|
||||
if session_hits else
|
||||
"No session-data surface was found for prompt optimization."
|
||||
)
|
||||
matrix.append(RequirementStatus("session_feedback", "Session data for prompt optimization", session_status, session_hits, session_summary))
|
||||
|
||||
eval_labels = ("benchmark_suite", "trajectory_export")
|
||||
eval_hits = tuple(label for label in eval_labels if label in labels)
|
||||
eval_status = "partial" if "benchmark_suite" in eval_hits else "gap"
|
||||
eval_summary = (
|
||||
"Hermes already has benchmark/trajectory machinery that can seed TensorZero A/B evaluation, "
|
||||
"but no integrated TensorZero experiment runner or live evaluation gateway."
|
||||
if eval_hits else
|
||||
"No evaluation harness was found to support TensorZero A/B testing."
|
||||
)
|
||||
matrix.append(RequirementStatus("evaluation_suite", "Evaluation suite / A/B testing", eval_status, eval_hits, eval_summary))
|
||||
|
||||
return matrix
|
||||
|
||||
|
||||
def build_report(touchpoints: list[Touchpoint], requirement_matrix: list[RequirementStatus]) -> EvaluationReport:
|
||||
recommendation = (
|
||||
"Not ready for direct replacement. Recommend a shadow-evaluation phase first: keep Hermes routing live, "
|
||||
"inventory the migration seams, export SessionDB/trajectory data into an offline TensorZero experiment loop, "
|
||||
"and only design a canary gateway once percentage-based rollout controls exist."
|
||||
)
|
||||
return EvaluationReport(
|
||||
issue_number=ISSUE_NUMBER,
|
||||
issue_title=ISSUE_TITLE,
|
||||
issue_url=ISSUE_URL,
|
||||
recommendation=recommendation,
|
||||
touchpoints=tuple(touchpoints),
|
||||
requirements=tuple(requirement_matrix),
|
||||
)


def build_markdown(report: EvaluationReport) -> str:
    lines: list[str] = []
    lines.append("# TensorZero Evaluation Packet")
    lines.append("")
    lines.append(f"Issue #{report.issue_number}: [{report.issue_title}]({report.issue_url})")
    lines.append("")
    lines.append("## Scope")
    lines.append("")
    lines.append("This packet evaluates TensorZero as a possible replacement for Hermes' custom provider-routing stack.")
    lines.append("It is intentionally grounded in the current repo state rather than a speculative cutover plan.")
    lines.append("")
    lines.append("## Issue requirements being evaluated")
    lines.append("")
    lines.append("- Deploy tensorzero gateway (Rust binary)")
    lines.append("- Migrate provider routing config")
    lines.append("- Test with canary (10% traffic) before full cutover")
    lines.append("- Feed session data for prompt optimization")
    lines.append("- Evaluation suite for A/B testing models")
    lines.append("")
    lines.append("## Recommendation")
    lines.append("")
    lines.append(report.recommendation)
    lines.append("")
    lines.append("## Requirement matrix")
    lines.append("")
    lines.append("| Requirement | Status | Evidence labels | Summary |")
    lines.append("| --- | --- | --- | --- |")
    for row in report.requirements:
        evidence = ", ".join(row.evidence_labels) if row.evidence_labels else "—"
        lines.append(f"| {row.name} | {row.status} | {evidence} | {row.summary} |")
    lines.append("")
    lines.append("## Grounded Hermes touchpoints")
    lines.append("")
    if report.touchpoints:
        for tp in report.touchpoints:
            lines.append(f"- `{tp.file_path}:{tp.line_number}` — [{tp.label}] {tp.matched_text}")
    else:
        lines.append("- No routing/evaluation touchpoints were found.")
    lines.append("")
    lines.append("## Suggested next slice")
    lines.append("")
    lines.append("1. Build an exporter that emits SessionDB + trajectory data into a TensorZero-friendly offline dataset.")
    lines.append("2. Define percentage-based canary controls before attempting any gateway replacement.")
    lines.append("3. Keep Hermes routing authoritative until TensorZero proves parity across CLI, gateway, cron, auxiliary, and delegation surfaces.")
    lines.append("")
    return "\n".join(lines).rstrip() + "\n"
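
# Minimal usage sketch (illustrative only; the names match this module):
#     report = build_report(touchpoints, build_requirement_matrix(touchpoints))
#     markdown = build_markdown(report)
# The closing rstrip() + "\n" normalizes the output to end with exactly one newline.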


def write_outputs(report: EvaluationReport, markdown_path: Path, json_path: Path | None = None) -> None:
    markdown_path.parent.mkdir(parents=True, exist_ok=True)
    markdown_path.write_text(build_markdown(report), encoding="utf-8")
    if json_path is not None:
        json_path.parent.mkdir(parents=True, exist_ok=True)
        json_path.write_text(json.dumps(asdict(report), indent=2), encoding="utf-8")
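
# json.dumps(asdict(report)) works because dataclasses.asdict() recurses into
# the nested Touchpoint/RequirementStatus dataclasses and the tuples around
# them, leaving plain dicts and lists that json can serialize.
# Round-trip sketch (illustrative):
#     data = json.loads(json_path.read_text(encoding="utf-8"))
#     assert data["recommendation"] == report.recommendation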


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Generate a grounded TensorZero evaluation packet for Hermes")
    parser.add_argument("--repo-root", default=".", help="Hermes repo root to scan")
    parser.add_argument("--output", default=str(DEFAULT_OUTPUT), help="Markdown output path")
    parser.add_argument("--json-output", default=str(DEFAULT_JSON_OUTPUT), help="Optional JSON output path")
    return parser.parse_args()


def main() -> int:
    args = parse_args()
    repo_root = Path(args.repo_root).resolve()
    touchpoints = scan_touchpoints(repo_root)
    matrix = build_requirement_matrix(touchpoints)
    report = build_report(touchpoints, matrix)
    json_output = Path(args.json_output) if args.json_output else None
    write_outputs(report, Path(args.output), json_output)
    print(f"Wrote {args.output}")
    if json_output is not None:
        print(f"Wrote {json_output}")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
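
# Example invocation (the output paths below are placeholders; the real
# defaults are DEFAULT_OUTPUT and DEFAULT_JSON_OUTPUT defined earlier):
#     python scripts/tensorzero_eval_packet.py --repo-root . \
#         --output tensorzero_eval.md --json-output tensorzero_eval.json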
@@ -175,79 +175,3 @@ class TestUsageCachedAgent:
        result = await runner._handle_usage_command(event)

        assert "Cost: included" in result


class TestUsageAccountSection:
    """Account-limits section appended to /usage output."""

    @pytest.mark.asyncio
    async def test_usage_command_includes_account_section(self, monkeypatch):
        agent = _make_mock_agent(provider="openai-codex")
        agent.base_url = "https://chatgpt.com/backend-api/codex"
        agent.api_key = "unused"
        runner = _make_runner(SK, cached_agent=agent)
        event = MagicMock()

        monkeypatch.setattr(
            "gateway.run.fetch_account_usage",
            lambda provider, base_url=None, api_key=None: object(),
        )
        monkeypatch.setattr(
            "gateway.run.render_account_usage_lines",
            lambda snapshot, markdown=False: [
                "📈 **Account limits**",
                "Provider: openai-codex (Pro)",
                "Session: 85% remaining (15% used)",
            ],
        )
        with patch("agent.rate_limit_tracker.format_rate_limit_compact", return_value="RPM: 50/60"), \
                patch("agent.usage_pricing.estimate_usage_cost") as mock_cost:
            mock_cost.return_value = MagicMock(amount_usd=None, status="included")
            result = await runner._handle_usage_command(event)

        assert "📊 **Session Token Usage**" in result
        assert "📈 **Account limits**" in result
        assert "Provider: openai-codex (Pro)" in result

    @pytest.mark.asyncio
    async def test_usage_command_uses_persisted_provider_when_agent_not_running(self, monkeypatch):
        runner = _make_runner(SK)
        runner._session_db = MagicMock()
        runner._session_db.get_session.return_value = {
            "billing_provider": "openai-codex",
            "billing_base_url": "https://chatgpt.com/backend-api/codex",
        }
        session_entry = MagicMock()
        session_entry.session_id = "sess-1"
        runner.session_store.get_or_create_session.return_value = session_entry
        runner.session_store.load_transcript.return_value = [
            {"role": "user", "content": "earlier"},
        ]

        calls = {}

        async def _fake_to_thread(fn, *args, **kwargs):
            calls["args"] = args
            calls["kwargs"] = kwargs
            return fn(*args, **kwargs)

        monkeypatch.setattr("gateway.run.asyncio.to_thread", _fake_to_thread)
        monkeypatch.setattr(
            "gateway.run.fetch_account_usage",
            lambda provider, base_url=None, api_key=None: object(),
        )
        monkeypatch.setattr(
            "gateway.run.render_account_usage_lines",
            lambda snapshot, markdown=False: [
                "📈 **Account limits**",
                "Provider: openai-codex (Pro)",
            ],
        )

        event = MagicMock()
        result = await runner._handle_usage_command(event)

        assert calls["args"] == ("openai-codex",)
        assert calls["kwargs"]["base_url"] == "https://chatgpt.com/backend-api/codex"
        assert "📊 **Session Info**" in result
        assert "📈 **Account limits**" in result
@@ -1,203 +0,0 @@
from datetime import datetime, timezone

from agent.account_usage import (
    AccountUsageSnapshot,
    AccountUsageWindow,
    fetch_account_usage,
    render_account_usage_lines,
)


class _Response:
    def __init__(self, payload, status_code=200):
        self._payload = payload
        self.status_code = status_code

    def raise_for_status(self):
        if self.status_code >= 400:
            raise RuntimeError(f"HTTP {self.status_code}")

    def json(self):
        return self._payload


class _Client:
    def __init__(self, payload):
        self._payload = payload

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        return False

    def get(self, url, headers=None):
        return _Response(self._payload)


class _RoutingClient:
    def __init__(self, payloads):
        self._payloads = payloads

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        return False

    def get(self, url, headers=None):
        return _Response(self._payloads[url])


def test_fetch_account_usage_codex(monkeypatch):
    monkeypatch.setattr(
        "agent.account_usage.resolve_codex_runtime_credentials",
        lambda refresh_if_expiring=True: {
            "provider": "openai-codex",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "***",
        },
    )
    monkeypatch.setattr(
        "agent.account_usage._read_codex_tokens",
        lambda: {"tokens": {"account_id": "acct_123"}},
    )
    monkeypatch.setattr(
        "agent.account_usage.httpx.Client",
        lambda timeout=15.0: _Client(
            {
                "plan_type": "pro",
                "rate_limit": {
                    "primary_window": {
                        "used_percent": 15,
                        "reset_at": 1_900_000_000,
                        "limit_window_seconds": 18000,
                    },
                    "secondary_window": {
                        "used_percent": 40,
                        "reset_at": 1_900_500_000,
                        "limit_window_seconds": 604800,
                    },
                },
                "credits": {"has_credits": True, "balance": 12.5},
            }
        ),
    )

    snapshot = fetch_account_usage("openai-codex")

    assert snapshot is not None
    assert snapshot.plan == "Pro"
    assert len(snapshot.windows) == 2
    assert snapshot.windows[0].label == "Session"
    assert snapshot.windows[0].used_percent == 15.0
    assert snapshot.windows[0].reset_at == datetime.fromtimestamp(1_900_000_000, tz=timezone.utc)
    assert "Credits balance: $12.50" in snapshot.details
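
    # The fixture exercises three conversions asserted above: plan_type "pro"
    # is title-cased to "Pro", the epoch-second reset_at becomes an aware UTC
    # datetime, and the 12.5 credit balance renders as "$12.50".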


def test_render_account_usage_lines_includes_reset_and_provider():
    snapshot = AccountUsageSnapshot(
        provider="openai-codex",
        source="usage_api",
        fetched_at=datetime.now(timezone.utc),
        plan="Pro",
        windows=(
            AccountUsageWindow(
                label="Session",
                used_percent=25,
                reset_at=datetime.now(timezone.utc),
            ),
        ),
        details=("Credits balance: $9.99",),
    )
    lines = render_account_usage_lines(snapshot)

    assert lines[0] == "📈 Account limits"
    assert "openai-codex (Pro)" in lines[1]
    assert "Session: 75% remaining (25% used)" in lines[2]
    assert "Credits balance: $9.99" in lines[3]
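

def test_render_lines_use_complementary_percentages():
    # Companion sketch (assumes the same integer rendering as the 25%-used
    # case above): a window that is 40% used should render as 60% remaining.
    snapshot = AccountUsageSnapshot(
        provider="openai-codex",
        source="usage_api",
        fetched_at=datetime.now(timezone.utc),
        windows=(AccountUsageWindow(label="Session", used_percent=40),),
    )
    lines = render_account_usage_lines(snapshot)
    assert "Session: 60% remaining (40% used)" in lines[2]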


def test_fetch_account_usage_openrouter_uses_limit_remaining_and_ignores_deprecated_rate_limit(monkeypatch):
    monkeypatch.setattr(
        "agent.account_usage.resolve_runtime_provider",
        lambda requested, explicit_base_url=None, explicit_api_key=None: {
            "provider": "openrouter",
            "base_url": "https://openrouter.ai/api/v1",
            "api_key": "***",
        },
    )
    monkeypatch.setattr(
        "agent.account_usage.httpx.Client",
        lambda timeout=10.0: _RoutingClient(
            {
                "https://openrouter.ai/api/v1/credits": {
                    "data": {"total_credits": 300.0, "total_usage": 10.92}
                },
                "https://openrouter.ai/api/v1/key": {
                    "data": {
                        "limit": 100.0,
                        "limit_remaining": 70.0,
                        "limit_reset": "monthly",
                        "usage": 12.5,
                        "usage_daily": 0.5,
                        "usage_weekly": 2.0,
                        "usage_monthly": 8.0,
                        "rate_limit": {"requests": -1, "interval": "10s"},
                    }
                },
            }
        ),
    )

    snapshot = fetch_account_usage("openrouter")

    assert snapshot is not None
    assert snapshot.windows == (
        AccountUsageWindow(
            label="API key quota",
            used_percent=30.0,
            detail="$70.00 of $100.00 remaining • resets monthly",
        ),
    )
    assert "Credits balance: $289.08" in snapshot.details
    assert "API key usage: $12.50 total • $0.50 today • $2.00 this week • $8.00 this month" in snapshot.details
    assert all("-1 requests / 10s" not in line for line in render_account_usage_lines(snapshot))
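
    # Where the asserted numbers come from (illustrative): used_percent 30.0
    # is (limit - limit_remaining) / limit * 100 = (100 - 70) / 100 * 100,
    # and the credits balance is total_credits - total_usage
    # = 300.00 - 10.92 = 289.08. The deprecated per-key rate_limit block
    # ("-1 requests / 10s") never reaches the rendered output.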


def test_fetch_account_usage_openrouter_omits_quota_window_when_key_has_no_limit(monkeypatch):
    monkeypatch.setattr(
        "agent.account_usage.resolve_runtime_provider",
        lambda requested, explicit_base_url=None, explicit_api_key=None: {
            "provider": "openrouter",
            "base_url": "https://openrouter.ai/api/v1",
            "api_key": "***",
        },
    )
    monkeypatch.setattr(
        "agent.account_usage.httpx.Client",
        lambda timeout=10.0: _RoutingClient(
            {
                "https://openrouter.ai/api/v1/credits": {
                    "data": {"total_credits": 100.0, "total_usage": 25.5}
                },
                "https://openrouter.ai/api/v1/key": {
                    "data": {
                        "limit": None,
                        "limit_remaining": None,
                        "usage": 25.5,
                        "usage_daily": 1.25,
                        "usage_weekly": 4.5,
                        "usage_monthly": 18.0,
                    }
                },
            }
        ),
    )

    snapshot = fetch_account_usage("openrouter")

    assert snapshot is not None
    assert snapshot.windows == ()
    assert "Credits balance: $74.50" in snapshot.details
    assert "API key usage: $25.50 total • $1.25 today • $4.50 this week • $18.00 this month" in snapshot.details
149 tests/test_tensorzero_eval_packet.py Normal file
@@ -0,0 +1,149 @@
from pathlib import Path
import sys

SCRIPT_DIR = Path(__file__).resolve().parents[1] / "scripts"
sys.path.insert(0, str(SCRIPT_DIR))

import tensorzero_eval_packet as tz


def test_scan_touchpoints_finds_expected_matches(tmp_path):
    (tmp_path / "run_agent.py").write_text(
        "self._fallback_chain = []\n# Provider fallback chain\n"
    )
    (tmp_path / "hermes_cli").mkdir()
    (tmp_path / "hermes_cli" / "runtime_provider.py").write_text(
        "def resolve_runtime_provider():\n return {}\n"
    )
    (tmp_path / "agent").mkdir()
    (tmp_path / "agent" / "smart_model_routing.py").write_text(
        "def resolve_turn_route(user_message, routing_config, primary):\n return primary\n"
    )
    (tmp_path / "gateway").mkdir()
    (tmp_path / "gateway" / "run.py").write_text(
        "def _load_provider_routing():\n return {}\n"
    )
    (tmp_path / "cron").mkdir()
    (tmp_path / "cron" / "scheduler.py").write_text(
        "runtime = resolve_runtime_provider()\nturn_route = resolve_turn_route('x', {}, {})\n"
    )
    (tmp_path / "hermes_state.py").write_text("class SessionDB:\n pass\n")
    (tmp_path / "benchmarks").mkdir()
    (tmp_path / "benchmarks" / "tool_call_benchmark.py").write_text(
        "class ToolCall: ...\n"
    )

    touchpoints = tz.scan_touchpoints(tmp_path)

    labels = {tp.label for tp in touchpoints}
    assert "fallback_chain" in labels
    assert "runtime_provider" in labels
    assert "smart_model_routing" in labels
    assert "gateway_provider_routing" in labels
    assert "cron_runtime_provider" in labels
    assert "session_db" in labels
    assert "benchmark_suite" in labels


def test_build_requirement_matrix_marks_canary_as_gap_without_split_support():
    touchpoints = [
        tz.Touchpoint(
            label="runtime_provider",
            file_path="hermes_cli/runtime_provider.py",
            line_number=10,
            matched_text="def resolve_runtime_provider",
        ),
        tz.Touchpoint(
            label="provider_routing_config",
            file_path="cli.py",
            line_number=20,
            matched_text="provider_routing",
        ),
        tz.Touchpoint(
            label="fallback_chain",
            file_path="run_agent.py",
            line_number=21,
            matched_text="_fallback_chain = []",
        ),
        tz.Touchpoint(
            label="smart_model_routing",
            file_path="agent/smart_model_routing.py",
            line_number=30,
            matched_text="resolve_turn_route",
        ),
        tz.Touchpoint(
            label="gateway_provider_routing",
            file_path="gateway/run.py",
            line_number=35,
            matched_text="def _load_provider_routing",
        ),
        tz.Touchpoint(
            label="cron_runtime_provider",
            file_path="cron/scheduler.py",
            line_number=36,
            matched_text="runtime = resolve_runtime_provider()",
        ),
        tz.Touchpoint(
            label="session_db",
            file_path="hermes_state.py",
            line_number=40,
            matched_text="class SessionDB",
        ),
        tz.Touchpoint(
            label="trajectory_export",
            file_path="batch_runner.py",
            line_number=50,
            matched_text="trajectory_entry",
        ),
        tz.Touchpoint(
            label="benchmark_suite",
            file_path="benchmarks/tool_call_benchmark.py",
            line_number=60,
            matched_text="ToolCall",
        ),
    ]

    matrix = tz.build_requirement_matrix(touchpoints)
    by_key = {row.key: row for row in matrix}

    assert by_key["gateway_replacement"].status == "partial"
    assert by_key["config_migration"].status == "partial"
    assert by_key["canary_rollout"].status == "gap"
    assert by_key["session_feedback"].status == "partial"
    assert by_key["evaluation_suite"].status == "partial"


def test_build_markdown_renders_recommendation_and_touchpoints():
    touchpoints = [
        tz.Touchpoint(
            label="runtime_provider",
            file_path="hermes_cli/runtime_provider.py",
            line_number=10,
            matched_text="def resolve_runtime_provider",
        ),
        tz.Touchpoint(
            label="session_db",
            file_path="hermes_state.py",
            line_number=40,
            matched_text="class SessionDB",
        ),
    ]
    matrix = tz.build_requirement_matrix(touchpoints)
    report = tz.build_report(touchpoints, matrix)
    markdown = tz.build_markdown(report)

    assert "# TensorZero Evaluation Packet" in markdown
    assert "gateway_replacement" not in markdown  # human labels, not raw keys
    assert "Gateway replacement scope" in markdown
    assert "Not ready for direct replacement" in markdown
    assert "hermes_cli/runtime_provider.py:10" in markdown
    assert "hermes_state.py:40" in markdown


def test_issue_context_is_embedded_in_report():
    report = tz.build_report([], [])
    markdown = tz.build_markdown(report)

    assert "Issue #860" in markdown
    assert "tensorzero" in markdown.lower()
    assert "10% traffic" in markdown
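

# Run just this module's tests with:
#     pytest tests/test_tensorzero_eval_packet.py -q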