Compare commits

1 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 418e601f74 |  |
@@ -1,4 +1,4 @@
"""Shared auxiliary client router for side tasks.
from agent.telemetry_logger import log_token_usage
"""Shared auxiliary client router for side tasks.

Provides a single resolution chain so every consumer (context compression,
session search, web extraction, vision analysis, browser vision) picks up
@@ -38,7 +38,6 @@ import json
import logging
import os
import threading
from agent.telemetry_logger import log_token_usage
import time
from pathlib import Path  # noqa: F401 — used by test mocks
from types import SimpleNamespace
@@ -123,16 +122,6 @@ _OR_HEADERS = {
"X-OpenRouter-Categories": "productivity,cli-agent",
}

# Vercel AI Gateway app attribution headers. HTTP-Referer maps to
# referrerUrl and X-Title maps to appName in the gateway analytics.
from hermes_cli import __version__ as _HERMES_VERSION

_AI_GATEWAY_HEADERS = {
"HTTP-Referer": "https://hermes-agent.nousresearch.com",
"X-Title": "Hermes Agent",
"User-Agent": f"HermesAgent/{_HERMES_VERSION}",
}

# Nous Portal extra_body for product attribution.
# Callers should pass this as extra_body in chat.completions.create()
# when the auxiliary client is backed by Nous Portal.
@@ -407,8 +396,7 @@ class _CodexCompletionsAdapter:
prompt_tokens=getattr(resp_usage, "input_tokens", 0),
completion_tokens=getattr(resp_usage, "output_tokens", 0),
total_tokens=getattr(resp_usage, "total_tokens", 0),
)
log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)
)
log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)
except Exception as exc:
logger.debug("Codex auxiliary Responses API call failed: %s", exc)
raise
@@ -541,8 +529,7 @@ class _AnthropicCompletionsAdapter:
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=total_tokens,
)
log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)
)
log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)

choice = SimpleNamespace(
index=0,
@@ -168,7 +168,7 @@ import time as _time
from datetime import datetime

from hermes_cli import __version__, __release_date__
from hermes_constants import AI_GATEWAY_BASE_URL, OPENROUTER_BASE_URL
from hermes_constants import OPENROUTER_BASE_URL

logger = logging.getLogger(__name__)

@@ -1112,8 +1112,6 @@ def select_provider_and_model(args=None):
# Step 2: Provider-specific setup + model selection
if selected_provider == "openrouter":
_model_flow_openrouter(config, current_model)
elif selected_provider == "ai-gateway":
_model_flow_ai_gateway(config, current_model)
elif selected_provider == "nous":
_model_flow_nous(config, current_model, args=args)
elif selected_provider == "openai-codex":
@@ -1269,55 +1267,6 @@ def _model_flow_openrouter(config, current_model=""):
print("No change.")


def _model_flow_ai_gateway(config, current_model=""):
"""Vercel AI Gateway provider: ensure API key, then pick model with pricing."""
from hermes_cli.auth import _prompt_model_selection, _save_model_choice, deactivate_provider
from hermes_cli.config import get_env_value, save_env_value
from hermes_cli.models import ai_gateway_model_ids, get_pricing_for_provider

api_key = get_env_value("AI_GATEWAY_API_KEY")
if not api_key:
print("No Vercel AI Gateway API key configured.")
print("Create API key here: https://vercel.com/d?to=%2F%5Bteam%5D%2F%7E%2Fai-gateway&title=AI+Gateway")
print("Add a payment method to get $5 in free credits.")
print()
try:
import getpass
key = getpass.getpass("AI Gateway API key (or Enter to cancel): ").strip()
except (KeyboardInterrupt, EOFError):
print()
return
if not key:
print("Cancelled.")
return
save_env_value("AI_GATEWAY_API_KEY", key)
print("API key saved.")
print()

models_list = ai_gateway_model_ids(force_refresh=True)
pricing = get_pricing_for_provider("ai-gateway", force_refresh=True)

selected = _prompt_model_selection(models_list, current_model=current_model, pricing=pricing)
if selected:
_save_model_choice(selected)

from hermes_cli.config import load_config, save_config

cfg = load_config()
model = cfg.get("model")
if not isinstance(model, dict):
model = {"default": model} if model else {}
cfg["model"] = model
model["provider"] = "ai-gateway"
model["base_url"] = AI_GATEWAY_BASE_URL
model["api_mode"] = "chat_completions"
save_config(cfg)
deactivate_provider()
print(f"Default model set to: {selected} (via Vercel AI Gateway)")
else:
print("No change.")


def _model_flow_nous(config, current_model="", args=None):
"""Nous Portal provider: ensure logged in, then pick model."""
from hermes_cli.auth import (
@@ -58,28 +58,6 @@ OPENROUTER_MODELS: list[tuple[str, str]] = [

_openrouter_catalog_cache: list[tuple[str, str]] | None = None

# Fallback Vercel AI Gateway snapshot used when the live catalog is unavailable.
# OSS / open-weight models prioritized first, then closed-source by family.
VERCEL_AI_GATEWAY_MODELS: list[tuple[str, str]] = [
("moonshotai/kimi-k2.6", "recommended"),
("alibaba/qwen3.6-plus", ""),
("zai/glm-5.1", ""),
("minimax/minimax-m2.7", ""),
("anthropic/claude-sonnet-4.6", ""),
("anthropic/claude-opus-4.7", ""),
("anthropic/claude-opus-4.6", ""),
("anthropic/claude-haiku-4.5", ""),
("openai/gpt-5.4", ""),
("openai/gpt-5.4-mini", ""),
("openai/gpt-5.3-codex", ""),
("google/gemini-3.1-pro-preview", ""),
("google/gemini-3-flash", ""),
("google/gemini-3.1-flash-lite-preview", ""),
("xai/grok-4.20-reasoning", ""),
]

_ai_gateway_catalog_cache: list[tuple[str, str]] | None = None


def _codex_curated_models() -> list[str]:
"""Derive the openai-codex curated list from codex_models.py.
@@ -280,21 +258,18 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"minimax-m2.5",
],
"ai-gateway": [
"moonshotai/kimi-k2.6",
"alibaba/qwen3.6-plus",
"zai/glm-5.1",
"minimax/minimax-m2.7",
"anthropic/claude-sonnet-4.6",
"anthropic/claude-opus-4.7",
"anthropic/claude-opus-4.6",
"anthropic/claude-sonnet-4.6",
"anthropic/claude-sonnet-4.5",
"anthropic/claude-haiku-4.5",
"openai/gpt-5.4",
"openai/gpt-5.4-mini",
"openai/gpt-5.3-codex",
"google/gemini-3.1-pro-preview",
"openai/gpt-5",
"openai/gpt-4.1",
"openai/gpt-4.1-mini",
"google/gemini-3-pro-preview",
"google/gemini-3-flash",
"google/gemini-3.1-flash-lite-preview",
"xai/grok-4.20-reasoning",
"google/gemini-2.5-pro",
"google/gemini-2.5-flash",
"deepseek/deepseek-v3.2",
],
"kilocode": [
"anthropic/claude-opus-4.6",
@@ -541,7 +516,6 @@ class ProviderEntry(NamedTuple):
CANONICAL_PROVIDERS: list[ProviderEntry] = [
ProviderEntry("nous", "Nous Portal", "Nous Portal (Nous Research subscription)"),
ProviderEntry("openrouter", "OpenRouter", "OpenRouter (100+ models, pay-per-use)"),
ProviderEntry("ai-gateway", "Vercel AI Gateway", "Vercel AI Gateway (200+ models, $5 free credit, no markup)"),
ProviderEntry("anthropic", "Anthropic", "Anthropic (Claude models — API key or Claude Code)"),
ProviderEntry("openai-codex", "OpenAI Codex", "OpenAI Codex"),
ProviderEntry("xiaomi", "Xiaomi MiMo", "Xiaomi MiMo (MiMo-V2 models — pro, omni, flash)"),
@@ -562,6 +536,7 @@ CANONICAL_PROVIDERS: list[ProviderEntry] = [
ProviderEntry("kilocode", "Kilo Code", "Kilo Code (Kilo Gateway API)"),
ProviderEntry("opencode-zen", "OpenCode Zen", "OpenCode Zen (35+ curated models, pay-as-you-go)"),
ProviderEntry("opencode-go", "OpenCode Go", "OpenCode Go (open models, $10/month subscription)"),
ProviderEntry("ai-gateway", "Vercel AI Gateway", "Vercel AI Gateway (200+ models, pay-per-use)"),
]

# Derived dicts — used throughout the codebase
@@ -704,90 +679,6 @@ def model_ids(*, force_refresh: bool = False) -> list[str]:



def _ai_gateway_model_is_free(pricing: Any) -> bool:
"""Return True if an AI Gateway model has $0 input AND output pricing."""
if not isinstance(pricing, dict):
return False
try:
return float(pricing.get("input", "0")) == 0 and float(pricing.get("output", "0")) == 0
except (TypeError, ValueError):
return False


def fetch_ai_gateway_models(
timeout: float = 8.0,
*,
force_refresh: bool = False,
) -> list[tuple[str, str]]:
"""Return the curated AI Gateway picker list, refreshed from the live catalog when possible."""
global _ai_gateway_catalog_cache

if _ai_gateway_catalog_cache is not None and not force_refresh:
return list(_ai_gateway_catalog_cache)

from hermes_constants import AI_GATEWAY_BASE_URL

fallback = list(VERCEL_AI_GATEWAY_MODELS)
preferred_ids = [mid for mid, _ in fallback]

try:
req = urllib.request.Request(
f"{AI_GATEWAY_BASE_URL.rstrip('/')}/models",
headers={"Accept": "application/json"},
)
with urllib.request.urlopen(req, timeout=timeout) as resp:
payload = json.loads(resp.read().decode())
except Exception:
return list(_ai_gateway_catalog_cache or fallback)

live_items = payload.get("data", [])
if not isinstance(live_items, list):
return list(_ai_gateway_catalog_cache or fallback)

live_by_id: dict[str, dict[str, Any]] = {}
for item in live_items:
if not isinstance(item, dict):
continue
mid = str(item.get("id") or "").strip()
if not mid:
continue
live_by_id[mid] = item

curated: list[tuple[str, str]] = []
for preferred_id in preferred_ids:
live_item = live_by_id.get(preferred_id)
if live_item is None:
continue
desc = "free" if _ai_gateway_model_is_free(live_item.get("pricing")) else ""
curated.append((preferred_id, desc))

if not curated:
return list(_ai_gateway_catalog_cache or fallback)

free_moonshot = next(
(
mid
for mid, item in live_by_id.items()
if mid.startswith("moonshotai/") and _ai_gateway_model_is_free(item.get("pricing"))
),
None,
)
if free_moonshot:
curated = [(mid, desc) for mid, desc in curated if mid != free_moonshot]
curated.insert(0, (free_moonshot, "recommended"))
else:
first_id, _ = curated[0]
curated[0] = (first_id, "recommended")

_ai_gateway_catalog_cache = curated
return list(curated)


def ai_gateway_model_ids(*, force_refresh: bool = False) -> list[str]:
"""Return just the AI Gateway model-id strings."""
return [mid for mid, _ in fetch_ai_gateway_models(force_refresh=force_refresh)]


# ---------------------------------------------------------------------------
# Pricing helpers — fetch live pricing from OpenRouter-compatible /v1/models
# ---------------------------------------------------------------------------
@@ -930,51 +821,6 @@ def fetch_models_with_pricing(
return result


def fetch_ai_gateway_pricing(
timeout: float = 8.0,
*,
force_refresh: bool = False,
) -> dict[str, dict[str, str]]:
"""Fetch Vercel AI Gateway /v1/models and return Hermes-shaped pricing."""
from hermes_constants import AI_GATEWAY_BASE_URL

cache_key = AI_GATEWAY_BASE_URL.rstrip("/")
if not force_refresh and cache_key in _pricing_cache:
return _pricing_cache[cache_key]

try:
req = urllib.request.Request(
f"{cache_key}/models",
headers={"Accept": "application/json"},
)
with urllib.request.urlopen(req, timeout=timeout) as resp:
payload = json.loads(resp.read().decode())
except Exception:
_pricing_cache[cache_key] = {}
return {}

result: dict[str, dict[str, str]] = {}
for item in payload.get("data", []):
if not isinstance(item, dict):
continue
mid = item.get("id")
pricing = item.get("pricing")
if not (mid and isinstance(pricing, dict)):
continue
entry: dict[str, str] = {
"prompt": str(pricing.get("input", "")),
"completion": str(pricing.get("output", "")),
}
if pricing.get("input_cache_read"):
entry["input_cache_read"] = str(pricing["input_cache_read"])
if pricing.get("input_cache_write"):
entry["input_cache_write"] = str(pricing["input_cache_write"])
result[mid] = entry

_pricing_cache[cache_key] = result
return result


def _resolve_openrouter_api_key() -> str:
"""Best-effort OpenRouter API key for pricing fetch."""
return os.getenv("OPENROUTER_API_KEY", "").strip()
@@ -993,7 +839,7 @@ def _resolve_nous_pricing_credentials() -> tuple[str, str]:


def get_pricing_for_provider(provider: str, *, force_refresh: bool = False) -> dict[str, dict[str, str]]:
"""Return live pricing for providers that support it (openrouter, ai-gateway, nous)."""
"""Return live pricing for providers that support it (openrouter, nous)."""
normalized = normalize_provider(provider)
if normalized == "openrouter":
return fetch_models_with_pricing(
@@ -1001,11 +847,11 @@ def get_pricing_for_provider(provider: str, *, force_refresh: bool = False) -> d
base_url="https://openrouter.ai/api",
force_refresh=force_refresh,
)
if normalized == "ai-gateway":
return fetch_ai_gateway_pricing(force_refresh=force_refresh)
if normalized == "nous":
api_key, base_url = _resolve_nous_pricing_credentials()
if base_url:
# Nous base_url typically looks like https://inference-api.nousresearch.com/v1
# We need the part before /v1 for our fetch function
stripped = base_url.rstrip("/")
if stripped.endswith("/v1"):
stripped = stripped[:-3]
@@ -1407,7 +1253,9 @@ def provider_model_ids(provider: Optional[str], *, force_refresh: bool = False)
if live:
return live
if normalized == "ai-gateway":
return ai_gateway_model_ids()
live = _fetch_ai_gateway_models()
if live:
return live
if normalized == "custom":
base_url = _get_custom_base_url()
if base_url:
research_human_confirmation_firewall.md (new file, 515 lines)

@@ -0,0 +1,515 @@
# Human Confirmation Firewall: Research Report

## Implementation Patterns for Hermes Agent

**Issue:** #878
**Parent:** #659
**Priority:** P0
**Scope:** Human-in-the-loop safety patterns for tool calls, crisis handling, and irreversible actions

---

## Executive Summary

Hermes already has a partial human confirmation firewall, but it is narrow.

Current repo state shows:
- a real **pre-execution gate** for dangerous terminal commands in `tools/approval.py`
- a partial **confidence-threshold path** via `_smart_approve()` in `tools/approval.py`
- gateway support for blocking approval resolution in `gateway/run.py`

What is still missing is the core recommendation from this research issue:
- **confidence scoring on all tool calls**, not just terminal commands that already matched a dangerous regex
- a **hard pre-execution human gate for crisis interventions**, especially any action that would auto-respond to suicidal content
- a consistent way to classify actions into:
  1. pre-execution gate
  2. post-execution review
  3. confidence-threshold execution

Recommendation:
- use **Pattern 1: Pre-Execution Gate** for crisis interventions and irreversible/high-impact actions
- use **Pattern 3: Confidence Threshold** for normal operations
- reserve **Pattern 2: Post-Execution Review** only for low-risk and reversible actions

The next implementation step should be a **tool-call risk assessment layer** that runs before dispatch in `model_tools.handle_function_call()`, assigns a score and pattern to every tool call, and routes only the highest-risk calls into mandatory human confirmation.

---

## 1. The Three Proven Patterns

### Pattern 1: Pre-Execution Gate

Definition:
- halt before execution
- show the proposed action to the human
- require explicit approval or denial

Best for:
- destructive actions
- irreversible side effects
- crisis interventions
- actions that affect another human's safety, money, infrastructure, or private data

Strengths:
- strongest safety guarantee
- simplest audit story
- prevents the most catastrophic failure mode: acting first and apologizing later

Weaknesses:
- adds latency
- creates operator burden if overused
- should not be applied to every ordinary tool call

### Pattern 2: Post-Execution Review

Definition:
- execute first
- expose result to human
- allow rollback or follow-up correction

Best for:
- reversible operations
- low-risk actions with fast recovery
- tasks where human review matters but immediate execution is acceptable

Strengths:
- low friction
- fast iteration
- useful when rollback is practical

Weaknesses:
- unsafe for crisis or destructive actions
- only works when rollback actually exists
- a poor fit for external communication or life-safety contexts

### Pattern 3: Confidence Threshold

Definition:
- compute a risk/confidence score before execution
- auto-execute high-confidence safe actions
- request confirmation for lower-confidence or higher-risk actions

Best for:
- mixed-risk tool ecosystems
- day-to-day operations where always-confirm would be too expensive
- systems with a large volume of ordinary, safe reads and edits

Strengths:
- best balance of speed and safety
- scales across many tool types
- allows targeted human attention where it matters most

Weaknesses:
- depends on a good scoring model
- weak scoring creates false negatives or unnecessary prompts
- must remain inspectable and debuggable

---

## 2. What Hermes Already Has

### 2.1 Existing Pre-Execution Gate for Dangerous Terminal Commands

`tools/approval.py` already implements a real pre-execution confirmation path for dangerous shell commands.

Observed components:
- `DANGEROUS_PATTERNS`
- `detect_dangerous_command()`
- `prompt_dangerous_approval()`
- `check_dangerous_command()`
- gateway queueing and resolution support in the same module

This is already Pattern 1.

Current behavior:
- dangerous terminal commands are detected before execution
- the user can allow once / session / always / deny
- gateway sessions can block until approval resolves

This is a strong foundation, but it is limited to a subset of terminal commands.

### 2.2 Partial Confidence Threshold via Smart Approvals

Hermes also already has a partial Pattern 3.

Observed component:
- `_smart_approve()` in `tools/approval.py`

Current behavior:
- only runs **after** a command has already been flagged by dangerous-pattern detection
- uses the auxiliary LLM to decide:
  - approve
  - deny
  - escalate

This means Hermes has a confidence-threshold mechanism, but only for **already-flagged dangerous terminal commands**.

What it does not yet do:
- score all tool calls
- classify non-terminal tools
- distinguish crisis interventions from normal ops
- produce a shared risk model across the tool surface

### 2.3 Blocking Approval UX in Gateway

`gateway/run.py` already routes `/approve` and `/deny` into the blocking approval path.

This means the infrastructure for a true human confirmation firewall already exists in messaging contexts.

That is important because the missing work is not "invent human approval from zero."
The missing work is:
- expand the scope from dangerous shell commands to **all tool calls that matter**
- make the routing policy explicit and inspectable

---

## 3. What Hermes Still Lacks

### 3.1 No Universal Tool-Call Risk Assessment

The current approval system is command-pattern-centric.
It is not yet a tool-call firewall.

Missing capability:
- before dispatch, every tool call should receive a structured assessment:
  - tool name
  - side-effect class
  - reversibility
  - human-impact potential
  - crisis relevance
  - confidence score
  - recommended confirmation pattern

Natural insertion point:
- `model_tools.handle_function_call()`

That function already sits at the central dispatch boundary.
It is the right place to add a pre-dispatch classifier.

### 3.2 No Hard Crisis Gate for Outbound Intervention

Issue #878 explicitly recommends:
- Pattern 1 for crisis interventions
- never auto-respond to suicidal content

That recommendation is not yet codified as a global firewall rule.

Missing rule:
- if a tool call would directly intervene in a crisis context or send outward guidance in response to suicidal content, it must require explicit human confirmation before execution

Examples that should hard-gate:
- outbound `send_message` content aimed at a suicidal user
- any future tool that places calls, escalates emergencies, or contacts third parties about a crisis
- any autonomous action that claims a person should or should not take a life-safety step

### 3.3 No First-Class Post-Execution Review Policy

Hermes has approval and denial, but it does not yet have a formal policy for when Pattern 2 is acceptable.

Without a policy, post-execution review tends to get used implicitly rather than intentionally.

That is risky.

Hermes should define Pattern 2 narrowly:
- only for actions that are both low-risk and reversible
- only when the system can show the human exactly what happened
- never for crisis, finance, destructive config, or sensitive comms

---

## 4. Recommended Architecture for Hermes

### 4.1 Add a Tool-Call Assessment Layer

Add a pre-dispatch assessment object for every tool call.

Suggested shape:

```python
from dataclasses import dataclass


@dataclass
class ToolCallAssessment:
    tool_name: str
    risk_score: float   # 0.0 to 1.0
    confidence: float   # confidence in the assessment itself
    pattern: str        # pre_execution_gate | post_execution_review | confidence_threshold
    requires_human: bool
    reasons: list[str]
    reversible: bool
    crisis_sensitive: bool
```

Suggested execution point:
- inside `model_tools.handle_function_call()` before `orchestrator.dispatch()`

Why here:
- one place covers all tools
- one place can emit traces
- one place can remain model-agnostic
- one place lets plugins observe or override the assessment
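
A minimal sketch of how the gate could sit at that dispatch boundary. The dataclass is the shape above; `assess_tool_call`, `emit_assessment_trace`, and `request_human_approval` are hypothetical helpers used for illustration, not existing Hermes APIs, and the dispatcher signature is assumed rather than copied from `model_tools.py`.

```python
def handle_function_call(name: str, arguments: dict, orchestrator, context) -> dict:
    # 1. Score the call before anything runs (hypothetical scorer).
    assessment = assess_tool_call(name, arguments, context)

    # 2. Always record the decision, even in observe-only mode (Phase 1).
    emit_assessment_trace(assessment, arguments)

    # 3. Block on the human only when the assessment demands it.
    if assessment.requires_human:
        approved = request_human_approval(assessment, arguments)
        if not approved:
            return {
                "error": "tool call denied by human reviewer",
                "tool": name,
                "reasons": assessment.reasons,
            }

    # 4. Normal path: dispatch exactly as before.
    return orchestrator.dispatch(name, arguments)
```

The point of the sketch is placement, not the helper names: one wrapper at the dispatch boundary covers every tool without touching individual tool implementations.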

### 4.2 Classify Tool Calls by Side-Effect Class

Suggested first-pass taxonomy (a small encoding sketch follows class D):

#### A. Read-only

Examples:
- `read_file`
- `search_files`
- `browser_snapshot`
- `browser_console` read-only inspection

Pattern:
- confidence threshold
- almost always auto-execute
- human confirmation normally unnecessary

#### B. Local reversible edits

Examples:
- `patch`
- `write_file`
- `todo`

Pattern:
- confidence threshold
- human confirmation only when risk score rises because of path sensitivity or scope breadth

#### C. External side effects

Examples:
- `send_message`
- `cronjob`
- `delegate_task`
- smart-home actuation tools

Pattern:
- confidence threshold by default
- pre-execution gate when score exceeds threshold or when context is sensitive

#### D. Critical / destructive / crisis-sensitive

Examples:
- dangerous `terminal`
- financial actions
- deletion / kill / restart / deployment in sensitive paths
- outbound crisis intervention

Pattern:
- pre-execution gate
- never auto-execute on confidence alone
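
One way to make the taxonomy concrete is to encode it as data next to the assessment layer. This is a sketch under assumptions: `SideEffectClass` and `TOOL_SIDE_EFFECTS` are hypothetical names, only the tool names listed above come from the report, and unknown tools deliberately fall into the external (confirm-capable) class rather than the auto-run lane.

```python
from enum import Enum


class SideEffectClass(Enum):
    READ_ONLY = "read_only"                # class A
    LOCAL_REVERSIBLE = "local_reversible"  # class B
    EXTERNAL = "external"                  # class C
    CRITICAL = "critical"                  # class D


# Starter mapping from tool name to side-effect class.
TOOL_SIDE_EFFECTS: dict[str, SideEffectClass] = {
    "read_file": SideEffectClass.READ_ONLY,
    "search_files": SideEffectClass.READ_ONLY,
    "browser_snapshot": SideEffectClass.READ_ONLY,
    "patch": SideEffectClass.LOCAL_REVERSIBLE,
    "write_file": SideEffectClass.LOCAL_REVERSIBLE,
    "todo": SideEffectClass.LOCAL_REVERSIBLE,
    "send_message": SideEffectClass.EXTERNAL,
    "cronjob": SideEffectClass.EXTERNAL,
    "delegate_task": SideEffectClass.EXTERNAL,
    "terminal": SideEffectClass.CRITICAL,
}


def classify_tool(tool_name: str) -> SideEffectClass:
    """Unknown tools default to EXTERNAL so they can still be confirmed."""
    return TOOL_SIDE_EFFECTS.get(tool_name, SideEffectClass.EXTERNAL)
```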

### 4.3 Crisis Override Rule

Add a hard override:

```text
If tool call is crisis-sensitive AND outbound or irreversible:
    requires_human = True
    pattern = pre_execution_gate
```

This is the most important rule in the issue.

The model may draft the message.
The human must confirm before the system sends it.
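
Expressed against the `ToolCallAssessment` shape from 4.1, the override is a few lines. This is a sketch, assuming the dataclass above is importable; the `outbound` flag is an input the caller would have to supply, since the report does not define how outbound-ness is detected.

```python
def apply_crisis_override(assessment: ToolCallAssessment, outbound: bool) -> ToolCallAssessment:
    # Hard rule: crisis-sensitive plus outbound or irreversible always gates,
    # regardless of how low the computed risk score happens to be.
    if assessment.crisis_sensitive and (outbound or not assessment.reversible):
        assessment.requires_human = True
        assessment.pattern = "pre_execution_gate"
        assessment.reasons.append("crisis override: human confirmation required")
    return assessment
```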

### 4.4 Use Confidence Threshold for Normal Ops

For non-crisis operations, use Pattern 3.

Suggested logic:
- low risk + high assessment confidence -> auto-execute
- medium risk or medium confidence -> ask human
- high risk -> always ask human

Key point:
- confidence is not just "how sure the LLM is"
- confidence should combine (see the routing sketch below):
  - tool type certainty
  - argument clarity
  - path sensitivity
  - external side effects
  - crisis indicators
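
A sketch of how that routing could look once a risk score and a confidence value exist. The threshold constants reuse the Phase 3 numbers from section 6, the routing follows the 4.4 logic above, and the confidence floor of 0.70 is an assumption rather than a value from the issue.

```python
AUTO_MAX_RISK = 0.25      # below this: eligible for auto-execution
CONFIRM_MIN_RISK = 0.60   # at or above this: always confirm
MIN_CONFIDENCE = 0.70     # assumed floor for trusting the assessment itself


def route(assessment: ToolCallAssessment) -> str:
    """Return the confirmation route for a tool call under Pattern 3."""
    if assessment.crisis_sensitive:
        return "pre_execution_gate"   # handled by the 4.3 override, never auto
    if assessment.risk_score >= CONFIRM_MIN_RISK:
        return "pre_execution_gate"   # high risk: always ask
    if assessment.risk_score < AUTO_MAX_RISK and assessment.confidence >= MIN_CONFIDENCE:
        return "auto_execute"         # low risk and the scorer trusts itself
    return "ask_human"                # medium risk or shaky confidence
```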

---

## 5. Recommended Initial Scoring Factors

A simple initial scorer is enough.
It does not need to be fancy.

Suggested factors (a combined sketch follows 5.5):

### 5.1 Tool class risk
- read-only tools: very low base risk
- local mutation tools: moderate base risk
- external communication / automation tools: higher base risk
- shell execution: variable, often high

### 5.2 Target sensitivity
Examples:
- `/tmp` or local scratch paths -> lower
- repo files under git -> medium
- system config, credentials, secrets, gateway lifecycle -> high
- human-facing channels -> high if message content is sensitive

### 5.3 Reversibility
- reversible -> lower
- difficult but possible to undo -> medium
- practically irreversible -> high

### 5.4 Human-impact content
- no direct human impact -> low
- administrative impact -> medium
- crisis / safety / emotional intervention -> critical

### 5.5 Context certainty
- arguments are explicit and narrow -> higher confidence
- arguments are vague, inferred, or broad -> lower confidence
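
A weighted sum over these factors is enough for a first pass. The sketch below assumes each factor has already been normalized to the 0..1 range; the weights are illustrative starting points, not tuned values from the repo.

```python
FACTOR_WEIGHTS = {
    "tool_class": 0.35,          # 5.1
    "target_sensitivity": 0.25,  # 5.2
    "irreversibility": 0.20,     # 5.3
    "human_impact": 0.20,        # 5.4
}


def score_tool_call(
    tool_class: float,           # 0.0 (read-only) .. 1.0 (shell / critical)
    target_sensitivity: float,   # 0.0 (scratch path) .. 1.0 (secrets, live channels)
    irreversibility: float,      # 0.0 (trivially undone) .. 1.0 (irreversible)
    human_impact: float,         # 0.0 (none) .. 1.0 (crisis / safety)
    argument_clarity: float,     # 0.0 (vague) .. 1.0 (explicit and narrow)
) -> tuple[float, float]:
    """Return (risk_score, confidence), both clamped to the 0..1 range."""
    risk = (
        FACTOR_WEIGHTS["tool_class"] * tool_class
        + FACTOR_WEIGHTS["target_sensitivity"] * target_sensitivity
        + FACTOR_WEIGHTS["irreversibility"] * irreversibility
        + FACTOR_WEIGHTS["human_impact"] * human_impact
    )
    # Context certainty (5.5) feeds confidence in the assessment, not risk.
    confidence = argument_clarity
    return min(max(risk, 0.0), 1.0), min(max(confidence, 0.0), 1.0)
```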

---

## 6. Implementation Plan

### Phase 1: Assessment Without Behavior Change

Goal:
- score all tool calls
- log assessment decisions
- emit traces for review
- do not yet block new tool categories

Files to touch:
- `tools/approval.py`
- `model_tools.py`
- tests for assessment coverage

Output:
- risk/confidence trace for every tool call
- pattern recommendation for every tool call

Why first:
- lets us calibrate before changing runtime behavior
- avoids breaking existing workflows blindly
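
For the observe-only phase, one structured log line per tool call is enough to calibrate thresholds later. A sketch using only the standard library; the logger name and record fields are assumptions, and argument values are deliberately left out of the trace to avoid logging sensitive content.

```python
import json
import logging
import time

logger = logging.getLogger("hermes.tool_firewall")  # hypothetical logger name


def emit_assessment_trace(assessment: ToolCallAssessment, arguments: dict) -> None:
    """Phase 1: record the assessment, never block on it."""
    record = {
        "ts": time.time(),
        "tool": assessment.tool_name,
        "risk_score": assessment.risk_score,
        "confidence": assessment.confidence,
        "pattern": assessment.pattern,
        "requires_human": assessment.requires_human,
        "reasons": assessment.reasons,
        "argument_keys": sorted(arguments.keys()),  # keys only, no values
    }
    logger.info("tool_call_assessment %s", json.dumps(record))
```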

### Phase 2: Hard-Gate Crisis-Sensitive Outbound Actions

Goal:
- enforce Pattern 1 for crisis interventions

Likely surfaces:
- `send_message`
- any future telephony / call / escalation tools
- other tools with direct human intervention side effects

Rule:
- never auto-send crisis intervention content without human confirmation

### Phase 3: General Confidence Threshold for Normal Ops

Goal:
- apply Pattern 3 to all tool calls
- auto-run clearly safe actions
- escalate ambiguous or medium-risk actions

Likely thresholds:
- score < 0.25 -> auto
- 0.25 to 0.60 -> confirm if confidence is weak
- > 0.60 -> confirm
- crisis-sensitive -> always confirm

### Phase 4: Optional Post-Execution Review Lane

Goal:
- allow Pattern 2 only for explicitly reversible operations

Examples:
- maybe low-risk messaging drafts saved locally
- maybe reversible UI actions in specific environments

Important:
- this phase is optional
- Hermes should not rely on Pattern 2 for safety-critical flows

---

## 7. Verification Criteria for the Future Implementation

The eventual implementation should prove all of the following:

1. every tool call receives a scored assessment before dispatch
2. crisis-sensitive outbound actions always require human confirmation
3. dangerous terminal commands still preserve their current pre-execution gate
4. clearly safe read-only tool calls are not slowed by unnecessary prompts
5. assessment traces can be inspected after a run
6. approval decisions remain session-safe across CLI and gateway contexts
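
Criteria 2 and 4 lend themselves to direct tests once the assessment layer exists. A hedged sketch: `tools.firewall` and `assess_tool_call` are hypothetical names for the future API, not modules in the repo today, so these tests describe intent rather than current behavior.

```python
# test_firewall_criteria.py — hypothetical module and function names throughout.
from tools.firewall import assess_tool_call  # assumed future API


def test_crisis_outbound_send_message_requires_human():
    assessment = assess_tool_call(
        "send_message",
        {"channel": "dm", "content": "draft crisis response"},
        context={"crisis_indicators": True},
    )
    assert assessment.requires_human is True
    assert assessment.pattern == "pre_execution_gate"


def test_read_only_call_is_not_gated():
    assessment = assess_tool_call("read_file", {"path": "README.md"}, context={})
    assert assessment.requires_human is False
```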

---

## 8. Concrete Recommendations

### Recommendation 1
Do **not** replace the current dangerous-command approval path.
Generalize above it.

Why:
- existing terminal Pattern 1 already works
- this is the strongest piece of the current firewall

### Recommendation 2
Add a universal scorer in `model_tools.handle_function_call()`.

Why:
- that is the first point where Hermes knows the tool name and structured arguments
- it is the cleanest place to classify all tool calls uniformly

### Recommendation 3
Treat crisis-sensitive outbound intervention as a separate safety class.

Why:
- issue #878 explicitly calls for Pattern 1 here
- this matches Timmy's SOUL-level safety requirements

### Recommendation 4
Ship scoring traces before enforcement expansion.

Why:
- you cannot tune thresholds you cannot inspect
- false positives will otherwise frustrate normal usage

### Recommendation 5
Use Pattern 3 as the default policy for normal operations.

Why:
- full manual confirmation on every tool call is too expensive
- full autonomy is too risky
- Pattern 3 is the practical middle ground

---

## 9. Bottom Line

Hermes should implement a **two-track human confirmation firewall**:

1. **Pattern 1: Pre-Execution Gate**
   - crisis interventions
   - destructive terminal actions
   - irreversible or safety-critical tool calls

2. **Pattern 3: Confidence Threshold**
   - all ordinary tool calls
   - driven by a universal tool-call assessment layer
   - integrated at the central dispatch boundary

Pattern 2 should remain optional and narrow.
It is not the primary answer for Hermes.

The repo already contains the beginnings of this system.
The next step is not new theory.
It is to turn the existing approval path into a true **tool-call-wide human confirmation firewall**.

---

## References

- Issue #878 — Human Confirmation Firewall Implementation Patterns
- Issue #659 — Critical Research Tasks
- `tools/approval.py` — current dangerous-command approval flow and smart approvals
- `model_tools.py` — central tool dispatch boundary
- `gateway/run.py` — blocking approval handling for messaging sessions
@@ -908,10 +908,6 @@ class AIAgent:
"X-OpenRouter-Title": "Hermes Agent",
"X-OpenRouter-Categories": "productivity,cli-agent",
}
elif "ai-gateway.vercel.sh" in effective_base.lower():
from agent.auxiliary_client import _AI_GATEWAY_HEADERS

client_kwargs["default_headers"] = dict(_AI_GATEWAY_HEADERS)
elif "api.githubcopilot.com" in effective_base.lower():
from hermes_cli.models import copilot_default_headers

@@ -4671,13 +4667,11 @@ class AIAgent:
return True

def _apply_client_headers_for_base_url(self, base_url: str) -> None:
from agent.auxiliary_client import _AI_GATEWAY_HEADERS, _OR_HEADERS
from agent.auxiliary_client import _OR_HEADERS

normalized = (base_url or "").lower()
if "openrouter" in normalized:
self._client_kwargs["default_headers"] = dict(_OR_HEADERS)
elif "ai-gateway.vercel.sh" in normalized:
self._client_kwargs["default_headers"] = dict(_AI_GATEWAY_HEADERS)
elif "api.githubcopilot.com" in normalized:
from hermes_cli.models import copilot_default_headers

@@ -1,222 +0,0 @@
"""AI Gateway provider UX, live pricing, and model promotion tests."""

from __future__ import annotations

import json
from unittest.mock import MagicMock, patch

import pytest

from hermes_cli import models as models_module
from hermes_cli.models import (
CANONICAL_PROVIDERS,
VERCEL_AI_GATEWAY_MODELS,
_ai_gateway_model_is_free,
ai_gateway_model_ids,
fetch_ai_gateway_models,
fetch_ai_gateway_pricing,
get_pricing_for_provider,
)


def _mock_urlopen(payload):
resp = MagicMock()
resp.read.return_value = json.dumps(payload).encode()
ctx = MagicMock()
ctx.__enter__.return_value = resp
ctx.__exit__.return_value = False
return ctx


def _reset_caches():
models_module._ai_gateway_catalog_cache = None
models_module._pricing_cache.clear()


@pytest.fixture
def config_home(tmp_path, monkeypatch):
home = tmp_path / "hermes"
home.mkdir()
(home / "config.yaml").write_text("model: some-old-model\n")
(home / ".env").write_text("")
monkeypatch.setenv("HERMES_HOME", str(home))
monkeypatch.delenv("AI_GATEWAY_API_KEY", raising=False)
monkeypatch.delenv("AI_GATEWAY_BASE_URL", raising=False)
return home


def test_ai_gateway_provider_is_promoted_near_top_of_picker():
slugs = [entry.slug for entry in CANONICAL_PROVIDERS]
assert "ai-gateway" in slugs[:3]


def test_ai_gateway_pricing_translates_input_output_to_prompt_completion():
_reset_caches()
payload = {
"data": [
{
"id": "moonshotai/kimi-k2.5",
"type": "language",
"pricing": {
"input": "0.0000006",
"output": "0.0000025",
"input_cache_read": "0.00000015",
"input_cache_write": "0.0000006",
},
}
]
}
with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
result = fetch_ai_gateway_pricing(force_refresh=True)

entry = result["moonshotai/kimi-k2.5"]
assert entry["prompt"] == "0.0000006"
assert entry["completion"] == "0.0000025"
assert entry["input_cache_read"] == "0.00000015"
assert entry["input_cache_write"] == "0.0000006"


def test_get_pricing_for_provider_supports_ai_gateway():
_reset_caches()
payload = {
"data": [
{
"id": "moonshotai/kimi-k2.5",
"type": "language",
"pricing": {"input": "0.0001", "output": "0.0002"},
}
]
}
with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
result = get_pricing_for_provider("ai-gateway", force_refresh=True)
assert result["moonshotai/kimi-k2.5"] == {"prompt": "0.0001", "completion": "0.0002"}


def test_ai_gateway_pricing_returns_empty_on_fetch_failure():
_reset_caches()
with patch("urllib.request.urlopen", side_effect=OSError("network down")):
result = fetch_ai_gateway_pricing(force_refresh=True)
assert result == {}


def test_ai_gateway_pricing_skips_entries_without_pricing_dict():
_reset_caches()
payload = {
"data": [
{"id": "x/y", "pricing": None},
{"id": "a/b", "pricing": {"input": "0", "output": "0"}},
]
}
with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
result = fetch_ai_gateway_pricing(force_refresh=True)
assert "x/y" not in result
assert result["a/b"] == {"prompt": "0", "completion": "0"}


def test_ai_gateway_free_detector():
assert _ai_gateway_model_is_free({"input": "0", "output": "0"}) is True
assert _ai_gateway_model_is_free({"input": "0", "output": "0.01"}) is False
assert _ai_gateway_model_is_free({"input": "0.01", "output": "0"}) is False
assert _ai_gateway_model_is_free(None) is False
assert _ai_gateway_model_is_free({"input": "not a number"}) is False


def test_fetch_ai_gateway_models_filters_against_live_catalog():
_reset_caches()
preferred = [mid for mid, _ in VERCEL_AI_GATEWAY_MODELS]
live_ids = preferred[:3]
payload = {
"data": [
{"id": mid, "pricing": {"input": "0.001", "output": "0.002"}}
for mid in live_ids
]
}
with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
result = fetch_ai_gateway_models(force_refresh=True)

assert [mid for mid, _ in result] == live_ids
assert result[0][1] == "recommended"
assert ai_gateway_model_ids(force_refresh=False) == live_ids


def test_fetch_ai_gateway_models_tags_free_models():
_reset_caches()
first_id = VERCEL_AI_GATEWAY_MODELS[0][0]
second_id = VERCEL_AI_GATEWAY_MODELS[1][0]
payload = {
"data": [
{"id": first_id, "pricing": {"input": "0.001", "output": "0.002"}},
{"id": second_id, "pricing": {"input": "0", "output": "0"}},
]
}
with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
result = fetch_ai_gateway_models(force_refresh=True)

by_id = dict(result)
assert by_id[first_id] == "recommended"
assert by_id[second_id] == "free"


def test_free_moonshot_model_auto_promoted_to_top_even_if_not_curated():
_reset_caches()
first_curated = VERCEL_AI_GATEWAY_MODELS[0][0]
unlisted_free_moonshot = "moonshotai/kimi-coder-free-preview"
payload = {
"data": [
{"id": first_curated, "pricing": {"input": "0.001", "output": "0.002"}},
{"id": unlisted_free_moonshot, "pricing": {"input": "0", "output": "0"}},
]
}
with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
result = fetch_ai_gateway_models(force_refresh=True)

assert result[0] == (unlisted_free_moonshot, "recommended")
assert any(mid == first_curated for mid, _ in result)


def test_paid_moonshot_does_not_get_auto_promoted():
_reset_caches()
first_curated = VERCEL_AI_GATEWAY_MODELS[0][0]
payload = {
"data": [
{"id": first_curated, "pricing": {"input": "0.001", "output": "0.002"}},
{"id": "moonshotai/some-paid-variant", "pricing": {"input": "0.001", "output": "0.002"}},
]
}
with patch("urllib.request.urlopen", return_value=_mock_urlopen(payload)):
result = fetch_ai_gateway_models(force_refresh=True)

assert result[0][0] == first_curated


def test_fetch_ai_gateway_models_falls_back_on_error():
_reset_caches()
with patch("urllib.request.urlopen", side_effect=OSError("network")):
result = fetch_ai_gateway_models(force_refresh=True)
assert result == list(VERCEL_AI_GATEWAY_MODELS)


def test_ai_gateway_setup_flow_shows_deeplink_and_passes_pricing(config_home, monkeypatch, capsys):
from hermes_cli.main import _model_flow_ai_gateway
from hermes_cli.config import load_config

pricing = {"moonshotai/kimi-k2.6": {"prompt": "0", "completion": "0"}}
monkeypatch.setenv("HERMES_HOME", str(config_home))

with patch("getpass.getpass", return_value="vercel-key"), \
patch("hermes_cli.models.ai_gateway_model_ids", return_value=["moonshotai/kimi-k2.6"]), \
patch("hermes_cli.models.get_pricing_for_provider", return_value=pricing), \
patch("hermes_cli.auth._prompt_model_selection", return_value="moonshotai/kimi-k2.6") as prompt_selection, \
patch("hermes_cli.auth.deactivate_provider"):
_model_flow_ai_gateway(load_config(), "")

out = capsys.readouterr().out
assert "vercel.com/d?to=%2F%5Bteam%5D%2F%7E%2Fai-gateway&title=AI+Gateway" in out
assert "free credits" in out.lower()
assert prompt_selection.call_args.kwargs["pricing"] == pricing

import yaml
config = yaml.safe_load((config_home / "config.yaml").read_text()) or {}
model = config["model"]
assert model["provider"] == "ai-gateway"
assert model["api_mode"] == "chat_completions"

@@ -1,62 +0,0 @@
"""Attribution default_headers applied per provider via base-URL detection."""

from unittest.mock import MagicMock, patch

from run_agent import AIAgent


@patch("run_agent.OpenAI")
def test_openrouter_base_url_applies_or_headers(mock_openai):
mock_openai.return_value = MagicMock()
agent = AIAgent(
api_key="test-key",
base_url="https://openrouter.ai/api/v1",
model="test/model",
quiet_mode=True,
skip_context_files=True,
skip_memory=True,
)

agent._apply_client_headers_for_base_url("https://openrouter.ai/api/v1")

headers = agent._client_kwargs["default_headers"]
assert headers["HTTP-Referer"] == "https://hermes-agent.nousresearch.com"
assert headers["X-OpenRouter-Title"] == "Hermes Agent"


@patch("run_agent.OpenAI")
def test_ai_gateway_base_url_applies_attribution_headers(mock_openai):
mock_openai.return_value = MagicMock()
agent = AIAgent(
api_key="test-key",
base_url="https://openrouter.ai/api/v1",
model="test/model",
quiet_mode=True,
skip_context_files=True,
skip_memory=True,
)

agent._apply_client_headers_for_base_url("https://ai-gateway.vercel.sh/v1")

headers = agent._client_kwargs["default_headers"]
assert headers["HTTP-Referer"] == "https://hermes-agent.nousresearch.com"
assert headers["X-Title"] == "Hermes Agent"
assert headers["User-Agent"].startswith("HermesAgent/")


@patch("run_agent.OpenAI")
def test_unknown_base_url_clears_default_headers(mock_openai):
mock_openai.return_value = MagicMock()
agent = AIAgent(
api_key="test-key",
base_url="https://openrouter.ai/api/v1",
model="test/model",
quiet_mode=True,
skip_context_files=True,
skip_memory=True,
)
agent._client_kwargs["default_headers"] = {"X-Stale": "yes"}

agent._apply_client_headers_for_base_url("https://api.example.com/v1")

assert "default_headers" not in agent._client_kwargs