Compare commits

1 commit

Author: Alexander Whitestone
SHA1: 418e601f74
Message: docs: add human confirmation firewall research report
Date: 2026-04-22 11:22:24 -04:00
Checks: all successful (Lint / lint (pull_request), 9s)

3 changed files with 592 additions and 512 deletions

View File

@@ -1,70 +1,43 @@
from __future__ import annotations
"""
A2A agent card generation for fleet discovery.
Agent Card — A2A-compliant agent discovery.
Part of #843: fix: implement A2A agent card for fleet discovery (#819)
Refs #801.
Closes #802.
Provides metadata about the agent's identity, capabilities, and installed skills
for discovery by other agents in the fleet.
"""
import argparse
import json
import logging
import os
import socket
import sys
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, Iterable, List, Mapping, Sequence
from urllib.parse import urlparse, urlunparse
from pathlib import Path
from typing import Any, Dict, List, Optional
from hermes_cli import __version__
from hermes_cli.config import load_config
from hermes_cli.config import load_config, get_hermes_home
from agent.skill_utils import (
get_all_skills_dirs,
get_disabled_skill_names,
iter_skill_index_files,
parse_frontmatter,
skill_matches_platform,
get_all_skills_dirs,
get_disabled_skill_names,
skill_matches_platform
)
logger = logging.getLogger(__name__)
DEFAULT_DESCRIPTION = "Sovereign AI agent — orchestration, code, research"
DEFAULT_INPUT_MODES = ["text/plain", "application/json"]
DEFAULT_OUTPUT_MODES = ["text/plain", "application/json"]
_REQUIRED_CAPABILITY_FLAGS = (
"streaming",
"pushNotifications",
"stateTransitionHistory",
)
@dataclass
class AgentSkill:
id: str
name: str
description: str = ""
tags: List[str] = field(default_factory=list)
def to_dict(self) -> Dict[str, Any]:
data: Dict[str, Any] = {"id": self.id, "name": self.name}
if self.description:
data["description"] = self.description
if self.tags:
data["tags"] = self.tags
return data
version: str = "1.0.0"
@dataclass
class AgentCapabilities:
streaming: bool = True
pushNotifications: bool = False
stateTransitionHistory: bool = True
def to_dict(self) -> Dict[str, Any]:
return asdict(self)
tools: bool = True
vision: bool = False
reasoning: bool = False
@dataclass
class AgentCard:
@@ -74,81 +47,14 @@ class AgentCard:
version: str = __version__
capabilities: AgentCapabilities = field(default_factory=AgentCapabilities)
skills: List[AgentSkill] = field(default_factory=list)
defaultInputModes: List[str] = field(default_factory=lambda: list(DEFAULT_INPUT_MODES))
defaultOutputModes: List[str] = field(default_factory=lambda: list(DEFAULT_OUTPUT_MODES))
metadata: Dict[str, Any] = field(default_factory=dict)
def to_dict(self) -> Dict[str, Any]:
data: Dict[str, Any] = {
"name": self.name,
"description": self.description,
"url": self.url,
"version": self.version,
"capabilities": self.capabilities.to_dict(),
"skills": [skill.to_dict() for skill in self.skills],
"defaultInputModes": list(self.defaultInputModes),
"defaultOutputModes": list(self.defaultOutputModes),
}
if self.metadata:
data["metadata"] = dict(self.metadata)
return data
def to_json(self, indent: int = 2) -> str:
return json.dumps(self.to_dict(), indent=indent)
def _env_or_empty(key: str) -> str:
return os.environ.get(key, "").strip()
def _as_agent_config(config: Mapping[str, Any] | None) -> Dict[str, Any]:
if not isinstance(config, Mapping):
return {}
agent_cfg = config.get("agent")
return dict(agent_cfg) if isinstance(agent_cfg, Mapping) else {}
def _as_a2a_config(config: Mapping[str, Any] | None) -> Dict[str, Any]:
if not isinstance(config, Mapping):
return {}
a2a_cfg = config.get("a2a")
return dict(a2a_cfg) if isinstance(a2a_cfg, Mapping) else {}
def _normalize_string_list(value: Any) -> List[str]:
if value is None:
return []
if isinstance(value, str):
parts = value.split(",")
elif isinstance(value, Sequence) and not isinstance(value, (bytes, bytearray, str)):
parts = list(value)
else:
parts = [value]
out: List[str] = []
seen = set()
for item in parts:
text = str(item).strip()
if not text or text in seen:
continue
seen.add(text)
out.append(text)
return out
def _normalize_skill_tags(frontmatter: Mapping[str, Any]) -> List[str]:
tags = _normalize_string_list(frontmatter.get("tags"))
category = str(frontmatter.get("category") or "").strip()
if category and category not in tags:
tags.append(category)
return tags
defaultInputModes: List[str] = field(default_factory=lambda: ["text/plain"])
defaultOutputModes: List[str] = field(default_factory=lambda: ["text/plain"])
def _load_skills() -> List[AgentSkill]:
"""Scan enabled skills and return A2A skill metadata."""
skills: List[AgentSkill] = []
"""Scan all enabled skills and return metadata."""
skills = []
disabled = get_disabled_skill_names()
seen_ids = set()
for skills_dir in get_all_skills_dirs():
if not skills_dir.is_dir():
continue
@@ -159,262 +65,71 @@ def _load_skills() -> List[AgentSkill]:
except Exception:
continue
skill_name = frontmatter.get("name") or skill_file.parent.name
if str(skill_name) in disabled:
continue
if not skill_matches_platform(frontmatter):
continue
skill_id = str(frontmatter.get("name") or skill_file.parent.name).strip().lower().replace(" ", "-")
if skill_id in disabled or skill_id in seen_ids:
continue
seen_ids.add(skill_id)
skills.append(AgentSkill(
id=str(skill_name),
name=str(frontmatter.get("name", skill_name)),
description=str(frontmatter.get("description", "")),
version=str(frontmatter.get("version", "1.0.0"))
))
return skills
display_name = str(frontmatter.get("title") or frontmatter.get("name") or skill_file.parent.name).strip()
description = str(frontmatter.get("description") or "").strip()
tags = _normalize_skill_tags(frontmatter)
skills.append(
AgentSkill(
id=skill_id,
name=display_name,
description=description,
tags=tags,
)
)
def build_agent_card() -> AgentCard:
"""Build the agent card from current configuration and environment."""
config = load_config()
# Identity
name = os.environ.get("HERMES_AGENT_NAME") or config.get("agent", {}).get("name") or "hermes"
description = os.environ.get("HERMES_AGENT_DESCRIPTION") or config.get("agent", {}).get("description") or "Sovereign AI agent"
# URL - try to determine from environment or config
port = os.environ.get("HERMES_WEB_PORT") or "9119"
host = os.environ.get("HERMES_WEB_HOST") or "localhost"
url = f"http://{host}:{port}"
# Capabilities
# In a real scenario, we'd check model metadata for vision/reasoning
capabilities = AgentCapabilities(
streaming=True,
tools=True,
vision=False, # Default to false unless we can confirm
reasoning=False
)
# Skills
skills = _load_skills()
return AgentCard(
name=name,
description=description,
url=url,
version=__version__,
capabilities=capabilities,
skills=skills
)
return sorted(skills, key=lambda skill: skill.id)
def _get_agent_name(config: Mapping[str, Any] | None, override: str | None = None) -> str:
if override:
return override
env_name = _env_or_empty("HERMES_AGENT_NAME") or _env_or_empty("AGENT_NAME")
if env_name:
return env_name
agent_cfg = _as_agent_config(config)
if agent_cfg.get("name"):
return str(agent_cfg["name"]).strip()
def get_agent_card_json() -> str:
"""Return the agent card as a JSON string."""
try:
hostname = socket.gethostname().split(".", 1)[0].strip()
if hostname:
return hostname
except Exception:
pass
return "hermes"
def _get_description(config: Mapping[str, Any] | None, override: str | None = None) -> str:
if override:
return override
env_description = _env_or_empty("HERMES_AGENT_DESCRIPTION") or _env_or_empty("AGENT_DESCRIPTION")
if env_description:
return env_description
agent_cfg = _as_agent_config(config)
if agent_cfg.get("description"):
return str(agent_cfg["description"]).strip()
return DEFAULT_DESCRIPTION
def _normalize_a2a_url(url: str) -> str:
raw = (url or "").strip()
if not raw:
return ""
parsed = urlparse(raw if "://" in raw else f"https://{raw}")
scheme = parsed.scheme or "https"
netloc = parsed.netloc or parsed.path
path = parsed.path if parsed.netloc else ""
normalized_path = path.rstrip("/") if path not in ("", "/") else ""
if not normalized_path.endswith("/a2a"):
normalized_path = f"{normalized_path}/a2a" if normalized_path else "/a2a"
return urlunparse((scheme, netloc, normalized_path, "", "", ""))
def _get_agent_url(config: Mapping[str, Any] | None, override: str | None = None) -> str:
if override:
return _normalize_a2a_url(override)
agent_cfg = _as_agent_config(config)
a2a_cfg = _as_a2a_config(config)
explicit = (
_env_or_empty("HERMES_A2A_PUBLIC_URL")
or str(a2a_cfg.get("public_url") or "").strip()
or str(agent_cfg.get("a2a_public_url") or "").strip()
)
if explicit:
return _normalize_a2a_url(explicit)
host = (
_env_or_empty("HERMES_A2A_HOST")
or str(a2a_cfg.get("host") or "").strip()
or _env_or_empty("HERMES_WEB_HOST")
or str(agent_cfg.get("host") or "").strip()
or "localhost"
)
port = (
_env_or_empty("HERMES_A2A_PORT")
or str(a2a_cfg.get("port") or "").strip()
or _env_or_empty("HERMES_WEB_PORT")
or str(agent_cfg.get("port") or "").strip()
or "9119"
)
scheme = (
_env_or_empty("HERMES_A2A_SCHEME")
or str(a2a_cfg.get("scheme") or "").strip()
or ("https" if (_env_or_empty("HERMES_MTLS_CERT") or str(port) == "9443") else "http")
)
return _normalize_a2a_url(f"{scheme}://{host}:{port}")
def _merge_skills(base_skills: Iterable[AgentSkill], extra_skills: Iterable[AgentSkill] | None = None) -> List[AgentSkill]:
merged: Dict[str, AgentSkill] = {}
for skill in list(base_skills) + list(extra_skills or []):
if skill.id not in merged:
merged[skill.id] = skill
return [merged[key] for key in sorted(merged)]
def build_agent_card(
*,
name: str | None = None,
description: str | None = None,
url: str | None = None,
extra_skills: Iterable[AgentSkill] | None = None,
metadata: Mapping[str, Any] | None = None,
) -> AgentCard:
"""Build an A2A-compliant agent card from config, env, and installed skills."""
try:
config = load_config()
except Exception as exc:
logger.debug("Falling back to empty config while building agent card: %s", exc)
config = {}
card = AgentCard(
name=_get_agent_name(config, override=name),
description=_get_description(config, override=description),
url=_get_agent_url(config, override=url),
skills=_merge_skills(_load_skills(), extra_skills),
metadata=dict(metadata or {}),
)
return card
def validate_agent_card(card: AgentCard | Dict[str, Any]) -> List[str]:
"""Return a list of schema-validation errors for an agent card."""
data = card.to_dict() if isinstance(card, AgentCard) else dict(card)
errors: List[str] = []
for field_name in ("name", "description", "url", "version"):
value = data.get(field_name)
if not isinstance(value, str) or not value.strip():
errors.append(f"{field_name} must be a non-empty string")
url_value = str(data.get("url") or "")
parsed = urlparse(url_value)
if not parsed.scheme or not parsed.netloc:
errors.append("url must be an absolute http/https URL")
elif parsed.scheme not in {"http", "https"}:
errors.append("url must use http or https")
elif not parsed.path.rstrip("/").endswith("/a2a"):
errors.append("url must point to the /a2a endpoint")
capabilities = data.get("capabilities")
if not isinstance(capabilities, Mapping):
errors.append("capabilities must be an object")
else:
for capability_name in _REQUIRED_CAPABILITY_FLAGS:
if not isinstance(capabilities.get(capability_name), bool):
errors.append(f"capabilities.{capability_name} must be a boolean")
for field_name, required_modes in (
("defaultInputModes", DEFAULT_INPUT_MODES),
("defaultOutputModes", DEFAULT_OUTPUT_MODES),
):
modes = data.get(field_name)
if not isinstance(modes, list) or not modes:
errors.append(f"{field_name} must be a non-empty list of MIME types")
continue
for mode in modes:
if not isinstance(mode, str) or "/" not in mode:
errors.append(f"{field_name} entries must be MIME types")
for required_mode in required_modes:
if required_mode not in modes:
errors.append(f"{field_name} must include {required_mode}")
skills = data.get("skills")
if not isinstance(skills, list):
errors.append("skills must be a list")
else:
for index, skill in enumerate(skills):
if not isinstance(skill, Mapping):
errors.append(f"skills[{index}] must be an object")
continue
if not str(skill.get("id") or "").strip():
errors.append(f"skills[{index}] missing id")
if not str(skill.get("name") or "").strip():
errors.append(f"skills[{index}] missing name")
tags = skill.get("tags", [])
if tags is None:
tags = []
if not isinstance(tags, list):
errors.append(f"skills[{index}].tags must be a list")
else:
for tag in tags:
if not isinstance(tag, str) or not tag.strip():
errors.append(f"skills[{index}].tags entries must be non-empty strings")
metadata = data.get("metadata")
if metadata is not None and not isinstance(metadata, Mapping):
errors.append("metadata must be an object when present")
return errors
def get_agent_card_json(
*,
name: str | None = None,
description: str | None = None,
url: str | None = None,
metadata: Mapping[str, Any] | None = None,
indent: int = 2,
) -> str:
"""Return the local agent card as JSON, falling back to an error card on failure."""
try:
card = build_agent_card(name=name, description=description, url=url, metadata=metadata)
errors = validate_agent_card(card)
if errors:
raise ValueError("; ".join(errors))
return card.to_json(indent=indent)
except Exception as exc:
logger.error("Failed to build agent card: %s", exc)
card = build_agent_card()
return json.dumps(asdict(card), indent=2)
except Exception as e:
logger.error(f"Failed to build agent card: {e}")
# Minimal fallback card
fallback = {
"name": name or _env_or_empty("HERMES_AGENT_NAME") or "hermes",
"description": "Sovereign AI agent (agent card fallback)",
"url": url or "http://localhost:9119/a2a",
"name": "hermes",
"description": "Sovereign AI agent (fallback)",
"version": __version__,
"capabilities": AgentCapabilities().to_dict(),
"skills": [],
"defaultInputModes": list(DEFAULT_INPUT_MODES),
"defaultOutputModes": list(DEFAULT_OUTPUT_MODES),
"error": str(exc),
"error": str(e)
}
return json.dumps(fallback, indent=indent)
return json.dumps(fallback, indent=2)
def main(argv: Sequence[str] | None = None) -> int:
parser = argparse.ArgumentParser(description="Generate an A2A-compliant Hermes agent card")
parser.add_argument("--name", help="Override the agent name")
parser.add_argument("--description", help="Override the agent description")
parser.add_argument("--url", help="Override the public A2A URL")
parser.add_argument("--validate", action="store_true", help="Validate before printing; exit 1 on schema errors")
args = parser.parse_args(list(argv) if argv is not None else None)
card = build_agent_card(name=args.name, description=args.description, url=args.url)
errors = validate_agent_card(card)
if args.validate and errors:
for error in errors:
print(error, file=sys.stderr)
return 1
print(card.to_json(indent=2))
return 0
if __name__ == "__main__":
raise SystemExit(main())
def validate_agent_card(card_data: Dict[str, Any]) -> bool:
"""Check if the card data complies with the A2A schema."""
required = ["name", "description", "url", "version"]
return all(k in card_data for k in required)

View File

@@ -0,0 +1,515 @@
# Human Confirmation Firewall: Research Report
## Implementation Patterns for Hermes Agent
**Issue:** #878
**Parent:** #659
**Priority:** P0
**Scope:** Human-in-the-loop safety patterns for tool calls, crisis handling, and irreversible actions
---
## Executive Summary
Hermes already has a partial human confirmation firewall, but it is narrow.
Current repo state shows:
- a real **pre-execution gate** for dangerous terminal commands in `tools/approval.py`
- a partial **confidence-threshold path** via `_smart_approve()` in `tools/approval.py`
- gateway support for blocking approval resolution in `gateway/run.py`
What is still missing is the core recommendation from this research issue:
- **confidence scoring on all tool calls**, not just terminal commands that already matched a dangerous regex
- a **hard pre-execution human gate for crisis interventions**, especially any action that would auto-respond to suicidal content
- a consistent way to classify actions into:
1. pre-execution gate
2. post-execution review
3. confidence-threshold execution
Recommendation:
- use **Pattern 1: Pre-Execution Gate** for crisis interventions and irreversible/high-impact actions
- use **Pattern 3: Confidence Threshold** for normal operations
- reserve **Pattern 2: Post-Execution Review** only for low-risk and reversible actions
The next implementation step should be a **tool-call risk assessment layer** that runs before dispatch in `model_tools.handle_function_call()`, assigns a score and pattern to every tool call, and routes only the highest-risk calls into mandatory human confirmation.
---
## 1. The Three Proven Patterns
### Pattern 1: Pre-Execution Gate
Definition:
- halt before execution
- show the proposed action to the human
- require explicit approval or denial
Best for:
- destructive actions
- irreversible side effects
- crisis interventions
- actions that affect another human's safety, money, infrastructure, or private data
Strengths:
- strongest safety guarantee
- simplest audit story
- prevents the most catastrophic failure mode: acting first and apologizing later
Weaknesses:
- adds latency
- creates operator burden if overused
- should not be applied to every ordinary tool call
### Pattern 2: Post-Execution Review
Definition:
- execute first
- expose result to human
- allow rollback or follow-up correction
Best for:
- reversible operations
- low-risk actions with fast recovery
- tasks where human review matters but immediate execution is acceptable
Strengths:
- low friction
- fast iteration
- useful when rollback is practical
Weaknesses:
- unsafe for crisis or destructive actions
- only works when rollback actually exists
- a poor fit for external communication or life-safety contexts
### Pattern 3: Confidence Threshold
Definition:
- compute a risk/confidence score before execution
- auto-execute high-confidence safe actions
- request confirmation for lower-confidence or higher-risk actions
Best for:
- mixed-risk tool ecosystems
- day-to-day operations where always-confirm would be too expensive
- systems with a large volume of ordinary, safe reads and edits
Strengths:
- best balance of speed and safety
- scales across many tool types
- allows targeted human attention where it matters most
Weaknesses:
- depends on a good scoring model
- weak scoring creates false negatives or unnecessary prompts
- must remain inspectable and debuggable
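As a rough sketch, the three patterns can be named as an enum and routed by a single function. Every name here is illustrative, not existing repo code; the string values match the `pattern` field suggested later in Section 4.1.
```python
from enum import Enum

class ConfirmationPattern(Enum):
    PRE_EXECUTION_GATE = "pre_execution_gate"        # Pattern 1: ask first, then act
    POST_EXECUTION_REVIEW = "post_execution_review"  # Pattern 2: act, then show and allow rollback
    CONFIDENCE_THRESHOLD = "confidence_threshold"    # Pattern 3: score, then decide

def recommend_pattern(
    risk_score: float,
    reversible: bool,
    crisis_sensitive: bool,
    allow_post_review: bool = False,  # Pattern 2 stays an explicit opt-in (see 3.3 and Phase 4)
) -> ConfirmationPattern:
    """Illustrative routing that follows this report's recommendations."""
    if crisis_sensitive or risk_score >= 0.6:
        return ConfirmationPattern.PRE_EXECUTION_GATE
    if allow_post_review and reversible and risk_score < 0.1:
        return ConfirmationPattern.POST_EXECUTION_REVIEW
    return ConfirmationPattern.CONFIDENCE_THRESHOLD
```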
---
## 2. What Hermes Already Has
### 2.1 Existing Pre-Execution Gate for Dangerous Terminal Commands
`tools/approval.py` already implements a real pre-execution confirmation path for dangerous shell commands.
Observed components:
- `DANGEROUS_PATTERNS`
- `detect_dangerous_command()`
- `prompt_dangerous_approval()`
- `check_dangerous_command()`
- gateway queueing and resolution support in the same module
This is already Pattern 1.
Current behavior:
- dangerous terminal commands are detected before execution
- the user can allow once / session / always / deny
- gateway sessions can block until approval resolves
This is a strong foundation, but it is limited to a subset of terminal commands.
### 2.2 Partial Confidence Threshold via Smart Approvals
Hermes also already has a partial Pattern 3.
Observed component:
- `_smart_approve()` in `tools/approval.py`
Current behavior:
- only runs **after** a command has already been flagged by dangerous-pattern detection
- uses the auxiliary LLM to decide:
- approve
- deny
- escalate
This means Hermes has a confidence-threshold mechanism, but only for **already-flagged dangerous terminal commands**.
What it does not yet do:
- score all tool calls
- classify non-terminal tools
- distinguish crisis interventions from normal ops
- produce a shared risk model across the tool surface
### 2.3 Blocking Approval UX in Gateway
`gateway/run.py` already routes `/approve` and `/deny` into the blocking approval path.
This means the infrastructure for a true human confirmation firewall already exists in messaging contexts.
That is important because the missing work is not "invent human approval from zero."
The missing work is:
- expand the scope from dangerous shell commands to **all tool calls that matter**
- make the routing policy explicit and inspectable
---
## 3. What Hermes Still Lacks
### 3.1 No Universal Tool-Call Risk Assessment
The current approval system is command-pattern-centric.
It is not yet a tool-call firewall.
Missing capability:
- before dispatch, every tool call should receive a structured assessment:
- tool name
- side-effect class
- reversibility
- human-impact potential
- crisis relevance
- confidence score
- recommended confirmation pattern
Natural insertion point:
- `model_tools.handle_function_call()`
That function already sits at the central dispatch boundary.
It is the right place to add a pre-dispatch classifier.
### 3.2 No Hard Crisis Gate for Outbound Intervention
Issue #878 explicitly recommends:
- Pattern 1 for crisis interventions
- never auto-respond to suicidal content
That recommendation is not yet codified as a global firewall rule.
Missing rule:
- if a tool call would directly intervene in a crisis context or send outward guidance in response to suicidal content, it must require explicit human confirmation before execution
Examples that should hard-gate:
- outbound `send_message` content aimed at a suicidal user
- any future tool that places calls, escalates emergencies, or contacts third parties about a crisis
- any autonomous action that claims a person should or should not take a life-safety step
### 3.3 No First-Class Post-Execution Review Policy
Hermes has approval and denial, but it does not yet have a formal policy for when Pattern 2 is acceptable.
Without a policy, post-execution review tends to get used implicitly rather than intentionally.
That is risky.
Hermes should define Pattern 2 narrowly:
- only for actions that are both low-risk and reversible
- only when the system can show the human exactly what happened
- never for crisis, finance, destructive config, or sensitive comms
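If Hermes ever adds such a lane, a minimal record shape could make the "show exactly what happened, keep a rollback handle" constraints concrete. Everything below is an illustrative assumption, not existing repo code.
```python
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Callable, Dict, Optional

@dataclass
class PostExecutionReview:
    """One executed, reversible action awaiting human review."""
    tool_name: str
    arguments: Dict[str, Any]
    result_summary: str
    rollback: Optional[Callable[[], None]] = None  # a real undo handle, or Pattern 2 is the wrong lane
    executed_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

def submit_for_review(review: PostExecutionReview, queue: list) -> None:
    # Pattern 2 is only valid when rollback actually exists.
    if review.rollback is None:
        raise ValueError("post-execution review requires a rollback handle")
    queue.append(review)
```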
---
## 4. Recommended Architecture for Hermes
### 4.1 Add a Tool-Call Assessment Layer
Add a pre-dispatch assessment object for every tool call.
Suggested shape:
```python
from dataclasses import dataclass

@dataclass
class ToolCallAssessment:
tool_name: str
risk_score: float # 0.0 to 1.0
confidence: float # confidence in the assessment itself
pattern: str # pre_execution_gate | post_execution_review | confidence_threshold
requires_human: bool
reasons: list[str]
reversible: bool
crisis_sensitive: bool
```
Suggested execution point:
- inside `model_tools.handle_function_call()` before `orchestrator.dispatch()`
Why here:
- one place covers all tools
- one place can emit traces
- one place can remain model-agnostic
- one place lets plugins observe or override the assessment
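A sketch of what that pre-dispatch hook could look like, reusing the `ToolCallAssessment` shape from 4.1. The real signatures of `handle_function_call()` and `orchestrator.dispatch()` are not reproduced here: `dispatch` and `confirm` are passed in as stand-ins, and the scorer is a placeholder.
```python
from typing import Any, Callable, Dict

def assess_tool_call(tool_name: str, arguments: Dict[str, Any]) -> "ToolCallAssessment":
    """Placeholder scorer; a real version combines the factors in Section 5."""
    risky = tool_name in {"terminal", "send_message", "cronjob"}  # illustrative shortcut
    return ToolCallAssessment(
        tool_name=tool_name,
        risk_score=0.7 if risky else 0.1,
        confidence=0.9,
        pattern="pre_execution_gate" if risky else "confidence_threshold",
        requires_human=risky,
        reasons=["external side effects"] if risky else ["read-only or local"],
        reversible=not risky,
        crisis_sensitive=False,
    )

def guarded_dispatch(
    tool_name: str,
    arguments: Dict[str, Any],
    dispatch: Callable[[str, Dict[str, Any]], Any],   # stand-in for orchestrator.dispatch()
    confirm: Callable[["ToolCallAssessment"], bool],  # stand-in for the blocking approval path
) -> Any:
    """Wrap the existing dispatch boundary with a pre-dispatch assessment."""
    assessment = assess_tool_call(tool_name, arguments)
    if assessment.requires_human and not confirm(assessment):
        return {"error": "denied by human confirmation firewall", "tool": tool_name}
    return dispatch(tool_name, arguments)
```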
### 4.2 Classify Tool Calls by Side-Effect Class
Suggested first-pass taxonomy:
#### A. Read-only
Examples:
- `read_file`
- `search_files`
- `browser_snapshot`
- `browser_console` read-only inspection
Pattern:
- confidence threshold
- almost always auto-execute
- human confirmation normally unnecessary
#### B. Local reversible edits
Examples:
- `patch`
- `write_file`
- `todo`
Pattern:
- confidence threshold
- human confirmation only when risk score rises because of path sensitivity or scope breadth
#### C. External side effects
Examples:
- `send_message`
- `cronjob`
- `delegate_task`
- smart-home actuation tools
Pattern:
- confidence threshold by default
- pre-execution gate when score exceeds threshold or when context is sensitive
#### D. Critical / destructive / crisis-sensitive
Examples:
- dangerous `terminal`
- financial actions
- deletion / kill / restart / deployment in sensitive paths
- outbound crisis intervention
Pattern:
- pre-execution gate
- never auto-execute on confidence alone
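One way to encode the taxonomy is a plain lookup table the scorer can consult. The tool names come from the examples above; the class assignments and base scores are first-pass assumptions to be tuned.
```python
# Base side-effect class per tool, following the A-D taxonomy above.
SIDE_EFFECT_CLASS = {
    # A. Read-only
    "read_file": "read_only",
    "search_files": "read_only",
    "browser_snapshot": "read_only",
    # B. Local reversible edits
    "patch": "local_edit",
    "write_file": "local_edit",
    "todo": "local_edit",
    # C. External side effects
    "send_message": "external",
    "cronjob": "external",
    "delegate_task": "external",
    # D. Critical / destructive / crisis-sensitive
    "terminal": "critical",
}

BASE_RISK = {"read_only": 0.05, "local_edit": 0.25, "external": 0.5, "critical": 0.8}

def base_risk(tool_name: str) -> float:
    # Unknown tools default to the external class until someone classifies them.
    return BASE_RISK[SIDE_EFFECT_CLASS.get(tool_name, "external")]
```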
### 4.3 Crisis Override Rule
Add a hard override:
```text
If tool call is crisis-sensitive AND outbound or irreversible:
requires_human = True
pattern = pre_execution_gate
```
This is the most important rule in the issue.
The model may draft the message.
The human must confirm before the system sends it.
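Expressed against the `ToolCallAssessment` shape from 4.1, the override is only a few lines. This is a sketch; the `outbound` flag would have to come from the tool classification.
```python
def apply_crisis_override(assessment: "ToolCallAssessment", outbound: bool) -> "ToolCallAssessment":
    """Hard rule: crisis-sensitive actions that leave the system, or that cannot be
    undone, always require a human, regardless of score or confidence."""
    if assessment.crisis_sensitive and (outbound or not assessment.reversible):
        assessment.requires_human = True
        assessment.pattern = "pre_execution_gate"
        assessment.reasons.append("crisis override: outbound or irreversible")
    return assessment
```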
### 4.4 Use Confidence Threshold for Normal Ops
For non-crisis operations, use Pattern 3.
Suggested logic:
- low risk + high assessment confidence -> auto-execute
- medium risk or medium confidence -> ask human
- high risk -> always ask human
Key point:
- confidence is not just "how sure the LLM is"
- confidence should combine:
- tool type certainty
- argument clarity
- path sensitivity
- external side effects
- crisis indicators
---
## 5. Recommended Initial Scoring Factors
A simple initial scorer is enough.
It does not need to be fancy.
Suggested factors:
### 5.1 Tool class risk
- read-only tools: very low base risk
- local mutation tools: moderate base risk
- external communication / automation tools: higher base risk
- shell execution: variable, often high
### 5.2 Target sensitivity
Examples:
- `/tmp` or local scratch paths -> lower
- repo files under git -> medium
- system config, credentials, secrets, gateway lifecycle -> high
- human-facing channels -> high if message content is sensitive
### 5.3 Reversibility
- reversible -> lower
- difficult but possible to undo -> medium
- practically irreversible -> high
### 5.4 Human-impact content
- no direct human impact -> low
- administrative impact -> medium
- crisis / safety / emotional intervention -> critical
### 5.5 Context certainty
- arguments are explicit and narrow -> higher confidence
- arguments are vague, inferred, or broad -> lower confidence
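A deliberately small scorer that combines some of these factors, building on the `base_risk()` lookup sketched in 4.2. The weights, markers, and argument keys are assumptions; the point is that every adjustment is explicit and lands in `reasons` for inspection.
```python
from typing import Any, Dict, List, Tuple

SENSITIVE_MARKERS = (".ssh", "secrets", "credentials", "/etc/", "gateway")  # assumed list

def score_tool_call(tool_name: str, arguments: Dict[str, Any]) -> Tuple[float, float, List[str]]:
    """Return (risk_score, confidence, reasons) on the 0.0-1.0 scales used by ToolCallAssessment."""
    reasons: List[str] = []

    # 5.1 tool class risk (base_risk() is the lookup sketched in 4.2)
    risk = base_risk(tool_name)
    reasons.append(f"base risk {risk:.2f} for tool class of {tool_name}")

    # 5.2 target sensitivity
    target = str(arguments.get("path") or arguments.get("command") or "")
    if any(marker in target for marker in SENSITIVE_MARKERS):
        risk = min(1.0, risk + 0.3)
        reasons.append("sensitive target")

    # 5.5 context certainty: vague or missing arguments lower assessment confidence
    confidence = 0.9 if target or arguments else 0.5
    if not arguments:
        reasons.append("no structured arguments")

    return risk, confidence, reasons
```
Reversibility (5.3) and human-impact content (5.4) would be added the same way, each contributing a score adjustment and a reason string.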
---
## 6. Implementation Plan
### Phase 1: Assessment Without Behavior Change
Goal:
- score all tool calls
- log assessment decisions
- emit traces for review
- do not yet block new tool categories
Files to touch:
- `tools/approval.py`
- `model_tools.py`
- tests for assessment coverage
Output:
- risk/confidence trace for every tool call
- pattern recommendation for every tool call
Why first:
- lets us calibrate before changing runtime behavior
- avoids breaking existing workflows blindly
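A Phase 1 trace can be as simple as one structured log line per tool call, emitted whether or not anything would ever be blocked. The logger name is an assumption.
```python
import json
import logging
from dataclasses import asdict

logger = logging.getLogger("hermes.tool_firewall")  # assumed logger name

def log_assessment(assessment: "ToolCallAssessment") -> None:
    """Emit one structured trace per tool call; Phase 1 never blocks anything."""
    logger.info("tool_call_assessment %s", json.dumps(asdict(assessment), sort_keys=True))
```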
### Phase 2: Hard-Gate Crisis-Sensitive Outbound Actions
Goal:
- enforce Pattern 1 for crisis interventions
Likely surfaces:
- `send_message`
- any future telephony / call / escalation tools
- other tools with direct human intervention side effects
Rule:
- never auto-send crisis intervention content without human confirmation
### Phase 3: General Confidence Threshold for Normal Ops
Goal:
- apply Pattern 3 to all tool calls
- auto-run clearly safe actions
- escalate ambiguous or medium-risk actions
Likely thresholds:
- score < 0.25 -> auto
- score 0.25 to 0.60 -> confirm if confidence is weak
- score > 0.60 -> confirm
- crisis-sensitive -> always confirm
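Sketched as code against the `ToolCallAssessment` shape, with the cutoffs above and an assumed 0.75 line for "weak" assessment confidence.
```python
def route(assessment: "ToolCallAssessment") -> str:
    """Map an assessment to an execution route using the Phase 3 thresholds."""
    if assessment.crisis_sensitive:
        return "confirm"                      # always confirm, regardless of score
    if assessment.risk_score > 0.60:
        return "confirm"
    if assessment.risk_score >= 0.25:
        return "confirm" if assessment.confidence < 0.75 else "auto"
    return "auto"
```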
### Phase 4: Optional Post-Execution Review Lane
Goal:
- allow Pattern 2 only for explicitly reversible operations
Examples:
- maybe low-risk messaging drafts saved locally
- maybe reversible UI actions in specific environments
Important:
- this phase is optional
- Hermes should not rely on Pattern 2 for safety-critical flows
---
## 7. Verification Criteria for the Future Implementation
The eventual implementation should prove all of the following:
1. every tool call receives a scored assessment before dispatch
2. crisis-sensitive outbound actions always require human confirmation
3. dangerous terminal commands still preserve their current pre-execution gate
4. clearly safe read-only tool calls are not slowed by unnecessary prompts
5. assessment traces can be inspected after a run
6. approval decisions remain session-safe across CLI and gateway contexts
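Criteria 1, 2, and 4 translate directly into tests once the assessment layer exists. A pytest-style sketch using the illustrative helpers from Sections 4 and 6:
```python
def test_crisis_outbound_requires_human():
    assessment = assess_tool_call("send_message", {"text": "crisis intervention draft"})
    assessment.crisis_sensitive = True
    assessment = apply_crisis_override(assessment, outbound=True)
    assert assessment.requires_human
    assert assessment.pattern == "pre_execution_gate"

def test_read_only_calls_are_not_blocked():
    assessment = assess_tool_call("read_file", {"path": "README.md"})
    assert not assessment.requires_human
    assert route(assessment) == "auto"
```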
---
## 8. Concrete Recommendations
### Recommendation 1
Do **not** replace the current dangerous-command approval path.
Generalize above it.
Why:
- existing terminal Pattern 1 already works
- this is the strongest piece of the current firewall
### Recommendation 2
Add a universal scorer in `model_tools.handle_function_call()`.
Why:
- that is the first point where Hermes knows the tool name and structured arguments
- it is the cleanest place to classify all tool calls uniformly
### Recommendation 3
Treat crisis-sensitive outbound intervention as a separate safety class.
Why:
- issue #878 explicitly calls for Pattern 1 here
- this matches Timmy's SOUL-level safety requirements
### Recommendation 4
Ship scoring traces before enforcement expansion.
Why:
- you cannot tune thresholds you cannot inspect
- false positives will otherwise frustrate normal usage
### Recommendation 5
Use Pattern 3 as the default policy for normal operations.
Why:
- full manual confirmation on every tool call is too expensive
- full autonomy is too risky
- Pattern 3 is the practical middle ground
---
## 9. Bottom Line
Hermes should implement a **two-track human confirmation firewall**:
1. **Pattern 1: Pre-Execution Gate**
- crisis interventions
- destructive terminal actions
- irreversible or safety-critical tool calls
2. **Pattern 3: Confidence Threshold**
- all ordinary tool calls
- driven by a universal tool-call assessment layer
- integrated at the central dispatch boundary
Pattern 2 should remain optional and narrow.
It is not the primary answer for Hermes.
The repo already contains the beginnings of this system.
The next step is not new theory.
It is to turn the existing approval path into a true **tool-call-wide human confirmation firewall**.
---
## References
- Issue #878 — Human Confirmation Firewall Implementation Patterns
- Issue #659 — Critical Research Tasks
- `tools/approval.py` — current dangerous-command approval flow and smart approvals
- `model_tools.py` — central tool dispatch boundary
- `gateway/run.py` — blocking approval handling for messaging sessions

View File

@@ -1,150 +0,0 @@
from __future__ import annotations
import json
from pathlib import Path
import pytest
from agent import agent_card as mod
DEFAULT_DESCRIPTION = "Sovereign AI agent — orchestration, code, research"
def _set_base_context(monkeypatch, *, name: str = "Timmy", description: str = DEFAULT_DESCRIPTION, url: str = "https://timmy.local:9443/a2a", skills=None):
monkeypatch.setattr(mod, "load_config", lambda: {"agent": {"name": name, "description": description}})
monkeypatch.setattr(
mod,
"_load_skills",
lambda: list(
skills
if skills is not None
else [
mod.AgentSkill(
id="code",
name="Code Implementation",
description="Implement and patch code",
tags=["python", "gitea"],
)
]
),
)
monkeypatch.setenv("HERMES_A2A_PUBLIC_URL", url)
monkeypatch.delenv("HERMES_AGENT_NAME", raising=False)
monkeypatch.delenv("AGENT_NAME", raising=False)
monkeypatch.delenv("HERMES_AGENT_DESCRIPTION", raising=False)
monkeypatch.delenv("AGENT_DESCRIPTION", raising=False)
def test_build_agent_card_matches_issue_802_schema(monkeypatch):
_set_base_context(monkeypatch)
card = mod.build_agent_card()
payload = card.to_dict()
assert payload["name"] == "Timmy"
assert payload["description"] == DEFAULT_DESCRIPTION
assert payload["url"] == "https://timmy.local:9443/a2a"
assert payload["capabilities"] == {
"streaming": True,
"pushNotifications": False,
"stateTransitionHistory": True,
}
assert payload["defaultInputModes"] == ["text/plain", "application/json"]
assert payload["defaultOutputModes"] == ["text/plain", "application/json"]
assert payload["skills"][0]["tags"] == ["python", "gitea"]
assert mod.validate_agent_card(payload) == []
@pytest.mark.parametrize(
("name", "url"),
[
("Timmy", "https://timmy.local:9443/a2a"),
("Allegro", "https://allegro.local:9443/a2a"),
("Ezra", "https://ezra.local:9443/a2a"),
],
)
def test_build_agent_card_supports_fleet_members(monkeypatch, name, url):
_set_base_context(monkeypatch, name=name, url=url, skills=[])
payload = mod.build_agent_card().to_dict()
assert payload["name"] == name
assert payload["url"] == url
assert mod.validate_agent_card(payload) == []
def test_load_skills_collects_tags_and_category(monkeypatch, tmp_path):
skill_root = tmp_path / "skills"
skill_dir = skill_root / "code-implementation"
skill_dir.mkdir(parents=True)
(skill_dir / "SKILL.md").write_text(
"""---
name: Code Implementation
description: Implement and patch code
tags: [python, gitea]
category: discovery
---
# Code Implementation
""",
encoding="utf-8",
)
monkeypatch.setattr(mod, "get_all_skills_dirs", lambda: [skill_root])
monkeypatch.setattr(mod, "get_disabled_skill_names", lambda: set())
monkeypatch.setattr(mod, "skill_matches_platform", lambda _frontmatter: True)
skills = mod._load_skills()
assert len(skills) == 1
assert skills[0].id == "code-implementation"
assert skills[0].name == "Code Implementation"
assert skills[0].description == "Implement and patch code"
assert skills[0].tags == ["python", "gitea", "discovery"]
def test_validate_agent_card_reports_schema_errors():
errors = mod.validate_agent_card(
{
"name": "",
"description": "",
"url": "timmy.local",
"version": "",
"capabilities": {"streaming": True},
"skills": [{"id": "", "name": "", "tags": "python"}],
"defaultInputModes": ["text/plain"],
"defaultOutputModes": ["plain"],
"metadata": [],
}
)
assert any("name must be a non-empty string" in error for error in errors)
assert any("url must be an absolute http/https URL" in error for error in errors)
assert any("capabilities.pushNotifications" in error for error in errors)
assert any("skills[0] missing id" in error for error in errors)
assert any("skills[0].tags must be a list" in error for error in errors)
assert any("defaultInputModes must include application/json" in error for error in errors)
assert any("defaultOutputModes entries must be MIME types" in error for error in errors)
assert any("metadata must be an object" in error for error in errors)
def test_get_agent_card_json_emits_valid_json(monkeypatch):
_set_base_context(monkeypatch)
payload = json.loads(mod.get_agent_card_json())
assert payload["name"] == "Timmy"
assert mod.validate_agent_card(payload) == []
def test_main_validate_prints_card(monkeypatch, capsys):
_set_base_context(monkeypatch)
exit_code = mod.main(["--validate"])
captured = capsys.readouterr()
assert exit_code == 0
payload = json.loads(captured.out)
assert payload["url"] == "https://timmy.local:9443/a2a"
assert captured.err == ""