Compare commits
1 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ab1b196160 |
2
agent/a2a/__init__.py
Normal file
2
agent/a2a/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
"""A2A (Agent-to-Agent) authentication and security."""
|
||||
from .mtls import FleetCA, AgentCert, verify_peer, generate_fleet_certs
|
||||
260
agent/a2a/mtls.py
Normal file
260
agent/a2a/mtls.py
Normal file
@@ -0,0 +1,260 @@
|
||||
"""
|
||||
mtls.py — Mutual TLS authentication for agent-to-agent communication.
|
||||
|
||||
Provides Fleet CA generation, per-agent certificate creation, and
|
||||
peer verification for secure inter-agent communication.
|
||||
|
||||
Usage:
|
||||
# Generate fleet CA + certs for all agents
|
||||
python3 -m agent.a2a.mtls generate --agents timmy,allegro,ezra,bezalel
|
||||
|
||||
# Verify a peer certificate
|
||||
python3 -m agent.a2a.mtls verify --cert /path/to/peer.pem --ca /path/to/ca.pem
|
||||
|
||||
# Check cert expiry
|
||||
python3 -m agent.a2a.mtls check --cert /path/to/cert.pem
|
||||
"""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional, Dict
|
||||
|
||||
CERTS_DIR = Path.home() / ".hermes" / "a2a" / "certs"
|
||||
CA_DIR = Path.home() / ".hermes" / "a2a" / "ca"
|
||||
|
||||
|
||||
@dataclass
class CertInfo:
    """Parsed X.509 certificate details as reported by `openssl x509`."""
    subject: str
    issuer: str
    not_before: datetime
    not_after: datetime
    serial: str
    fingerprint: str
    is_ca: bool = False
    days_remaining: int = 0

    def is_expired(self) -> bool:
        """Return True once the current wall-clock time has passed `not_after`."""
        return not datetime.now() <= self.not_after

    def is_expiring_soon(self, days: int = 30) -> bool:
        """Return True when fewer than `days` days of validity remain."""
        return days > self.days_remaining
|
||||
|
||||
|
||||
@dataclass
class FleetCA:
    """Fleet Certificate Authority.

    Wraps an openssl-backed CA: a self-signed root certificate plus its
    private key, used to sign per-agent leaf certificates.
    """
    ca_dir: Path   # directory holding ca.pem / ca-key.pem
    ca_cert: Path  # path to the CA certificate (ca.pem)
    ca_key: Path   # path to the CA private key (ca-key.pem)

    @classmethod
    def init(cls, ca_dir: Optional[Path] = None) -> "FleetCA":
        """Initialize or load fleet CA.

        Creates the CA directory if needed and generates a fresh self-signed
        root when no ca.pem exists yet. Defaults to ~/.hermes/a2a/ca.
        """
        ca_dir = ca_dir or CA_DIR
        ca_dir.mkdir(parents=True, exist_ok=True)
        ca_cert = ca_dir / "ca.pem"
        ca_key = ca_dir / "ca-key.pem"

        # Only generate when ca.pem is missing; an existing CA is reused.
        # NOTE(review): assumes ca-key.pem exists whenever ca.pem does — confirm.
        if not ca_cert.exists():
            cls._generate_ca(ca_cert, ca_key)

        return cls(ca_dir=ca_dir, ca_cert=ca_cert, ca_key=ca_key)

    @staticmethod
    def _generate_ca(ca_cert: Path, ca_key: Path) -> None:
        """Generate a self-signed CA certificate.

        Shells out to openssl; raises CalledProcessError on failure
        (check=True) and FileNotFoundError if openssl is not installed.
        """
        # Generate CA key (4096-bit RSA)
        subprocess.run([
            "openssl", "genrsa", "-out", str(ca_key), "4096"
        ], check=True, capture_output=True)

        # Generate CA cert (10 year validity) with CA:TRUE and signing-only
        # key usage so leaf certs can be verified against it.
        subprocess.run([
            "openssl", "req", "-new", "-x509",
            "-key", str(ca_key),
            "-out", str(ca_cert),
            "-days", "3650",
            "-subj", "/CN=Hermes Fleet CA/O=Timmy Foundation/C=US",
            "-addext", "basicConstraints=critical,CA:TRUE",
            "-addext", "keyUsage=critical,keyCertSign,cRLSign",
        ], check=True, capture_output=True)

    def issue_cert(self, agent_name: str, validity_days: int = 365) -> tuple:
        """Issue a certificate for an agent.

        Generates a 2048-bit key, a CSR, then signs the CSR with the fleet
        CA, adding SANs for the agent name, localhost, and 127.0.0.1 so the
        cert works for both server and client TLS roles.

        Returns (cert_path, key_path).
        """
        # Per-agent directory under CERTS_DIR (module constant), not ca_dir.
        cert_dir = CERTS_DIR / agent_name
        cert_dir.mkdir(parents=True, exist_ok=True)
        cert_path = cert_dir / "cert.pem"
        key_path = cert_dir / "key.pem"
        csr_path = cert_dir / "csr.pem"

        # Generate key
        subprocess.run([
            "openssl", "genrsa", "-out", str(key_path), "2048"
        ], check=True, capture_output=True)

        # Generate CSR
        subprocess.run([
            "openssl", "req", "-new",
            "-key", str(key_path),
            "-out", str(csr_path),
            "-subj", f"/CN={agent_name}/O=Hermes Fleet/OU={agent_name}",
        ], check=True, capture_output=True)

        # Sign with CA — x509 extensions are passed via a temp -extfile since
        # `openssl x509 -req` has no -addext equivalent on older versions.
        extensions = (
            "basicConstraints=CA:FALSE\n"
            "keyUsage=digitalSignature,keyEncipherment\n"
            "extendedKeyUsage=serverAuth,clientAuth\n"
            f"subjectAltName=DNS:{agent_name},DNS:localhost,IP:127.0.0.1"
        )
        ext_file = cert_dir / "ext.cnf"
        ext_file.write_text(extensions)

        subprocess.run([
            "openssl", "x509", "-req",
            "-in", str(csr_path),
            "-CA", str(self.ca_cert),
            "-CAkey", str(self.ca_key),
            "-CAcreateserial",
            "-out", str(cert_path),
            "-days", str(validity_days),
            "-extfile", str(ext_file),
        ], check=True, capture_output=True)

        # Clean up CSR and ext file — only cert.pem and key.pem remain.
        csr_path.unlink(missing_ok=True)
        ext_file.unlink(missing_ok=True)

        return cert_path, key_path

    def get_ca_bundle(self) -> Path:
        """Return path to CA certificate for distribution."""
        return self.ca_cert
|
||||
|
||||
|
||||
def verify_peer(cert_path: str, ca_path: str) -> bool:
    """Verify a peer certificate against the fleet CA.

    Shells out to `openssl verify`; returns False on any failure, including
    a missing openssl binary, unreadable files, or an untrusted cert.
    """
    cmd = ["openssl", "verify", "-CAfile", ca_path, cert_path]
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True)
    except Exception:
        return False
    if proc.returncode != 0:
        return False
    return "OK" in proc.stdout
|
||||
|
||||
|
||||
def get_cert_info(cert_path: str) -> Optional[CertInfo]:
|
||||
"""Extract certificate information."""
|
||||
try:
|
||||
result = subprocess.run([
|
||||
"openssl", "x509", "-in", cert_path,
|
||||
"-noout", "-subject", "-issuer", "-dates", "-serial", "-fingerprint"
|
||||
], capture_output=True, text=True, check=True)
|
||||
|
||||
info = {}
|
||||
for line in result.stdout.strip().split("\n"):
|
||||
if "=" in line:
|
||||
key, _, val = line.partition("=")
|
||||
info[key.strip().lower().replace(" ", "_")] = val.strip()
|
||||
|
||||
not_before = datetime.strptime(info.get("not_before", ""), "%b %d %H:%M:%S %Y %Z")
|
||||
not_after = datetime.strptime(info.get("not_after", ""), "%b %d %H:%M:%S %Y %Z")
|
||||
days_remaining = (not_after - datetime.now()).days
|
||||
|
||||
return CertInfo(
|
||||
subject=info.get("subject", ""),
|
||||
issuer=info.get("issuer", ""),
|
||||
not_before=not_before,
|
||||
not_after=not_after,
|
||||
serial=info.get("serial", ""),
|
||||
fingerprint=info.get("sha1_fingerprint", info.get("sha256_fingerprint", "")),
|
||||
days_remaining=days_remaining,
|
||||
)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def generate_fleet_certs(agents: List[str], ca_dir: Optional[Path] = None, validity_days: int = 365) -> Dict[str, tuple]:
    """Generate certificates for all fleet agents.

    Args:
        agents: Agent names to issue certs for.
        ca_dir: CA directory (default: ~/.hermes/a2a/ca via FleetCA.init).
        validity_days: Per-agent certificate lifetime in days.

    Returns:
        Dict of agent_name -> (cert_path, key_path) as strings.
    """
    # Hoisted out of the loop — the original re-imported shutil per agent.
    import shutil

    ca = FleetCA.init(ca_dir)
    results: Dict[str, tuple] = {}

    for agent in agents:
        cert_path, key_path = ca.issue_cert(agent, validity_days)
        results[agent] = (str(cert_path), str(key_path))
        print(f" {agent}: cert={cert_path}, key={key_path}")

    # Copy the CA cert into each agent's directory so nodes can verify
    # peers without access to the central CA directory.
    for agent in agents:
        agent_ca = CERTS_DIR / agent / "ca.pem"
        if not agent_ca.exists():
            shutil.copy2(ca.ca_cert, agent_ca)

    return results
|
||||
|
||||
|
||||
def main():
    """CLI entry point: generate / verify / check certificates."""
    import argparse

    parser = argparse.ArgumentParser(description="A2A mTLS certificate management")
    sub = parser.add_subparsers(dest="command")

    # Sub-command: generate fleet CA + per-agent certs
    gen = sub.add_parser("generate", help="Generate fleet certificates")
    gen.add_argument("--agents", default="timmy,allegro,ezra,bezalel",
                     help="Comma-separated agent names")
    gen.add_argument("--days", type=int, default=365, help="Validity in days")

    # Sub-command: verify a peer cert against the CA
    ver = sub.add_parser("verify", help="Verify a peer certificate")
    ver.add_argument("--cert", required=True)
    ver.add_argument("--ca", required=True)

    # Sub-command: inspect a certificate
    chk = sub.add_parser("check", help="Check certificate info")
    chk.add_argument("--cert", required=True)

    args = parser.parse_args()

    if args.command == "generate":
        names = [part.strip() for part in args.agents.split(",")]
        print(f"Generating certs for: {', '.join(names)}")
        issued = generate_fleet_certs(names, validity_days=args.days)
        print(f"\nGenerated {len(issued)} certificates")

    elif args.command == "verify":
        verdict = "PASS" if verify_peer(args.cert, args.ca) else "FAIL"
        print(f"Verification: {verdict}")

    elif args.command == "check":
        info = get_cert_info(args.cert)
        if info is None:
            print("Could not read certificate")
        else:
            print(f"Subject: {info.subject}")
            print(f"Issuer: {info.issuer}")
            print(f"Valid: {info.not_before} to {info.not_after}")
            print(f"Days remaining: {info.days_remaining}")
            print(f"Expired: {info.is_expired()}")

    else:
        parser.print_help()


if __name__ == "__main__":
    main()
|
||||
@@ -1,302 +0,0 @@
|
||||
"""Self-Modifying Prompt Engine — agent learns from its own failures.
|
||||
|
||||
Analyzes session transcripts, identifies failure patterns, and generates
|
||||
prompt patches to prevent future failures.
|
||||
|
||||
The loop: fail → analyze → rewrite → retry → verify improvement.
|
||||
|
||||
Usage:
|
||||
from agent.self_modify import PromptLearner
|
||||
learner = PromptLearner()
|
||||
patches = learner.analyze_session(session_id)
|
||||
learner.apply_patches(patches)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
HERMES_HOME = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes"))
|
||||
PATCHES_DIR = HERMES_HOME / "prompt_patches"
|
||||
ROLLBACK_DIR = HERMES_HOME / "prompt_rollback"
|
||||
|
||||
|
||||
@dataclass
class FailurePattern:
    """A detected failure pattern in session transcripts."""
    # One of the FAILURE_SIGNALS keys (retry_loop, timeout, hallucination,
    # context_loss, tool_failure).
    pattern_type: str  # retry_loop, timeout, error_hallucination, context_loss
    # Human-readable description copied from FAILURE_SIGNALS.
    description: str
    # How many messages matched this pattern in the analyzed session.
    frequency: int
    # Up to 3 truncated (200-char) message excerpts that matched.
    example_messages: List[str] = field(default_factory=list)
    # Prompt rule from PROMPT_FIXES proposed to prevent recurrence.
    suggested_fix: str = ""
|
||||
|
||||
|
||||
@dataclass
class PromptPatch:
    """A modification to the system prompt based on failure analysis."""
    # Unique id: "<failure_type>-<unix timestamp>".
    id: str
    # FAILURE_SIGNALS key this patch addresses.
    failure_type: str
    # Rule text being replaced (placeholder when no prior rule existed).
    original_rule: str
    # New rule text appended to the system prompt.
    new_rule: str
    # Heuristic confidence in [0, 1], derived from pattern frequency.
    confidence: float
    # Unix timestamp set when the patch is written to the prompt file.
    applied_at: Optional[float] = None
    # True once the patch has been rolled back.
    reverted: bool = False
|
||||
|
||||
|
||||
# Failure detection patterns: maps failure_type -> {patterns, description}.
# Each "patterns" entry is a case-insensitive regex matched against assistant
# and tool message content in PromptLearner.analyze_session.
FAILURE_SIGNALS = {
    "retry_loop": {
        "patterns": [
            r"(?i)retry(?:ing)?\s*(?:attempt|again)",
            r"(?i)failed.*retrying",
            r"(?i)error.*again",
            r"(?i)attempt\s+\d+\s*(?:of|/)\s*\d+",
        ],
        "description": "Agent stuck in retry loop",
    },
    "timeout": {
        "patterns": [
            r"(?i)timed?\s*out",
            r"(?i)deadline\s+exceeded",
            r"(?i)took\s+(?:too\s+)?long",
        ],
        "description": "Operation timed out",
    },
    "hallucination": {
        "patterns": [
            r"(?i)i\s+(?:don't|do\s+not)\s+(?:have|see|find)\s+(?:any|that|this)\s+(?:information|data|file)",
            r"(?i)the\s+file\s+doesn't\s+exist",
            r"(?i)i\s+(?:made|invented|fabricated)\s+(?:that\s+up|this)",
        ],
        "description": "Agent hallucinated or fabricated information",
    },
    "context_loss": {
        "patterns": [
            r"(?i)i\s+(?:don't|do\s+not)\s+(?:remember|recall|know)\s+(?:what|where|when|how)",
            r"(?i)could\s+you\s+remind\s+me",
            r"(?i)what\s+were\s+we\s+(?:doing|working|talking)\s+(?:on|about)",
        ],
        "description": "Agent lost context from earlier in conversation",
    },
    "tool_failure": {
        "patterns": [
            r"(?i)tool\s+(?:call|execution)\s+failed",
            r"(?i)command\s+not\s+found",
            r"(?i)permission\s+denied",
            r"(?i)no\s+such\s+file",
        ],
        "description": "Tool execution failed",
    },
}
|
||||
|
||||
# Prompt improvement templates: failure_type -> rule text appended to the
# system prompt by PromptLearner.apply_patches. Keys mirror FAILURE_SIGNALS.
PROMPT_FIXES = {
    "retry_loop": (
        "If an operation fails more than twice, stop retrying. "
        "Report the failure and ask the user for guidance. "
        "Do not enter retry loops — they waste tokens."
    ),
    "timeout": (
        "For operations that may take long, set a timeout and report "
        "progress. If an operation takes more than 30 seconds, report "
        "what you've done so far and ask if you should continue."
    ),
    "hallucination": (
        "If you cannot find information, say 'I don't know' or "
        "'I couldn't find that.' Never fabricate information. "
        "If a file doesn't exist, say so — don't guess its contents."
    ),
    "context_loss": (
        "When you need context from earlier in the conversation, "
        "use session_search to find it. Don't ask the user to repeat themselves."
    ),
    "tool_failure": (
        "If a tool fails, check the error message and try a different approach. "
        "Don't retry the exact same command — diagnose first."
    ),
}
|
||||
|
||||
|
||||
class PromptLearner:
    """Analyze session transcripts and generate prompt improvements.

    The learning loop is: analyze_session -> generate_patches ->
    apply_patches, with rollback_last as the escape hatch. Prompt backups
    go to ROLLBACK_DIR; applied-patch logs go to PATCHES_DIR.
    """

    def __init__(self):
        # Ensure state directories exist up front so later writes can't fail
        # on a missing parent.
        PATCHES_DIR.mkdir(parents=True, exist_ok=True)
        ROLLBACK_DIR.mkdir(parents=True, exist_ok=True)

    def analyze_session(self, session_data: dict) -> List[FailurePattern]:
        """Analyze a session for failure patterns.

        Scans assistant/tool message content against FAILURE_SIGNALS regexes
        and aggregates matches into one FailurePattern per failure type.

        Args:
            session_data: Session dict with 'messages' list.

        Returns:
            List of detected failure patterns.
        """
        messages = session_data.get("messages", [])
        patterns_found: Dict[str, FailurePattern] = {}

        for msg in messages:
            content = str(msg.get("content", ""))
            role = msg.get("role", "")

            # Only analyze assistant messages and tool results
            if role not in ("assistant", "tool"):
                continue

            for failure_type, config in FAILURE_SIGNALS.items():
                for pattern in config["patterns"]:
                    if re.search(pattern, content):
                        if failure_type not in patterns_found:
                            patterns_found[failure_type] = FailurePattern(
                                pattern_type=failure_type,
                                description=config["description"],
                                frequency=0,
                                suggested_fix=PROMPT_FIXES.get(failure_type, ""),
                            )
                        patterns_found[failure_type].frequency += 1
                        # Keep at most 3 short excerpts as evidence.
                        if len(patterns_found[failure_type].example_messages) < 3:
                            patterns_found[failure_type].example_messages.append(
                                content[:200]
                            )
                        break  # One match per message per type is enough

        return list(patterns_found.values())

    def generate_patches(self, patterns: List[FailurePattern],
                         min_confidence: float = 0.7) -> List[PromptPatch]:
        """Generate prompt patches from failure patterns.

        Confidence is a simple step function of frequency (>=3 -> 0.9,
        ==2 -> 0.75, ==1 -> 0.5); with the default threshold, a pattern
        must occur at least twice to produce a patch.

        Args:
            patterns: Detected failure patterns.
            min_confidence: Minimum confidence to generate a patch.

        Returns:
            List of prompt patches.
        """
        patches = []
        for pattern in patterns:
            # Confidence based on frequency
            if pattern.frequency >= 3:
                confidence = 0.9
            elif pattern.frequency >= 2:
                confidence = 0.75
            else:
                confidence = 0.5

            if confidence < min_confidence:
                continue

            # No template fix available for this type — nothing to apply.
            if not pattern.suggested_fix:
                continue

            patch = PromptPatch(
                id=f"{pattern.pattern_type}-{int(time.time())}",
                failure_type=pattern.pattern_type,
                original_rule="(missing — no existing rule for this pattern)",
                new_rule=pattern.suggested_fix,
                confidence=confidence,
            )
            patches.append(patch)

        return patches

    def apply_patches(self, patches: List[PromptPatch],
                      prompt_path: Optional[str] = None) -> int:
        """Apply patches to the system prompt.

        Backs up the current prompt to ROLLBACK_DIR, appends each new rule
        (deduplicated by exact substring match), and writes a JSON log of
        all considered patches to PATCHES_DIR.

        Args:
            patches: Patches to apply.
            prompt_path: Path to prompt file (default: ~/.hermes/system_prompt.md)

        Returns:
            Number of patches applied.
        """
        if prompt_path is None:
            prompt_path = str(HERMES_HOME / "system_prompt.md")

        prompt_file = Path(prompt_path)

        # Backup current prompt
        if prompt_file.exists():
            backup = ROLLBACK_DIR / f"{prompt_file.name}.{int(time.time())}.bak"
            backup.write_text(prompt_file.read_text())

        # Read current prompt
        current = prompt_file.read_text() if prompt_file.exists() else ""

        # Apply patches — skip rules already present verbatim in the prompt.
        applied = 0
        additions = []
        for patch in patches:
            if patch.new_rule not in current:
                additions.append(f"\n## Auto-learned: {patch.failure_type}\n{patch.new_rule}")
                patch.applied_at = time.time()
                applied += 1

        if additions:
            new_content = current + "\n".join(additions)
            prompt_file.write_text(new_content)

        # Log patches (written even when applied == 0, as an audit trail).
        patches_file = PATCHES_DIR / f"patches-{int(time.time())}.json"
        with open(patches_file, "w") as f:
            json.dump([p.__dict__ for p in patches], f, indent=2, default=str)

        logger.info("Applied %d prompt patches", applied)
        return applied

    def rollback_last(self, prompt_path: Optional[str] = None) -> bool:
        """Rollback to the most recent backup.

        Backups are selected by reverse-sorted filename; names embed integer
        unix timestamps, so lexicographic order matches chronological order
        while timestamps keep the same digit count.

        Args:
            prompt_path: Path to prompt file.

        Returns:
            True if rollback succeeded.
        """
        if prompt_path is None:
            prompt_path = str(HERMES_HOME / "system_prompt.md")

        backups = sorted(ROLLBACK_DIR.glob("*.bak"), reverse=True)
        if not backups:
            logger.warning("No backups to rollback to")
            return False

        latest = backups[0]
        Path(prompt_path).write_text(latest.read_text())
        logger.info("Rolled back to %s", latest.name)
        return True

    def learn_from_session(self, session_data: dict) -> Dict[str, Any]:
        """Full learning cycle: analyze → patch → apply.

        Args:
            session_data: Session dict.

        Returns:
            Summary of what was learned and applied.
        """
        patterns = self.analyze_session(session_data)
        patches = self.generate_patches(patterns)
        applied = self.apply_patches(patches)

        return {
            "patterns_detected": len(patterns),
            "patches_generated": len(patches),
            "patches_applied": applied,
            "patterns": [
                {"type": p.pattern_type, "frequency": p.frequency, "description": p.description}
                for p in patterns
            ],
        }
|
||||
5
ansible/roles/a2a-certs/defaults/main.yml
Normal file
5
ansible/roles/a2a-certs/defaults/main.yml
Normal file
@@ -0,0 +1,5 @@
|
||||
---
# Defaults for the a2a-certs role: destination directory on the managed node
# and the locally staged files to distribute (see tasks/main.yml).
a2a_certs_dir: "~/.hermes/a2a/certs"   # destination directory on the node
a2a_ca_cert_local: "files/ca.pem"      # fleet CA certificate (public)
a2a_cert_local: "files/cert.pem"       # per-agent certificate (public)
a2a_key_local: "files/key.pem"         # per-agent private key (keep secret)
|
||||
29
ansible/roles/a2a-certs/tasks/main.yml
Normal file
29
ansible/roles/a2a-certs/tasks/main.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
---
# Distribute A2A mTLS certificates to fleet nodes
- name: Ensure certs directory exists
  file:
    path: "{{ a2a_certs_dir }}"
    state: directory
    mode: '0700'  # owner-only: directory also holds the private key

- name: Copy CA certificate
  copy:
    src: "{{ a2a_ca_cert_local }}"
    dest: "{{ a2a_certs_dir }}/ca.pem"
    mode: '0644'

- name: Copy agent certificate
  copy:
    src: "{{ a2a_cert_local }}"
    dest: "{{ a2a_certs_dir }}/cert.pem"
    mode: '0644'

- name: Copy agent private key
  copy:
    src: "{{ a2a_key_local }}"
    dest: "{{ a2a_certs_dir }}/key.pem"
    mode: '0600'  # private key: owner read/write only

# Sanity check: fail the play if the distributed cert does not chain to the CA.
- name: Verify certificate against CA
  command: "openssl verify -CAfile {{ a2a_certs_dir }}/ca.pem {{ a2a_certs_dir }}/cert.pem"
  changed_when: false  # read-only check; never reports a change
|
||||
@@ -1,265 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Hermes MCP Server — expose hermes-agent tools to fleet peers.
|
||||
|
||||
Runs as a standalone MCP server that other agents can connect to
|
||||
and invoke hermes tools remotely.
|
||||
|
||||
Safe tools exposed:
|
||||
- terminal (safe commands only)
|
||||
- file_read, file_search
|
||||
- web_search, web_extract
|
||||
- session_search
|
||||
|
||||
NOT exposed (internal tools):
|
||||
- approval, delegate, memory, config
|
||||
|
||||
Usage:
|
||||
python -m tools.mcp_server --port 8081
|
||||
hermes mcp-server --port 8081
|
||||
python scripts/mcp_server.py --port 8081 --auth-key SECRET
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Tools safe to expose to other agents. Each entry is an MCP-style tool
# descriptor (name, description, JSON-Schema parameters) returned verbatim
# by MCPServer.handle_tools_list and dispatched by _execute_tool.
SAFE_TOOLS = {
    "terminal": {
        "name": "terminal",
        "description": "Execute safe shell commands. Dangerous commands are blocked.",
        "parameters": {
            "type": "object",
            "properties": {
                "command": {"type": "string", "description": "Shell command to execute"},
            },
            "required": ["command"],
        },
    },
    "file_read": {
        "name": "file_read",
        "description": "Read the contents of a file.",
        "parameters": {
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "File path to read"},
                "offset": {"type": "integer", "description": "Start line", "default": 1},
                "limit": {"type": "integer", "description": "Max lines", "default": 200},
            },
            "required": ["path"],
        },
    },
    "file_search": {
        "name": "file_search",
        "description": "Search file contents using regex.",
        "parameters": {
            "type": "object",
            "properties": {
                "pattern": {"type": "string", "description": "Regex pattern"},
                "path": {"type": "string", "description": "Directory to search", "default": "."},
            },
            "required": ["pattern"],
        },
    },
    "web_search": {
        "name": "web_search",
        "description": "Search the web for information.",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {"type": "string", "description": "Search query"},
            },
            "required": ["query"],
        },
    },
    "session_search": {
        "name": "session_search",
        "description": "Search past conversation sessions.",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {"type": "string", "description": "Search query"},
                "limit": {"type": "integer", "description": "Max results", "default": 3},
            },
            "required": ["query"],
        },
    },
}
|
||||
|
||||
# Tools explicitly blocked from remote invocation — checked before the
# SAFE_TOOLS lookup in handle_tools_call so a deny answer is unambiguous
# even if one of these ever appears in SAFE_TOOLS by mistake.
BLOCKED_TOOLS = {
    "approval", "delegate", "memory", "config", "skill_install",
    "mcp_tool", "cronjob", "tts", "send_message",
}
|
||||
|
||||
|
||||
class MCPServer:
    """Simple MCP-compatible server for exposing hermes tools.

    Serves GET /mcp/tools (list), POST /mcp/tools/call (invoke), and
    GET /health over plain HTTP via aiohttp. Optional bearer-token auth
    is applied to the two /mcp routes.
    """

    def __init__(self, host: str = "127.0.0.1", port: int = 8081,
                 auth_key: Optional[str] = None):
        self._host = host
        self._port = port
        # Explicit arg wins; falls back to MCP_AUTH_KEY; "" disables auth.
        self._auth_key = auth_key or os.getenv("MCP_AUTH_KEY", "")

    async def handle_tools_list(self, request: dict) -> dict:
        """Return available tools (the SAFE_TOOLS descriptors)."""
        tools = list(SAFE_TOOLS.values())
        return {"tools": tools}

    async def handle_tools_call(self, request: dict) -> dict:
        """Execute a tool call.

        Expects {"name": ..., "arguments": {...}}; returns either an MCP
        content payload or {"error": ...} — never raises to the caller.
        """
        tool_name = request.get("name", "")
        arguments = request.get("arguments", {})

        # Deny-list first, then allow-list: blocked tools get a specific
        # message even if misconfigured into SAFE_TOOLS.
        if tool_name in BLOCKED_TOOLS:
            return {"error": f"Tool '{tool_name}' is not exposed via MCP"}
        if tool_name not in SAFE_TOOLS:
            return {"error": f"Unknown tool: {tool_name}"}

        try:
            result = await self._execute_tool(tool_name, arguments)
            return {"content": [{"type": "text", "text": str(result)}]}
        except Exception as e:
            return {"error": str(e)}

    async def _execute_tool(self, tool_name: str, arguments: dict) -> str:
        """Execute a tool and return its result as a string."""
        if tool_name == "terminal":
            import subprocess
            cmd = arguments.get("command", "")
            # Block dangerous commands before shelling out. NOTE(review):
            # this still runs the command with shell=True after a
            # pattern-based check — the screen in tools.approval is the
            # only guard against injection; confirm its coverage.
            from tools.approval import detect_dangerous_command
            is_dangerous, _, desc = detect_dangerous_command(cmd)
            if is_dangerous:
                return f"BLOCKED: Dangerous command detected ({desc}). This tool only executes safe commands."
            result = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=30)
            return result.stdout or result.stderr or "(no output)"

        elif tool_name == "file_read":
            path = arguments.get("path", "")
            offset = arguments.get("offset", 1)  # 1-based start line
            limit = arguments.get("limit", 200)
            with open(path) as f:
                lines = f.readlines()
            return "".join(lines[offset-1:offset-1+limit])

        elif tool_name == "file_search":
            import re
            pattern = arguments.get("pattern", "")
            path = arguments.get("path", ".")
            results = []
            # Only *.py files are scanned; matches are capped at 20.
            for p in Path(path).rglob("*.py"):
                try:
                    content = p.read_text()
                    for i, line in enumerate(content.split("\n"), 1):
                        if re.search(pattern, line, re.IGNORECASE):
                            results.append(f"{p}:{i}: {line.strip()}")
                            if len(results) >= 20:
                                break
                except Exception:
                    # Unreadable / non-UTF-8 file — skip it.
                    continue
                if len(results) >= 20:
                    break
            return "\n".join(results) or "No matches found"

        elif tool_name == "web_search":
            try:
                from tools.web_tools import web_search
                return web_search(arguments.get("query", ""))
            except ImportError:
                return "Web search not available"

        elif tool_name == "session_search":
            try:
                from tools.session_search_tool import session_search
                return session_search(
                    query=arguments.get("query", ""),
                    limit=arguments.get("limit", 3),
                )
            except ImportError:
                return "Session search not available"

        return f"Tool {tool_name} not implemented"

    async def start_http(self):
        """Start the aiohttp server and block until cancelled."""
        try:
            from aiohttp import web
        except ImportError:
            logger.error("aiohttp required: pip install aiohttp")
            return

        app = web.Application()

        async def handle_tools_list_route(request):
            # Bearer auth (same check duplicated on both /mcp routes).
            if self._auth_key:
                auth = request.headers.get("Authorization", "")
                if auth != f"Bearer {self._auth_key}":
                    return web.json_response({"error": "Unauthorized"}, status=401)
            result = await self.handle_tools_list({})
            return web.json_response(result)

        async def handle_tools_call_route(request):
            if self._auth_key:
                auth = request.headers.get("Authorization", "")
                if auth != f"Bearer {self._auth_key}":
                    return web.json_response({"error": "Unauthorized"}, status=401)
            body = await request.json()
            result = await self.handle_tools_call(body)
            return web.json_response(result)

        async def handle_health(request):
            # Unauthenticated liveness probe.
            return web.json_response({"status": "ok", "tools": len(SAFE_TOOLS)})

        app.router.add_get("/mcp/tools", handle_tools_list_route)
        app.router.add_post("/mcp/tools/call", handle_tools_call_route)
        app.router.add_get("/health", handle_health)

        runner = web.AppRunner(app)
        await runner.setup()
        site = web.TCPSite(runner, self._host, self._port)
        await site.start()
        logger.info("MCP server on http://%s:%s", self._host, self._port)
        logger.info("Tools: %s", ", ".join(SAFE_TOOLS.keys()))
        if self._auth_key:
            logger.info("Auth: Bearer token required")
        else:
            logger.warning("Auth: No MCP_AUTH_KEY set — server is open")

        # Park forever; cancellation (e.g. Ctrl-C via asyncio.run) triggers
        # the cleanup in finally.
        try:
            await asyncio.Event().wait()
        except asyncio.CancelledError:
            pass
        finally:
            await runner.cleanup()
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse flags, configure logging, serve until killed."""
    parser = argparse.ArgumentParser(description="Hermes MCP Server")
    for flag, opts in (
        ("--host", {"default": "127.0.0.1"}),
        ("--port", {"type": int, "default": 8081}),
        ("--auth-key", {"default": None, "help": "Bearer token for auth"}),
    ):
        parser.add_argument(flag, **opts)
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
    )

    server = MCPServer(host=args.host, port=args.port, auth_key=args.auth_key)
    print(f"Starting MCP server on http://{args.host}:{args.port}")
    print(f"Exposed tools: {', '.join(SAFE_TOOLS.keys())}")
    asyncio.run(server.start_http())


if __name__ == "__main__":
    main()
|
||||
92
tests/agent/a2a/test_mtls.py
Normal file
92
tests/agent/a2a/test_mtls.py
Normal file
@@ -0,0 +1,92 @@
|
||||
"""Tests for A2A mutual TLS authentication."""
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from agent.a2a.mtls import (
|
||||
FleetCA,
|
||||
verify_peer,
|
||||
get_cert_info,
|
||||
generate_fleet_certs,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
def tmp_ca():
    """Create a temporary CA for testing.

    Yields a FleetCA rooted in a throwaway directory; the tree is removed
    after the test regardless of outcome. Note: issued certs still land in
    the module-level CERTS_DIR, not inside this temp dir.
    """
    tmp = tempfile.mkdtemp()
    ca_dir = Path(tmp) / "ca"
    ca = FleetCA.init(ca_dir)
    yield ca
    shutil.rmtree(tmp, ignore_errors=True)
|
||||
|
||||
|
||||
class TestFleetCA:
    """CA bootstrap: cert/key files exist and the root has expected identity."""

    def test_ca_generates_cert_and_key(self, tmp_ca):
        assert tmp_ca.ca_cert.exists()
        assert tmp_ca.ca_key.exists()

    def test_ca_cert_is_ca(self, tmp_ca):
        # Subject is "/CN=Hermes Fleet CA/O=Timmy Foundation/C=US".
        info = get_cert_info(str(tmp_ca.ca_cert))
        assert info is not None
        assert "CA" in info.subject or "Hermes" in info.subject

    def test_ca_validity_10_years(self, tmp_ca):
        info = get_cert_info(str(tmp_ca.ca_cert))
        assert info is not None
        assert info.days_remaining > 3500  # ~10 years
|
||||
|
||||
|
||||
class TestIssueCert:
    """Leaf-cert issuance: files on disk, chain of trust, subject, lifetime."""

    def test_issue_cert_creates_files(self, tmp_ca):
        cert, key = tmp_ca.issue_cert("test-agent")
        assert cert.exists()
        assert key.exists()

    def test_cert_verifies_against_ca(self, tmp_ca):
        cert, _ = tmp_ca.issue_cert("test-agent")
        assert verify_peer(str(cert), str(tmp_ca.ca_cert))

    def test_cert_has_agent_name(self, tmp_ca):
        # issue_cert puts the agent name in both CN and OU.
        cert, _ = tmp_ca.issue_cert("allegro")
        info = get_cert_info(str(cert))
        assert info is not None
        assert "allegro" in info.subject.lower()

    def test_cert_validity_1_year(self, tmp_ca):
        cert, _ = tmp_ca.issue_cert("test-agent")
        info = get_cert_info(str(cert))
        assert info is not None
        # Default validity_days=365; small slack for day-boundary rounding.
        assert 360 <= info.days_remaining <= 366
|
||||
|
||||
|
||||
class TestVerify:
    """verify_peer: accepts CA-signed certs, rejects foreign self-signed ones."""

    def test_valid_cert_verifies(self, tmp_ca):
        cert, _ = tmp_ca.issue_cert("test-agent")
        assert verify_peer(str(cert), str(tmp_ca.ca_cert)) is True

    def test_invalid_cert_fails(self, tmp_ca):
        # Create a self-signed cert not from our CA.
        # FIX: tempfile.mktemp is deprecated and racy — use mkstemp, and
        # clean up even when the assertion fails.
        import subprocess
        fd, tmp = tempfile.mkstemp(suffix=".pem")
        os.close(fd)
        try:
            subprocess.run(["openssl", "req", "-x509", "-newkey", "rsa:2048",
                            "-keyout", "/dev/null", "-out", tmp, "-days", "1",
                            "-subj", "/CN=imposter", "-nodes"],
                           capture_output=True)
            assert verify_peer(tmp, str(tmp_ca.ca_cert)) is False
        finally:
            os.unlink(tmp)
|
||||
|
||||
|
||||
class TestGenerateFleet:
    """generate_fleet_certs: one (cert, key) pair per requested agent."""

    def test_generates_all_agents(self, tmp_ca):
        agents = ["timmy", "allegro", "ezra"]
        # Reuse the fixture's CA so no new root is generated.
        results = generate_fleet_certs(agents, ca_dir=tmp_ca.ca_dir)
        assert len(results) == 3
        for agent in agents:
            assert agent in results
            # results values are (cert_path, key_path) strings.
            assert os.path.exists(results[agent][0])
            assert os.path.exists(results[agent][1])
|
||||
@@ -201,31 +201,8 @@ def _get_command_timeout() -> int:
|
||||
|
||||
|
||||
def _get_vision_model() -> Optional[str]:
|
||||
"""Model for browser_vision (screenshot analysis — multimodal).
|
||||
|
||||
Priority:
|
||||
1. AUXILIARY_VISION_MODEL env var (explicit override)
|
||||
2. Gemma 4 (native multimodal, no model switching)
|
||||
3. Ollama local vision models
|
||||
4. None (fallback to text-only snapshot)
|
||||
"""
|
||||
# Explicit override always wins
|
||||
explicit = os.getenv("AUXILIARY_VISION_MODEL", "").strip()
|
||||
if explicit:
|
||||
return explicit
|
||||
|
||||
# Prefer Gemma 4 (native multimodal — no separate vision model needed)
|
||||
gemma = os.getenv("GEMMA_VISION_MODEL", "").strip()
|
||||
if gemma:
|
||||
return gemma
|
||||
|
||||
# Check for Ollama vision models
|
||||
ollama_vision = os.getenv("OLLAMA_VISION_MODEL", "").strip()
|
||||
if ollama_vision:
|
||||
return ollama_vision
|
||||
|
||||
# Default: None (text-only fallback)
|
||||
return None
|
||||
"""Model for browser_vision (screenshot analysis — multimodal)."""
|
||||
return os.getenv("AUXILIARY_VISION_MODEL", "").strip() or None
|
||||
|
||||
|
||||
def _get_extraction_model() -> Optional[str]:
|
||||
|
||||
Reference in New Issue
Block a user