Compare commits

..

1 Commit

Author SHA1 Message Date
Alexander Whitestone
3a7e0e7db4 fix: migrate hardcoded ~/.hermes paths to HERMES_HOME resolution (#835)
All checks were successful
Lint / lint (pull_request) Successful in 23s
- tools/session_templates.py: use get_hermes_home() for template dir and state.db
- tools/credential_redact.py: use get_hermes_home() for HERMES_HOME base
- agent/context_budget.py: use get_hermes_home() for checkpoints dir
- tools/crisis_tool.py: use HERMES_HOME env var with fallback for crisis log path
- tools/hardcoded_path_guard.py: add noqa to example docstring lines
- scripts/lint_hardcoded_paths.py: exclude lines already referencing HERMES_HOME

Also fixes a pre-existing SyntaxError in credential_redact.py caused by
unescaped double quotes (`["']`) inside double-quoted raw string literals.
2026-04-22 02:45:05 -04:00
8 changed files with 25 additions and 233 deletions

View File

@@ -13,9 +13,11 @@ import time
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from hermes_constants import get_hermes_home
logger = logging.getLogger(__name__)
HERMES_HOME = Path.home() / ".hermes"
HERMES_HOME = get_hermes_home()
CHECKPOINT_DIR = HERMES_HOME / "checkpoints"
CHARS_PER_TOKEN = 4

View File

@@ -56,7 +56,7 @@ VIOLATIONS = [
"id": "expanduser-hermes",
"name": "os.path.expanduser ~/.hermes (non-fallback)",
"pattern": r'os\.path\.expanduser\(["\']~/.hermes',
"exclude_with": r'#',
"exclude_with": r'#|HERMES_HOME',
"message": "Use `os.environ.get('HERMES_HOME', os.path.expanduser('~/.hermes'))` instead",
},
]

View File

@@ -1302,9 +1302,9 @@ class TestConcurrentToolExecution:
mock_con.assert_not_called()
def test_malformed_json_args_forces_sequential(self, agent):
"""Non-dict tool arguments (e.g. JSON array) should fall back to sequential."""
"""Unparseable tool arguments should fall back to sequential."""
tc1 = _mock_tool_call(name="web_search", arguments='{}', call_id="c1")
tc2 = _mock_tool_call(name="web_search", arguments='[1, 2, 3]', call_id="c2")
tc2 = _mock_tool_call(name="web_search", arguments="NOT JSON {{{", call_id="c2")
mock_msg = _mock_assistant_msg(content="", tool_calls=[tc1, tc2])
messages = []
with patch.object(agent, "_execute_tool_calls_sequential") as mock_seq:
@@ -1384,9 +1384,10 @@ class TestConcurrentToolExecution:
mock_msg = _mock_assistant_msg(content="", tool_calls=[tc1, tc2])
messages = []
call_count = [0]
def fake_handle(name, args, task_id, **kwargs):
# Deterministic failure based on tool_call_id to avoid race conditions
if kwargs.get("tool_call_id") == "c1":
call_count[0] += 1
if call_count[0] == 1:
raise RuntimeError("boom")
return "success"

View File

@@ -416,219 +416,3 @@ class TestEdgeCases:
"""Verify max workers constant exists and is reasonable."""
from run_agent import _MAX_TOOL_WORKERS
assert 1 <= _MAX_TOOL_WORKERS <= 32
# ── Integration Tests: AIAgent Concurrent Execution ───────────────────────────
# NOTE(review): this class is reproduced from a diff view that stripped all
# indentation; nesting has been reconstructed conventionally. In particular,
# confirm against the original file whether `a.client = MagicMock()` sits
# inside or outside the patch context (behavior is identical either way,
# since the patches only matter during AIAgent construction).
class TestAIAgentConcurrentExecution:
    """Exercise _execute_tool_calls_concurrent through an AIAgent instance."""

    @pytest.fixture
    def agent(self):
        """Minimal AIAgent with mocked OpenAI client and tool loading."""
        from types import SimpleNamespace
        from unittest.mock import patch
        from run_agent import AIAgent

        def _make_tool_defs(*names):
            # Build OpenAI-style function-tool definitions for each name,
            # with an empty parameter schema (tests pass args directly).
            return [
                {
                    "type": "function",
                    "function": {
                        "name": n,
                        "description": f"{n} tool",
                        "parameters": {"type": "object", "properties": {}},
                    },
                }
                for n in names
            ]

        # Patch tool discovery, toolset checks, and the OpenAI client so the
        # agent constructs without any network access or real tool loading.
        with (
            patch("run_agent.get_tool_definitions", return_value=_make_tool_defs("web_search", "read_file")),
            patch("run_agent.check_toolset_requirements", return_value={}),
            patch("run_agent.OpenAI"),
        ):
            a = AIAgent(
                api_key="test-key-1234567890",
                quiet_mode=True,
                skip_context_files=True,
                skip_memory=True,
            )
            # Replace the (already mocked) client with a fresh MagicMock the
            # tests can inspect.
            a.client = MagicMock()
            return a

    def _mock_assistant_msg(self, tool_calls=None):
        # Lightweight stand-in for an assistant message carrying tool calls.
        from types import SimpleNamespace
        return SimpleNamespace(content="", tool_calls=tool_calls)

    def _mock_tool_call(self, name, arguments, call_id):
        # Mirrors the OpenAI tool-call object shape:
        # id / type / function.name / function.arguments (JSON string).
        from types import SimpleNamespace
        return SimpleNamespace(
            id=call_id,
            type="function",
            function=SimpleNamespace(name=name, arguments=json.dumps(arguments)),
        )

    def test_two_tool_batch_executes_concurrently(self, agent):
        """2-tool parallel batch: all execute, results ordered, 100% pass."""
        tc1 = self._mock_tool_call("read_file", {"path": "a.txt"}, "c1")
        tc2 = self._mock_tool_call("read_file", {"path": "b.txt"}, "c2")
        mock_msg = self._mock_assistant_msg(tool_calls=[tc1, tc2])
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            # Echo the requested path so assertions can match result to call.
            return json.dumps({"file": args.get("path", ""), "content": f"content_of_{args.get('path', '')}"})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")
        assert len(messages) == 2
        # Results must come back in tool-call order, not completion order.
        assert messages[0]["tool_call_id"] == "c1"
        assert messages[1]["tool_call_id"] == "c2"
        assert "a.txt" in messages[0]["content"]
        assert "b.txt" in messages[1]["content"]

    def test_three_tool_batch_executes_concurrently(self, agent):
        """3-tool parallel batch: all execute, results ordered, 100% pass."""
        tcs = [
            self._mock_tool_call("web_search", {"query": f"q{i}"}, f"c{i}")
            for i in range(3)
        ]
        mock_msg = self._mock_assistant_msg(tool_calls=tcs)
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"query": args.get("query", ""), "results": [f"result_{args.get('query', '')}"]})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")
        assert len(messages) == 3
        for i, tc in enumerate(tcs):
            assert messages[i]["tool_call_id"] == tc.id
            assert f"q{i}" in messages[i]["content"]

    def test_four_tool_batch_executes_concurrently(self, agent):
        """4-tool parallel batch: all execute, results ordered, 100% pass."""
        tcs = [
            self._mock_tool_call("read_file", {"path": f"file{i}.txt"}, f"c{i}")
            for i in range(4)
        ]
        mock_msg = self._mock_assistant_msg(tool_calls=tcs)
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"path": args.get("path", ""), "size": 100})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")
        assert len(messages) == 4
        for i, tc in enumerate(tcs):
            assert messages[i]["tool_call_id"] == tc.id
            assert f"file{i}.txt" in messages[i]["content"]

    def test_mixed_read_and_search_batch(self, agent):
        """read_file + search_files: safe parallel, different scopes."""
        tc1 = self._mock_tool_call("read_file", {"path": "config.yaml"}, "c1")
        tc2 = self._mock_tool_call("web_search", {"query": "provider"}, "c2")
        mock_msg = self._mock_assistant_msg(tool_calls=[tc1, tc2])
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"tool": name, "args": args})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")
        assert len(messages) == 2
        assert messages[0]["tool_call_id"] == "c1"
        assert messages[1]["tool_call_id"] == "c2"
        assert "config.yaml" in messages[0]["content"]
        assert "provider" in messages[1]["content"]

    def test_concurrent_pass_rate_report(self, agent):
        """Simulate 2/3/4-tool batches and report pass rate."""
        batch_sizes = [2, 3, 4]
        pass_rates = {}
        for size in batch_sizes:
            tcs = [
                self._mock_tool_call("web_search", {"query": f"q{i}"}, f"c{i}")
                for i in range(size)
            ]
            mock_msg = self._mock_assistant_msg(tool_calls=tcs)
            messages = []

            def fake_handle(name, args, task_id, **kwargs):
                return json.dumps({"ok": True, "query": args.get("query", "")})

            with patch("run_agent.handle_function_call", side_effect=fake_handle):
                agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")
            # A call "passed" if its serialized result made it into messages.
            passed = sum(1 for m in messages if "ok" in m.get("content", ""))
            pass_rates[size] = passed / size if size > 0 else 0.0
        for size, rate in pass_rates.items():
            assert rate == 1.0, f"Expected 100% pass rate for {size}-tool batch, got {rate:.0%}"

    def test_gemma4_style_two_read_files(self, agent):
        """Gemma 4 may issue two reads simultaneously — verify both returned."""
        tc1 = self._mock_tool_call("read_file", {"path": "src/main.py"}, "c1")
        tc2 = self._mock_tool_call("read_file", {"path": "src/utils.py"}, "c2")
        mock_msg = self._mock_assistant_msg(tool_calls=[tc1, tc2])
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"content": f"# {args['path']}\nprint('hello')"})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")
        assert len(messages) == 2
        assert "main.py" in messages[0]["content"]
        assert "utils.py" in messages[1]["content"]

    def test_gemma4_style_three_reads(self, agent):
        """Gemma 4 may issue 3 reads for different files — all returned."""
        tcs = [
            self._mock_tool_call("read_file", {"path": f"mod{i}.py"}, f"c{i}")
            for i in range(3)
        ]
        mock_msg = self._mock_assistant_msg(tool_calls=tcs)
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"content": f"# {args['path']}"})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")
        assert len(messages) == 3
        for i in range(3):
            assert f"mod{i}.py" in messages[i]["content"]

    def test_mixed_safe_and_write_tools_parallel(self, agent):
        """Mix of read (safe) and write (path-scoped) on different paths — parallel."""
        tc1 = self._mock_tool_call("read_file", {"path": "input.txt"}, "c1")
        tc2 = self._mock_tool_call("write_file", {"path": "output.txt", "content": "x"}, "c2")
        tc3 = self._mock_tool_call("read_file", {"path": "config.txt"}, "c3")
        mock_msg = self._mock_assistant_msg(tool_calls=[tc1, tc2, tc3])
        messages = []
        call_order = []

        def fake_handle(name, args, task_id, **kwargs):
            call_order.append(name)
            return json.dumps({"tool": name, "path": args.get("path", "")})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")
        assert len(messages) == 3
        # Results ordered by tool call ID, not completion order
        assert messages[0]["tool_call_id"] == "c1"
        assert messages[1]["tool_call_id"] == "c2"
        assert messages[2]["tool_call_id"] == "c3"
        # All three should have executed
        assert len(call_order) == 3

View File

@@ -13,9 +13,11 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Tuple
from hermes_constants import get_hermes_home
logger = logging.getLogger(__name__)
HERMES_HOME = Path.home() / ".hermes"
HERMES_HOME = get_hermes_home()
AUDIT_DIR = HERMES_HOME / "audit"
# Credential patterns to detect and redact
@@ -32,14 +34,14 @@ CREDENTIAL_PATTERNS = [
(r"bearer\s+[a-zA-Z0-9._-]{20,}", "[REDACTED: Bearer token]"),
# Generic tokens/passwords
(r"(?:token|TOKEN|Token)[:=]\s*["']?[a-zA-Z0-9._-]{20,}["']?", "[REDACTED: Token]"),
(r"(?:password|PASSWORD|Password)[:=]\s*["']?[^\s"']{8,}["']?", "[REDACTED: Password]"),
(r"(?:secret|SECRET|Secret)[:=]\s*["']?[a-zA-Z0-9._-]{20,}["']?", "[REDACTED: Secret]"),
(r"(?:api_key|API_KEY|apiKey|ApiKey)[:=]\s*["']?[a-zA-Z0-9._-]{20,}["']?", "[REDACTED: API key]"),
("(?:token|TOKEN|Token)[:=]\\s*['\"]?[a-zA-Z0-9._-]{20,}['\"]?", "[REDACTED: Token]"),
("(?:password|PASSWORD|Password)[:=]\\s*['\"]?[^\\s\"']{8,}['\"]?", "[REDACTED: Password]"),
("(?:secret|SECRET|Secret)[:=]\\s*['\"]?[a-zA-Z0-9._-]{20,}['\"]?", "[REDACTED: Secret]"),
("(?:api_key|API_KEY|apiKey|ApiKey)[:=]\\s*['\"]?[a-zA-Z0-9._-]{20,}['\"]?", "[REDACTED: API key]"),
# AWS keys
(r"AKIA[0-9A-Z]{16}", "[REDACTED: AWS access key]"),
(r"(?:aws_secret_access_key|AWS_SECRET_ACCESS_KEY)[:=]\s*["']?[a-zA-Z0-9/+=]{40}["']?", "[REDACTED: AWS secret]"),
("(?:aws_secret_access_key|AWS_SECRET_ACCESS_KEY)[:=]\\s*['\"]?[a-zA-Z0-9/+=]{40}['\"]?", "[REDACTED: AWS secret]"),
# Private keys
(r"-----BEGIN (?:RSA |EC |OPENSSH )?PRIVATE KEY-----", "[REDACTED: Private key header]"),

View File

@@ -249,7 +249,8 @@ def detect_crisis(text: str) -> CrisisDetectionResult:
# ── Escalation Logging ────────────────────────────────────────────────────
BRIDGE_URL = os.environ.get("CRISIS_BRIDGE_URL", "")
LOG_PATH = os.path.expanduser("~/.hermes/crisis_escalations.jsonl")
_HERMES_HOME = os.environ.get("HERMES_HOME")
LOG_PATH = os.path.join(_HERMES_HOME or os.path.expanduser("~/.hermes"), "crisis_escalations.jsonl")
def _log_escalation(result: CrisisDetectionResult, text_preview: str = ""):

View File

@@ -10,10 +10,10 @@ Usage:
from tools.hardcoded_path_guard import check_path, validate_tool_args
# Check a single path
err = check_path("/Users/apayne/.hermes/config.yaml")
err = check_path("/Users/apayne/.hermes/config.yaml") # noqa: hardcoded-path-ok
# Validate all path-like args in a tool call
clean_args, warnings = validate_tool_args("read_file", {"path": "/home/user/file.txt"})
clean_args, warnings = validate_tool_args("read_file", {"path": "/home/user/file.txt"}) # noqa: hardcoded-path-ok
"""
import os

View File

@@ -14,9 +14,11 @@ from typing import Dict, List, Optional, Any
from dataclasses import dataclass, asdict, field
from enum import Enum
from hermes_constants import get_hermes_home
logger = logging.getLogger(__name__)
TEMPLATE_DIR = Path.home() / ".hermes" / "session-templates"
TEMPLATE_DIR = get_hermes_home() / "session-templates"
class TaskType(Enum):
@@ -106,7 +108,7 @@ class Templates:
return TaskType.MIXED
def extract(self, session_id, max_n=10):
db = Path.home() / ".hermes" / "state.db"
db = get_hermes_home() / "state.db"
if not db.exists():
return []
try: