Compare commits
1 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3ce1f829a2 |
@@ -1,69 +0,0 @@
|
||||
"""First-class context snapshot artifacts for live runtime memory evaluation."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from hermes_constants import get_hermes_home
|
||||
|
||||
|
||||
_SAFE_SEGMENT_RE = re.compile(r"[^A-Za-z0-9_.-]+")


class ContextSnapshotRecorder:
    """Write per-call prompt-composition artifacts for a Hermes session.

    Each API call gets its own ``call_NNN`` directory under the session
    directory, holding the prompt/memory text artifacts plus two JSON files
    (the full message payload and per-call metadata).
    """

    def __init__(self, session_id: str, *, enabled: bool = False, base_dir: str | Path | None = None):
        # Fall back to a generic label so paths are never built from "".
        self.session_id = session_id or "session"
        self.enabled = bool(enabled)
        if base_dir:
            self.base_dir = Path(base_dir)
        else:
            self.base_dir = get_hermes_home() / "reports" / "context_snapshots"

    @property
    def session_dir(self) -> Path:
        """Filesystem-safe directory for this session's snapshots."""
        cleaned = _SAFE_SEGMENT_RE.sub("_", self.session_id).strip("._")
        return self.base_dir / (cleaned or "session")

    def record_call(
        self,
        api_call_count: int,
        *,
        system_prompt: str,
        memory_provider_system_prompt: str = "",
        memory_prefetch_raw: str = "",
        memory_context_block: str = "",
        api_user_message: str = "",
        api_messages: list[dict[str, Any]] | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> Path | None:
        """Persist one API call's prompt-composition artifacts.

        Returns the call directory on success, or ``None`` when the
        recorder is disabled (no files are touched in that case).
        """
        if not self.enabled:
            return None

        call_dir = self.session_dir / f"call_{api_call_count:03d}"
        call_dir.mkdir(parents=True, exist_ok=True)

        # One text file per prompt-composition stage; empty strings are
        # written as empty files so the artifact set is always complete.
        text_artifacts = {
            "system_prompt.txt": system_prompt,
            "memory_provider_system_prompt.txt": memory_provider_system_prompt,
            "memory_prefetch_raw.txt": memory_prefetch_raw,
            "memory_context_block.txt": memory_context_block,
            "api_user_message.txt": api_user_message,
        }
        for filename, content in text_artifacts.items():
            self._write_text(call_dir / filename, content or "")

        self._write_json(call_dir / "api_messages.json", api_messages or [])
        # Caller-supplied metadata may deliberately override the defaults.
        call_meta: dict[str, Any] = {
            "session_id": self.session_id,
            "api_call_count": api_call_count,
        }
        call_meta.update(metadata or {})
        self._write_json(call_dir / "metadata.json", call_meta)
        return call_dir

    @staticmethod
    def _write_text(path: Path, content: str) -> None:
        # UTF-8 explicitly: artifacts may carry arbitrary prompt text.
        path.write_text(content, encoding="utf-8")

    @staticmethod
    def _write_json(path: Path, payload: Any) -> None:
        serialized = json.dumps(payload, indent=2, ensure_ascii=False)
        path.write_text(serialized, encoding="utf-8")
|
||||
16
cli.py
16
cli.py
@@ -589,6 +589,7 @@ from tools.terminal_tool import set_sudo_password_callback, set_approval_callbac
|
||||
from tools.skills_tool import set_secret_capture_callback
|
||||
from hermes_cli.callbacks import prompt_for_secret
|
||||
from tools.browser_tool import _emergency_cleanup_all_sessions as _cleanup_all_browsers
|
||||
from utils import repair_and_load_json
|
||||
|
||||
# Guard to prevent cleanup from running multiple times on exit
|
||||
_cleanup_done = False
|
||||
@@ -3569,7 +3570,11 @@ class HermesCLI:
|
||||
result_json = _asyncio.run(
|
||||
vision_analyze_tool(image_url=str(img_path), user_prompt=analysis_prompt)
|
||||
)
|
||||
result = _json.loads(result_json)
|
||||
result = repair_and_load_json(
|
||||
result_json,
|
||||
default={},
|
||||
context="cli_image_analysis",
|
||||
) if isinstance(result_json, str) else {}
|
||||
if result.get("success"):
|
||||
description = result.get("analysis", "")
|
||||
enriched_parts.append(
|
||||
@@ -4960,7 +4965,14 @@ class HermesCLI:
|
||||
from tools.cronjob_tools import cronjob as cronjob_tool
|
||||
|
||||
def _cron_api(**kwargs):
|
||||
return json.loads(cronjob_tool(**kwargs))
|
||||
result = repair_and_load_json(
|
||||
cronjob_tool(**kwargs),
|
||||
default=None,
|
||||
context="cli_cron_command",
|
||||
)
|
||||
if isinstance(result, dict):
|
||||
return result
|
||||
return {"success": False, "error": "Invalid JSON from cronjob tool"}
|
||||
|
||||
def _normalize_skills(values):
|
||||
normalized = []
|
||||
|
||||
@@ -1,132 +0,0 @@
|
||||
# Hindsight local eval homes for live Hermes runtime testing
|
||||
|
||||
Issue: #1010
|
||||
Parent: #985
|
||||
|
||||
This document defines a reproducible, profile-scoped evaluation layout for baseline / MemPalace / Hindsight comparisons without requiring Hindsight Cloud.
|
||||
|
||||
## Eval home layout
|
||||
|
||||
Use three separate `HERMES_HOME` directories so each run has isolated config, memory, sessions, and artifacts.
|
||||
|
||||
```text
|
||||
~/.hermes/profiles/atlas-baseline/
|
||||
config.yaml
|
||||
.env
|
||||
MEMORY.md
|
||||
USER.md
|
||||
reports/context_snapshots/
|
||||
|
||||
~/.hermes/profiles/atlas-mempalace/
|
||||
config.yaml
|
||||
.env
|
||||
MEMORY.md
|
||||
USER.md
|
||||
reports/context_snapshots/
|
||||
plugins/ # if a local MemPalace plugin is installed for this eval lane
|
||||
|
||||
~/.hermes/profiles/atlas-hindsight/
|
||||
config.yaml
|
||||
.env
|
||||
MEMORY.md
|
||||
USER.md
|
||||
hindsight/config.json
|
||||
reports/context_snapshots/
|
||||
```
|
||||
|
||||
## Hindsight local config
|
||||
|
||||
The Hindsight provider already loads config from `$HERMES_HOME/hindsight/config.json` first. For the local eval lane, prefer `local_embedded` so Hermes can bring up a local Hindsight daemon without cloud signup.
|
||||
|
||||
Example `~/.hermes/profiles/atlas-hindsight/hindsight/config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"mode": "local_embedded",
|
||||
"memory_mode": "context",
|
||||
"recall_prefetch_method": "recall",
|
||||
"llm_provider": "ollama",
|
||||
"llm_model": "gemma3:12b",
|
||||
"api_url": "http://localhost:8888"
|
||||
}
|
||||
```
|
||||
|
||||
Notes:
|
||||
- `local_embedded` avoids any Hindsight Cloud dependency.
|
||||
- If `profile` is omitted, Hermes now derives a stable local Hindsight profile name from the active profile identity / `HERMES_HOME` instead of collapsing all local runs into the shared legacy `hermes` profile.
|
||||
- `local_external` remains valid if you already run a local Hindsight server yourself.
|
||||
|
||||
## Runtime switching procedure
|
||||
|
||||
Switch by exporting `HERMES_HOME` before launching Hermes.
|
||||
|
||||
### 1. Baseline
|
||||
|
||||
```bash
|
||||
export HERMES_HOME="$HOME/.hermes/profiles/atlas-baseline"
|
||||
unset HERMES_CONTEXT_SNAPSHOTS
|
||||
hermes chat
|
||||
```
|
||||
|
||||
### 2. MemPalace lane
|
||||
|
||||
```bash
|
||||
export HERMES_HOME="$HOME/.hermes/profiles/atlas-mempalace"
|
||||
export HERMES_CONTEXT_SNAPSHOTS=1
|
||||
hermes chat
|
||||
```
|
||||
|
||||
### 3. Hindsight lane
|
||||
|
||||
```bash
|
||||
export HERMES_HOME="$HOME/.hermes/profiles/atlas-hindsight"
|
||||
export HERMES_CONTEXT_SNAPSHOTS=1
|
||||
hermes chat
|
||||
```
|
||||
|
||||
## Raw artifact capture
|
||||
|
||||
When `HERMES_CONTEXT_SNAPSHOTS=1` is enabled, Hermes writes first-class prompt-composition artifacts under the active home by default.
|
||||
|
||||
Artifact tree:
|
||||
|
||||
```text
|
||||
$HERMES_HOME/reports/context_snapshots/<session-id>/call_001/
|
||||
system_prompt.txt
|
||||
memory_provider_system_prompt.txt
|
||||
memory_prefetch_raw.txt
|
||||
memory_context_block.txt
|
||||
api_user_message.txt
|
||||
api_messages.json
|
||||
metadata.json
|
||||
```
|
||||
|
||||
Minimum files a benchmark should inspect:
|
||||
- `system_prompt.txt`
|
||||
- `memory_prefetch_raw.txt`
|
||||
- `memory_context_block.txt`
|
||||
- `api_user_message.txt`
|
||||
- `api_messages.json`
|
||||
|
||||
These prove:
|
||||
- what the system prompt was
|
||||
- what the provider prefetched
|
||||
- what entered `<memory-context>`
|
||||
- what the final API user message looked like
|
||||
- what full payload reached the model
|
||||
|
||||
## Follow-on benchmark workflow
|
||||
|
||||
A benchmark issue can now consume this path without redoing integration work:
|
||||
1. pick one eval home (`atlas-baseline`, `atlas-mempalace`, `atlas-hindsight`)
|
||||
2. export the corresponding `HERMES_HOME`
|
||||
3. run Hermes on the same prompt set
|
||||
4. compare the snapshot artifacts in `reports/context_snapshots/`
|
||||
5. score recall quality and answer quality separately
|
||||
|
||||
## Why this is sovereign
|
||||
|
||||
- no hosted Hindsight Cloud dependency is required
|
||||
- the Hindsight config is profile-scoped under `hindsight/config.json`
|
||||
- the runtime artifacts stay under the active `HERMES_HOME`
|
||||
- switching between baseline / MemPalace / Hindsight is just a `HERMES_HOME` swap
|
||||
@@ -178,25 +178,6 @@ def _load_config() -> dict:
|
||||
}
|
||||
|
||||
|
||||
def _derive_local_profile_name(agent_identity: str = "", hermes_home: str = "") -> str:
|
||||
"""Return a stable profile name for local embedded Hindsight storage.
|
||||
|
||||
Prefer the active Hermes profile identity when available, otherwise fall back
|
||||
to the basename of the active HERMES_HOME path. This prevents all local
|
||||
Hindsight eval homes from sharing the legacy default profile name "hermes".
|
||||
"""
|
||||
from pathlib import Path
|
||||
import re
|
||||
|
||||
raw = (agent_identity or "").strip()
|
||||
if not raw and hermes_home:
|
||||
raw = Path(hermes_home).name.strip()
|
||||
if not raw:
|
||||
raw = "hermes"
|
||||
safe = re.sub(r"[^A-Za-z0-9_.-]+", "-", raw).strip(".-_")
|
||||
return safe or "hermes"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# MemoryProvider implementation
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -487,8 +468,6 @@ class HindsightMemoryProvider(MemoryProvider):
|
||||
|
||||
def initialize(self, session_id: str, **kwargs) -> None:
|
||||
self._session_id = session_id
|
||||
hermes_home = str(kwargs.get("hermes_home") or "")
|
||||
agent_identity = str(kwargs.get("agent_identity") or "")
|
||||
|
||||
# Check client version and auto-upgrade if needed
|
||||
try:
|
||||
@@ -521,11 +500,6 @@ class HindsightMemoryProvider(MemoryProvider):
|
||||
# "local" is a legacy alias for "local_embedded"
|
||||
if self._mode == "local":
|
||||
self._mode = "local_embedded"
|
||||
if self._mode == "local_embedded" and not self._config.get("profile"):
|
||||
self._config["profile"] = _derive_local_profile_name(
|
||||
agent_identity=agent_identity,
|
||||
hermes_home=hermes_home,
|
||||
)
|
||||
self._api_key = self._config.get("apiKey") or self._config.get("api_key") or os.environ.get("HINDSIGHT_API_KEY", "")
|
||||
default_url = _DEFAULT_LOCAL_URL if self._mode in ("local_embedded", "local_external") else _DEFAULT_API_URL
|
||||
self._api_url = self._config.get("api_url") or os.environ.get("HINDSIGHT_API_URL", default_url)
|
||||
|
||||
78
run_agent.py
78
run_agent.py
@@ -604,8 +604,6 @@ class AIAgent:
|
||||
checkpoint_max_snapshots: int = 50,
|
||||
pass_session_id: bool = False,
|
||||
persist_session: bool = True,
|
||||
context_snapshots_enabled: bool | None = None,
|
||||
context_snapshots_dir: str | None = None,
|
||||
):
|
||||
"""
|
||||
Initialize the AI Agent.
|
||||
@@ -1131,43 +1129,6 @@ class AIAgent:
|
||||
except Exception:
|
||||
_agent_cfg = {}
|
||||
|
||||
def _is_enabled(value):
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
return str(value).strip().lower() in {"1", "true", "yes", "on"}
|
||||
|
||||
_debug_cfg = _agent_cfg.get("debug", {}) if isinstance(_agent_cfg, dict) else {}
|
||||
if not isinstance(_debug_cfg, dict):
|
||||
_debug_cfg = {}
|
||||
_snapshot_cfg = _debug_cfg.get("context_snapshots", {})
|
||||
if not isinstance(_snapshot_cfg, dict):
|
||||
_snapshot_cfg = {}
|
||||
_snapshots_env = os.getenv("HERMES_CONTEXT_SNAPSHOTS")
|
||||
_snapshots_dir_env = os.getenv("HERMES_CONTEXT_SNAPSHOTS_DIR")
|
||||
if context_snapshots_enabled is None:
|
||||
if _snapshots_env is not None:
|
||||
self._context_snapshots_enabled = _is_enabled(_snapshots_env)
|
||||
else:
|
||||
self._context_snapshots_enabled = _is_enabled(_snapshot_cfg.get("enabled", False))
|
||||
else:
|
||||
self._context_snapshots_enabled = bool(context_snapshots_enabled)
|
||||
self._context_snapshots_dir = (
|
||||
context_snapshots_dir
|
||||
or _snapshots_dir_env
|
||||
or _snapshot_cfg.get("dir")
|
||||
or None
|
||||
)
|
||||
try:
|
||||
from agent.context_snapshots import ContextSnapshotRecorder
|
||||
self._context_snapshot_recorder = ContextSnapshotRecorder(
|
||||
session_id=self.session_id,
|
||||
enabled=self._context_snapshots_enabled,
|
||||
base_dir=self._context_snapshots_dir,
|
||||
)
|
||||
except Exception as _snapshot_err:
|
||||
logger.debug("Context snapshot recorder init failed: %s", _snapshot_err)
|
||||
self._context_snapshot_recorder = None
|
||||
|
||||
# Persistent memory (MEMORY.md + USER.md) -- loaded from disk
|
||||
self._memory_store = None
|
||||
self._memory_enabled = False
|
||||
@@ -8183,17 +8144,12 @@ class AIAgent:
|
||||
# Use original_user_message (clean input) — user_message may contain
|
||||
# injected skill content that bloats / breaks provider queries.
|
||||
_ext_prefetch_cache = ""
|
||||
_memory_provider_prompt_cache = ""
|
||||
if self._memory_manager:
|
||||
try:
|
||||
_query = original_user_message if isinstance(original_user_message, str) else ""
|
||||
_ext_prefetch_cache = self._memory_manager.prefetch_all(_query) or ""
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
_memory_provider_prompt_cache = self._memory_manager.build_system_prompt() or ""
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
while (api_call_count < self.max_iterations and self.iteration_budget.remaining > 0) or self._budget_grace_call:
|
||||
# Reset per-turn checkpoint dedup so each iteration can take one snapshot
|
||||
@@ -8261,8 +8217,6 @@ class AIAgent:
|
||||
# However, providers like Moonshot AI require a separate 'reasoning_content' field
|
||||
# on assistant messages with tool_calls. We handle both cases here.
|
||||
api_messages = []
|
||||
_current_api_user_message = ""
|
||||
_current_memory_context_block = ""
|
||||
for idx, msg in enumerate(messages):
|
||||
api_msg = msg.copy()
|
||||
|
||||
@@ -8277,15 +8231,12 @@ class AIAgent:
|
||||
_fenced = build_memory_context_block(_ext_prefetch_cache)
|
||||
if _fenced:
|
||||
_injections.append(_fenced)
|
||||
_current_memory_context_block = _fenced
|
||||
if _plugin_user_context:
|
||||
_injections.append(_plugin_user_context)
|
||||
if _injections:
|
||||
_base = api_msg.get("content", "")
|
||||
if isinstance(_base, str):
|
||||
api_msg["content"] = _base + "\n\n" + "\n\n".join(_injections)
|
||||
if isinstance(api_msg.get("content"), str):
|
||||
_current_api_user_message = api_msg["content"]
|
||||
|
||||
# For ALL assistant messages, pass reasoning back to the API
|
||||
# This ensures multi-turn reasoning context is preserved
|
||||
@@ -8320,13 +8271,7 @@ class AIAgent:
|
||||
from agent.privacy_filter import PrivacyFilter
|
||||
pf = PrivacyFilter()
|
||||
# Sanitize messages before they reach the provider
|
||||
_pf_result = pf.sanitize_messages(api_messages)
|
||||
if isinstance(_pf_result, tuple):
|
||||
api_messages, _pf_report = _pf_result
|
||||
if getattr(pf, "last_report", None) is None:
|
||||
pf.last_report = _pf_report
|
||||
else:
|
||||
api_messages = _pf_result
|
||||
api_messages = pf.sanitize_messages(api_messages)
|
||||
if pf.last_report and pf.last_report.had_redactions:
|
||||
logger.info(f"Privacy Filter: Redacted sensitive data from turn payload. Details: {pf.last_report.summary()}")
|
||||
except Exception as e:
|
||||
@@ -8397,27 +8342,6 @@ class AIAgent:
|
||||
new_tcs.append(tc)
|
||||
am["tool_calls"] = new_tcs
|
||||
|
||||
if self._context_snapshot_recorder:
|
||||
try:
|
||||
self._context_snapshot_recorder.record_call(
|
||||
api_call_count,
|
||||
system_prompt=effective_system,
|
||||
memory_provider_system_prompt=_memory_provider_prompt_cache,
|
||||
memory_prefetch_raw=_ext_prefetch_cache,
|
||||
memory_context_block=_current_memory_context_block,
|
||||
api_user_message=_current_api_user_message,
|
||||
api_messages=api_messages,
|
||||
metadata={
|
||||
"model": self.model,
|
||||
"provider": self.provider,
|
||||
"platform": self.platform or "",
|
||||
"api_mode": self.api_mode,
|
||||
"memory_providers": [p.name for p in getattr(self._memory_manager, "providers", [])],
|
||||
},
|
||||
)
|
||||
except Exception as _snapshot_err:
|
||||
logger.debug("Context snapshot capture failed: %s", _snapshot_err)
|
||||
|
||||
# Calculate approximate request size for logging
|
||||
total_chars = sum(len(str(msg)) for msg in api_messages)
|
||||
approx_tokens = estimate_messages_tokens_rough(api_messages)
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
from pathlib import Path
|
||||
|
||||
from agent.context_snapshots import ContextSnapshotRecorder
|
||||
|
||||
|
||||
def test_disabled_recorder_writes_nothing(tmp_path):
    """A disabled recorder must be a no-op: no path returned, no files on disk."""
    recorder = ContextSnapshotRecorder(session_id="session-1", enabled=False, base_dir=tmp_path)

    out = recorder.record_call(
        1,
        system_prompt="system",
        api_messages=[{"role": "user", "content": "hello"}],
    )

    # record_call short-circuits before creating the call directory.
    assert out is None
    assert not (tmp_path / "session-1").exists()
|
||||
|
||||
|
||||
def test_enabled_recorder_writes_expected_artifacts(tmp_path):
|
||||
recorder = ContextSnapshotRecorder(session_id="session-1", enabled=True, base_dir=tmp_path)
|
||||
|
||||
out = recorder.record_call(
|
||||
1,
|
||||
system_prompt="system prompt",
|
||||
memory_provider_system_prompt="# Hindsight Memory\nActive.",
|
||||
memory_prefetch_raw="- remembered fact",
|
||||
memory_context_block="<memory-context>\nremembered\n</memory-context>",
|
||||
api_user_message="What do I prefer?\n\n<memory-context>\nremembered\n</memory-context>",
|
||||
api_messages=[
|
||||
{"role": "system", "content": "system prompt"},
|
||||
{"role": "user", "content": "What do I prefer?"},
|
||||
],
|
||||
metadata={"provider": "openai", "memory_providers": ["builtin", "hindsight"]},
|
||||
)
|
||||
|
||||
assert out == tmp_path / "session-1" / "call_001"
|
||||
assert (out / "system_prompt.txt").read_text(encoding="utf-8") == "system prompt"
|
||||
assert (out / "memory_provider_system_prompt.txt").read_text(encoding="utf-8").startswith("# Hindsight Memory")
|
||||
assert (out / "memory_prefetch_raw.txt").read_text(encoding="utf-8") == "- remembered fact"
|
||||
assert "<memory-context>" in (out / "memory_context_block.txt").read_text(encoding="utf-8")
|
||||
assert "What do I prefer?" in (out / "api_user_message.txt").read_text(encoding="utf-8")
|
||||
assert (out / "api_messages.json").read_text(encoding="utf-8").startswith("[")
|
||||
assert '"hindsight"' in (out / "metadata.json").read_text(encoding="utf-8")
|
||||
62
tests/cli/test_cli_json_repair.py
Normal file
62
tests/cli/test_cli_json_repair.py
Normal file
@@ -0,0 +1,62 @@
|
||||
import sys
|
||||
import types
|
||||
from unittest.mock import patch
|
||||
|
||||
|
||||
def _stub_auxiliary_client():
|
||||
stub = types.ModuleType("agent.auxiliary_client")
|
||||
stub.call_llm = lambda *args, **kwargs: None
|
||||
stub.resolve_provider_client = lambda *args, **kwargs: (None, None)
|
||||
stub.get_text_auxiliary_client = lambda *args, **kwargs: (None, None)
|
||||
stub.async_call_llm = lambda *args, **kwargs: None
|
||||
stub.extract_content_or_reasoning = lambda *args, **kwargs: ""
|
||||
stub._OR_HEADERS = {}
|
||||
stub._get_task_timeout = lambda *args, **kwargs: 30
|
||||
sys.modules["agent.auxiliary_client"] = stub
|
||||
|
||||
|
||||
def _stub_vision_tools(vision_analyze_tool):
|
||||
stub = types.ModuleType("tools.vision_tools")
|
||||
stub.vision_analyze_tool = vision_analyze_tool
|
||||
sys.modules["tools.vision_tools"] = stub
|
||||
|
||||
|
||||
def test_preprocess_images_with_vision_repairs_malformed_json(tmp_path):
|
||||
_stub_auxiliary_client()
|
||||
from cli import HermesCLI
|
||||
|
||||
cli_obj = HermesCLI.__new__(HermesCLI)
|
||||
image_path = tmp_path / "test.png"
|
||||
image_path.write_bytes(b"fake-image-bytes")
|
||||
|
||||
async def fake_vision(**kwargs):
|
||||
return "{'success': true, 'analysis': 'Recovered image description',}"
|
||||
|
||||
_stub_vision_tools(fake_vision)
|
||||
result = HermesCLI._preprocess_images_with_vision(
|
||||
cli_obj,
|
||||
"Describe this",
|
||||
[image_path],
|
||||
announce=False,
|
||||
)
|
||||
|
||||
assert "Recovered image description" in result
|
||||
assert "Describe this" in result
|
||||
assert str(image_path) in result
|
||||
|
||||
|
||||
def test_handle_cron_command_repairs_malformed_json(capsys):
|
||||
_stub_auxiliary_client()
|
||||
from cli import HermesCLI
|
||||
|
||||
cli_obj = HermesCLI.__new__(HermesCLI)
|
||||
malformed_result = """{'success': true, 'jobs': [{'job_id': 'job-1234567890ab', 'name': 'Nightly Check', 'state': 'scheduled', 'schedule': 'every 1h', 'repeat': 'forever', 'prompt_preview': 'Check server status', 'skills': ['blogwatcher',], 'next_run_at': '2026-04-22T01:00:00Z',},],}"""
|
||||
|
||||
with patch("tools.cronjob_tools.cronjob", return_value=malformed_result):
|
||||
HermesCLI._handle_cron_command(cli_obj, "/cron list")
|
||||
|
||||
out = capsys.readouterr().out
|
||||
assert "Scheduled Jobs:" in out
|
||||
assert "job-1234567890ab" in out
|
||||
assert "Nightly Check" in out
|
||||
assert "blogwatcher" in out
|
||||
@@ -596,26 +596,3 @@ class TestAvailability:
|
||||
monkeypatch.setenv("HINDSIGHT_MODE", "local")
|
||||
p = HindsightMemoryProvider()
|
||||
assert p.is_available()
|
||||
|
||||
def test_local_embedded_profile_defaults_to_agent_identity(self, tmp_path, monkeypatch):
|
||||
config_path = tmp_path / "hindsight" / "config.json"
|
||||
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
config_path.write_text(json.dumps({
|
||||
"mode": "local_embedded",
|
||||
"llm_provider": "ollama",
|
||||
"llm_model": "gemma3:12b",
|
||||
}))
|
||||
monkeypatch.setattr(
|
||||
"plugins.memory.hindsight.get_hermes_home",
|
||||
lambda: tmp_path,
|
||||
)
|
||||
|
||||
p = HindsightMemoryProvider()
|
||||
p.initialize(
|
||||
session_id="test-session",
|
||||
hermes_home=str(tmp_path / "profiles" / "atlas-hindsight"),
|
||||
platform="cli",
|
||||
agent_identity="atlas-hindsight",
|
||||
)
|
||||
|
||||
assert p._config["profile"] == "atlas-hindsight"
|
||||
|
||||
@@ -1,94 +0,0 @@
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import MagicMock, patch
|
||||
import importlib
|
||||
import sys
|
||||
import types
|
||||
|
||||
|
||||
|
||||
def _make_tool_defs(*names: str) -> list:
|
||||
return [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": n,
|
||||
"description": f"{n} tool",
|
||||
"parameters": {"type": "object", "properties": {}},
|
||||
},
|
||||
}
|
||||
for n in names
|
||||
]
|
||||
|
||||
|
||||
|
||||
def _mock_response(content="Done", finish_reason="stop"):
|
||||
msg = SimpleNamespace(content=content, tool_calls=None)
|
||||
choice = SimpleNamespace(message=msg, finish_reason=finish_reason)
|
||||
return SimpleNamespace(choices=[choice], usage=SimpleNamespace(prompt_tokens=1, completion_tokens=1, total_tokens=2))
|
||||
|
||||
|
||||
|
||||
def _load_ai_agent():
    """Import run_agent (with a stubbed auxiliary client) and return AIAgent."""
    # Pre-seed the auxiliary client so importing run_agent does not pull in
    # real provider clients; setdefault keeps any stub installed earlier.
    sys.modules.setdefault("agent.auxiliary_client", types.SimpleNamespace(call_llm=lambda *a, **k: ""))
    run_agent = importlib.import_module("run_agent")
    return run_agent.AIAgent
|
||||
|
||||
|
||||
|
||||
def test_run_conversation_writes_context_snapshot_artifacts(tmp_path):
|
||||
AIAgent = _load_ai_agent()
|
||||
|
||||
class _FakePrivacyFilter:
|
||||
def __init__(self):
|
||||
self.last_report = None
|
||||
|
||||
def sanitize_messages(self, messages):
|
||||
return list(messages)
|
||||
|
||||
with (
|
||||
patch("run_agent.get_tool_definitions", return_value=_make_tool_defs("web_search")),
|
||||
patch("run_agent.check_toolset_requirements", return_value={}),
|
||||
patch("run_agent.OpenAI"),
|
||||
patch("hermes_cli.plugins.invoke_hook", return_value=[]),
|
||||
patch.dict(sys.modules, {"agent.privacy_filter": types.SimpleNamespace(PrivacyFilter=_FakePrivacyFilter)}),
|
||||
):
|
||||
agent = AIAgent(
|
||||
api_key="test-key-1234567890",
|
||||
base_url="https://example.com/v1",
|
||||
quiet_mode=True,
|
||||
skip_context_files=True,
|
||||
skip_memory=True,
|
||||
context_snapshots_enabled=True,
|
||||
context_snapshots_dir=str(tmp_path),
|
||||
)
|
||||
|
||||
agent.client = MagicMock()
|
||||
agent.client.chat.completions.create.return_value = _mock_response(content="Done")
|
||||
agent._build_system_prompt = MagicMock(return_value="Core system prompt")
|
||||
agent._memory_manager = MagicMock()
|
||||
agent._memory_manager.prefetch_all.return_value = "- remembered preference"
|
||||
agent._memory_manager.build_system_prompt.return_value = "# Hindsight Memory\nActive."
|
||||
agent._memory_manager.providers = [
|
||||
SimpleNamespace(name="builtin"),
|
||||
SimpleNamespace(name="hindsight"),
|
||||
]
|
||||
|
||||
result = agent.run_conversation("What do I prefer?")
|
||||
|
||||
assert result["final_response"] == "Done"
|
||||
|
||||
call_dir = tmp_path / agent.session_id / "call_001"
|
||||
assert call_dir.exists()
|
||||
assert (call_dir / "system_prompt.txt").read_text(encoding="utf-8") == "Core system prompt"
|
||||
assert (call_dir / "memory_provider_system_prompt.txt").read_text(encoding="utf-8").startswith("# Hindsight Memory")
|
||||
assert (call_dir / "memory_prefetch_raw.txt").read_text(encoding="utf-8") == "- remembered preference"
|
||||
assert "<memory-context>" in (call_dir / "memory_context_block.txt").read_text(encoding="utf-8")
|
||||
api_user_message = (call_dir / "api_user_message.txt").read_text(encoding="utf-8")
|
||||
assert "What do I prefer?" in api_user_message
|
||||
assert "remembered preference" in api_user_message
|
||||
api_messages = (call_dir / "api_messages.json").read_text(encoding="utf-8")
|
||||
assert '"role": "system"' in api_messages
|
||||
assert '"role": "user"' in api_messages
|
||||
metadata = (call_dir / "metadata.json").read_text(encoding="utf-8")
|
||||
assert '"hindsight"' in metadata
|
||||
@@ -1,22 +0,0 @@
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
ROOT = Path(__file__).resolve().parents[1]
|
||||
DOC = ROOT / "docs" / "hindsight-local-eval.md"
|
||||
|
||||
|
||||
def test_hindsight_local_eval_doc_exists_and_covers_switching():
    """Guard the local-eval doc: it must exist and mention every key concept."""
    assert DOC.exists(), "missing Hindsight local eval doc"
    text = DOC.read_text(encoding="utf-8")
    # Each snippet is load-bearing: eval home names, the env switches, the
    # snapshot artifact filenames, and the local Hindsight config path.
    for snippet in (
        "atlas-baseline",
        "atlas-mempalace",
        "atlas-hindsight",
        "HERMES_HOME",
        "HERMES_CONTEXT_SNAPSHOTS",
        "memory_prefetch_raw.txt",
        "api_user_message.txt",
        "local_embedded",
        "hindsight/config.json",
    ):
        assert snippet in text
|
||||
108
tests/tools/test_browser_json_repair.py
Normal file
108
tests/tools/test_browser_json_repair.py
Normal file
@@ -0,0 +1,108 @@
|
||||
import io
|
||||
import json
|
||||
import sys
|
||||
import types
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
|
||||
def _stub_auxiliary_client():
|
||||
stub = types.ModuleType("agent.auxiliary_client")
|
||||
stub.call_llm = lambda *args, **kwargs: None
|
||||
stub.resolve_provider_client = lambda *args, **kwargs: (None, None)
|
||||
stub.get_text_auxiliary_client = lambda *args, **kwargs: (None, None)
|
||||
stub.async_call_llm = lambda *args, **kwargs: None
|
||||
stub.extract_content_or_reasoning = lambda *args, **kwargs: ""
|
||||
stub._OR_HEADERS = {}
|
||||
stub._get_task_timeout = lambda *args, **kwargs: 30
|
||||
sys.modules["agent.auxiliary_client"] = stub
|
||||
|
||||
|
||||
def test_run_browser_command_repairs_malformed_stdout_envelope(tmp_path):
|
||||
_stub_auxiliary_client()
|
||||
from tools.browser_tool import _run_browser_command
|
||||
|
||||
mock_proc = MagicMock()
|
||||
mock_proc.returncode = 0
|
||||
mock_proc.wait.return_value = 0
|
||||
fake_session = {
|
||||
"session_name": "test-session",
|
||||
"session_id": "test-id",
|
||||
"cdp_url": None,
|
||||
}
|
||||
malformed_stdout = "{'success': true, 'data': {'url': 'https://example.com',},}"
|
||||
|
||||
def fake_open(path, mode="r", *args, **kwargs):
|
||||
path = str(path)
|
||||
if path.endswith("_stdout_navigate"):
|
||||
return io.StringIO(malformed_stdout)
|
||||
if path.endswith("_stderr_navigate"):
|
||||
return io.StringIO("")
|
||||
raise FileNotFoundError(path)
|
||||
|
||||
with (
|
||||
patch("tools.browser_tool._find_agent_browser", return_value="/usr/bin/agent-browser"),
|
||||
patch("tools.browser_tool._get_session_info", return_value=fake_session),
|
||||
patch("tools.browser_tool._socket_safe_tmpdir", return_value=str(tmp_path)),
|
||||
patch("tools.browser_tool._merge_browser_path", side_effect=lambda p: p),
|
||||
patch("tools.interrupt.is_interrupted", return_value=False),
|
||||
patch("subprocess.Popen", return_value=mock_proc),
|
||||
patch("os.open", return_value=99),
|
||||
patch("os.close"),
|
||||
patch("os.unlink"),
|
||||
patch("builtins.open", side_effect=fake_open),
|
||||
):
|
||||
result = _run_browser_command("task-1", "navigate", ["https://example.com"])
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["data"]["url"] == "https://example.com"
|
||||
|
||||
|
||||
def test_agent_browser_eval_repairs_malformed_json_result():
|
||||
_stub_auxiliary_client()
|
||||
from tools.browser_tool import _browser_eval
|
||||
|
||||
with patch(
|
||||
"tools.browser_tool._run_browser_command",
|
||||
return_value={"success": True, "data": {"result": "{'items': ['a', 'b',],}"}},
|
||||
):
|
||||
result = json.loads(_browser_eval("document.body.innerText", task_id="test"))
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["result"] == {"items": ["a", "b"]}
|
||||
assert result["result_type"] == "dict"
|
||||
|
||||
|
||||
def test_camofox_eval_repairs_malformed_json_result():
|
||||
_stub_auxiliary_client()
|
||||
from tools.browser_tool import _camofox_eval
|
||||
|
||||
with (
|
||||
patch("tools.browser_camofox._ensure_tab", return_value={"tab_id": "tab-1", "user_id": "user-1"}),
|
||||
patch("tools.browser_camofox._post", return_value={"result": "{'count': 3,}"}),
|
||||
):
|
||||
result = json.loads(_camofox_eval("2+1", task_id="test"))
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["result"] == {"count": 3}
|
||||
assert result["result_type"] == "dict"
|
||||
|
||||
|
||||
def test_browser_get_images_repairs_malformed_json_result():
|
||||
_stub_auxiliary_client()
|
||||
from tools.browser_tool import browser_get_images
|
||||
|
||||
with patch(
|
||||
"tools.browser_tool._run_browser_command",
|
||||
return_value={
|
||||
"success": True,
|
||||
"data": {
|
||||
"result": "[{\"src\": \"https://example.com/cat.png\", \"alt\": \"cat\",}]"
|
||||
},
|
||||
},
|
||||
):
|
||||
result = json.loads(browser_get_images(task_id="test"))
|
||||
|
||||
assert result["success"] is True
|
||||
assert result["count"] == 1
|
||||
assert result["images"] == [{"src": "https://example.com/cat.png", "alt": "cat"}]
|
||||
assert "warning" not in result
|
||||
@@ -67,6 +67,7 @@ from typing import Dict, Any, Optional, List
|
||||
from pathlib import Path
|
||||
from agent.auxiliary_client import call_llm
|
||||
from hermes_constants import get_hermes_home
|
||||
from utils import repair_and_load_json
|
||||
|
||||
try:
|
||||
from tools.website_policy import check_website_access
|
||||
@@ -1171,8 +1172,12 @@ def _run_browser_command(
|
||||
return {"success": False, "error": f"Browser command '{command}' returned no output"}
|
||||
|
||||
if stdout_text:
|
||||
try:
|
||||
parsed = json.loads(stdout_text)
|
||||
parsed = repair_and_load_json(
|
||||
stdout_text,
|
||||
default=None,
|
||||
context=f"browser_{command}_stdout",
|
||||
)
|
||||
if isinstance(parsed, dict):
|
||||
# Warn if snapshot came back empty (common sign of daemon/CDP issues)
|
||||
if command == "snapshot" and parsed.get("success"):
|
||||
snap_data = parsed.get("data", {})
|
||||
@@ -1181,35 +1186,35 @@ def _run_browser_command(
|
||||
"Possible stale daemon or CDP connection issue. "
|
||||
"returncode=%s", returncode)
|
||||
return parsed
|
||||
except json.JSONDecodeError:
|
||||
raw = stdout_text[:2000]
|
||||
logger.warning("browser '%s' returned non-JSON output (rc=%s): %s",
|
||||
command, returncode, raw[:500])
|
||||
|
||||
if command == "screenshot":
|
||||
stderr_text = (stderr or "").strip()
|
||||
combined_text = "\n".join(
|
||||
part for part in [stdout_text, stderr_text] if part
|
||||
raw = stdout_text[:2000]
|
||||
logger.warning("browser '%s' returned non-JSON output (rc=%s): %s",
|
||||
command, returncode, raw[:500])
|
||||
|
||||
if command == "screenshot":
|
||||
stderr_text = (stderr or "").strip()
|
||||
combined_text = "\n".join(
|
||||
part for part in [stdout_text, stderr_text] if part
|
||||
)
|
||||
recovered_path = _extract_screenshot_path_from_text(combined_text)
|
||||
|
||||
if recovered_path and Path(recovered_path).exists():
|
||||
logger.info(
|
||||
"browser 'screenshot' recovered file from non-JSON output: %s",
|
||||
recovered_path,
|
||||
)
|
||||
recovered_path = _extract_screenshot_path_from_text(combined_text)
|
||||
return {
|
||||
"success": True,
|
||||
"data": {
|
||||
"path": recovered_path,
|
||||
"raw": raw,
|
||||
},
|
||||
}
|
||||
|
||||
if recovered_path and Path(recovered_path).exists():
|
||||
logger.info(
|
||||
"browser 'screenshot' recovered file from non-JSON output: %s",
|
||||
recovered_path,
|
||||
)
|
||||
return {
|
||||
"success": True,
|
||||
"data": {
|
||||
"path": recovered_path,
|
||||
"raw": raw,
|
||||
},
|
||||
}
|
||||
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Non-JSON output from agent-browser for '{command}': {raw}"
|
||||
}
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Non-JSON output from agent-browser for '{command}': {raw}"
|
||||
}
|
||||
|
||||
# Check for errors
|
||||
if returncode != 0:
|
||||
@@ -1777,10 +1782,11 @@ def _browser_eval(expression: str, task_id: Optional[str] = None) -> str:
|
||||
# is valid JSON, parse it so the model gets structured data.
|
||||
parsed = raw_result
|
||||
if isinstance(raw_result, str):
|
||||
try:
|
||||
parsed = json.loads(raw_result)
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
pass # keep as string
|
||||
parsed = repair_and_load_json(
|
||||
raw_result,
|
||||
default=raw_result,
|
||||
context="browser_eval_result",
|
||||
)
|
||||
|
||||
return json.dumps({
|
||||
"success": True,
|
||||
@@ -1801,10 +1807,11 @@ def _camofox_eval(expression: str, task_id: Optional[str] = None) -> str:
|
||||
raw_result = resp.get("result") if isinstance(resp, dict) else resp
|
||||
parsed = raw_result
|
||||
if isinstance(raw_result, str):
|
||||
try:
|
||||
parsed = json.loads(raw_result)
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
pass
|
||||
parsed = repair_and_load_json(
|
||||
raw_result,
|
||||
default=raw_result,
|
||||
context="camofox_eval_result",
|
||||
)
|
||||
|
||||
return json.dumps({
|
||||
"success": True,
|
||||
@@ -1904,26 +1911,29 @@ def browser_get_images(task_id: Optional[str] = None) -> str:
|
||||
if result.get("success"):
|
||||
data = result.get("data", {})
|
||||
raw_result = data.get("result", "[]")
|
||||
|
||||
try:
|
||||
# Parse the JSON string returned by JavaScript
|
||||
if isinstance(raw_result, str):
|
||||
images = json.loads(raw_result)
|
||||
else:
|
||||
images = raw_result
|
||||
|
||||
return json.dumps({
|
||||
"success": True,
|
||||
"images": images,
|
||||
"count": len(images)
|
||||
}, ensure_ascii=False)
|
||||
except json.JSONDecodeError:
|
||||
return json.dumps({
|
||||
"success": True,
|
||||
"images": [],
|
||||
"count": 0,
|
||||
"warning": "Could not parse image data"
|
||||
}, ensure_ascii=False)
|
||||
|
||||
warning = None
|
||||
if isinstance(raw_result, str):
|
||||
images = repair_and_load_json(
|
||||
raw_result,
|
||||
default=None,
|
||||
context="browser_get_images_result",
|
||||
)
|
||||
else:
|
||||
images = raw_result
|
||||
|
||||
if not isinstance(images, list):
|
||||
images = []
|
||||
warning = "Could not parse image data"
|
||||
|
||||
payload = {
|
||||
"success": True,
|
||||
"images": images,
|
||||
"count": len(images),
|
||||
}
|
||||
if warning:
|
||||
payload["warning"] = warning
|
||||
return json.dumps(payload, ensure_ascii=False)
|
||||
else:
|
||||
return json.dumps({
|
||||
"success": False,
|
||||
|
||||
Reference in New Issue
Block a user