2026-02-25 18:20:38 -08:00
|
|
|
import importlib
|
|
|
|
|
import sys
|
|
|
|
|
import types
|
|
|
|
|
from contextlib import nullcontext
|
|
|
|
|
from types import SimpleNamespace
|
|
|
|
|
|
feat(memory): pluggable memory provider interface with profile isolation, review fixes, and honcho CLI restoration (#4623)
* feat(memory): add pluggable memory provider interface with profile isolation
Introduces a pluggable MemoryProvider ABC so external memory backends can
integrate with Hermes without modifying core files. Each backend becomes a
plugin implementing a standard interface, orchestrated by MemoryManager.
Key architecture:
- agent/memory_provider.py — ABC with core + optional lifecycle hooks
- agent/memory_manager.py — single integration point in the agent loop
- agent/builtin_memory_provider.py — wraps existing MEMORY.md/USER.md
Profile isolation fixes applied to all 6 shipped plugins:
- Cognitive Memory: use get_hermes_home() instead of raw env var
- Hindsight Memory: check $HERMES_HOME/hindsight/config.json first,
fall back to legacy ~/.hindsight/ for backward compat
- Hermes Memory Store: replace hardcoded ~/.hermes paths with
get_hermes_home() for config loading and DB path defaults
- Mem0 Memory: use get_hermes_home() instead of raw env var
- RetainDB Memory: auto-derive profile-scoped project name from
hermes_home path (hermes-<profile>), explicit env var overrides
- OpenViking Memory: read-only, no local state, isolation via .env
MemoryManager.initialize_all() now injects hermes_home into kwargs so
every provider can resolve profile-scoped storage without importing
get_hermes_home() themselves.
Plugin system: adds register_memory_provider() to PluginContext and
get_plugin_memory_providers() accessor.
Based on PR #3825. 46 tests (37 unit + 5 E2E + 4 plugin registration).
* refactor(memory): drop cognitive plugin, rewrite OpenViking as full provider
Remove cognitive-memory plugin (#727) — core mechanics are broken:
decay runs 24x too fast (hourly not daily), prefetch uses row ID as
timestamp, search limited by importance not similarity.
Rewrite openviking-memory plugin from a read-only search wrapper into
a full bidirectional memory provider using the complete OpenViking
session lifecycle API:
- sync_turn: records user/assistant messages to OpenViking session
(threaded, non-blocking)
- on_session_end: commits session to trigger automatic memory extraction
into 6 categories (profile, preferences, entities, events, cases,
patterns)
- prefetch: background semantic search via find() endpoint
- on_memory_write: mirrors built-in memory writes to the session
- is_available: checks env var only, no network calls (ABC compliance)
Tools expanded from 3 to 5:
- viking_search: semantic search with mode/scope/limit
- viking_read: tiered content (abstract ~100tok / overview ~2k / full)
- viking_browse: filesystem-style navigation (list/tree/stat)
- viking_remember: explicit memory storage via session
- viking_add_resource: ingest URLs/docs into knowledge base
Uses direct HTTP via httpx (no openviking SDK dependency needed).
Response truncation on viking_read to prevent context flooding.
* fix(memory): harden Mem0 plugin — thread safety, non-blocking sync, circuit breaker
- Remove redundant mem0_context tool (identical to mem0_search with
rerank=true, top_k=5 — wastes a tool slot and confuses the model)
- Thread sync_turn so it's non-blocking — Mem0's server-side LLM
extraction can take 5-10s, was stalling the agent after every turn
- Add threading.Lock around _get_client() for thread-safe lazy init
(prefetch and sync threads could race on first client creation)
- Add circuit breaker: after 5 consecutive API failures, pause calls
for 120s instead of hammering a down server every turn. Auto-resets
after cooldown. Logs a warning when tripped.
- Track success/failure in prefetch, sync_turn, and all tool calls
- Wait for previous sync to finish before starting a new one (prevents
unbounded thread accumulation on rapid turns)
- Clean up shutdown to join both prefetch and sync threads
* fix(memory): enforce single external memory provider limit
MemoryManager now rejects a second non-builtin provider with a warning.
Built-in memory (MEMORY.md/USER.md) is always accepted. Only ONE
external plugin provider is allowed at a time. This prevents tool
schema bloat (some providers add 3-5 tools each) and conflicting
memory backends.
The warning message directs users to configure memory.provider in
config.yaml to select which provider to activate.
Updated all 47 tests to use builtin + one external pattern instead
of multiple externals. Added test_second_external_rejected to verify
the enforcement.
* feat(memory): add ByteRover memory provider plugin
Implements the ByteRover integration (from PR #3499 by hieuntg81) as a
MemoryProvider plugin instead of direct run_agent.py modifications.
ByteRover provides persistent memory via the brv CLI — a hierarchical
knowledge tree with tiered retrieval (fuzzy text then LLM-driven search).
Local-first with optional cloud sync.
Plugin capabilities:
- prefetch: background brv query for relevant context
- sync_turn: curate conversation turns (threaded, non-blocking)
- on_memory_write: mirror built-in memory writes to brv
- on_pre_compress: extract insights before context compression
Tools (3):
- brv_query: search the knowledge tree
- brv_curate: store facts/decisions/patterns
- brv_status: check CLI version and context tree state
Profile isolation: working directory at $HERMES_HOME/byterover/ (scoped
per profile). Binary resolution cached with thread-safe double-checked
locking. All write operations threaded to avoid blocking the agent
(curate can take 120s with LLM processing).
* fix(memory): thread remaining sync_turns, fix holographic, add config key
Plugin fixes:
- Hindsight: thread sync_turn (was blocking up to 30s via _run_in_thread)
- RetainDB: thread sync_turn (was blocking on HTTP POST)
- Both: shutdown now joins sync threads alongside prefetch threads
Holographic retrieval fixes:
- reason(): removed dead intersection_key computation (bundled but never
used in scoring). Now reuses pre-computed entity_residuals directly,
moved role_content encoding outside the inner loop.
- contradict(): added _MAX_CONTRADICT_FACTS=500 scaling guard. Above
500 facts, only checks the most recently updated ones to avoid O(n^2)
explosion (~125K comparisons at 500 is acceptable).
Config:
- Added memory.provider key to DEFAULT_CONFIG ("" = builtin only).
No version bump needed (deep_merge handles new keys automatically).
* feat(memory): extract Honcho as a MemoryProvider plugin
Creates plugins/honcho-memory/ as a thin adapter over the existing
honcho_integration/ package. All 4 Honcho tools (profile, search,
context, conclude) move from the normal tool registry to the
MemoryProvider interface.
The plugin delegates all work to HonchoSessionManager — no Honcho
logic is reimplemented. It uses the existing config chain:
$HERMES_HOME/honcho.json -> ~/.honcho/config.json -> env vars.
Lifecycle hooks:
- initialize: creates HonchoSessionManager via existing client factory
- prefetch: background dialectic query
- sync_turn: records messages + flushes to API (threaded)
- on_memory_write: mirrors user profile writes as conclusions
- on_session_end: flushes all pending messages
This is a prerequisite for the MemoryManager wiring in run_agent.py.
Once wired, Honcho goes through the same provider interface as all
other memory plugins, and the scattered Honcho code in run_agent.py
can be consolidated into the single MemoryManager integration point.
* feat(memory): wire MemoryManager into run_agent.py
Adds 8 integration points for the external memory provider plugin,
all purely additive (zero existing code modified):
1. Init (~L1130): Create MemoryManager, find matching plugin provider
from memory.provider config, initialize with session context
2. Tool injection (~L1160): Append provider tool schemas to self.tools
and self.valid_tool_names after memory_manager init
3. System prompt (~L2705): Add external provider's system_prompt_block
alongside existing MEMORY.md/USER.md blocks
4. Tool routing (~L5362): Route provider tool calls through
memory_manager.handle_tool_call() before the catchall handler
5. Memory write bridge (~L5353): Notify external provider via
on_memory_write() when the built-in memory tool writes
6. Pre-compress (~L5233): Call on_pre_compress() before context
compression discards messages
7. Prefetch (~L6421): Inject provider prefetch results into the
current-turn user message (same pattern as Honcho turn context)
8. Turn sync + session end (~L8161, ~L8172): sync_all() after each
completed turn, queue_prefetch_all() for next turn, on_session_end()
+ shutdown_all() at conversation end
All hooks are wrapped in try/except — a failing provider never breaks
the agent. The existing memory system, Honcho integration, and all
other code paths are completely untouched.
Full suite: 7222 passed, 4 pre-existing failures.
* refactor(memory): remove legacy Honcho integration from core
Extracts all Honcho-specific code from run_agent.py, model_tools.py,
toolsets.py, and gateway/run.py. Honcho is now exclusively available
as a memory provider plugin (plugins/honcho-memory/).
Removed from run_agent.py (-457 lines):
- Honcho init block (session manager creation, activation, config)
- 8 Honcho methods: _honcho_should_activate, _strip_honcho_tools,
_activate_honcho, _register_honcho_exit_hook, _queue_honcho_prefetch,
_honcho_prefetch, _honcho_save_user_observation, _honcho_sync
- _inject_honcho_turn_context module-level function
- Honcho system prompt block (tool descriptions, CLI commands)
- Honcho context injection in api_messages building
- Honcho params from __init__ (honcho_session_key, honcho_manager,
honcho_config)
- HONCHO_TOOL_NAMES constant
- All honcho-specific tool dispatch forwarding
Removed from other files:
- model_tools.py: honcho_tools import, honcho params from handle_function_call
- toolsets.py: honcho toolset definition, honcho tools from core tools list
- gateway/run.py: honcho params from AIAgent constructor calls
Removed tests (-339 lines):
- 9 Honcho-specific test methods from test_run_agent.py
- TestHonchoAtexitFlush class from test_exit_cleanup_interrupt.py
Restored two regex constants (_SURROGATE_RE, _BUDGET_WARNING_RE) that
were accidentally removed during the honcho function extraction.
The honcho_integration/ package is kept intact — the plugin delegates
to it. tools/honcho_tools.py registry entries are now dead code (import
commented out in model_tools.py) but the file is preserved for reference.
Full suite: 7207 passed, 4 pre-existing failures. Zero regressions.
* refactor(memory): restructure plugins, add CLI, clean gateway, migration notice
Plugin restructure:
- Move all memory plugins from plugins/<name>-memory/ to plugins/memory/<name>/
(byterover, hindsight, holographic, honcho, mem0, openviking, retaindb)
- New plugins/memory/__init__.py discovery module that scans the directory
directly, loading providers by name without the general plugin system
- run_agent.py uses load_memory_provider() instead of get_plugin_memory_providers()
CLI wiring:
- hermes memory setup — interactive curses picker + config wizard
- hermes memory status — show active provider, config, availability
- hermes memory off — disable external provider (built-in only)
- hermes honcho — now shows migration notice pointing to hermes memory setup
Gateway cleanup:
- Remove _get_or_create_gateway_honcho (already removed in prev commit)
- Remove _shutdown_gateway_honcho and _shutdown_all_gateway_honcho methods
- Remove all calls to shutdown methods (4 call sites)
- Remove _honcho_managers/_honcho_configs dict references
Dead code removal:
- Delete tools/honcho_tools.py (279 lines, import was already commented out)
- Delete tests/gateway/test_honcho_lifecycle.py (131 lines, tested removed methods)
- Remove if False placeholder from run_agent.py
Migration:
- Honcho migration notice on startup: detects existing honcho.json or
~/.honcho/config.json, prints guidance to run hermes memory setup.
Only fires when memory.provider is not set and not in quiet mode.
Full suite: 7203 passed, 4 pre-existing failures. Zero regressions.
* feat(memory): standardize plugin config + add per-plugin documentation
Config architecture:
- Add save_config(values, hermes_home) to MemoryProvider ABC
- Honcho: writes to $HERMES_HOME/honcho.json (SDK native)
- Mem0: writes to $HERMES_HOME/mem0.json
- Hindsight: writes to $HERMES_HOME/hindsight/config.json
- Holographic: writes to config.yaml under plugins.hermes-memory-store
- OpenViking/RetainDB/ByteRover: env-var only (default no-op)
Setup wizard (hermes memory setup):
- Now calls provider.save_config() for non-secret config
- Secrets still go to .env via env vars
- Only memory.provider activation key goes to config.yaml
Documentation:
- README.md for each of the 7 providers in plugins/memory/<name>/
- Requirements, setup (wizard + manual), config reference, tools table
- Consistent format across all providers
The contract for new memory plugins:
- get_config_schema() declares all fields (REQUIRED)
- save_config() writes native config (REQUIRED if not env-var-only)
- Secrets use env_var field in schema, written to .env by wizard
- README.md in the plugin directory
* docs: add memory providers user guide + developer guide
New pages:
- user-guide/features/memory-providers.md — comprehensive guide covering
all 7 shipped providers (Honcho, OpenViking, Mem0, Hindsight,
Holographic, RetainDB, ByteRover). Each with setup, config, tools,
cost, and unique features. Includes comparison table and profile
isolation notes.
- developer-guide/memory-provider-plugin.md — how to build a new memory
provider plugin. Covers ABC, required methods, config schema,
save_config, threading contract, profile isolation, testing.
Updated pages:
- user-guide/features/memory.md — replaced Honcho section with link to
new Memory Providers page
- user-guide/features/honcho.md — replaced with migration redirect to
the new Memory Providers page
- sidebars.ts — added both new pages to navigation
* fix(memory): auto-migrate Honcho users to memory provider plugin
When honcho.json or ~/.honcho/config.json exists but memory.provider
is not set, automatically set memory.provider: honcho in config.yaml
and activate the plugin. The plugin reads the same config files, so
all data and credentials are preserved. Zero user action needed.
Persists the migration to config.yaml so it only fires once. Prints
a one-line confirmation in non-quiet mode.
* fix(memory): only auto-migrate Honcho when enabled + credentialed
Check HonchoClientConfig.enabled AND (api_key OR base_url) before
auto-migrating — not just file existence. Prevents false activation
for users who disabled Honcho, stopped using it (config lingers),
or have ~/.honcho/ from a different tool.
* feat(memory): auto-install pip dependencies during hermes memory setup
Reads pip_dependencies from plugin.yaml, checks which are missing,
installs them via pip before config walkthrough. Also shows install
guidance for external_dependencies (e.g. brv CLI for ByteRover).
Updated all 7 plugin.yaml files with pip_dependencies:
- honcho: honcho-ai
- mem0: mem0ai
- openviking: httpx
- hindsight: hindsight-client
- holographic: (none)
- retaindb: requests
- byterover: (external_dependencies for brv CLI)
* fix: remove remaining Honcho crash risks from cli.py and gateway
cli.py: removed Honcho session re-mapping block (would crash importing
deleted tools/honcho_tools.py), Honcho flush on compress, Honcho
session display on startup, Honcho shutdown on exit, honcho_session_key
AIAgent param.
gateway/run.py: removed honcho_session_key params from helper methods,
sync_honcho param, _honcho.shutdown() block.
tests: fixed test_cron_session_with_honcho_key_skipped (was passing
removed honcho_key param to _flush_memories_for_session).
* fix: include plugins/ in pyproject.toml package list
Without this, plugins/memory/ wouldn't be included in non-editable
installs. Hermes always runs from the repo checkout so this is belt-
and-suspenders, but prevents breakage if the install method changes.
* fix(memory): correct pip-to-import name mapping for dep checks
The heuristic dep.replace('-', '_') fails for packages where the pip
name differs from the import name: honcho-ai→honcho, mem0ai→mem0,
hindsight-client→hindsight_client. Added explicit mapping table so
hermes memory setup doesn't try to reinstall already-installed packages.
* chore: remove dead code from old plugin memory registration path
- hermes_cli/plugins.py: removed register_memory_provider(),
_memory_providers list, get_plugin_memory_providers() — memory
providers now use plugins/memory/ discovery, not the general plugin system
- hermes_cli/main.py: stripped 74 lines of dead honcho argparse
subparsers (setup, status, sessions, map, peer, mode, tokens,
identity, migrate) — kept only the migration redirect
- agent/memory_provider.py: updated docstring to reflect new
registration path
- tests: replaced TestPluginMemoryProviderRegistration with
TestPluginMemoryDiscovery that tests the actual plugins/memory/
discovery system. Added 3 new tests (discover, load, nonexistent).
* chore: delete dead honcho_integration/cli.py and its tests
cli.py (794 lines) was the old 'hermes honcho' command handler — nobody
calls it since cmd_honcho was replaced with a migration redirect.
Deleted tests that imported from removed code:
- tests/honcho_integration/test_cli.py (tested _resolve_api_key)
- tests/honcho_integration/test_config_isolation.py (tested CLI config paths)
- tests/tools/test_honcho_tools.py (tested the deleted tools/honcho_tools.py)
Remaining honcho_integration/ files (actively used by the plugin):
- client.py (445 lines) — config loading, SDK client creation
- session.py (991 lines) — session management, queries, flush
* refactor: move honcho_integration/ into the honcho plugin
Moves client.py (445 lines) and session.py (991 lines) from the
top-level honcho_integration/ package into plugins/memory/honcho/.
No Honcho code remains in the main codebase.
- plugins/memory/honcho/client.py — config loading, SDK client creation
- plugins/memory/honcho/session.py — session management, queries, flush
- Updated all imports: run_agent.py (auto-migration), hermes_cli/doctor.py,
plugin __init__.py, session.py cross-import, all tests
- Removed honcho_integration/ package and pyproject.toml entry
- Renamed tests/honcho_integration/ → tests/honcho_plugin/
* docs: update architecture + gateway-internals for memory provider system
- architecture.md: replaced honcho_integration/ with plugins/memory/
- gateway-internals.md: replaced Honcho-specific session routing and
flush lifecycle docs with generic memory provider interface docs
* fix: update stale mock path for resolve_active_host after honcho plugin migration
* fix(memory): address review feedback — P0 lifecycle, ABC contract, honcho CLI restore
Review feedback from Honcho devs (erosika):
P0 — Provider lifecycle:
- Remove on_session_end() + shutdown_all() from run_conversation() tail
(was killing providers after every turn in multi-turn sessions)
- Add shutdown_memory_provider() method on AIAgent for callers
- Wire shutdown into CLI atexit, reset_conversation, gateway stop/expiry
Bug fixes:
- Remove sync_honcho=False kwarg from /btw callsites (TypeError crash)
- Fix doctor.py references to dead 'hermes honcho setup' command
- Cache prefetch_all() before tool loop (was re-calling every iteration)
ABC contract hardening (all backwards-compatible):
- Add session_id kwarg to prefetch/sync_turn/queue_prefetch
- Make on_pre_compress() return str (provider insights in compression)
- Add **kwargs to on_turn_start() for runtime context
- Add on_delegation() hook for parent-side subagent observation
- Document agent_context/agent_identity/agent_workspace kwargs on
initialize() (prevents cron corruption, enables profile scoping)
- Fix docstring: single external provider, not multiple
Honcho CLI restoration:
- Add plugins/memory/honcho/cli.py (from main's honcho_integration/cli.py
with imports adapted to plugin path)
- Restore full hermes honcho command with all subcommands (status, peer,
mode, tokens, identity, enable/disable, sync, peers, --target-profile)
- Restore auto-clone on profile creation + sync on hermes update
- hermes honcho setup now redirects to hermes memory setup
* fix(memory): wire on_delegation, skip_memory for cron/flush, fix ByteRover return type
- Wire on_delegation() in delegate_tool.py — parent's memory provider
is notified with task+result after each subagent completes
- Add skip_memory=True to cron scheduler (prevents cron system prompts
from corrupting user representations — closes #4052)
- Add skip_memory=True to gateway flush agent (throwaway agent shouldn't
activate memory provider)
- Fix ByteRover on_pre_compress() return type: None -> str
* fix(honcho): port profile isolation fixes from PR #4632
Ports 5 bug fixes found during profile testing (erosika's PR #4632):
1. 3-tier config resolution — resolve_config_path() now checks
$HERMES_HOME/honcho.json → ~/.hermes/honcho.json → ~/.honcho/config.json
(non-default profiles couldn't find shared host blocks)
2. Thread host=_host_key() through from_global_config() in cmd_setup,
cmd_status, cmd_identity (--target-profile was being ignored)
3. Use bare profile name as aiPeer (not host key with dots) — Honcho's
peer ID pattern is ^[a-zA-Z0-9_-]+$, dots are invalid
4. Wrap add_peers() in try/except — was fatal on new AI peers, killed
all message uploads for the session
5. Gate Honcho clone behind --clone/--clone-all on profile create
(bare create should be blank-slate)
Also: sanitize assistant_peer_id via _sanitize_id()
* fix(tests): add module cleanup fixture to test_cli_provider_resolution
test_cli_provider_resolution._import_cli() wipes tools.*, cli, and
run_agent from sys.modules to force fresh imports, but had no cleanup.
This poisoned all subsequent tests on the same xdist worker — mocks
targeting tools.file_tools, tools.send_message_tool, etc. patched the
NEW module object while already-imported functions still referenced
the OLD one. Caused ~25 cascade failures: send_message KeyError,
process_registry FileNotFoundError, file_read_guards timeouts,
read_loop_detection file-not-found, mcp_oauth None port, and
provider_parity/codex_execution stale tool lists.
Fix: autouse fixture saves all affected modules before each test and
restores them after, matching the pattern in
test_managed_browserbase_and_modal.py.
2026-04-02 15:33:51 -07:00
|
|
|
import pytest
|
|
|
|
|
|
2026-02-25 18:20:38 -08:00
|
|
|
from hermes_cli.auth import AuthError
|
|
|
|
|
from hermes_cli import main as hermes_main
|
|
|
|
|
|
|
|
|
|
|
feat(memory): pluggable memory provider interface with profile isolation, review fixes, and honcho CLI restoration (#4623)
* feat(memory): add pluggable memory provider interface with profile isolation
Introduces a pluggable MemoryProvider ABC so external memory backends can
integrate with Hermes without modifying core files. Each backend becomes a
plugin implementing a standard interface, orchestrated by MemoryManager.
Key architecture:
- agent/memory_provider.py — ABC with core + optional lifecycle hooks
- agent/memory_manager.py — single integration point in the agent loop
- agent/builtin_memory_provider.py — wraps existing MEMORY.md/USER.md
Profile isolation fixes applied to all 6 shipped plugins:
- Cognitive Memory: use get_hermes_home() instead of raw env var
- Hindsight Memory: check $HERMES_HOME/hindsight/config.json first,
fall back to legacy ~/.hindsight/ for backward compat
- Hermes Memory Store: replace hardcoded ~/.hermes paths with
get_hermes_home() for config loading and DB path defaults
- Mem0 Memory: use get_hermes_home() instead of raw env var
- RetainDB Memory: auto-derive profile-scoped project name from
hermes_home path (hermes-<profile>), explicit env var overrides
- OpenViking Memory: read-only, no local state, isolation via .env
MemoryManager.initialize_all() now injects hermes_home into kwargs so
every provider can resolve profile-scoped storage without importing
get_hermes_home() themselves.
Plugin system: adds register_memory_provider() to PluginContext and
get_plugin_memory_providers() accessor.
Based on PR #3825. 46 tests (37 unit + 5 E2E + 4 plugin registration).
* refactor(memory): drop cognitive plugin, rewrite OpenViking as full provider
Remove cognitive-memory plugin (#727) — core mechanics are broken:
decay runs 24x too fast (hourly not daily), prefetch uses row ID as
timestamp, search limited by importance not similarity.
Rewrite openviking-memory plugin from a read-only search wrapper into
a full bidirectional memory provider using the complete OpenViking
session lifecycle API:
- sync_turn: records user/assistant messages to OpenViking session
(threaded, non-blocking)
- on_session_end: commits session to trigger automatic memory extraction
into 6 categories (profile, preferences, entities, events, cases,
patterns)
- prefetch: background semantic search via find() endpoint
- on_memory_write: mirrors built-in memory writes to the session
- is_available: checks env var only, no network calls (ABC compliance)
Tools expanded from 3 to 5:
- viking_search: semantic search with mode/scope/limit
- viking_read: tiered content (abstract ~100tok / overview ~2k / full)
- viking_browse: filesystem-style navigation (list/tree/stat)
- viking_remember: explicit memory storage via session
- viking_add_resource: ingest URLs/docs into knowledge base
Uses direct HTTP via httpx (no openviking SDK dependency needed).
Response truncation on viking_read to prevent context flooding.
* fix(memory): harden Mem0 plugin — thread safety, non-blocking sync, circuit breaker
- Remove redundant mem0_context tool (identical to mem0_search with
rerank=true, top_k=5 — wastes a tool slot and confuses the model)
- Thread sync_turn so it's non-blocking — Mem0's server-side LLM
extraction can take 5-10s, was stalling the agent after every turn
- Add threading.Lock around _get_client() for thread-safe lazy init
(prefetch and sync threads could race on first client creation)
- Add circuit breaker: after 5 consecutive API failures, pause calls
for 120s instead of hammering a down server every turn. Auto-resets
after cooldown. Logs a warning when tripped.
- Track success/failure in prefetch, sync_turn, and all tool calls
- Wait for previous sync to finish before starting a new one (prevents
unbounded thread accumulation on rapid turns)
- Clean up shutdown to join both prefetch and sync threads
* fix(memory): enforce single external memory provider limit
MemoryManager now rejects a second non-builtin provider with a warning.
Built-in memory (MEMORY.md/USER.md) is always accepted. Only ONE
external plugin provider is allowed at a time. This prevents tool
schema bloat (some providers add 3-5 tools each) and conflicting
memory backends.
The warning message directs users to configure memory.provider in
config.yaml to select which provider to activate.
Updated all 47 tests to use builtin + one external pattern instead
of multiple externals. Added test_second_external_rejected to verify
the enforcement.
* feat(memory): add ByteRover memory provider plugin
Implements the ByteRover integration (from PR #3499 by hieuntg81) as a
MemoryProvider plugin instead of direct run_agent.py modifications.
ByteRover provides persistent memory via the brv CLI — a hierarchical
knowledge tree with tiered retrieval (fuzzy text then LLM-driven search).
Local-first with optional cloud sync.
Plugin capabilities:
- prefetch: background brv query for relevant context
- sync_turn: curate conversation turns (threaded, non-blocking)
- on_memory_write: mirror built-in memory writes to brv
- on_pre_compress: extract insights before context compression
Tools (3):
- brv_query: search the knowledge tree
- brv_curate: store facts/decisions/patterns
- brv_status: check CLI version and context tree state
Profile isolation: working directory at $HERMES_HOME/byterover/ (scoped
per profile). Binary resolution cached with thread-safe double-checked
locking. All write operations threaded to avoid blocking the agent
(curate can take 120s with LLM processing).
* fix(memory): thread remaining sync_turns, fix holographic, add config key
Plugin fixes:
- Hindsight: thread sync_turn (was blocking up to 30s via _run_in_thread)
- RetainDB: thread sync_turn (was blocking on HTTP POST)
- Both: shutdown now joins sync threads alongside prefetch threads
Holographic retrieval fixes:
- reason(): removed dead intersection_key computation (bundled but never
used in scoring). Now reuses pre-computed entity_residuals directly,
moved role_content encoding outside the inner loop.
- contradict(): added _MAX_CONTRADICT_FACTS=500 scaling guard. Above
500 facts, only checks the most recently updated ones to avoid O(n^2)
explosion (~125K comparisons at 500 is acceptable).
Config:
- Added memory.provider key to DEFAULT_CONFIG ("" = builtin only).
No version bump needed (deep_merge handles new keys automatically).
* feat(memory): extract Honcho as a MemoryProvider plugin
Creates plugins/honcho-memory/ as a thin adapter over the existing
honcho_integration/ package. All 4 Honcho tools (profile, search,
context, conclude) move from the normal tool registry to the
MemoryProvider interface.
The plugin delegates all work to HonchoSessionManager — no Honcho
logic is reimplemented. It uses the existing config chain:
$HERMES_HOME/honcho.json -> ~/.honcho/config.json -> env vars.
Lifecycle hooks:
- initialize: creates HonchoSessionManager via existing client factory
- prefetch: background dialectic query
- sync_turn: records messages + flushes to API (threaded)
- on_memory_write: mirrors user profile writes as conclusions
- on_session_end: flushes all pending messages
This is a prerequisite for the MemoryManager wiring in run_agent.py.
Once wired, Honcho goes through the same provider interface as all
other memory plugins, and the scattered Honcho code in run_agent.py
can be consolidated into the single MemoryManager integration point.
* feat(memory): wire MemoryManager into run_agent.py
Adds 8 integration points for the external memory provider plugin,
all purely additive (zero existing code modified):
1. Init (~L1130): Create MemoryManager, find matching plugin provider
from memory.provider config, initialize with session context
2. Tool injection (~L1160): Append provider tool schemas to self.tools
and self.valid_tool_names after memory_manager init
3. System prompt (~L2705): Add external provider's system_prompt_block
alongside existing MEMORY.md/USER.md blocks
4. Tool routing (~L5362): Route provider tool calls through
memory_manager.handle_tool_call() before the catchall handler
5. Memory write bridge (~L5353): Notify external provider via
on_memory_write() when the built-in memory tool writes
6. Pre-compress (~L5233): Call on_pre_compress() before context
compression discards messages
7. Prefetch (~L6421): Inject provider prefetch results into the
current-turn user message (same pattern as Honcho turn context)
8. Turn sync + session end (~L8161, ~L8172): sync_all() after each
completed turn, queue_prefetch_all() for next turn, on_session_end()
+ shutdown_all() at conversation end
All hooks are wrapped in try/except — a failing provider never breaks
the agent. The existing memory system, Honcho integration, and all
other code paths are completely untouched.
Full suite: 7222 passed, 4 pre-existing failures.
* refactor(memory): remove legacy Honcho integration from core
Extracts all Honcho-specific code from run_agent.py, model_tools.py,
toolsets.py, and gateway/run.py. Honcho is now exclusively available
as a memory provider plugin (plugins/honcho-memory/).
Removed from run_agent.py (-457 lines):
- Honcho init block (session manager creation, activation, config)
- 8 Honcho methods: _honcho_should_activate, _strip_honcho_tools,
_activate_honcho, _register_honcho_exit_hook, _queue_honcho_prefetch,
_honcho_prefetch, _honcho_save_user_observation, _honcho_sync
- _inject_honcho_turn_context module-level function
- Honcho system prompt block (tool descriptions, CLI commands)
- Honcho context injection in api_messages building
- Honcho params from __init__ (honcho_session_key, honcho_manager,
honcho_config)
- HONCHO_TOOL_NAMES constant
- All honcho-specific tool dispatch forwarding
Removed from other files:
- model_tools.py: honcho_tools import, honcho params from handle_function_call
- toolsets.py: honcho toolset definition, honcho tools from core tools list
- gateway/run.py: honcho params from AIAgent constructor calls
Removed tests (-339 lines):
- 9 Honcho-specific test methods from test_run_agent.py
- TestHonchoAtexitFlush class from test_exit_cleanup_interrupt.py
Restored two regex constants (_SURROGATE_RE, _BUDGET_WARNING_RE) that
were accidentally removed during the honcho function extraction.
The honcho_integration/ package is kept intact — the plugin delegates
to it. tools/honcho_tools.py registry entries are now dead code (import
commented out in model_tools.py) but the file is preserved for reference.
Full suite: 7207 passed, 4 pre-existing failures. Zero regressions.
* refactor(memory): restructure plugins, add CLI, clean gateway, migration notice
Plugin restructure:
- Move all memory plugins from plugins/<name>-memory/ to plugins/memory/<name>/
(byterover, hindsight, holographic, honcho, mem0, openviking, retaindb)
- New plugins/memory/__init__.py discovery module that scans the directory
directly, loading providers by name without the general plugin system
- run_agent.py uses load_memory_provider() instead of get_plugin_memory_providers()
CLI wiring:
- hermes memory setup — interactive curses picker + config wizard
- hermes memory status — show active provider, config, availability
- hermes memory off — disable external provider (built-in only)
- hermes honcho — now shows migration notice pointing to hermes memory setup
Gateway cleanup:
- Remove _get_or_create_gateway_honcho (already removed in prev commit)
- Remove _shutdown_gateway_honcho and _shutdown_all_gateway_honcho methods
- Remove all calls to shutdown methods (4 call sites)
- Remove _honcho_managers/_honcho_configs dict references
Dead code removal:
- Delete tools/honcho_tools.py (279 lines, import was already commented out)
- Delete tests/gateway/test_honcho_lifecycle.py (131 lines, tested removed methods)
- Remove if False placeholder from run_agent.py
Migration:
- Honcho migration notice on startup: detects existing honcho.json or
~/.honcho/config.json, prints guidance to run hermes memory setup.
Only fires when memory.provider is not set and not in quiet mode.
Full suite: 7203 passed, 4 pre-existing failures. Zero regressions.
* feat(memory): standardize plugin config + add per-plugin documentation
Config architecture:
- Add save_config(values, hermes_home) to MemoryProvider ABC
- Honcho: writes to $HERMES_HOME/honcho.json (SDK native)
- Mem0: writes to $HERMES_HOME/mem0.json
- Hindsight: writes to $HERMES_HOME/hindsight/config.json
- Holographic: writes to config.yaml under plugins.hermes-memory-store
- OpenViking/RetainDB/ByteRover: env-var only (default no-op)
Setup wizard (hermes memory setup):
- Now calls provider.save_config() for non-secret config
- Secrets still go to .env via env vars
- Only memory.provider activation key goes to config.yaml
Documentation:
- README.md for each of the 7 providers in plugins/memory/<name>/
- Requirements, setup (wizard + manual), config reference, tools table
- Consistent format across all providers
The contract for new memory plugins:
- get_config_schema() declares all fields (REQUIRED)
- save_config() writes native config (REQUIRED if not env-var-only)
- Secrets use env_var field in schema, written to .env by wizard
- README.md in the plugin directory
* docs: add memory providers user guide + developer guide
New pages:
- user-guide/features/memory-providers.md — comprehensive guide covering
all 7 shipped providers (Honcho, OpenViking, Mem0, Hindsight,
Holographic, RetainDB, ByteRover). Each with setup, config, tools,
cost, and unique features. Includes comparison table and profile
isolation notes.
- developer-guide/memory-provider-plugin.md — how to build a new memory
provider plugin. Covers ABC, required methods, config schema,
save_config, threading contract, profile isolation, testing.
Updated pages:
- user-guide/features/memory.md — replaced Honcho section with link to
new Memory Providers page
- user-guide/features/honcho.md — replaced with migration redirect to
the new Memory Providers page
- sidebars.ts — added both new pages to navigation
* fix(memory): auto-migrate Honcho users to memory provider plugin
When honcho.json or ~/.honcho/config.json exists but memory.provider
is not set, automatically set memory.provider: honcho in config.yaml
and activate the plugin. The plugin reads the same config files, so
all data and credentials are preserved. Zero user action needed.
Persists the migration to config.yaml so it only fires once. Prints
a one-line confirmation in non-quiet mode.
* fix(memory): only auto-migrate Honcho when enabled + credentialed
Check HonchoClientConfig.enabled AND (api_key OR base_url) before
auto-migrating — not just file existence. Prevents false activation
for users who disabled Honcho, stopped using it (config lingers),
or have ~/.honcho/ from a different tool.
* feat(memory): auto-install pip dependencies during hermes memory setup
Reads pip_dependencies from plugin.yaml, checks which are missing,
installs them via pip before config walkthrough. Also shows install
guidance for external_dependencies (e.g. brv CLI for ByteRover).
Updated all 7 plugin.yaml files with pip_dependencies:
- honcho: honcho-ai
- mem0: mem0ai
- openviking: httpx
- hindsight: hindsight-client
- holographic: (none)
- retaindb: requests
- byterover: (external_dependencies for brv CLI)
* fix: remove remaining Honcho crash risks from cli.py and gateway
cli.py: removed Honcho session re-mapping block (would crash importing
deleted tools/honcho_tools.py), Honcho flush on compress, Honcho
session display on startup, Honcho shutdown on exit, honcho_session_key
AIAgent param.
gateway/run.py: removed honcho_session_key params from helper methods,
sync_honcho param, _honcho.shutdown() block.
tests: fixed test_cron_session_with_honcho_key_skipped (was passing
removed honcho_key param to _flush_memories_for_session).
* fix: include plugins/ in pyproject.toml package list
Without this, plugins/memory/ wouldn't be included in non-editable
installs. Hermes always runs from the repo checkout so this is belt-
and-suspenders, but prevents breakage if the install method changes.
* fix(memory): correct pip-to-import name mapping for dep checks
The heuristic dep.replace('-', '_') fails for packages where the pip
name differs from the import name: honcho-ai→honcho, mem0ai→mem0,
hindsight-client→hindsight_client. Added explicit mapping table so
hermes memory setup doesn't try to reinstall already-installed packages.
* chore: remove dead code from old plugin memory registration path
- hermes_cli/plugins.py: removed register_memory_provider(),
_memory_providers list, get_plugin_memory_providers() — memory
providers now use plugins/memory/ discovery, not the general plugin system
- hermes_cli/main.py: stripped 74 lines of dead honcho argparse
subparsers (setup, status, sessions, map, peer, mode, tokens,
identity, migrate) — kept only the migration redirect
- agent/memory_provider.py: updated docstring to reflect new
registration path
- tests: replaced TestPluginMemoryProviderRegistration with
TestPluginMemoryDiscovery that tests the actual plugins/memory/
discovery system. Added 3 new tests (discover, load, nonexistent).
* chore: delete dead honcho_integration/cli.py and its tests
cli.py (794 lines) was the old 'hermes honcho' command handler — nobody
calls it since cmd_honcho was replaced with a migration redirect.
Deleted tests that imported from removed code:
- tests/honcho_integration/test_cli.py (tested _resolve_api_key)
- tests/honcho_integration/test_config_isolation.py (tested CLI config paths)
- tests/tools/test_honcho_tools.py (tested the deleted tools/honcho_tools.py)
Remaining honcho_integration/ files (actively used by the plugin):
- client.py (445 lines) — config loading, SDK client creation
- session.py (991 lines) — session management, queries, flush
* refactor: move honcho_integration/ into the honcho plugin
Moves client.py (445 lines) and session.py (991 lines) from the
top-level honcho_integration/ package into plugins/memory/honcho/.
No Honcho code remains in the main codebase.
- plugins/memory/honcho/client.py — config loading, SDK client creation
- plugins/memory/honcho/session.py — session management, queries, flush
- Updated all imports: run_agent.py (auto-migration), hermes_cli/doctor.py,
plugin __init__.py, session.py cross-import, all tests
- Removed honcho_integration/ package and pyproject.toml entry
- Renamed tests/honcho_integration/ → tests/honcho_plugin/
* docs: update architecture + gateway-internals for memory provider system
- architecture.md: replaced honcho_integration/ with plugins/memory/
- gateway-internals.md: replaced Honcho-specific session routing and
flush lifecycle docs with generic memory provider interface docs
* fix: update stale mock path for resolve_active_host after honcho plugin migration
* fix(memory): address review feedback — P0 lifecycle, ABC contract, honcho CLI restore
Review feedback from Honcho devs (erosika):
P0 — Provider lifecycle:
- Remove on_session_end() + shutdown_all() from run_conversation() tail
(was killing providers after every turn in multi-turn sessions)
- Add shutdown_memory_provider() method on AIAgent for callers
- Wire shutdown into CLI atexit, reset_conversation, gateway stop/expiry
Bug fixes:
- Remove sync_honcho=False kwarg from /btw callsites (TypeError crash)
- Fix doctor.py references to dead 'hermes honcho setup' command
- Cache prefetch_all() before tool loop (was re-calling every iteration)
ABC contract hardening (all backwards-compatible):
- Add session_id kwarg to prefetch/sync_turn/queue_prefetch
- Make on_pre_compress() return str (provider insights in compression)
- Add **kwargs to on_turn_start() for runtime context
- Add on_delegation() hook for parent-side subagent observation
- Document agent_context/agent_identity/agent_workspace kwargs on
initialize() (prevents cron corruption, enables profile scoping)
- Fix docstring: single external provider, not multiple
Honcho CLI restoration:
- Add plugins/memory/honcho/cli.py (from main's honcho_integration/cli.py
with imports adapted to plugin path)
- Restore full hermes honcho command with all subcommands (status, peer,
mode, tokens, identity, enable/disable, sync, peers, --target-profile)
- Restore auto-clone on profile creation + sync on hermes update
- hermes honcho setup now redirects to hermes memory setup
* fix(memory): wire on_delegation, skip_memory for cron/flush, fix ByteRover return type
- Wire on_delegation() in delegate_tool.py — parent's memory provider
is notified with task+result after each subagent completes
- Add skip_memory=True to cron scheduler (prevents cron system prompts
from corrupting user representations — closes #4052)
- Add skip_memory=True to gateway flush agent (throwaway agent shouldn't
activate memory provider)
- Fix ByteRover on_pre_compress() return type: None -> str
* fix(honcho): port profile isolation fixes from PR #4632
Ports 5 bug fixes found during profile testing (erosika's PR #4632):
1. 3-tier config resolution — resolve_config_path() now checks
$HERMES_HOME/honcho.json → ~/.hermes/honcho.json → ~/.honcho/config.json
(non-default profiles couldn't find shared host blocks)
2. Thread host=_host_key() through from_global_config() in cmd_setup,
cmd_status, cmd_identity (--target-profile was being ignored)
3. Use bare profile name as aiPeer (not host key with dots) — Honcho's
peer ID pattern is ^[a-zA-Z0-9_-]+$, dots are invalid
4. Wrap add_peers() in try/except — was fatal on new AI peers, killed
all message uploads for the session
5. Gate Honcho clone behind --clone/--clone-all on profile create
(bare create should be blank-slate)
Also: sanitize assistant_peer_id via _sanitize_id()
* fix(tests): add module cleanup fixture to test_cli_provider_resolution
test_cli_provider_resolution._import_cli() wipes tools.*, cli, and
run_agent from sys.modules to force fresh imports, but had no cleanup.
This poisoned all subsequent tests on the same xdist worker — mocks
targeting tools.file_tools, tools.send_message_tool, etc. patched the
NEW module object while already-imported functions still referenced
the OLD one. Caused ~25 cascade failures: send_message KeyError,
process_registry FileNotFoundError, file_read_guards timeouts,
read_loop_detection file-not-found, mcp_oauth None port, and
provider_parity/codex_execution stale tool lists.
Fix: autouse fixture saves all affected modules before each test and
restores them after, matching the pattern in
test_managed_browserbase_and_modal.py.
2026-04-02 15:33:51 -07:00
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# Module isolation: _import_cli() wipes tools.* / cli / run_agent from
|
|
|
|
|
# sys.modules so it can re-import cli fresh. Without cleanup the wiped
|
|
|
|
|
# modules leak into subsequent tests on the same xdist worker, breaking
|
|
|
|
|
# mock patches that target "tools.file_tools._get_file_ops" etc.
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
def _reset_modules(prefixes: tuple[str, ...]):
|
|
|
|
|
for name in list(sys.modules):
|
|
|
|
|
if any(name == p or name.startswith(p + ".") for p in prefixes):
|
|
|
|
|
sys.modules.pop(name, None)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture(autouse=True)
def _restore_cli_and_tool_modules():
    """Snapshot tools/cli/run_agent modules before each test, restore after.

    _import_cli() pops these modules to force fresh imports; without this
    fixture the wiped modules would leak into later tests on the same
    xdist worker and break mock patches targeting them.
    """
    watched = ("tools", "cli", "run_agent")
    nested = tuple(prefix + "." for prefix in watched)

    snapshot = {}
    for mod_name, mod in sys.modules.items():
        if mod_name in watched or mod_name.startswith(nested):
            snapshot[mod_name] = mod

    try:
        yield
    finally:
        # Clear whatever the test left behind, then reinstall the originals.
        _reset_modules(watched)
        sys.modules.update(snapshot)
|
|
|
|
|
|
|
|
|
|
|
2026-02-25 18:20:38 -08:00
|
|
|
def _install_prompt_toolkit_stubs():
|
|
|
|
|
class _Dummy:
|
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
|
class _Condition:
|
|
|
|
|
def __init__(self, func):
|
|
|
|
|
self.func = func
|
|
|
|
|
|
|
|
|
|
def __bool__(self):
|
|
|
|
|
return bool(self.func())
|
|
|
|
|
|
|
|
|
|
class _ANSI(str):
|
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
|
root = types.ModuleType("prompt_toolkit")
|
|
|
|
|
history = types.ModuleType("prompt_toolkit.history")
|
|
|
|
|
styles = types.ModuleType("prompt_toolkit.styles")
|
|
|
|
|
patch_stdout = types.ModuleType("prompt_toolkit.patch_stdout")
|
|
|
|
|
application = types.ModuleType("prompt_toolkit.application")
|
|
|
|
|
layout = types.ModuleType("prompt_toolkit.layout")
|
|
|
|
|
processors = types.ModuleType("prompt_toolkit.layout.processors")
|
|
|
|
|
filters = types.ModuleType("prompt_toolkit.filters")
|
|
|
|
|
dimension = types.ModuleType("prompt_toolkit.layout.dimension")
|
|
|
|
|
menus = types.ModuleType("prompt_toolkit.layout.menus")
|
|
|
|
|
widgets = types.ModuleType("prompt_toolkit.widgets")
|
|
|
|
|
key_binding = types.ModuleType("prompt_toolkit.key_binding")
|
|
|
|
|
completion = types.ModuleType("prompt_toolkit.completion")
|
|
|
|
|
formatted_text = types.ModuleType("prompt_toolkit.formatted_text")
|
|
|
|
|
|
|
|
|
|
history.FileHistory = _Dummy
|
|
|
|
|
styles.Style = _Dummy
|
|
|
|
|
patch_stdout.patch_stdout = lambda *args, **kwargs: nullcontext()
|
|
|
|
|
application.Application = _Dummy
|
|
|
|
|
layout.Layout = _Dummy
|
|
|
|
|
layout.HSplit = _Dummy
|
|
|
|
|
layout.Window = _Dummy
|
|
|
|
|
layout.FormattedTextControl = _Dummy
|
|
|
|
|
layout.ConditionalContainer = _Dummy
|
|
|
|
|
processors.Processor = _Dummy
|
|
|
|
|
processors.Transformation = _Dummy
|
|
|
|
|
processors.PasswordProcessor = _Dummy
|
|
|
|
|
processors.ConditionalProcessor = _Dummy
|
|
|
|
|
filters.Condition = _Condition
|
|
|
|
|
dimension.Dimension = _Dummy
|
|
|
|
|
menus.CompletionsMenu = _Dummy
|
|
|
|
|
widgets.TextArea = _Dummy
|
|
|
|
|
key_binding.KeyBindings = _Dummy
|
|
|
|
|
completion.Completer = _Dummy
|
|
|
|
|
completion.Completion = _Dummy
|
|
|
|
|
formatted_text.ANSI = _ANSI
|
|
|
|
|
root.print_formatted_text = lambda *args, **kwargs: None
|
|
|
|
|
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit", root)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.history", history)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.styles", styles)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.patch_stdout", patch_stdout)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.application", application)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.layout", layout)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.layout.processors", processors)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.filters", filters)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.layout.dimension", dimension)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.layout.menus", menus)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.widgets", widgets)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.key_binding", key_binding)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.completion", completion)
|
|
|
|
|
sys.modules.setdefault("prompt_toolkit.formatted_text", formatted_text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _import_cli():
    """Import a fresh copy of the top-level ``cli`` module.

    Wipes cli/run_agent/tools from sys.modules first so the import is not
    served from cache, stubs out optional third-party dependencies
    (firecrawl, and prompt_toolkit when not installed), then imports and
    returns ``cli``. The autouse fixture in this file restores the wiped
    modules after each test.
    """
    # Use the same prefix semantics as the cleanup fixture (_reset_modules
    # with "tools"/"cli"/"run_agent"): previously the inline wipe skipped
    # cli.* / run_agent.* submodules, so stale submodules could survive the
    # wipe while the fixture restored them anyway.
    _reset_modules(("tools", "cli", "run_agent"))

    if "firecrawl" not in sys.modules:
        # cli imports firecrawl at module scope; a bare namespace with the
        # expected attribute is enough for import to succeed.
        sys.modules["firecrawl"] = types.SimpleNamespace(Firecrawl=object)

    try:
        importlib.import_module("prompt_toolkit")
    except ModuleNotFoundError:
        _install_prompt_toolkit_stubs()

    return importlib.import_module("cli")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def test_hermes_cli_init_does_not_eagerly_resolve_runtime_provider(monkeypatch):
    """HermesCLI.__init__ must not resolve the runtime provider eagerly."""
    cli = _import_cli()
    resolve_calls = []

    def _fail_if_resolved(**kwargs):
        resolve_calls.append(kwargs)
        raise AssertionError("resolve_runtime_provider should not be called in HermesCLI.__init__")

    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _fail_if_resolved)
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))

    shell = cli.HermesCLI(model="gpt-5", compact=True, max_turns=1)

    assert shell is not None
    assert len(resolve_calls) == 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def test_runtime_resolution_failure_is_not_sticky(monkeypatch):
    """A failed _init_agent() must retry runtime resolution on the next call."""
    cli = _import_cli()
    attempts = []

    def _runtime_resolve(**kwargs):
        attempts.append(kwargs)
        # First attempt simulates a transient failure; later attempts succeed.
        if len(attempts) == 1:
            raise RuntimeError("temporary auth failure")
        return {
            "provider": "openrouter",
            "api_mode": "chat_completions",
            "base_url": "https://openrouter.ai/api/v1",
            "api_key": "test-key",
            "source": "env/config",
        }

    class _DummyAgent:
        def __init__(self, *args, **kwargs):
            self.kwargs = kwargs

    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _runtime_resolve)
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))
    monkeypatch.setattr(cli, "AIAgent", _DummyAgent)

    shell = cli.HermesCLI(model="gpt-5", compact=True, max_turns=1)

    # First call hits the transient failure; second must retry and succeed.
    assert shell._init_agent() is False
    assert shell._init_agent() is True
    assert len(attempts) == 2
    assert shell.agent is not None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def test_runtime_resolution_rebuilds_agent_on_routing_change(monkeypatch):
    """Switching provider/api_mode during resolution must drop the cached agent."""
    cli = _import_cli()

    new_routing = {
        "provider": "openai-codex",
        "api_mode": "codex_responses",
        "base_url": "https://same-endpoint.example/v1",
        "api_key": "same-key",
        "source": "env/config",
    }

    monkeypatch.setattr(
        "hermes_cli.runtime_provider.resolve_runtime_provider",
        lambda **kwargs: dict(new_routing),
    )
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))

    shell = cli.HermesCLI(model="gpt-5", compact=True, max_turns=1)
    # Simulate a previously-resolved session: same endpoint and key, but a
    # different provider/api_mode, with a live agent object cached.
    shell.provider = "openrouter"
    shell.api_mode = "chat_completions"
    shell.base_url = "https://same-endpoint.example/v1"
    shell.api_key = "same-key"
    shell.agent = object()

    assert shell._ensure_runtime_credentials() is True
    assert shell.agent is None
    assert shell.provider == "openai-codex"
    assert shell.api_mode == "codex_responses"
|
|
|
|
|
|
|
|
|
|
|
fix: hermes update causes dual gateways on macOS (launchd) (#1567)
* feat: add optional smart model routing
Add a conservative cheap-vs-strong routing option that can send very short/simple turns to a cheaper model across providers while keeping the primary model for complex work. Wire it through CLI, gateway, and cron, and document the config.yaml workflow.
* fix(gateway): remove recursive ExecStop from systemd units, extend TimeoutStopSec to 60s
* fix(gateway): avoid recursive ExecStop in user systemd unit
* fix: extend ExecStop removal and TimeoutStopSec=60 to system unit
The cherry-picked PR #1448 fix only covered the user systemd unit.
The system unit had the same TimeoutStopSec=15 and could benefit
from the same 60s timeout for clean shutdown. Also adds a regression
test for the system unit.
---------
Co-authored-by: Ninja <ninja@local>
* feat(skills): add blender-mcp optional skill for 3D modeling
Control a running Blender instance from Hermes via socket connection
to the blender-mcp addon (port 9876). Supports creating 3D objects,
materials, animations, and running arbitrary bpy code.
Placed in optional-skills/ since it requires Blender 4.3+ desktop
with a third-party addon manually started each session.
* feat(acp): support slash commands in ACP adapter (#1532)
Adds /help, /model, /tools, /context, /reset, /compact, /version
to the ACP adapter (VS Code, Zed, JetBrains). Commands are handled
directly in the server without instantiating the TUI — each command
queries agent/session state and returns plain text.
Unrecognized /commands fall through to the LLM as normal messages.
/model uses detect_provider_for_model() for auto-detection when
switching models, matching the CLI and gateway behavior.
Fixes #1402
* fix(logging): improve error logging in session search tool (#1533)
* fix(gateway): restart on retryable startup failures (#1517)
* feat(email): add skip_attachments option via config.yaml
* feat(email): add skip_attachments option via config.yaml
Adds a config.yaml-driven option to skip email attachments in the
gateway email adapter. Useful for malware protection and bandwidth
savings.
Configure in config.yaml:
platforms:
email:
skip_attachments: true
Based on PR #1521 by @an420eth, changed from env var to config.yaml
(via PlatformConfig.extra) to match the project's config-first pattern.
* docs: document skip_attachments option for email adapter
* fix(telegram): retry on transient TLS failures during connect and send
Add exponential-backoff retry (3 attempts) around initialize() to
handle transient TLS resets during gateway startup. Also catches
TimedOut and OSError in addition to NetworkError.
Add exponential-backoff retry (3 attempts) around send_message() for
NetworkError during message delivery, wrapping the existing Markdown
fallback logic.
Both imports are guarded with try/except ImportError for test
environments where telegram is mocked.
Based on PR #1527 by cmd8. Closes #1526.
* feat: permissive block_anchor thresholds and unicode normalization (#1539)
Salvaged from PR #1528 by an420eth. Closes #517.
Improves _strategy_block_anchor in fuzzy_match.py:
- Add unicode normalization (smart quotes, em/en-dashes, ellipsis,
non-breaking spaces → ASCII) so LLM-produced unicode artifacts
don't break anchor line matching
- Lower thresholds: 0.10 for unique matches (was 0.70), 0.30 for
multiple candidates — if first/last lines match exactly, the
block is almost certainly correct
- Use original (non-normalized) content for offset calculation to
preserve correct character positions
Tested: 3 new scenarios fixed (em-dash anchors, non-breaking space
anchors, very-low-similarity unique matches), zero regressions on
all 9 existing fuzzy match tests.
Co-authored-by: an420eth <an420eth@users.noreply.github.com>
* feat(cli): add file path autocomplete in the input prompt (#1545)
When typing a path-like token (./ ../ ~/ / or containing /),
the CLI now shows filesystem completions in the dropdown menu.
Directories show a trailing slash and 'dir' label; files show
their size. Completions are case-insensitive and capped at 30
entries.
Triggered by tokens like:
edit ./src/ma → shows ./src/main.py, ./src/manifest.json, ...
check ~/doc → shows ~/docs/, ~/documents/, ...
read /etc/hos → shows /etc/hosts, /etc/hostname, ...
open tools/reg → shows tools/registry.py
Slash command autocomplete (/help, /model, etc.) is unaffected —
it still triggers when the input starts with /.
Inspired by OpenCode PR #145 (file path completion menu).
Implementation:
- hermes_cli/commands.py: _extract_path_word() detects path-like
tokens, _path_completions() yields filesystem Completions with
size labels, get_completions() routes to paths vs slash commands
- tests/hermes_cli/test_path_completion.py: 26 tests covering
path extraction, prefix filtering, directory markers, home
expansion, case-insensitivity, integration with slash commands
* feat(privacy): redact PII from LLM context when privacy.redact_pii is enabled
Add privacy.redact_pii config option (boolean, default false). When
enabled, the gateway redacts personally identifiable information from
the system prompt before sending it to the LLM provider:
- Phone numbers (user IDs on WhatsApp/Signal) → hashed to user_<sha256>
- User IDs → hashed to user_<sha256>
- Chat IDs → numeric portion hashed, platform prefix preserved
- Home channel IDs → hashed
- Names/usernames → NOT affected (user-chosen, publicly visible)
Hashes are deterministic (same user → same hash) so the model can
still distinguish users in group chats. Routing and delivery use
the original values internally — redaction only affects LLM context.
Inspired by OpenClaw PR #47959.
* fix(privacy): skip PII redaction on Discord/Slack (mentions need real IDs)
Discord uses <@user_id> for mentions and Slack uses <@U12345> — the LLM
needs the real ID to tag users. Redaction now only applies to WhatsApp,
Signal, and Telegram where IDs are pure routing metadata.
Add 4 platform-specific tests covering Discord, WhatsApp, Signal, Slack.
* feat: smart approvals + /stop command (inspired by OpenAI Codex)
* feat: smart approvals — LLM-based risk assessment for dangerous commands
Adds a 'smart' approval mode that uses the auxiliary LLM to assess
whether a flagged command is genuinely dangerous or a false positive,
auto-approving low-risk commands without prompting the user.
Inspired by OpenAI Codex's Smart Approvals guardian subagent
(openai/codex#13860).
Config (config.yaml):
approvals:
mode: manual # manual (default), smart, off
Modes:
- manual — current behavior, always prompt the user
- smart — aux LLM evaluates risk: APPROVE (auto-allow), DENY (block),
or ESCALATE (fall through to manual prompt)
- off — skip all approval prompts (equivalent to --yolo)
When smart mode auto-approves, the pattern gets session-level approval
so subsequent uses of the same pattern don't trigger another LLM call.
When it denies, the command is blocked without user prompt. When
uncertain, it escalates to the normal manual approval flow.
The LLM prompt is carefully scoped: it sees only the command text and
the flagged reason, assesses actual risk vs false positive, and returns
a single-word verdict.
* feat: make smart approval model configurable via config.yaml
Adds auxiliary.approval section to config.yaml with the same
provider/model/base_url/api_key pattern as other aux tasks (vision,
web_extract, compression, etc.).
Config:
auxiliary:
approval:
provider: auto
model: '' # fast/cheap model recommended
base_url: ''
api_key: ''
Bridged to env vars in both CLI and gateway paths so the aux client
picks them up automatically.
* feat: add /stop command to kill all background processes
Adds a /stop slash command that kills all running background processes
at once. Currently users have to process(list) then process(kill) for
each one individually.
Inspired by OpenAI Codex's separation of interrupt (Ctrl+C stops current
turn) from /stop (cleans up background processes). See openai/codex#14602.
Ctrl+C continues to only interrupt the active agent turn — background
dev servers, watchers, etc. are preserved. /stop is the explicit way
to clean them all up.
* feat: first-class plugin architecture + hide status bar cost by default (#1544)
The persistent status bar now shows context %, token counts, and
duration but NOT $ cost by default. Cost display is opt-in via:
display:
show_cost: true
in config.yaml, or: hermes config set display.show_cost true
The /usage command still shows full cost breakdown since the user
explicitly asked for it — this only affects the always-visible bar.
Status bar without cost:
⚕ claude-sonnet-4 │ 12K/200K │ 6% │ 15m
Status bar with show_cost: true:
⚕ claude-sonnet-4 │ 12K/200K │ 6% │ $0.06 │ 15m
* feat: improve memory prioritization + aggressive skill updates (inspired by OpenAI Codex)
* feat: improve memory prioritization — user preferences over procedural knowledge
Inspired by OpenAI Codex's memory prompt improvements (openai/codex#14493)
which focus memory writes on user preferences and recurring patterns
rather than procedural task details.
Key insight: 'Optimize for reducing future user steering — the most
valuable memory prevents the user from having to repeat themselves.'
Changes:
- MEMORY_GUIDANCE (prompt_builder.py): added prioritization hierarchy
and the core principle about reducing user steering
- MEMORY_SCHEMA (memory_tool.py): reordered WHEN TO SAVE list to put
corrections first, added explicit PRIORITY guidance
- Memory nudge (run_agent.py): now asks specifically about preferences,
corrections, and workflow patterns instead of generic 'anything'
- Memory flush (run_agent.py): now instructs to prioritize user
preferences and corrections over task-specific details
* feat: more aggressive skill creation and update prompting
Press harder on skill updates — the agent should proactively patch
skills when it encounters issues during use, not wait to be asked.
Changes:
- SKILLS_GUIDANCE: 'consider saving' → 'save'; added explicit instruction
to patch skills immediately when found outdated/wrong
- Skills header: added instruction to update loaded skills before finishing
if they had missing steps or wrong commands
- Skill nudge: more assertive ('save the approach' not 'consider saving'),
now also prompts for updating existing skills used in the task
- Skill nudge interval: lowered default from 15 to 10 iterations
- skill_manage schema: added 'patch it immediately' to update triggers
* feat: first-class plugin architecture (#1555)
Plugin system for extending Hermes with custom tools, hooks, and
integrations — no source code changes required.
Core system (hermes_cli/plugins.py):
- Plugin discovery from ~/.hermes/plugins/, .hermes/plugins/, and
pip entry_points (hermes_agent.plugins group)
- PluginContext with register_tool() and register_hook()
- 6 lifecycle hooks: pre/post tool_call, pre/post llm_call,
on_session_start/end
- Namespace package handling for relative imports in plugins
- Graceful error isolation — broken plugins never crash the agent
Integration (model_tools.py):
- Plugin discovery runs after built-in + MCP tools
- Plugin tools bypass toolset filter via get_plugin_tool_names()
- Pre/post tool call hooks fire in handle_function_call()
CLI:
- /plugins command shows loaded plugins, tool counts, status
- Added to COMMANDS dict for autocomplete
Docs:
- Getting started guide (build-a-hermes-plugin.md) — full tutorial
building a calculator plugin step by step
- Reference page (features/plugins.md) — quick overview + tables
- Covers: file structure, schemas, handlers, hooks, data files,
bundled skills, env var gating, pip distribution, common mistakes
Tests: 16 tests covering discovery, loading, hooks, tool visibility.
* fix: hermes update causes dual gateways on macOS (launchd)
Three bugs worked together to create the dual-gateway problem:
1. cmd_update only checked systemd for gateway restart, completely
ignoring launchd on macOS. After killing the PID it would print
'Restart it with: hermes gateway run' even when launchd was about
to auto-respawn the process.
2. launchd's KeepAlive.SuccessfulExit=false respawns the gateway
after SIGTERM (non-zero exit), so the user's manual restart
created a second instance.
3. The launchd plist lacked --replace (systemd had it), so the
respawned gateway didn't kill stale instances on startup.
Fixes:
- Add --replace to launchd ProgramArguments (matches systemd)
- Add launchd detection to cmd_update's auto-restart logic
- Print 'auto-restart via launchd' instead of manual restart hint
* fix: add launchd plist auto-refresh + explicit restart in cmd_update
Two integration issues with the initial fix:
1. Existing macOS users with old plist (no --replace) would never
get the fix until manual uninstall/reinstall. Added
refresh_launchd_plist_if_needed() — mirrors the existing
refresh_systemd_unit_if_needed(). Called from launchd_start(),
launchd_restart(), and cmd_update.
2. cmd_update relied on KeepAlive respawn after SIGTERM rather than
explicit launchctl stop/start. This caused races: launchd would
respawn the old process before the PID file was cleaned up.
Now does explicit stop+start (matching how systemd gets an
explicit systemctl restart), with plist refresh first so the
new --replace flag is picked up.
---------
Co-authored-by: Ninja <ninja@local>
Co-authored-by: alireza78a <alireza78a@users.noreply.github.com>
Co-authored-by: Oktay Aydin <113846926+aydnOktay@users.noreply.github.com>
Co-authored-by: JP Lew <polydegen@protonmail.com>
Co-authored-by: an420eth <an420eth@users.noreply.github.com>
2026-03-16 12:36:29 -07:00
|
|
|
def test_cli_turn_routing_uses_primary_when_disabled(monkeypatch):
    """With smart routing disabled, a turn must use the primary model/provider
    unchanged and carry no routing label."""
    cli = _import_cli()

    shell = cli.HermesCLI(model="gpt-5", compact=True, max_turns=1)
    shell.provider = "openrouter"
    shell.api_mode = "chat_completions"
    shell.base_url = "https://openrouter.ai/api/v1"
    shell.api_key = "sk-primary"
    shell._smart_model_routing = {"enabled": False}

    result = shell._resolve_turn_agent_config("what time is it in tokyo?")

    # Primary config passes through untouched.
    assert result["model"] == "gpt-5"
    assert result["runtime"]["provider"] == "openrouter"
    assert result["label"] is None
|
def test_cli_turn_routing_uses_cheap_model_when_simple(monkeypatch):
    """A short, simple prompt under the configured thresholds is routed to the
    cheap model, resolving credentials via the cheap model's provider."""
    cli = _import_cli()

    def _runtime_resolve(**kwargs):
        # The cheap model's provider ("zai") must be what gets resolved,
        # not the primary "openrouter".
        assert kwargs["requested"] == "zai"
        return {
            "provider": "zai",
            "api_mode": "chat_completions",
            "base_url": "https://open.z.ai/api/v1",
            "api_key": "cheap-key",
            "source": "env/config",
        }

    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _runtime_resolve)

    shell = cli.HermesCLI(model="anthropic/claude-sonnet-4", compact=True, max_turns=1)
    shell.provider = "openrouter"
    shell.api_mode = "chat_completions"
    shell.base_url = "https://openrouter.ai/api/v1"
    shell.api_key = "primary-key"
    shell._smart_model_routing = {
        "enabled": True,
        "cheap_model": {"provider": "zai", "model": "glm-5-air"},
        "max_simple_chars": 160,
        "max_simple_words": 28,
    }

    result = shell._resolve_turn_agent_config("what time is it in tokyo?")

    # Cheap model selected, cheap provider's credentials used, label set.
    assert result["model"] == "glm-5-air"
    assert result["runtime"]["provider"] == "zai"
    assert result["runtime"]["api_key"] == "cheap-key"
    assert result["label"] is not None
2026-03-13 23:59:12 -07:00
|
|
|
def test_cli_prefers_config_provider_over_stale_env_override(monkeypatch):
    """config.yaml's model.provider must win over a stale
    HERMES_INFERENCE_PROVIDER environment override."""
    cli = _import_cli()

    monkeypatch.setenv("HERMES_INFERENCE_PROVIDER", "openrouter")

    # Shallow-copy the loaded config so the patched CLI_CONFIG carries a
    # "custom" provider without mutating the shared original.
    config_copy = dict(cli.CLI_CONFIG)
    model_copy = dict(config_copy.get("model", {}))
    model_copy["provider"] = "custom"
    model_copy["base_url"] = "https://api.fireworks.ai/inference/v1"
    config_copy["model"] = model_copy
    monkeypatch.setattr(cli, "CLI_CONFIG", config_copy)

    shell = cli.HermesCLI(model="fireworks/minimax-m2p5", compact=True, max_turns=1)

    assert shell.requested_provider == "custom"
2026-03-08 16:48:56 -07:00
|
|
|
def test_codex_provider_replaces_incompatible_default_model(monkeypatch):
    """When provider resolves to openai-codex and no model was explicitly
    chosen, the global config default (e.g. anthropic/claude-opus-4.6) must
    be replaced with a Codex-compatible model. Fixes #651."""
    cli = _import_cli()

    monkeypatch.delenv("LLM_MODEL", raising=False)
    monkeypatch.delenv("OPENAI_MODEL", raising=False)
    # Ensure local user config does not leak a model into the test
    monkeypatch.setitem(cli.CLI_CONFIG, "model", {
        "default": "",
        "base_url": "https://openrouter.ai/api/v1",
    })

    def _runtime_resolve(**kwargs):
        return {
            "provider": "openai-codex",
            "api_mode": "codex_responses",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "test-key",
            "source": "env/config",
        }

    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _runtime_resolve)
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))
    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids",
        lambda access_token=None: ["gpt-5.2-codex", "gpt-5.1-codex-mini"],
    )

    shell = cli.HermesCLI(compact=True, max_turns=1)

    assert shell._model_is_default is True
    assert shell._ensure_runtime_credentials() is True
    assert shell.provider == "openai-codex"
    # The incompatible default must be fully replaced, not just prefixed.
    assert "anthropic" not in shell.model
    assert "claude" not in shell.model
    assert shell.model == "gpt-5.2-codex"
2026-03-26 15:27:27 -07:00
|
|
|
def test_model_flow_nous_prints_subscription_guidance_without_mutating_explicit_tts(monkeypatch, capsys):
    """The Nous model flow prints the subscription explainer but must not
    overwrite TTS/browser providers the user configured explicitly."""
    monkeypatch.setenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", "1")
    config = {
        "model": {"provider": "nous", "default": "claude-opus-4-6"},
        "tts": {"provider": "elevenlabs"},
        "browser": {"cloud_provider": "browser-use"},
    }

    # Stub out auth, credential resolution, and model listing.
    monkeypatch.setattr(
        "hermes_cli.auth.get_provider_auth_state",
        lambda provider: {"access_token": "nous-token"},
    )
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_nous_runtime_credentials",
        lambda *args, **kwargs: {
            "base_url": "https://inference.example.com/v1",
            "api_key": "nous-key",
        },
    )
    monkeypatch.setattr(
        "hermes_cli.auth.fetch_nous_models",
        lambda *args, **kwargs: ["claude-opus-4-6"],
    )
    monkeypatch.setattr("hermes_cli.auth._prompt_model_selection", lambda model_ids, current_model="", pricing=None, **kw: "claude-opus-4-6")
    monkeypatch.setattr("hermes_cli.auth._save_model_choice", lambda model: None)
    monkeypatch.setattr("hermes_cli.auth._update_config_for_provider", lambda provider, url: None)
    monkeypatch.setattr(
        "hermes_cli.nous_subscription.get_nous_subscription_explainer_lines",
        lambda: ["Nous subscription enables managed web tools."],
    )

    hermes_main._model_flow_nous(config, current_model="claude-opus-4-6")

    out = capsys.readouterr().out
    assert "Nous subscription enables managed web tools." in out
    # Explicit user choices survive the flow untouched.
    assert config["tts"]["provider"] == "elevenlabs"
    assert config["browser"]["cloud_provider"] == "browser-use"
def test_model_flow_nous_applies_managed_tts_default_when_unconfigured(monkeypatch, capsys):
    """When TTS is still on the fallback 'edge' provider, the Nous flow
    switches it to the managed OpenAI TTS and says so."""
    monkeypatch.setenv("HERMES_ENABLE_NOUS_MANAGED_TOOLS", "1")
    config = {
        "model": {"provider": "nous", "default": "claude-opus-4-6"},
        "tts": {"provider": "edge"},
    }

    # Stub out auth, credential resolution, and model listing.
    monkeypatch.setattr(
        "hermes_cli.auth.get_provider_auth_state",
        lambda provider: {"access_token": "nous-token"},
    )
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_nous_runtime_credentials",
        lambda *args, **kwargs: {
            "base_url": "https://inference.example.com/v1",
            "api_key": "nous-key",
        },
    )
    monkeypatch.setattr(
        "hermes_cli.auth.fetch_nous_models",
        lambda *args, **kwargs: ["claude-opus-4-6"],
    )
    monkeypatch.setattr("hermes_cli.auth._prompt_model_selection", lambda model_ids, current_model="", pricing=None, **kw: "claude-opus-4-6")
    monkeypatch.setattr("hermes_cli.auth._save_model_choice", lambda model: None)
    monkeypatch.setattr("hermes_cli.auth._update_config_for_provider", lambda provider, url: None)
    monkeypatch.setattr(
        "hermes_cli.nous_subscription.get_nous_subscription_explainer_lines",
        lambda: ["Nous subscription enables managed web tools."],
    )

    hermes_main._model_flow_nous(config, current_model="claude-opus-4-6")

    out = capsys.readouterr().out
    assert "Nous subscription enables managed web tools." in out
    assert "OpenAI TTS via your Nous subscription" in out
    # The unconfigured 'edge' TTS default gets upgraded to managed OpenAI.
    assert config["tts"]["provider"] == "openai"
2026-03-11 22:04:42 -07:00
|
|
|
def test_codex_provider_uses_config_model(monkeypatch):
    """Model comes from config.yaml, not LLM_MODEL env var.
    Config.yaml is the single source of truth to avoid multi-agent conflicts."""
    cli = _import_cli()

    # LLM_MODEL env var should be IGNORED (even if set)
    monkeypatch.setenv("LLM_MODEL", "should-be-ignored")
    monkeypatch.delenv("OPENAI_MODEL", raising=False)

    # Set model via config
    monkeypatch.setitem(cli.CLI_CONFIG, "model", {
        "default": "gpt-5.2-codex",
        "provider": "openai-codex",
        "base_url": "https://chatgpt.com/backend-api/codex",
    })

    def _runtime_resolve(**kwargs):
        return {
            "provider": "openai-codex",
            "api_mode": "codex_responses",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "fake-codex-token",
            "source": "env/config",
        }

    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _runtime_resolve)
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))
    # Prevent live API call from overriding the config model
    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids",
        lambda access_token=None: ["gpt-5.2-codex"],
    )

    shell = cli.HermesCLI(compact=True, max_turns=1)

    assert shell._ensure_runtime_credentials() is True
    assert shell.provider == "openai-codex"
    # Model from config (may be normalized by codex provider logic)
    assert "codex" in shell.model.lower()
    # LLM_MODEL env var is NOT used
    assert shell.model != "should-be-ignored"
2026-03-18 02:50:31 -07:00
|
|
|
def test_codex_config_model_not_replaced_by_normalization(monkeypatch):
    """When the user sets model.default in config.yaml to a specific codex
    model, _normalize_model_for_provider must NOT replace it with the latest
    available model from the API. Regression test for #1887."""
    cli = _import_cli()

    monkeypatch.delenv("LLM_MODEL", raising=False)
    monkeypatch.delenv("OPENAI_MODEL", raising=False)

    # User explicitly configured gpt-5.3-codex in config.yaml
    monkeypatch.setitem(cli.CLI_CONFIG, "model", {
        "default": "gpt-5.3-codex",
        "provider": "openai-codex",
        "base_url": "https://chatgpt.com/backend-api/codex",
    })

    def _runtime_resolve(**kwargs):
        return {
            "provider": "openai-codex",
            "api_mode": "codex_responses",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "fake-key",
            "source": "env/config",
        }

    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _runtime_resolve)
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))
    # API returns a DIFFERENT model than what the user configured
    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids",
        lambda access_token=None: ["gpt-5.4", "gpt-5.3-codex"],
    )

    shell = cli.HermesCLI(compact=True, max_turns=1)

    # Config model is NOT the global default — user made a deliberate choice
    assert shell._model_is_default is False
    assert shell._ensure_runtime_credentials() is True
    assert shell.provider == "openai-codex"
    # Model must stay as user configured, not replaced by gpt-5.4
    assert shell.model == "gpt-5.3-codex"
2026-03-08 16:48:56 -07:00
|
|
|
def test_codex_provider_preserves_explicit_codex_model(monkeypatch):
    """If the user explicitly passes a Codex-compatible model, it must be
    preserved even when the provider resolves to openai-codex."""
    cli = _import_cli()

    monkeypatch.delenv("LLM_MODEL", raising=False)
    monkeypatch.delenv("OPENAI_MODEL", raising=False)

    def _runtime_resolve(**kwargs):
        return {
            "provider": "openai-codex",
            "api_mode": "codex_responses",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "test-key",
            "source": "env/config",
        }

    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _runtime_resolve)
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))

    shell = cli.HermesCLI(model="gpt-5.1-codex-mini", compact=True, max_turns=1)

    # An explicit model flag means this is not the global default...
    assert shell._model_is_default is False
    assert shell._ensure_runtime_credentials() is True
    # ...and it survives credential resolution untouched.
    assert shell.model == "gpt-5.1-codex-mini"
def test_codex_provider_strips_provider_prefix_from_model(monkeypatch):
    """openai/gpt-5.3-codex should become gpt-5.3-codex — the Codex
    Responses API does not accept provider-prefixed model slugs."""
    cli = _import_cli()

    monkeypatch.delenv("LLM_MODEL", raising=False)
    monkeypatch.delenv("OPENAI_MODEL", raising=False)

    def _runtime_resolve(**kwargs):
        return {
            "provider": "openai-codex",
            "api_mode": "codex_responses",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "test-key",
            "source": "env/config",
        }

    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _runtime_resolve)
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))

    shell = cli.HermesCLI(model="openai/gpt-5.3-codex", compact=True, max_turns=1)

    assert shell._ensure_runtime_credentials() is True
    # The "openai/" prefix must be stripped for the Codex Responses API.
    assert shell.model == "gpt-5.3-codex"
2026-02-25 18:20:38 -08:00
|
|
|
def test_cmd_model_falls_back_to_auto_on_invalid_provider(monkeypatch, capsys):
    """cmd_model must warn and fall back to auto provider detection when
    config.yaml names an unknown provider, without changing config."""
    monkeypatch.setattr(
        "hermes_cli.config.load_config",
        lambda: {"model": {"default": "gpt-5", "provider": "invalid-provider"}},
    )
    monkeypatch.setattr("hermes_cli.config.save_config", lambda cfg: None)
    monkeypatch.setattr("hermes_cli.config.get_env_value", lambda key: "")
    monkeypatch.setattr("hermes_cli.config.save_env_value", lambda key, value: None)

    def _resolve_provider(requested, **kwargs):
        # Reject the configured provider so the fallback path is exercised.
        if requested == "invalid-provider":
            raise AuthError("Unknown provider 'invalid-provider'.", code="invalid_provider")
        return "openrouter"

    monkeypatch.setattr("hermes_cli.auth.resolve_provider", _resolve_provider)
    # Always pick the last menu entry (the "no change" option).
    monkeypatch.setattr(hermes_main, "_prompt_provider_choice", lambda choices, **kwargs: len(choices) - 1)
    monkeypatch.setattr("sys.stdin", type("FakeTTY", (), {"isatty": lambda self: True})())

    hermes_main.cmd_model(SimpleNamespace())
    output = capsys.readouterr().out

    assert "Warning:" in output
    assert "falling back to auto provider detection" in output.lower()
    assert "No change." in output
def test_model_flow_custom_saves_verified_v1_base_url(monkeypatch, capsys):
    """The custom-provider flow keeps the probe-verified /v1 base URL and
    records the detected model — without writing OPENAI_BASE_URL to .env."""
    monkeypatch.setattr(
        "hermes_cli.config.get_env_value",
        lambda key: "" if key in {"OPENAI_BASE_URL", "OPENAI_API_KEY"} else "",
    )

    # Capture everything the flow tries to persist.
    saved_env = {}
    monkeypatch.setattr("hermes_cli.config.save_env_value", lambda key, value: saved_env.__setitem__(key, value))
    monkeypatch.setattr("hermes_cli.auth._save_model_choice", lambda model: saved_env.__setitem__("MODEL", model))
    monkeypatch.setattr("hermes_cli.auth.deactivate_provider", lambda: None)
    monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None)
    monkeypatch.setattr(
        "hermes_cli.models.probe_api_models",
        lambda api_key, base_url: {
            "models": ["llm"],
            "probed_url": "http://localhost:8000/v1/models",
            "resolved_base_url": "http://localhost:8000/v1",
            "suggested_base_url": "http://localhost:8000/v1",
            "used_fallback": True,
        },
    )
    monkeypatch.setattr(
        "hermes_cli.config.load_config",
        lambda: {"model": {"default": "", "provider": "custom", "base_url": ""}},
    )
    monkeypatch.setattr("hermes_cli.config.save_config", lambda cfg: None)

    # After the probe detects a single model ("llm"), the flow asks
    # "Use this model? [Y/n]:" — confirm with Enter, then context length.
    answers = iter(["http://localhost:8000", "local-key", "", ""])
    monkeypatch.setattr("builtins.input", lambda _prompt="": next(answers))
    monkeypatch.setattr("getpass.getpass", lambda _prompt="": next(answers))

    hermes_main._model_flow_custom({})
    output = capsys.readouterr().out

    assert "Saving the working base URL instead" in output
    assert "Detected model: llm" in output
    # OPENAI_BASE_URL is no longer saved to .env — config.yaml is authoritative
    assert "OPENAI_BASE_URL" not in saved_env
    assert saved_env["MODEL"] == "llm"
def test_cmd_model_forwards_nous_login_tls_options(monkeypatch):
    """Every TLS/connection option on the cmd_model args namespace must be
    forwarded verbatim to the Nous login helper."""
    monkeypatch.setattr(hermes_main, "_require_tty", lambda *a: None)
    monkeypatch.setattr(
        "hermes_cli.config.load_config",
        lambda: {"model": {"default": "gpt-5", "provider": "nous"}},
    )
    monkeypatch.setattr("hermes_cli.config.save_config", lambda cfg: None)
    monkeypatch.setattr("hermes_cli.config.get_env_value", lambda key: "")
    monkeypatch.setattr("hermes_cli.config.save_env_value", lambda key, value: None)
    monkeypatch.setattr("hermes_cli.auth.resolve_provider", lambda requested, **kwargs: "nous")
    monkeypatch.setattr("hermes_cli.auth.get_provider_auth_state", lambda provider_id: None)
    monkeypatch.setattr(hermes_main, "_prompt_provider_choice", lambda choices, **kwargs: 0)

    captured = {}
    _forwarded_fields = (
        "portal_url",
        "inference_url",
        "client_id",
        "scope",
        "no_browser",
        "timeout",
        "ca_bundle",
        "insecure",
    )

    def _fake_login(login_args, provider_config):
        # Record exactly what cmd_model handed to the login helper.
        for field in _forwarded_fields:
            captured[field] = getattr(login_args, field)

    monkeypatch.setattr("hermes_cli.auth._login_nous", _fake_login)

    hermes_main.cmd_model(
        SimpleNamespace(
            portal_url="https://portal.nousresearch.com",
            inference_url="https://inference.nousresearch.com/v1",
            client_id="hermes-local",
            scope="openid profile",
            no_browser=True,
            timeout=7.5,
            ca_bundle="/tmp/local-ca.pem",
            insecure=True,
        )
    )

    assert captured == {
        "portal_url": "https://portal.nousresearch.com",
        "inference_url": "https://inference.nousresearch.com/v1",
        "client_id": "hermes-local",
        "scope": "openid profile",
        "no_browser": True,
        "timeout": 7.5,
        "ca_bundle": "/tmp/local-ca.pem",
        "insecure": True,
    }