* fix: use session_key instead of chat_id for adapter interrupt lookups

  monitor_for_interrupt() in _run_agent() was using source.chat_id to query the
  adapter's has_pending_interrupt() and get_pending_message() methods. But the
  adapter stores interrupt events under build_session_key(source), which
  produces a different string (e.g. 'agent:main:telegram:dm' vs '123456'). This
  key mismatch meant the interrupt was never detected through the adapter path,
  which is the only active interrupt path for all adapter-based platforms
  (Telegram, Discord, Slack, etc.). The gateway-level interrupt path (in
  dispatch_message()) is unreachable because the adapter intercepts the second
  message in handle_message() before it ever reaches dispatch_message(). Result:
  sending a new message while subagents were running had no effect; the
  interrupt was silently lost.

  Fix: replace all source.chat_id references in the interrupt-related code
  within _run_agent() with the session_key parameter, which matches the
  adapter's storage keys (the mismatch is sketched below). Also adds regression
  tests verifying session_key vs chat_id consistency.

* debug: add file-based logging to CLI interrupt path

  Temporary instrumentation to diagnose why message-based interrupts don't seem
  to work during subagent execution. Logs to ~/.hermes/interrupt_debug.log,
  which is immune to redirect_stdout (see the logging sketch below). Two log
  points:

  1. When the Enter handler puts the message into _interrupt_queue
  2. When chat() reads it and calls agent.interrupt()

  This will reveal whether the message reaches the queue and whether the
  interrupt is actually fired.

* fix: accept unlisted models with warning instead of rejecting

  validate_requested_model() previously hard-rejected any model not found in
  the provider's API listing. This was too aggressive: users on higher plan
  tiers (e.g. Z.AI Pro/Max) may have access to models not shown in the public
  listing (like glm-5 on coding endpoints). A sketch of the relaxed validation
  follows below.

  Changes:

  - validate_requested_model: accept unlisted models with a warning note
    instead of blocking. The model is saved to config and used immediately.
  - Z.AI setup: always offer glm-5 in the model list regardless of whether a
    coding endpoint was detected. Pro/Max plans support it.
  - Z.AI setup detection message: softened from 'GLM-5 is not available' to
    'GLM-5 may still be available depending on your plan tier'.
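A minimal sketch of the key mismatch behind the first fix. build_session_key(),
has_pending_interrupt(), and source.chat_id come from the commit message; the
Source dataclass, its field names, and the dict-backed store are illustrative
assumptions inferred from the example key 'agent:main:telegram:dm'.

    # Illustrative repro of the lookup mismatch; everything not named in
    # the commit message is assumed for the sketch.
    from dataclasses import dataclass

    @dataclass
    class Source:
        agent: str = "main"
        platform: str = "telegram"
        chat_type: str = "dm"
        chat_id: str = "123456"

    def build_session_key(source: Source) -> str:
        # Produces e.g. 'agent:main:telegram:dm' (format inferred from the
        # example string in the commit message).
        return f"agent:{source.agent}:{source.platform}:{source.chat_type}"

    class Adapter:
        def __init__(self) -> None:
            self._pending: dict[str, str] = {}

        def store_interrupt(self, source: Source, message: str) -> None:
            # The adapter keys pending interrupts by session key...
            self._pending[build_session_key(source)] = message

        def has_pending_interrupt(self, key: str) -> bool:
            return key in self._pending

    adapter = Adapter()
    src = Source()
    adapter.store_interrupt(src, "stop the subagents")

    # Buggy lookup (pre-fix): queried by chat_id, so it never matched.
    assert not adapter.has_pending_interrupt(src.chat_id)
    # Fixed lookup: query by the same session key used for storage.
    assert adapter.has_pending_interrupt(build_session_key(src))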
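For the debug commit, a sketch of why file-based logging survives
redirect_stdout: contextlib.redirect_stdout only rebinds sys.stdout, so writes
to an explicitly opened file handle are unaffected. The log path matches the
commit message; the _debug_log helper name and line format are assumptions.

    from datetime import datetime
    from pathlib import Path

    _DEBUG_LOG = Path.home() / ".hermes" / "interrupt_debug.log"

    def _debug_log(event: str) -> None:
        # Append straight to the file; redirect_stdout() cannot intercept
        # this because it only swaps sys.stdout, not open file handles.
        _DEBUG_LOG.parent.mkdir(parents=True, exist_ok=True)
        with _DEBUG_LOG.open("a") as fh:
            fh.write(f"{datetime.now().isoformat()} {event}\n")

    # Log point 1: the Enter handler enqueues the message.
    _debug_log("enter_handler: message put into _interrupt_queue")
    # Log point 2: chat() dequeues it and fires the interrupt.
    _debug_log("chat: calling agent.interrupt()")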
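And a sketch of the relaxed validate_requested_model() policy from the third
commit. Only the accept-with-warning behavior comes from the commit message;
the (accepted, note) return shape and the exact signature are hypothetical.

    from typing import Optional

    def validate_requested_model(
        requested: str, listed_models: Optional[list[str]]
    ) -> tuple[bool, str]:
        """Return (accepted, warning_note); never hard-rejects."""
        if listed_models is None:
            # API listing unreachable: accept and persist anyway.
            return True, "Could not fetch model list; accepting as-is."
        if requested in listed_models:
            return True, ""
        # Previously a hard reject; now a warning, since higher plan tiers
        # (e.g. Z.AI Pro/Max) may expose models absent from the public listing.
        return True, f"'{requested}' not found in provider listing; using anyway."

    accepted, note = validate_requested_model("glm-5", ["glm-4.7"])
    assert accepted and "not found" in note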
"""Regression tests for the `/model` slash command in the interactive CLI."""
|
|
|
|
from unittest.mock import patch, MagicMock
|
|
|
|
from cli import HermesCLI
|
|
|
|
|
|
class TestModelCommand:
|
|
def _make_cli(self):
|
|
cli_obj = HermesCLI.__new__(HermesCLI)
|
|
cli_obj.model = "anthropic/claude-opus-4.6"
|
|
cli_obj.agent = object()
|
|
cli_obj.provider = "openrouter"
|
|
cli_obj.requested_provider = "openrouter"
|
|
cli_obj.base_url = "https://openrouter.ai/api/v1"
|
|
cli_obj.api_key = "test-key"
|
|
cli_obj._explicit_api_key = None
|
|
cli_obj._explicit_base_url = None
|
|
return cli_obj
|
|
|
|
def test_valid_model_from_api_saved_to_config(self, capsys):
|
|
cli_obj = self._make_cli()
|
|
|
|
with patch("hermes_cli.models.fetch_api_models",
|
|
return_value=["anthropic/claude-sonnet-4.5", "openai/gpt-5.4"]), \
|
|
patch("cli.save_config_value", return_value=True) as save_mock:
|
|
cli_obj.process_command("/model anthropic/claude-sonnet-4.5")
|
|
|
|
output = capsys.readouterr().out
|
|
assert "saved to config" in output
|
|
assert cli_obj.model == "anthropic/claude-sonnet-4.5"
|
|
save_mock.assert_called_once_with("model.default", "anthropic/claude-sonnet-4.5")
|
|
|
|
def test_unlisted_model_accepted_with_warning(self, capsys):
|
|
cli_obj = self._make_cli()
|
|
|
|
with patch("hermes_cli.models.fetch_api_models",
|
|
return_value=["anthropic/claude-opus-4.6"]), \
|
|
patch("cli.save_config_value") as save_mock:
|
|
cli_obj.process_command("/model anthropic/fake-model")
|
|
|
|
output = capsys.readouterr().out
|
|
assert "not found" in output or "Model changed" in output
|
|
assert cli_obj.model == "anthropic/fake-model" # accepted
|
|
|
|
def test_api_unreachable_accepts_and_persists(self, capsys):
|
|
cli_obj = self._make_cli()
|
|
|
|
with patch("hermes_cli.models.fetch_api_models", return_value=None), \
|
|
patch("cli.save_config_value") as save_mock:
|
|
cli_obj.process_command("/model anthropic/claude-sonnet-next")
|
|
|
|
output = capsys.readouterr().out
|
|
assert "saved to config" in output
|
|
assert cli_obj.model == "anthropic/claude-sonnet-next"
|
|
save_mock.assert_called_once()
|
|
|
|
def test_no_slash_model_accepted_with_warning(self, capsys):
|
|
cli_obj = self._make_cli()
|
|
|
|
with patch("hermes_cli.models.fetch_api_models",
|
|
return_value=["openai/gpt-5.4"]) as fetch_mock, \
|
|
patch("cli.save_config_value") as save_mock:
|
|
cli_obj.process_command("/model gpt-5.4")
|
|
|
|
output = capsys.readouterr().out
|
|
# Model is accepted (with warning) even if not in API listing
|
|
assert cli_obj.model == "gpt-5.4"
|
|
|
|
def test_validation_crash_falls_back_to_save(self, capsys):
|
|
cli_obj = self._make_cli()
|
|
|
|
with patch("hermes_cli.models.validate_requested_model",
|
|
side_effect=RuntimeError("boom")), \
|
|
patch("cli.save_config_value", return_value=True) as save_mock:
|
|
cli_obj.process_command("/model anthropic/claude-sonnet-4.5")
|
|
|
|
output = capsys.readouterr().out
|
|
assert "saved to config" in output
|
|
assert cli_obj.model == "anthropic/claude-sonnet-4.5"
|
|
save_mock.assert_called_once()
|
|
|
|
def test_show_model_when_no_argument(self, capsys):
|
|
cli_obj = self._make_cli()
|
|
cli_obj.process_command("/model")
|
|
|
|
output = capsys.readouterr().out
|
|
assert "anthropic/claude-opus-4.6" in output
|
|
assert "OpenRouter" in output
|
|
assert "Authenticated providers" in output or "Switch model" in output
|
|
assert "provider" in output and "model" in output
|
|
|
|
# -- provider switching tests -------------------------------------------
|
|
|
|
def test_provider_colon_model_switches_provider(self, capsys):
|
|
cli_obj = self._make_cli()
|
|
|
|
with patch("hermes_cli.runtime_provider.resolve_runtime_provider", return_value={
|
|
"provider": "zai",
|
|
"api_key": "zai-key",
|
|
"base_url": "https://api.z.ai/api/paas/v4",
|
|
}), \
|
|
patch("hermes_cli.models.fetch_api_models",
|
|
return_value=["glm-5", "glm-4.7"]), \
|
|
patch("cli.save_config_value", return_value=True) as save_mock:
|
|
cli_obj.process_command("/model zai:glm-5")
|
|
|
|
output = capsys.readouterr().out
|
|
assert "glm-5" in output
|
|
assert "provider:" in output.lower() or "Z.AI" in output
|
|
assert cli_obj.model == "glm-5"
|
|
assert cli_obj.provider == "zai"
|
|
assert cli_obj.base_url == "https://api.z.ai/api/paas/v4"
|
|
# Both model and provider should be saved
|
|
assert save_mock.call_count == 2
|
|
|
|
def test_provider_switch_fails_on_bad_credentials(self, capsys):
|
|
cli_obj = self._make_cli()
|
|
|
|
with patch("hermes_cli.runtime_provider.resolve_runtime_provider",
|
|
side_effect=Exception("No API key found")):
|
|
cli_obj.process_command("/model nous:hermes-3")
|
|
|
|
output = capsys.readouterr().out
|
|
assert "Could not resolve credentials" in output
|
|
assert cli_obj.model == "anthropic/claude-opus-4.6" # unchanged
|
|
assert cli_obj.provider == "openrouter" # unchanged
|