From f8e4233e67916e7524e9757df312bf46a4d57164 Mon Sep 17 00:00:00 2001 From: 0xbyt4 <35742124+0xbyt4@users.noreply.github.com> Date: Thu, 12 Mar 2026 15:19:31 +0300 Subject: [PATCH 1/2] fix(test): isolate codex provider tests from local env leaking model settings --- tests/test_cli_provider_resolution.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/test_cli_provider_resolution.py b/tests/test_cli_provider_resolution.py index 8c22dd7ac..a88307777 100644 --- a/tests/test_cli_provider_resolution.py +++ b/tests/test_cli_provider_resolution.py @@ -186,6 +186,8 @@ def test_codex_provider_replaces_incompatible_default_model(monkeypatch): monkeypatch.delenv("LLM_MODEL", raising=False) monkeypatch.delenv("OPENAI_MODEL", raising=False) + # Ensure local user config does not leak a model into the test + monkeypatch.setitem(cli.CLI_CONFIG, "model", {}) def _runtime_resolve(**kwargs): return { @@ -240,6 +242,11 @@ def test_codex_provider_uses_config_model(monkeypatch): monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _runtime_resolve) monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc)) + # Prevent live API call from overriding the config model + monkeypatch.setattr( + "hermes_cli.codex_models.get_codex_model_ids", + lambda access_token=None: ["gpt-5.2-codex"], + ) shell = cli.HermesCLI(compact=True, max_turns=1) From 7f485f588e10a202001f07e2fcc5fa4db88b4d0b Mon Sep 17 00:00:00 2001 From: 0xbyt4 <35742124+0xbyt4@users.noreply.github.com> Date: Thu, 12 Mar 2026 15:31:00 +0300 Subject: [PATCH 2/2] fix(test): provide required model config keys to prevent KeyError on base_url --- tests/test_cli_provider_resolution.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_cli_provider_resolution.py b/tests/test_cli_provider_resolution.py index a88307777..ffc5752ff 100644 --- a/tests/test_cli_provider_resolution.py +++ b/tests/test_cli_provider_resolution.py @@ -187,7 
+187,10 @@ def test_codex_provider_replaces_incompatible_default_model(monkeypatch): monkeypatch.delenv("LLM_MODEL", raising=False) monkeypatch.delenv("OPENAI_MODEL", raising=False) # Ensure local user config does not leak a model into the test - monkeypatch.setitem(cli.CLI_CONFIG, "model", {}) + monkeypatch.setitem(cli.CLI_CONFIG, "model", { + "default": "", + "base_url": "https://openrouter.ai/api/v1", + }) def _runtime_resolve(**kwargs): return {