fix: custom endpoint no longer leaks OPENROUTER_API_KEY (#560)
API key selection is now base_url-aware: when the resolved base_url targets OpenRouter, OPENROUTER_API_KEY takes priority (preserving the #289 fix). When hitting any other endpoint (Z.ai, vLLM, custom, etc.), OPENAI_API_KEY takes priority so the OpenRouter key doesn't leak. Applied in both the runtime provider resolver (the real code path) and the CLI initial default (for consistency). Fixes #560.
This commit is contained in:
Changed file: cli.py (8 additions, 8 deletions)
@@ -870,7 +870,13 @@ class HermesCLI:
|
||||
or os.getenv("OPENAI_BASE_URL")
|
||||
or os.getenv("OPENROUTER_BASE_URL", CLI_CONFIG["model"]["base_url"])
|
||||
)
|
||||
self.api_key = api_key or os.getenv("OPENROUTER_API_KEY") or os.getenv("OPENAI_API_KEY")
|
||||
# Match key to resolved base_url: OpenRouter URL → prefer OPENROUTER_API_KEY,
|
||||
# custom endpoint → prefer OPENAI_API_KEY (issue #560).
|
||||
# Note: _ensure_runtime_credentials() re-resolves this before first use.
|
||||
if "openrouter.ai" in self.base_url:
|
||||
self.api_key = api_key or os.getenv("OPENROUTER_API_KEY") or os.getenv("OPENAI_API_KEY")
|
||||
else:
|
||||
self.api_key = api_key or os.getenv("OPENAI_API_KEY") or os.getenv("OPENROUTER_API_KEY")
|
||||
self._nous_key_expires_at: Optional[str] = None
|
||||
self._nous_key_source: Optional[str] = None
|
||||
# Max turns priority: CLI arg > config file > env var > default
|
||||
|
||||
@@ -72,12 +72,25 @@ def _resolve_openrouter_runtime(
|
||||
or OPENROUTER_BASE_URL
|
||||
).rstrip("/")
|
||||
|
||||
api_key = (
|
||||
explicit_api_key
|
||||
or os.getenv("OPENROUTER_API_KEY")
|
||||
or os.getenv("OPENAI_API_KEY")
|
||||
or ""
|
||||
)
|
||||
# Choose API key based on whether the resolved base_url targets OpenRouter.
|
||||
# When hitting OpenRouter, prefer OPENROUTER_API_KEY (issue #289).
|
||||
# When hitting a custom endpoint, prefer OPENAI_API_KEY so the OpenRouter
|
||||
# key doesn't leak to an unrelated provider (issue #560).
|
||||
_is_openrouter_url = "openrouter.ai" in base_url
|
||||
if _is_openrouter_url:
|
||||
api_key = (
|
||||
explicit_api_key
|
||||
or os.getenv("OPENROUTER_API_KEY")
|
||||
or os.getenv("OPENAI_API_KEY")
|
||||
or ""
|
||||
)
|
||||
else:
|
||||
api_key = (
|
||||
explicit_api_key
|
||||
or os.getenv("OPENAI_API_KEY")
|
||||
or os.getenv("OPENROUTER_API_KEY")
|
||||
or ""
|
||||
)
|
||||
|
||||
source = "explicit" if (explicit_api_key or explicit_base_url) else "env/config"
|
||||
|
||||
|
||||
@@ -121,6 +121,43 @@ def test_openai_key_used_when_no_openrouter_key(monkeypatch):
|
||||
assert resolved["api_key"] == "sk-openai-fallback"
|
||||
|
||||
|
||||
def test_custom_endpoint_prefers_openai_key(monkeypatch):
    """Non-OpenRouter base_url must resolve to OPENAI_API_KEY.

    Regression for #560: with a custom endpoint configured, the resolver
    used to send OPENROUTER_API_KEY as the auth header instead of the
    endpoint's own OPENAI_API_KEY.
    """
    # Pin the provider decision and neutralise any file-based model config.
    monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openrouter")
    monkeypatch.setattr(rp, "_get_model_config", lambda: {})

    # Point at a non-OpenRouter endpoint while both keys exist in the env.
    monkeypatch.delenv("OPENROUTER_BASE_URL", raising=False)
    monkeypatch.setenv("OPENAI_BASE_URL", "https://api.z.ai/api/coding/paas/v4")
    monkeypatch.setenv("OPENROUTER_API_KEY", "sk-or-wrong-key-for-zai")
    monkeypatch.setenv("OPENAI_API_KEY", "sk-zai-correct-key")

    result = rp.resolve_runtime_provider(requested="custom")

    # The endpoint-matched key wins; the OpenRouter key must not leak.
    assert result["base_url"] == "https://api.z.ai/api/coding/paas/v4"
    assert result["api_key"] == "sk-zai-correct-key"
|
||||
|
||||
|
||||
def test_custom_endpoint_auto_provider_prefers_openai_key(monkeypatch):
    """Auto provider with a non-OpenRouter base_url prefers OPENAI_API_KEY.

    Same scenario as #560, reached via the 'hermes model' flow, which
    requests provider 'auto' rather than 'custom'.
    """
    # Pin the provider decision and neutralise any file-based model config.
    monkeypatch.setattr(rp, "resolve_provider", lambda *a, **k: "openrouter")
    monkeypatch.setattr(rp, "_get_model_config", lambda: {})

    # Self-hosted endpoint; both keys present so the wrong one could leak.
    monkeypatch.delenv("OPENROUTER_BASE_URL", raising=False)
    monkeypatch.setenv("OPENAI_BASE_URL", "https://my-vllm-server.example.com/v1")
    monkeypatch.setenv("OPENROUTER_API_KEY", "sk-or-should-not-leak")
    monkeypatch.setenv("OPENAI_API_KEY", "sk-vllm-key")

    result = rp.resolve_runtime_provider(requested="auto")

    # The endpoint-matched key wins; the OpenRouter key must not leak.
    assert result["base_url"] == "https://my-vllm-server.example.com/v1"
    assert result["api_key"] == "sk-vllm-key"
|
||||
|
||||
|
||||
def test_resolve_requested_provider_precedence(monkeypatch):
|
||||
monkeypatch.setenv("HERMES_INFERENCE_PROVIDER", "nous")
|
||||
monkeypatch.setattr(rp, "_get_model_config", lambda: {"provider": "openai-codex"})
|
||||
|
||||
Reference in New Issue
Block a user