From ee066b7be6c2cf646711fdd6845f30ea00cb1910 Mon Sep 17 00:00:00 2001
From: Teknium <127238744+teknium1@users.noreply.github.com>
Date: Sat, 28 Mar 2026 14:47:41 -0700
Subject: [PATCH] fix: use placeholder api_key for custom providers without
 credentials (#3604)

Local/custom OpenAI-compatible providers (Ollama, LM Studio, vLLM) that
don't require auth were hitting empty api_key rejections from the OpenAI
SDK, especially when used as smart model routing targets.

Uses the same 'no-key-required' placeholder already used in
_resolve_openrouter_runtime() for the identical scenario.

Salvaged from PR #3543.

Co-authored-by: scottlowry
---
 hermes_cli/runtime_provider.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hermes_cli/runtime_provider.py b/hermes_cli/runtime_provider.py
index 5d8edc10..046e7d6d 100644
--- a/hermes_cli/runtime_provider.py
+++ b/hermes_cli/runtime_provider.py
@@ -203,7 +203,7 @@ def _resolve_named_custom_runtime(
         or _detect_api_mode_for_url(base_url)
         or "chat_completions",
         "base_url": base_url,
-        "api_key": api_key,
+        "api_key": api_key or "no-key-required",
         "source": f"custom_provider:{custom_provider.get('name', requested_provider)}",
     }