refactor: unify setup wizard provider selection with hermes model

setup_model_provider() had 800+ lines of duplicated provider handling
that reimplemented the same credential prompting, OAuth flows, and model
selection that hermes model already provides via the _model_flow_*
functions.  Every new provider had to be added in both places, and the
two implementations diverged in config persistence: setup.py used raw
YAML writes, _set_model_provider, or _update_config_for_provider
depending on the provider, while main.py used its own load/save cycle.

This caused the #4172 bug: _model_flow_custom saved config to disk but
the wizard's final save_config(config) overwrote it with stale values.

Fix: extract the core of cmd_model() into select_provider_and_model()
and have setup_model_provider() call it.  After the call, re-sync the
wizard's config dict from disk.  Deletes ~800 lines of duplicated
provider handling from setup.py.

Also fixes cmd_model() double-AuthError crash on fresh installs with
no API keys configured.
This commit is contained in:
Teknium
2026-03-31 01:04:07 -07:00
committed by GitHub
parent 89d8127772
commit 491e79bca9
4 changed files with 283 additions and 1418 deletions

View File

@@ -1,6 +1,8 @@
"""Tests for setup_model_provider — verifies the delegation to
select_provider_and_model() and config dict sync."""
import json
from hermes_cli.auth import _update_config_for_provider, get_active_provider
from hermes_cli.auth import get_active_provider
from hermes_cli.config import load_config, save_config
from hermes_cli.setup import setup_model_provider
@@ -23,270 +25,198 @@ def _clear_provider_env(monkeypatch):
monkeypatch.delenv(key, raising=False)
def _stub_tts(monkeypatch):
"""Stub out TTS prompts so setup_model_provider doesn't block."""
monkeypatch.setattr("hermes_cli.setup.prompt_choice", lambda q, c, d=0: (
_maybe_keep_current_tts(q, c) if _maybe_keep_current_tts(q, c) is not None
else d
))
monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *a, **kw: False)
def test_nous_oauth_setup_keeps_current_model_when_syncing_disk_provider(
tmp_path, monkeypatch
):
def _write_model_config(tmp_path, provider, base_url="", model_name="test-model"):
    """Simulate what a _model_flow_* function writes to disk."""
    config = load_config()
    # Normalize the model entry into dict form: a plain string becomes
    # {"default": <string>}, anything falsy becomes an empty dict.
    model_section = config.get("model")
    if not isinstance(model_section, dict):
        model_section = {"default": model_section} if model_section else {}
    config["model"] = model_section
    model_section["provider"] = provider
    if base_url:
        model_section["base_url"] = base_url
    if model_name:
        model_section["default"] = model_name
    save_config(config)
def test_setup_delegates_to_select_provider_and_model(tmp_path, monkeypatch):
    """setup_model_provider calls select_provider_and_model and syncs config."""
    # NOTE(review): this test appears to interleave two revisions — the old
    # prompt/Nous-OAuth mocks (fake_prompt_choice, _login_nous,
    # resolve_nous_runtime_credentials, fetch_nous_models) and the new
    # fake_select delegation. The final asserts match what fake_select
    # writes; the Nous mocks look like leftovers — confirm and prune.
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    _stub_tts(monkeypatch)
    config = load_config()

    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            return 1  # Nous Portal
        if question == "Configure vision:":
            return len(choices) - 1
        if question == "Select default model:":
            assert choices[-1] == "Keep current (anthropic/claude-opus-4.6)"
            return len(choices) - 1
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")

    def fake_select():
        # Simulates the model flow persisting a custom endpoint to disk.
        _write_model_config(tmp_path, "custom", "http://localhost:11434/v1", "qwen3.5:32b")

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])

    def _fake_login_nous(*args, **kwargs):
        auth_path = tmp_path / "auth.json"
        auth_path.write_text(json.dumps({"active_provider": "nous", "providers": {}}))
        # NOTE(review): _update_config_for_provider comes from an import line
        # (L35) that the newer import list appears to drop — verify it is
        # still imported before running this.
        _update_config_for_provider("nous", "https://inference.example.com/v1")

    monkeypatch.setattr("hermes_cli.auth._login_nous", _fake_login_nous)
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_nous_runtime_credentials",
        lambda *args, **kwargs: {
            "base_url": "https://inference.example.com/v1",
            "api_key": "nous-key",
        },
    )
    monkeypatch.setattr(
        "hermes_cli.auth.fetch_nous_models",
        lambda *args, **kwargs: ["gemini-3-flash"],
    )
    monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
    setup_model_provider(config)
    save_config(config)
    reloaded = load_config()
    # These values are exactly what fake_select wrote via _write_model_config.
    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "custom"
    assert reloaded["model"]["base_url"] == "http://localhost:11434/v1"
    assert reloaded["model"]["default"] == "qwen3.5:32b"
def test_setup_syncs_openrouter_from_disk(tmp_path, monkeypatch):
    """When select_provider_and_model saves OpenRouter config to disk,
    the wizard's config dict picks it up."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    _stub_tts(monkeypatch)
    config = load_config()
    # On a fresh install the model entry is still a plain string.
    assert isinstance(config.get("model"), str)

    def fake_select():
        _write_model_config(
            tmp_path, "openrouter", model_name="anthropic/claude-opus-4.6"
        )

    monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
    setup_model_provider(config)
    save_config(config)

    on_disk = load_config()
    assert isinstance(on_disk["model"], dict)
    assert on_disk["model"]["provider"] == "openrouter"
def test_setup_syncs_nous_from_disk(tmp_path, monkeypatch):
    """Nous OAuth writes config to disk; wizard config dict must pick it up."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    _stub_tts(monkeypatch)
    config = load_config()

    def fake_select():
        _write_model_config(
            tmp_path, "nous", "https://inference.example.com/v1", "gemini-3-flash"
        )

    monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
    setup_model_provider(config)
    save_config(config)
    reloaded = load_config()
    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "nous"
    assert reloaded["model"]["base_url"] == "https://inference.example.com/v1"
    # FIX: fake_select writes "gemini-3-flash" as the default model, so that
    # is what must be on disk. The old assertion checked the unrelated
    # "anthropic/claude-opus-4.6" value and could never pass.
    assert reloaded["model"]["default"] == "gemini-3-flash"
def test_custom_setup_clears_active_oauth_provider(tmp_path, monkeypatch):
def test_setup_custom_providers_synced(tmp_path, monkeypatch):
    """custom_providers written by select_provider_and_model must survive."""
    # NOTE(review): this block mixes the new fake_select delegation with
    # leftover prompt/builtins.input mocks from the old flow — the asserts
    # below expect both localhost:8080 values (written by fake_select) and
    # custom.example values (fed through the input stub). Reconcile.
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    auth_path = tmp_path / "auth.json"
    auth_path.write_text(json.dumps({"active_provider": "nous", "providers": {}}))
    _stub_tts(monkeypatch)
    config = load_config()

    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            return 3
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")

    def fake_select():
        _write_model_config(tmp_path, "custom", "http://localhost:8080/v1", "llama3")
        cfg = load_config()
        cfg["custom_providers"] = [{"name": "Local", "base_url": "http://localhost:8080/v1"}]
        save_config(cfg)

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    # _model_flow_custom uses builtins.input (URL, key, model, context_length)
    input_values = iter([
        "https://custom.example/v1",
        "custom-api-key",
        "custom/model",
        "",  # context_length (blank = auto-detect)
    ])
    monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *args, **kwargs: False)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None)
    monkeypatch.setattr(
        "hermes_cli.models.probe_api_models",
        lambda api_key, base_url: {"models": ["m"], "probed_url": base_url + "/models"},
    )
    monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
    setup_model_provider(config)
    # Core assertion: switching to custom endpoint clears OAuth provider
    assert get_active_provider() is None
    # Simulate what the real setup wizard does: save_config(config) AFTER
    # setup_model_provider returns. This is the step that previously
    # overwrote model.provider/base_url (#4172).
    save_config(config)
    reloaded = load_config()
    assert isinstance(reloaded.get("model"), dict), (
        "model should be a dict after custom setup, not "
        + repr(type(reloaded.get("model")))
    )
    assert reloaded["model"].get("provider") == "custom"
    assert reloaded["model"].get("default") == "custom/model"
    assert "custom.example" in reloaded["model"].get("base_url", "")
    assert reloaded.get("custom_providers") == [{"name": "Local", "base_url": "http://localhost:8080/v1"}]
def test_custom_setup_preserves_provider_after_wizard_save_config(
    tmp_path, monkeypatch
):
    # NOTE(review): only the docstring of this test remains — the body it
    # describes is missing (likely lost in a merge/diff). Restore the body
    # or delete the test; as-is it passes vacuously.
    """Regression test for #4172: the setup wizard's final save_config(config)
    must not overwrite model.provider/base_url that _model_flow_custom set.
    Simulates the full flow:
    1. load config (fresh install — model is a string)
    2. setup_model_provider picks custom
    3. wizard calls save_config(config) afterward
    4. verify resolve_requested_provider returns "custom"
    """
def test_setup_cancel_preserves_existing_config(tmp_path, monkeypatch):
    """When the user cancels provider selection, existing config is preserved."""
    # NOTE(review): this test mixes the new "cancel" flow (fake_select that
    # writes nothing) with leftover custom-endpoint mocks and assertions from
    # an older revision — the final asserts expect "custom" config that
    # nothing in the cancel path writes, and the two asserts right after
    # load_config() contradict each other (string-or-None vs dict access).
    # Reconcile before trusting this test.
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    _stub_tts(monkeypatch)
    # Pre-set a provider
    _write_model_config(tmp_path, "openrouter", model_name="gpt-4o")
    config = load_config()
    # Sanity: fresh install has model as a string
    assert isinstance(config.get("model"), str) or config.get("model") is None
    assert config["model"]["provider"] == "openrouter"

    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            return 3  # Custom endpoint
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")

    def fake_select():
        pass  # user cancelled — nothing written to disk

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    input_values = iter([
        "http://localhost:11434/v1",  # Ollama URL
        "",  # no API key (local)
        "qwen3.5:32b",  # model name
        "",  # context length (auto-detect)
    ])
    monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *a, **kw: False)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *a, **kw: None)
    monkeypatch.setattr(
        "hermes_cli.models.probe_api_models",
        lambda api_key, base_url: {"models": ["qwen3.5:32b"], "probed_url": base_url + "/models"},
    )
    # Full wizard cycle
    setup_model_provider(config)
    save_config(config)  # ← this is what the real wizard does
    # Verify config on disk
    reloaded = load_config()
    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "custom"
    assert reloaded["model"]["base_url"] == "http://localhost:11434/v1"
    assert reloaded["model"]["default"] == "qwen3.5:32b"
    assert "api_mode" not in reloaded["model"]
    # Verify the runtime resolver sees "custom", not "auto"
    from hermes_cli.runtime_provider import resolve_requested_provider
    assert resolve_requested_provider() == "custom"
def test_custom_setup_no_model_name_still_preserves_endpoint(
    tmp_path, monkeypatch
):
    """When the user enters a URL and key but skips the model name,
    model.provider and model.base_url must still survive the wizard's
    final save_config(config)."""
    # NOTE(review): this looks like a removed test merged with tail lines of
    # a newer one — fake_select is referenced below but never defined in this
    # function, and the last two asserts check openrouter/gpt-4o values that
    # contradict the custom-endpoint asserts just above them. Reconcile.
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    config = load_config()

    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            return 3
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    input_values = iter([
        "http://192.168.1.50:8080/v1",  # URL
        "my-key",  # API key
        "",  # no model name
        "",  # context length
    ])
    monkeypatch.setattr("builtins.input", lambda _prompt="": next(input_values))
    monkeypatch.setattr("hermes_cli.setup.prompt_yes_no", lambda *a, **kw: False)
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *a, **kw: None)
    monkeypatch.setattr(
        "hermes_cli.models.probe_api_models",
        lambda api_key, base_url: {"models": None, "probed_url": base_url + "/models"},
    )
    # fake_select is undefined in this function — see NOTE above.
    monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
    setup_model_provider(config)
    save_config(config)
    reloaded = load_config()
    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "custom"
    assert reloaded["model"]["base_url"] == "http://192.168.1.50:8080/v1"
    assert reloaded["model"]["provider"] == "openrouter"
    assert reloaded["model"]["default"] == "gpt-4o"
def test_setup_exception_in_select_gracefully_handled(tmp_path, monkeypatch):
    """If select_provider_and_model raises, setup continues with existing config."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    _stub_tts(monkeypatch)
    config = load_config()

    def exploding_select():
        raise RuntimeError("something broke")

    monkeypatch.setattr(
        "hermes_cli.main.select_provider_and_model", exploding_select
    )
    # The wizard must swallow the failure rather than crash.
    setup_model_provider(config)
def test_setup_keyboard_interrupt_gracefully_handled(tmp_path, monkeypatch):
    """KeyboardInterrupt during provider selection is handled."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    _clear_provider_env(monkeypatch)
    _stub_tts(monkeypatch)
    config = load_config()

    def interrupted_select():
        raise KeyboardInterrupt()

    monkeypatch.setattr(
        "hermes_cli.main.select_provider_and_model", interrupted_select
    )
    # Ctrl-C during selection must not propagate out of the wizard.
    setup_model_provider(config)
def test_codex_setup_uses_runtime_access_token_for_live_model_list(tmp_path, monkeypatch):
    """Codex model list fetching uses the runtime access token."""
    # NOTE(review): OPENROUTER_API_KEY is set twice, and both the old
    # prompt-driven codex mocks and the new fake_select are installed. The
    # final asserts expect the chatgpt.com backend URL and "gpt-5.2-codex",
    # while fake_select writes api.openai.com and "gpt-4o" — only one flow
    # can have run. Reconcile before relying on this test.
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    monkeypatch.setenv("OPENROUTER_API_KEY", "or-test-key")
    _clear_provider_env(monkeypatch)
    monkeypatch.setenv("OPENROUTER_API_KEY", "or-test-key")  # duplicate — see NOTE
    config = load_config()
    _stub_tts(monkeypatch)

    def fake_prompt_choice(question, choices, default=0):
        if question == "Select your inference provider:":
            return 2  # OpenAI Codex
        if question == "Select default model:":
            return 0
        tts_idx = _maybe_keep_current_tts(question, choices)
        if tts_idx is not None:
            return tts_idx
        raise AssertionError(f"Unexpected prompt_choice call: {question}")

    def fake_select():
        _write_model_config(tmp_path, "openai-codex", "https://api.openai.com/v1", "gpt-4o")

    monkeypatch.setattr("hermes_cli.setup.prompt_choice", fake_prompt_choice)
    monkeypatch.setattr("hermes_cli.setup.prompt", lambda *args, **kwargs: "")
    monkeypatch.setattr("hermes_cli.auth.detect_external_credentials", lambda: [])
    monkeypatch.setattr("hermes_cli.auth._login_openai_codex", lambda *args, **kwargs: None)
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_codex_runtime_credentials",
        lambda *args, **kwargs: {
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "codex-access-token",
        },
    )
    captured = {}

    def _fake_get_codex_model_ids(access_token=None):
        # Records the token the production code passes for the live fetch.
        captured["access_token"] = access_token
        return ["gpt-5.2-codex", "gpt-5.2"]

    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids",
        _fake_get_codex_model_ids,
    )
    monkeypatch.setattr("hermes_cli.main.select_provider_and_model", fake_select)
    setup_model_provider(config)
    save_config(config)
    reloaded = load_config()
    assert captured["access_token"] == "codex-access-token"
    assert isinstance(reloaded["model"], dict)
    assert reloaded["model"]["provider"] == "openai-codex"
    assert reloaded["model"]["default"] == "gpt-5.2-codex"
    assert reloaded["model"]["base_url"] == "https://chatgpt.com/backend-api/codex"