fix: align Nous Portal model slugs with OpenRouter naming (#3253)

Nous Portal now passes through OpenRouter model names and routes from
there. Update the static fallback model list and auxiliary client default
to use OpenRouter-format slugs (provider/model) instead of bare names.

- _PROVIDER_MODELS['nous']: full OpenRouter catalog
- _NOUS_MODEL: google/gemini-3-flash-preview (was gemini-3-flash)
- Updated 4 test assertions for the new default model name
This commit is contained in:
Teknium
2026-03-26 13:49:43 -07:00
committed by GitHub
parent b81d49dc45
commit a8e02c7d49
4 changed files with 28 additions and 11 deletions

View File

@@ -82,7 +82,7 @@ auxiliary_is_nous: bool = False
# Default auxiliary models per provider
_OPENROUTER_MODEL = "google/gemini-3-flash-preview"
_NOUS_MODEL = "gemini-3-flash"
_NOUS_MODEL = "google/gemini-3-flash-preview"
_NOUS_DEFAULT_BASE_URL = "https://inference-api.nousresearch.com/v1"
_ANTHROPIC_DEFAULT_BASE_URL = "https://api.anthropic.com"
_AUTH_JSON_PATH = get_hermes_home() / "auth.json"

View File

@@ -53,12 +53,29 @@ OPENROUTER_MODELS: list[tuple[str, str]] = [
_PROVIDER_MODELS: dict[str, list[str]] = {
"nous": [
"claude-opus-4-6",
"claude-sonnet-4-6",
"gpt-5.4",
"gemini-3-flash",
"gemini-3.0-pro-preview",
"deepseek-v3.2",
"anthropic/claude-opus-4.6",
"anthropic/claude-sonnet-4.5",
"anthropic/claude-haiku-4.5",
"openai/gpt-5.4",
"openai/gpt-5.4-mini",
"xiaomi/mimo-v2-pro",
"openai/gpt-5.3-codex",
"google/gemini-3-pro-preview",
"google/gemini-3-flash-preview",
"qwen/qwen3.5-plus-02-15",
"qwen/qwen3.5-35b-a3b",
"stepfun/step-3.5-flash",
"minimax/minimax-m2.7",
"minimax/minimax-m2.5",
"z-ai/glm-5",
"z-ai/glm-5-turbo",
"moonshotai/kimi-k2.5",
"x-ai/grok-4.20-beta",
"nvidia/nemotron-3-super-120b-a12b",
"nvidia/nemotron-3-super-120b-a12b:free",
"arcee-ai/trinity-large-preview:free",
"openai/gpt-5.4-pro",
"openai/gpt-5.4-nano",
],
"openai-codex": [
"gpt-5.3-codex",

View File

@@ -461,7 +461,7 @@ class TestGetTextAuxiliaryClient:
patch("agent.auxiliary_client.OpenAI") as mock_openai:
mock_nous.return_value = {"access_token": "nous-tok"}
client, model = get_text_auxiliary_client()
assert model == "gemini-3-flash"
assert model == "google/gemini-3-flash-preview"
def test_custom_endpoint_over_codex(self, monkeypatch, codex_auth_dir):
monkeypatch.setenv("OPENAI_BASE_URL", "http://localhost:1234/v1")
@@ -693,7 +693,7 @@ class TestVisionClientFallback:
patch("agent.auxiliary_client.OpenAI"):
mock_nous.return_value = {"access_token": "nous-tok"}
client, model = get_vision_auxiliary_client()
assert model == "gemini-3-flash"
assert model == "google/gemini-3-flash-preview"
assert client is not None
def test_vision_forced_main_uses_custom_endpoint(self, monkeypatch):
@@ -789,7 +789,7 @@ class TestResolveForcedProvider:
patch("agent.auxiliary_client.OpenAI"):
mock_nous.return_value = {"access_token": "nous-tok"}
client, model = _resolve_forced_provider("nous")
assert model == "gemini-3-flash"
assert model == "google/gemini-3-flash-preview"
assert client is not None
def test_forced_nous_not_configured(self, monkeypatch):

View File

@@ -556,7 +556,7 @@ class TestAuxiliaryClientProviderPriority:
with patch("agent.auxiliary_client._read_nous_auth", return_value={"access_token": "nous-tok"}), \
patch("agent.auxiliary_client.OpenAI") as mock:
client, model = get_text_auxiliary_client()
assert model == "gemini-3-flash"
assert model == "google/gemini-3-flash-preview"
def test_custom_endpoint_when_no_nous(self, monkeypatch):
monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)