test: make local llama.cpp the default runtime

This commit is contained in:
Alexander Whitestone
2026-03-28 01:33:43 -04:00
parent 0eaf0b3d0f
commit a7858540a9
2 changed files with 25 additions and 4 deletions

View File

@@ -1,8 +1,8 @@
model:
default: gpt-5.4
provider: openai-codex
default: hermes4:14b
provider: custom
context_length: 65536
base_url: https://chatgpt.com/backend-api/codex
base_url: http://localhost:8081/v1
toolsets:
- all
agent:
@@ -188,7 +188,7 @@ custom_providers:
- name: Local llama.cpp
base_url: http://localhost:8081/v1
api_key: none
model: auto
model: hermes4:14b
- name: Google Gemini
base_url: https://generativelanguage.googleapis.com/v1beta/openai
api_key_env: GEMINI_API_KEY

View File

@@ -0,0 +1,21 @@
from __future__ import annotations
from pathlib import Path
import yaml
def test_config_defaults_to_local_llama_cpp_runtime() -> None:
    """Verify config.yaml routes the default model through the local llama.cpp server.

    Guards the commit's intent: the top-level model block, the matching
    custom-provider entry, and the fallback model must stay in sync.
    """
    # Explicit encoding avoids platform-dependent decoding of the config file.
    config = yaml.safe_load(Path("config.yaml").read_text(encoding="utf-8"))

    # Top-level default must point at the local OpenAI-compatible endpoint.
    assert config["model"]["provider"] == "custom"
    assert config["model"]["default"] == "hermes4:14b"
    assert config["model"]["base_url"] == "http://localhost:8081/v1"

    # next(..., None) + assert gives a clear failure message instead of the
    # opaque StopIteration a bare next() would raise when the entry is missing.
    local_provider = next(
        (
            entry
            for entry in config["custom_providers"]
            if entry["name"] == "Local llama.cpp"
        ),
        None,
    )
    assert local_provider is not None, "Local llama.cpp entry missing from custom_providers"
    # The provider entry must pin the same model as the top-level default.
    assert local_provider["model"] == "hermes4:14b"

    # Fallback model keeps the agent usable if the local server is down.
    assert config["fallback_model"]["provider"] == "custom"
    assert config["fallback_model"]["model"] == "gemini-2.5-pro"