From f263156cf1e04d0fddf9f088098caa443a5a4c6e Mon Sep 17 00:00:00 2001 From: Timmy Time Date: Sat, 28 Mar 2026 05:33:47 +0000 Subject: [PATCH] test: make local llama.cpp the default runtime (#77) --- config.yaml | 8 ++++---- tests/test_local_runtime_defaults.py | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+), 4 deletions(-) create mode 100644 tests/test_local_runtime_defaults.py diff --git a/config.yaml b/config.yaml index 202c4da0..1ab24d56 100644 --- a/config.yaml +++ b/config.yaml @@ -1,8 +1,8 @@ model: - default: gpt-5.4 - provider: openai-codex + default: hermes4:14b + provider: custom context_length: 65536 - base_url: https://chatgpt.com/backend-api/codex + base_url: http://localhost:8081/v1 toolsets: - all agent: @@ -188,7 +188,7 @@ custom_providers: - name: Local llama.cpp base_url: http://localhost:8081/v1 api_key: none - model: auto + model: hermes4:14b - name: Google Gemini base_url: https://generativelanguage.googleapis.com/v1beta/openai api_key_env: GEMINI_API_KEY diff --git a/tests/test_local_runtime_defaults.py b/tests/test_local_runtime_defaults.py new file mode 100644 index 00000000..4ebf50e3 --- /dev/null +++ b/tests/test_local_runtime_defaults.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +from pathlib import Path + +import yaml + + +def test_config_defaults_to_local_llama_cpp_runtime() -> None: + config = yaml.safe_load((Path(__file__).resolve().parents[1] / "config.yaml").read_text()) + + assert config["model"]["provider"] == "custom" + assert config["model"]["default"] == "hermes4:14b" + assert config["model"]["base_url"] == "http://localhost:8081/v1" + + local_provider = next( + entry for entry in config["custom_providers"] if entry["name"] == "Local llama.cpp" + ) + assert local_provider["model"] == "hermes4:14b" + + assert config["fallback_model"]["provider"] == "custom" + assert config["fallback_model"]["model"] == "gemini-2.5-pro"