test: make local llama.cpp the default runtime
config.yaml
@@ -1,8 +1,8 @@
 model:
-  default: gpt-5.4
-  provider: openai-codex
+  default: hermes4:14b
+  provider: custom
   context_length: 65536
-  base_url: https://chatgpt.com/backend-api/codex
+  base_url: http://localhost:8081/v1
 toolsets:
   - all
 agent:
@@ -188,7 +188,7 @@ custom_providers:
   - name: Local llama.cpp
     base_url: http://localhost:8081/v1
     api_key: none
-    model: auto
+    model: hermes4:14b
   - name: Google Gemini
     base_url: https://generativelanguage.googleapis.com/v1beta/openai
     api_key_env: GEMINI_API_KEY
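These defaults assume a llama.cpp server is already listening on localhost:8081 and exposing its OpenAI-compatible /v1 API (e.g. llama-server started with --port 8081). A minimal smoke test against that endpoint, sketched here with the openai Python client; this is not part of the commit, and the GGUF filename in the comment is a placeholder:

# Sketch: poke the local llama.cpp server through its OpenAI-compatible API.
# Assumes `pip install openai` and a server started with something like:
#   llama-server -m <hermes4-14b>.gguf --port 8081
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8081/v1", api_key="none")
reply = client.chat.completions.create(
    model="hermes4:14b",  # name from config.yaml; llama.cpp serves the model it loaded
    messages=[{"role": "user", "content": "Reply with the single word: ready"}],
)
print(reply.choices[0].message.content)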
tests/test_local_runtime_defaults.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+import yaml
+
+
+def test_config_defaults_to_local_llama_cpp_runtime() -> None:
+    config = yaml.safe_load(Path("config.yaml").read_text())
+
+    assert config["model"]["provider"] == "custom"
+    assert config["model"]["default"] == "hermes4:14b"
+    assert config["model"]["base_url"] == "http://localhost:8081/v1"
+
+    local_provider = next(
+        entry for entry in config["custom_providers"] if entry["name"] == "Local llama.cpp"
+    )
+    assert local_provider["model"] == "hermes4:14b"
+
+    assert config["fallback_model"]["provider"] == "custom"
+    assert config["fallback_model"]["model"] == "gemini-2.5-pro"
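The test validates only the YAML wiring, so it passes whether or not the server is up; run it with pytest tests/test_local_runtime_defaults.py. If integration tests are added later, they could skip gracefully when nothing is listening on the port. A sketch of such a marker (not part of this commit):

# Sketch of a skip marker for future integration tests; not in this commit.
import socket

import pytest


def _local_server_up(host: str = "localhost", port: int = 8081) -> bool:
    # True when something accepts TCP connections on the configured port.
    try:
        with socket.create_connection((host, port), timeout=0.5):
            return True
    except OSError:
        return False


requires_local_llm = pytest.mark.skipif(
    not _local_server_up(), reason="no llama.cpp server on localhost:8081"
)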