test: make local llama.cpp the default runtime (#77)

Merged via pull request #77. Adds tests/test_local_runtime_defaults.py (new file, 21 lines):
from __future__ import annotations

from pathlib import Path

import yaml


def test_config_defaults_to_local_llama_cpp_runtime() -> None:
    """Verify config.yaml routes the default model through local llama.cpp.

    Checks three things against the checked-in config:
    the primary model points at the local custom provider,
    the matching custom-provider entry serves the same model,
    and the fallback model is the hosted Gemini.
    """
    settings = yaml.safe_load(Path("config.yaml").read_text())

    # Primary model: custom provider backed by the local llama.cpp server.
    primary = settings["model"]
    assert primary["provider"] == "custom"
    assert primary["default"] == "hermes4:14b"
    assert primary["base_url"] == "http://localhost:8081/v1"

    # The named custom-provider entry must advertise the same local model.
    # next() raises StopIteration if the entry is missing, failing the test.
    local_entry = next(
        item
        for item in settings["custom_providers"]
        if item["name"] == "Local llama.cpp"
    )
    assert local_entry["model"] == "hermes4:14b"

    # Fallback routes to Gemini via the custom provider mechanism.
    fallback = settings["fallback_model"]
    assert fallback["provider"] == "custom"
    assert fallback["model"] == "gemini-2.5-pro"