Resolves #291 - config.yaml: providers.ollama.model hermes3:latest -> gemma4:latest - config.yaml: fallback_model.model hermes3:latest -> gemma4:latest - config.yaml: fix YAML indent bugs in security block and container keys - cron/jobs.json: Health Monitor model -> gemma4:latest - tests/test_local_runtime_defaults.py: update assertion to match - Machine truth: ollama list shows gemma4:latest installed, hermes3:latest absent - Gateway: 404 errors on hermes3:latest will stop after restart Per Alexander: 'we are Gemma4 maxis now. Or hermes trained frontier models'
23 lines · 742 B · Python
from __future__ import annotations
|
|
|
|
from pathlib import Path
|
|
|
|
import yaml
|
|
|
|
|
|
def test_config_defaults_to_local_llama_cpp_runtime() -> None:
    """Verify config.yaml pins the local llama.cpp runtime with an Ollama fallback.

    Guards the defaults described in #291: the primary model stays on the
    custom "Local llama.cpp" provider serving hermes4:14b, while the
    fallback model runs gemma4:latest through a local Ollama instance.
    """
    # Explicit encoding so the test does not depend on the platform default.
    config = yaml.safe_load(Path("config.yaml").read_text(encoding="utf-8"))

    assert config["model"]["provider"] == "custom"
    assert config["model"]["default"] == "hermes4:14b"
    assert config["model"]["base_url"] == "http://localhost:8081/v1"

    # Use a default of None so a missing provider produces a clear assertion
    # failure instead of a raw StopIteration (which pytest reports as an
    # error rather than a failure, with no useful message).
    local_provider = next(
        (
            entry
            for entry in config["custom_providers"]
            if entry["name"] == "Local llama.cpp"
        ),
        None,
    )
    assert local_provider is not None, "custom_providers lacks 'Local llama.cpp'"
    assert local_provider["model"] == "hermes4:14b"

    assert config["fallback_model"]["provider"] == "ollama"
    assert config["fallback_model"]["model"] == "gemma4:latest"
    assert "localhost" in config["fallback_model"]["base_url"]
|