# timmy-config/tests/test_config_validate.py — 276 lines, 9.1 KiB, Python
#!/usr/bin/env python3
"""Tests for config_validate.py — issue #690."""
import json
import sys
import tempfile
from pathlib import Path
import pytest
sys.path.insert(0, str(Path(__file__).resolve().parent.parent / "scripts"))
from config_validate import (
validate_config,
validate_file,
validate_yaml_syntax,
validate_required_keys,
validate_value_types,
validate_no_forbidden_keys,
validate_unknown_keys,
ValidationError,
SCHEMA,
)
class TestYAMLSyntax:
    """Exercise validate_yaml_syntax: happy path, empty input, parse errors,
    and documents whose top level is not a mapping."""

    def test_valid_yaml(self):
        parsed, issues = validate_yaml_syntax("model: gpt-4\nprovider: openai\n")
        assert parsed is not None
        assert issues == []

    def test_empty_yaml(self):
        # An empty document parses to an empty mapping but warns.
        parsed, issues = validate_yaml_syntax("")
        assert parsed == {}
        assert any(issue.severity == "warning" for issue in issues)

    def test_invalid_yaml(self):
        # Broken syntax yields no data and exactly one error.
        parsed, issues = validate_yaml_syntax("model: gpt-4\n bad: [\n")
        assert parsed is None
        assert len(issues) == 1

    def test_non_mapping_yaml(self):
        # A top-level sequence is rejected with a "mapping" error message.
        parsed, issues = validate_yaml_syntax("- item1\n- item2\n")
        assert parsed is None
        assert any("mapping" in issue.message for issue in issues)
class TestRequiredKeys:
    """validate_required_keys must insist on a string-valued 'model' key."""

    def test_model_present(self):
        result = validate_required_keys({"model": "gpt-4"})
        assert not any(err.path == "model" for err in result)

    def test_model_missing(self):
        result = validate_required_keys({"provider": "openai"})
        assert any(
            err.path == "model" and "missing" in err.message.lower()
            for err in result
        )

    def test_model_wrong_type(self):
        # A non-string model is reported as a type problem mentioning "str".
        result = validate_required_keys({"model": 123})
        assert any(err.path == "model" and "str" in err.message for err in result)
class TestValueTypes:
    """validate_value_types checks leaf value types, including nested
    dictionaries and individual list items (reported as name[index])."""

    def test_correct_types(self):
        cfg = {"model": "gpt-4", "agent": {"max_iterations": 90, "temperature": 0.7}}
        assert validate_value_types(cfg) == []

    def test_wrong_agent_type(self):
        problems = validate_value_types({"agent": {"max_iterations": "ninety"}})
        assert any("max_iterations" in p.path and "int" in p.message for p in problems)

    def test_wrong_display_type(self):
        problems = validate_value_types({"display": {"spinner": "yes"}})
        assert any("spinner" in p.path and "bool" in p.message for p in problems)

    def test_wrong_cron_type(self):
        problems = validate_value_types({"cron": {"interval_seconds": "5m"}})
        assert any("interval_seconds" in p.path for p in problems)

    def test_list_item_types(self):
        # Offending list elements are addressed by index in the path.
        problems = validate_value_types({"toolsets": ["web", "browser", 123]})
        assert any("toolsets[2]" in p.path for p in problems)

    def test_nested_dict_depth(self):
        # Nested paths are dotted: providers.openrouter.base_url.
        problems = validate_value_types({"providers": {"openrouter": {"base_url": 42}}})
        assert any("providers.openrouter.base_url" in p.path for p in problems)
class TestForbiddenKeys:
    """validate_no_forbidden_keys rejects credential-like keys (password,
    secret, token) anywhere in the tree, while pointer-style keys such as
    api_key_env remain allowed.

    Fix vs. the original: test_nested_forbidden computed a result for the
    api_key payload and immediately overwrote it without asserting anything
    (dead code, admitted by its own comment). That case is now asserted
    explicitly in its own test.
    """

    def test_no_forbidden(self):
        errors = validate_no_forbidden_keys({"model": "gpt-4"})
        assert errors == []

    def test_password_rejected(self):
        errors = validate_no_forbidden_keys({"model": "gpt-4", "password": "s3cret"})
        assert any("password" in e.path for e in errors)

    def test_secret_rejected(self):
        errors = validate_no_forbidden_keys({"secret": "abc"})
        assert any("secret" in e.path for e in errors)

    def test_api_key_not_forbidden(self):
        # api_key is not in the forbidden set (per the original's comment);
        # the original built this input but never asserted on the result.
        errors = validate_no_forbidden_keys({"providers": {"x": {"api_key": "sk-xxx"}}})
        assert not any("api_key" in e.path for e in errors)

    def test_nested_forbidden(self):
        # Forbidden keys are detected even when nested below providers.
        errors = validate_no_forbidden_keys({"providers": {"x": {"token": "tok"}}})
        assert any("token" in e.path for e in errors)

    def test_api_key_env_allowed(self):
        errors = validate_no_forbidden_keys({"providers": {"x": {"api_key_env": "MY_KEY"}}})
        assert not any("api_key_env" in e.path for e in errors)
class TestUnknownKeys:
    """Unknown top-level keys produce warnings rather than errors."""

    def test_known_keys_no_warnings(self):
        assert validate_unknown_keys({"model": "gpt-4", "provider": "openai"}) == []

    def test_unknown_top_level_warns(self):
        found = validate_unknown_keys({"model": "gpt-4", "custom_field": 1})
        assert any(
            "custom_field" in item.path and item.severity == "warning"
            for item in found
        )
class TestFullValidation:
    """validate_config end-to-end: YAML syntax, required keys, forbidden
    keys, and unknown-key warnings combined into one (valid, errors) result."""

    def test_valid_config(self):
        ok, issues = validate_config("model: nousresearch/hermes-4-14b\nprovider: openrouter\n")
        assert ok
        assert not any(i.severity == "error" for i in issues)

    def test_missing_model(self):
        ok, issues = validate_config("provider: openrouter\n")
        assert not ok
        assert any("model" in i.path for i in issues)

    def test_forbidden_key(self):
        ok, _issues = validate_config("model: gpt-4\npassword: secret\n")
        assert not ok

    def test_invalid_yaml(self):
        ok, _issues = validate_config("model: [\n broken\n")
        assert not ok

    def test_full_realistic_config(self):
        # A representative full config covering every schema section.
        ok, issues = validate_config("""
model: nousresearch/hermes-4-14b
provider: openrouter
providers:
  openrouter:
    base_url: https://openrouter.ai/api/v1
    api_key_env: OPENROUTER_API_KEY
  ollama:
    base_url: http://localhost:11434
toolsets:
  - web
  - browser
agent:
  max_iterations: 90
  temperature: 0.7
  save_trajectories: false
display:
  spinner: true
  colors: true
  skin: default
cron:
  enabled: false
  interval_seconds: 300
gateway:
  enabled: false
  port: 8080
logging:
  level: INFO
""")
        assert ok, f"Unexpected errors: {issues}"

    def test_warnings_dont_fail(self):
        ok, issues = validate_config("model: gpt-4\ncustom_key: value\n")
        assert ok  # warnings don't make it invalid
        assert any(i.severity == "warning" for i in issues)
class TestValidateFile:
    """validate_file reads a path from disk and validates its contents."""

    def test_valid_file(self, tmp_path):
        cfg = tmp_path / "config.yaml"
        cfg.write_text("model: gpt-4\n")
        ok, _issues = validate_file(str(cfg))
        assert ok

    def test_missing_file(self):
        # A nonexistent path is reported, not raised.
        ok, issues = validate_file("/nonexistent/config.yaml")
        assert not ok
        assert any("not found" in i.message for i in issues)

    def test_roundtrip(self, tmp_path):
        cfg = tmp_path / "config.yaml"
        cfg.write_text("model: gpt-4\nagent:\n max_iterations: 50\n")
        ok, _issues = validate_file(str(cfg))
        assert ok
class TestCLI:
    """End-to-end tests that drive config_validate.py as a subprocess.

    Fix vs. the original: every test rebuilt the script path inline and
    re-imported subprocess; both are hoisted into a class constant and a
    shared _run_cli helper. Each test's arguments, assertions, and fixture
    files are unchanged.
    """

    # Path to the script under test, computed once for the whole class.
    SCRIPT = Path(__file__).resolve().parent.parent / "scripts" / "config_validate.py"

    def _run_cli(self, *args):
        """Run the CLI with *args*; return the CompletedProcess (text mode)."""
        import subprocess
        return subprocess.run(
            [sys.executable, str(self.SCRIPT), *args],
            capture_output=True,
            text=True,
        )

    def test_deploy_mode(self, tmp_path):
        src = tmp_path / "src.yaml"
        src.write_text("model: gpt-4\n")
        dest = tmp_path / "deployed.yaml"
        result = self._run_cli(str(src), "--deploy", str(dest))
        assert result.returncode == 0
        assert dest.exists()
        assert "model: gpt-4" in dest.read_text()

    def test_deploy_rejects_invalid(self, tmp_path):
        src = tmp_path / "bad.yaml"
        src.write_text("provider: openai\n")  # missing required model
        dest = tmp_path / "should_not_exist.yaml"
        result = self._run_cli(str(src), "--deploy", str(dest))
        assert result.returncode == 1
        # An invalid source must never produce a deployed file.
        assert not dest.exists()

    def test_schema_flag(self):
        result = self._run_cli("--schema")
        assert result.returncode == 0
        assert "model:" in result.stdout
        assert "required" in result.stdout

    def test_json_output(self, tmp_path):
        cfg = tmp_path / "config.yaml"
        cfg.write_text("model: gpt-4\n")
        result = self._run_cli(str(cfg), "--json")
        assert result.returncode == 0
        payload = json.loads(result.stdout)
        assert payload["valid"] is True
        assert "errors" in payload

    def test_check_dir(self, tmp_path):
        (tmp_path / "good.yaml").write_text("model: gpt-4\n")
        (tmp_path / "bad.yaml").write_text("provider: openai\n")
        result = self._run_cli("--check-dir", str(tmp_path))
        assert result.returncode == 1  # bad.yaml fails
        assert "bad.yaml" in result.stdout