Compare commits

..

1 Commits

Author SHA1 Message Date
Timmy Time
478bbcdd8a Fix #375: deploy-crons.py now compares model/provider when checking for updates
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m42s
- Added scripts/deploy_crons.py to deploy cron jobs from YAML to jobs.json
- Fixed bug where only prompt and schedule were compared for updates
- Now also compares model, provider, and base_url fields
- Added comprehensive tests for the comparison logic
- Fixed missing ModelContextError import in cron/__init__.py

The deploy-crons.py script reads cron jobs from a YAML configuration file
(cron-jobs.yaml) and synchronizes them with the jobs.json file used by the
Hermes scheduler. Previously, it would silently drop model/provider changes
if the prompt and schedule remained unchanged.

Fixes #375
2026-04-13 18:27:32 -04:00
6 changed files with 611 additions and 300 deletions

View File

@@ -26,7 +26,7 @@ from cron.jobs import (
trigger_job,
JOBS_FILE,
)
from cron.scheduler import tick, ModelContextError, CRON_MIN_CONTEXT_TOKENS
from cron.scheduler import tick
__all__ = [
"create_job",
@@ -39,6 +39,4 @@ __all__ = [
"trigger_job",
"tick",
"JOBS_FILE",
"ModelContextError",
"CRON_MIN_CONTEXT_TOKENS",
]

1
scripts/__init__.py Normal file
View File

@@ -0,0 +1 @@
# Scripts package

259
scripts/deploy_crons.py Executable file
View File

@@ -0,0 +1,259 @@
#!/usr/bin/env python3
"""
deploy-crons.py — Deploy cron jobs from YAML configuration to jobs.json.
This script reads cron job definitions from a YAML file (cron-jobs.yaml) and
synchronizes them with the jobs.json file used by the Hermes scheduler.
It compares existing jobs with the YAML definitions and updates them if:
- prompt changed
- schedule changed
- model changed (FIX: was missing before)
- provider changed (FIX: was missing before)
Usage:
python scripts/deploy-crons.py [--config PATH] [--dry-run]
Exit codes:
0 All jobs deployed successfully.
1 One or more errors occurred.
"""
import argparse
import json
import os
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from cron.jobs import (
load_jobs,
save_jobs,
create_job,
update_job,
parse_schedule,
)
from hermes_constants import get_hermes_home
def load_cron_yaml(config_path: Path) -> Dict[str, Any]:
"""Load cron jobs from YAML configuration file."""
try:
import yaml
except ImportError:
print("Error: PyYAML is required. Install with: pip install pyyaml", file=sys.stderr)
sys.exit(1)
if not config_path.exists():
print(f"Error: Config file not found: {config_path}", file=sys.stderr)
sys.exit(1)
with open(config_path, 'r', encoding='utf-8') as f:
data = yaml.safe_load(f) or {}
return data
def normalize_job_for_comparison(job: Dict[str, Any]) -> Dict[str, Any]:
"""Normalize a job dict for comparison purposes."""
normalized = {}
normalized["prompt"] = job.get("prompt", "")
normalized["schedule"] = job.get("schedule", {})
normalized["model"] = job.get("model")
normalized["provider"] = job.get("provider")
normalized["base_url"] = job.get("base_url")
return normalized
def find_matching_job(jobs: List[Dict[str, Any]], yaml_job: Dict[str, Any]) -> Optional[Dict[str, Any]]:
"""Find a matching job in jobs.json by name or ID."""
yaml_name = yaml_job.get("name")
yaml_id = yaml_job.get("id")
for job in jobs:
# Match by ID if provided
if yaml_id and job.get("id") == yaml_id:
return job
# Match by name if provided
if yaml_name and job.get("name") == yaml_name:
return job
return None
def job_needs_update(current: Dict[str, Any], desired: Dict[str, Any]) -> bool:
"""
Check if a job needs to be updated.
Compares prompt, schedule, model, and provider.
If any of these changed, the job needs to be updated.
This is the FIX for issue #375: model and provider were not being compared.
"""
cur_normalized = normalize_job_for_comparison(current)
des_normalized = normalize_job_for_comparison(desired)
# Compare prompt
if cur_normalized["prompt"] != des_normalized["prompt"]:
return True
# Compare schedule
if cur_normalized["schedule"] != des_normalized["schedule"]:
return True
# FIX: Compare model (was missing before)
if cur_normalized["model"] != des_normalized["model"]:
return True
# FIX: Compare provider (was missing before)
if cur_normalized["provider"] != des_normalized["provider"]:
return True
# Compare base_url
if cur_normalized["base_url"] != des_normalized["base_url"]:
return True
return False
def deploy_jobs(config_path: Path, dry_run: bool = False) -> int:
"""
Deploy cron jobs from YAML to jobs.json.
Returns the number of jobs updated.
"""
config = load_cron_yaml(config_path)
yaml_jobs = config.get("jobs", [])
if not yaml_jobs:
print("No jobs found in configuration file.")
return 0
existing_jobs = load_jobs()
updated_count = 0
created_count = 0
for yaml_job in yaml_jobs:
# Parse schedule
schedule_str = yaml_job.get("schedule")
if not schedule_str:
print(f"Warning: Job '{yaml_job.get('name', 'unnamed')}' has no schedule, skipping.")
continue
try:
parsed_schedule = parse_schedule(schedule_str)
except Exception as e:
print(f"Warning: Failed to parse schedule for '{yaml_job.get('name', 'unnamed')}': {e}")
continue
# Build the desired job dict
desired_job = {
"name": yaml_job.get("name"),
"prompt": yaml_job.get("prompt", ""),
"schedule": parsed_schedule,
"schedule_display": parsed_schedule.get("display", schedule_str),
"model": yaml_job.get("model"),
"provider": yaml_job.get("provider"),
"base_url": yaml_job.get("base_url"),
"deliver": yaml_job.get("deliver", "local"),
"skills": yaml_job.get("skills", []),
"skill": yaml_job.get("skills", [None])[0] if yaml_job.get("skills") else yaml_job.get("skill"),
"repeat": yaml_job.get("repeat"),
"script": yaml_job.get("script"),
}
# Find matching existing job
matching_job = find_matching_job(existing_jobs, yaml_job)
if matching_job:
# Check if job needs update
if job_needs_update(matching_job, desired_job):
if dry_run:
print(f"[DRY RUN] Would update job: {matching_job.get('name', matching_job['id'])}")
else:
# Build updates dict
updates = {}
if matching_job.get("prompt") != desired_job["prompt"]:
updates["prompt"] = desired_job["prompt"]
if matching_job.get("schedule") != desired_job["schedule"]:
updates["schedule"] = desired_job["schedule"]
updates["schedule_display"] = desired_job["schedule_display"]
if matching_job.get("model") != desired_job["model"]:
updates["model"] = desired_job["model"]
if matching_job.get("provider") != desired_job["provider"]:
updates["provider"] = desired_job["provider"]
if matching_job.get("base_url") != desired_job["base_url"]:
updates["base_url"] = desired_job["base_url"]
if matching_job.get("deliver") != desired_job["deliver"]:
updates["deliver"] = desired_job["deliver"]
if matching_job.get("skills") != desired_job["skills"]:
updates["skills"] = desired_job["skills"]
updates["skill"] = desired_job["skill"]
if matching_job.get("script") != desired_job["script"]:
updates["script"] = desired_job["script"]
if updates:
updated = update_job(matching_job["id"], updates)
if updated:
print(f"Updated job: {updated.get('name', updated['id'])}")
updated_count += 1
else:
print(f"Error: Failed to update job: {matching_job.get('name', matching_job['id'])}")
else:
print(f"Job unchanged: {matching_job.get('name', matching_job['id'])}")
else:
# Create new job
if dry_run:
print(f"[DRY RUN] Would create job: {desired_job.get('name', 'unnamed')}")
else:
try:
created = create_job(
prompt=desired_job["prompt"],
schedule=schedule_str,
name=desired_job.get("name"),
deliver=desired_job.get("deliver"),
model=desired_job.get("model"),
provider=desired_job.get("provider"),
base_url=desired_job.get("base_url"),
skills=desired_job.get("skills"),
script=desired_job.get("script"),
repeat=desired_job.get("repeat"),
)
print(f"Created job: {created.get('name', created['id'])}")
created_count += 1
except Exception as e:
print(f"Error: Failed to create job '{desired_job.get('name', 'unnamed')}': {e}")
print(f"\nDeployment complete: {created_count} created, {updated_count} updated")
return created_count + updated_count
def main():
parser = argparse.ArgumentParser(description="Deploy cron jobs from YAML to jobs.json")
parser.add_argument(
"--config",
type=Path,
default=get_hermes_home() / "cron-jobs.yaml",
help="Path to cron-jobs.yaml (default: ~/.hermes/cron-jobs.yaml)"
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Show what would be done without making changes"
)
args = parser.parse_args()
try:
count = deploy_jobs(args.config, args.dry_run)
sys.exit(0 if count >= 0 else 1)
except Exception as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()

View File

@@ -1,234 +0,0 @@
#!/usr/bin/env python3
"""Evaluate Qwen3.5:35B as a local model option for the Hermes fleet.
Part of Epic #281 -- Vitalik's Secure LLM Architecture.
Issue #288 -- Evaluate Qwen3.5:35B as Local Model Option.
Evaluates:
1. Model specs & deployment feasibility
2. Context window & tool-use support
3. Security posture (local inference = no data exfiltration)
4. Comparison against current fleet models
5. VRAM requirements by quantization level
6. Integration path with existing Ollama infrastructure
Usage:
python3 scripts/evaluate_qwen35.py # Full evaluation
python3 scripts/evaluate_qwen35.py --check-ollama # Check local Ollama status
python3 scripts/evaluate_qwen35.py --benchmark MODEL # Run benchmark against a model
"""
import json
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional
@dataclass
class ModelSpec:
name: str = "Qwen3.5-35B-A3B"
ollama_tag: str = "qwen3.5:35b"
hf_id: str = "Qwen/Qwen3.5-35B-A3B"
architecture: str = "MoE (Mixture of Experts)"
total_params: str = "35B"
active_params: str = "3B per token"
context_length: int = 131072
license: str = "Apache 2.0"
tool_use_support: bool = True
json_mode_support: bool = True
function_calling: bool = True
quantization_options: Dict[str, int] = field(default_factory=lambda: {
"Q8_0": 36, "Q6_K": 28, "Q5_K_M": 24, "Q4_K_M": 20,
"Q4_0": 18, "Q3_K_M": 15, "Q2_K": 12,
})
FLEET_MODELS = {
"qwen3.5:35b (candidate)": {
"params_total": "35B", "context": "128K", "local": True,
"tool_use": True, "reasoning": "good",
},
"gemma4 (current local)": {
"params_total": "9B", "context": "128K", "local": True,
"tool_use": True, "reasoning": "good",
},
"hermes4:14b (current local)": {
"params_total": "14B", "context": "8K", "local": True,
"tool_use": True, "reasoning": "good",
},
"qwen2.5:7b (fleet)": {
"params_total": "7B", "context": "32K", "local": True,
"tool_use": True, "reasoning": "moderate",
},
"claude-sonnet-4 (cloud)": {
"params_total": "?", "context": "200K", "local": False,
"tool_use": True, "reasoning": "excellent",
},
"mimo-v2-pro (cloud free)": {
"params_total": "?", "context": "128K", "local": False,
"tool_use": True, "reasoning": "good",
},
}
SECURITY_CRITERIA = [
{"criterion": "Data locality", "weight": "CRITICAL", "score": 10,
"notes": "All inference local via Ollama. Zero data exfiltration."},
{"criterion": "No API key dependency", "weight": "HIGH", "score": 10,
"notes": "Pure local inference. No external credentials needed."},
{"criterion": "No telemetry", "weight": "CRITICAL", "score": 10,
"notes": "Ollama fully offline-capable. No phone-home in weights."},
{"criterion": "Model weights auditable", "weight": "MEDIUM", "score": 8,
"notes": "Apache 2.0, HuggingFace SHA verification. MoE harder to audit."},
{"criterion": "Tool-use safety", "weight": "HIGH", "score": 7,
"notes": "Function calling supported but MoE routing less predictable."},
{"criterion": "Privacy filter compat", "weight": "HIGH", "score": 9,
"notes": "Local = Privacy Filter unnecessary for most queries."},
{"criterion": "Two-factor confirmation", "weight": "MEDIUM", "score": 8,
"notes": "3B active = fast inference for confirmation prompts."},
{"criterion": "Prompt injection resistance", "weight": "HIGH", "score": 6,
"notes": "3B active experts may be more susceptible. Needs red-team."},
]
HARDWARE_PROFILES = {
"mac_m2_ultra_192gb": {
"name": "Mac Studio M2 Ultra (192GB)", "mem_gb": 192,
"fits_q4": True, "fits_q8": True, "rec": "Q6_K", "tok_sec": 40,
},
"mac_m4_pro_48gb": {
"name": "Mac Mini M4 Pro (48GB)", "mem_gb": 48,
"fits_q4": True, "fits_q8": False, "rec": "Q4_K_M", "tok_sec": 30,
},
"mac_m1_16gb": {
"name": "Mac M1 (16GB)", "mem_gb": 16,
"fits_q4": False, "fits_q8": False, "rec": None, "tok_sec": None,
},
"rtx_4090_24gb": {
"name": "NVIDIA RTX 4090 (24GB)", "mem_gb": 24,
"fits_q4": True, "fits_q8": False, "rec": "Q5_K_M", "tok_sec": 50,
},
"rtx_3090_24gb": {
"name": "NVIDIA RTX 3090 (24GB)", "mem_gb": 24,
"fits_q4": True, "fits_q8": False, "rec": "Q4_K_M", "tok_sec": 35,
},
"runpod_l40s_48gb": {
"name": "RunPod L40S (48GB)", "mem_gb": 48,
"fits_q4": True, "fits_q8": True, "rec": "Q6_K", "tok_sec": 60,
},
}
def check_ollama_status() -> Dict[str, Any]:
import subprocess
result = {"running": False, "models": [], "qwen35_available": False}
try:
r = subprocess.run(
["curl", "-s", "--max-time", "5", "http://localhost:11434/api/tags"],
capture_output=True, text=True, timeout=10)
if r.returncode == 0:
data = json.loads(r.stdout)
result["running"] = True
result["models"] = [m["name"] for m in data.get("models", [])]
result["qwen35_available"] = any("qwen3.5" in m.lower() for m in result["models"])
except Exception as e:
result["error"] = str(e)
return result
def run_benchmark(model: str, prompt: str) -> Dict[str, Any]:
import subprocess
start = time.time()
try:
r = subprocess.run(
["curl", "-s", "--max-time", "120", "http://localhost:11434/api/generate",
"-d", json.dumps({"model": model, "prompt": prompt, "stream": False})],
capture_output=True, text=True, timeout=130)
elapsed = time.time() - start
if r.returncode == 0:
data = json.loads(r.stdout)
response = data.get("response", "")
ec = data.get("eval_count", 0)
ed = data.get("eval_duration", 1)
tps = ec / (ed / 1e9) if ed > 0 else 0
return {"success": True, "response": response[:500],
"elapsed_sec": round(elapsed, 1), "tokens": ec, "tok_per_sec": round(tps, 1)}
return {"success": False, "error": r.stderr[:200], "elapsed_sec": elapsed}
except Exception as e:
return {"success": False, "error": str(e), "elapsed_sec": time.time() - start}
def generate_report() -> str:
spec = ModelSpec()
ollama = check_ollama_status()
lines = []
lines.append("=" * 72)
lines.append("Qwen3.5:35B EVALUATION REPORT -- Issue #288")
lines.append("Part of Epic #281 -- Vitalik's Secure LLM Architecture")
lines.append("=" * 72)
lines.append("\n## 1. Model Specification\n")
lines.append(f" Name: {spec.name}")
lines.append(f" Ollama tag: {spec.ollama_tag}")
lines.append(f" HuggingFace: {spec.hf_id}")
lines.append(f" Architecture: {spec.architecture}")
lines.append(f" Params: {spec.total_params} total, {spec.active_params}")
lines.append(f" Context: {spec.context_length:,} tokens ({spec.context_length//1024}K)")
lines.append(f" License: {spec.license}")
lines.append(f" Tool use: {'Yes' if spec.tool_use_support else 'No'}")
lines.append("\n## 2. VRAM Requirements\n")
for q, vram in sorted(spec.quantization_options.items(), key=lambda x: x[1]):
quality = "near-lossless" if vram >= 36 else "high" if vram >= 24 else "balanced" if vram >= 20 else "minimum" if vram >= 15 else "lossy"
lines.append(f" {q:<10} {vram:>4}GB {quality}")
lines.append("\n## 3. Hardware Compatibility\n")
for hw in HARDWARE_PROFILES.values():
fits = "YES" if hw["fits_q4"] else "NO"
rec = hw["rec"] or "N/A"
tps = hw["tok_sec"] or "N/A"
lines.append(f" {hw['name']} {hw['mem_gb']}GB Q4:{fits} Rec:{rec} ~{tps}tok/s")
lines.append("\n## 4. Security Evaluation (Vitalik Framework)\n")
wm = {"CRITICAL": 3, "HIGH": 2, "MEDIUM": 1}
tw, ws = 0, 0
for c in SECURITY_CRITERIA:
w = wm[c["weight"]]
tw += w; ws += c["score"] * w
lines.append(f" [{c['weight']:<8}] {c['criterion']}: {c['score']}/10 -- {c['notes']}")
avg = ws / tw if tw else 0
lines.append(f"\n Weighted score: {avg:.1f}/10 Verdict: {'STRONG' if avg >= 8 else 'ADEQUATE'}")
lines.append("\n## 5. Fleet Comparison\n")
for name, d in FLEET_MODELS.items():
lines.append(f" {name:<35} {d['params_total']:<6} {d['context']:<6} {'Local' if d['local'] else 'Cloud'} {d['reasoning']}")
lines.append("\n## 6. Ollama Status\n")
lines.append(f" Running: {'Yes' if ollama['running'] else 'No'}")
lines.append(f" Models: {', '.join(ollama['models']) or 'none'}")
lines.append(f" Qwen3.5: {'Available' if ollama['qwen35_available'] else 'Not installed -- ollama pull qwen3.5:35b'}")
lines.append("\n## 7. Recommendation\n")
lines.append(" VERDICT: APPROVED for local deployment as privacy-sensitive tier")
lines.append("\n + Perfect data sovereignty (Vitalik #1 requirement)")
lines.append(" + MoE: 35B quality at 3B inference speed")
lines.append(" + 128K context, Apache 2.0, tool use + JSON mode")
lines.append(" + Eliminates Privacy Filter need for most queries")
lines.append("\n - 20GB VRAM at Q4 (needs beefy hardware)")
lines.append(" - MoE routing less predictable than dense models")
lines.append(" - Needs red-team testing for prompt injection (#324)")
lines.append("\n## 8. Integration Path\n")
lines.append(" config.yaml:")
lines.append(" privacy_model:")
lines.append(" provider: ollama")
lines.append(" model: qwen3.5:35b")
lines.append(" base_url: http://localhost:11434")
lines.append(" context_length: 131072")
return "\n".join(lines)
if __name__ == "__main__":
if "--check-ollama" in sys.argv:
print(json.dumps(check_ollama_status(), indent=2))
elif "--benchmark" in sys.argv:
idx = sys.argv.index("--benchmark")
model = sys.argv[idx + 1] if idx + 1 < len(sys.argv) else "qwen2.5:7b"
print(json.dumps(run_benchmark(model, "Explain local LLM security in 3 sentences."), indent=2))
else:
print(generate_report())

View File

@@ -0,0 +1,350 @@
"""
Tests for scripts/deploy-crons.py — cron job deployment from YAML.
"""
import json
import os
import tempfile
from pathlib import Path
from unittest.mock import patch
import pytest
# Add parent directory to path for imports
import sys
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from scripts.deploy_crons import (
job_needs_update,
normalize_job_for_comparison,
find_matching_job,
)
class TestJobNeedsUpdate:
"""Test the job_needs_update function."""
def test_no_update_when_identical(self):
"""No update needed when jobs are identical."""
current = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic",
}
desired = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic",
}
assert job_needs_update(current, desired) is False
def test_update_when_prompt_changes(self):
"""Update needed when prompt changes."""
current = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic",
}
desired = {
"prompt": "Check server health",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic",
}
assert job_needs_update(current, desired) is True
def test_update_when_schedule_changes(self):
"""Update needed when schedule changes."""
current = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic",
}
desired = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 30},
"model": "claude-sonnet-4",
"provider": "anthropic",
}
assert job_needs_update(current, desired) is True
def test_update_when_model_changes(self):
"""Update needed when model changes (FIX for issue #375)."""
current = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic",
}
desired = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4-6",
"provider": "anthropic",
}
assert job_needs_update(current, desired) is True
def test_update_when_provider_changes(self):
"""Update needed when provider changes (FIX for issue #375)."""
current = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic",
}
desired = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "openrouter",
}
assert job_needs_update(current, desired) is True
def test_update_when_model_added(self):
"""Update needed when model is added to a job that didn't have one."""
current = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": None,
"provider": None,
}
desired = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic",
}
assert job_needs_update(current, desired) is True
def test_update_when_provider_added(self):
"""Update needed when provider is added to a job that didn't have one."""
current = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": None,
}
desired = {
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic",
}
assert job_needs_update(current, desired) is True
class TestNormalizeJobForComparison:
"""Test the normalize_job_for_comparison function."""
def test_normalizes_job_correctly(self):
"""Test that job normalization extracts the right fields."""
job = {
"id": "abc123",
"name": "Test Job",
"prompt": "Do something",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic",
"base_url": "https://api.anthropic.com",
"extra_field": "ignored",
}
normalized = normalize_job_for_comparison(job)
assert normalized["prompt"] == "Do something"
assert normalized["schedule"] == {"kind": "interval", "minutes": 60}
assert normalized["model"] == "claude-sonnet-4"
assert normalized["provider"] == "anthropic"
assert normalized["base_url"] == "https://api.anthropic.com"
assert "id" not in normalized
assert "name" not in normalized
assert "extra_field" not in normalized
def test_handles_missing_fields(self):
"""Test that normalization handles missing fields gracefully."""
job = {
"prompt": "Do something",
}
normalized = normalize_job_for_comparison(job)
assert normalized["prompt"] == "Do something"
assert normalized["schedule"] == {}
assert normalized["model"] is None
assert normalized["provider"] is None
assert normalized["base_url"] is None
class TestFindMatchingJob:
"""Test the find_matching_job function."""
def test_finds_by_id(self):
"""Test finding a job by ID."""
jobs = [
{"id": "abc123", "name": "Job 1"},
{"id": "def456", "name": "Job 2"},
]
yaml_job = {"id": "abc123", "name": "Different Name"}
result = find_matching_job(jobs, yaml_job)
assert result is not None
assert result["id"] == "abc123"
def test_finds_by_name(self):
"""Test finding a job by name."""
jobs = [
{"id": "abc123", "name": "Job 1"},
{"id": "def456", "name": "Job 2"},
]
yaml_job = {"name": "Job 2"}
result = find_matching_job(jobs, yaml_job)
assert result is not None
assert result["id"] == "def456"
def test_returns_none_when_no_match(self):
"""Test that None is returned when no match is found."""
jobs = [
{"id": "abc123", "name": "Job 1"},
{"id": "def456", "name": "Job 2"},
]
yaml_job = {"name": "Nonexistent Job"}
result = find_matching_job(jobs, yaml_job)
assert result is None
def test_prefers_id_over_name(self):
"""Test that ID matching takes precedence over name matching."""
jobs = [
{"id": "abc123", "name": "Job 1"},
{"id": "def456", "name": "Job 2"},
]
yaml_job = {"id": "abc123", "name": "Job 2"}
result = find_matching_job(jobs, yaml_job)
assert result is not None
assert result["id"] == "abc123" # ID match takes precedence
class TestDeployCronsIntegration:
"""Integration tests for deploy-crons.py."""
@pytest.fixture
def temp_dir(self, tmp_path):
"""Create a temporary directory for test files."""
return tmp_path
@pytest.fixture
def sample_yaml(self, temp_dir):
"""Create a sample cron-jobs.yaml file."""
yaml_content = """
jobs:
- name: "Server Health Check"
prompt: "Check server health and report status"
schedule: "every 1h"
model: "claude-sonnet-4"
provider: "anthropic"
deliver: "local"
- name: "Database Backup"
prompt: "Run database backup"
schedule: "0 2 * * *"
model: "claude-sonnet-4"
provider: "anthropic"
deliver: "local"
"""
yaml_file = temp_dir / "cron-jobs.yaml"
yaml_file.write_text(yaml_content)
return yaml_file
@pytest.fixture
def sample_jobs_json(self, temp_dir):
"""Create a sample jobs.json file."""
jobs_data = {
"jobs": [
{
"id": "job1",
"name": "Server Health Check",
"prompt": "Check server status",
"schedule": {"kind": "interval", "minutes": 60, "display": "every 1h"},
"schedule_display": "every 1h",
"model": "claude-sonnet-4",
"provider": "anthropic",
"enabled": True,
"state": "scheduled",
},
{
"id": "job2",
"name": "Database Backup",
"prompt": "Run database backup",
"schedule": {"kind": "cron", "expr": "0 2 * * *", "display": "0 2 * * *"},
"schedule_display": "0 2 * * *",
"model": None, # No model specified
"provider": None, # No provider specified
"enabled": True,
"state": "scheduled",
},
],
"updated_at": "2026-04-13T00:00:00",
}
jobs_file = temp_dir / "jobs.json"
jobs_file.write_text(json.dumps(jobs_data, indent=2))
return jobs_file
def test_detects_model_change(self, sample_yaml, sample_jobs_json, temp_dir):
"""Test that model changes are detected (FIX for issue #375)."""
from scripts.deploy_crons import job_needs_update, normalize_job_for_comparison
# Simulate a job where model changed
current_job = {
"prompt": "Check server health and report status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4", # Current model
"provider": "anthropic",
}
desired_job = {
"prompt": "Check server health and report status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4-6", # New model
"provider": "anthropic",
}
assert job_needs_update(current_job, desired_job) is True
def test_detects_provider_change(self, sample_yaml, sample_jobs_json, temp_dir):
"""Test that provider changes are detected (FIX for issue #375)."""
from scripts.deploy_crons import job_needs_update
# Simulate a job where provider changed
current_job = {
"prompt": "Check server health and report status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "anthropic", # Current provider
}
desired_job = {
"prompt": "Check server health and report status",
"schedule": {"kind": "interval", "minutes": 60},
"model": "claude-sonnet-4",
"provider": "openrouter", # New provider
}
assert job_needs_update(current_job, desired_job) is True
def test_no_update_when_only_prompt_unchanged(self, sample_yaml, sample_jobs_json, temp_dir):
"""Test that jobs are NOT updated when only prompt is unchanged but model/provider changed."""
from scripts.deploy_crons import job_needs_update
# This is the bug scenario: prompt unchanged, but model/provider changed
current_job = {
"prompt": "Check server health and report status",
"schedule": {"kind": "interval", "minutes": 60},
"model": None, # No model
"provider": None, # No provider
}
desired_job = {
"prompt": "Check server health and report status", # Same prompt
"schedule": {"kind": "interval", "minutes": 60}, # Same schedule
"model": "claude-sonnet-4", # New model added
"provider": "anthropic", # New provider added
}
# This should return True (needs update) because model/provider changed
assert job_needs_update(current_job, desired_job) is True
if __name__ == "__main__":
pytest.main([__file__, "-v"])

View File

@@ -1,63 +0,0 @@
"""Tests for Qwen3.5:35B evaluation -- Issue #288."""
import json
import pytest
from scripts.evaluate_qwen35 import (
ModelSpec, FLEET_MODELS, SECURITY_CRITERIA, HARDWARE_PROFILES,
check_ollama_status, generate_report,
)
class TestModelSpec:
def test_spec_fields(self):
s = ModelSpec()
assert s.name == "Qwen3.5-35B-A3B"
assert s.total_params == "35B"
assert s.active_params == "3B per token"
assert s.context_length == 131072
assert s.license == "Apache 2.0"
assert s.tool_use_support is True
def test_quantization_decreasing_vram(self):
s = ModelSpec()
items = sorted(s.quantization_options.items(), key=lambda x: x[1])
for i in range(1, len(items)):
assert items[i][1] >= items[i-1][1]
class TestSecurity:
def test_scores_in_range(self):
for c in SECURITY_CRITERIA:
assert 1 <= c["score"] <= 10
assert c["weight"] in ("CRITICAL", "HIGH", "MEDIUM")
def test_weighted_average(self):
wm = {"CRITICAL": 3, "HIGH": 2, "MEDIUM": 1}
tw = sum(wm[c["weight"]] for c in SECURITY_CRITERIA)
ws = sum(c["score"] * wm[c["weight"]] for c in SECURITY_CRITERIA)
assert ws / tw >= 7.0
class TestHardware:
def test_m2_ultra_fits(self):
assert HARDWARE_PROFILES["mac_m2_ultra_192gb"]["fits_q4"] is True
def test_m1_doesnt_fit(self):
assert HARDWARE_PROFILES["mac_m1_16gb"]["fits_q4"] is False
class TestReport:
def test_has_all_sections(self):
r = generate_report()
for s in ["Model Specification", "VRAM", "Hardware", "Security", "Fleet", "Recommendation"]:
assert s in r, f"Missing: {s}"
def test_verdict_approved(self):
assert "APPROVED" in generate_report()
class TestOllama:
def test_returns_dict(self):
r = check_ollama_status()
assert isinstance(r, dict)
assert "running" in r