Compare commits: burn/375-1...burn/model (1 commit: f8f4678ee4)
@@ -26,7 +26,7 @@ from cron.jobs import (
     trigger_job,
     JOBS_FILE,
 )
-from cron.scheduler import tick
+from cron.scheduler import tick, ModelContextError, CRON_MIN_CONTEXT_TOKENS

 __all__ = [
     "create_job",
@@ -39,4 +39,6 @@ __all__ = [
     "trigger_job",
     "tick",
     "JOBS_FILE",
+    "ModelContextError",
+    "CRON_MIN_CONTEXT_TOKENS",
 ]
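Taken together, these two hunks re-export the new scheduler error and constant at package level. A minimal consumer sketch (hedged: the import line is taken verbatim from the hunk above, but `tick`'s signature and the exception's fields are not shown in this diff):

from cron.scheduler import tick, ModelContextError, CRON_MIN_CONTEXT_TOKENS

try:
    tick()  # hypothetical no-arg call; the diff does not show tick's signature
except ModelContextError:
    # Raised when the configured model's context window is too small.
    print(f"cron model needs >= {CRON_MIN_CONTEXT_TOKENS} context tokens")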
@@ -1 +0,0 @@
-# Scripts package
scripts/benchmark_local_models.py (new file, 284 lines)
@@ -0,0 +1,284 @@
#!/usr/bin/env python3
"""
Benchmark local Ollama models against the 50 tok/s UX threshold.

Usage:
    python3 scripts/benchmark_local_models.py [--models MODEL1,MODEL2] [--prompt PROMPT] [--rounds N]
    python3 scripts/benchmark_local_models.py --all   # test all pulled models
    python3 scripts/benchmark_local_models.py --json  # JSON output for CI
"""

import argparse
import json
import os
import sys
import time
import urllib.request
import urllib.error
from dataclasses import dataclass, asdict
from typing import Optional

OLLAMA_BASE = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
THRESHOLD_TOK_S = 50.0

BENCHMARK_PROMPT = (
    "Explain the difference between TCP and UDP protocols. "
    "Cover reliability, ordering, speed, and use cases. "
    "Be thorough but concise. Write at least 300 words."
)


@dataclass
class BenchmarkResult:
    model: str
    size_gb: float
    prompt_tokens: int
    eval_tokens: int
    eval_duration_s: float
    tokens_per_second: float
    total_duration_s: float
    rounds: int
    avg_tok_s: float
    meets_threshold: bool
    error: Optional[str] = None


def get_models() -> list[dict]:
    """List all pulled Ollama models."""
    url = f"{OLLAMA_BASE}/api/tags"
    try:
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req, timeout=10) as resp:
            data = json.loads(resp.read())
        return data.get("models", [])
    except Exception as e:
        print(f"Error connecting to Ollama at {OLLAMA_BASE}: {e}", file=sys.stderr)
        sys.exit(1)


def benchmark_model(model: str, prompt: str, num_predict: int = 512) -> dict:
    """Run a single benchmark generation, return timing stats."""
    url = f"{OLLAMA_BASE}/api/generate"
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {
            "num_predict": num_predict,
            "temperature": 0.1,  # low temp for consistent output
        },
    }).encode()

    req = urllib.request.Request(url, data=payload, method="POST")
    req.add_header("Content-Type", "application/json")

    start = time.monotonic()
    try:
        with urllib.request.urlopen(req, timeout=300) as resp:
            data = json.loads(resp.read())
    except urllib.error.HTTPError as e:
        body = e.read().decode() if e.fp else str(e)
        raise RuntimeError(f"HTTP {e.code}: {body[:200]}")
    except Exception as e:
        raise RuntimeError(str(e))
    elapsed = time.monotonic() - start

    prompt_tokens = data.get("prompt_eval_count", 0)
    eval_tokens = data.get("eval_count", 0)
    eval_duration_ns = data.get("eval_duration", 0)
    total_duration_ns = data.get("total_duration", 0)

    eval_duration_s = eval_duration_ns / 1e9 if eval_duration_ns else elapsed
    total_duration_s = total_duration_ns / 1e9 if total_duration_ns else elapsed
    tok_s = eval_tokens / eval_duration_s if eval_duration_s > 0 else 0.0
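    # Worked example with hypothetical numbers: eval_count=420 and
    # eval_duration=7.0e9 ns give eval_duration_s=7.0, so 420 / 7.0 = 60.0 tok/s,
    # which clears the 50 tok/s threshold.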

    return {
        "prompt_tokens": prompt_tokens,
        "eval_tokens": eval_tokens,
        "eval_duration_s": round(eval_duration_s, 2),
        "total_duration_s": round(total_duration_s, 2),
        "tokens_per_second": round(tok_s, 1),
    }


def run_benchmark(
    model_name: str,
    model_size: float,
    prompt: str,
    rounds: int,
    num_predict: int,
    threshold: float = 50.0,
) -> BenchmarkResult:
    """Run multiple rounds and compute average."""
    results = []
    errors = []

    for i in range(rounds):
        try:
            r = benchmark_model(model_name, prompt, num_predict)
            results.append(r)
            print(f" Round {i+1}/{rounds}: {r['tokens_per_second']} tok/s "
                  f"({r['eval_tokens']} tokens in {r['eval_duration_s']}s)")
        except Exception as e:
            errors.append(str(e))
            print(f" Round {i+1}/{rounds}: ERROR - {e}")

    if not results:
        return BenchmarkResult(
            model=model_name,
            size_gb=model_size,
            prompt_tokens=0, eval_tokens=0,
            eval_duration_s=0, tokens_per_second=0,
            total_duration_s=0, rounds=rounds,
            avg_tok_s=0, meets_threshold=False,
            error="; ".join(errors),
        )

    avg_tok_s = sum(r["tokens_per_second"] for r in results) / len(results)
    avg_tok_s = round(avg_tok_s, 1)

    return BenchmarkResult(
        model=model_name,
        size_gb=model_size,
        prompt_tokens=sum(r["prompt_tokens"] for r in results) // len(results),
        eval_tokens=sum(r["eval_tokens"] for r in results) // len(results),
        eval_duration_s=round(sum(r["eval_duration_s"] for r in results) / len(results), 2),
        tokens_per_second=avg_tok_s,
        total_duration_s=round(sum(r["total_duration_s"] for r in results) / len(results), 2),
        rounds=len(results),
        avg_tok_s=avg_tok_s,
        meets_threshold=avg_tok_s >= threshold,
    )


def format_report(results: list[BenchmarkResult], threshold: float = 50.0) -> str:
    """Format a human-readable benchmark report."""
    lines = []
    lines.append("")
    lines.append("=" * 72)
    lines.append(f" LOCAL MODEL BENCHMARK — {threshold:.0f} tok/s UX Threshold")
    lines.append("=" * 72)
    lines.append("")

    # Summary table
    header = f"{'Model':<25} {'Size':>6} {'tok/s':>8} {'Threshold':>10} {'Status':>8}"
    lines.append(header)
    lines.append("-" * 72)

    passed = 0
    failed = 0
    errors = 0

    for r in sorted(results, key=lambda x: x.avg_tok_s, reverse=True):
        size_str = f"{r.size_gb:.1f}GB"
        tok_s_str = f"{r.avg_tok_s:.1f}"

        if r.error:
            status = "ERROR"
            errors += 1
        elif r.meets_threshold:
            status = "PASS"
            passed += 1
        else:
            status = "FAIL"
            failed += 1

        marker = ">" if r.meets_threshold else "X" if r.error else "!"
        thresh_str = f">= {threshold:.0f}"
        lines.append(f" {marker} {r.model:<23} {size_str:>6} {tok_s_str:>8} {thresh_str:>10} {status:>8}")

    lines.append("-" * 72)
    lines.append(f" Passed: {passed} | Failed: {failed} | Errors: {errors} | Total: {len(results)}")
    lines.append("")

    # Detail section for failures
    failures = [r for r in results if not r.meets_threshold and not r.error]
    if failures:
        lines.append(" FAILED MODELS (below threshold):")
        for r in sorted(failures, key=lambda x: x.avg_tok_s):
            gap = threshold - r.avg_tok_s
            lines.append(f" - {r.model}: {r.avg_tok_s:.1f} tok/s "
                         f"({gap:.1f} tok/s short, {r.eval_tokens} avg tokens/round)")
        lines.append("")

    error_list = [r for r in results if r.error]
    if error_list:
        lines.append(" ERRORS:")
        for r in error_list:
            lines.append(f" - {r.model}: {r.error}")
        lines.append("")

    # Hardware info
    import platform
    lines.append(f" Host: {platform.node()} | {platform.system()} {platform.release()}")
    lines.append(f" Ollama: {OLLAMA_BASE}")
    lines.append("")

    return "\n".join(lines)
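# A passing row from the table loop above looks like (hypothetical model and figures):
#  > llama3.1:8b               4.7GB     62.3      >= 50     PASS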


def main():
    parser = argparse.ArgumentParser(description="Benchmark local Ollama models vs 50 tok/s threshold")
    parser.add_argument("--models", help="Comma-separated model names (default: all)")
    parser.add_argument("--prompt", default=BENCHMARK_PROMPT, help="Benchmark prompt")
    parser.add_argument("--rounds", type=int, default=3, help="Rounds per model (default: 3)")
    parser.add_argument("--tokens", type=int, default=512, help="Max tokens to generate (default: 512)")
    parser.add_argument("--json", action="store_true", help="JSON output for CI")
    # Accepted for explicitness; testing all models is already the default
    # behavior when --models is omitted.
    parser.add_argument("--all", action="store_true", help="Test all pulled models (default when --models is omitted)")
    parser.add_argument("--threshold", type=float, default=THRESHOLD_TOK_S, help="tok/s threshold")
    args = parser.parse_args()
    threshold = args.threshold

    # Get model list
    available = get_models()
    if not available:
        print("No models found. Pull a model first: ollama pull <model>", file=sys.stderr)
        sys.exit(1)

    if args.models:
        names = [m.strip() for m in args.models.split(",")]
        models = [m for m in available if m["name"] in names]
        missing = set(names) - set(m["name"] for m in models)
        if missing:
            print(f"Models not found: {', '.join(missing)}", file=sys.stderr)
            print(f"Available: {', '.join(m['name'] for m in available)}", file=sys.stderr)
            sys.exit(1)
    else:
        models = available

    print(f"Benchmarking {len(models)} model(s) against {threshold} tok/s threshold")
    print(f"Ollama: {OLLAMA_BASE} | Rounds: {args.rounds} | Max tokens: {args.tokens}")
    print()

    results = []
    for m in models:
        name = m["name"]
        size_gb = m.get("size", 0) / (1024**3)
        print(f" {name} ({size_gb:.1f}GB):")

        result = run_benchmark(name, size_gb, args.prompt, args.rounds, args.tokens, threshold)
        results.append(result)

    # Output
    report = format_report(results, threshold)
    if args.json:
        output = {
            "threshold_tok_s": threshold,
            "ollama_base": OLLAMA_BASE,
            "rounds": args.rounds,
            "results": [asdict(r) for r in results],
            "passed": sum(1 for r in results if r.meets_threshold),
            "failed": sum(1 for r in results if not r.meets_threshold and not r.error),
            "errors": sum(1 for r in results if r.error),
        }
        print(json.dumps(output, indent=2))
    else:
        print(report)

    # Exit code: 0 if all pass, 1 if any fail/error
    if any(not r.meets_threshold or r.error for r in results):
        sys.exit(1)
    sys.exit(0)


if __name__ == "__main__":
    main()
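Since --json is intended for CI, a consuming step might look like the sketch below. The key names mirror the `output` dict built in `main` above; the invocation path is the one from the usage docstring, and the surrounding CI wiring is hypothetical.

import json
import subprocess
import sys

proc = subprocess.run(
    [sys.executable, "scripts/benchmark_local_models.py", "--json"],
    capture_output=True, text=True,
)
report = json.loads(proc.stdout)  # raises if the script died before printing JSON
slow = [r["model"] for r in report["results"] if not r["meets_threshold"]]
print(f"passed={report['passed']} failed={report['failed']} "
      f"errors={report['errors']} slow={slow}")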
scripts/deploy-crons.py (deleted file, 259 lines)
@@ -1,259 +0,0 @@
#!/usr/bin/env python3
"""
deploy-crons.py — Deploy cron jobs from YAML configuration to jobs.json.

This script reads cron job definitions from a YAML file (cron-jobs.yaml) and
synchronizes them with the jobs.json file used by the Hermes scheduler.

It compares existing jobs with the YAML definitions and updates them if:
- prompt changed
- schedule changed
- model changed (FIX: was missing before)
- provider changed (FIX: was missing before)

Usage:
    python scripts/deploy-crons.py [--config PATH] [--dry-run]

Exit codes:
    0  All jobs deployed successfully.
    1  One or more errors occurred.
"""

import argparse
import json
import os
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))

from cron.jobs import (
    load_jobs,
    save_jobs,
    create_job,
    update_job,
    parse_schedule,
)
from hermes_constants import get_hermes_home


def load_cron_yaml(config_path: Path) -> Dict[str, Any]:
    """Load cron jobs from YAML configuration file."""
    try:
        import yaml
    except ImportError:
        print("Error: PyYAML is required. Install with: pip install pyyaml", file=sys.stderr)
        sys.exit(1)

    if not config_path.exists():
        print(f"Error: Config file not found: {config_path}", file=sys.stderr)
        sys.exit(1)

    with open(config_path, 'r', encoding='utf-8') as f:
        data = yaml.safe_load(f) or {}

    return data


def normalize_job_for_comparison(job: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a job dict for comparison purposes."""
    normalized = {}
    normalized["prompt"] = job.get("prompt", "")
    normalized["schedule"] = job.get("schedule", {})
    normalized["model"] = job.get("model")
    normalized["provider"] = job.get("provider")
    normalized["base_url"] = job.get("base_url")
    return normalized


def find_matching_job(jobs: List[Dict[str, Any]], yaml_job: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    """Find a matching job in jobs.json by ID first, then by name."""
    yaml_name = yaml_job.get("name")
    yaml_id = yaml_job.get("id")

    # Match by ID if provided (takes precedence over name, regardless of order)
    if yaml_id:
        for job in jobs:
            if job.get("id") == yaml_id:
                return job
    # Fall back to matching by name
    if yaml_name:
        for job in jobs:
            if job.get("name") == yaml_name:
                return job

    return None


def job_needs_update(current: Dict[str, Any], desired: Dict[str, Any]) -> bool:
    """
    Check if a job needs to be updated.

    Compares prompt, schedule, model, and provider.
    If any of these changed, the job needs to be updated.

    This is the FIX for issue #375: model and provider were not being compared.
    """
    cur_normalized = normalize_job_for_comparison(current)
    des_normalized = normalize_job_for_comparison(desired)

    # Compare prompt
    if cur_normalized["prompt"] != des_normalized["prompt"]:
        return True

    # Compare schedule
    if cur_normalized["schedule"] != des_normalized["schedule"]:
        return True

    # FIX: Compare model (was missing before)
    if cur_normalized["model"] != des_normalized["model"]:
        return True

    # FIX: Compare provider (was missing before)
    if cur_normalized["provider"] != des_normalized["provider"]:
        return True

    # Compare base_url
    if cur_normalized["base_url"] != des_normalized["base_url"]:
        return True

    return False
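    # Example (issue #375 regression case): prompt and schedule unchanged but
    # model goes from None to "claude-sonnet-4" -> returns True above, where the
    # pre-fix comparison (prompt/schedule only) returned False.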


def deploy_jobs(config_path: Path, dry_run: bool = False) -> int:
    """
    Deploy cron jobs from YAML to jobs.json.

    Returns the number of jobs created or updated.
    """
    config = load_cron_yaml(config_path)
    yaml_jobs = config.get("jobs", [])

    if not yaml_jobs:
        print("No jobs found in configuration file.")
        return 0

    existing_jobs = load_jobs()
    updated_count = 0
    created_count = 0

    for yaml_job in yaml_jobs:
        # Parse schedule
        schedule_str = yaml_job.get("schedule")
        if not schedule_str:
            print(f"Warning: Job '{yaml_job.get('name', 'unnamed')}' has no schedule, skipping.")
            continue

        try:
            parsed_schedule = parse_schedule(schedule_str)
        except Exception as e:
            print(f"Warning: Failed to parse schedule for '{yaml_job.get('name', 'unnamed')}': {e}")
            continue

        # Build the desired job dict
        desired_job = {
            "name": yaml_job.get("name"),
            "prompt": yaml_job.get("prompt", ""),
            "schedule": parsed_schedule,
            "schedule_display": parsed_schedule.get("display", schedule_str),
            "model": yaml_job.get("model"),
            "provider": yaml_job.get("provider"),
            "base_url": yaml_job.get("base_url"),
            "deliver": yaml_job.get("deliver", "local"),
            "skills": yaml_job.get("skills", []),
            # First entry of the skills list if given, else the scalar skill field
            "skill": yaml_job.get("skills", [None])[0] if yaml_job.get("skills") else yaml_job.get("skill"),
            "repeat": yaml_job.get("repeat"),
            "script": yaml_job.get("script"),
        }

        # Find matching existing job
        matching_job = find_matching_job(existing_jobs, yaml_job)

        if matching_job:
            # Check if job needs update
            if job_needs_update(matching_job, desired_job):
                if dry_run:
                    print(f"[DRY RUN] Would update job: {matching_job.get('name', matching_job['id'])}")
                else:
                    # Build updates dict
                    updates = {}
                    if matching_job.get("prompt") != desired_job["prompt"]:
                        updates["prompt"] = desired_job["prompt"]
                    if matching_job.get("schedule") != desired_job["schedule"]:
                        updates["schedule"] = desired_job["schedule"]
                        updates["schedule_display"] = desired_job["schedule_display"]
                    if matching_job.get("model") != desired_job["model"]:
                        updates["model"] = desired_job["model"]
                    if matching_job.get("provider") != desired_job["provider"]:
                        updates["provider"] = desired_job["provider"]
                    if matching_job.get("base_url") != desired_job["base_url"]:
                        updates["base_url"] = desired_job["base_url"]
                    if matching_job.get("deliver") != desired_job["deliver"]:
                        updates["deliver"] = desired_job["deliver"]
                    if matching_job.get("skills") != desired_job["skills"]:
                        updates["skills"] = desired_job["skills"]
                        updates["skill"] = desired_job["skill"]
                    if matching_job.get("script") != desired_job["script"]:
                        updates["script"] = desired_job["script"]

                    if updates:
                        updated = update_job(matching_job["id"], updates)
                        if updated:
                            print(f"Updated job: {updated.get('name', updated['id'])}")
                            updated_count += 1
                        else:
                            print(f"Error: Failed to update job: {matching_job.get('name', matching_job['id'])}")
            else:
                print(f"Job unchanged: {matching_job.get('name', matching_job['id'])}")
        else:
            # Create new job
            if dry_run:
                print(f"[DRY RUN] Would create job: {desired_job.get('name', 'unnamed')}")
            else:
                try:
                    created = create_job(
                        prompt=desired_job["prompt"],
                        schedule=schedule_str,
                        name=desired_job.get("name"),
                        deliver=desired_job.get("deliver"),
                        model=desired_job.get("model"),
                        provider=desired_job.get("provider"),
                        base_url=desired_job.get("base_url"),
                        skills=desired_job.get("skills"),
                        script=desired_job.get("script"),
                        repeat=desired_job.get("repeat"),
                    )
                    print(f"Created job: {created.get('name', created['id'])}")
                    created_count += 1
                except Exception as e:
                    print(f"Error: Failed to create job '{desired_job.get('name', 'unnamed')}': {e}")

    print(f"\nDeployment complete: {created_count} created, {updated_count} updated")
    return created_count + updated_count


def main():
    parser = argparse.ArgumentParser(description="Deploy cron jobs from YAML to jobs.json")
    parser.add_argument(
        "--config",
        type=Path,
        default=get_hermes_home() / "cron-jobs.yaml",
        help="Path to cron-jobs.yaml (default: ~/.hermes/cron-jobs.yaml)"
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be done without making changes"
    )

    args = parser.parse_args()

    try:
        deploy_jobs(args.config, args.dry_run)
        sys.exit(0)
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
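For orientation before the tests below, a direct dry-run call sketch. The underscore import path is the one the tests use; the config path is the documented default from the --config help text.

from pathlib import Path
from scripts.deploy_crons import deploy_jobs

# Preview what would change without writing to jobs.json.
deploy_jobs(Path.home() / ".hermes" / "cron-jobs.yaml", dry_run=True)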
(deleted file, 350 lines)
@@ -1,350 +0,0 @@
"""
|
||||
Tests for scripts/deploy-crons.py — cron job deployment from YAML.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
# Add parent directory to path for imports
|
||||
import sys
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
||||
|
||||
from scripts.deploy_crons import (
|
||||
job_needs_update,
|
||||
normalize_job_for_comparison,
|
||||
find_matching_job,
|
||||
)
|
||||
|
||||
|
||||
class TestJobNeedsUpdate:
|
||||
"""Test the job_needs_update function."""
|
||||
|
||||
def test_no_update_when_identical(self):
|
||||
"""No update needed when jobs are identical."""
|
||||
current = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
desired = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
assert job_needs_update(current, desired) is False
|
||||
|
||||
def test_update_when_prompt_changes(self):
|
||||
"""Update needed when prompt changes."""
|
||||
current = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
desired = {
|
||||
"prompt": "Check server health",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
assert job_needs_update(current, desired) is True
|
||||
|
||||
def test_update_when_schedule_changes(self):
|
||||
"""Update needed when schedule changes."""
|
||||
current = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
desired = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 30},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
assert job_needs_update(current, desired) is True
|
||||
|
||||
def test_update_when_model_changes(self):
|
||||
"""Update needed when model changes (FIX for issue #375)."""
|
||||
current = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
desired = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4-6",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
assert job_needs_update(current, desired) is True
|
||||
|
||||
def test_update_when_provider_changes(self):
|
||||
"""Update needed when provider changes (FIX for issue #375)."""
|
||||
current = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
desired = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "openrouter",
|
||||
}
|
||||
assert job_needs_update(current, desired) is True
|
||||
|
||||
def test_update_when_model_added(self):
|
||||
"""Update needed when model is added to a job that didn't have one."""
|
||||
current = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": None,
|
||||
"provider": None,
|
||||
}
|
||||
desired = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
assert job_needs_update(current, desired) is True
|
||||
|
||||
def test_update_when_provider_added(self):
|
||||
"""Update needed when provider is added to a job that didn't have one."""
|
||||
current = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": None,
|
||||
}
|
||||
desired = {
|
||||
"prompt": "Check server status",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
}
|
||||
assert job_needs_update(current, desired) is True
|
||||
|
||||
|
||||
class TestNormalizeJobForComparison:
|
||||
"""Test the normalize_job_for_comparison function."""
|
||||
|
||||
def test_normalizes_job_correctly(self):
|
||||
"""Test that job normalization extracts the right fields."""
|
||||
job = {
|
||||
"id": "abc123",
|
||||
"name": "Test Job",
|
||||
"prompt": "Do something",
|
||||
"schedule": {"kind": "interval", "minutes": 60},
|
||||
"model": "claude-sonnet-4",
|
||||
"provider": "anthropic",
|
||||
"base_url": "https://api.anthropic.com",
|
||||
"extra_field": "ignored",
|
||||
}
|
||||
normalized = normalize_job_for_comparison(job)
|
||||
assert normalized["prompt"] == "Do something"
|
||||
assert normalized["schedule"] == {"kind": "interval", "minutes": 60}
|
||||
assert normalized["model"] == "claude-sonnet-4"
|
||||
assert normalized["provider"] == "anthropic"
|
||||
assert normalized["base_url"] == "https://api.anthropic.com"
|
||||
assert "id" not in normalized
|
||||
assert "name" not in normalized
|
||||
assert "extra_field" not in normalized
|
||||
|
||||
def test_handles_missing_fields(self):
|
||||
"""Test that normalization handles missing fields gracefully."""
|
||||
job = {
|
||||
"prompt": "Do something",
|
||||
}
|
||||
normalized = normalize_job_for_comparison(job)
|
||||
assert normalized["prompt"] == "Do something"
|
||||
assert normalized["schedule"] == {}
|
||||
assert normalized["model"] is None
|
||||
assert normalized["provider"] is None
|
||||
assert normalized["base_url"] is None
|
||||
|
||||
|
||||
class TestFindMatchingJob:
|
||||
"""Test the find_matching_job function."""
|
||||
|
||||
def test_finds_by_id(self):
|
||||
"""Test finding a job by ID."""
|
||||
jobs = [
|
||||
{"id": "abc123", "name": "Job 1"},
|
||||
{"id": "def456", "name": "Job 2"},
|
||||
]
|
||||
yaml_job = {"id": "abc123", "name": "Different Name"}
|
||||
result = find_matching_job(jobs, yaml_job)
|
||||
assert result is not None
|
||||
assert result["id"] == "abc123"
|
||||
|
||||
def test_finds_by_name(self):
|
||||
"""Test finding a job by name."""
|
||||
jobs = [
|
||||
{"id": "abc123", "name": "Job 1"},
|
||||
{"id": "def456", "name": "Job 2"},
|
||||
]
|
||||
yaml_job = {"name": "Job 2"}
|
||||
result = find_matching_job(jobs, yaml_job)
|
||||
assert result is not None
|
||||
assert result["id"] == "def456"
|
||||
|
||||
def test_returns_none_when_no_match(self):
|
||||
"""Test that None is returned when no match is found."""
|
||||
jobs = [
|
||||
{"id": "abc123", "name": "Job 1"},
|
||||
{"id": "def456", "name": "Job 2"},
|
||||
]
|
||||
yaml_job = {"name": "Nonexistent Job"}
|
||||
result = find_matching_job(jobs, yaml_job)
|
||||
assert result is None
|
||||
|
||||
def test_prefers_id_over_name(self):
|
||||
"""Test that ID matching takes precedence over name matching."""
|
||||
jobs = [
|
||||
{"id": "abc123", "name": "Job 1"},
|
||||
{"id": "def456", "name": "Job 2"},
|
||||
]
|
||||
yaml_job = {"id": "abc123", "name": "Job 2"}
|
||||
result = find_matching_job(jobs, yaml_job)
|
||||
assert result is not None
|
||||
assert result["id"] == "abc123" # ID match takes precedence
|
||||
|
||||
|
||||
class TestDeployCronsIntegration:
|
||||
"""Integration tests for deploy-crons.py."""
|
||||
|
||||
@pytest.fixture
|
||||
def temp_dir(self, tmp_path):
|
||||
"""Create a temporary directory for test files."""
|
||||
return tmp_path
|
||||
|
||||
@pytest.fixture
|
||||
def sample_yaml(self, temp_dir):
|
||||
"""Create a sample cron-jobs.yaml file."""
|
||||
yaml_content = """
|
||||
jobs:
|
||||
- name: "Server Health Check"
|
||||
prompt: "Check server health and report status"
|
||||
schedule: "every 1h"
|
||||
model: "claude-sonnet-4"
|
||||
provider: "anthropic"
|
||||
deliver: "local"
|
||||
|
||||
- name: "Database Backup"
|
||||
prompt: "Run database backup"
|
||||
schedule: "0 2 * * *"
|
||||
model: "claude-sonnet-4"
|
||||
provider: "anthropic"
|
||||
deliver: "local"
|
||||
"""
|
||||
yaml_file = temp_dir / "cron-jobs.yaml"
|
||||
yaml_file.write_text(yaml_content)
|
||||
return yaml_file
|
||||
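    # The two schedule strings above exercise both parse_schedule shapes used in
    # the jobs.json fixture below: "every 1h" -> {"kind": "interval", ...} and
    # "0 2 * * *" -> {"kind": "cron", ...}.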

    @pytest.fixture
    def sample_jobs_json(self, temp_dir):
        """Create a sample jobs.json file."""
        jobs_data = {
            "jobs": [
                {
                    "id": "job1",
                    "name": "Server Health Check",
                    "prompt": "Check server status",
                    "schedule": {"kind": "interval", "minutes": 60, "display": "every 1h"},
                    "schedule_display": "every 1h",
                    "model": "claude-sonnet-4",
                    "provider": "anthropic",
                    "enabled": True,
                    "state": "scheduled",
                },
                {
                    "id": "job2",
                    "name": "Database Backup",
                    "prompt": "Run database backup",
                    "schedule": {"kind": "cron", "expr": "0 2 * * *", "display": "0 2 * * *"},
                    "schedule_display": "0 2 * * *",
                    "model": None,  # No model specified
                    "provider": None,  # No provider specified
                    "enabled": True,
                    "state": "scheduled",
                },
            ],
            "updated_at": "2026-04-13T00:00:00",
        }
        jobs_file = temp_dir / "jobs.json"
        jobs_file.write_text(json.dumps(jobs_data, indent=2))
        return jobs_file

    def test_detects_model_change(self, sample_yaml, sample_jobs_json, temp_dir):
        """Test that model changes are detected (FIX for issue #375)."""
        from scripts.deploy_crons import job_needs_update, normalize_job_for_comparison

        # Simulate a job where model changed
        current_job = {
            "prompt": "Check server health and report status",
            "schedule": {"kind": "interval", "minutes": 60},
            "model": "claude-sonnet-4",  # Current model
            "provider": "anthropic",
        }
        desired_job = {
            "prompt": "Check server health and report status",
            "schedule": {"kind": "interval", "minutes": 60},
            "model": "claude-sonnet-4-6",  # New model
            "provider": "anthropic",
        }
        assert job_needs_update(current_job, desired_job) is True

    def test_detects_provider_change(self, sample_yaml, sample_jobs_json, temp_dir):
        """Test that provider changes are detected (FIX for issue #375)."""
        from scripts.deploy_crons import job_needs_update

        # Simulate a job where provider changed
        current_job = {
            "prompt": "Check server health and report status",
            "schedule": {"kind": "interval", "minutes": 60},
            "model": "claude-sonnet-4",
            "provider": "anthropic",  # Current provider
        }
        desired_job = {
            "prompt": "Check server health and report status",
            "schedule": {"kind": "interval", "minutes": 60},
            "model": "claude-sonnet-4",
            "provider": "openrouter",  # New provider
        }
        assert job_needs_update(current_job, desired_job) is True

    def test_update_when_model_provider_added_prompt_unchanged(self, sample_yaml, sample_jobs_json, temp_dir):
        """Jobs ARE updated when the prompt is unchanged but model/provider changed."""
        from scripts.deploy_crons import job_needs_update

        # This is the bug scenario: prompt unchanged, but model/provider changed
        current_job = {
            "prompt": "Check server health and report status",
            "schedule": {"kind": "interval", "minutes": 60},
            "model": None,  # No model
            "provider": None,  # No provider
        }
        desired_job = {
            "prompt": "Check server health and report status",  # Same prompt
            "schedule": {"kind": "interval", "minutes": 60},  # Same schedule
            "model": "claude-sonnet-4",  # New model added
            "provider": "anthropic",  # New provider added
        }
        # This should return True (needs update) because model/provider changed
        assert job_needs_update(current_job, desired_job) is True


if __name__ == "__main__":
    pytest.main([__file__, "-v"])