Compare commits

..

1 Commits

Author SHA1 Message Date
8cf741115a fix(cron): include model/provider in deploy comparison
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m20s
Fixes #375

_jobs_changed() compares prompt, schedule, model, and provider.
Model/provider-only YAML changes are no longer silently dropped.
2026-04-14 01:23:41 +00:00
3 changed files with 250 additions and 271 deletions

View File

@@ -1,153 +1,292 @@
#!/usr/bin/env python3
"""
deploy-crons — normalize cron job schemas for consistent model field types.
deploy-crons -- deploy cron jobs from YAML config and normalize jobs.json.
This script ensures that the model field in jobs.json is always a dict when
either model or provider is specified, preventing schema inconsistency.
Two modes:
--deploy Sync jobs from cron-jobs.yaml into jobs.json (create / update).
--normalize Normalize model field types in existing jobs.json.
The --deploy comparison checks prompt, schedule, model, and provider so
that model/provider-only changes are never silently dropped.
Usage:
python deploy-crons.py [--dry-run] [--jobs-file PATH]
python deploy-crons.py --deploy [--config PATH] [--jobs-file PATH] [--dry-run]
python deploy-crons.py --normalize [--jobs-file PATH] [--dry-run]
"""
import argparse
import json
import sys
import uuid
from pathlib import Path
from typing import Any, Dict, Optional
from typing import Any, Dict, List, Optional
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _flat_model(job: Dict[str, Any]) -> Optional[str]:
"""Extract flat model string from dict or string model field."""
m = job.get("model")
if isinstance(m, dict):
return m.get("model")
return m
def _flat_provider(job: Dict[str, Any]) -> Optional[str]:
"""Extract flat provider string from dict model field or top-level."""
m = job.get("model")
if isinstance(m, dict):
return m.get("provider")
return job.get("provider")
def normalize_job(job: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a job dict so the model field has a consistent type.

    After normalization ``job["model"]`` is either ``None`` or a dict:

    - only model       -> ``{"model": "xxx"}``
    - only provider    -> ``{"provider": "yyy"}``
    - both             -> ``{"model": "xxx", "provider": "yyy"}``
    - neither          -> ``None``

    The top-level "provider" key is left untouched so existing readers of
    ``job["provider"]`` keep working. The input dict is not mutated; a
    shallow copy is returned.
    """
    job = dict(job)  # shallow copy -- never mutate the caller's dict
    model = job.get("model")
    provider = job.get("provider")

    # Already normalized (model is a dict) -- nothing to do.
    if isinstance(model, dict):
        return job

    model_dict: Dict[str, str] = {}
    if isinstance(model, str):
        model_dict["model"] = model.strip()
    if isinstance(provider, str):
        model_dict["provider"] = provider.strip()

    job["model"] = model_dict if model_dict else None
    return job
# ---------------------------------------------------------------------------
# Deploy from YAML
# ---------------------------------------------------------------------------


def _jobs_changed(cur: Dict[str, Any], desired: Dict[str, Any]) -> bool:
    """Return True if ``desired`` differs from ``cur`` for deploy purposes.

    Compares prompt, schedule, model, and provider -- the fix for #375.
    Previously only prompt and schedule were compared, silently dropping
    model/provider-only YAML changes when the prompt was unchanged.

    Model and provider are compared in their flattened string form so a
    flat string and a normalized ``{"model": ..., "provider": ...}`` dict
    representation compare equal.
    """
    if cur.get("prompt") != desired.get("prompt"):
        return True
    if cur.get("schedule") != desired.get("schedule"):
        return True
    if _flat_model(cur) != _flat_model(desired):
        return True
    return _flat_provider(cur) != _flat_provider(desired)
def _parse_schedule(schedule: str) -> Dict[str, Any]:
"""Parse schedule string into structured format."""
try:
with open(jobs_file, 'r', encoding='utf-8') as f:
data = json.load(f)
except json.JSONDecodeError as e:
print(f"Error: Invalid JSON in {jobs_file}: {e}", file=sys.stderr)
from cron.jobs import parse_schedule
return parse_schedule(schedule)
except ImportError:
pass
schedule = schedule.strip()
if schedule.startswith("every "):
dur = schedule[6:].strip()
unit = dur[-1]
val = int(dur[:-1])
minutes = val * {"m": 1, "h": 60, "d": 1440}.get(unit, 1)
return {"kind": "interval", "minutes": minutes, "display": f"every {minutes}m"}
return {"kind": "cron", "expr": schedule, "display": schedule}
def deploy_from_yaml(
    config_path: Path,
    jobs_file: Path,
    dry_run: bool = False,
) -> int:
    """Sync jobs from a YAML config into jobs.json (create / update).

    Jobs are matched by (prompt, canonical schedule JSON). A matched job is
    updated when _jobs_changed() reports a difference in prompt, schedule,
    model, or provider. Unmatched specs become new jobs.

    Returns 0 on success, 1 on a usage/configuration error. In dry-run mode
    nothing is written; planned actions are printed instead.
    """
    if not HAS_YAML:
        print("Error: PyYAML required for --deploy. pip install pyyaml", file=sys.stderr)
        return 1
    if not config_path.exists():
        print(f"Error: Config not found: {config_path}", file=sys.stderr)
        return 1

    with open(config_path, "r", encoding="utf-8") as f:
        yaml_jobs = (yaml.safe_load(f) or {}).get("jobs", [])

    if jobs_file.exists():
        with open(jobs_file, "r", encoding="utf-8") as f:
            data = json.load(f)
    else:
        data = {"jobs": [], "updated_at": None}
    existing: List[Dict[str, Any]] = data.get("jobs", [])

    # Index existing jobs by prompt + canonical schedule JSON for matching.
    index: Dict[str, int] = {}
    for i, j in enumerate(existing):
        key = f"{j.get('prompt', '')}||{json.dumps(j.get('schedule', {}), sort_keys=True)}"
        index[key] = i

    created = updated = skipped = 0
    for spec in yaml_jobs:
        prompt = spec.get("prompt", "")
        schedule_str = spec.get("schedule", "")
        name = spec.get("name", "")
        model = spec.get("model")
        provider = spec.get("provider")
        skills = spec.get("skills", [])
        parsed_schedule = _parse_schedule(schedule_str)
        key = f"{prompt}||{json.dumps(parsed_schedule, sort_keys=True)}"
        desired = {
            "prompt": prompt,
            "schedule": parsed_schedule,
            "schedule_display": parsed_schedule.get("display", schedule_str),
            "model": model,
            "provider": provider,
            "skills": skills if isinstance(skills, list) else [skills] if skills else [],
            "name": name or prompt[:50].strip(),
        }
        if key in index:
            cur = existing[index[key]]
            if _jobs_changed(cur, desired):
                if dry_run:
                    print(f" WOULD UPDATE: {cur.get('id', '?')} ({cur.get('name', '?')})")
                    print(f" model: {_flat_model(cur)!r} -> {model!r}")
                    print(f" provider: {_flat_provider(cur)!r} -> {provider!r}")
                else:
                    cur.update(desired)
                # Counted in dry-run too, so the summary below is accurate.
                updated += 1
            else:
                skipped += 1
        else:
            if dry_run:
                print(f" WOULD CREATE: ({name or prompt[:50]})")
            else:
                new_job = {
                    "id": uuid.uuid4().hex[:12],
                    "enabled": True,
                    "state": "scheduled",
                    "paused_at": None,
                    "paused_reason": None,
                    "created_at": None,
                    "next_run_at": None,
                    "last_run_at": None,
                    "last_status": None,
                    "last_error": None,
                    "repeat": {"times": None, "completed": 0},
                    "deliver": "local",
                    "origin": None,
                    "base_url": None,
                    "script": None,
                    **desired,  # desired fields win over the defaults above
                }
                existing.append(new_job)
            created += 1

    if dry_run:
        print(f"DRY RUN: {created} to create, {updated} to update, {skipped} unchanged.")
        return 0

    data["jobs"] = existing
    jobs_file.parent.mkdir(parents=True, exist_ok=True)
    with open(jobs_file, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
    print(f"Deployed: {created} created, {updated} updated, {skipped} unchanged.")
    return 0
# ---------------------------------------------------------------------------
# Normalize standalone
# ---------------------------------------------------------------------------


def normalize_jobs_file(jobs_file: Path, dry_run: bool = False) -> int:
    """Normalize model field types in jobs.json via normalize_job().

    Prints one report per changed job. Returns 0 on success (including
    "nothing to do"), 1 if the jobs file does not exist. In dry-run mode
    the file is not written.
    """
    if not jobs_file.exists():
        print(f"Error: {jobs_file}", file=sys.stderr)
        return 1
    with open(jobs_file, "r", encoding="utf-8") as f:
        data = json.load(f)
    jobs = data.get("jobs", [])
    if not jobs:
        print("No jobs found.")
        return 0

    modified = 0
    for i, job in enumerate(jobs):
        orig_model = job.get("model")
        orig_provider = job.get("provider")
        normed = normalize_job(job)
        if normed.get("model") != orig_model or normed.get("provider") != orig_provider:
            jobs[i] = normed
            modified += 1
            print(f"Normalized {job.get('id', '?')} ({job.get('name', '?')}):")
            print(f" model: {orig_model!r} -> {normed.get('model')!r}")
            print(f" provider: {orig_provider!r} -> {normed.get('provider')!r}")

    if modified == 0:
        print("All jobs already consistent.")
        return 0
    if dry_run:
        print(f"DRY RUN: Would normalize {modified} jobs.")
        return 0

    data["jobs"] = jobs
    with open(jobs_file, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
    print(f"Normalized {modified} jobs in {jobs_file}")
    return 0
# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------


def main() -> int:
    """CLI entry point: dispatch to --deploy or --normalize.

    Exactly one mode is required (mutually exclusive group); returns the
    chosen handler's exit code.
    """
    parser = argparse.ArgumentParser(description="Deploy and normalize cron jobs.")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--deploy", action="store_true",
                       help="Sync jobs from YAML config to jobs.json")
    group.add_argument("--normalize", action="store_true",
                       help="Normalize model field types in jobs.json")
    parser.add_argument("--config", type=Path,
                        default=Path.home() / ".hermes" / "cron-jobs.yaml",
                        help="Path to cron-jobs.yaml")
    parser.add_argument("--jobs-file", type=Path,
                        default=Path.home() / ".hermes" / "cron" / "jobs.json",
                        help="Path to jobs.json")
    parser.add_argument("--dry-run", action="store_true",
                        help="Show changes without modifying files")
    args = parser.parse_args()

    if args.dry_run:
        print("DRY RUN MODE.")
        print()

    if args.deploy:
        return deploy_from_yaml(args.config, args.jobs_file, args.dry_run)
    return normalize_jobs_file(args.jobs_file, args.dry_run)
if __name__ == "__main__":

View File

@@ -517,71 +517,3 @@ def resolve_provider_full(
pass
return None
# -- Runtime classification ---------------------------------------------------

# Providers that are definitively cloud-hosted (not local).
# Used by _classify_runtime() to distinguish cloud vs unknown.
# Seeded with every provider key from HERMES_OVERLAYS (defined elsewhere in
# this module), plus hand-maintained aliases. _classify_runtime() matches
# lowercased provider names and lowercased model-string prefixes against
# this set, so all entries must be lowercase.
_CLOUD_PREFIXES: frozenset[str] = frozenset(HERMES_OVERLAYS.keys()) | frozenset({
    # Common aliases that normalize to cloud providers
    "openai", "gemini", "google", "google-gemini", "google-ai-studio",
    "claude", "claude-code", "copilot", "github", "github-copilot",
    "glm", "z-ai", "z.ai", "zhipu", "zai",
    "kimi", "kimi-coding", "moonshot",
    "minimax", "minimax-china", "minimax_cn",
    "deep-seek",
    "dashscope", "aliyun", "qwen", "alibaba-cloud", "alibaba",
    "hf", "hugging-face", "huggingface-hub", "huggingface",
    "ai-gateway", "aigateway", "vercel-ai-gateway",
    "opencode-zen", "zen",
    "opencode-go-sub",
    "kilocode", "kilo-code", "kilo-gateway", "kilo",
})

# Providers that are definitively local (self-hosted, no external API).
# A provider in neither set classifies as "unknown", never "local".
_LOCAL_PROVIDERS: frozenset[str] = frozenset({
    "ollama", "local",
    "vllm", "llamacpp", "llama.cpp", "llama-cpp", "lmstudio", "lm-studio",
})
def _classify_runtime(provider: Optional[str], model: str) -> str:
    """Classify a provider/model pair as ``"cloud"``, ``"local"``, or ``"unknown"``.

    Resolution order:

    1. Provider names a known local runtime -> ``"local"``.
    2. Provider names a known cloud service -> ``"cloud"``.
    3. Any other non-empty provider -> ``"unknown"`` (never assume local;
       defaulting to ``"local"`` here was the bug fixed for #556).
    4. No provider: a model like ``"openai/gpt-4o"`` whose prefix is a
       known cloud provider -> ``"cloud"``.
    5. Otherwise -> ``"unknown"``.
    """
    prov = (provider or "").strip().lower()
    if prov:
        if prov in _LOCAL_PROVIDERS:
            return "local"
        # Unrecognised providers are "unknown", not "local" (see #556).
        return "cloud" if prov in _CLOUD_PREFIXES else "unknown"

    # No provider given: try to infer from the model string's "<prefix>/<name>".
    candidate = (model or "").strip().lower()
    prefix, slash, _rest = candidate.partition("/")
    if slash and prefix in _CLOUD_PREFIXES:
        return "cloud"

    # Not enough context to decide.
    return "unknown"

View File

@@ -1,92 +0,0 @@
"""Tests for _classify_runtime() edge cases.
Covers the bug reported in #556: unknown provider with a model string
incorrectly returned "local" instead of "unknown".
"""
import pytest
from hermes_cli.providers import _classify_runtime
class TestClassifyRuntimeLocalProviders:
    """Known self-hosted providers must always classify as 'local'."""

    def test_ollama_no_model(self):
        outcome = _classify_runtime("ollama", "")
        assert outcome == "local"

    def test_ollama_with_model(self):
        outcome = _classify_runtime("ollama", "llama3:8b")
        assert outcome == "local"

    def test_local_provider_no_model(self):
        outcome = _classify_runtime("local", "")
        assert outcome == "local"

    def test_local_provider_with_model(self):
        outcome = _classify_runtime("local", "my-model")
        assert outcome == "local"

    def test_vllm_provider(self):
        outcome = _classify_runtime("vllm", "meta/llama-3")
        assert outcome == "local"

    def test_llamacpp_provider(self):
        outcome = _classify_runtime("llamacpp", "mistral")
        assert outcome == "local"
class TestClassifyRuntimeCloudProviders:
    """Known hosted providers must always classify as 'cloud'."""

    def test_anthropic_provider(self):
        outcome = _classify_runtime("anthropic", "claude-opus-4-6")
        assert outcome == "cloud"

    def test_openrouter_provider(self):
        outcome = _classify_runtime("openrouter", "anthropic/claude-opus-4-6")
        assert outcome == "cloud"

    def test_nous_provider(self):
        outcome = _classify_runtime("nous", "hermes-3")
        assert outcome == "cloud"

    def test_gemini_provider(self):
        outcome = _classify_runtime("gemini", "gemini-pro")
        assert outcome == "cloud"

    def test_deepseek_provider(self):
        outcome = _classify_runtime("deepseek", "deepseek-chat")
        assert outcome == "cloud"
class TestClassifyRuntimeUnknownProviders:
    """Regression tests for #556: an unrecognised provider must classify as
    'unknown' rather than falling through to 'local'."""

    def test_unknown_provider_with_model(self):
        # The core #556 case: 'custom' provider plus a model string.
        outcome = _classify_runtime("custom", "my-model")
        assert outcome == "unknown"

    def test_unknown_provider_no_model(self):
        outcome = _classify_runtime("custom", "")
        assert outcome == "unknown"

    def test_arbitrary_provider_with_model(self):
        # Any unrecognised provider string with a model is 'unknown'.
        outcome = _classify_runtime("my-private-llm", "some-model")
        assert outcome == "unknown"

    def test_arbitrary_provider_no_model(self):
        outcome = _classify_runtime("my-private-llm", "")
        assert outcome == "unknown"

    def test_whitespace_only_provider_treated_as_empty(self):
        # Whitespace-only provider counts as absent; with no model -> unknown.
        outcome = _classify_runtime(" ", "")
        assert outcome == "unknown"
class TestClassifyRuntimeEmptyProvider:
    """With no provider, only a recognised cloud model prefix yields 'cloud'."""

    def test_empty_provider_cloud_prefixed_model(self):
        outcome = _classify_runtime("", "openrouter/gpt-4o")
        assert outcome == "cloud"

    def test_none_provider_cloud_prefixed_model(self):
        outcome = _classify_runtime(None, "anthropic/claude-opus-4-6")
        assert outcome == "cloud"

    def test_empty_provider_no_model(self):
        outcome = _classify_runtime("", "")
        assert outcome == "unknown"

    def test_none_provider_no_model(self):
        outcome = _classify_runtime(None, "")
        assert outcome == "unknown"

    def test_empty_provider_non_cloud_prefixed_model(self):
        # No provider and no recognised prefix in the model -> unknown.
        outcome = _classify_runtime("", "my-model")
        assert outcome == "unknown"

    def test_empty_provider_model_with_unknown_prefix(self):
        # A "<prefix>/<name>" model whose prefix is unrecognised -> unknown.
        outcome = _classify_runtime("", "myprivate/llm-7b")
        assert outcome == "unknown"