Compare commits
1 Commits
fix/468-cr
...
burn/376-1
| Author | SHA1 | Date | |
|---|---|---|---|
| 3c66333c94 |
@@ -13,7 +13,6 @@ import concurrent.futures
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
@@ -157,27 +156,6 @@ _KNOWN_DELIVERY_PLATFORMS = frozenset({
|
||||
|
||||
from cron.jobs import get_due_jobs, mark_job_run, save_job_output, advance_next_run
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Model context guard
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
CRON_MIN_CONTEXT_TOKENS = 4096
|
||||
|
||||
|
||||
class ModelContextError(ValueError):
|
||||
"""Raised when a job's model has insufficient context for cron execution."""
|
||||
pass
|
||||
|
||||
|
||||
def _check_model_context_compat(model: str, context_length: int) -> None:
|
||||
"""Raise ModelContextError if the model context is below the cron minimum."""
|
||||
if context_length < CRON_MIN_CONTEXT_TOKENS:
|
||||
raise ModelContextError(
|
||||
f"Model '{model}' context ({context_length} tokens) is below the "
|
||||
f"minimum {CRON_MIN_CONTEXT_TOKENS} tokens required for cron jobs."
|
||||
)
|
||||
|
||||
|
||||
# Sentinel: when a cron agent has nothing new to report, it can start its
|
||||
# response with this marker to suppress delivery. Output is still saved
|
||||
# locally for audit.
|
||||
@@ -566,55 +544,6 @@ def _run_job_script(script_path: str) -> tuple[bool, str]:
|
||||
return False, f"Script execution failed: {exc}"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Cloud context warning — detect local service refs in cloud cron prompts
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Regex patterns (matched case-insensitively, see _LOCAL_SERVICE_RE below)
# that indicate a prompt depends on services reachable only from the local
# machine: loopback addresses, localhost URLs, and Ollama.
# NOTE(review): the phrase patterns ('check.*ollama', 'connect.*local',
# 'hermes.*gateway.*local') are deliberately broad and can match unrelated
# text such as "connect to local storage" — presumably acceptable for a
# warning-only feature; confirm false-positive tolerance.
_LOCAL_SERVICE_PATTERNS = [
    r'localhost:\d{2,5}',            # localhost with an explicit port
    r'127\.0\.0\.\d{1,3}:\d{2,5}',   # any 127.0.0.x loopback address + port
    r'0\.0\.0\.0:\d{2,5}',           # wildcard bind address + port
    r'\bollama\b',                   # any mention of Ollama
    r'curl\s+.*localhost',           # shell fetches against localhost
    r'wget\s+.*localhost',
    r'http://localhost',             # localhost URLs, with or without port
    r'https?://127\.',
    r'https?://0\.0\.0\.0',
    r'check.*ollama',                # natural-language phrasings
    r'connect.*local',
    r'hermes.*gateway.*local',
]

# Compiled once at import time so per-prompt scanning only pays for search.
_LOCAL_SERVICE_RE = [re.compile(p, re.IGNORECASE) for p in _LOCAL_SERVICE_PATTERNS]
||||
def _detect_local_service_refs(prompt: str) -> list[str]:
    """Scan a prompt for references to local services (Ollama, localhost, etc.).

    Returns list of matched patterns for logging.
    """
    return [regex.pattern for regex in _LOCAL_SERVICE_RE if regex.search(prompt)]
||||
def _inject_cloud_context(prompt: str, local_refs: list[str]) -> str:
|
||||
"""Prepend a warning when cron runs on cloud but prompt refs local services.
|
||||
|
||||
The agent reports the limitation instead of wasting iterations on doomed connections.
|
||||
"""
|
||||
warning = (
|
||||
"[SYSTEM NOTE: You are running on a cloud endpoint, but your prompt references "
|
||||
"local services (localhost/Ollama). You cannot reach localhost from a cloud "
|
||||
"endpoint. Report this limitation to the user and suggest running the job on "
|
||||
"a local endpoint instead. Do NOT attempt to connect to localhost — it will "
|
||||
"timeout and waste your iteration budget.]\n\n"
|
||||
)
|
||||
return warning + prompt
|
||||
|
||||
|
||||
def _build_job_prompt(job: dict) -> str:
|
||||
"""Build the effective prompt for a cron job, optionally loading one or more skills first."""
|
||||
prompt = job.get("prompt", "")
|
||||
@@ -833,16 +762,6 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
|
||||
message = format_runtime_provider_error(exc)
|
||||
raise RuntimeError(message) from exc
|
||||
|
||||
# Cloud context warning: if running on cloud but prompt refs local services,
|
||||
# inject a warning so the agent reports the limitation instead of wasting
|
||||
# iterations on doomed connections. (Fixes #378, #456)
|
||||
base_url = runtime.get("base_url") or ""
|
||||
is_cloud = not any(h in base_url for h in ("localhost", "127.0.0.1", "0.0.0.0", "::1"))
|
||||
local_refs = _detect_local_service_refs(prompt)
|
||||
if is_cloud and local_refs:
|
||||
logger.info("Job '%s': cloud endpoint + local service refs detected, injecting warning", job_name)
|
||||
prompt = _inject_cloud_context(prompt, local_refs)
|
||||
|
||||
from agent.smart_model_routing import resolve_turn_route
|
||||
turn_route = resolve_turn_route(
|
||||
prompt,
|
||||
|
||||
154
deploy-crons.py
Normal file
154
deploy-crons.py
Normal file
@@ -0,0 +1,154 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
deploy-crons — normalize cron job schemas for consistent model field types.
|
||||
|
||||
This script ensures that the model field in jobs.json is always a dict when
|
||||
either model or provider is specified, preventing schema inconsistency.
|
||||
|
||||
Usage:
|
||||
python deploy-crons.py [--dry-run] [--jobs-file PATH]
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
|
||||
def normalize_job(job: Dict[str, Any]) -> Dict[str, Any]:
    """
    Normalize a job dict so its ``model`` field is always a dict (or None).

    Before normalization, ``model`` and/or ``provider`` may be raw top-level
    strings (an inconsistent schema).  After normalization:

    - If model exists:    model = {"model": "xxx"}
    - If provider exists: model = {"provider": "yyy"}
    - If both exist:      model = {"model": "xxx", "provider": "yyy"}
    - If neither:         model = None

    The top-level ``provider`` key is deliberately left in place so existing
    code that reads ``job["provider"]`` keeps working (backward compatibility).

    Args:
        job: The job dict to normalize; it is not mutated.

    Returns:
        A shallow copy of ``job`` with a normalized ``model`` field.
    """
    job = dict(job)  # shallow copy — never mutate the caller's dict

    model = job.get("model")
    provider = job.get("provider")

    # Already normalized (model is a dict) — nothing to do.
    if isinstance(model, dict):
        return job

    # Build the normalized model dict from whichever string fields exist.
    # (isinstance(x, str) already excludes None, so no separate None check.)
    model_dict: Dict[str, str] = {}
    if isinstance(model, str):
        model_dict["model"] = model.strip()
    if isinstance(provider, str):
        model_dict["provider"] = provider.strip()

    # NOTE(review): a non-str, non-dict model value (e.g. a list) is silently
    # replaced here — same as the original behavior; confirm that is intended.
    job["model"] = model_dict or None
    return job
|
||||
def normalize_jobs_file(jobs_file: Path, dry_run: bool = False) -> int:
    """
    Normalize all jobs in a jobs.json file.

    Args:
        jobs_file: Path to the jobs.json file.
        dry_run: When True, report what would change without writing the file.

    Returns:
        A process exit code: 0 on success (including "nothing to change"),
        1 on a missing file, invalid JSON, or a write failure.
        (The original docstring claimed this returned the modified-job count,
        which was wrong — callers pass it straight to ``sys.exit``.)
    """
    if not jobs_file.exists():
        print(f"Error: Jobs file not found: {jobs_file}", file=sys.stderr)
        return 1

    try:
        with open(jobs_file, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON in {jobs_file}: {e}", file=sys.stderr)
        return 1

    jobs = data.get("jobs", [])
    if not jobs:
        print("No jobs found in file.")
        return 0

    modified_count = 0
    for i, job in enumerate(jobs):
        original_model = job.get("model")
        original_provider = job.get("provider")

        normalized_job = normalize_job(job)

        # normalize_job never touches the top-level "provider" key, so only
        # the "model" field can actually differ from the original.
        if normalized_job.get("model") != original_model:
            jobs[i] = normalized_job
            modified_count += 1

            job_id = job.get("id", "?")
            job_name = job.get("name", "(unnamed)")
            print(f"Normalized job {job_id} ({job_name}):")
            print(f" model: {original_model!r} -> {normalized_job.get('model')!r}")
            print(f" provider: {original_provider!r} -> {normalized_job.get('provider')!r}")

    if modified_count == 0:
        print("All jobs already have consistent model field types.")
        return 0

    if dry_run:
        print(f"DRY RUN: Would normalize {modified_count} jobs.")
        return 0

    # Write the normalized jobs back to the file.
    data["jobs"] = jobs
    try:
        with open(jobs_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, ensure_ascii=False)
        print(f"Normalized {modified_count} jobs in {jobs_file}")
        return 0
    except Exception as e:
        print(f"Error writing to {jobs_file}: {e}", file=sys.stderr)
        return 1
|
||||
def main():
    """Command-line entry point for the cron-job schema normalizer."""
    default_jobs_path = Path.home() / ".hermes" / "cron" / "jobs.json"

    parser = argparse.ArgumentParser(
        description="Normalize cron job schemas for consistent model field types."
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be changed without modifying the file.",
    )
    parser.add_argument(
        "--jobs-file",
        type=Path,
        default=default_jobs_path,
        help="Path to jobs.json file (default: ~/.hermes/cron/jobs.json)",
    )
    args = parser.parse_args()

    if args.dry_run:
        print("DRY RUN MODE — no changes will be made.")
        print()

    return normalize_jobs_file(args.jobs_file, args.dry_run)


if __name__ == "__main__":
    sys.exit(main())
|
||||
@@ -1,120 +0,0 @@
|
||||
"""Tests for cron cloud context warning injection (fix #378, #456).
|
||||
|
||||
When a cron job runs on a cloud endpoint but its prompt references local
|
||||
services (Ollama, localhost, etc.), inject a warning so the agent reports
|
||||
the limitation instead of wasting iterations on doomed connections.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from cron.scheduler import (
|
||||
_detect_local_service_refs,
|
||||
_inject_cloud_context,
|
||||
_LOCAL_SERVICE_PATTERNS,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Pattern detection
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestDetectLocalServiceRefs:
    """Prompts mentioning local services must yield at least one matched pattern;
    remote-only prompts must yield none."""

    def test_localhost_with_port(self):
        matched = _detect_local_service_refs("Check http://localhost:8080/status")
        assert matched
        assert any("localhost" in pattern for pattern in matched)

    def test_127_address(self):
        assert _detect_local_service_refs("Connect to 127.0.0.1:11434")

    def test_ollama_reference(self):
        matched = _detect_local_service_refs("Run this on Ollama with gemma3")
        assert matched
        assert any("ollama" in pattern.lower() for pattern in matched)

    def test_curl_localhost(self):
        assert _detect_local_service_refs("curl localhost:3000/api/data")

    def test_wget_localhost(self):
        assert _detect_local_service_refs("wget http://localhost/file.txt")

    def test_http_localhost(self):
        assert _detect_local_service_refs("http://localhost:8642/health")

    def test_https_127(self):
        assert _detect_local_service_refs("https://127.0.0.1:443/secure")

    def test_0000_address(self):
        assert _detect_local_service_refs("Bind to 0.0.0.0:9090")

    def test_no_match_for_remote(self):
        assert not _detect_local_service_refs("Check https://api.openai.com/v1/models")

    def test_no_match_for_gitea(self):
        assert not _detect_local_service_refs("Query forge.alexanderwhitestone.com for issues")

    def test_no_match_empty(self):
        assert not _detect_local_service_refs("")

    def test_check_ollama_phrase(self):
        assert _detect_local_service_refs("First check Ollama is running")

    def test_connect_local_phrase(self):
        assert _detect_local_service_refs("Connect to local Ollama server")
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Warning injection
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestInjectCloudContext:
    """The injected warning must come first and keep the original prompt intact."""

    def test_prepends_warning(self):
        prompt = "Run a health check on localhost:8080"
        injected = _inject_cloud_context(prompt, _detect_local_service_refs(prompt))
        assert "SYSTEM NOTE" in injected
        assert "cloud endpoint" in injected
        assert prompt in injected

    def test_warning_is_first(self):
        prompt = "Check localhost:11434"
        injected = _inject_cloud_context(prompt, _detect_local_service_refs(prompt))
        assert injected.startswith("[SYSTEM NOTE")

    def test_preserves_original_prompt(self):
        prompt = "Do something with Ollama and then report results"
        injected = _inject_cloud_context(prompt, _detect_local_service_refs(prompt))
        assert "Do something with Ollama" in injected

    def test_mentions_cannot_reach(self):
        prompt = "curl localhost:8080"
        injected = _inject_cloud_context(prompt, _detect_local_service_refs(prompt))
        lowered = injected.lower()
        assert "cannot reach" in lowered or "cannot" in lowered
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Pattern coverage
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestPatternCoverage:
    """Sanity checks on the pattern list itself."""

    def test_at_least_10_patterns(self):
        assert len(_LOCAL_SERVICE_PATTERNS) >= 10

    def test_patterns_are_strings(self):
        for pattern in _LOCAL_SERVICE_PATTERNS:
            assert isinstance(pattern, str) and pattern
|
||||
Reference in New Issue
Block a user