Compare commits

3 Commits

whip/378-1 ... fix/500-cl

| Author | SHA1 | Date |
|---|---|---|
| | 568a735f65 | |
| | 954fd992eb | |
| | f35f56e397 | |
cron/scheduler.py

```diff
@@ -13,7 +13,6 @@ import concurrent.futures
 import json
 import logging
 import os
 import re
 import subprocess
 import sys
 
```
```diff
@@ -42,6 +41,64 @@ from agent.model_metadata import is_local_endpoint
 
 logger = logging.getLogger(__name__)
 
+# Minimum context tokens required for cron job execution
+CRON_MIN_CONTEXT_TOKENS = 500
+
+
+class ModelContextError(Exception):
+    """Raised when a model does not have enough context tokens for a cron job."""
+    pass
+
+
+# =====================================================================
+# Cloud Context Warning — detect local service refs in cloud prompts
+# =====================================================================
+
+import re as _re
+
+_LOCAL_SERVICE_PATTERNS = [
+    _re.compile(r'\blocalhost:\d+', _re.IGNORECASE),
+    _re.compile(r'\b127\.\d+\.\d+\.\d+:\d+', _re.IGNORECASE),
+    _re.compile(r'\b0\.0\.0\.0:\d+', _re.IGNORECASE),
+    _re.compile(r'\bollama\b', _re.IGNORECASE),
+    _re.compile(r'\bcurl\s+localhost\b', _re.IGNORECASE),
+    _re.compile(r'\bwget\s+localhost\b', _re.IGNORECASE),
+    _re.compile(r'\bhttp://localhost\b', _re.IGNORECASE),
+    _re.compile(r'\bhttps?://127\.\d+\.\d+\.\d+\b', _re.IGNORECASE),
+    _re.compile(r'\bcheck\s+ollama\b', _re.IGNORECASE),
+    _re.compile(r'\bconnect\s+local\b', _re.IGNORECASE),
+    _re.compile(r'\bhermes\s+gateway\s+local\b', _re.IGNORECASE),
+    _re.compile(r'\blocal\s+model\b', _re.IGNORECASE),
+]
+
+_CLOUD_CONTEXT_WARNING = (
+    "\n\n[SYSTEM NOTE: This cron job is running on a CLOUD inference endpoint. "
+    "Local services (Ollama, localhost, local gateway) are NOT accessible from "
+    "this environment. Do not attempt to connect to localhost, run curl/wget "
+    "against local ports, or check local model availability. Report the "
+    "limitation and focus on tasks achievable remotely.]\n"
+)
+
+
+def _detect_local_service_refs(text: str) -> list[str]:
+    """Detect references to local services in prompt text."""
+    refs = []
+    for pat in _LOCAL_SERVICE_PATTERNS:
+        if pat.search(text):
+            refs.append(pat.pattern)
+    return refs
+
+
+def _inject_cloud_context(prompt: str, base_url: str) -> str:
+    """If running on cloud but prompt references local services, inject warning."""
+    if is_local_endpoint(base_url):
+        return prompt
+    refs = _detect_local_service_refs(prompt)
+    if refs:
+        logger.info("Cloud endpoint + local service refs detected (%d patterns), injecting warning", len(refs))
+        return _CLOUD_CONTEXT_WARNING + prompt
+    return prompt
+
+
 # =====================================================================
 # Deploy Sync Guard
```
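The two new helpers compose in one direction: detect, then prepend. A quick illustrative sketch, not part of the diff, that imports them the same way the new test file below does:

```python
# Illustrative only — exercises the helpers added in the hunk above.
from cron.scheduler import _detect_local_service_refs, _inject_cloud_context

prompt = "Check ollama status on localhost:11434"

# Matches r'\bollama\b', r'\blocalhost:\d+', and r'\bcheck\s+ollama\b'.
print(_detect_local_service_refs(prompt))

# Cloud base_url + local refs: the SYSTEM NOTE is prepended to the prompt.
out = _inject_cloud_context(prompt, "https://api.openai.com/v1")
assert out.startswith("\n\n[SYSTEM NOTE")

# Local base_url: the prompt passes through unchanged.
assert _inject_cloud_context(prompt, "http://localhost:11434/v1") == prompt
```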
```diff
@@ -644,66 +701,6 @@ def _build_job_prompt(job: dict) -> str:
     return "\n".join(parts)
 
 
-# Well-known local inference endpoints to probe for prefer_local jobs
-_LOCAL_ENDPOINTS = [
-    {"name": "ollama", "base_url": "http://localhost:11434/v1", "health": "http://localhost:11434/api/tags"},
-    {"name": "llama-cpp", "base_url": "http://localhost:8080/v1", "health": "http://localhost:8080/health"},
-    {"name": "vllm", "base_url": "http://localhost:8000/v1", "health": "http://localhost:8000/v1/models"},
-]
-
-
-def _probe_local_endpoint(url: str, timeout: float = 2.0) -> bool:
-    """Quick probe to check if a local inference server is running."""
-    import urllib.request
-    try:
-        req = urllib.request.Request(url)
-        with urllib.request.urlopen(req, timeout=timeout) as resp:
-            return resp.status == 200
-    except Exception:
-        return False
-
-
-def _resolve_prefer_local(job: dict) -> tuple[Optional[str], Optional[str], str]:
-    """For jobs with prefer_local=true, find a running local inference server.
-
-    Returns (provider_override, base_url_override, status_message).
-    None values mean "use default resolution".
-    """
-    if not job.get("prefer_local"):
-        return None, None, ""
-
-    # If the job already specifies an explicit base_url and it's local, honor it
-    explicit_url = job.get("base_url", "")
-    if explicit_url:
-        from agent.model_metadata import is_local_endpoint
-        if is_local_endpoint(explicit_url):
-            return None, None, f"prefer_local: explicit base_url {explicit_url} is already local"
-
-    # Probe well-known local endpoints
-    for ep in _LOCAL_ENDPOINTS:
-        if _probe_local_endpoint(ep["health"]):
-            logger.info(
-                "Job '%s': prefer_local → found %s at %s",
-                job.get("name", "?"), ep["name"], ep["base_url"],
-            )
-            return None, ep["base_url"], (
-                f"prefer_local: using {ep['name']} at {ep['base_url']}"
-            )
-
-    # No local server found — warn and fall back to default
-    logger.warning(
-        "Job '%s': prefer_local=true but no local inference server found "
-        "(probed: %s). Falling back to default provider.",
-        job.get("name", "?"),
-        ", ".join(ep["name"] for ep in _LOCAL_ENDPOINTS),
-    )
-    return None, None, (
-        "prefer_local: no local server found (tried: "
-        + ", ".join(ep["name"] for ep in _LOCAL_ENDPOINTS)
-        + "). Using default provider."
-    )
 
 
 def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
     """
     Execute a single cron job.
```
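For reference, the probe this branch drops is a plain urllib GET against each server's health URL; anything other than a clean HTTP 200 within the timeout counts as "not running". A standalone sketch of the same check (the port is arbitrary):

```python
# Standalone sketch of the removed _probe_local_endpoint logic.
import urllib.request

def probe(url: str, timeout: float = 2.0) -> bool:
    try:
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            return resp.status == 200  # only HTTP 200 counts as "up"
    except Exception:
        return False  # connection refused, timeout, bad URL, non-2xx

# False unless an Ollama instance is actually listening on 11434.
print(probe("http://localhost:11434/api/tags", timeout=0.5))
```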
```diff
@@ -825,12 +822,6 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
         pr = _cfg.get("provider_routing", {})
         smart_routing = _cfg.get("smart_model_routing", {}) or {}
 
-        # prefer_local: if the job declares prefer_local=true, probe for a
-        # local inference server and override the base_url when found. (#378)
-        _pl_provider, _pl_base_url, _pl_status = _resolve_prefer_local(job)
-        if _pl_status:
-            logger.info("Job '%s': %s", job_name, _pl_status)
-
         from hermes_cli.runtime_provider import (
             resolve_runtime_provider,
             format_runtime_provider_error,
```
```diff
@@ -839,10 +830,7 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
         runtime_kwargs = {
             "requested": job.get("provider") or os.getenv("HERMES_INFERENCE_PROVIDER"),
         }
-        # prefer_local override: use the discovered local endpoint
-        if _pl_base_url:
-            runtime_kwargs["explicit_base_url"] = _pl_base_url
-        elif job.get("base_url"):
+        if job.get("base_url"):
             runtime_kwargs["explicit_base_url"] = job.get("base_url")
         runtime = resolve_runtime_provider(**runtime_kwargs)
     except Exception as exc:
```
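With the prefer_local branch gone, base_url resolution collapses to a single explicit check. A sketch of the surviving right-hand-side logic (the job dict here is a stand-in; resolve_runtime_provider is the import shown in the previous hunk):

```python
# Sketch of the post-change wiring; job values are stand-ins.
import os

job = {"provider": None, "base_url": "http://localhost:11434/v1"}

runtime_kwargs = {
    "requested": job.get("provider") or os.getenv("HERMES_INFERENCE_PROVIDER"),
}
if job.get("base_url"):
    runtime_kwargs["explicit_base_url"] = job.get("base_url")

# runtime = resolve_runtime_provider(**runtime_kwargs)  # as in the diff
print(runtime_kwargs)
```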
```diff
@@ -887,6 +875,9 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
             job_name,
         )
 
+        # Inject cloud-context warning if prompt references local services (#468)
+        prompt = _inject_cloud_context(prompt, _runtime_base_url)
+
         _agent_kwargs = _safe_agent_kwargs({
             "model": turn_route["model"],
             "api_key": turn_route["runtime"].get("api_key"),
```
run_agent.py (28 changed lines)
```diff
@@ -1001,30 +1001,10 @@ class AIAgent:
         self._session_db = session_db
         self._parent_session_id = parent_session_id
         self._last_flushed_db_idx = 0  # tracks DB-write cursor to prevent duplicate writes
-        if self._session_db:
-            try:
-                self._session_db.create_session(
-                    session_id=self.session_id,
-                    source=self.platform or os.environ.get("HERMES_SESSION_SOURCE", "cli"),
-                    model=self.model,
-                    model_config={
-                        "max_iterations": self.max_iterations,
-                        "reasoning_config": reasoning_config,
-                        "max_tokens": max_tokens,
-                    },
-                    user_id=None,
-                    parent_session_id=self._parent_session_id,
-                )
-            except Exception as e:
-                # Transient SQLite lock contention (e.g. CLI and gateway writing
-                # concurrently) must NOT permanently disable session_search for
-                # this agent. Keep _session_db alive — subsequent message
-                # flushes and session_search calls will still work once the
-                # lock clears. The session row may be missing from the index
-                # for this run, but that is recoverable (flushes upsert rows).
-                logger.warning(
-                    "Session DB create_session failed (session_search still available): %s", e
-                )
+        # Lazy session creation: defer until first message flush (#314).
+        # _flush_messages_to_session_db() calls ensure_session() which uses
+        # INSERT OR IGNORE — creating the row only when messages arrive.
+        # This eliminates 32% of sessions that are created but never used.
 
         # In-memory todo list for task planning (one per agent/session)
         from tools.todo_tool import TodoStore
```
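The replacement comment leans on ensure_session() being idempotent. A minimal sketch of that lazy-creation pattern (the table layout and function shape here are illustrative assumptions, not the project's actual schema):

```python
# Minimal sketch of lazy session creation via INSERT OR IGNORE.
# Schema is an assumption for illustration only.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE sessions (session_id TEXT PRIMARY KEY, source TEXT)")

def ensure_session(session_id: str, source: str) -> None:
    # Idempotent: a repeat call (or a concurrent writer) is a no-op.
    conn.execute(
        "INSERT OR IGNORE INTO sessions (session_id, source) VALUES (?, ?)",
        (session_id, source),
    )

ensure_session("abc123", "cli")   # first message flush creates the row
ensure_session("abc123", "cli")   # second flush is ignored
print(conn.execute("SELECT COUNT(*) FROM sessions").fetchone()[0])  # 1
```

Deferring the row to the first flush also sidesteps the SQLite lock contention the old except block worked around: a session that never writes a message never touches the DB at all.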
tests/cron/test_cron_cloud_context.py (new file, 83 lines)
```diff
@@ -0,0 +1,83 @@
+"""Tests for cron cloud-context warning injection (#468)."""
+
+import pytest
+
+from cron.scheduler import (
+    _LOCAL_SERVICE_PATTERNS,
+    _detect_local_service_refs,
+    _inject_cloud_context,
+    _CLOUD_CONTEXT_WARNING,
+)
+
+
+class TestDetectLocalServiceRefs:
+    """Test local service reference detection."""
+
+    def test_detects_localhost_with_port(self):
+        refs = _detect_local_service_refs("Connect to localhost:11434")
+        assert len(refs) > 0
+
+    def test_detects_127_address(self):
+        refs = _detect_local_service_refs("Check http://127.0.0.1:8080/health")
+        assert len(refs) > 0
+
+    def test_detects_ollama(self):
+        refs = _detect_local_service_refs("Run ollama pull gemma4")
+        assert len(refs) > 0
+
+    def test_detects_curl_localhost(self):
+        refs = _detect_local_service_refs("curl localhost:11434/api/tags")
+        assert len(refs) > 0
+
+    def test_detects_wget_localhost(self):
+        refs = _detect_local_service_refs("wget localhost:8080/data")
+        assert len(refs) > 0
+
+    def test_detects_http_localhost(self):
+        refs = _detect_local_service_refs("http://localhost:3000")
+        assert len(refs) > 0
+
+    def test_detects_local_model(self):
+        refs = _detect_local_service_refs("Use the local model for inference")
+        assert len(refs) > 0
+
+    def test_no_refs_returns_empty(self):
+        refs = _detect_local_service_refs("Search the web for Python tutorials")
+        assert len(refs) == 0
+
+    def test_case_insensitive(self):
+        refs = _detect_local_service_refs("OLLAMA is running on LocalHost:11434")
+        assert len(refs) > 0
+
+
+class TestInjectCloudContext:
+    """Test cloud context warning injection."""
+
+    def test_no_warning_on_local_endpoint(self):
+        prompt = "Check ollama on localhost:11434"
+        result = _inject_cloud_context(prompt, "http://localhost:11434/v1")
+        assert result == prompt  # No injection for local endpoints
+
+    def test_no_warning_when_no_local_refs(self):
+        prompt = "Search the web for news"
+        result = _inject_cloud_context(prompt, "https://api.openai.com/v1")
+        assert result == prompt
+
+    def test_injects_warning_on_cloud_with_local_refs(self):
+        prompt = "Check ollama status on localhost:11434"
+        result = _inject_cloud_context(prompt, "https://api.openai.com/v1")
+        assert _CLOUD_CONTEXT_WARNING in result
+        assert prompt in result
+        assert result.startswith(_CLOUD_CONTEXT_WARNING)
+
+    def test_nous_cloud_injects_warning(self):
+        prompt = "curl localhost:11434/api/tags"
+        result = _inject_cloud_context(prompt, "https://inference-api.nousresearch.com/v1")
+        assert _CLOUD_CONTEXT_WARNING in result
+
+    def test_warning_content(self):
+        prompt = "local model check"
+        result = _inject_cloud_context(prompt, "https://api.example.com/v1")
+        assert "CLOUD" in result
+        assert "NOT accessible" in result
+        assert "localhost" in result
```
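Assuming the repo's usual pytest layout, the new suite runs standalone with `pytest tests/cron/test_cron_cloud_context.py -q`; none of its cases need a live local server, since both helpers are pure functions over the prompt string and the base_url.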
Deleted file (116 lines)

```diff
@@ -1,116 +0,0 @@
-"""Tests for cron prefer_local auto-routing (#378).
-
-Jobs with prefer_local=true should automatically route to a local inference
-server (Ollama, llama.cpp, vllm) when one is available, instead of falling
-through to the cloud default.
-"""
-
-import re
-import pytest
-
-
-# Patterns mirrored from scheduler for test isolation
-_LOCAL_ENDPOINTS = [
-    {"name": "ollama", "base_url": "http://localhost:11434/v1", "health": "http://localhost:11434/api/tags"},
-    {"name": "llama-cpp", "base_url": "http://localhost:8080/v1", "health": "http://localhost:8080/health"},
-    {"name": "vllm", "base_url": "http://localhost:8000/v1", "health": "http://localhost:8000/v1/models"},
-]
-
-
-def _probe_local_endpoint(url: str, timeout: float = 2.0) -> bool:
-    import urllib.request
-    try:
-        req = urllib.request.Request(url)
-        with urllib.request.urlopen(req, timeout=timeout) as resp:
-            return resp.status == 200
-    except Exception:
-        return False
-
-
-def _is_local_endpoint(base_url: str) -> bool:
-    if not base_url:
-        return False
-    from urllib.parse import urlparse
-    parsed = urlparse(base_url)
-    host = (parsed.hostname or "").lower()
-    return host in ("localhost", "127.0.0.1", "0.0.0.0") or (
-        host.startswith("10.") or host.startswith("192.168.") or
-        any(host.startswith(f"172.{i}.") for i in range(16, 32))
-    )
-
-
-def _resolve_prefer_local(job: dict):
-    if not job.get("prefer_local"):
-        return None, None, ""
-    explicit_url = job.get("base_url", "")
-    if explicit_url and _is_local_endpoint(explicit_url):
-        return None, None, f"prefer_local: explicit base_url {explicit_url} is already local"
-    for ep in _LOCAL_ENDPOINTS:
-        if _probe_local_endpoint(ep["health"], timeout=0.5):
-            return None, ep["base_url"], f"prefer_local: using {ep['name']} at {ep['base_url']}"
-    return None, None, "prefer_local: no local server found"
-
-
-class TestProbeLocalEndpoint:
-    def test_unreachable_returns_false(self):
-        """A port with nothing listening should return False."""
-        assert _probe_local_endpoint("http://localhost:19999/api/tags", timeout=0.5) is False
-
-    def test_invalid_url_returns_false(self):
-        assert _probe_local_endpoint("not-a-url", timeout=0.5) is False
-
-
-class TestResolvePreferLocal:
-    def test_no_prefer_local(self):
-        """When prefer_local is not set, return empty overrides."""
-        job = {"name": "test", "prompt": "hello"}
-        prov, url, status = _resolve_prefer_local(job)
-        assert prov is None
-        assert url is None
-        assert status == ""
-
-    def test_prefer_local_with_explicit_local_url(self):
-        """When base_url is already local, skip probing."""
-        job = {"name": "test", "prefer_local": True, "base_url": "http://localhost:11434/v1"}
-        prov, url, status = _resolve_prefer_local(job)
-        assert prov is None
-        assert url is None  # Don't override — already local
-        assert "already local" in status
-
-    def test_prefer_local_no_server_found(self):
-        """When no local server is running, status indicates fallback."""
-        job = {"name": "test", "prefer_local": True}
-        prov, url, status = _resolve_prefer_local(job)
-        # Unless Ollama happens to be running, this should fail
-        if url is None:
-            assert "no local server" in status
-
-    def test_prefer_local_false(self):
-        """prefer_local=false should act like unset."""
-        job = {"name": "test", "prefer_local": False}
-        prov, url, status = _resolve_prefer_local(job)
-        assert prov is None
-        assert url is None
-        assert status == ""
-
-
-class TestLocalEndpointsConfig:
-    """Verify the well-known endpoints list covers expected servers."""
-
-    def test_ollama_in_endpoints(self):
-        names = [ep["name"] for ep in _LOCAL_ENDPOINTS]
-        assert "ollama" in names
-
-    def test_llama_cpp_in_endpoints(self):
-        names = [ep["name"] for ep in _LOCAL_ENDPOINTS]
-        assert "llama-cpp" in names
-
-    def test_all_endpoints_have_health(self):
-        for ep in _LOCAL_ENDPOINTS:
-            assert "health" in ep
-            assert ep["health"].startswith("http")
-
-    def test_all_endpoints_have_base_url(self):
-        for ep in _LOCAL_ENDPOINTS:
-            assert "base_url" in ep
-            assert "/v1" in ep["base_url"]
```