Compare commits
1 commit
fix/561-ss ... whip/378-1
| Author | SHA1 | Date |
|---|---|---|
|  | 63ae8a09f9 |  |
Scheduler changes:

```diff
@@ -13,6 +13,7 @@ import concurrent.futures
import json
import logging
import os
import re
import subprocess
import sys

@@ -643,6 +644,66 @@ def _build_job_prompt(job: dict) -> str:
     return "\n".join(parts)
 
 
+# Well-known local inference endpoints to probe for prefer_local jobs
+_LOCAL_ENDPOINTS = [
+    {"name": "ollama", "base_url": "http://localhost:11434/v1", "health": "http://localhost:11434/api/tags"},
+    {"name": "llama-cpp", "base_url": "http://localhost:8080/v1", "health": "http://localhost:8080/health"},
+    {"name": "vllm", "base_url": "http://localhost:8000/v1", "health": "http://localhost:8000/v1/models"},
+]
+
+
+def _probe_local_endpoint(url: str, timeout: float = 2.0) -> bool:
+    """Quick probe to check if a local inference server is running."""
+    import urllib.request
+    try:
+        req = urllib.request.Request(url)
+        with urllib.request.urlopen(req, timeout=timeout) as resp:
+            return resp.status == 200
+    except Exception:
+        return False
+
+
+def _resolve_prefer_local(job: dict) -> tuple[Optional[str], Optional[str], str]:
+    """For jobs with prefer_local=true, find a running local inference server.
+
+    Returns (provider_override, base_url_override, status_message).
+    None values mean "use default resolution".
+    """
+    if not job.get("prefer_local"):
+        return None, None, ""
+
+    # If the job already specifies an explicit base_url and it's local, honor it
+    explicit_url = job.get("base_url", "")
+    if explicit_url:
+        from agent.model_metadata import is_local_endpoint
+        if is_local_endpoint(explicit_url):
+            return None, None, f"prefer_local: explicit base_url {explicit_url} is already local"
+
+    # Probe well-known local endpoints
+    for ep in _LOCAL_ENDPOINTS:
+        if _probe_local_endpoint(ep["health"]):
+            logger.info(
+                "Job '%s': prefer_local → found %s at %s",
+                job.get("name", "?"), ep["name"], ep["base_url"],
+            )
+            return None, ep["base_url"], (
+                f"prefer_local: using {ep['name']} at {ep['base_url']}"
+            )
+
+    # No local server found — warn and fall back to default
+    logger.warning(
+        "Job '%s': prefer_local=true but no local inference server found "
+        "(probed: %s). Falling back to default provider.",
+        job.get("name", "?"),
+        ", ".join(ep["name"] for ep in _LOCAL_ENDPOINTS),
+    )
+    return None, None, (
+        "prefer_local: no local server found (tried: "
+        + ", ".join(ep["name"] for ep in _LOCAL_ENDPOINTS)
+        + "). Using default provider."
+    )
+
+
 def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
     """
     Execute a single cron job.
```
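The helper's resolution order: an already-local explicit `base_url` short-circuits probing; otherwise the well-known endpoints are probed in list order (Ollama, llama.cpp, vLLM); if every probe fails, the job falls back to the default provider. A minimal sketch of the observable behavior, with a made-up job; note that this version of the helper only ever overrides `base_url`, so the first tuple slot is always `None`:

```python
# Hypothetical job entry; the field names match the tests further down.
job = {"name": "nightly-digest", "prompt": "summarize the feed", "prefer_local": True}

provider, base_url, status = _resolve_prefer_local(job)

# If Ollama answers on http://localhost:11434/api/tags:
#   base_url == "http://localhost:11434/v1"
#   status   == "prefer_local: using ollama at http://localhost:11434/v1"
# If nothing local is listening:
#   base_url is None and status reports the fallback to the default provider.
# provider is None either way; only the base_url is ever overridden.
```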
```diff
@@ -764,6 +825,12 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
         pr = _cfg.get("provider_routing", {})
         smart_routing = _cfg.get("smart_model_routing", {}) or {}
 
+        # prefer_local: if the job declares prefer_local=true, probe for a
+        # local inference server and override the base_url when found. (#378)
+        _pl_provider, _pl_base_url, _pl_status = _resolve_prefer_local(job)
+        if _pl_status:
+            logger.info("Job '%s': %s", job_name, _pl_status)
+
         from hermes_cli.runtime_provider import (
             resolve_runtime_provider,
             format_runtime_provider_error,
@@ -772,7 +839,10 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
         runtime_kwargs = {
             "requested": job.get("provider") or os.getenv("HERMES_INFERENCE_PROVIDER"),
         }
-        if job.get("base_url"):
-            runtime_kwargs["explicit_base_url"] = job.get("base_url")
+        # prefer_local override: use the discovered local endpoint
+        if _pl_base_url:
+            runtime_kwargs["explicit_base_url"] = _pl_base_url
+        elif job.get("base_url"):
+            runtime_kwargs["explicit_base_url"] = job.get("base_url")
         runtime = resolve_runtime_provider(**runtime_kwargs)
     except Exception as exc:
```
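The wiring gives a discovered local endpoint precedence over the job's own `base_url`, which in turn beats "no override". A sketch of that precedence in isolation (the function name is hypothetical, not part of the commit):

```python
from typing import Optional

def pick_base_url(pl_base_url: Optional[str], job: dict) -> Optional[str]:
    """Illustrative mirror of the precedence wired into run_job above."""
    if pl_base_url:                # discovered local server wins
        return pl_base_url
    return job.get("base_url")     # else honor the job's explicit URL, if any

assert pick_base_url("http://localhost:11434/v1", {"base_url": "https://api.example.com/v1"}) == "http://localhost:11434/v1"
assert pick_base_url(None, {"base_url": "https://api.example.com/v1"}) == "https://api.example.com/v1"
assert pick_base_url(None, {}) is None
```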
tests/test_cron_prefer_local.py (new file, 116 lines)

```python
"""Tests for cron prefer_local auto-routing (#378).

Jobs with prefer_local=true should automatically route to a local inference
server (Ollama, llama.cpp, vLLM) when one is available, instead of falling
through to the cloud default.
"""

import pytest


# Helpers mirrored from the scheduler for test isolation
_LOCAL_ENDPOINTS = [
    {"name": "ollama", "base_url": "http://localhost:11434/v1", "health": "http://localhost:11434/api/tags"},
    {"name": "llama-cpp", "base_url": "http://localhost:8080/v1", "health": "http://localhost:8080/health"},
    {"name": "vllm", "base_url": "http://localhost:8000/v1", "health": "http://localhost:8000/v1/models"},
]


def _probe_local_endpoint(url: str, timeout: float = 2.0) -> bool:
    import urllib.request
    try:
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            return resp.status == 200
    except Exception:
        return False


def _is_local_endpoint(base_url: str) -> bool:
    if not base_url:
        return False
    from urllib.parse import urlparse
    parsed = urlparse(base_url)
    host = (parsed.hostname or "").lower()
    return host in ("localhost", "127.0.0.1", "0.0.0.0") or (
        host.startswith("10.") or host.startswith("192.168.") or
        any(host.startswith(f"172.{i}.") for i in range(16, 32))
    )


def _resolve_prefer_local(job: dict):
    if not job.get("prefer_local"):
        return None, None, ""
    explicit_url = job.get("base_url", "")
    if explicit_url and _is_local_endpoint(explicit_url):
        return None, None, f"prefer_local: explicit base_url {explicit_url} is already local"
    for ep in _LOCAL_ENDPOINTS:
        if _probe_local_endpoint(ep["health"], timeout=0.5):
            return None, ep["base_url"], f"prefer_local: using {ep['name']} at {ep['base_url']}"
    return None, None, "prefer_local: no local server found"
```
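As a quick illustration of what the mirrored `_is_local_endpoint` accepts (a sketch, not part of the committed file): loopback hosts plus the RFC 1918 private ranges, where the `range(16, 32)` loop covers 172.16.0.0/12.

```python
# Spot-check the locality classification of the helper above.
for url, expect in [
    ("http://localhost:11434/v1", True),     # loopback hostname
    ("http://192.168.1.50:8080/v1", True),   # RFC 1918
    ("http://172.20.0.2:8000/v1", True),     # inside 172.16.0.0/12
    ("https://api.openai.com/v1", False),    # public endpoint
]:
    assert _is_local_endpoint(url) is expect
```

The committed test classes follow.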
```python
class TestProbeLocalEndpoint:
    def test_unreachable_returns_false(self):
        """A port with nothing listening should return False."""
        assert _probe_local_endpoint("http://localhost:19999/api/tags", timeout=0.5) is False

    def test_invalid_url_returns_false(self):
        assert _probe_local_endpoint("not-a-url", timeout=0.5) is False


class TestResolvePreferLocal:
    def test_no_prefer_local(self):
        """When prefer_local is not set, return empty overrides."""
        job = {"name": "test", "prompt": "hello"}
        prov, url, status = _resolve_prefer_local(job)
        assert prov is None
        assert url is None
        assert status == ""

    def test_prefer_local_with_explicit_local_url(self):
        """When base_url is already local, skip probing."""
        job = {"name": "test", "prefer_local": True, "base_url": "http://localhost:11434/v1"}
        prov, url, status = _resolve_prefer_local(job)
        assert prov is None
        assert url is None  # Don't override — already local
        assert "already local" in status

    def test_prefer_local_no_server_found(self):
        """When no local server is running, status indicates fallback."""
        job = {"name": "test", "prefer_local": True}
        prov, url, status = _resolve_prefer_local(job)
        # A local server may happen to be running in the dev environment,
        # so only assert the fallback message when no endpoint was found.
        if url is None:
            assert "no local server" in status

    def test_prefer_local_false(self):
        """prefer_local=false should act like unset."""
        job = {"name": "test", "prefer_local": False}
        prov, url, status = _resolve_prefer_local(job)
        assert prov is None
        assert url is None
        assert status == ""


class TestLocalEndpointsConfig:
    """Verify the well-known endpoints list covers expected servers."""

    def test_ollama_in_endpoints(self):
        names = [ep["name"] for ep in _LOCAL_ENDPOINTS]
        assert "ollama" in names

    def test_llama_cpp_in_endpoints(self):
        names = [ep["name"] for ep in _LOCAL_ENDPOINTS]
        assert "llama-cpp" in names

    def test_all_endpoints_have_health(self):
        for ep in _LOCAL_ENDPOINTS:
            assert "health" in ep
            assert ep["health"].startswith("http")

    def test_all_endpoints_have_base_url(self):
        for ep in _LOCAL_ENDPOINTS:
            assert "base_url" in ep
            assert "/v1" in ep["base_url"]
```
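One gap worth noting: `test_prefer_local_no_server_found` depends on the environment, and nothing exercises the server-found branch deterministically. A follow-up test sketch, assuming pytest's built-in `monkeypatch` fixture and placement in this same module so the module-level probe can be patched:

```python
import sys

def test_prefer_local_probe_success(monkeypatch):
    # Fake a healthy Ollama probe: only the 11434 health URL "answers".
    mod = sys.modules[__name__]
    monkeypatch.setattr(mod, "_probe_local_endpoint",
                        lambda url, timeout=2.0: "11434" in url)
    prov, url, status = _resolve_prefer_local({"name": "t", "prefer_local": True})
    assert prov is None
    assert url == "http://localhost:11434/v1"
    assert "ollama" in status
```

The suite runs with `pytest tests/test_cron_prefer_local.py -q`.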