Compare commits

...

1 Commits

Author SHA1 Message Date
Alexander Whitestone
7aca8f5ff9 fix(cron): inject runtime context into cron job prompts after provider migration
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m7s
Cron job prompts didn't know what model/provider they were running on.
This caused three bugs after the Ollama→Nous migration:

1. Health Monitor: prompt said "Check Ollama is responding" but ran on
   nous/mimo-v2-pro (cloud API, no local Ollama access)
2. nightwatch-health-monitor: prompt assumed SSH capability but cloud
   models can't SSH into VPSes
3. know-thy-father-analyzer: vision capability unverified on Nous

Fix: _build_job_prompt() now receives runtime_model and runtime_provider.
When a cloud provider is detected, a [SYSTEM: RUNTIME CONTEXT] hint is
injected telling the agent:
- What model it's running on
- What provider it's using
- Whether it has local machine access or is a cloud API
- NOT to assume SSH, local Ollama, or local filesystem access

The model/provider is resolved early (before prompt building) from:
1. job.model override
2. HERMES_MODEL env var
3. config.yaml model.default
4. Provider derived from model prefix (e.g. "nous/mimo-v2-pro" → "nous")

Closes #372
2026-04-13 18:18:31 -04:00

View File

@@ -544,8 +544,16 @@ def _run_job_script(script_path: str) -> tuple[bool, str]:
return False, f"Script execution failed: {exc}"
def _build_job_prompt(job: dict) -> str:
"""Build the effective prompt for a cron job, optionally loading one or more skills first."""
def _build_job_prompt(job: dict, *, runtime_model: str = "", runtime_provider: str = "") -> str:
"""Build the effective prompt for a cron job, optionally loading one or more skills first.
Args:
job: The cron job dict.
runtime_model: The resolved model name (e.g. "xiaomi/mimo-v2-pro").
If provided, injected into the cron hint so the agent knows its own
capabilities and can avoid prompts that assume local/Ollama access.
runtime_provider: The resolved provider name (e.g. "nous", "openrouter").
"""
prompt = job.get("prompt", "")
skills = job.get("skills")
@@ -596,6 +604,43 @@ def _build_job_prompt(job: dict) -> str:
"\"[SCRIPT_FAILED]: forge.alexanderwhitestone.com timed out\" "
"\"[SCRIPT_FAILED]: script exited with code 1\".]\\n\\n"
)
# Inject runtime context so the agent knows its own capabilities.
# This prevents prompts from assuming local Ollama/SSH when running
# on a cloud API provider (e.g. nous/mimo-v2-pro).
if runtime_model or runtime_provider:
is_local = (
runtime_provider in ("ollama", "local", "")
or "ollama" in (runtime_model or "").lower()
)
is_cloud = not is_local and bool(runtime_provider)
has_terminal = True # Cron jobs always have terminal tool
capability_notes = []
if runtime_model:
capability_notes.append(f"MODEL: {runtime_model}")
if runtime_provider:
capability_notes.append(f"PROVIDER: {runtime_provider}")
if is_local:
capability_notes.append(
"RUNTIME: local — you have access to the local machine, "
"local Ollama, SSH keys, and filesystem"
)
elif is_cloud:
capability_notes.append(
"RUNTIME: cloud API — you do NOT have local machine access. "
"Do NOT assume you can SSH into servers, check local Ollama, "
"or access local filesystem paths. Use terminal tools only "
"for commands that work from this environment."
)
if capability_notes:
runtime_ctx = (
"[SYSTEM: RUNTIME CONTEXT — "
+ "; ".join(capability_notes)
+ ". Adjust your approach based on these capabilities.]\\n\\n"
)
cron_hint = runtime_ctx + cron_hint
prompt = cron_hint + prompt
if skills is None:
legacy = job.get("skill")
@@ -666,7 +711,31 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
job_id = job["id"]
job_name = job["name"]
prompt = _build_job_prompt(job)
# Resolve model early so we can inject capability context into the prompt.
# The full provider resolution happens later (smart routing, etc.) but we
# need the basic model/provider name for the runtime context hint.
_early_model = job.get("model") or os.getenv("HERMES_MODEL") or ""
_early_provider = os.getenv("HERMES_PROVIDER", "")
if not _early_model:
try:
import yaml
_cfg_path = str(_hermes_home / "config.yaml")
if os.path.exists(_cfg_path):
with open(_cfg_path) as _f:
_cfg_early = yaml.safe_load(_f) or {}
_mc = _cfg_early.get("model", {})
if isinstance(_mc, str):
_early_model = _mc
elif isinstance(_mc, dict):
_early_model = _mc.get("default", "")
except Exception:
pass
# Derive provider from model prefix if not explicitly set
if not _early_provider and "/" in _early_model:
_early_provider = _early_model.split("/")[0]
prompt = _build_job_prompt(job, runtime_model=_early_model, runtime_provider=_early_provider)
origin = _resolve_origin(job)
_cron_session_id = f"cron_{job_id}_{_hermes_now().strftime('%Y%m%d_%H%M%S')}"