fix: overnight loop provider — explicit Ollama (99% error rate fix)
Some checks failed
Smoke Test / smoke (push) has been cancelled

Merge PR #605: fix: overnight loop provider — explicit Ollama (99% error rate fix)
This commit was merged as part of pull request #605.
This commit is contained in the following:
2026-04-13 03:11:24 +00:00

View File

@@ -104,20 +104,23 @@ def run_task(task: dict, run_number: int) -> dict:
sys.path.insert(0, str(AGENT_DIR))
try:
from hermes_cli.runtime_provider import resolve_runtime_provider
from run_agent import AIAgent
runtime = resolve_runtime_provider()
# Explicit Ollama provider — do NOT use resolve_runtime_provider()
# which may return 'local' (unsupported). The overnight loop always
# runs against local Ollama inference.
_model = os.environ.get("OVERNIGHT_MODEL", "hermes4:14b")
_base_url = os.environ.get("OVERNIGHT_BASE_URL", "http://localhost:11434/v1")
_provider = "ollama"
buf_out = io.StringIO()
buf_err = io.StringIO()
agent = AIAgent(
model=runtime.get("model", "hermes4:14b"),
api_key=runtime.get("api_key"),
base_url=runtime.get("base_url"),
provider=runtime.get("provider"),
api_mode=runtime.get("api_mode"),
model=_model,
base_url=_base_url,
provider=_provider,
api_mode="chat_completions",
max_iterations=MAX_TURNS_PER_TASK,
quiet_mode=True,
ephemeral_system_prompt=SYSTEM_PROMPT,
@@ -134,9 +137,9 @@ def run_task(task: dict, run_number: int) -> dict:
result["elapsed_seconds"] = round(elapsed, 2)
result["response"] = conv_result.get("final_response", "")[:2000]
result["session_id"] = getattr(agent, "session_id", None)
result["provider"] = runtime.get("provider")
result["base_url"] = runtime.get("base_url")
result["model"] = runtime.get("model")
result["provider"] = _provider
result["base_url"] = _base_url
result["model"] = _model
result["tool_calls_made"] = conv_result.get("tool_calls_count", 0)
result["status"] = "pass" if conv_result.get("final_response") else "empty"
result["stdout"] = buf_out.getvalue()[:500]