* fix: repair 57 failing CI tests across 14 files
Categories of fixes:
**Test isolation under xdist (-n auto):**
- test_hermes_logging: Strip ALL RotatingFileHandlers before each test
to prevent handlers leaked from other xdist workers from polluting counts
- test_code_execution: Force TERMINAL_ENV=local in setUp — prevents Modal
AuthError when another test leaks TERMINAL_ENV=modal
- test_timezone: Same TERMINAL_ENV fix for execute_code timezone tests
- test_codex_execution_paths: Mock _resolve_turn_agent_config to ensure
model resolution works regardless of xdist worker state
**Matrix adapter tests (nio not installed in CI):**
- Add _make_fake_nio() helper with real response classes for isinstance()
checks in production code
- Replace MagicMock(spec=nio.XxxResponse) with fake_nio instances
- Wrap production method calls with patch.dict('sys.modules', {'nio': ...})
so import nio succeeds in method bodies
- Use try/except instead of pytest.importorskip for nio.crypto imports
(importorskip can be fooled by MagicMock in sys.modules)
- test_matrix_voice: Skip entire file if nio is a mock, not just missing
**Stale test expectations:**
- test_cli_provider_resolution: _prompt_provider_choice now takes **kwargs
(default param added); mock getpass.getpass alongside input
- test_anthropic_oauth_flow: Mock getpass.getpass (code switched from input)
- test_gemini_provider: Mock models.dev + OpenRouter API lookups to test
hardcoded defaults without external API variance
- test_code_execution: Add notify_on_complete to blocked terminal params
- test_setup_openclaw_migration: Mock prompt_choice to select 'Full setup'
(new quick-setup path leads to _require_tty → sys.exit in CI)
- test_skill_manager_tool: Patch get_all_skills_dirs alongside SKILLS_DIR
so _find_skill searches tmp_path, not real ~/.hermes/skills/
**Missing attributes in object.__new__ test runners:**
- test_platform_reconnect: Add session_store to _make_runner()
- test_session_race_guard: Add hooks, _running_agents_ts, session_store,
delivery_router to _make_runner()
**Production bug fix (gateway/run.py):**
- Fix sentinel eviction race: _AGENT_PENDING_SENTINEL was immediately
evicted by the stale-detection logic because sentinels have no
get_activity_summary() method, causing _stale_idle=inf >= timeout.
Guard _should_evict with 'is not _AGENT_PENDING_SENTINEL'.
* fix: address remaining CI failures
- test_setup_openclaw_migration: Also mock _offer_launch_chat (called at
end of both quick and full setup paths)
- test_code_execution: Move TERMINAL_ENV=local to module level to protect
ALL test classes (TestEnvVarFiltering, TestExecuteCodeEdgeCases,
TestInterruptHandling, TestHeadTailTruncation) from xdist env leaks
- test_matrix: Use try/except for nio.crypto imports (importorskip can be
fooled by MagicMock in sys.modules under xdist)
195 lines · 6.5 KiB · Python
import asyncio
import sys
import types
from types import SimpleNamespace

# Stub optional third-party modules BEFORE importing project code, so this
# test module imports cleanly even when those packages are not installed.
# setdefault leaves a real installation untouched if one is present.
sys.modules.setdefault("fire", types.SimpleNamespace(Fire=lambda *a, **k: None))
sys.modules.setdefault("firecrawl", types.SimpleNamespace(Firecrawl=object))
sys.modules.setdefault("fal_client", types.SimpleNamespace())

import cron.scheduler as cron_scheduler
import gateway.run as gateway_run
import run_agent
from gateway.config import Platform
from gateway.session import SessionSource
|
|
|
|
|
|
def _patch_agent_bootstrap(monkeypatch):
    """Stub agent bootstrap hooks so tests never touch real toolsets.

    Replaces ``run_agent.get_tool_definitions`` with a factory that returns a
    single canned "terminal" tool schema, and makes
    ``run_agent.check_toolset_requirements`` report no requirements.
    """

    def _fake_tool_definitions(**kwargs):
        # Build fresh dicts on every call (matching the real function), so a
        # caller mutating one response cannot affect a later one.
        return [
            {
                "type": "function",
                "function": {
                    "name": "terminal",
                    "description": "Run shell commands.",
                    "parameters": {"type": "object", "properties": {}},
                },
            }
        ]

    monkeypatch.setattr(run_agent, "get_tool_definitions", _fake_tool_definitions)
    monkeypatch.setattr(run_agent, "check_toolset_requirements", lambda: {})
|
|
|
|
|
|
def _codex_message_response(text: str):
|
|
return SimpleNamespace(
|
|
output=[
|
|
SimpleNamespace(
|
|
type="message",
|
|
content=[SimpleNamespace(type="output_text", text=text)],
|
|
)
|
|
],
|
|
usage=SimpleNamespace(input_tokens=5, output_tokens=3, total_tokens=8),
|
|
status="completed",
|
|
model="gpt-5-codex",
|
|
)
|
|
|
|
|
|
class _UnauthorizedError(RuntimeError):
|
|
def __init__(self):
|
|
super().__init__("Error code: 401 - unauthorized")
|
|
self.status_code = 401
|
|
|
|
|
|
class _FakeOpenAI:
|
|
def __init__(self, **kwargs):
|
|
self.kwargs = kwargs
|
|
|
|
def close(self):
|
|
return None
|
|
|
|
|
|
class _Codex401ThenSuccessAgent(run_agent.AIAgent):
    """AIAgent double whose first API call 401s, then succeeds after refresh.

    Class-level attributes record what happened so tests can assert on them:
    ``refresh_attempts`` counts credential-refresh calls and ``last_init``
    captures the kwargs the agent was most recently constructed with.
    """

    refresh_attempts = 0
    last_init = {}

    def __init__(self, *args, **kwargs):
        # Keep construction hermetic: no context files, no memory, few loops.
        kwargs.setdefault("skip_context_files", True)
        kwargs.setdefault("skip_memory", True)
        kwargs.setdefault("max_iterations", 4)
        type(self).last_init = dict(kwargs)
        super().__init__(*args, **kwargs)
        # Disable persistence side effects so tests leave no files behind.
        self._cleanup_task_resources = lambda task_id: None
        self._persist_session = lambda messages, history=None: None
        self._save_trajectory = lambda messages, user_message, completed: None
        self._save_session_log = lambda messages: None

    def _try_refresh_codex_client_credentials(self, *, force: bool = True) -> bool:
        # Pretend the refresh always succeeds, and count how often it runs.
        type(self).refresh_attempts += 1
        return True

    def run_conversation(self, user_message: str, conversation_history=None, task_id=None):
        call_count = 0

        def _fake_api_call(api_kwargs):
            # First call simulates expired credentials; later calls succeed.
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                raise _UnauthorizedError()
            return _codex_message_response("Recovered via refresh")

        self._interruptible_api_call = _fake_api_call
        return super().run_conversation(
            user_message, conversation_history=conversation_history, task_id=task_id
        )
|
|
|
|
|
|
def test_cron_run_job_codex_path_handles_internal_401_refresh(monkeypatch):
    """A 401 inside the cron codex path triggers one refresh, then succeeds."""
    _patch_agent_bootstrap(monkeypatch)
    monkeypatch.setattr(run_agent, "OpenAI", _FakeOpenAI)
    monkeypatch.setattr(run_agent, "AIAgent", _Codex401ThenSuccessAgent)

    codex_runtime = {
        "provider": "openai-codex",
        "api_mode": "codex_responses",
        "base_url": "https://chatgpt.com/backend-api/codex",
        "api_key": "codex-token",
    }
    # Shallow-copy per call so each caller gets a fresh, independent dict.
    monkeypatch.setattr(
        "hermes_cli.runtime_provider.resolve_runtime_provider",
        lambda requested=None: dict(codex_runtime),
    )
    monkeypatch.setattr(
        "hermes_cli.runtime_provider.format_runtime_provider_error",
        lambda exc: str(exc),
    )

    # Reset class-level counters the agent double records into.
    _Codex401ThenSuccessAgent.refresh_attempts = 0
    _Codex401ThenSuccessAgent.last_init = {}

    job = {"id": "job-1", "name": "Codex Refresh Test", "prompt": "ping", "model": "gpt-5.3-codex"}
    success, output, final_response, error = cron_scheduler.run_job(job)

    assert success is True
    assert error is None
    assert final_response == "Recovered via refresh"
    assert "Recovered via refresh" in output
    # Exactly one refresh: first call 401'd, second succeeded.
    assert _Codex401ThenSuccessAgent.refresh_attempts == 1
    assert _Codex401ThenSuccessAgent.last_init["provider"] == "openai-codex"
    assert _Codex401ThenSuccessAgent.last_init["api_mode"] == "codex_responses"
|
|
|
|
|
|
def test_gateway_run_agent_codex_path_handles_internal_401_refresh(monkeypatch):
    """A 401 inside the gateway codex path triggers one refresh, then succeeds."""
    from unittest.mock import AsyncMock, MagicMock

    _patch_agent_bootstrap(monkeypatch)
    monkeypatch.setattr(run_agent, "OpenAI", _FakeOpenAI)
    monkeypatch.setattr(run_agent, "AIAgent", _Codex401ThenSuccessAgent)
    monkeypatch.setattr(
        gateway_run,
        "_resolve_runtime_agent_kwargs",
        lambda: {
            "provider": "openai-codex",
            "api_mode": "codex_responses",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "codex-token",
        },
    )
    monkeypatch.setenv("HERMES_TOOL_PROGRESS", "false")
    monkeypatch.setenv("HERMES_MODEL", "gpt-5.3-codex")

    # Reset class-level counters the agent double records into.
    _Codex401ThenSuccessAgent.refresh_attempts = 0
    _Codex401ThenSuccessAgent.last_init = {}

    # Build a bare runner (skip __init__, which wires real adapters/sessions)
    # and hand-populate only the attributes _run_agent touches.
    runner = gateway_run.GatewayRunner.__new__(gateway_run.GatewayRunner)
    hooks = MagicMock()
    hooks.emit = AsyncMock()
    hooks.loaded_hooks = []
    for attr, value in {
        "adapters": {},
        "_ephemeral_system_prompt": "",
        "_prefill_messages": [],
        "_reasoning_config": None,
        "_provider_routing": {},
        "_fallback_model": None,
        "_running_agents": {},
        "_smart_model_routing": {},
        "hooks": hooks,
        "_session_db": None,
    }.items():
        setattr(runner, attr, value)

    # Pin per-turn model resolution so env vars leaked by other xdist workers
    # cannot clear HERMES_MODEL out from under the test.
    monkeypatch.setattr(
        gateway_run.GatewayRunner,
        "_resolve_turn_agent_config",
        lambda self, msg, model, runtime: {
            "model": model or "gpt-5.3-codex",
            "runtime": runtime,
        },
    )

    source = SessionSource(
        platform=Platform.LOCAL,
        chat_id="cli",
        chat_name="CLI",
        chat_type="dm",
        user_id="user-1",
    )

    result = asyncio.run(
        runner._run_agent(
            message="ping",
            context_prompt="",
            history=[],
            source=source,
            session_id="session-1",
            session_key="agent:main:local:dm",
        )
    )

    assert result["final_response"] == "Recovered via refresh"
    # Exactly one refresh: first call 401'd, second succeeded.
    assert _Codex401ThenSuccessAgent.refresh_attempts == 1
    assert _Codex401ThenSuccessAgent.last_init["provider"] == "openai-codex"
    assert _Codex401ThenSuccessAgent.last_init["api_mode"] == "codex_responses"