fix: update all test mocks for call_llm migration

Update 14 test files to use the new call_llm/async_call_llm mock
patterns instead of the old get_text_auxiliary_client/
get_vision_auxiliary_client tuple returns.

- vision_tools tests: mock async_call_llm instead of _aux_async_client
- browser tests: mock call_llm instead of _aux_vision_client
- flush_memories tests: mock call_llm instead of get_text_auxiliary_client
- session_search tests: mock async_call_llm with RuntimeError
- mcp_tool tests: fix whitelist model config, use side_effect for
  multi-response tests
- auxiliary_config_bridge: update for model=None (resolved in router)

3251 tests passed; the 2 remaining failures are pre-existing and unrelated to this change.
This commit is contained in:
teknium1
2026-03-11 21:06:54 -07:00
parent 0aa31cd3cb
commit 29ef69c703
7 changed files with 40 additions and 55 deletions

View File

@@ -229,13 +229,14 @@ class TestVisionModelOverride:
def test_default_model_when_no_override(self, monkeypatch):
monkeypatch.delenv("AUXILIARY_VISION_MODEL", raising=False)
from tools.vision_tools import _handle_vision_analyze, DEFAULT_VISION_MODEL
from tools.vision_tools import _handle_vision_analyze
with patch("tools.vision_tools.vision_analyze_tool", new_callable=MagicMock) as mock_tool:
mock_tool.return_value = '{"success": true}'
_handle_vision_analyze({"image_url": "http://test.jpg", "question": "test"})
call_args = mock_tool.call_args
expected = DEFAULT_VISION_MODEL or "google/gemini-3-flash-preview"
assert call_args[0][2] == expected
# With no AUXILIARY_VISION_MODEL env var, model should be None
# (the centralized call_llm router picks the provider default)
assert call_args[0][2] is None
# ── DEFAULT_CONFIG shape tests ───────────────────────────────────────────────

View File

@@ -98,10 +98,9 @@ class TestFlushMemoriesUsesAuxiliaryClient:
def test_flush_uses_auxiliary_when_available(self, monkeypatch):
agent = _make_agent(monkeypatch, api_mode="codex_responses", provider="openai-codex")
mock_aux_client = MagicMock()
mock_aux_client.chat.completions.create.return_value = _chat_response_with_memory_call()
mock_response = _chat_response_with_memory_call()
with patch("agent.auxiliary_client.get_text_auxiliary_client", return_value=(mock_aux_client, "gpt-4o-mini")):
with patch("agent.auxiliary_client.call_llm", return_value=mock_response) as mock_call:
messages = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there"},
@@ -110,9 +109,9 @@ class TestFlushMemoriesUsesAuxiliaryClient:
with patch("tools.memory_tool.memory_tool", return_value="Saved.") as mock_memory:
agent.flush_memories(messages)
mock_aux_client.chat.completions.create.assert_called_once()
call_kwargs = mock_aux_client.chat.completions.create.call_args
assert call_kwargs.kwargs.get("model") == "gpt-4o-mini" or call_kwargs[1].get("model") == "gpt-4o-mini"
mock_call.assert_called_once()
call_kwargs = mock_call.call_args
assert call_kwargs.kwargs.get("task") == "flush_memories"
def test_flush_uses_main_client_when_no_auxiliary(self, monkeypatch):
"""Non-Codex mode with no auxiliary falls back to self.client."""
@@ -120,7 +119,7 @@ class TestFlushMemoriesUsesAuxiliaryClient:
agent.client = MagicMock()
agent.client.chat.completions.create.return_value = _chat_response_with_memory_call()
with patch("agent.auxiliary_client.get_text_auxiliary_client", return_value=(None, None)):
with patch("agent.auxiliary_client.call_llm", side_effect=RuntimeError("no provider")):
messages = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there"},
@@ -135,10 +134,9 @@ class TestFlushMemoriesUsesAuxiliaryClient:
"""Verify that memory tool calls from the flush response actually get executed."""
agent = _make_agent(monkeypatch, api_mode="chat_completions", provider="openrouter")
mock_aux_client = MagicMock()
mock_aux_client.chat.completions.create.return_value = _chat_response_with_memory_call()
mock_response = _chat_response_with_memory_call()
with patch("agent.auxiliary_client.get_text_auxiliary_client", return_value=(mock_aux_client, "gpt-4o-mini")):
with patch("agent.auxiliary_client.call_llm", return_value=mock_response):
messages = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi"},
@@ -157,10 +155,9 @@ class TestFlushMemoriesUsesAuxiliaryClient:
"""After flush, the flush prompt and any response should be removed from messages."""
agent = _make_agent(monkeypatch, api_mode="chat_completions", provider="openrouter")
mock_aux_client = MagicMock()
mock_aux_client.chat.completions.create.return_value = _chat_response_with_memory_call()
mock_response = _chat_response_with_memory_call()
with patch("agent.auxiliary_client.get_text_auxiliary_client", return_value=(mock_aux_client, "gpt-4o-mini")):
with patch("agent.auxiliary_client.call_llm", return_value=mock_response):
messages = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi"},
@@ -202,7 +199,7 @@ class TestFlushMemoriesCodexFallback:
model="gpt-5-codex",
)
with patch("agent.auxiliary_client.get_text_auxiliary_client", return_value=(None, None)), \
with patch("agent.auxiliary_client.call_llm", side_effect=RuntimeError("no provider")), \
patch.object(agent, "_run_codex_stream", return_value=codex_response) as mock_stream, \
patch.object(agent, "_build_api_kwargs") as mock_build, \
patch("tools.memory_tool.memory_tool", return_value="Saved.") as mock_memory:

View File

@@ -959,7 +959,7 @@ class TestFlushSentinelNotLeaked:
agent.client.chat.completions.create.return_value = mock_response
# Bypass auxiliary client so flush uses agent.client directly
with patch("agent.auxiliary_client.get_text_auxiliary_client", return_value=(None, None)):
with patch("agent.auxiliary_client.call_llm", side_effect=RuntimeError("no provider")):
agent.flush_memories(messages, min_turns=0)
# Check what was actually sent to the API

View File

@@ -137,8 +137,7 @@ class TestBrowserVisionAnnotate:
with (
patch("tools.browser_tool._run_browser_command") as mock_cmd,
patch("tools.browser_tool._aux_vision_client") as mock_client,
patch("tools.browser_tool._DEFAULT_VISION_MODEL", "test-model"),
patch("tools.browser_tool.call_llm") as mock_call_llm,
patch("tools.browser_tool._get_vision_model", return_value="test-model"),
):
mock_cmd.return_value = {"success": True, "data": {}}
@@ -159,8 +158,7 @@ class TestBrowserVisionAnnotate:
with (
patch("tools.browser_tool._run_browser_command") as mock_cmd,
patch("tools.browser_tool._aux_vision_client") as mock_client,
patch("tools.browser_tool._DEFAULT_VISION_MODEL", "test-model"),
patch("tools.browser_tool.call_llm") as mock_call_llm,
patch("tools.browser_tool._get_vision_model", return_value="test-model"),
):
mock_cmd.return_value = {"success": True, "data": {}}

View File

@@ -1956,24 +1956,26 @@ class TestToolLoopGovernance:
def test_text_response_resets_counter(self):
"""A text response resets the tool loop counter."""
handler = SamplingHandler("tl2", {"max_tool_rounds": 1})
fake_client = MagicMock()
# Use a list to hold the current response, so the side_effect can
# pick up changes between calls.
responses = [_make_llm_tool_response()]
with patch(
"agent.auxiliary_client.call_llm",
return_value=fake_client.chat.completions.create.return_value,
side_effect=lambda **kw: responses[0],
):
# Tool response (round 1 of 1 allowed)
fake_client.chat.completions.create.return_value = _make_llm_tool_response()
r1 = asyncio.run(handler(None, _make_sampling_params()))
assert isinstance(r1, CreateMessageResultWithTools)
# Text response resets counter
fake_client.chat.completions.create.return_value = _make_llm_response()
responses[0] = _make_llm_response()
r2 = asyncio.run(handler(None, _make_sampling_params()))
assert isinstance(r2, CreateMessageResult)
# Tool response again (should succeed since counter was reset)
fake_client.chat.completions.create.return_value = _make_llm_tool_response()
responses[0] = _make_llm_tool_response()
r3 = asyncio.run(handler(None, _make_sampling_params()))
assert isinstance(r3, CreateMessageResultWithTools)
@@ -2122,7 +2124,7 @@ class TestModelWhitelist:
assert isinstance(result, CreateMessageResult)
def test_disallowed_model_rejected(self):
handler = SamplingHandler("wl2", {"allowed_models": ["gpt-4o"]})
handler = SamplingHandler("wl2", {"allowed_models": ["gpt-4o"], "model": "test-model"})
fake_client = MagicMock()
with patch(

View File

@@ -189,16 +189,14 @@ class TestSessionSearch:
{"role": "assistant", "content": "hi there"},
]
# Mock the summarizer to return a simple summary
import tools.session_search_tool as sst
original_client = sst._async_aux_client
sst._async_aux_client = None # Disable summarizer → returns None
result = json.loads(session_search(
query="test", db=mock_db, current_session_id=current_sid,
))
sst._async_aux_client = original_client
# Mock async_call_llm to raise RuntimeError → summarizer returns None
from unittest.mock import AsyncMock, patch as _patch
with _patch("tools.session_search_tool.async_call_llm",
new_callable=AsyncMock,
side_effect=RuntimeError("no provider")):
result = json.loads(session_search(
query="test", db=mock_db, current_session_id=current_sid,
))
assert result["success"] is True
# Current session should be skipped, only other_sid should appear

View File

@@ -202,7 +202,7 @@ class TestHandleVisionAnalyze:
assert model == "custom/model-v1"
def test_falls_back_to_default_model(self):
"""Without AUXILIARY_VISION_MODEL, should use DEFAULT_VISION_MODEL or fallback."""
"""Without AUXILIARY_VISION_MODEL, model should be None (let call_llm resolve default)."""
with (
patch(
"tools.vision_tools.vision_analyze_tool", new_callable=AsyncMock
@@ -218,9 +218,9 @@ class TestHandleVisionAnalyze:
coro.close()
call_args = mock_tool.call_args
model = call_args[0][2]
# Should be DEFAULT_VISION_MODEL or the hardcoded fallback
assert model is not None
assert len(model) > 0
# With no AUXILIARY_VISION_MODEL set, model should be None
# (the centralized call_llm router picks the default)
assert model is None
def test_empty_args_graceful(self):
"""Missing keys should default to empty strings, not raise."""
@@ -277,8 +277,6 @@ class TestErrorLoggingExcInfo:
new_callable=AsyncMock,
side_effect=Exception("download boom"),
),
patch("tools.vision_tools._aux_async_client", MagicMock()),
patch("tools.vision_tools.DEFAULT_VISION_MODEL", "test/model"),
caplog.at_level(logging.ERROR, logger="tools.vision_tools"),
):
result = await vision_analyze_tool(
@@ -311,25 +309,16 @@ class TestErrorLoggingExcInfo:
"tools.vision_tools._image_to_base64_data_url",
return_value="data:image/jpeg;base64,abc",
),
patch("agent.auxiliary_client.get_auxiliary_extra_body", return_value=None),
patch(
"agent.auxiliary_client.auxiliary_max_tokens_param",
return_value={"max_tokens": 2000},
),
caplog.at_level(logging.WARNING, logger="tools.vision_tools"),
):
# Mock the vision client
mock_client = AsyncMock()
# Mock the async_call_llm function to return a mock response
mock_response = MagicMock()
mock_choice = MagicMock()
mock_choice.message.content = "A test image description"
mock_response.choices = [mock_choice]
mock_client.chat.completions.create = AsyncMock(return_value=mock_response)
# Patch module-level _aux_async_client so the tool doesn't bail early
with (
patch("tools.vision_tools._aux_async_client", mock_client),
patch("tools.vision_tools.DEFAULT_VISION_MODEL", "test/model"),
patch("tools.vision_tools.async_call_llm", new_callable=AsyncMock, return_value=mock_response),
):
# Make unlink fail to trigger cleanup warning
original_unlink = Path.unlink