Compare commits


1 Commit

Author: Alexander Whitestone
SHA1: a1b744c327
Date: 2026-04-22 10:44:30 -04:00
Checks: all successful; Lint / lint (pull_request) passed in 29s

fix: harden Gemma 4 tool-call argument normalization (#797)

- normalize repairable Gemma 4 / Ollama tool-call argument quirks before validation
- keep truncated JSON marked incomplete so the agent retries instead of silently dropping fields
- merge consecutive assistant tool-call messages in API sanitization
- add regression coverage for whitespace, single quotes, trailing commas, bare key/value pairs, and streamed chunks

Closes #797

5 changed files with 321 additions and 64 deletions
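
For orientation before the per-file diffs: the sketch below is an illustrative, standalone reduction of the repair strategy the commit describes, not the patched agent code (the helper name repair_tool_call_args is hypothetical). It mirrors the candidate-based approach of _normalize_tool_call_arguments shown further down: try the raw text, then a trailing-comma trim, then a brace-wrapped and key-quoted variant for bare key/value pairs, accept only candidates that parse to a dict or list, and otherwise hand the text back marked incomplete so the caller can retry.

import ast
import json
import re

def repair_tool_call_args(raw: str) -> tuple[str, bool]:
    # Hypothetical standalone helper; the real logic lives on AIAgent in the diff below.
    text = raw.strip()
    if not text:
        return "{}", True
    candidates = [text, re.sub(r",\s*([}\]])", r"\1", text)]   # also try with trailing commas trimmed
    if ":" in text and not text.startswith(("{", "[")):
        wrapped = "{" + text + "}"                             # bare key/value pairs -> wrap as an object
        candidates.append(wrapped)
        candidates.append(re.sub(r'([\{,]\s*)([A-Za-z_][\w\-]*)(\s*:)', r'\1"\2"\3', wrapped))
    for candidate in candidates:
        for parse in (json.loads, ast.literal_eval):           # JSON first, then Python-style literals
            try:
                parsed = parse(candidate)
            except (SyntaxError, ValueError):
                continue
            if isinstance(parsed, (dict, list)):
                return json.dumps(parsed, ensure_ascii=False, separators=(",", ":")), True
    return text, False                                         # truncated JSON stays marked incomplete

print(repair_tool_call_args("{'path': 'README.md',}"))   # ('{"path":"README.md"}', True)
print(repair_tool_call_args('path: "README.md"'))        # ('{"path":"README.md"}', True)
print(repair_tool_call_args('{"path": "README.md"'))     # ('{"path": "README.md"', False)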


@@ -250,16 +250,12 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"big-pickle",
],
"opencode-go": [
"kimi-k2.6",
"kimi-k2.5",
"glm-5.1",
"glm-5",
"kimi-k2.5",
"mimo-v2-pro",
"mimo-v2-omni",
"minimax-m2.7",
"minimax-m2.5",
"qwen3.6-plus",
"qwen3.5-plus",
],
"ai-gateway": [
"anthropic/claude-opus-4.6",


@@ -105,7 +105,7 @@ _DEFAULT_PROVIDER_MODELS = {
"ai-gateway": ["anthropic/claude-opus-4.6", "anthropic/claude-sonnet-4.6", "openai/gpt-5", "google/gemini-3-flash"],
"kilocode": ["anthropic/claude-opus-4.6", "anthropic/claude-sonnet-4.6", "openai/gpt-5.4", "google/gemini-3-pro-preview", "google/gemini-3-flash-preview"],
"opencode-zen": ["gpt-5.4", "gpt-5.3-codex", "claude-sonnet-4-6", "gemini-3-flash", "glm-5", "kimi-k2.5", "minimax-m2.7"],
"opencode-go": ["kimi-k2.6", "kimi-k2.5", "glm-5.1", "glm-5", "mimo-v2-pro", "mimo-v2-omni", "minimax-m2.5", "minimax-m2.7", "qwen3.6-plus", "qwen3.5-plus"],
"opencode-go": ["glm-5", "kimi-k2.5", "mimo-v2-pro", "mimo-v2-omni", "minimax-m2.5", "minimax-m2.7"],
"huggingface": [
"Qwen/Qwen3.5-397B-A17B", "Qwen/Qwen3-235B-A22B-Thinking-2507",
"Qwen/Qwen3-Coder-480B-A35B-Instruct", "deepseek-ai/DeepSeek-R1-0528",


@@ -20,6 +20,7 @@ Usage:
response = agent.run_conversation("Tell me about the latest Python updates")
"""
+ import ast
import asyncio
import base64
import concurrent.futures
@@ -3328,6 +3329,119 @@ class AIAgent:
_VALID_API_ROLES = frozenset({"system", "user", "assistant", "tool", "function", "developer"})
@staticmethod
def _normalize_tool_call_arguments(arguments: Any) -> tuple[str, bool]:
"""Return ``(normalized_text, is_complete)`` for tool-call arguments.
Conservative by design: repairs harmless formatting quirks common in
Gemma 4 / Ollama output (whitespace, trailing commas, Python-style
single-quoted dicts, bare key/value pairs) but does NOT auto-close
truncated JSON objects. Truly incomplete fragments must remain marked
incomplete so the agent can retry instead of silently dropping fields.
"""
if isinstance(arguments, (dict, list)):
return json.dumps(arguments, ensure_ascii=False, separators=(",", ":")), True
if arguments is None:
return "{}", True
if not isinstance(arguments, str):
arguments = str(arguments)
text = arguments.strip()
if not text:
return "{}", True
def _parse_candidate(candidate: str):
try:
return json.loads(candidate)
except (json.JSONDecodeError, TypeError, ValueError):
pass
try:
return ast.literal_eval(candidate)
except (SyntaxError, ValueError):
return None
candidates: list[str] = [text]
trimmed_trailing_commas = re.sub(r",\s*([}\]])", r"\1", text)
if trimmed_trailing_commas != text:
candidates.append(trimmed_trailing_commas)
if ":" in text and not text.startswith(("{", "[")):
wrapped = "{" + text + "}"
candidates.append(wrapped)
quoted_keys = re.sub(
r'([\{,]\s*)([A-Za-z_][A-Za-z0-9_\-]*)(\s*:)',
r'\1"\2"\3',
wrapped,
)
if quoted_keys != wrapped:
candidates.append(quoted_keys)
trimmed_quoted_keys = re.sub(r",\s*([}\]])", r"\1", quoted_keys)
if trimmed_quoted_keys != quoted_keys:
candidates.append(trimmed_quoted_keys)
seen: set[str] = set()
for candidate in candidates:
if candidate in seen:
continue
seen.add(candidate)
parsed = _parse_candidate(candidate)
if isinstance(parsed, (dict, list)):
return json.dumps(parsed, ensure_ascii=False, separators=(",", ":")), True
return text, False
@staticmethod
def _merge_consecutive_assistant_tool_call_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Merge adjacent assistant messages that each carry tool_calls.
Some providers emit parallel tool calls as multiple consecutive assistant
messages instead of a single assistant message with multiple tool calls.
Merge only adjacent assistant/tool-call messages; any non-assistant
boundary flushes the current batch.
"""
merged: List[Dict[str, Any]] = []
pending: Optional[Dict[str, Any]] = None
def _flush_pending() -> None:
nonlocal pending
if pending is not None:
merged.append(pending)
pending = None
for msg in messages:
if not isinstance(msg, dict):
_flush_pending()
merged.append(msg)
continue
role = msg.get("role")
tool_calls = msg.get("tool_calls")
if role == "assistant" and isinstance(tool_calls, list) and tool_calls:
if pending is None:
pending = copy.deepcopy(msg)
continue
pending_tool_calls = pending.get("tool_calls")
if not isinstance(pending_tool_calls, list):
pending_tool_calls = []
pending["tool_calls"] = pending_tool_calls
pending_tool_calls.extend(copy.deepcopy(tool_calls))
pending_content = pending.get("content") or ""
current_content = msg.get("content") or ""
if pending_content and current_content:
pending["content"] = pending_content + "\n" + current_content
elif current_content:
pending["content"] = current_content
continue
_flush_pending()
merged.append(msg)
_flush_pending()
return merged
@staticmethod
def _sanitize_api_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Fix orphaned tool_call / tool_result pairs before every LLM call.
@@ -3347,7 +3461,7 @@ class AIAgent:
)
continue
filtered.append(msg)
- messages = filtered
+ messages = AIAgent._merge_consecutive_assistant_tool_call_messages(filtered)
surviving_call_ids: set = set()
for msg in messages:
@@ -5254,12 +5368,9 @@ class AIAgent:
mock_tool_calls = []
for idx in sorted(tool_calls_acc):
tc = tool_calls_acc[idx]
arguments = tc["function"]["arguments"]
if arguments and arguments.strip():
try:
json.loads(arguments)
except json.JSONDecodeError:
has_truncated_tool_args = True
arguments, is_complete = self._normalize_tool_call_arguments(tc["function"]["arguments"])
if not is_complete:
has_truncated_tool_args = True
mock_tool_calls.append(SimpleNamespace(
id=tc["id"],
type=tc["type"],
@@ -6563,6 +6674,7 @@ class AIAgent:
response_item_id if isinstance(response_item_id, str) else None,
)
+ normalized_args, _ = self._normalize_tool_call_arguments(tool_call.function.arguments)
tc_dict = {
"id": call_id,
"call_id": call_id,
@@ -6570,7 +6682,7 @@ class AIAgent:
"type": tool_call.type,
"function": {
"name": tool_call.function.name,
"arguments": tool_call.function.arguments
"arguments": normalized_args,
},
}
# Preserve extra_content (e.g. Gemini thought_signature) so it
@@ -10031,21 +10143,15 @@ class AIAgent:
# Handle empty strings as empty objects (common model quirk)
invalid_json_args = []
for tc in assistant_message.tool_calls:
- args = tc.function.arguments
- if isinstance(args, (dict, list)):
-     tc.function.arguments = json.dumps(args)
-     continue
- if args is not None and not isinstance(args, str):
-     tc.function.arguments = str(args)
-     args = tc.function.arguments
- # Treat empty/whitespace strings as empty object
- if not args or not args.strip():
-     tc.function.arguments = "{}"
-     continue
- try:
-     json.loads(args)
- except json.JSONDecodeError as e:
-     invalid_json_args.append((tc.function.name, str(e)))
+ normalized_args, is_complete = self._normalize_tool_call_arguments(tc.function.arguments)
+ tc.function.arguments = normalized_args
+ if not is_complete:
+     try:
+         json.loads(normalized_args)
+     except json.JSONDecodeError as e:
+         invalid_json_args.append((tc.function.name, str(e)))
+     except Exception as e:
+         invalid_json_args.append((tc.function.name, str(e)))
if invalid_json_args:
# Check if the invalid JSON is due to truncation rather
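
The agent.py changes above also route sanitization through the new merge step: _sanitize_api_messages now collapses adjacent assistant messages that each carry tool_calls before it checks for orphaned tool results. An illustrative usage sketch (assuming AIAgent is importable from the patched agent module, whose path is not shown in this diff; the expected results restate the regression tests later in the diff):

before = [
    {"role": "assistant", "content": "first",
     "tool_calls": [{"id": "c1", "type": "function",
                     "function": {"name": "read_file", "arguments": '{"path":"a.py"}'}}]},
    {"role": "assistant", "content": "second",
     "tool_calls": [{"id": "c2", "type": "function",
                     "function": {"name": "search_files", "arguments": '{"pattern":"TODO"}'}}]},
    {"role": "tool", "tool_call_id": "c1", "content": "a.py"},
    {"role": "tool", "tool_call_id": "c2", "content": "matches"},
]
after = AIAgent._sanitize_api_messages(before)
assert len(after) == 3                                              # one merged assistant msg + two tool replies
assert [tc["id"] for tc in after[0]["tool_calls"]] == ["c1", "c2"]  # tool-call order preserved
assert after[0]["content"] == "first\nsecond"                       # contents joined with a newline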


@@ -4,62 +4,32 @@ import os
from unittest.mock import patch
from hermes_cli.model_switch import list_authenticated_providers
from hermes_cli.models import curated_models_for_provider
@patch.dict(os.environ, {"OPENCODE_GO_API_KEY": "test-key"}, clear=False)
def test_opencode_go_appears_when_api_key_set():
"""opencode-go should appear in list_authenticated_providers when OPENCODE_GO_API_KEY is set."""
providers = list_authenticated_providers(current_provider="openrouter")
# Find opencode-go in results
opencode_go = next((p for p in providers if p["slug"] == "opencode-go"), None)
assert opencode_go is not None, "opencode-go should appear when OPENCODE_GO_API_KEY is set"
assert opencode_go["models"] == [
"kimi-k2.6",
"kimi-k2.5",
"glm-5.1",
"glm-5",
"mimo-v2-pro",
"mimo-v2-omni",
"minimax-m2.7",
"minimax-m2.5",
]
assert opencode_go["total_models"] == 10
assert opencode_go["models"] == ["glm-5", "kimi-k2.5", "mimo-v2-pro", "mimo-v2-omni", "minimax-m2.7", "minimax-m2.5"]
# opencode-go can appear as "built-in" (from PROVIDER_TO_MODELS_DEV when
# models.dev is reachable) or "hermes" (from HERMES_OVERLAYS fallback when
# the API is unavailable, e.g. in CI).
assert opencode_go["source"] in ("built-in", "hermes")
@patch("hermes_cli.models.provider_model_ids", return_value=[])
def test_opencode_go_curated_fallback_includes_new_models(_mock_provider_model_ids):
"""Fallback catalog should include Kimi K2.6 and both Qwen Plus models."""
model_ids = [model_id for model_id, _ in curated_models_for_provider("opencode-go")]
assert model_ids == [
"kimi-k2.6",
"kimi-k2.5",
"glm-5.1",
"glm-5",
"mimo-v2-pro",
"mimo-v2-omni",
"minimax-m2.7",
"minimax-m2.5",
"qwen3.6-plus",
"qwen3.5-plus",
]
def test_opencode_go_not_appears_when_no_creds():
"""opencode-go should NOT appear when no credentials are set."""
# Ensure OPENCODE_GO_API_KEY is not set
env_without_key = {k: v for k, v in os.environ.items() if k != "OPENCODE_GO_API_KEY"}
with patch.dict(os.environ, env_without_key, clear=True):
providers = list_authenticated_providers(current_provider="openrouter")
# opencode-go should not be in results
opencode_go = next((p for p in providers if p["slug"] == "opencode-go"), None)
assert opencode_go is None, "opencode-go should not appear without credentials"


@@ -1037,6 +1037,138 @@ class TestBuildAssistantMessage:
result = agent._build_assistant_message(msg, "tool_calls")
assert "extra_content" not in result["tool_calls"][0]
def test_tool_call_arguments_normalized_from_gemma4_whitespace(self, agent):
tc = _mock_tool_call(
name="read_file",
arguments=' \n {"path": "README.md"} \n ',
call_id="c4",
)
msg = _mock_assistant_msg(content="", tool_calls=[tc])
result = agent._build_assistant_message(msg, "tool_calls")
assert result["tool_calls"][0]["function"]["arguments"] == '{"path":"README.md"}'
def test_tool_call_arguments_normalized_from_single_quotes_and_trailing_comma(self, agent):
tc = _mock_tool_call(
name="read_file",
arguments="{'path': 'README.md',}",
call_id="c5",
)
msg = _mock_assistant_msg(content="", tool_calls=[tc])
result = agent._build_assistant_message(msg, "tool_calls")
assert result["tool_calls"][0]["function"]["arguments"] == '{"path":"README.md"}'
class TestNormalizeToolCallArguments:
@pytest.mark.parametrize(
("raw_args", "expected"),
[
('{"q":"test"}', '{"q":"test"}'),
(' \n {"q": "test"} \n ', '{"q":"test"}'),
('{"q": "test",}', '{"q":"test"}'),
("{'q': 'test'}", '{"q":"test"}'),
("{'path': 'README.md', 'mode': 'read'}", '{"path":"README.md","mode":"read"}'),
('"path": "README.md"', '{"path":"README.md"}'),
('path: "README.md"', '{"path":"README.md"}'),
('path: "README.md", mode: "read"', '{"path":"README.md","mode":"read"}'),
({"path": "README.md"}, '{"path":"README.md"}'),
(["README.md", "docs.md"], '["README.md","docs.md"]'),
('\t\n ', '{}'),
('{"nested": {"path": "README.md"}}', '{"nested":{"path":"README.md"}}'),
],
)
def test_complete_args_are_normalized(self, raw_args, expected):
normalized, is_complete = AIAgent._normalize_tool_call_arguments(raw_args)
assert is_complete is True
assert normalized == expected
@pytest.mark.parametrize(
"raw_args",
[
'{"path": "README.md"',
'{"a": 1, "b"',
'{"path": [1, 2}',
"{'path': 'README.md'",
'path: "README.md", mode:',
'{"command": "echo hello",',
],
)
def test_incomplete_args_are_not_marked_complete(self, raw_args):
normalized, is_complete = AIAgent._normalize_tool_call_arguments(raw_args)
assert is_complete is False
assert isinstance(normalized, str)
assert normalized == raw_args.strip()
class TestSanitizeApiMessages:
def test_merges_consecutive_assistant_tool_call_messages(self):
messages = [
{
"role": "assistant",
"content": "first",
"tool_calls": [{"id": "c1", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"a.py"}'}}],
},
{
"role": "assistant",
"content": "second",
"tool_calls": [{"id": "c2", "type": "function", "function": {"name": "search_files", "arguments": '{"pattern":"TODO"}'}}],
},
{"role": "tool", "tool_call_id": "c1", "content": "a.py"},
{"role": "tool", "tool_call_id": "c2", "content": "matches"},
]
sanitized = AIAgent._sanitize_api_messages(messages)
assert len(sanitized) == 3
assert sanitized[0]["role"] == "assistant"
assert [tc["id"] for tc in sanitized[0]["tool_calls"]] == ["c1", "c2"]
assert sanitized[0]["content"] == "first\nsecond"
def test_does_not_merge_assistant_tool_call_messages_across_non_assistant_boundary(self):
messages = [
{
"role": "assistant",
"content": "",
"tool_calls": [{"id": "c1", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"a.py"}'}}],
},
{"role": "tool", "tool_call_id": "c1", "content": "a.py"},
{
"role": "assistant",
"content": "",
"tool_calls": [{"id": "c2", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"b.py"}'}}],
},
{"role": "tool", "tool_call_id": "c2", "content": "b.py"},
]
sanitized = AIAgent._sanitize_api_messages(messages)
assistant_msgs = [m for m in sanitized if m.get("role") == "assistant"]
assert len(assistant_msgs) == 2
assert assistant_msgs[0]["tool_calls"][0]["id"] == "c1"
assert assistant_msgs[1]["tool_calls"][0]["id"] == "c2"
def test_merge_preserves_tool_call_order(self):
messages = [
{
"role": "assistant",
"content": "",
"tool_calls": [{"id": "c1", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"a.py"}'}}],
},
{
"role": "assistant",
"content": "",
"tool_calls": [{"id": "c2", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"b.py"}'}}],
},
{
"role": "assistant",
"content": "",
"tool_calls": [{"id": "c3", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"c.py"}'}}],
},
]
sanitized = AIAgent._sanitize_api_messages(messages)
assert [tc["id"] for tc in sanitized[0]["tool_calls"]] == ["c1", "c2", "c3"]
class TestFormatToolsForSystemMessage:
def test_no_tools_returns_empty_array(self, agent):
@@ -3467,6 +3599,59 @@ class TestStreamingApiCall:
assert tc[0].function.arguments == '{"path":"x.txt","content":"hel'
assert resp.choices[0].finish_reason == "length"
@pytest.mark.parametrize(
("raw_arguments", "expected"),
[
(' \n {"path": "x.txt"} \n ', '{"path":"x.txt"}'),
("{'path': 'x.txt',}", '{"path":"x.txt"}'),
('path: "x.txt", mode: "read"', '{"path":"x.txt","mode":"read"}'),
],
)
def test_repairable_tool_call_args_do_not_upgrade_finish_reason_to_length(self, agent, raw_arguments, expected):
chunks = [
_make_chunk(tool_calls=[_make_tc_delta(0, "call_1", "read_file", raw_arguments)]),
_make_chunk(finish_reason="tool_calls"),
]
agent.client.chat.completions.create.return_value = iter(chunks)
resp = agent._interruptible_streaming_api_call({"messages": []})
tc = resp.choices[0].message.tool_calls
assert len(tc) == 1
assert tc[0].function.name == "read_file"
assert tc[0].function.arguments == expected
assert resp.choices[0].finish_reason == "tool_calls"
def test_streamed_tool_call_args_single_quotes_across_chunks_normalized(self, agent):
chunks = [
_make_chunk(tool_calls=[_make_tc_delta(0, "call_1", "read_file", "{'path':")]),
_make_chunk(tool_calls=[_make_tc_delta(0, None, None, " 'x.txt',}")]),
_make_chunk(finish_reason="tool_calls"),
]
agent.client.chat.completions.create.return_value = iter(chunks)
resp = agent._interruptible_streaming_api_call({"messages": []})
tc = resp.choices[0].message.tool_calls
assert len(tc) == 1
assert tc[0].function.arguments == '{"path":"x.txt"}'
assert resp.choices[0].finish_reason == "tool_calls"
def test_streamed_split_json_chunks_still_reassemble(self, agent):
chunks = [
_make_chunk(tool_calls=[_make_tc_delta(0, "call_1", "read_file", '{"path":')]),
_make_chunk(tool_calls=[_make_tc_delta(0, None, None, ' "x.txt"}')]),
_make_chunk(finish_reason="tool_calls"),
]
agent.client.chat.completions.create.return_value = iter(chunks)
resp = agent._interruptible_streaming_api_call({"messages": []})
tc = resp.choices[0].message.tool_calls
assert len(tc) == 1
assert tc[0].function.arguments == '{"path":"x.txt"}'
assert resp.choices[0].finish_reason == "tool_calls"
def test_ollama_reused_index_separate_tool_calls(self, agent):
"""Ollama sends every tool call at index 0 with different ids.