Compare commits

1 commit: d1fb50bf2f
agent/anthropic_adapter.py

@@ -1396,6 +1396,8 @@ def normalize_anthropic_response(
         "tool_use": "tool_calls",
         "max_tokens": "length",
         "stop_sequence": "stop",
+        "refusal": "content_filter",
+        "model_context_window_exceeded": "length",
     }
     finish_reason = stop_reason_map.get(response.stop_reason, "stop")

@@ -1409,3 +1411,42 @@ def normalize_anthropic_response(
         ),
         finish_reason,
     )
+
+
+def normalize_anthropic_response_v2(
+    response,
+    strip_tool_prefix: bool = False,
+) -> "NormalizedResponse":
+    """Normalize Anthropic response to NormalizedResponse.
+
+    Wraps the existing normalize_anthropic_response() and maps its output
+    to the shared transport types. This allows incremental migration
+    without disturbing the legacy call sites.
+    """
+    from agent.transports.types import NormalizedResponse, build_tool_call
+
+    assistant_msg, finish_reason = normalize_anthropic_response(response, strip_tool_prefix)
+
+    tool_calls = None
+    if assistant_msg.tool_calls:
+        tool_calls = [
+            build_tool_call(
+                id=tc.id,
+                name=tc.function.name,
+                arguments=tc.function.arguments,
+            )
+            for tc in assistant_msg.tool_calls
+        ]
+
+    provider_data = {}
+    if getattr(assistant_msg, "reasoning_details", None):
+        provider_data["reasoning_details"] = assistant_msg.reasoning_details
+
+    return NormalizedResponse(
+        content=assistant_msg.content,
+        tool_calls=tool_calls,
+        finish_reason=finish_reason,
+        reasoning=getattr(assistant_msg, "reasoning", None),
+        usage=None,
+        provider_data=provider_data or None,
+    )
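The wrapper keeps both return shapes alive during the migration. A minimal sketch (not part of the diff) of what a call site sees on each path, assuming raw is any Anthropic Messages response object:

    from agent.anthropic_adapter import (
        normalize_anthropic_response,
        normalize_anthropic_response_v2,
    )

    msg, finish = normalize_anthropic_response(raw)    # legacy: (SimpleNamespace, str)
    normalized = normalize_anthropic_response_v2(raw)  # new: one NormalizedResponse dataclass
    assert normalized.content == msg.content
    assert normalized.finish_reason == finish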
agent/transports/__init__.py (new file, 57 lines)
@@ -0,0 +1,57 @@
"""Transport layer types and registry for provider response normalization.

Usage:
    from agent.transports import get_transport
    transport = get_transport("anthropic_messages")
    result = transport.normalize_response(raw_response)
"""

from agent.transports.types import (  # noqa: F401
    NormalizedResponse,
    ToolCall,
    Usage,
    build_tool_call,
    map_finish_reason,
)

_REGISTRY: dict = {}


def register_transport(api_mode: str, transport_cls: type) -> None:
    """Register a transport class for an api_mode string."""
    _REGISTRY[api_mode] = transport_cls


def get_transport(api_mode: str):
    """Get a transport instance for the given api_mode.

    Returns None if no transport is registered for this api_mode.
    This allows gradual migration — call sites can check for None
    and fall back to the legacy code path.
    """
    if not _REGISTRY:
        _discover_transports()
    cls = _REGISTRY.get(api_mode)
    if cls is None:
        return None
    return cls()


def _discover_transports() -> None:
    """Import all transport modules to trigger auto-registration."""
    try:
        import agent.transports.anthropic  # noqa: F401
    except ImportError:
        pass
    try:
        import agent.transports.codex  # noqa: F401
    except ImportError:
        pass
    try:
        import agent.transports.chat_completions  # noqa: F401
    except ImportError:
        pass
    try:
        import agent.transports.bedrock  # noqa: F401
    except ImportError:
        pass
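The None return from get_transport() is what makes the migration incremental. A sketch of the call-site pattern it enables (legacy_normalize here is hypothetical, standing in for whatever the existing code path does):

    from agent.transports import get_transport

    transport = get_transport(api_mode)
    if transport is not None:
        normalized = transport.normalize_response(raw_response)
    else:
        normalized = legacy_normalize(raw_response)  # hypothetical: existing legacy path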
agent/transports/anthropic.py (new file, 95 lines)
@@ -0,0 +1,95 @@
"""Anthropic Messages API transport.

Delegates to the existing adapter functions in agent/anthropic_adapter.py.
This transport owns format conversion and normalization — NOT client lifecycle.
"""

from typing import Any, Dict, List, Optional

from agent.transports.base import ProviderTransport
from agent.transports.types import NormalizedResponse


class AnthropicTransport(ProviderTransport):
    """Transport for api_mode='anthropic_messages'."""

    @property
    def api_mode(self) -> str:
        return "anthropic_messages"

    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        from agent.anthropic_adapter import convert_messages_to_anthropic

        base_url = kwargs.get("base_url")
        return convert_messages_to_anthropic(messages, base_url=base_url)

    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        from agent.anthropic_adapter import convert_tools_to_anthropic

        return convert_tools_to_anthropic(tools)

    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        from agent.anthropic_adapter import build_anthropic_kwargs

        return build_anthropic_kwargs(
            model=model,
            messages=messages,
            tools=tools,
            max_tokens=params.get("max_tokens", 16384),
            reasoning_config=params.get("reasoning_config"),
            tool_choice=params.get("tool_choice"),
            is_oauth=params.get("is_oauth", False),
            preserve_dots=params.get("preserve_dots", False),
            context_length=params.get("context_length"),
            base_url=params.get("base_url"),
            fast_mode=params.get("fast_mode", False),
        )

    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        from agent.anthropic_adapter import normalize_anthropic_response_v2

        strip_tool_prefix = kwargs.get("strip_tool_prefix", False)
        return normalize_anthropic_response_v2(response, strip_tool_prefix=strip_tool_prefix)

    def validate_response(self, response: Any) -> bool:
        if response is None:
            return False
        content_blocks = getattr(response, "content", None)
        if not isinstance(content_blocks, list):
            return False
        if not content_blocks:
            return False
        return True

    def extract_cache_stats(self, response: Any):
        usage = getattr(response, "usage", None)
        if usage is None:
            return None
        cached = getattr(usage, "cache_read_input_tokens", 0) or 0
        written = getattr(usage, "cache_creation_input_tokens", 0) or 0
        if cached or written:
            return {"cached_tokens": cached, "creation_tokens": written}
        return None

    _STOP_REASON_MAP = {
        "end_turn": "stop",
        "tool_use": "tool_calls",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "refusal": "content_filter",
        "model_context_window_exceeded": "length",
    }

    def map_finish_reason(self, raw_reason: str) -> str:
        return self._STOP_REASON_MAP.get(raw_reason, "stop")


from agent.transports import register_transport  # noqa: E402

register_transport("anthropic_messages", AnthropicTransport)
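Registration happens as a side effect of importing the module (the register_transport call at the bottom, hence the noqa: E402), which is what _discover_transports relies on. A sketch of the effect, mirroring what the tests below assert:

    import agent.transports.anthropic  # module bottom runs register_transport(...)
    from agent.transports import get_transport

    t = get_transport("anthropic_messages")
    assert t is not None and t.api_mode == "anthropic_messages"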
agent/transports/base.py (new file, 61 lines)
@@ -0,0 +1,61 @@
"""Abstract base for provider transports.

A transport owns the data path for one api_mode:
    convert_messages → convert_tools → build_kwargs → normalize_response

It does NOT own: client construction, streaming, credential refresh,
prompt caching, interrupt handling, or retry logic. Those stay on AIAgent.
"""

from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional

from agent.transports.types import NormalizedResponse


class ProviderTransport(ABC):
    """Base class for provider-specific format conversion and normalization."""

    @property
    @abstractmethod
    def api_mode(self) -> str:
        """The api_mode string this transport handles."""
        ...

    @abstractmethod
    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        """Convert OpenAI-format messages to provider-native format."""
        ...

    @abstractmethod
    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        """Convert OpenAI-format tool definitions to provider-native format."""
        ...

    @abstractmethod
    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        """Build the complete provider kwargs dict."""
        ...

    @abstractmethod
    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        """Normalize a raw provider response to the shared NormalizedResponse type."""
        ...

    def validate_response(self, response: Any) -> bool:
        """Optional structural validation for raw responses."""
        return True

    def extract_cache_stats(self, response: Any) -> Optional[Dict[str, int]]:
        """Optional cache stats extraction."""
        return None

    def map_finish_reason(self, raw_reason: str) -> str:
        """Optional stop-reason mapping. Defaults to passthrough."""
        return raw_reason
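A sketch of the full data path through one transport, in the order the docstring names; the client object and the OpenAI-format inputs here are assumptions, since client lifecycle deliberately stays outside the transport:

    transport = get_transport("anthropic_messages")
    converted = transport.convert_messages(openai_messages)   # provider-native messages
    native_tools = transport.convert_tools(openai_tools)      # provider-native tool defs
    kwargs = transport.build_kwargs(model=model, messages=openai_messages,
                                    tools=openai_tools, max_tokens=1024)
    raw = client.messages.create(**kwargs)                    # owned by AIAgent, not the transport
    if transport.validate_response(raw):
        normalized = transport.normalize_response(raw)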
agent/transports/types.py (new file, 58 lines)
@@ -0,0 +1,58 @@
"""Shared types for normalized provider responses."""

from __future__ import annotations

import json
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional


@dataclass
class ToolCall:
    """A normalized tool call from any provider."""

    id: Optional[str]
    name: str
    arguments: str
    provider_data: Optional[Dict[str, Any]] = field(default=None, repr=False)


@dataclass
class Usage:
    """Token usage from an API response."""

    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0
    cached_tokens: int = 0


@dataclass
class NormalizedResponse:
    """Normalized API response from any provider."""

    content: Optional[str]
    tool_calls: Optional[List[ToolCall]]
    finish_reason: str
    reasoning: Optional[str] = None
    usage: Optional[Usage] = None
    provider_data: Optional[Dict[str, Any]] = field(default=None, repr=False)


def build_tool_call(
    id: Optional[str],
    name: str,
    arguments: Any,
    **provider_fields: Any,
) -> ToolCall:
    """Build a ToolCall, auto-serialising dict arguments."""
    args_str = json.dumps(arguments) if isinstance(arguments, dict) else str(arguments)
    provider_data = dict(provider_fields) if provider_fields else None
    return ToolCall(id=id, name=name, arguments=args_str, provider_data=provider_data)


def map_finish_reason(reason: Optional[str], mapping: Dict[str, str]) -> str:
    """Translate a provider-specific stop reason to the normalized set."""
    if reason is None:
        return "stop"
    return mapping.get(reason, "stop")
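A quick sketch of the helpers' behavior (the same properties the new tests assert): dict arguments are JSON-serialised, and any extra keyword fields land in provider_data:

    import json
    from agent.transports.types import build_tool_call, map_finish_reason

    tc = build_tool_call(id="call_1", name="terminal",
                         arguments={"cmd": "ls"},      # dict -> json.dumps(...)
                         response_item_id="fc_1")      # extras -> provider_data
    assert tc.arguments == json.dumps({"cmd": "ls"})
    assert tc.provider_data == {"response_item_id": "fc_1"}
    assert map_finish_reason(None, {}) == "stop"      # missing reason normalizes to "stop"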
tests/agent/test_anthropic_normalize_v2.py (new file, 213 lines)
@@ -0,0 +1,213 @@
"""Regression tests: normalize_anthropic_response_v2 vs v1.

Constructs mock Anthropic responses and asserts that the v2 function
(returning NormalizedResponse) produces identical field values to the
original v1 function (returning SimpleNamespace + finish_reason).
"""

from types import SimpleNamespace

import pytest

from agent.anthropic_adapter import (
    normalize_anthropic_response,
    normalize_anthropic_response_v2,
)
from agent.transports.types import NormalizedResponse


def _text_block(text: str):
    return SimpleNamespace(type="text", text=text)


def _thinking_block(thinking: str, signature: str = "sig_abc"):
    return SimpleNamespace(type="thinking", thinking=thinking, signature=signature)


def _tool_use_block(id: str, name: str, input: dict):
    return SimpleNamespace(type="tool_use", id=id, name=name, input=input)


def _response(content_blocks, stop_reason="end_turn"):
    return SimpleNamespace(
        content=content_blocks,
        stop_reason=stop_reason,
        usage=SimpleNamespace(input_tokens=10, output_tokens=5),
    )


class TestTextOnly:
    def setup_method(self):
        self.resp = _response([_text_block("Hello world")])
        self.v1_msg, self.v1_finish = normalize_anthropic_response(self.resp)
        self.v2 = normalize_anthropic_response_v2(self.resp)

    def test_type(self):
        assert isinstance(self.v2, NormalizedResponse)

    def test_content_matches(self):
        assert self.v2.content == self.v1_msg.content

    def test_finish_reason_matches(self):
        assert self.v2.finish_reason == self.v1_finish

    def test_no_tool_calls(self):
        assert self.v2.tool_calls is None
        assert self.v1_msg.tool_calls is None

    def test_no_reasoning(self):
        assert self.v2.reasoning is None
        assert self.v1_msg.reasoning is None


class TestWithToolCalls:
    def setup_method(self):
        self.resp = _response(
            [
                _text_block("I'll check that"),
                _tool_use_block("toolu_abc", "terminal", {"command": "ls"}),
                _tool_use_block("toolu_def", "read_file", {"path": "/tmp"}),
            ],
            stop_reason="tool_use",
        )
        self.v1_msg, self.v1_finish = normalize_anthropic_response(self.resp)
        self.v2 = normalize_anthropic_response_v2(self.resp)

    def test_finish_reason(self):
        assert self.v2.finish_reason == "tool_calls"
        assert self.v1_finish == "tool_calls"

    def test_tool_call_count(self):
        assert len(self.v2.tool_calls) == 2
        assert len(self.v1_msg.tool_calls) == 2

    def test_tool_call_ids_match(self):
        for i in range(2):
            assert self.v2.tool_calls[i].id == self.v1_msg.tool_calls[i].id

    def test_tool_call_names_match(self):
        assert self.v2.tool_calls[0].name == "terminal"
        assert self.v2.tool_calls[1].name == "read_file"
        for i in range(2):
            assert self.v2.tool_calls[i].name == self.v1_msg.tool_calls[i].function.name

    def test_tool_call_arguments_match(self):
        for i in range(2):
            assert self.v2.tool_calls[i].arguments == self.v1_msg.tool_calls[i].function.arguments

    def test_content_preserved(self):
        assert self.v2.content == self.v1_msg.content
        assert "check that" in self.v2.content


class TestWithThinking:
    def setup_method(self):
        self.resp = _response([
            _thinking_block("Let me think about this carefully..."),
            _text_block("The answer is 42."),
        ])
        self.v1_msg, self.v1_finish = normalize_anthropic_response(self.resp)
        self.v2 = normalize_anthropic_response_v2(self.resp)

    def test_reasoning_matches(self):
        assert self.v2.reasoning == self.v1_msg.reasoning
        assert "think about this" in self.v2.reasoning

    def test_reasoning_details_in_provider_data(self):
        v1_details = self.v1_msg.reasoning_details
        v2_details = self.v2.provider_data.get("reasoning_details") if self.v2.provider_data else None
        assert v1_details is not None
        assert v2_details is not None
        assert len(v2_details) == len(v1_details)

    def test_content_excludes_thinking(self):
        assert self.v2.content == "The answer is 42."


class TestMixed:
    def setup_method(self):
        self.resp = _response(
            [
                _thinking_block("Planning my approach..."),
                _text_block("I'll run the command"),
                _tool_use_block("toolu_xyz", "terminal", {"command": "pwd"}),
            ],
            stop_reason="tool_use",
        )
        self.v1_msg, self.v1_finish = normalize_anthropic_response(self.resp)
        self.v2 = normalize_anthropic_response_v2(self.resp)

    def test_all_fields_present(self):
        assert self.v2.content is not None
        assert self.v2.tool_calls is not None
        assert self.v2.reasoning is not None
        assert self.v2.finish_reason == "tool_calls"

    def test_content_matches(self):
        assert self.v2.content == self.v1_msg.content

    def test_reasoning_matches(self):
        assert self.v2.reasoning == self.v1_msg.reasoning

    def test_tool_call_matches(self):
        assert self.v2.tool_calls[0].id == self.v1_msg.tool_calls[0].id
        assert self.v2.tool_calls[0].name == self.v1_msg.tool_calls[0].function.name


class TestStopReasons:
    @pytest.mark.parametrize("stop_reason,expected", [
        ("end_turn", "stop"),
        ("tool_use", "tool_calls"),
        ("max_tokens", "length"),
        ("stop_sequence", "stop"),
        ("refusal", "content_filter"),
        ("model_context_window_exceeded", "length"),
        ("unknown_future_reason", "stop"),
    ])
    def test_stop_reason_mapping(self, stop_reason, expected):
        resp = _response([_text_block("x")], stop_reason=stop_reason)
        _v1_msg, v1_finish = normalize_anthropic_response(resp)
        v2 = normalize_anthropic_response_v2(resp)
        assert v2.finish_reason == v1_finish == expected


class TestStripToolPrefix:
    def test_prefix_stripped(self):
        resp = _response(
            [_tool_use_block("toolu_1", "mcp_terminal", {"cmd": "ls"})],
            stop_reason="tool_use",
        )
        v1_msg, _ = normalize_anthropic_response(resp, strip_tool_prefix=True)
        v2 = normalize_anthropic_response_v2(resp, strip_tool_prefix=True)
        assert v1_msg.tool_calls[0].function.name == "terminal"
        assert v2.tool_calls[0].name == "terminal"

    def test_prefix_kept(self):
        resp = _response(
            [_tool_use_block("toolu_1", "mcp_terminal", {"cmd": "ls"})],
            stop_reason="tool_use",
        )
        v1_msg, _ = normalize_anthropic_response(resp, strip_tool_prefix=False)
        v2 = normalize_anthropic_response_v2(resp, strip_tool_prefix=False)
        assert v1_msg.tool_calls[0].function.name == "mcp_terminal"
        assert v2.tool_calls[0].name == "mcp_terminal"


class TestEdgeCases:
    def test_empty_content_blocks(self):
        resp = _response([])
        v1_msg, _v1_finish = normalize_anthropic_response(resp)
        v2 = normalize_anthropic_response_v2(resp)
        assert v2.content == v1_msg.content
        assert v2.content is None

    def test_no_reasoning_details_means_none_provider_data(self):
        resp = _response([_text_block("hi")])
        v2 = normalize_anthropic_response_v2(resp)
        assert v2.provider_data is None

    def test_v2_returns_dataclass_not_namespace(self):
        resp = _response([_text_block("hi")])
        v2 = normalize_anthropic_response_v2(resp)
        assert isinstance(v2, NormalizedResponse)
        assert not isinstance(v2, SimpleNamespace)
tests/agent/transports/test_transport.py (new file, 208 lines)
@@ -0,0 +1,208 @@
"""Tests for the transport ABC, registry, and AnthropicTransport."""

from types import SimpleNamespace

import pytest

from agent.transports import _REGISTRY, get_transport, register_transport
from agent.transports.base import ProviderTransport
from agent.transports.types import NormalizedResponse


class TestProviderTransportABC:
    def test_cannot_instantiate_abc(self):
        with pytest.raises(TypeError):
            ProviderTransport()

    def test_concrete_must_implement_all_abstract(self):
        class Incomplete(ProviderTransport):
            @property
            def api_mode(self):
                return "test"

        with pytest.raises(TypeError):
            Incomplete()

    def test_minimal_concrete(self):
        class Minimal(ProviderTransport):
            @property
            def api_mode(self):
                return "test_minimal"

            def convert_messages(self, messages, **kw):
                return messages

            def convert_tools(self, tools):
                return tools

            def build_kwargs(self, model, messages, tools=None, **params):
                return {"model": model, "messages": messages}

            def normalize_response(self, response, **kw):
                return NormalizedResponse(content="ok", tool_calls=None, finish_reason="stop")

        t = Minimal()
        assert t.api_mode == "test_minimal"
        assert t.validate_response(None) is True
        assert t.extract_cache_stats(None) is None
        assert t.map_finish_reason("end_turn") == "end_turn"


class TestTransportRegistry:
    def test_get_unregistered_returns_none(self):
        assert get_transport("nonexistent_mode") is None

    def test_anthropic_registered_on_import(self):
        import agent.transports.anthropic  # noqa: F401

        t = get_transport("anthropic_messages")
        assert t is not None
        assert t.api_mode == "anthropic_messages"

    def test_register_and_get(self):
        class DummyTransport(ProviderTransport):
            @property
            def api_mode(self):
                return "dummy_test"

            def convert_messages(self, messages, **kw):
                return messages

            def convert_tools(self, tools):
                return tools

            def build_kwargs(self, model, messages, tools=None, **params):
                return {}

            def normalize_response(self, response, **kw):
                return NormalizedResponse(content=None, tool_calls=None, finish_reason="stop")

        register_transport("dummy_test", DummyTransport)
        t = get_transport("dummy_test")
        assert t.api_mode == "dummy_test"
        _REGISTRY.pop("dummy_test", None)


class TestAnthropicTransport:
    @pytest.fixture
    def transport(self):
        import agent.transports.anthropic  # noqa: F401

        return get_transport("anthropic_messages")

    def test_api_mode(self, transport):
        assert transport.api_mode == "anthropic_messages"

    def test_convert_tools_simple(self, transport):
        tools = [{
            "type": "function",
            "function": {
                "name": "test_tool",
                "description": "A test",
                "parameters": {"type": "object", "properties": {}},
            },
        }]
        result = transport.convert_tools(tools)
        assert len(result) == 1
        assert result[0]["name"] == "test_tool"
        assert "input_schema" in result[0]

    def test_validate_response_none(self, transport):
        assert transport.validate_response(None) is False

    def test_validate_response_empty_content(self, transport):
        r = SimpleNamespace(content=[])
        assert transport.validate_response(r) is False

    def test_validate_response_valid(self, transport):
        r = SimpleNamespace(content=[SimpleNamespace(type="text", text="hello")])
        assert transport.validate_response(r) is True

    def test_map_finish_reason(self, transport):
        assert transport.map_finish_reason("end_turn") == "stop"
        assert transport.map_finish_reason("tool_use") == "tool_calls"
        assert transport.map_finish_reason("max_tokens") == "length"
        assert transport.map_finish_reason("stop_sequence") == "stop"
        assert transport.map_finish_reason("refusal") == "content_filter"
        assert transport.map_finish_reason("model_context_window_exceeded") == "length"
        assert transport.map_finish_reason("unknown") == "stop"

    def test_extract_cache_stats_none_usage(self, transport):
        r = SimpleNamespace(usage=None)
        assert transport.extract_cache_stats(r) is None

    def test_extract_cache_stats_with_cache(self, transport):
        usage = SimpleNamespace(cache_read_input_tokens=100, cache_creation_input_tokens=50)
        r = SimpleNamespace(usage=usage)
        result = transport.extract_cache_stats(r)
        assert result == {"cached_tokens": 100, "creation_tokens": 50}

    def test_extract_cache_stats_zero(self, transport):
        usage = SimpleNamespace(cache_read_input_tokens=0, cache_creation_input_tokens=0)
        r = SimpleNamespace(usage=usage)
        assert transport.extract_cache_stats(r) is None

    def test_normalize_response_text(self, transport):
        r = SimpleNamespace(
            content=[SimpleNamespace(type="text", text="Hello world")],
            stop_reason="end_turn",
            usage=SimpleNamespace(input_tokens=10, output_tokens=5),
            model="claude-sonnet-4-6",
        )
        nr = transport.normalize_response(r)
        assert isinstance(nr, NormalizedResponse)
        assert nr.content == "Hello world"
        assert nr.tool_calls is None or nr.tool_calls == []
        assert nr.finish_reason == "stop"

    def test_normalize_response_tool_calls(self, transport):
        r = SimpleNamespace(
            content=[
                SimpleNamespace(type="tool_use", id="toolu_123", name="terminal", input={"command": "ls"}),
            ],
            stop_reason="tool_use",
            usage=SimpleNamespace(input_tokens=10, output_tokens=20),
            model="claude-sonnet-4-6",
        )
        nr = transport.normalize_response(r)
        assert nr.finish_reason == "tool_calls"
        assert len(nr.tool_calls) == 1
        tc = nr.tool_calls[0]
        assert tc.name == "terminal"
        assert tc.id == "toolu_123"
        assert '"command"' in tc.arguments

    def test_normalize_response_thinking(self, transport):
        r = SimpleNamespace(
            content=[
                SimpleNamespace(type="thinking", thinking="Let me think..."),
                SimpleNamespace(type="text", text="The answer is 42"),
            ],
            stop_reason="end_turn",
            usage=SimpleNamespace(input_tokens=10, output_tokens=15),
            model="claude-sonnet-4-6",
        )
        nr = transport.normalize_response(r)
        assert nr.content == "The answer is 42"
        assert nr.reasoning == "Let me think..."

    def test_build_kwargs_returns_dict(self, transport):
        messages = [{"role": "user", "content": "Hello"}]
        kw = transport.build_kwargs(
            model="claude-sonnet-4-6",
            messages=messages,
            max_tokens=1024,
        )
        assert isinstance(kw, dict)
        assert "model" in kw
        assert "max_tokens" in kw
        assert "messages" in kw

    def test_convert_messages_extracts_system(self, transport):
        messages = [
            {"role": "system", "content": "You are helpful."},
            {"role": "user", "content": "Hi"},
        ]
        system, msgs = transport.convert_messages(messages)
        assert system is not None
        assert len(msgs) >= 1
tests/agent/transports/test_types.py (new file, 130 lines)
@@ -0,0 +1,130 @@
"""Tests for agent/transports/types.py — dataclass construction + helpers."""

import json

from agent.transports.types import (
    NormalizedResponse,
    ToolCall,
    Usage,
    build_tool_call,
    map_finish_reason,
)


class TestToolCall:
    def test_basic_construction(self):
        tc = ToolCall(id="call_abc", name="terminal", arguments='{"cmd": "ls"}')
        assert tc.id == "call_abc"
        assert tc.name == "terminal"
        assert tc.arguments == '{"cmd": "ls"}'
        assert tc.provider_data is None

    def test_none_id(self):
        tc = ToolCall(id=None, name="read_file", arguments="{}")
        assert tc.id is None

    def test_provider_data(self):
        tc = ToolCall(
            id="call_x",
            name="t",
            arguments="{}",
            provider_data={"call_id": "call_x", "response_item_id": "fc_x"},
        )
        assert tc.provider_data["call_id"] == "call_x"
        assert tc.provider_data["response_item_id"] == "fc_x"


class TestUsage:
    def test_defaults(self):
        u = Usage()
        assert u.prompt_tokens == 0
        assert u.completion_tokens == 0
        assert u.total_tokens == 0
        assert u.cached_tokens == 0

    def test_explicit(self):
        u = Usage(prompt_tokens=100, completion_tokens=50, total_tokens=150, cached_tokens=80)
        assert u.total_tokens == 150


class TestNormalizedResponse:
    def test_text_only(self):
        r = NormalizedResponse(content="hello", tool_calls=None, finish_reason="stop")
        assert r.content == "hello"
        assert r.tool_calls is None
        assert r.finish_reason == "stop"
        assert r.reasoning is None
        assert r.usage is None
        assert r.provider_data is None

    def test_with_tool_calls(self):
        tcs = [ToolCall(id="call_1", name="terminal", arguments='{"cmd":"pwd"}')]
        r = NormalizedResponse(content=None, tool_calls=tcs, finish_reason="tool_calls")
        assert r.finish_reason == "tool_calls"
        assert len(r.tool_calls) == 1
        assert r.tool_calls[0].name == "terminal"

    def test_with_reasoning(self):
        r = NormalizedResponse(
            content="answer",
            tool_calls=None,
            finish_reason="stop",
            reasoning="I thought about it",
        )
        assert r.reasoning == "I thought about it"

    def test_with_provider_data(self):
        r = NormalizedResponse(
            content=None,
            tool_calls=None,
            finish_reason="stop",
            provider_data={"reasoning_details": [{"type": "thinking", "thinking": "hmm"}]},
        )
        assert r.provider_data["reasoning_details"][0]["type"] == "thinking"


class TestBuildToolCall:
    def test_dict_arguments_serialized(self):
        tc = build_tool_call(id="call_1", name="terminal", arguments={"cmd": "ls"})
        assert tc.arguments == json.dumps({"cmd": "ls"})
        assert tc.provider_data is None

    def test_string_arguments_passthrough(self):
        tc = build_tool_call(id="call_2", name="read_file", arguments='{"path": "/tmp"}')
        assert tc.arguments == '{"path": "/tmp"}'

    def test_provider_fields(self):
        tc = build_tool_call(
            id="call_3",
            name="terminal",
            arguments="{}",
            call_id="call_3",
            response_item_id="fc_3",
        )
        assert tc.provider_data == {"call_id": "call_3", "response_item_id": "fc_3"}

    def test_none_id(self):
        tc = build_tool_call(id=None, name="t", arguments="{}")
        assert tc.id is None


class TestMapFinishReason:
    ANTHROPIC_MAP = {
        "end_turn": "stop",
        "tool_use": "tool_calls",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "refusal": "content_filter",
    }

    def test_known_reason(self):
        assert map_finish_reason("end_turn", self.ANTHROPIC_MAP) == "stop"
        assert map_finish_reason("tool_use", self.ANTHROPIC_MAP) == "tool_calls"
        assert map_finish_reason("max_tokens", self.ANTHROPIC_MAP) == "length"
        assert map_finish_reason("refusal", self.ANTHROPIC_MAP) == "content_filter"

    def test_unknown_reason_defaults_to_stop(self):
        assert map_finish_reason("something_new", self.ANTHROPIC_MAP) == "stop"

    def test_none_reason(self):
        assert map_finish_reason(None, self.ANTHROPIC_MAP) == "stop"
@@ -148,184 +148,3 @@ class TestStrategyNameSurfaced:
         assert count == 0
         assert strategy is None
         assert err is not None
-
-
-class TestEscapeDriftGuard:
-    """Tests for the escape-drift guard that catches bash/JSON serialization
-    artifacts where an apostrophe gets prefixed with a spurious backslash
-    in tool-call transport.
-    """
-
-    def test_drift_blocked_apostrophe(self):
-        """File has ', old_string and new_string both have \\' — classic
-        tool-call drift. Guard must block with a helpful error instead of
-        writing \\' literals into source code."""
-        content = "x = \"hello there\"\n"
-        # Simulate transport-corrupted old_string and new_string where an
-        # apostrophe-like context got prefixed with a backslash. The content
-        # itself has no apostrophe, but both strings do — matching via
-        # whitespace/anchor strategies would otherwise succeed.
-        old_string = "x = \"hello there\" # don\\'t edit\n"
-        new_string = "x = \"hi there\" # don\\'t edit\n"
-        # This particular pair won't match anything, so it exits via
-        # no-match path. Build a case where a non-exact strategy DOES match.
-        content = "line\n x = 1\nline"
-        old_string = "line\n x = \\'a\\'\nline"
-        new_string = "line\n x = \\'b\\'\nline"
-        new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
-        assert count == 0
-        assert err is not None and "Escape-drift" in err
-        assert "backslash" in err.lower()
-        assert new == content  # file untouched
-
-    def test_drift_blocked_double_quote(self):
-        """Same idea but with \\" drift instead of \\'."""
-        content = 'line\n x = 1\nline'
-        old_string = 'line\n x = \\"a\\"\nline'
-        new_string = 'line\n x = \\"b\\"\nline'
-        new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
-        assert count == 0
-        assert err is not None and "Escape-drift" in err
-
-    def test_drift_allowed_when_file_genuinely_has_backslash_escapes(self):
-        """If the file already contains \\' (e.g. inside an existing escaped
-        string), the model is legitimately preserving it. Guard must NOT
-        fire."""
-        content = "line\n x = \\'a\\'\nline"
-        old_string = "line\n x = \\'a\\'\nline"
-        new_string = "line\n x = \\'b\\'\nline"
-        new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
-        assert err is None
-        assert count == 1
-        assert "\\'b\\'" in new
-
-    def test_drift_allowed_on_exact_match(self):
-        """Exact matches bypass the drift guard entirely — if the file
-        really contains the exact bytes old_string specified, it's not
-        drift."""
-        content = "hello \\'world\\'"
-        new, count, strategy, err = fuzzy_find_and_replace(
-            content, "hello \\'world\\'", "hello \\'there\\'"
-        )
-        assert err is None
-        assert count == 1
-        assert strategy == "exact"
-
-    def test_drift_allowed_when_adding_escaped_strings(self):
-        """Model is adding new content with \\' that wasn't in the original.
-        old_string has no \\', so guard doesn't fire."""
-        content = "line1\nline2\nline3"
-        old_string = "line1\nline2\nline3"
-        new_string = "line1\nprint(\\'added\\')\nline2\nline3"
-        new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
-        assert err is None
-        assert count == 1
-        assert "\\'added\\'" in new
-
-    def test_no_drift_check_when_new_string_lacks_suspect_chars(self):
-        """Fast-path: if new_string has no \\' or \\", guard must not
-        fire even on fuzzy match."""
-        content = "def foo():\n pass"  # extra space ignored by line_trimmed
-        old_string = "def foo():\n pass"
-        new_string = "def bar():\n return 1"
-        new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
-        assert err is None
-        assert count == 1
-
-
-class TestFindClosestLines:
-    def setup_method(self):
-        from tools.fuzzy_match import find_closest_lines
-        self.find_closest_lines = find_closest_lines
-
-    def test_finds_similar_line(self):
-        content = "def foo():\n pass\ndef bar():\n return 1\n"
-        result = self.find_closest_lines("def baz():", content)
-        assert "def foo" in result or "def bar" in result
-
-    def test_returns_empty_for_no_match(self):
-        content = "completely different content here"
-        result = self.find_closest_lines("xyzzy_no_match_possible_!!!", content)
-        assert result == ""
-
-    def test_returns_empty_for_empty_inputs(self):
-        assert self.find_closest_lines("", "some content") == ""
-        assert self.find_closest_lines("old string", "") == ""
-
-    def test_includes_context_lines(self):
-        content = "line1\nline2\ndef target():\n pass\nline5\n"
-        result = self.find_closest_lines("def target():", content)
-        assert "target" in result
-
-    def test_includes_line_numbers(self):
-        content = "line1\nline2\ndef foo():\n pass\n"
-        result = self.find_closest_lines("def foo():", content)
-        # Should include line numbers in format "N| content"
-        assert "|" in result
-
-
-class TestFormatNoMatchHint:
-    """Gating tests for format_no_match_hint — the shared helper that decides
-    whether a 'Did you mean?' snippet should be appended to an error.
-    """
-
-    def setup_method(self):
-        from tools.fuzzy_match import format_no_match_hint
-        self.fmt = format_no_match_hint
-
-    def test_fires_on_could_not_find_with_match(self):
-        """Classic no-match: similar content exists → hint fires."""
-        content = "def foo():\n pass\ndef bar():\n pass\n"
-        result = self.fmt(
-            "Could not find a match for old_string in the file",
-            0, "def baz():", content,
-        )
-        assert "Did you mean" in result
-        assert "foo" in result or "bar" in result
-
-    def test_silent_on_ambiguous_match_error(self):
-        """'Found N matches' is not a missing-match failure — no hint."""
-        content = "aaa bbb aaa\n"
-        result = self.fmt(
-            "Found 2 matches for old_string. Provide more context to make it unique, or use replace_all=True.",
-            0, "aaa", content,
-        )
-        assert result == ""
-
-    def test_silent_on_escape_drift_error(self):
-        """Escape-drift errors are intentional blocks — hint would mislead."""
-        content = "x = 1\n"
-        result = self.fmt(
-            "Escape-drift detected: old_string and new_string contain the literal sequence '\\\\''...",
-            0, "x = \\'1\\'", content,
-        )
-        assert result == ""
-
-    def test_silent_on_identical_strings(self):
-        """old_string == new_string — hint irrelevant."""
-        result = self.fmt(
-            "old_string and new_string are identical",
-            0, "foo", "foo bar\n",
-        )
-        assert result == ""
-
-    def test_silent_when_match_count_nonzero(self):
-        """If match succeeded, we shouldn't be in the error path — defense in depth."""
-        result = self.fmt(
-            "Could not find a match for old_string in the file",
-            1, "foo", "foo bar\n",
-        )
-        assert result == ""
-
-    def test_silent_on_none_error(self):
-        """No error at all — no hint."""
-        result = self.fmt(None, 0, "foo", "bar\n")
-        assert result == ""
-
-    def test_silent_when_no_similar_content(self):
-        """Even for a valid no-match error, skip hint when nothing similar exists."""
-        result = self.fmt(
-            "Could not find a match for old_string in the file",
-            0, "totally_unique_xyzzy_qux", "abc\nxyz\n",
-        )
-        assert result == ""
@@ -1,114 +0,0 @@
-import json
-import os
-import textwrap
-from pathlib import Path
-
-import tools.skill_manager_tool as skill_manager_tool
-from tools.file_tools import patch_tool
-from tools.skill_manager_tool import _create_skill, _patch_skill
-
-
-def _disable_patch_tool_guards(monkeypatch):
-    monkeypatch.setattr("tools.file_tools._check_sensitive_path", lambda _path: None)
-    monkeypatch.setattr("tools.file_tools._check_file_staleness", lambda _path, _task_id: None)
-    monkeypatch.setattr("tools.file_tools._log_and_check_conflict", lambda _path, _task_id, _action: None)
-
-
-def test_patch_tool_replace_no_match_shows_rich_hint_without_legacy_hint(tmp_path, monkeypatch):
-    _disable_patch_tool_guards(monkeypatch)
-    sample = tmp_path / "sample.py"
-    sample.write_text("def foo():\n return 1\n\ndef bar():\n return 2\n", encoding="utf-8")
-
-    raw = patch_tool(
-        mode="replace",
-        path=str(sample),
-        old_string="def barycentric():",
-        new_string="def barycentric_new():",
-        task_id="qa960-replace-rich-hint",
-    )
-
-    result = json.loads(raw)
-    assert result["success"] is False
-    assert "Could not find a match" in result["error"]
-    assert "Did you mean one of these sections?" in result["error"]
-    assert "def bar():" in result["error"] or "def foo():" in result["error"]
-    assert "[Hint:" not in raw
-
-
-def test_patch_tool_replace_ambiguous_error_does_not_show_did_you_mean(tmp_path, monkeypatch):
-    _disable_patch_tool_guards(monkeypatch)
-    sample = tmp_path / "sample.py"
-    sample.write_text("aaa\nbbb\naaa\n", encoding="utf-8")
-
-    raw = patch_tool(
-        mode="replace",
-        path=str(sample),
-        old_string="aaa",
-        new_string="ccc",
-        task_id="qa960-replace-ambiguous",
-    )
-
-    result = json.loads(raw)
-    assert result["success"] is False
-    assert "Found 2 matches" in result["error"]
-    assert "Did you mean one of these sections?" not in result["error"]
-    assert "[Hint:" not in raw
-
-
-def test_patch_tool_v4a_no_match_shows_rich_hint(tmp_path, monkeypatch):
-    _disable_patch_tool_guards(monkeypatch)
-    sample = tmp_path / "sample.py"
-    sample.write_text("def foo():\n return 1\n", encoding="utf-8")
-
-    patch = textwrap.dedent(
-        f"""\
-        *** Begin Patch
-        *** Update File: {sample}
-        @@
-        -def barycentric():
-        +def barycentric_new():
-        *** End Patch
-        """
-    )
-
-    raw = patch_tool(mode="patch", patch=patch, task_id="qa960-v4a-rich-hint")
-    result = json.loads(raw)
-    assert result["success"] is False
-    assert "Patch validation failed" in result["error"]
-    assert "Did you mean one of these sections?" in result["error"]
-    assert "def foo():" in result["error"]
-
-
-def test_skill_patch_no_match_shows_rich_hint(tmp_path, monkeypatch):
-    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
-    skills_dir = tmp_path / "skills"
-    skills_dir.mkdir(parents=True, exist_ok=True)
-    monkeypatch.setattr(skill_manager_tool, "SKILLS_DIR", skills_dir)
-    monkeypatch.setattr(skill_manager_tool, "_security_scan_skill", lambda _skill_dir: None)
-
-    _create_skill(
-        "qa-skill",
-        textwrap.dedent(
-            """\
-            ---
-            name: qa-skill
-            description: test
-            ---
-
-            Step 1: Do the thing.
-            Step 2: Verify the thing.
-            """
-        ),
-    )
-
-    result = _patch_skill(
-        "qa-skill",
-        "Step 1: Do the production rollout.",
-        "Step 1: Updated.",
-    )
-
-    assert result["success"] is False
-    assert "Could not find a match" in result["error"]
-    assert "Did you mean one of these sections?" in result["error"]
-    assert "Step 1: Do the thing." in result["error"]
-    assert "file_preview" in result
tools/file_operations.py

@@ -757,14 +757,12 @@ class ShellFileOperations(FileOperations):
             content, old_string, new_string, replace_all
         )

-        if error or match_count == 0:
-            err_msg = error or f"Could not find match for old_string in {path}"
-            try:
-                from tools.fuzzy_match import format_no_match_hint
-                err_msg += format_no_match_hint(err_msg, match_count, old_string, content)
-            except Exception:
-                pass
-            return PatchResult(error=err_msg)
+        if error:
+            return PatchResult(error=error)
+
+        if match_count == 0:
+            return PatchResult(error=f"Could not find match for old_string in {path}")

         # Write back
         write_result = self.write_file(path, new_content)
         if write_result.error:
tools/file_tools.py

@@ -8,7 +8,6 @@ import os
 import threading
 import time
 from pathlib import Path
 from typing import Any, Dict, Optional
 from tools.binary_extensions import has_binary_extension
 from tools.file_operations import ShellFileOperations
 from agent.redact import redact_sensitive_text
@@ -691,11 +690,8 @@ def patch_tool(mode: str = "replace", path: str = None, old_string: str = None,
         result_json = json.dumps(result_dict, ensure_ascii=False)
         # Hint when old_string not found — saves iterations where the agent
         # retries with stale content instead of re-reading the file.
-        # Suppressed when patch_replace already attached a rich "Did you mean?"
-        # snippet (which is strictly more useful than the generic hint).
         if result_dict.get("error") and "Could not find" in str(result_dict["error"]):
-            if "Did you mean one of these sections?" not in str(result_dict["error"]):
-                result_json += "\n\n[Hint: old_string not found. Use read_file to verify the current content, or search_files to locate the text.]"
+            result_json += "\n\n[Hint: old_string not found. Use read_file to verify the current content, or search_files to locate the text.]"
         return result_json
     except Exception as e:
         return tool_error(str(e))
tools/fuzzy_match.py

@@ -93,21 +93,6 @@ def fuzzy_find_and_replace(content: str, old_string: str, new_string: str,
                 f"Provide more context to make it unique, or use replace_all=True."
             )

-            # Escape-drift guard: when the matched strategy is NOT `exact`,
-            # we matched via some form of normalization. If new_string
-            # contains shell/JSON-style escape sequences (\' or \") that
-            # would be written literally into the file but the matched
-            # region of the file has no such sequences, this is almost
-            # certainly tool-call serialization drift — the model typed
-            # an apostrophe/quote and the transport added a stray
-            # backslash. Writing new_string as-is would corrupt the file.
-            # Block with a helpful error so the model re-reads and retries
-            # instead of the caller silently persisting garbage (or not).
-            if strategy_name != "exact":
-                drift_err = _detect_escape_drift(content, matches, old_string, new_string)
-                if drift_err:
-                    return content, 0, None, drift_err
-
             # Perform replacement
             new_content = _apply_replacements(content, matches, new_string)
             return new_content, len(matches), strategy_name, None
@@ -116,46 +101,6 @@ def fuzzy_find_and_replace(content: str, old_string: str, new_string: str,
     return content, 0, None, "Could not find a match for old_string in the file"


-def _detect_escape_drift(content: str, matches: List[Tuple[int, int]],
-                         old_string: str, new_string: str) -> Optional[str]:
-    """Detect tool-call escape-drift artifacts in new_string.
-
-    Looks for ``\\'`` or ``\\"`` sequences that are present in both
-    old_string and new_string (i.e. the model copy-pasted them as "context"
-    it intended to preserve) but don't exist in the matched region of the
-    file. That pattern indicates the transport layer inserted spurious
-    shell-style escapes around apostrophes or quotes — writing new_string
-    verbatim would literally insert ``\\'`` into source code.
-
-    Returns an error string if drift is detected, None otherwise.
-    """
-    # Cheap pre-check: bail out unless new_string actually contains a
-    # suspect escape sequence. This keeps the guard free for all the
-    # common, correct cases.
-    if "\\'" not in new_string and '\\"' not in new_string:
-        return None
-
-    # Aggregate matched regions of the file — that's what new_string will
-    # replace. If the suspect escapes are present there already, the
-    # model is genuinely preserving them (valid for some languages /
-    # escaped strings); accept the patch.
-    matched_regions = "".join(content[start:end] for start, end in matches)
-
-    for suspect in ("\\'", '\\"'):
-        if suspect in new_string and suspect in old_string and suspect not in matched_regions:
-            plain = suspect[1]  # "'" or '"'
-            return (
-                f"Escape-drift detected: old_string and new_string contain "
-                f"the literal sequence {suspect!r} but the matched region of "
-                f"the file does not. This is almost always a tool-call "
-                f"serialization artifact where an apostrophe or quote got "
-                f"prefixed with a spurious backslash. Re-read the file with "
-                f"read_file and pass old_string/new_string without "
-                f"backslash-escaping {plain!r} characters."
-            )
-    return None
-
-
 def _apply_replacements(content: str, matches: List[Tuple[int, int]], new_string: str) -> str:
     """
     Apply replacements at the given positions.
@@ -619,86 +564,3 @@ def _map_normalized_positions(original: str, normalized: str,
         original_matches.append((orig_start, min(orig_end, len(original))))

     return original_matches
-
-
-def find_closest_lines(old_string: str, content: str, context_lines: int = 2, max_results: int = 3) -> str:
-    """Find lines in content most similar to old_string for "did you mean?" feedback.
-
-    Returns a formatted string showing the closest matching lines with context,
-    or empty string if no useful match is found.
-    """
-    if not old_string or not content:
-        return ""
-
-    old_lines = old_string.splitlines()
-    content_lines = content.splitlines()
-
-    if not old_lines or not content_lines:
-        return ""
-
-    # Use first line of old_string as anchor for search
-    anchor = old_lines[0].strip()
-    if not anchor:
-        # Try second line if first is blank
-        candidates = [l.strip() for l in old_lines if l.strip()]
-        if not candidates:
-            return ""
-        anchor = candidates[0]
-
-    # Score each line in content by similarity to anchor
-    scored = []
-    for i, line in enumerate(content_lines):
-        stripped = line.strip()
-        if not stripped:
-            continue
-        ratio = SequenceMatcher(None, anchor, stripped).ratio()
-        if ratio > 0.3:
-            scored.append((ratio, i))
-
-    if not scored:
-        return ""
-
-    # Take top matches
-    scored.sort(key=lambda x: -x[0])
-    top = scored[:max_results]
-
-    parts = []
-    seen_ranges = set()
-    for _, line_idx in top:
-        start = max(0, line_idx - context_lines)
-        end = min(len(content_lines), line_idx + len(old_lines) + context_lines)
-        key = (start, end)
-        if key in seen_ranges:
-            continue
-        seen_ranges.add(key)
-        snippet = "\n".join(
-            f"{start + j + 1:4d}| {content_lines[start + j]}"
-            for j in range(end - start)
-        )
-        parts.append(snippet)
-
-    if not parts:
-        return ""
-
-    return "\n---\n".join(parts)
-
-
-def format_no_match_hint(error: Optional[str], match_count: int,
-                         old_string: str, content: str) -> str:
-    """Return a '\\n\\nDid you mean...' snippet for plain no-match errors.
-
-    Gated so the hint only fires for actual "old_string not found" failures.
-    Ambiguous-match ("Found N matches"), escape-drift, and identical-strings
-    errors all have ``match_count == 0`` but a "did you mean?" snippet would
-    be misleading — those failed for unrelated reasons.
-
-    Returns an empty string when there's nothing useful to append.
-    """
-    if match_count != 0:
-        return ""
-    if not error or not error.startswith("Could not find"):
-        return ""
-    hint = find_closest_lines(old_string, content)
-    if not hint:
-        return ""
-    return "\n\nDid you mean one of these sections?\n" + hint
@@ -290,16 +290,10 @@ def _validate_operations(
             )
             if count == 0:
                 label = f"'{hunk.context_hint}'" if hunk.context_hint else "(no hint)"
-                msg = (
+                errors.append(
                     f"{op.file_path}: hunk {label} not found"
                     + (f" — {match_error}" if match_error else "")
                 )
-                try:
-                    from tools.fuzzy_match import format_no_match_hint
-                    msg += format_no_match_hint(match_error, count, search_pattern, simulated)
-                except Exception:
-                    pass
-                errors.append(msg)
             else:
                 # Advance simulation so subsequent hunks validate correctly.
                 # Reuse the result from the call above — no second fuzzy run.
@@ -543,13 +537,7 @@ def _apply_update(op: PatchOperation, file_ops: Any) -> Tuple[bool, str]:
                 error = None

             if error:
-                err_msg = f"Could not apply hunk: {error}"
-                try:
-                    from tools.fuzzy_match import format_no_match_hint
-                    err_msg += format_no_match_hint(error, 0, search_pattern, new_content)
-                except Exception:
-                    pass
-                return False, err_msg
+                return False, f"Could not apply hunk: {error}"
         else:
             # Addition-only hunk (no context or removed lines).
             # Insert at the location indicated by the context hint, or at end of file.
tools/skill_manager_tool.py

@@ -575,15 +575,9 @@ def _patch_skill(
     if match_error:
         # Show a short preview of the file so the model can self-correct
         preview = content[:500] + ("..." if len(content) > 500 else "")
-        err_msg = match_error
-        try:
-            from tools.fuzzy_match import format_no_match_hint
-            err_msg += format_no_match_hint(match_error, match_count, old_string, content)
-        except Exception:
-            pass
         return {
             "success": False,
-            "error": err_msg,
+            "error": match_error,
             "file_preview": preview,
         }