fix(anthropic): final polish — tool ID sanitization, crash guards, temp=1

Remaining issues from deep scan:

Adapter (agent/anthropic_adapter.py):
- Add _sanitize_tool_id() — Anthropic requires IDs matching [a-zA-Z0-9_-],
  now strips invalid chars and ensures non-empty (both tool_use and tool_result)
- Empty tool result content → '(no output)' placeholder (Anthropic rejects empty)
- Set temperature=1 when thinking type='enabled' on older models (required)
- normalize_model_name now case-insensitive for 'Anthropic/' prefix
- Fix stale docstrings referencing only ~/.claude/.credentials.json

Agent loop (run_agent.py):
- Guard memory flush path (line ~2684) — was calling self.client.chat.completions
  which is None in anthropic_messages mode. Now routes through Anthropic client.
- Guard summary generation path (line ~3171) — same crash when reaching
  iteration limit. Now builds proper Anthropic kwargs and normalizes response.
- Guard retry summary path (line ~3200) — same fix for the summary retry loop.

All three self.client.chat.completions.create() calls outside the main
loop now have anthropic_messages branches to prevent NoneType crashes.
This commit is contained in:
teknium1
2026-03-12 17:23:09 -07:00
parent 4068f20ce9
commit aaaba78126
2 changed files with 57 additions and 13 deletions

View File

@@ -7,7 +7,7 @@ adapter — all provider-specific logic is isolated here.
Auth supports:
- Regular API keys (sk-ant-api*) → x-api-key header
- OAuth setup-tokens (sk-ant-oat*) → Bearer auth + beta header
- Claude Code credentials (~/.claude/.credentials.json) → Bearer auth
- Claude Code credentials (~/.claude.json or ~/.claude/.credentials.json) → Bearer auth
"""
import json
@@ -150,7 +150,7 @@ def resolve_anthropic_token() -> Optional[str]:
Priority:
1. ANTHROPIC_API_KEY env var (regular API key)
2. ANTHROPIC_TOKEN env var (OAuth/setup token)
3. Claude Code credentials (~/.claude/.credentials.json)
3. Claude Code credentials (~/.claude.json or ~/.claude/.credentials.json)
Returns the token string or None.
"""
@@ -172,7 +172,7 @@ def resolve_anthropic_token() -> Optional[str]:
# 3. Claude Code credential file
creds = read_claude_code_credentials()
if creds and is_claude_code_token_valid(creds):
logger.debug("Using Claude Code credentials from ~/.claude/.credentials.json")
logger.debug("Using Claude Code credentials (auto-detected)")
return creds["accessToken"]
elif creds:
logger.debug("Claude Code credentials expired — run 'claude' to refresh")
@@ -188,13 +188,27 @@ def resolve_anthropic_token() -> Optional[str]:
def normalize_model_name(model: str) -> str:
    """Normalize a model name for the Anthropic API.

    - Strips the 'anthropic/' prefix (OpenRouter format), matched
      case-insensitively so 'Anthropic/...' and 'ANTHROPIC/...' also work.

    Args:
        model: Raw model identifier, possibly in OpenRouter 'anthropic/<name>' form.

    Returns:
        The bare model name with any provider prefix removed.
    """
    # NOTE: compare against a lowercased copy but slice the ORIGINAL string,
    # so the model name's own casing is preserved.
    lower = model.lower()
    if lower.startswith("anthropic/"):
        model = model[len("anthropic/"):]
    return model
def _sanitize_tool_id(tool_id: str) -> str:
"""Sanitize a tool call ID for the Anthropic API.
Anthropic requires IDs matching [a-zA-Z0-9_-]. Replace invalid
characters with underscores and ensure non-empty.
"""
import re
if not tool_id:
return "tool_0"
sanitized = re.sub(r"[^a-zA-Z0-9_-]", "_", tool_id)
return sanitized or "tool_0"
def convert_tools_to_anthropic(tools: List[Dict]) -> List[Dict]:
"""Convert OpenAI tool definitions to Anthropic format."""
if not tools:
@@ -256,7 +270,7 @@ def convert_messages_to_anthropic(
parsed_args = {}
blocks.append({
"type": "tool_use",
"id": tc.get("id", ""),
"id": _sanitize_tool_id(tc.get("id", "")),
"name": fn.get("name", ""),
"input": parsed_args,
})
@@ -268,10 +282,14 @@ def convert_messages_to_anthropic(
continue
if role == "tool":
# Sanitize tool_use_id and ensure non-empty content
result_content = content if isinstance(content, str) else json.dumps(content)
if not result_content:
result_content = "(no output)"
tool_result = {
"type": "tool_result",
"tool_use_id": m.get("tool_call_id", ""),
"content": content if isinstance(content, str) else json.dumps(content),
"tool_use_id": _sanitize_tool_id(m.get("tool_call_id", "")),
"content": result_content,
}
# Merge consecutive tool results into one user message
if (
@@ -391,6 +409,8 @@ def build_anthropic_kwargs(
kwargs["thinking"] = {"type": "adaptive", "budget_tokens": budget}
else:
kwargs["thinking"] = {"type": "enabled", "budget_tokens": budget}
# Anthropic requires temperature=1 when thinking is enabled on older models
kwargs["temperature"] = 1
kwargs["max_tokens"] = max(effective_max_tokens, budget + 4096)
return kwargs

View File

@@ -2673,6 +2673,15 @@ class AIAgent:
if "max_output_tokens" in codex_kwargs:
codex_kwargs["max_output_tokens"] = 5120
response = self._run_codex_stream(codex_kwargs)
elif not _aux_available and self.api_mode == "anthropic_messages":
# Native Anthropic — use the Anthropic client directly
from agent.anthropic_adapter import build_anthropic_kwargs as _build_ant_kwargs
ant_kwargs = _build_ant_kwargs(
model=self.model, messages=api_messages,
tools=[memory_tool_def], max_tokens=5120,
reasoning_config=None,
)
response = self._anthropic_client.messages.create(**ant_kwargs)
elif not _aux_available:
api_kwargs = {
"model": self.model,
@@ -3158,12 +3167,20 @@ class AIAgent:
if summary_extra_body:
summary_kwargs["extra_body"] = summary_extra_body
summary_response = self.client.chat.completions.create(**summary_kwargs)
if summary_response.choices and summary_response.choices[0].message.content:
final_response = summary_response.choices[0].message.content
if self.api_mode == "anthropic_messages":
from agent.anthropic_adapter import build_anthropic_kwargs as _bak, normalize_anthropic_response as _nar
_ant_kw = _bak(model=self.model, messages=api_messages, tools=None,
max_tokens=self.max_tokens, reasoning_config=self.reasoning_config)
summary_response = self._anthropic_client.messages.create(**_ant_kw)
_msg, _ = _nar(summary_response)
final_response = (_msg.content or "").strip()
else:
final_response = ""
summary_response = self.client.chat.completions.create(**summary_kwargs)
if summary_response.choices and summary_response.choices[0].message.content:
final_response = summary_response.choices[0].message.content
else:
final_response = ""
if final_response:
if "<think>" in final_response:
@@ -3180,6 +3197,13 @@ class AIAgent:
retry_response = self._run_codex_stream(codex_kwargs)
retry_msg, _ = self._normalize_codex_response(retry_response)
final_response = (retry_msg.content or "").strip() if retry_msg else ""
elif self.api_mode == "anthropic_messages":
from agent.anthropic_adapter import build_anthropic_kwargs as _bak2, normalize_anthropic_response as _nar2
_ant_kw2 = _bak2(model=self.model, messages=api_messages, tools=None,
max_tokens=self.max_tokens, reasoning_config=self.reasoning_config)
retry_response = self._anthropic_client.messages.create(**_ant_kw2)
_retry_msg, _ = _nar2(retry_response)
final_response = (_retry_msg.content or "").strip()
else:
summary_kwargs = {
"model": self.model,