Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
| | a1b744c327 | |

run_agent.py | 152
```diff
@@ -20,6 +20,7 @@ Usage:
     response = agent.run_conversation("Tell me about the latest Python updates")
 """
 
+import ast
 import asyncio
 import base64
 import concurrent.futures
```
```diff
@@ -3328,6 +3329,119 @@ class AIAgent:
 
     _VALID_API_ROLES = frozenset({"system", "user", "assistant", "tool", "function", "developer"})
 
+    @staticmethod
+    def _normalize_tool_call_arguments(arguments: Any) -> tuple[str, bool]:
+        """Return ``(normalized_text, is_complete)`` for tool-call arguments.
+
+        Conservative by design: repairs harmless formatting quirks common in
+        Gemma 4 / Ollama output (whitespace, trailing commas, Python-style
+        single-quoted dicts, bare key/value pairs) but does NOT auto-close
+        truncated JSON objects. Truly incomplete fragments must remain marked
+        incomplete so the agent can retry instead of silently dropping fields.
+        """
+        if isinstance(arguments, (dict, list)):
+            return json.dumps(arguments, ensure_ascii=False, separators=(",", ":")), True
+        if arguments is None:
+            return "{}", True
+        if not isinstance(arguments, str):
+            arguments = str(arguments)
+
+        text = arguments.strip()
+        if not text:
+            return "{}", True
+
+        def _parse_candidate(candidate: str):
+            try:
+                return json.loads(candidate)
+            except (json.JSONDecodeError, TypeError, ValueError):
+                pass
+            try:
+                return ast.literal_eval(candidate)
+            except (SyntaxError, ValueError):
+                return None
+
+        candidates: list[str] = [text]
+
+        trimmed_trailing_commas = re.sub(r",\s*([}\]])", r"\1", text)
+        if trimmed_trailing_commas != text:
+            candidates.append(trimmed_trailing_commas)
+
+        if ":" in text and not text.startswith(("{", "[")):
+            wrapped = "{" + text + "}"
+            candidates.append(wrapped)
+            quoted_keys = re.sub(
+                r'([\{,]\s*)([A-Za-z_][A-Za-z0-9_\-]*)(\s*:)',
+                r'\1"\2"\3',
+                wrapped,
+            )
+            if quoted_keys != wrapped:
+                candidates.append(quoted_keys)
+                trimmed_quoted_keys = re.sub(r",\s*([}\]])", r"\1", quoted_keys)
+                if trimmed_quoted_keys != quoted_keys:
+                    candidates.append(trimmed_quoted_keys)
+
+        seen: set[str] = set()
+        for candidate in candidates:
+            if candidate in seen:
+                continue
+            seen.add(candidate)
+            parsed = _parse_candidate(candidate)
+            if isinstance(parsed, (dict, list)):
+                return json.dumps(parsed, ensure_ascii=False, separators=(",", ":")), True
+
+        return text, False
+
+    @staticmethod
+    def _merge_consecutive_assistant_tool_call_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Merge adjacent assistant messages that each carry tool_calls.
+
+        Some providers emit parallel tool calls as multiple consecutive assistant
+        messages instead of a single assistant message with multiple tool calls.
+        Merge only adjacent assistant/tool-call messages; any non-assistant
+        boundary flushes the current batch.
+        """
+        merged: List[Dict[str, Any]] = []
+        pending: Optional[Dict[str, Any]] = None
+
+        def _flush_pending() -> None:
+            nonlocal pending
+            if pending is not None:
+                merged.append(pending)
+                pending = None
+
+        for msg in messages:
+            if not isinstance(msg, dict):
+                _flush_pending()
+                merged.append(msg)
+                continue
+
+            role = msg.get("role")
+            tool_calls = msg.get("tool_calls")
+            if role == "assistant" and isinstance(tool_calls, list) and tool_calls:
+                if pending is None:
+                    pending = copy.deepcopy(msg)
+                    continue
+
+                pending_tool_calls = pending.get("tool_calls")
+                if not isinstance(pending_tool_calls, list):
+                    pending_tool_calls = []
+                    pending["tool_calls"] = pending_tool_calls
+                pending_tool_calls.extend(copy.deepcopy(tool_calls))
+
+                pending_content = pending.get("content") or ""
+                current_content = msg.get("content") or ""
+                if pending_content and current_content:
+                    pending["content"] = pending_content + "\n" + current_content
+                elif current_content:
+                    pending["content"] = current_content
+                continue
+
+            _flush_pending()
+            merged.append(msg)
+
+        _flush_pending()
+        return merged
+
     @staticmethod
     def _sanitize_api_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
         """Fix orphaned tool_call / tool_result pairs before every LLM call.
```
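The repair rules are easiest to see on concrete inputs. Below is a minimal sketch of the intended behavior; the input/output pairs are taken from the parametrized tests added later in this diff, and the `from run_agent import AIAgent` import path is an assumption for illustration only.

```python
# Sketch: expected (normalized_text, is_complete) results, mirroring the
# parametrized tests added further down in this diff.
from run_agent import AIAgent  # assumed import path, for illustration only

# Harmless quirks are repaired and marked complete:
assert AIAgent._normalize_tool_call_arguments(' \n {"q": "test"} \n ') == ('{"q":"test"}', True)      # whitespace
assert AIAgent._normalize_tool_call_arguments('{"q": "test",}') == ('{"q":"test"}', True)             # trailing comma
assert AIAgent._normalize_tool_call_arguments("{'q': 'test'}") == ('{"q":"test"}', True)              # single quotes
assert AIAgent._normalize_tool_call_arguments('path: "README.md"') == ('{"path":"README.md"}', True)  # bare key/value

# Truncated JSON is NOT auto-closed: the stripped text comes back flagged
# incomplete, so the caller can retry instead of silently dropping fields.
assert AIAgent._normalize_tool_call_arguments('{"path": "README.md"') == ('{"path": "README.md"', False)
```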
```diff
@@ -3347,7 +3461,7 @@ class AIAgent:
                 )
                 continue
             filtered.append(msg)
-        messages = filtered
+        messages = AIAgent._merge_consecutive_assistant_tool_call_messages(filtered)
 
         surviving_call_ids: set = set()
         for msg in messages:
```
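Hooked in like this, the merge step collapses provider-split parallel tool calls into a single assistant message before the orphan check runs. A short sketch of the intended effect, reusing the message shapes from the `TestSanitizeApiMessages` tests added below:

```python
# Sketch: two consecutive assistant tool-call messages merge into one;
# message shapes are taken from the TestSanitizeApiMessages tests in this diff.
messages = [
    {"role": "assistant", "content": "first",
     "tool_calls": [{"id": "c1", "type": "function",
                     "function": {"name": "read_file", "arguments": '{"path":"a.py"}'}}]},
    {"role": "assistant", "content": "second",
     "tool_calls": [{"id": "c2", "type": "function",
                     "function": {"name": "search_files", "arguments": '{"pattern":"TODO"}'}}]},
    {"role": "tool", "tool_call_id": "c1", "content": "a.py"},
    {"role": "tool", "tool_call_id": "c2", "content": "matches"},
]

sanitized = AIAgent._sanitize_api_messages(messages)

# One assistant message now carries both calls, contents joined with a newline,
# and both tool results keep their pairing, so nothing gets dropped as orphaned.
assert len(sanitized) == 3
assert [tc["id"] for tc in sanitized[0]["tool_calls"]] == ["c1", "c2"]
assert sanitized[0]["content"] == "first\nsecond"
```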
```diff
@@ -5254,12 +5368,9 @@ class AIAgent:
             mock_tool_calls = []
             for idx in sorted(tool_calls_acc):
                 tc = tool_calls_acc[idx]
-                arguments = tc["function"]["arguments"]
-                if arguments and arguments.strip():
-                    try:
-                        json.loads(arguments)
-                    except json.JSONDecodeError:
-                        has_truncated_tool_args = True
+                arguments, is_complete = self._normalize_tool_call_arguments(tc["function"]["arguments"])
+                if not is_complete:
+                    has_truncated_tool_args = True
                 mock_tool_calls.append(SimpleNamespace(
                     id=tc["id"],
                     type=tc["type"],
```
```diff
@@ -6563,6 +6674,7 @@ class AIAgent:
                 response_item_id if isinstance(response_item_id, str) else None,
             )
 
+            normalized_args, _ = self._normalize_tool_call_arguments(tool_call.function.arguments)
             tc_dict = {
                 "id": call_id,
                 "call_id": call_id,
@@ -6570,7 +6682,7 @@
                 "type": tool_call.type,
                 "function": {
                     "name": tool_call.function.name,
-                    "arguments": tool_call.function.arguments
+                    "arguments": normalized_args,
                 },
             }
             # Preserve extra_content (e.g. Gemini thought_signature) so it
```
```diff
@@ -10031,21 +10143,15 @@ class AIAgent:
         # Handle empty strings as empty objects (common model quirk)
         invalid_json_args = []
         for tc in assistant_message.tool_calls:
-            args = tc.function.arguments
-            if isinstance(args, (dict, list)):
-                tc.function.arguments = json.dumps(args)
-                continue
-            if args is not None and not isinstance(args, str):
-                tc.function.arguments = str(args)
-                args = tc.function.arguments
-            # Treat empty/whitespace strings as empty object
-            if not args or not args.strip():
-                tc.function.arguments = "{}"
-                continue
-            try:
-                json.loads(args)
-            except json.JSONDecodeError as e:
-                invalid_json_args.append((tc.function.name, str(e)))
+            normalized_args, is_complete = self._normalize_tool_call_arguments(tc.function.arguments)
+            tc.function.arguments = normalized_args
+            if not is_complete:
+                try:
+                    json.loads(normalized_args)
+                except json.JSONDecodeError as e:
+                    invalid_json_args.append((tc.function.name, str(e)))
+                except Exception as e:
+                    invalid_json_args.append((tc.function.name, str(e)))
 
         if invalid_json_args:
             # Check if the invalid JSON is due to truncation rather
```
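For arguments that are genuinely truncated, the rewritten loop keeps the old error reporting: normalization returns the stripped text unrepaired and flagged incomplete, and the follow-up `json.loads` records the failure. A sketch of that path, under the same assumed `AIAgent` import as the earlier sketch (the tool name here is hypothetical):

```python
import json

# Sketch: a truncated fragment is returned stripped-but-unrepaired and flagged
# incomplete (this exact input appears in the incomplete-args tests below).
normalized, is_complete = AIAgent._normalize_tool_call_arguments('{"command": "echo hello",')
assert (normalized, is_complete) == ('{"command": "echo hello",', False)

# Because is_complete is False, the loop re-runs json.loads() and collects the
# tool name plus decode error, feeding the truncation check that follows.
invalid_json_args = []
try:
    json.loads(normalized)
except json.JSONDecodeError as e:
    invalid_json_args.append(("execute_shell", str(e)))  # "execute_shell" is a made-up tool name
assert invalid_json_args, "truncated args must surface as invalid JSON"
```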
Mode changes (0 changed lines each, Executable file → Normal file):

skills/creative/excalidraw/scripts/upload.py
skills/leisure/find-nearby/scripts/find_nearby.py
skills/media/youtube-content/scripts/fetch_transcript.py
skills/productivity/google-workspace/scripts/google_api.py
skills/productivity/google-workspace/scripts/setup.py
skills/productivity/ocr-and-documents/scripts/extract_marker.py
skills/productivity/ocr-and-documents/scripts/extract_pymupdf.py
skills/red-teaming/godmode/scripts/auto_jailbreak.py
skills/red-teaming/godmode/scripts/godmode_race.py
skills/red-teaming/godmode/scripts/parseltongue.py
skills/research/arxiv/scripts/search_arxiv.py
skills/research/polymarket/scripts/polymarket.py
```diff
@@ -1037,6 +1037,138 @@ class TestBuildAssistantMessage:
         result = agent._build_assistant_message(msg, "tool_calls")
         assert "extra_content" not in result["tool_calls"][0]
 
+    def test_tool_call_arguments_normalized_from_gemma4_whitespace(self, agent):
+        tc = _mock_tool_call(
+            name="read_file",
+            arguments=' \n {"path": "README.md"} \n ',
+            call_id="c4",
+        )
+        msg = _mock_assistant_msg(content="", tool_calls=[tc])
+        result = agent._build_assistant_message(msg, "tool_calls")
+        assert result["tool_calls"][0]["function"]["arguments"] == '{"path":"README.md"}'
+
+    def test_tool_call_arguments_normalized_from_single_quotes_and_trailing_comma(self, agent):
+        tc = _mock_tool_call(
+            name="read_file",
+            arguments="{'path': 'README.md',}",
+            call_id="c5",
+        )
+        msg = _mock_assistant_msg(content="", tool_calls=[tc])
+        result = agent._build_assistant_message(msg, "tool_calls")
+        assert result["tool_calls"][0]["function"]["arguments"] == '{"path":"README.md"}'
+
+
+class TestNormalizeToolCallArguments:
+    @pytest.mark.parametrize(
+        ("raw_args", "expected"),
+        [
+            ('{"q":"test"}', '{"q":"test"}'),
+            (' \n {"q": "test"} \n ', '{"q":"test"}'),
+            ('{"q": "test",}', '{"q":"test"}'),
+            ("{'q': 'test'}", '{"q":"test"}'),
+            ("{'path': 'README.md', 'mode': 'read'}", '{"path":"README.md","mode":"read"}'),
+            ('"path": "README.md"', '{"path":"README.md"}'),
+            ('path: "README.md"', '{"path":"README.md"}'),
+            ('path: "README.md", mode: "read"', '{"path":"README.md","mode":"read"}'),
+            ({"path": "README.md"}, '{"path":"README.md"}'),
+            (["README.md", "docs.md"], '["README.md","docs.md"]'),
+            ('\t\n ', '{}'),
+            ('{"nested": {"path": "README.md"}}', '{"nested":{"path":"README.md"}}'),
+        ],
+    )
+    def test_complete_args_are_normalized(self, raw_args, expected):
+        normalized, is_complete = AIAgent._normalize_tool_call_arguments(raw_args)
+        assert is_complete is True
+        assert normalized == expected
+
+    @pytest.mark.parametrize(
+        "raw_args",
+        [
+            '{"path": "README.md"',
+            '{"a": 1, "b"',
+            '{"path": [1, 2}',
+            "{'path': 'README.md'",
+            'path: "README.md", mode:',
+            '{"command": "echo hello",',
+        ],
+    )
+    def test_incomplete_args_are_not_marked_complete(self, raw_args):
+        normalized, is_complete = AIAgent._normalize_tool_call_arguments(raw_args)
+        assert is_complete is False
+        assert isinstance(normalized, str)
+        assert normalized == raw_args.strip()
+
+
+class TestSanitizeApiMessages:
+    def test_merges_consecutive_assistant_tool_call_messages(self):
+        messages = [
+            {
+                "role": "assistant",
+                "content": "first",
+                "tool_calls": [{"id": "c1", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"a.py"}'}}],
+            },
+            {
+                "role": "assistant",
+                "content": "second",
+                "tool_calls": [{"id": "c2", "type": "function", "function": {"name": "search_files", "arguments": '{"pattern":"TODO"}'}}],
+            },
+            {"role": "tool", "tool_call_id": "c1", "content": "a.py"},
+            {"role": "tool", "tool_call_id": "c2", "content": "matches"},
+        ]
+
+        sanitized = AIAgent._sanitize_api_messages(messages)
+
+        assert len(sanitized) == 3
+        assert sanitized[0]["role"] == "assistant"
+        assert [tc["id"] for tc in sanitized[0]["tool_calls"]] == ["c1", "c2"]
+        assert sanitized[0]["content"] == "first\nsecond"
+
+    def test_does_not_merge_assistant_tool_call_messages_across_non_assistant_boundary(self):
+        messages = [
+            {
+                "role": "assistant",
+                "content": "",
+                "tool_calls": [{"id": "c1", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"a.py"}'}}],
+            },
+            {"role": "tool", "tool_call_id": "c1", "content": "a.py"},
+            {
+                "role": "assistant",
+                "content": "",
+                "tool_calls": [{"id": "c2", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"b.py"}'}}],
+            },
+            {"role": "tool", "tool_call_id": "c2", "content": "b.py"},
+        ]
+
+        sanitized = AIAgent._sanitize_api_messages(messages)
+
+        assistant_msgs = [m for m in sanitized if m.get("role") == "assistant"]
+        assert len(assistant_msgs) == 2
+        assert assistant_msgs[0]["tool_calls"][0]["id"] == "c1"
+        assert assistant_msgs[1]["tool_calls"][0]["id"] == "c2"
+
+    def test_merge_preserves_tool_call_order(self):
+        messages = [
+            {
+                "role": "assistant",
+                "content": "",
+                "tool_calls": [{"id": "c1", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"a.py"}'}}],
+            },
+            {
+                "role": "assistant",
+                "content": "",
+                "tool_calls": [{"id": "c2", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"b.py"}'}}],
+            },
+            {
+                "role": "assistant",
+                "content": "",
+                "tool_calls": [{"id": "c3", "type": "function", "function": {"name": "read_file", "arguments": '{"path":"c.py"}'}}],
+            },
+        ]
+
+        sanitized = AIAgent._sanitize_api_messages(messages)
+
+        assert [tc["id"] for tc in sanitized[0]["tool_calls"]] == ["c1", "c2", "c3"]
+
 
 class TestFormatToolsForSystemMessage:
     def test_no_tools_returns_empty_array(self, agent):
```
```diff
@@ -3467,6 +3599,59 @@ class TestStreamingApiCall:
         assert tc[0].function.arguments == '{"path":"x.txt","content":"hel'
         assert resp.choices[0].finish_reason == "length"
 
+    @pytest.mark.parametrize(
+        ("raw_arguments", "expected"),
+        [
+            (' \n {"path": "x.txt"} \n ', '{"path":"x.txt"}'),
+            ("{'path': 'x.txt',}", '{"path":"x.txt"}'),
+            ('path: "x.txt", mode: "read"', '{"path":"x.txt","mode":"read"}'),
+        ],
+    )
+    def test_repairable_tool_call_args_do_not_upgrade_finish_reason_to_length(self, agent, raw_arguments, expected):
+        chunks = [
+            _make_chunk(tool_calls=[_make_tc_delta(0, "call_1", "read_file", raw_arguments)]),
+            _make_chunk(finish_reason="tool_calls"),
+        ]
+        agent.client.chat.completions.create.return_value = iter(chunks)
+
+        resp = agent._interruptible_streaming_api_call({"messages": []})
+
+        tc = resp.choices[0].message.tool_calls
+        assert len(tc) == 1
+        assert tc[0].function.name == "read_file"
+        assert tc[0].function.arguments == expected
+        assert resp.choices[0].finish_reason == "tool_calls"
+
+    def test_streamed_tool_call_args_single_quotes_across_chunks_normalized(self, agent):
+        chunks = [
+            _make_chunk(tool_calls=[_make_tc_delta(0, "call_1", "read_file", "{'path':")]),
+            _make_chunk(tool_calls=[_make_tc_delta(0, None, None, " 'x.txt',}")]),
+            _make_chunk(finish_reason="tool_calls"),
+        ]
+        agent.client.chat.completions.create.return_value = iter(chunks)
+
+        resp = agent._interruptible_streaming_api_call({"messages": []})
+
+        tc = resp.choices[0].message.tool_calls
+        assert len(tc) == 1
+        assert tc[0].function.arguments == '{"path":"x.txt"}'
+        assert resp.choices[0].finish_reason == "tool_calls"
+
+    def test_streamed_split_json_chunks_still_reassemble(self, agent):
+        chunks = [
+            _make_chunk(tool_calls=[_make_tc_delta(0, "call_1", "read_file", '{"path":')]),
+            _make_chunk(tool_calls=[_make_tc_delta(0, None, None, ' "x.txt"}')]),
+            _make_chunk(finish_reason="tool_calls"),
+        ]
+        agent.client.chat.completions.create.return_value = iter(chunks)
+
+        resp = agent._interruptible_streaming_api_call({"messages": []})
+
+        tc = resp.choices[0].message.tool_calls
+        assert len(tc) == 1
+        assert tc[0].function.arguments == '{"path":"x.txt"}'
+        assert resp.choices[0].finish_reason == "tool_calls"
+
     def test_ollama_reused_index_separate_tool_calls(self, agent):
         """Ollama sends every tool call at index 0 with different ids.
```
```diff
@@ -1,63 +0,0 @@
-"""Regression tests for bundled skill scripts and local shell execution.
-
-Issue #953 verifies that bundled skill scripts run out of the box from the
-installed ~/.hermes/skills tree without manual chmod or PATH surgery.
-"""
-
-import shlex
-import shutil
-import stat
-from pathlib import Path
-
-from tools.environments.local import LocalEnvironment
-
-
-REPO_ROOT = Path(__file__).resolve().parents[2]
-SKILLS_ROOT = REPO_ROOT / "skills"
-
-
-def _bundled_shebang_scripts() -> list[Path]:
-    scripts: list[Path] = []
-    for path in SKILLS_ROOT.rglob("*"):
-        if not path.is_file() or path.is_symlink() or "scripts" not in path.parts:
-            continue
-        first_line = path.read_bytes().splitlines()[:1]
-        if first_line and first_line[0].startswith(b"#!"):
-            scripts.append(path)
-    return sorted(scripts)
-
-
-def test_bundled_skill_shebang_scripts_are_executable():
-    missing = []
-    for path in _bundled_shebang_scripts():
-        mode = stat.S_IMODE(path.stat().st_mode)
-        if mode & 0o111 == 0:
-            missing.append(f"{path.relative_to(REPO_ROOT)} ({oct(mode)})")
-
-    assert not missing, (
-        "Bundled shebang scripts must ship executable so synced skill copies run "
-        "without manual chmod:\n" + "\n".join(missing)
-    )
-
-
-def test_local_environment_executes_installed_skill_script_without_manual_prep(tmp_path):
-    hermes_home = tmp_path / ".hermes"
-    installed_skill = hermes_home / "skills" / "research" / "arxiv"
-    installed_skill.parent.mkdir(parents=True, exist_ok=True)
-    shutil.copytree(SKILLS_ROOT / "research" / "arxiv", installed_skill)
-
-    script_path = installed_skill / "scripts" / "search_arxiv.py"
-    env = LocalEnvironment(
-        cwd=str(tmp_path),
-        timeout=15,
-        env={
-            "HERMES_HOME": str(hermes_home),
-            "PATH": "/custom/bin",
-        },
-    )
-
-    result = env.execute(f"{shlex.quote(str(script_path))} --help")
-
-    assert result["returncode"] == 0, result["output"]
-    assert "Search arXiv and display results in a clean format." in result["output"]
-    assert "python search_arxiv.py" in result["output"]
```