More major refactor/tech debt removal!
This commit is contained in:
2510
model_tools.py
2510
model_tools.py
File diff suppressed because it is too large
Load Diff
@@ -1640,3 +1640,93 @@ if __name__ == "__main__":
|
||||
print(" from tools.browser_tool import browser_navigate, browser_snapshot")
|
||||
print(" result = browser_navigate('https://example.com', task_id='my_task')")
|
||||
print(" snapshot = browser_snapshot(task_id='my_task')")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Registry
# ---------------------------------------------------------------------------
from tools.registry import registry

# Fast name -> schema lookup for the browser toolset.
_BROWSER_SCHEMA_MAP = {s["name"]: s for s in BROWSER_TOOL_SCHEMAS}

# name -> handler adapter. Every browser tool shares the same toolset,
# availability check, and env-var requirements, so the per-tool boilerplate
# is collapsed into one registration loop instead of ten near-identical
# registry.register() calls.
_BROWSER_HANDLERS = {
    "browser_navigate": lambda args, **kw: browser_navigate(
        url=args.get("url", ""), task_id=kw.get("task_id")),
    "browser_snapshot": lambda args, **kw: browser_snapshot(
        full=args.get("full", False), task_id=kw.get("task_id"), user_task=kw.get("user_task")),
    "browser_click": lambda args, **kw: browser_click(**args, task_id=kw.get("task_id")),
    "browser_type": lambda args, **kw: browser_type(**args, task_id=kw.get("task_id")),
    "browser_scroll": lambda args, **kw: browser_scroll(**args, task_id=kw.get("task_id")),
    "browser_back": lambda args, **kw: browser_back(task_id=kw.get("task_id")),
    "browser_press": lambda args, **kw: browser_press(
        key=args.get("key", ""), task_id=kw.get("task_id")),
    "browser_close": lambda args, **kw: browser_close(task_id=kw.get("task_id")),
    "browser_get_images": lambda args, **kw: browser_get_images(task_id=kw.get("task_id")),
    "browser_vision": lambda args, **kw: browser_vision(
        question=args.get("question", ""), task_id=kw.get("task_id")),
}

for _name, _handler in _BROWSER_HANDLERS.items():
    registry.register(
        name=_name,
        toolset="browser",
        schema=_BROWSER_SCHEMA_MAP[_name],
        handler=_handler,
        check_fn=check_browser_requirements,
        requires_env=["BROWSERBASE_API_KEY", "BROWSERBASE_PROJECT_ID"],
    )
|
||||
|
||||
@@ -123,3 +123,18 @@ CLARIFY_SCHEMA = {
|
||||
"required": ["question"],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
# --- Registry ---
from tools.registry import registry


def _handle_clarify(args, **kw):
    """Registry adapter: unpack the tool-call args and forward to clarify_tool."""
    return clarify_tool(
        question=args.get("question", ""),
        choices=args.get("choices"),
        callback=kw.get("callback"),
    )


registry.register(
    name="clarify",
    toolset="clarify",
    schema=CLARIFY_SCHEMA,
    handler=_handle_clarify,
    check_fn=check_clarify_requirements,
)
|
||||
|
||||
@@ -581,3 +581,18 @@ EXECUTE_CODE_SCHEMA = {
|
||||
"required": ["code"],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
# --- Registry ---
from tools.registry import registry


def _handle_execute_code(args, **kw):
    """Registry adapter: forward the code plus per-task context to execute_code."""
    return execute_code(
        code=args.get("code", ""),
        task_id=kw.get("task_id"),
        enabled_tools=kw.get("enabled_tools"),
    )


registry.register(
    name="execute_code",
    toolset="code_execution",
    schema=EXECUTE_CODE_SCHEMA,
    handler=_handle_execute_code,
    check_fn=check_sandbox_requirements,
)
|
||||
|
||||
@@ -377,3 +377,39 @@ if __name__ == "__main__":
|
||||
print("\nTesting list_cronjobs:")
|
||||
result = list_cronjobs()
|
||||
print(result)
|
||||
|
||||
|
||||
# --- Registry ---
from tools.registry import registry


def _handle_schedule_cronjob(args, **kw):
    """Registry adapter for schedule_cronjob."""
    return schedule_cronjob(
        prompt=args.get("prompt", ""),
        schedule=args.get("schedule", ""),
        name=args.get("name"),
        repeat=args.get("repeat"),
        deliver=args.get("deliver"),
        task_id=kw.get("task_id"),
    )


def _handle_list_cronjobs(args, **kw):
    """Registry adapter for list_cronjobs."""
    return list_cronjobs(
        include_disabled=args.get("include_disabled", False),
        task_id=kw.get("task_id"),
    )


def _handle_remove_cronjob(args, **kw):
    """Registry adapter for remove_cronjob."""
    return remove_cronjob(
        job_id=args.get("job_id", ""),
        task_id=kw.get("task_id"),
    )


registry.register(
    name="schedule_cronjob",
    toolset="cronjob",
    schema=SCHEDULE_CRONJOB_SCHEMA,
    handler=_handle_schedule_cronjob,
    check_fn=check_cronjob_requirements,
)
registry.register(
    name="list_cronjobs",
    toolset="cronjob",
    schema=LIST_CRONJOBS_SCHEMA,
    handler=_handle_list_cronjobs,
    check_fn=check_cronjob_requirements,
)
registry.register(
    name="remove_cronjob",
    toolset="cronjob",
    schema=REMOVE_CRONJOB_SCHEMA,
    handler=_handle_remove_cronjob,
    check_fn=check_cronjob_requirements,
)
|
||||
|
||||
@@ -421,3 +421,22 @@ DELEGATE_TASK_SCHEMA = {
|
||||
"required": [],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
# --- Registry ---
from tools.registry import registry


def _handle_delegate_task(args, **kw):
    """Registry adapter for delegate_task; forwards parent_agent from context."""
    return delegate_task(
        goal=args.get("goal"),
        context=args.get("context"),
        toolsets=args.get("toolsets"),
        tasks=args.get("tasks"),
        model=args.get("model"),
        max_iterations=args.get("max_iterations"),
        parent_agent=kw.get("parent_agent"),
    )


registry.register(
    name="delegate_task",
    toolset="delegation",
    schema=DELEGATE_TASK_SCHEMA,
    handler=_handle_delegate_task,
    check_fn=check_delegate_requirements,
)
|
||||
|
||||
@@ -184,3 +184,113 @@ FILE_TOOLS = [
|
||||
def get_file_tools():
    """Return the module-level FILE_TOOLS list of file tool definitions."""
    return FILE_TOOLS
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Schemas + Registry
# ---------------------------------------------------------------------------
from tools.registry import registry


def _check_file_reqs():
    """Lazily import and run check_file_requirements.

    The import lives inside the function body to break the circular
    import between this module and tools/__init__.py.
    """
    from tools import check_file_requirements
    return check_file_requirements()
|
||||
|
||||
# JSON schemas for the file toolset, in OpenAI function-calling format.

READ_FILE_SCHEMA = {
    "name": "read_file",
    "description": "Read a file with line numbers and pagination. Output format: 'LINE_NUM|CONTENT'. Suggests similar filenames if not found. Images (png/jpg/gif/webp) returned as base64. Use offset and limit for large files.",
    "parameters": {
        "type": "object",
        "properties": {
            "path": {
                "type": "string",
                "description": "Path to the file to read (absolute, relative, or ~/path)",
            },
            "offset": {
                "type": "integer",
                "description": "Line number to start reading from (1-indexed, default: 1)",
                "default": 1,
                "minimum": 1,
            },
            "limit": {
                "type": "integer",
                "description": "Maximum number of lines to read (default: 500, max: 2000)",
                "default": 500,
                "maximum": 2000,
            },
        },
        "required": ["path"],
    },
}

WRITE_FILE_SCHEMA = {
    "name": "write_file",
    "description": "Write content to a file, completely replacing existing content. Creates parent directories automatically. OVERWRITES the entire file — use 'patch' for targeted edits.",
    "parameters": {
        "type": "object",
        "properties": {
            "path": {
                "type": "string",
                "description": "Path to the file to write (will be created if it doesn't exist, overwritten if it does)",
            },
            "content": {
                "type": "string",
                "description": "Complete content to write to the file",
            },
        },
        "required": ["path", "content"],
    },
}

PATCH_SCHEMA = {
    "name": "patch",
    "description": "Targeted find-and-replace edits in files. Uses fuzzy matching (9 strategies) so minor whitespace/indentation differences won't break it. Returns a unified diff. Auto-runs syntax checks after editing.\n\nReplace mode (default): find a unique string and replace it.\nPatch mode: apply V4A multi-file patches for bulk changes.",
    "parameters": {
        "type": "object",
        "properties": {
            "mode": {
                "type": "string",
                "enum": ["replace", "patch"],
                "description": "Edit mode: 'replace' for targeted find-and-replace, 'patch' for V4A multi-file patches",
                "default": "replace",
            },
            "path": {
                "type": "string",
                "description": "File path to edit (required for 'replace' mode)",
            },
            "old_string": {
                "type": "string",
                "description": "Text to find in the file (required for 'replace' mode). Must be unique in the file unless replace_all=true. Include enough surrounding context to ensure uniqueness.",
            },
            "new_string": {
                "type": "string",
                "description": "Replacement text (required for 'replace' mode). Can be empty string to delete the matched text.",
            },
            "replace_all": {
                "type": "boolean",
                "description": "Replace all occurrences instead of requiring a unique match (default: false)",
                "default": False,
            },
            "patch": {
                "type": "string",
                "description": "V4A format patch content (required for 'patch' mode). Format:\n*** Begin Patch\n*** Update File: path/to/file\n@@ context hint @@\n context line\n-removed line\n+added line\n*** End Patch",
            },
        },
        "required": ["mode"],
    },
}

SEARCH_FILES_SCHEMA = {
    "name": "search_files",
    "description": "Search file contents or find files by name. Ripgrep-backed, faster than grep/rg/find in the terminal.\n\nContent search (target='content'): Regex search inside files. Output modes: full matches with line numbers, file paths only, or match counts.\n\nFile search (target='files'): Find files by glob pattern (e.g., '*.py', '*config*'). Results sorted by modification time.",
    "parameters": {
        "type": "object",
        "properties": {
            "pattern": {
                "type": "string",
                "description": "Regex pattern for content search, or glob pattern (e.g., '*.py') for file search",
            },
            "target": {
                "type": "string",
                "enum": ["content", "files"],
                "description": "'content' searches inside file contents, 'files' searches for files by name",
                "default": "content",
            },
            "path": {
                "type": "string",
                "description": "Directory or file to search in (default: current working directory)",
                "default": ".",
            },
            "file_glob": {
                "type": "string",
                "description": "Filter files by pattern in grep mode (e.g., '*.py' to only search Python files)",
            },
            "limit": {
                "type": "integer",
                "description": "Maximum number of results to return (default: 50)",
                "default": 50,
            },
            "offset": {
                "type": "integer",
                "description": "Skip first N results for pagination (default: 0)",
                "default": 0,
            },
            "output_mode": {
                "type": "string",
                "enum": ["content", "files_only", "count"],
                "description": "Output format for grep mode: 'content' shows matching lines with line numbers, 'files_only' lists file paths, 'count' shows match counts per file",
                "default": "content",
            },
            "context": {
                "type": "integer",
                "description": "Number of context lines before and after each match (grep mode only)",
                "default": 0,
            },
        },
        "required": ["pattern"],
    },
}
|
||||
|
||||
|
||||
def _handle_read_file(args, **kw):
    """Registry adapter for read_file_tool; defaults task_id to 'default'."""
    task = kw.get("task_id") or "default"
    return read_file_tool(
        path=args.get("path", ""),
        offset=args.get("offset", 1),
        limit=args.get("limit", 500),
        task_id=task,
    )


def _handle_write_file(args, **kw):
    """Registry adapter for write_file_tool; defaults task_id to 'default'."""
    task = kw.get("task_id") or "default"
    return write_file_tool(
        path=args.get("path", ""),
        content=args.get("content", ""),
        task_id=task,
    )


def _handle_patch(args, **kw):
    """Registry adapter for patch_tool; defaults task_id to 'default'."""
    task = kw.get("task_id") or "default"
    return patch_tool(
        mode=args.get("mode", "replace"),
        path=args.get("path"),
        old_string=args.get("old_string"),
        new_string=args.get("new_string"),
        replace_all=args.get("replace_all", False),
        patch=args.get("patch"),
        task_id=task,
    )


def _handle_search_files(args, **kw):
    """Registry adapter for search_tool.

    Accepts legacy target values 'grep'/'find' and maps them onto the
    current 'content'/'files' values before delegating.
    """
    task = kw.get("task_id") or "default"
    legacy_targets = {"grep": "content", "find": "files"}
    requested = args.get("target", "content")
    return search_tool(
        pattern=args.get("pattern", ""),
        target=legacy_targets.get(requested, requested),
        path=args.get("path", "."),
        file_glob=args.get("file_glob"),
        limit=args.get("limit", 50),
        offset=args.get("offset", 0),
        output_mode=args.get("output_mode", "content"),
        context=args.get("context", 0),
        task_id=task,
    )
|
||||
|
||||
|
||||
# Register the four file tools; they share the toolset and availability check.
for _name, _schema, _handler in (
    ("read_file", READ_FILE_SCHEMA, _handle_read_file),
    ("write_file", WRITE_FILE_SCHEMA, _handle_write_file),
    ("patch", PATCH_SCHEMA, _handle_patch),
    ("search_files", SEARCH_FILES_SCHEMA, _handle_search_files),
):
    registry.register(name=_name, toolset="file", schema=_schema,
                      handler=_handler, check_fn=_check_file_reqs)
|
||||
|
||||
@@ -501,3 +501,56 @@ if __name__ == "__main__":
|
||||
print(" export IMAGE_TOOLS_DEBUG=true")
|
||||
print(" # Debug logs capture all image generation calls and results")
|
||||
print(" # Logs saved to: ./logs/image_tools_debug_UUID.json")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Registry
|
||||
# ---------------------------------------------------------------------------
|
||||
from tools.registry import registry
|
||||
|
||||
# NOTE(review): the description below appears truncated after "markdown: "
# in this copy of the file — confirm against the canonical source.
IMAGE_GENERATE_SCHEMA = {
    "name": "image_generate",
    "description": "Generate high-quality images from text prompts using FLUX 2 Pro model with automatic 2x upscaling. Creates detailed, artistic images that are automatically upscaled for hi-rez results. Returns a single upscaled image URL. Display it using markdown: ",
    "parameters": {
        "type": "object",
        "properties": {
            "prompt": {"type": "string", "description": "The text prompt describing the desired image. Be detailed and descriptive."},
            "aspect_ratio": {"type": "string", "enum": ["landscape", "square", "portrait"], "description": "The aspect ratio of the generated image. 'landscape' is 16:9 wide, 'portrait' is 16:9 tall, 'square' is 1:1.", "default": "landscape"},
        },
        "required": ["prompt"],
    },
}
|
||||
|
||||
|
||||
def _handle_image_generate(args, **kw):
|
||||
prompt = args.get("prompt", "")
|
||||
if not prompt:
|
||||
return json.dumps({"error": "prompt is required for image generation"})
|
||||
return image_generate_tool(
|
||||
prompt=prompt,
|
||||
aspect_ratio=args.get("aspect_ratio", "landscape"),
|
||||
num_inference_steps=50,
|
||||
guidance_scale=4.5,
|
||||
num_images=1,
|
||||
output_format="png",
|
||||
seed=None,
|
||||
)
|
||||
|
||||
|
||||
# Async handler: dispatched through the registry's async bridge (is_async=True).
registry.register(
    name="image_generate",
    toolset="image_gen",
    schema=IMAGE_GENERATE_SCHEMA,
    handler=_handle_image_generate,
    check_fn=check_image_generation_requirements,
    requires_env=["FAL_KEY"],
    is_async=True,
)
|
||||
|
||||
@@ -410,8 +410,21 @@ MEMORY_SCHEMA = {
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
# --- Registry ---
from tools.registry import registry


def _handle_memory(args, **kw):
    """Registry adapter: forward tool-call args plus the store to memory_tool."""
    return memory_tool(
        action=args.get("action", ""),
        target=args.get("target", "memory"),
        content=args.get("content"),
        old_text=args.get("old_text"),
        store=kw.get("store"),
    )


registry.register(
    name="memory",
    toolset="memory",
    schema=MEMORY_SCHEMA,
    handler=_handle_memory,
    check_fn=check_memory_requirements,
)
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -511,3 +511,34 @@ if __name__ == "__main__":
|
||||
print(" export MOA_TOOLS_DEBUG=true")
|
||||
print(" # Debug logs capture all MoA processing steps and metrics")
|
||||
print(" # Logs saved to: ./logs/moa_tools_debug_UUID.json")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Registry
|
||||
# ---------------------------------------------------------------------------
|
||||
from tools.registry import registry
|
||||
|
||||
# Schema for the mixture-of-agents tool (OpenAI function-calling format).
MOA_SCHEMA = {
    "name": "mixture_of_agents",
    "description": "Route a hard problem through multiple frontier LLMs collaboratively. Makes 5 API calls (4 reference models + 1 aggregator) with maximum reasoning effort — use sparingly for genuinely difficult problems. Best for: complex math, advanced algorithms, multi-step analytical reasoning, problems benefiting from diverse perspectives.",
    "parameters": {
        "type": "object",
        "properties": {
            "user_prompt": {"type": "string", "description": "The complex query or problem to solve using multiple AI models. Should be a challenging problem that benefits from diverse perspectives and collaborative reasoning."},
        },
        "required": ["user_prompt"],
    },
}
|
||||
|
||||
def _handle_moa(args, **kw):
    """Registry adapter for mixture_of_agents_tool (async, see is_async below)."""
    return mixture_of_agents_tool(user_prompt=args.get("user_prompt", ""))


registry.register(
    name="mixture_of_agents",
    toolset="moa",
    schema=MOA_SCHEMA,
    handler=_handle_moa,
    check_fn=check_moa_requirements,
    requires_env=["OPENROUTER_API_KEY"],
    is_async=True,
)
|
||||
|
||||
@@ -727,3 +727,88 @@ class ProcessRegistry:
|
||||
|
||||
# Module-level singleton
|
||||
process_registry = ProcessRegistry()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Registry -- the "process" tool schema + handler
|
||||
# ---------------------------------------------------------------------------
|
||||
from tools.registry import registry
|
||||
|
||||
# Schema for the background-process management tool.
PROCESS_SCHEMA = {
    "name": "process",
    "description": (
        "Manage background processes started with terminal(background=true). "
        "Actions: 'list' (show all), 'poll' (check status + new output), "
        "'log' (full output with pagination), 'wait' (block until done or timeout), "
        "'kill' (terminate), 'write' (send raw stdin data without newline), "
        "'submit' (send data + Enter, for answering prompts)."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            # Only 'list' works without a session_id.
            "action": {"type": "string", "enum": ["list", "poll", "log", "wait", "kill", "write", "submit"], "description": "Action to perform on background processes"},
            "session_id": {"type": "string", "description": "Process session ID (from terminal background output). Required for all actions except 'list'."},
            "data": {"type": "string", "description": "Text to send to process stdin (for 'write' and 'submit' actions)"},
            "timeout": {"type": "integer", "description": "Max seconds to block for 'wait' action. Returns partial output on timeout.", "minimum": 1},
            "offset": {"type": "integer", "description": "Line offset for 'log' action (default: last 200 lines)"},
            "limit": {"type": "integer", "description": "Max lines to return for 'log' action", "minimum": 1},
        },
        "required": ["action"],
    },
}
|
||||
|
||||
|
||||
def _handle_process(args, **kw):
|
||||
import json as _json
|
||||
task_id = kw.get("task_id")
|
||||
action = args.get("action", "")
|
||||
session_id = args.get("session_id", "")
|
||||
|
||||
if action == "list":
|
||||
return _json.dumps({"processes": process_registry.list_sessions(task_id=task_id)}, ensure_ascii=False)
|
||||
elif action in ("poll", "log", "wait", "kill", "write", "submit"):
|
||||
if not session_id:
|
||||
return _json.dumps({"error": f"session_id is required for {action}"}, ensure_ascii=False)
|
||||
if action == "poll":
|
||||
return _json.dumps(process_registry.poll(session_id), ensure_ascii=False)
|
||||
elif action == "log":
|
||||
return _json.dumps(process_registry.read_log(
|
||||
session_id, offset=args.get("offset", 0), limit=args.get("limit", 200)), ensure_ascii=False)
|
||||
elif action == "wait":
|
||||
return _json.dumps(process_registry.wait(session_id, timeout=args.get("timeout")), ensure_ascii=False)
|
||||
elif action == "kill":
|
||||
return _json.dumps(process_registry.kill_process(session_id), ensure_ascii=False)
|
||||
elif action == "write":
|
||||
return _json.dumps(process_registry.write_stdin(session_id, args.get("data", "")), ensure_ascii=False)
|
||||
elif action == "submit":
|
||||
return _json.dumps(process_registry.submit_stdin(session_id, args.get("data", "")), ensure_ascii=False)
|
||||
return _json.dumps({"error": f"Unknown process action: {action}. Use: list, poll, log, wait, kill, write, submit"}, ensure_ascii=False)
|
||||
|
||||
|
||||
# No check_fn/requires_env: process management is always available.
registry.register(
    name="process",
    toolset="terminal",
    schema=PROCESS_SCHEMA,
    handler=_handle_process,
)
|
||||
|
||||
219
tools/registry.py
Normal file
219
tools/registry.py
Normal file
@@ -0,0 +1,219 @@
|
||||
"""Central registry for all hermes-agent tools.
|
||||
|
||||
Each tool file calls ``registry.register()`` at module level to declare its
|
||||
schema, handler, toolset membership, and availability check. ``model_tools.py``
|
||||
queries the registry instead of maintaining its own parallel data structures.
|
||||
|
||||
Import chain (circular-import safe):
|
||||
tools/registry.py (no imports from model_tools or tool files)
|
||||
^
|
||||
tools/*.py (import from tools.registry at module level)
|
||||
^
|
||||
model_tools.py (imports tools.registry + all tool modules)
|
||||
^
|
||||
run_agent.py, cli.py, batch_runner.py, etc.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Callable, Dict, List, Optional, Set
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ToolEntry:
    """Metadata record for a single registered tool.

    Attributes mirror the arguments of :meth:`ToolRegistry.register`.
    ``__slots__`` keeps the many small instances lightweight.
    """

    __slots__ = (
        "name", "toolset", "schema", "handler", "check_fn",
        "requires_env", "is_async", "description",
    )

    def __init__(self, name, toolset, schema, handler, check_fn,
                 requires_env, is_async, description):
        self.name = name                  # tool name (unique registry key)
        self.toolset = toolset            # toolset this tool belongs to
        self.schema = schema              # OpenAI function schema dict
        self.handler = handler            # callable(args, **kwargs) -> result
        self.check_fn = check_fn          # availability check, or None
        self.requires_env = requires_env  # env var names the tool needs
        self.is_async = is_async          # True: dispatched via _run_async bridge
        self.description = description    # human-readable summary


class ToolRegistry:
    """Singleton registry that collects tool schemas + handlers from tool files.

    Tool modules call :meth:`register` at import time; consumers query the
    registry for schemas, dispatch calls, and toolset availability instead
    of maintaining parallel data structures.
    """

    def __init__(self):
        # name -> ToolEntry for every registered tool
        self._tools: Dict[str, ToolEntry] = {}
        # toolset -> availability check (first non-None check_fn seen wins)
        self._toolset_checks: Dict[str, Callable] = {}

    # ------------------------------------------------------------------
    # Registration
    # ------------------------------------------------------------------

    def register(
        self,
        name: str,
        toolset: str,
        schema: dict,
        handler: Callable,
        check_fn: Optional[Callable] = None,
        requires_env: Optional[list] = None,
        is_async: bool = False,
        description: str = "",
    ):
        """Register a tool. Called at module-import time by each tool file.

        Re-registering a name replaces the previous entry. The first
        non-None ``check_fn`` seen for a toolset becomes that toolset's
        availability check. ``description`` falls back to the schema's
        own "description" field when omitted.
        """
        self._tools[name] = ToolEntry(
            name=name,
            toolset=toolset,
            schema=schema,
            handler=handler,
            check_fn=check_fn,
            requires_env=requires_env or [],
            is_async=is_async,
            description=description or schema.get("description", ""),
        )
        if check_fn and toolset not in self._toolset_checks:
            self._toolset_checks[toolset] = check_fn

    # ------------------------------------------------------------------
    # Schema retrieval
    # ------------------------------------------------------------------

    def get_definitions(self, tool_names: Set[str], quiet: bool = False) -> List[dict]:
        """Return OpenAI-format tool schemas for the requested tool names.

        Only tools whose ``check_fn()`` returns True (or that have no
        check_fn) are included. A check_fn that raises is treated as
        unavailable. Unknown names are silently skipped; output is sorted
        by tool name.
        """
        result = []
        for name in sorted(tool_names):
            entry = self._tools.get(name)
            if not entry:
                continue
            if entry.check_fn:
                try:
                    if not entry.check_fn():
                        if not quiet:
                            logger.debug("Tool %s unavailable (check failed)", name)
                        continue
                except Exception:
                    if not quiet:
                        logger.debug("Tool %s check raised; skipping", name)
                    continue
            result.append({"type": "function", "function": entry.schema})
        return result

    # ------------------------------------------------------------------
    # Dispatch
    # ------------------------------------------------------------------

    def dispatch(self, name: str, args: dict, **kwargs) -> str:
        """Execute a tool handler by name.

        * Async handlers are bridged automatically via ``_run_async()``.
        * All exceptions are caught and returned as ``{"error": "..."}``
          for a consistent error format.
        """
        entry = self._tools.get(name)
        if not entry:
            return json.dumps({"error": f"Unknown tool: {name}"})
        try:
            if entry.is_async:
                # Imported lazily to avoid a circular import with model_tools.
                from model_tools import _run_async
                return _run_async(entry.handler(args, **kwargs))
            return entry.handler(args, **kwargs)
        except Exception as e:
            logger.error("Tool %s dispatch error: %s", name, e)
            return json.dumps({"error": f"Tool execution failed: {type(e).__name__}: {e}"})

    # ------------------------------------------------------------------
    # Query helpers (replace redundant dicts in model_tools.py)
    # ------------------------------------------------------------------

    def _env_vars_for_toolset(self, toolset: str) -> List[str]:
        """Union of ``requires_env`` across every tool in *toolset*.

        Order-preserving (registration order) and de-duplicated.
        """
        seen: List[str] = []
        for entry in self._tools.values():
            if entry.toolset == toolset:
                for env in entry.requires_env:
                    if env not in seen:
                        seen.append(env)
        return seen

    def get_all_tool_names(self) -> List[str]:
        """Return sorted list of all registered tool names."""
        return sorted(self._tools.keys())

    def get_toolset_for_tool(self, name: str) -> Optional[str]:
        """Return the toolset a tool belongs to, or None."""
        entry = self._tools.get(name)
        return entry.toolset if entry else None

    def get_tool_to_toolset_map(self) -> Dict[str, str]:
        """Return ``{tool_name: toolset_name}`` for every registered tool."""
        return {name: e.toolset for name, e in self._tools.items()}

    def is_toolset_available(self, toolset: str) -> bool:
        """Check if a toolset's requirements are met (True when no check exists)."""
        check = self._toolset_checks.get(toolset)
        return check() if check else True

    def check_toolset_requirements(self) -> Dict[str, bool]:
        """Return ``{toolset: available_bool}`` for every toolset."""
        toolsets = set(e.toolset for e in self._tools.values())
        return {ts: self.is_toolset_available(ts) for ts in sorted(toolsets)}

    def get_available_toolsets(self) -> Dict[str, dict]:
        """Return toolset metadata for UI display."""
        toolsets: Dict[str, dict] = {}
        for entry in self._tools.values():
            ts = entry.toolset
            if ts not in toolsets:
                toolsets[ts] = {
                    "available": self.is_toolset_available(ts),
                    "tools": [],
                    "description": "",
                    "requirements": self._env_vars_for_toolset(ts),
                }
            toolsets[ts]["tools"].append(entry.name)
        return toolsets

    def get_toolset_requirements(self) -> Dict[str, dict]:
        """Build a TOOLSET_REQUIREMENTS-compatible dict for backward compat."""
        result: Dict[str, dict] = {}
        for entry in self._tools.values():
            ts = entry.toolset
            if ts not in result:
                result[ts] = {
                    "name": ts,
                    "env_vars": self._env_vars_for_toolset(ts),
                    "check_fn": self._toolset_checks.get(ts),
                    "setup_url": None,  # not tracked at registration time
                    "tools": [],
                }
            if entry.name not in result[ts]["tools"]:
                result[ts]["tools"].append(entry.name)
        return result

    def check_tool_availability(self, quiet: bool = False):
        """Return (available_toolsets, unavailable_info) like the old function.

        ``quiet`` is accepted for interface compatibility.

        BUGFIX: ``env_vars`` for an unavailable toolset is now the union of
        ``requires_env`` across *all* tools in the toolset, not just the
        first-registered tool's list (which silently dropped requirements
        contributed by sibling tools).
        """
        available = []
        unavailable = []
        seen = set()
        for entry in self._tools.values():
            ts = entry.toolset
            if ts in seen:
                continue
            seen.add(ts)
            if self.is_toolset_available(ts):
                available.append(ts)
            else:
                unavailable.append({
                    "name": ts,
                    "env_vars": self._env_vars_for_toolset(ts),
                    "tools": [e.name for e in self._tools.values() if e.toolset == ts],
                })
        return available, unavailable


# Module-level singleton -- tool modules import this and call register()
registry = ToolRegistry()
|
||||
@@ -1337,3 +1337,44 @@ def get_missing_keys() -> List[str]:
|
||||
if not os.getenv("WANDB_API_KEY"):
|
||||
missing.append("WANDB_API_KEY")
|
||||
return missing
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Schemas + Registry
|
||||
# ---------------------------------------------------------------------------
|
||||
from tools.registry import registry
|
||||
|
||||
# One OpenAI-style function schema per RL tool; the registry keys on schema["name"].
RL_LIST_ENVIRONMENTS_SCHEMA = {"name": "rl_list_environments", "description": "List all available RL environments. Returns environment names, paths, and descriptions. TIP: Read the file_path with file tools to understand how each environment works (verifiers, data loading, rewards).", "parameters": {"type": "object", "properties": {}, "required": []}}
RL_SELECT_ENVIRONMENT_SCHEMA = {"name": "rl_select_environment", "description": "Select an RL environment for training. Loads the environment's default configuration. After selecting, use rl_get_current_config() to see settings and rl_edit_config() to modify them.", "parameters": {"type": "object", "properties": {"name": {"type": "string", "description": "Name of the environment to select (from rl_list_environments)"}}, "required": ["name"]}}
RL_GET_CURRENT_CONFIG_SCHEMA = {"name": "rl_get_current_config", "description": "Get the current environment configuration. Returns only fields that can be modified: group_size, max_token_length, total_steps, steps_per_eval, use_wandb, wandb_name, max_num_workers.", "parameters": {"type": "object", "properties": {}, "required": []}}
RL_EDIT_CONFIG_SCHEMA = {"name": "rl_edit_config", "description": "Update a configuration field. Use rl_get_current_config() first to see all available fields for the selected environment. Each environment has different configurable options. Infrastructure settings (tokenizer, URLs, lora_rank, learning_rate) are locked.", "parameters": {"type": "object", "properties": {"field": {"type": "string", "description": "Name of the field to update (get available fields from rl_get_current_config)"}, "value": {"description": "New value for the field"}}, "required": ["field", "value"]}}
RL_START_TRAINING_SCHEMA = {"name": "rl_start_training", "description": "Start a new RL training run with the current environment and config. Most training parameters (lora_rank, learning_rate, etc.) are fixed. Use rl_edit_config() to set group_size, batch_size, wandb_project before starting. WARNING: Training takes hours.", "parameters": {"type": "object", "properties": {}, "required": []}}
RL_CHECK_STATUS_SCHEMA = {"name": "rl_check_status", "description": "Get status and metrics for a training run. RATE LIMITED: enforces 30-minute minimum between checks for the same run. Returns WandB metrics: step, state, reward_mean, loss, percent_correct.", "parameters": {"type": "object", "properties": {"run_id": {"type": "string", "description": "The run ID from rl_start_training()"}}, "required": ["run_id"]}}
RL_STOP_TRAINING_SCHEMA = {"name": "rl_stop_training", "description": "Stop a running training job. Use if metrics look bad, training is stagnant, or you want to try different settings.", "parameters": {"type": "object", "properties": {"run_id": {"type": "string", "description": "The run ID to stop"}}, "required": ["run_id"]}}
RL_GET_RESULTS_SCHEMA = {"name": "rl_get_results", "description": "Get final results and metrics for a completed training run. Returns final metrics and path to trained weights.", "parameters": {"type": "object", "properties": {"run_id": {"type": "string", "description": "The run ID to get results for"}}, "required": ["run_id"]}}
RL_LIST_RUNS_SCHEMA = {"name": "rl_list_runs", "description": "List all training runs (active and completed) with their status.", "parameters": {"type": "object", "properties": {}, "required": []}}
RL_TEST_INFERENCE_SCHEMA = {"name": "rl_test_inference", "description": "Quick inference test for any environment. Runs a few steps of inference + scoring using OpenRouter. Default: 3 steps x 16 completions = 48 rollouts per model, testing 3 models = 144 total. Tests environment loading, prompt construction, inference parsing, and verifier logic. Use BEFORE training to catch issues.", "parameters": {"type": "object", "properties": {"num_steps": {"type": "integer", "description": "Number of steps to run (default: 3, recommended max for testing)", "default": 3}, "group_size": {"type": "integer", "description": "Completions per step (default: 16, like training)", "default": 16}, "models": {"type": "array", "items": {"type": "string"}, "description": "Optional list of OpenRouter model IDs. Default: qwen/qwen3-8b, z-ai/glm-4.7-flash, minimax/minimax-m2.1"}}, "required": []}}

# Env vars every RL tool requires before it can run.
_rl_env = ["TINKER_API_KEY", "WANDB_API_KEY"]

# (schema, handler) pairs; all RL tools share toolset/check_fn/requires_env/is_async,
# so register them in one data-driven pass instead of ten near-identical calls.
_RL_REGISTRATIONS = [
    (RL_LIST_ENVIRONMENTS_SCHEMA, lambda args, **kw: rl_list_environments()),
    (RL_SELECT_ENVIRONMENT_SCHEMA, lambda args, **kw: rl_select_environment(name=args.get("name", ""))),
    (RL_GET_CURRENT_CONFIG_SCHEMA, lambda args, **kw: rl_get_current_config()),
    (RL_EDIT_CONFIG_SCHEMA, lambda args, **kw: rl_edit_config(field=args.get("field", ""), value=args.get("value"))),
    (RL_START_TRAINING_SCHEMA, lambda args, **kw: rl_start_training()),
    (RL_CHECK_STATUS_SCHEMA, lambda args, **kw: rl_check_status(run_id=args.get("run_id", ""))),
    (RL_STOP_TRAINING_SCHEMA, lambda args, **kw: rl_stop_training(run_id=args.get("run_id", ""))),
    (RL_GET_RESULTS_SCHEMA, lambda args, **kw: rl_get_results(run_id=args.get("run_id", ""))),
    (RL_LIST_RUNS_SCHEMA, lambda args, **kw: rl_list_runs()),
    (RL_TEST_INFERENCE_SCHEMA, lambda args, **kw: rl_test_inference(num_steps=args.get("num_steps", 3), group_size=args.get("group_size", 16), models=args.get("models"))),
]

for _schema, _handler in _RL_REGISTRATIONS:
    registry.register(name=_schema["name"], toolset="rl", schema=_schema, handler=_handler,
                      check_fn=check_rl_api_keys, requires_env=_rl_env, is_async=True)
|
||||
|
||||
162
tools/send_message_tool.py
Normal file
162
tools/send_message_tool.py
Normal file
@@ -0,0 +1,162 @@
|
||||
"""Send Message Tool -- cross-channel messaging via platform APIs.
|
||||
|
||||
Sends a message to a user or channel on any connected messaging platform
|
||||
(Telegram, Discord, Slack). Works in both CLI and gateway contexts.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# OpenAI-style function schema for the cross-platform send_message tool.
SEND_MESSAGE_SCHEMA = {
    "name": "send_message",
    "description": "Send a message to a user or channel on any connected messaging platform. Use this when the user asks you to send something to a different platform, or when delivering notifications/alerts to a specific destination.",
    "parameters": {
        "type": "object",
        "properties": {
            "target": {
                "type": "string",
                "description": "Delivery target. Format: 'platform' (uses home channel) or 'platform:chat_id' (specific chat). Examples: 'telegram', 'discord:123456789', 'slack:C01234ABCDE'"
            },
            "message": {
                "type": "string",
                "description": "The message text to send"
            }
        },
        "required": ["target", "message"]
    }
}
|
||||
|
||||
|
||||
def send_message_tool(args, **kw):
    """Handle cross-channel send_message tool calls.

    Delivers ``message`` to the platform named in ``target`` (either
    ``'platform'`` or ``'platform:chat_id'``) directly via that platform's
    API. Works in both CLI and gateway contexts -- the gateway need not be
    running; credentials are loaded from the gateway config
    (env vars / ~/.hermes/gateway.json). Always returns a JSON string with
    either a success payload or an {"error": ...} entry.
    """
    target = args.get("target", "")
    message = args.get("message", "")
    if not target or not message:
        return json.dumps({"error": "Both 'target' and 'message' are required"})

    # 'telegram:12345' -> platform 'telegram', chat '12345';
    # bare 'telegram' leaves chat_id as None so the home channel is used.
    head, sep, tail = target.partition(":")
    platform_name = head.strip().lower()
    chat_id = tail.strip() if sep else None

    try:
        from gateway.config import load_gateway_config, Platform
        config = load_gateway_config()
    except Exception as e:
        return json.dumps({"error": f"Failed to load gateway config: {e}"})

    platform_map = {
        "telegram": Platform.TELEGRAM,
        "discord": Platform.DISCORD,
        "slack": Platform.SLACK,
        "whatsapp": Platform.WHATSAPP,
    }
    platform = platform_map.get(platform_name)
    if not platform:
        avail = ", ".join(platform_map.keys())
        return json.dumps({"error": f"Unknown platform: {platform_name}. Available: {avail}"})

    pconfig = config.platforms.get(platform)
    if not pconfig or not pconfig.enabled:
        return json.dumps({"error": f"Platform '{platform_name}' is not configured. Set up credentials in ~/.hermes/gateway.json or environment variables."})

    if not chat_id:
        home = config.get_home_channel(platform)
        if not home:
            return json.dumps({"error": f"No chat_id specified and no home channel configured for {platform_name}. Use format 'platform:chat_id'."})
        chat_id = home.chat_id

    try:
        from model_tools import _run_async
        outcome = _run_async(_send_to_platform(platform, pconfig, chat_id, message))
        return json.dumps(outcome)
    except Exception as e:
        return json.dumps({"error": f"Send failed: {e}"})
|
||||
|
||||
|
||||
async def _send_to_platform(platform, pconfig, chat_id, message):
    """Route a message to the appropriate platform sender."""
    from gateway.config import Platform

    # Dispatch table instead of an if/elif chain; unsupported platforms
    # (e.g. WhatsApp) fall through to the not-implemented error.
    senders = {
        Platform.TELEGRAM: _send_telegram,
        Platform.DISCORD: _send_discord,
        Platform.SLACK: _send_slack,
    }
    sender = senders.get(platform)
    if sender is None:
        return {"error": f"Direct sending not yet implemented for {platform.value}"}
    return await sender(pconfig.token, chat_id, message)
|
||||
|
||||
|
||||
async def _send_telegram(token, chat_id, message):
|
||||
"""Send via Telegram Bot API (one-shot, no polling needed)."""
|
||||
try:
|
||||
from telegram import Bot
|
||||
bot = Bot(token=token)
|
||||
msg = await bot.send_message(chat_id=int(chat_id), text=message)
|
||||
return {"success": True, "platform": "telegram", "chat_id": chat_id, "message_id": str(msg.message_id)}
|
||||
except ImportError:
|
||||
return {"error": "python-telegram-bot not installed. Run: pip install python-telegram-bot"}
|
||||
except Exception as e:
|
||||
return {"error": f"Telegram send failed: {e}"}
|
||||
|
||||
|
||||
async def _send_discord(token, chat_id, message):
|
||||
"""Send via Discord REST API (no websocket client needed)."""
|
||||
try:
|
||||
import aiohttp
|
||||
except ImportError:
|
||||
return {"error": "aiohttp not installed. Run: pip install aiohttp"}
|
||||
try:
|
||||
url = f"https://discord.com/api/v10/channels/{chat_id}/messages"
|
||||
headers = {"Authorization": f"Bot {token}", "Content-Type": "application/json"}
|
||||
chunks = [message[i:i+2000] for i in range(0, len(message), 2000)]
|
||||
message_ids = []
|
||||
async with aiohttp.ClientSession() as session:
|
||||
for chunk in chunks:
|
||||
async with session.post(url, headers=headers, json={"content": chunk}) as resp:
|
||||
if resp.status not in (200, 201):
|
||||
body = await resp.text()
|
||||
return {"error": f"Discord API error ({resp.status}): {body}"}
|
||||
data = await resp.json()
|
||||
message_ids.append(data.get("id"))
|
||||
return {"success": True, "platform": "discord", "chat_id": chat_id, "message_ids": message_ids}
|
||||
except Exception as e:
|
||||
return {"error": f"Discord send failed: {e}"}
|
||||
|
||||
|
||||
async def _send_slack(token, chat_id, message):
|
||||
"""Send via Slack Web API."""
|
||||
try:
|
||||
import aiohttp
|
||||
except ImportError:
|
||||
return {"error": "aiohttp not installed. Run: pip install aiohttp"}
|
||||
try:
|
||||
url = "https://slack.com/api/chat.postMessage"
|
||||
headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.post(url, headers=headers, json={"channel": chat_id, "text": message}) as resp:
|
||||
data = await resp.json()
|
||||
if data.get("ok"):
|
||||
return {"success": True, "platform": "slack", "chat_id": chat_id, "message_id": data.get("ts")}
|
||||
return {"error": f"Slack API error: {data.get('error', 'unknown')}"}
|
||||
except Exception as e:
|
||||
return {"error": f"Slack send failed: {e}"}
|
||||
|
||||
|
||||
# --- Registry ---
from tools.registry import registry

# No check_fn/requires_env here: send_message_tool validates platform
# credentials itself at call time from the gateway config.
registry.register(
    name="send_message",
    toolset="messaging",
    schema=SEND_MESSAGE_SCHEMA,
    handler=send_message_tool,
)
|
||||
@@ -301,3 +301,20 @@ SESSION_SEARCH_SCHEMA = {
|
||||
"required": ["query"],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
# --- Registry ---
from tools.registry import registry

# The optional `db` handle is threaded through from the caller via **kw.
registry.register(
    name="session_search",
    toolset="session_search",
    schema=SESSION_SEARCH_SCHEMA,
    handler=lambda args, **kw: session_search(
        query=args.get("query", ""),
        role_filter=args.get("role_filter"),
        limit=args.get("limit", 3),
        db=kw.get("db")),
    check_fn=check_session_search_requirements,
    requires_env=["OPENROUTER_API_KEY"],
)
|
||||
|
||||
@@ -542,3 +542,23 @@ SKILL_MANAGE_SCHEMA = {
|
||||
"required": ["action", "name"],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
# --- Registry ---
from tools.registry import registry

# Registered without check_fn/requires_env; all skill_manage arguments are
# pulled from the tool-call args, with replace_all defaulting to False.
registry.register(
    name="skill_manage",
    toolset="skills",
    schema=SKILL_MANAGE_SCHEMA,
    handler=lambda args, **kw: skill_manage(
        action=args.get("action", ""),
        name=args.get("name", ""),
        content=args.get("content"),
        category=args.get("category"),
        file_path=args.get("file_path"),
        file_content=args.get("file_content"),
        old_string=args.get("old_string"),
        new_string=args.get("new_string"),
        replace_all=args.get("replace_all", False)),
)
|
||||
|
||||
@@ -637,3 +637,58 @@ if __name__ == "__main__":
|
||||
print(f"Preview: {result['content'][:150]}...")
|
||||
else:
|
||||
print(f"Error: {result['error']}")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Registry
# ---------------------------------------------------------------------------
from tools.registry import registry

# Catalog lookup only (names + descriptions); full skill content is loaded
# on demand via skill_view.
SKILLS_LIST_SCHEMA = {
    "name": "skills_list",
    "description": "List available skills (name + description). Use skill_view(name) to load full content.",
    "parameters": {
        "type": "object",
        "properties": {
            "category": {
                "type": "string",
                "description": "Optional category filter to narrow results"
            }
        },
        "required": []
    }
}

SKILL_VIEW_SCHEMA = {
    "name": "skill_view",
    "description": "Skills allow for loading information about specific tasks and workflows, as well as scripts and templates. Load a skill's full content or access its linked files (references, templates, scripts). First call returns SKILL.md content plus a 'linked_files' dict showing available references/templates/scripts. To access those, call again with file_path parameter.",
    "parameters": {
        "type": "object",
        "properties": {
            "name": {
                "type": "string",
                "description": "The skill name (use skills_list to see available skills)"
            },
            "file_path": {
                "type": "string",
                "description": "OPTIONAL: Path to a linked file within the skill (e.g., 'references/api.md', 'templates/config.yaml', 'scripts/validate.py'). Omit to get the main SKILL.md content."
            }
        },
        "required": ["name"]
    }
}

registry.register(
    name="skills_list",
    toolset="skills",
    schema=SKILLS_LIST_SCHEMA,
    handler=lambda args, **kw: skills_list(category=args.get("category")),
    check_fn=check_skills_requirements,
)
registry.register(
    name="skill_view",
    toolset="skills",
    schema=SKILL_VIEW_SCHEMA,
    # Note: `name` is passed positionally here, unlike most other handlers.
    handler=lambda args, **kw: skill_view(args.get("name", ""), file_path=args.get("file_path")),
    check_fn=check_skills_requirements,
)
|
||||
|
||||
@@ -1980,3 +1980,69 @@ if __name__ == "__main__":
|
||||
print(f" TERMINAL_CWD: {os.getenv('TERMINAL_CWD', os.getcwd())}")
|
||||
print(f" TERMINAL_TIMEOUT: {os.getenv('TERMINAL_TIMEOUT', '60')}")
|
||||
print(f" TERMINAL_LIFETIME_SECONDS: {os.getenv('TERMINAL_LIFETIME_SECONDS', '300')}")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Registry
# ---------------------------------------------------------------------------
from tools.registry import registry

# The long-form tool description lives in TERMINAL_TOOL_DESCRIPTION (defined
# earlier in this module); only the parameter schema is spelled out here.
TERMINAL_SCHEMA = {
    "name": "terminal",
    "description": TERMINAL_TOOL_DESCRIPTION,
    "parameters": {
        "type": "object",
        "properties": {
            "command": {
                "type": "string",
                "description": "The command to execute on the VM"
            },
            "background": {
                "type": "boolean",
                "description": "Whether to run the command in the background (default: false)",
                "default": False
            },
            "timeout": {
                "type": "integer",
                "description": "Command timeout in seconds (optional)",
                "minimum": 1
            },
            "workdir": {
                "type": "string",
                "description": "Working directory for this command (absolute path). Defaults to the session working directory."
            },
            "check_interval": {
                "type": "integer",
                "description": "Seconds between automatic status checks for background processes (gateway/messaging only, minimum 30). When set, I'll proactively report progress.",
                "minimum": 30
            },
            "pty": {
                "type": "boolean",
                "description": "Run in pseudo-terminal (PTY) mode for interactive CLI tools like Codex, Claude Code, or Python REPL. Only works with local and SSH backends. Default: false.",
                "default": False
            }
        },
        "required": ["command"]
    }
}
|
||||
|
||||
|
||||
def _handle_terminal(args, **kw):
    """Adapt a registry-style (args, **kw) invocation to terminal_tool's
    keyword signature; task_id comes from the call context, the rest from
    the tool-call arguments."""
    call_kwargs = {
        "command": args.get("command"),
        "background": args.get("background", False),
        "timeout": args.get("timeout"),
        "task_id": kw.get("task_id"),
        "workdir": args.get("workdir"),
        "check_interval": args.get("check_interval"),
        "pty": args.get("pty", False),
    }
    return terminal_tool(**call_kwargs)
|
||||
|
||||
|
||||
# Register the terminal tool; availability is gated by check_terminal_requirements.
registry.register(
    name="terminal",
    toolset="terminal",
    schema=TERMINAL_SCHEMA,
    handler=_handle_terminal,
    check_fn=check_terminal_requirements,
)
|
||||
|
||||
@@ -243,3 +243,16 @@ TODO_SCHEMA = {
|
||||
"required": []
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# --- Registry ---
from tools.registry import registry

# The `store` backend is injected by the caller via **kw, not by the model.
registry.register(
    name="todo",
    toolset="todo",
    schema=TODO_SCHEMA,
    handler=lambda args, **kw: todo_tool(
        todos=args.get("todos"), merge=args.get("merge", False), store=kw.get("store")),
    check_fn=check_todo_requirements,
)
|
||||
|
||||
@@ -416,3 +416,38 @@ if __name__ == "__main__":
|
||||
config = _load_tts_config()
|
||||
provider = _get_provider(config)
|
||||
print(f" Configured provider: {provider}")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Registry
# ---------------------------------------------------------------------------
from tools.registry import registry

# Voice/provider selection is user configuration; the model only supplies text.
TTS_SCHEMA = {
    "name": "text_to_speech",
    "description": "Convert text to speech audio. Returns a MEDIA: path that the platform delivers as a voice message. On Telegram it plays as a voice bubble, on Discord/WhatsApp as an audio attachment. In CLI mode, saves to ~/voice-memos/. Voice and provider are user-configured, not model-selected.",
    "parameters": {
        "type": "object",
        "properties": {
            "text": {
                "type": "string",
                "description": "The text to convert to speech. Keep under 4000 characters."
            },
            "output_path": {
                "type": "string",
                "description": "Optional custom file path to save the audio. Defaults to ~/voice-memos/<timestamp>.mp3"
            }
        },
        "required": ["text"]
    }
}

registry.register(
    name="text_to_speech",
    toolset="tts",
    schema=TTS_SCHEMA,
    handler=lambda args, **kw: text_to_speech_tool(
        text=args.get("text", ""),
        output_path=args.get("output_path")),
    check_fn=check_tts_requirements,
)
|
||||
|
||||
@@ -424,3 +424,46 @@ if __name__ == "__main__":
|
||||
print(" export VISION_TOOLS_DEBUG=true")
|
||||
print(" # Debug logs capture all vision analysis calls and results")
|
||||
print(" # Logs saved to: ./logs/vision_tools_debug_UUID.json")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Registry
# ---------------------------------------------------------------------------
from tools.registry import registry

VISION_ANALYZE_SCHEMA = {
    "name": "vision_analyze",
    "description": "Analyze images using AI vision. Provides a comprehensive description and answers a specific question about the image content.",
    "parameters": {
        "type": "object",
        "properties": {
            "image_url": {
                "type": "string",
                "description": "Image URL (http/https) or local file path to analyze."
            },
            "question": {
                "type": "string",
                "description": "Your specific question or request about the image to resolve. The AI will automatically provide a complete image description AND answer your specific question."
            }
        },
        "required": ["image_url", "question"]
    }
}
|
||||
|
||||
|
||||
def _handle_vision_analyze(args, **kw):
    """Wrap the user's question in a describe-everything prompt and delegate
    to vision_analyze_tool with the fixed vision model."""
    question = args.get("question", "")
    image_url = args.get("image_url", "")
    # The prefix forces a full description before the specific answer.
    prompt = (
        "Fully describe and explain everything about this image, "
        "then answer the following question:\n\n" + question
    )
    return vision_analyze_tool(image_url, prompt, "google/gemini-3-flash-preview")
|
||||
|
||||
|
||||
# vision_analyze goes through OpenRouter, hence the async flag and API-key gate.
registry.register(
    name="vision_analyze",
    toolset="vision",
    schema=VISION_ANALYZE_SCHEMA,
    handler=_handle_vision_analyze,
    check_fn=check_vision_requirements,
    requires_env=["OPENROUTER_API_KEY"],
    is_async=True,
)
|
||||
|
||||
@@ -1193,3 +1193,60 @@ if __name__ == "__main__":
|
||||
print(" # Logs saved to: ./logs/web_tools_debug_UUID.json")
|
||||
|
||||
print(f"\n📝 Run 'python test_web_tools_llm.py' to test LLM processing capabilities")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Registry
# ---------------------------------------------------------------------------
from tools.registry import registry

WEB_SEARCH_SCHEMA = {
    "name": "web_search",
    "description": "Search the web for information on any topic. Returns up to 5 relevant results with titles, URLs, and descriptions.",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The search query to look up on the web"
            }
        },
        "required": ["query"]
    }
}

WEB_EXTRACT_SCHEMA = {
    "name": "web_extract",
    "description": "Extract content from web page URLs. Pages under 5000 chars return raw content; larger pages are LLM-summarized and capped at ~5000 chars per page. Pages over 2M chars are refused. Use browser tools only when pages require interaction or dynamic content.",
    "parameters": {
        "type": "object",
        "properties": {
            "urls": {
                "type": "array",
                "items": {"type": "string"},
                "description": "List of URLs to extract content from (max 5 URLs per call)",
                "maxItems": 5
            }
        },
        "required": ["urls"]
    }
}

# Both web tools are backed by Firecrawl and share the same key gate.
registry.register(
    name="web_search",
    toolset="web",
    schema=WEB_SEARCH_SCHEMA,
    handler=lambda args, **kw: web_search_tool(args.get("query", ""), limit=5),
    check_fn=check_firecrawl_api_key,
    requires_env=["FIRECRAWL_API_KEY"],
)
registry.register(
    name="web_extract",
    toolset="web",
    schema=WEB_EXTRACT_SCHEMA,
    # Defensive handler: non-list `urls` becomes [], and lists are hard-capped
    # at 5 even if the model ignores the schema's maxItems.
    handler=lambda args, **kw: web_extract_tool(
        args.get("urls", [])[:5] if isinstance(args.get("urls"), list) else [], "markdown"),
    check_fn=check_firecrawl_api_key,
    requires_env=["FIRECRAWL_API_KEY"],
    is_async=True,
)
|
||||
|
||||
Reference in New Issue
Block a user