diff --git a/batch_runner.py b/batch_runner.py
index 2d1ba40c0..54a1a5851 100644
--- a/batch_runner.py
+++ b/batch_runner.py
@@ -27,7 +27,7 @@ import time
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple
from datetime import datetime
-from multiprocessing import Pool, Manager, Lock
+from multiprocessing import Pool, Lock
import traceback
from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeRemainingColumn, MofNCompleteColumn
@@ -36,7 +36,6 @@ import fire
from run_agent import AIAgent
from toolset_distributions import (
- get_distribution,
list_distributions,
sample_toolsets_from_distribution,
validate_distribution
@@ -173,7 +172,7 @@ def _extract_tool_stats(messages: List[Dict[str, Any]]) -> Dict[str, Dict[str, i
if content_json.get("success") is False:
is_success = False
- except:
+ except (json.JSONDecodeError, ValueError, TypeError):
# If not JSON, check if content is empty or explicitly states an error
# Note: We avoid simple substring matching to prevent false positives
if not content:
diff --git a/cli.py b/cli.py
index c2a58474a..4cadb2f4b 100755
--- a/cli.py
+++ b/cli.py
@@ -39,16 +39,16 @@ from prompt_toolkit.layout.menus import CompletionsMenu
from prompt_toolkit.widgets import TextArea
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.completion import Completer, Completion
-from prompt_toolkit.keys import Keys
from prompt_toolkit import print_formatted_text as _pt_print
from prompt_toolkit.formatted_text import ANSI as _PT_ANSI
import threading
import queue
-import tempfile
# Load environment variables first
from dotenv import load_dotenv
+from hermes_constants import OPENROUTER_BASE_URL
+
env_path = Path(__file__).parent / '.env'
if env_path.exists():
load_dotenv(dotenv_path=env_path)
@@ -88,7 +88,7 @@ def load_cli_config() -> Dict[str, Any]:
defaults = {
"model": {
"default": "anthropic/claude-opus-4.6",
- "base_url": "https://openrouter.ai/api/v1",
+ "base_url": OPENROUTER_BASE_URL,
"provider": "auto",
},
"terminal": {
@@ -262,20 +262,15 @@ def load_cli_config() -> Dict[str, Any]:
# Load configuration at module startup
CLI_CONFIG = load_cli_config()
-from rich.console import Console, Group
+from rich.console import Console
from rich.panel import Panel
-from rich.text import Text
from rich.table import Table
-from rich.markdown import Markdown
-from rich.columns import Columns
-from rich.align import Align
-from rich import box
import fire
# Import the agent and tool systems
from run_agent import AIAgent
-from model_tools import get_tool_definitions, get_all_tool_names, get_toolset_for_tool, get_available_toolsets
+from model_tools import get_tool_definitions, get_toolset_for_tool
from toolsets import get_all_toolsets, get_toolset_info, resolve_toolset, validate_toolset
# Cron job system for scheduled tasks
diff --git a/gateway/delivery.py b/gateway/delivery.py
index 04c55f0ba..676c3b5ae 100644
--- a/gateway/delivery.py
+++ b/gateway/delivery.py
@@ -8,14 +8,13 @@ Routes messages to the appropriate destination based on:
- Local (always saved to files)
"""
-import json
from pathlib import Path
from datetime import datetime
from dataclasses import dataclass
from typing import Dict, List, Optional, Any, Union
from enum import Enum
-from .config import Platform, GatewayConfig, HomeChannel
+from .config import Platform, GatewayConfig
from .session import SessionSource
diff --git a/gateway/run.py b/gateway/run.py
index bacdb2996..2604df1c0 100644
--- a/gateway/run.py
+++ b/gateway/run.py
@@ -1081,7 +1081,7 @@ class GatewayRunner:
try:
msg = progress_queue.get_nowait()
await adapter.send(chat_id=source.chat_id, content=msg)
- except:
+ except Exception:
break
return
except Exception as e:
diff --git a/hermes_cli/auth.py b/hermes_cli/auth.py
index 7fc826688..861b92018 100644
--- a/hermes_cli/auth.py
+++ b/hermes_cli/auth.py
@@ -30,6 +30,7 @@ import httpx
import yaml
from hermes_cli.config import get_hermes_home, get_config_path
+from hermes_constants import OPENROUTER_BASE_URL
try:
import fcntl
@@ -865,7 +866,7 @@ def _reset_config_provider() -> Path:
if isinstance(model, dict):
model["provider"] = "auto"
if "base_url" in model:
- model["base_url"] = "https://openrouter.ai/api/v1"
+ model["base_url"] = OPENROUTER_BASE_URL
config_path.write_text(yaml.safe_dump(config, sort_keys=False))
return config_path
diff --git a/hermes_cli/colors.py b/hermes_cli/colors.py
new file mode 100644
index 000000000..d30f99c62
--- /dev/null
+++ b/hermes_cli/colors.py
@@ -0,0 +1,22 @@
+"""Shared ANSI color utilities for Hermes CLI modules."""
+
+import sys
+
+
+class Colors:
+ RESET = "\033[0m"
+ BOLD = "\033[1m"
+ DIM = "\033[2m"
+ RED = "\033[31m"
+ GREEN = "\033[32m"
+ YELLOW = "\033[33m"
+ BLUE = "\033[34m"
+ MAGENTA = "\033[35m"
+ CYAN = "\033[36m"
+
+
+def color(text: str, *codes) -> str:
+ """Apply color codes to text (only when output is a TTY)."""
+ if not sys.stdout.isatty():
+ return text
+ return "".join(codes) + text + Colors.RESET
diff --git a/hermes_cli/config.py b/hermes_cli/config.py
index 7fd88905b..a42fdedee 100644
--- a/hermes_cli/config.py
+++ b/hermes_cli/config.py
@@ -20,22 +20,7 @@ from typing import Dict, Any, Optional, List, Tuple
import yaml
-# ANSI colors
-class Colors:
- RESET = "\033[0m"
- BOLD = "\033[1m"
- DIM = "\033[2m"
- RED = "\033[31m"
- GREEN = "\033[32m"
- YELLOW = "\033[33m"
- BLUE = "\033[34m"
- MAGENTA = "\033[35m"
- CYAN = "\033[36m"
-
-def color(text: str, *codes) -> str:
- if not sys.stdout.isatty():
- return text
- return "".join(codes) + text + Colors.RESET
+from hermes_cli.colors import Colors, color
# =============================================================================
diff --git a/hermes_cli/cron.py b/hermes_cli/cron.py
index 9000ae536..37cc40926 100644
--- a/hermes_cli/cron.py
+++ b/hermes_cli/cron.py
@@ -4,29 +4,14 @@ Cron subcommand for hermes CLI.
Handles: hermes cron [list|daemon|tick]
"""
-import json
import sys
-import time
from pathlib import Path
from datetime import datetime
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
sys.path.insert(0, str(PROJECT_ROOT))
-# ANSI colors
-class Colors:
- RESET = "\033[0m"
- BOLD = "\033[1m"
- DIM = "\033[2m"
- RED = "\033[31m"
- GREEN = "\033[32m"
- YELLOW = "\033[33m"
- CYAN = "\033[36m"
-
-def color(text: str, *codes) -> str:
- if not sys.stdout.isatty():
- return text
- return "".join(codes) + text + Colors.RESET
+from hermes_cli.colors import Colors, color
def cron_list(show_all: bool = False):
diff --git a/hermes_cli/doctor.py b/hermes_cli/doctor.py
index 83462e639..09176ba15 100644
--- a/hermes_cli/doctor.py
+++ b/hermes_cli/doctor.py
@@ -23,20 +23,8 @@ if _env_path.exists():
# Also try project .env as fallback
load_dotenv(PROJECT_ROOT / ".env", override=False)
-# ANSI colors
-class Colors:
- RESET = "\033[0m"
- BOLD = "\033[1m"
- DIM = "\033[2m"
- RED = "\033[31m"
- GREEN = "\033[32m"
- YELLOW = "\033[33m"
- CYAN = "\033[36m"
-
-def color(text: str, *codes) -> str:
- if not sys.stdout.isatty():
- return text
- return "".join(codes) + text + Colors.RESET
+from hermes_cli.colors import Colors, color
+from hermes_constants import OPENROUTER_MODELS_URL
def check_ok(text: str, detail: str = ""):
print(f" {color('✓', Colors.GREEN)} {text}" + (f" {color(detail, Colors.DIM)}" if detail else ""))
@@ -314,7 +302,7 @@ def run_doctor(args):
try:
import httpx
response = httpx.get(
- "https://openrouter.ai/api/v1/models",
+ OPENROUTER_MODELS_URL,
headers={"Authorization": f"Bearer {openrouter_key}"},
timeout=10
)
diff --git a/hermes_cli/main.py b/hermes_cli/main.py
index 4c92b57e4..09d56bc63 100644
--- a/hermes_cli/main.py
+++ b/hermes_cli/main.py
@@ -40,6 +40,7 @@ if env_path.exists():
load_dotenv(dotenv_path=env_path)
from hermes_cli import __version__
+from hermes_constants import OPENROUTER_BASE_URL
def cmd_chat(args):
@@ -241,7 +242,7 @@ def _model_flow_openrouter(config, current_model=""):
model = cfg.get("model")
if isinstance(model, dict):
model["provider"] = "openrouter"
- model["base_url"] = "https://openrouter.ai/api/v1"
+ model["base_url"] = OPENROUTER_BASE_URL
save_config(cfg)
deactivate_provider()
print(f"Default model set to: {selected} (via OpenRouter)")
diff --git a/hermes_cli/pairing.py b/hermes_cli/pairing.py
index d15d8cb6b..ecd9f61fc 100644
--- a/hermes_cli/pairing.py
+++ b/hermes_cli/pairing.py
@@ -8,9 +8,6 @@ Usage:
hermes pairing clear-pending # Clear all expired/pending codes
"""
-import sys
-
-
def pairing_command(args):
"""Handle hermes pairing subcommands."""
from gateway.pairing import PairingStore
diff --git a/hermes_cli/setup.py b/hermes_cli/setup.py
index fe5ef6341..7ade18f8c 100644
--- a/hermes_cli/setup.py
+++ b/hermes_cli/setup.py
@@ -26,23 +26,7 @@ from hermes_cli.config import (
ensure_hermes_home, DEFAULT_CONFIG
)
-# ANSI colors
-class Colors:
- RESET = "\033[0m"
- BOLD = "\033[1m"
- DIM = "\033[2m"
- RED = "\033[31m"
- GREEN = "\033[32m"
- YELLOW = "\033[33m"
- BLUE = "\033[34m"
- MAGENTA = "\033[35m"
- CYAN = "\033[36m"
-
-def color(text: str, *codes) -> str:
- """Apply color codes to text."""
- if not sys.stdout.isatty():
- return text
- return "".join(codes) + text + Colors.RESET
+from hermes_cli.colors import Colors, color
def print_header(title: str):
"""Print a section header."""
diff --git a/hermes_cli/skills_hub.py b/hermes_cli/skills_hub.py
index b0e4bbfe0..db6db9e39 100644
--- a/hermes_cli/skills_hub.py
+++ b/hermes_cli/skills_hub.py
@@ -12,7 +12,6 @@ handler are thin wrappers that parse args and delegate.
import json
import shutil
-import sys
from pathlib import Path
from typing import Optional
diff --git a/hermes_cli/status.py b/hermes_cli/status.py
index 9f914102b..33ebd4983 100644
--- a/hermes_cli/status.py
+++ b/hermes_cli/status.py
@@ -11,20 +11,8 @@ from pathlib import Path
PROJECT_ROOT = Path(__file__).parent.parent.resolve()
-# ANSI colors
-class Colors:
- RESET = "\033[0m"
- BOLD = "\033[1m"
- DIM = "\033[2m"
- RED = "\033[31m"
- GREEN = "\033[32m"
- YELLOW = "\033[33m"
- CYAN = "\033[36m"
-
-def color(text: str, *codes) -> str:
- if not sys.stdout.isatty():
- return text
- return "".join(codes) + text + Colors.RESET
+from hermes_cli.colors import Colors, color
+from hermes_constants import OPENROUTER_MODELS_URL
def check_mark(ok: bool) -> str:
if ok:
@@ -232,7 +220,7 @@ def show_status(args):
jobs = data.get("jobs", [])
enabled_jobs = [j for j in jobs if j.get("enabled", True)]
print(f" Jobs: {len(enabled_jobs)} active, {len(jobs)} total")
- except:
+ except Exception:
print(f" Jobs: (error reading jobs file)")
else:
print(f" Jobs: 0")
@@ -250,7 +238,7 @@ def show_status(args):
with open(sessions_file) as f:
data = json.load(f)
print(f" Active: {len(data)} session(s)")
- except:
+ except Exception:
print(f" Active: (error reading sessions file)")
else:
print(f" Active: 0")
@@ -268,7 +256,7 @@ def show_status(args):
try:
import httpx
response = httpx.get(
- "https://openrouter.ai/api/v1/models",
+ OPENROUTER_MODELS_URL,
headers={"Authorization": f"Bearer {openrouter_key}"},
timeout=10
)
@@ -288,7 +276,7 @@ def show_status(args):
port_in_use = result == 0
# This is informational, not necessarily bad
print(f" Port 18789: {'in use' if port_in_use else 'available'}")
- except:
+ except OSError:
pass
print()
diff --git a/hermes_cli/uninstall.py b/hermes_cli/uninstall.py
index 0f7676d8a..d70405ce3 100644
--- a/hermes_cli/uninstall.py
+++ b/hermes_cli/uninstall.py
@@ -13,23 +13,7 @@ import subprocess
from pathlib import Path
from typing import Optional
-# ANSI colors
-class Colors:
- RESET = "\033[0m"
- BOLD = "\033[1m"
- DIM = "\033[2m"
- RED = "\033[31m"
- GREEN = "\033[32m"
- YELLOW = "\033[33m"
- BLUE = "\033[34m"
- MAGENTA = "\033[35m"
- CYAN = "\033[36m"
-
-def color(text: str, *codes) -> str:
- """Apply color codes to text (only in TTY)."""
- if not sys.stdout.isatty():
- return text
- return "".join(codes) + text + Colors.RESET
+from hermes_cli.colors import Colors, color
def log_info(msg: str):
print(f"{color('→', Colors.CYAN)} {msg}")
diff --git a/hermes_constants.py b/hermes_constants.py
new file mode 100644
index 000000000..066194c87
--- /dev/null
+++ b/hermes_constants.py
@@ -0,0 +1,9 @@
+"""Shared constants for Hermes Agent.
+
+Import-safe module with no dependencies — can be imported from anywhere
+without risk of circular imports.
+"""
+
+OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
+OPENROUTER_MODELS_URL = f"{OPENROUTER_BASE_URL}/models"
+OPENROUTER_CHAT_URL = f"{OPENROUTER_BASE_URL}/chat/completions"
diff --git a/model_tools.py b/model_tools.py
index 787470a98..08ea89f94 100644
--- a/model_tools.py
+++ b/model_tools.py
@@ -31,19 +31,17 @@ import asyncio
import os
from typing import Dict, Any, List, Optional, Tuple
-from tools.web_tools import web_search_tool, web_extract_tool, web_crawl_tool, check_firecrawl_api_key
-from tools.terminal_tool import terminal_tool, check_terminal_requirements, TERMINAL_TOOL_DESCRIPTION, cleanup_vm
+from tools.web_tools import web_search_tool, web_extract_tool, check_firecrawl_api_key
+from tools.terminal_tool import terminal_tool, check_terminal_requirements, TERMINAL_TOOL_DESCRIPTION
# File manipulation tools (read, write, patch, search)
from tools.file_tools import read_file_tool, write_file_tool, patch_tool, search_tool
from tools import check_file_requirements
-# Hecate/MorphCloud terminal tool (cloud VMs) - available as alternative backend
-from tools.terminal_hecate import terminal_hecate_tool, check_hecate_requirements, TERMINAL_HECATE_DESCRIPTION
from tools.vision_tools import vision_analyze_tool, check_vision_requirements
from tools.mixture_of_agents_tool import mixture_of_agents_tool, check_moa_requirements
from tools.image_generation_tool import image_generate_tool, check_image_generation_requirements
-from tools.skills_tool import skills_list, skill_view, check_skills_requirements, SKILLS_TOOL_DESCRIPTION
+from tools.skills_tool import skills_list, skill_view, check_skills_requirements
# Agent-managed skill creation/editing
-from tools.skill_manager_tool import skill_manage, check_skill_manage_requirements, SKILL_MANAGE_SCHEMA
+from tools.skill_manager_tool import skill_manage, SKILL_MANAGE_SCHEMA
# RL Training tools (Tinker-Atropos)
from tools.rl_training_tool import (
rl_list_environments,
@@ -64,7 +62,6 @@ from tools.cronjob_tools import (
list_cronjobs,
remove_cronjob,
check_cronjob_requirements,
- get_cronjob_tool_definitions,
SCHEDULE_CRONJOB_SCHEMA,
LIST_CRONJOBS_SCHEMA,
REMOVE_CRONJOB_SCHEMA
@@ -99,11 +96,7 @@ from tools.clarify_tool import clarify_tool, check_clarify_requirements, CLARIFY
from tools.code_execution_tool import execute_code, check_sandbox_requirements, EXECUTE_CODE_SCHEMA
# Subagent delegation
from tools.delegate_tool import delegate_task, check_delegate_requirements, DELEGATE_TASK_SCHEMA
-from toolsets import (
- get_toolset, resolve_toolset, resolve_multiple_toolsets,
- get_all_toolsets, get_toolset_names, validate_toolset,
- get_toolset_info, print_toolset_tree
-)
+from toolsets import resolve_toolset, validate_toolset
# =============================================================================
@@ -262,43 +255,6 @@ def check_tool_availability(quiet: bool = False) -> Tuple[List[str], List[Dict[s
return available, unavailable
-def print_tool_availability_warnings(unavailable: List[Dict[str, Any]], prefix: str = ""):
- """Print warnings about unavailable tools."""
- if not unavailable:
- return
-
- # Filter to only those missing API keys (not system dependencies)
- api_key_missing = [u for u in unavailable if u["missing_vars"]]
-
- if api_key_missing:
- print(f"{prefix}⚠️ Some tools are disabled due to missing API keys:")
- for item in api_key_missing:
- vars_str = ", ".join(item["missing_vars"])
- print(f"{prefix} • {item['name']}: missing {vars_str}")
- if item["setup_url"]:
- print(f"{prefix} Get key at: {item['setup_url']}")
- print(f"{prefix} Run 'hermes setup' to configure API keys")
- print()
-
-
-def get_tool_availability_summary() -> Dict[str, Any]:
- """
- Get a summary of tool availability for display in status/doctor commands.
-
- Returns:
- Dict with 'available' and 'unavailable' lists of tool info
- """
- available, unavailable = check_tool_availability()
-
- return {
- "available": [
- {"id": tid, "name": TOOLSET_REQUIREMENTS[tid]["name"], "tools": TOOLSET_REQUIREMENTS[tid]["tools"]}
- for tid in available
- ],
- "unavailable": unavailable,
- }
-
-
def get_web_tool_definitions() -> List[Dict[str, Any]]:
"""
Get tool definitions for web tools in OpenAI's expected format.
diff --git a/pyproject.toml b/pyproject.toml
index 02e354df7..7f6a4695e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -62,7 +62,7 @@ hermes = "hermes_cli.main:main"
hermes-agent = "run_agent:main"
[tool.setuptools]
-py-modules = ["run_agent", "model_tools", "toolsets", "batch_runner", "trajectory_compressor", "toolset_distributions", "cli"]
+py-modules = ["run_agent", "model_tools", "toolsets", "batch_runner", "trajectory_compressor", "toolset_distributions", "cli", "hermes_constants"]
[tool.setuptools.packages.find]
include = ["tools", "hermes_cli", "gateway", "cron"]
diff --git a/requirements.txt b/requirements.txt
index 45faea555..030c84656 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,7 @@
+# NOTE: This file is maintained for convenience only.
+# The canonical dependency list is in pyproject.toml.
+# Preferred install: pip install -e ".[all]"
+
# Core dependencies
openai
python-dotenv
@@ -10,6 +14,7 @@ pyyaml
requests
jinja2
pydantic>=2.0
+PyJWT[crypto]
# Web tools
firecrawl-py
@@ -23,27 +28,13 @@ litellm>=1.75.5
typer
platformdirs
-# Optional: For Docker backend (recommended)
-# Requires Docker installed and user in 'docker' group
-
-# Optional: For Modal backend (cloud execution)
-# swe-rex[modal]>=1.4.0 # Includes modal + boto3 + swe-rex runtime
-
# Text-to-speech (Edge TTS is free, no API key needed)
edge-tts
-# Optional: Premium TTS providers
-# elevenlabs # Uncomment if using ElevenLabs TTS (needs ELEVENLABS_API_KEY)
-
# Optional: For cron expression parsing (cronjob scheduling)
croniter
# Optional: For messaging platform integrations (gateway)
-# Telegram
python-telegram-bot>=20.0
-
-# Discord
discord.py>=2.0
-
-# WhatsApp bridge communication + general async HTTP (used by gateway)
aiohttp>=3.9.0
diff --git a/rl_cli.py b/rl_cli.py
index a45c365b4..eaeec1d96 100644
--- a/rl_cli.py
+++ b/rl_cli.py
@@ -64,8 +64,10 @@ from tools.rl_training_tool import check_rl_api_keys, get_missing_keys
# Config Loading
# ============================================================================
+from hermes_constants import OPENROUTER_BASE_URL
+
DEFAULT_MODEL = "anthropic/claude-opus-4.5"
-DEFAULT_BASE_URL = "https://openrouter.ai/api/v1"
+DEFAULT_BASE_URL = OPENROUTER_BASE_URL
def load_hermes_config() -> dict:
diff --git a/run_agent.py b/run_agent.py
index 3ef98bdb9..067c07c6d 100644
--- a/run_agent.py
+++ b/run_agent.py
@@ -25,6 +25,7 @@ import json
import logging
import os
import random
+import re
import sys
import time
import threading
@@ -54,6 +55,8 @@ from tools.browser_tool import cleanup_browser
import requests
+from hermes_constants import OPENROUTER_BASE_URL, OPENROUTER_MODELS_URL
+
# =============================================================================
# Default Agent Identity & Platform Hints
# =============================================================================
@@ -133,7 +136,7 @@ def fetch_model_metadata(force_refresh: bool = False) -> Dict[str, Dict[str, Any
try:
response = requests.get(
- "https://openrouter.ai/api/v1/models",
+ OPENROUTER_MODELS_URL,
timeout=10
)
response.raise_for_status()
@@ -282,7 +285,7 @@ class ContextCompressor:
api_key = os.getenv("OPENROUTER_API_KEY", "")
self.client = OpenAI(
api_key=api_key,
- base_url="https://openrouter.ai/api/v1"
+ base_url=OPENROUTER_BASE_URL
) if api_key else None
def update_from_response(self, usage: Dict[str, Any]):
@@ -600,7 +603,6 @@ def build_skills_system_prompt() -> str:
str: The skills system prompt section, or empty string if no skills found.
"""
import os
- import re
from pathlib import Path
hermes_home = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes"))
@@ -1093,8 +1095,8 @@ class AIAgent:
Args:
base_url (str): Base URL for the model API (optional)
api_key (str): API key for authentication (optional, uses env var if not provided)
- model (str): Model name to use (default: "gpt-4")
- max_iterations (int): Maximum number of tool calling iterations (default: 10)
+ model (str): Model name to use (default: "anthropic/claude-opus-4.6")
+ max_iterations (int): Maximum number of tool calling iterations (default: 60)
tool_delay (float): Delay between tool calls in seconds (default: 1.0)
enabled_toolsets (List[str]): Only enable tools from these toolsets (optional)
disabled_toolsets (List[str]): Disable tools from these toolsets (optional)
@@ -1102,7 +1104,7 @@ class AIAgent:
verbose_logging (bool): Enable verbose logging for debugging (default: False)
quiet_mode (bool): Suppress progress output for clean CLI experience (default: False)
ephemeral_system_prompt (str): System prompt used during agent execution but NOT saved to trajectories (optional)
- log_prefix_chars (int): Number of characters to show in log previews for tool calls/responses (default: 20)
+ log_prefix_chars (int): Number of characters to show in log previews for tool calls/responses (default: 100)
log_prefix (str): Prefix to add to all log messages for identification in parallel processing (default: "")
providers_allowed (List[str]): OpenRouter providers to allow (optional)
providers_ignored (List[str]): OpenRouter providers to ignore (optional)
@@ -1137,7 +1139,7 @@ class AIAgent:
self.log_prefix = f"{log_prefix} " if log_prefix else ""
# Store effective base URL for feature detection (prompt caching, reasoning, etc.)
# When no base_url is provided, the client defaults to OpenRouter, so reflect that here.
- self.base_url = base_url or "https://openrouter.ai/api/v1"
+ self.base_url = base_url or OPENROUTER_BASE_URL
self.tool_progress_callback = tool_progress_callback
self.clarify_callback = clarify_callback
self._last_reported_tool = None # Track for "new tool" mode
@@ -1215,7 +1217,7 @@ class AIAgent:
if base_url:
client_kwargs["base_url"] = base_url
else:
- client_kwargs["base_url"] = "https://openrouter.ai/api/v1"
+ client_kwargs["base_url"] = OPENROUTER_BASE_URL
# Handle API key - OpenRouter is the primary provider
if api_key:
@@ -1636,7 +1638,6 @@ class AIAgent:
if not content:
return False
- import re
        # Remove all <think>...</think> blocks (including nested ones, non-greedy)
        cleaned = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL)
@@ -1686,6 +1687,19 @@ class AIAgent:
return None
+ def _cleanup_task_resources(self, task_id: str) -> None:
+ """Clean up VM and browser resources for a given task."""
+ try:
+ cleanup_vm(task_id)
+ except Exception as e:
+ if self.verbose_logging:
+ logging.warning(f"Failed to cleanup VM for task {task_id}: {e}")
+ try:
+ cleanup_browser(task_id)
+ except Exception as e:
+ if self.verbose_logging:
+ logging.warning(f"Failed to cleanup browser for task {task_id}: {e}")
+
def _get_messages_up_to_last_assistant(self, messages: List[Dict]) -> List[Dict]:
"""
Get messages up to (but not including) the last assistant turn.
@@ -2331,7 +2345,6 @@ class AIAgent:
Dict: Complete conversation result with final response and message history
"""
# Generate unique task_id if not provided to isolate VMs between concurrent tasks
- import uuid
effective_task_id = task_id or str(uuid.uuid4())
# Reset retry counters at the start of each conversation to prevent state leakage
@@ -2628,17 +2641,7 @@ class AIAgent:
print(f"{self.log_prefix} ⏪ Rolling back to last complete assistant turn")
rolled_back_messages = self._get_messages_up_to_last_assistant(messages)
- # Clean up VM and browser
- try:
- cleanup_vm(effective_task_id)
- except Exception as e:
- if self.verbose_logging:
- logging.warning(f"Failed to cleanup VM for task {effective_task_id}: {e}")
- try:
- cleanup_browser(effective_task_id)
- except Exception as e:
- if self.verbose_logging:
- logging.warning(f"Failed to cleanup browser for task {effective_task_id}: {e}")
+ self._cleanup_task_resources(effective_task_id)
return {
"final_response": None,
@@ -2846,15 +2849,7 @@ class AIAgent:
self._incomplete_scratchpad_retries = 0
rolled_back_messages = self._get_messages_up_to_last_assistant(messages)
-
- try:
- cleanup_vm(effective_task_id)
- except Exception:
- pass
- try:
- cleanup_browser(effective_task_id)
- except Exception:
- pass
+ self._cleanup_task_resources(effective_task_id)
return {
"final_response": None,
@@ -3247,18 +3242,7 @@ class AIAgent:
self._empty_content_retries = 0 # Reset for next conversation
rolled_back_messages = self._get_messages_up_to_last_assistant(messages)
-
- # Clean up VM and browser
- try:
- cleanup_vm(effective_task_id)
- except Exception as e:
- if self.verbose_logging:
- logging.warning(f"Failed to cleanup VM for task {effective_task_id}: {e}")
- try:
- cleanup_browser(effective_task_id)
- except Exception as e:
- if self.verbose_logging:
- logging.warning(f"Failed to cleanup browser for task {effective_task_id}: {e}")
+ self._cleanup_task_resources(effective_task_id)
return {
"final_response": None,
@@ -3365,7 +3349,6 @@ class AIAgent:
final_response = summary_response.choices[0].message.content
# Strip think blocks from final response
                    if "</think>" in final_response:
- import re
                        final_response = re.sub(r'<think>.*?</think>\s*', '', final_response, flags=re.DOTALL).strip()
# Add to messages for session continuity
@@ -3384,17 +3367,7 @@ class AIAgent:
self._save_trajectory(messages, user_message, completed)
# Clean up VM and browser for this task after conversation completes
- try:
- cleanup_vm(effective_task_id)
- except Exception as e:
- if self.verbose_logging:
- logging.warning(f"Failed to cleanup VM for task {effective_task_id}: {e}")
-
- try:
- cleanup_browser(effective_task_id)
- except Exception as e:
- if self.verbose_logging:
- logging.warning(f"Failed to cleanup browser for task {effective_task_id}: {e}")
+ self._cleanup_task_resources(effective_task_id)
# Update session messages and save session log
self._session_messages = messages
@@ -3644,7 +3617,6 @@ def main(
# Save sample trajectory to UUID-named file if requested
if save_sample:
- import uuid
sample_id = str(uuid.uuid4())[:8]
sample_filename = f"sample_{sample_id}.json"
diff --git a/scripts/sample_and_compress.py b/scripts/sample_and_compress.py
index c31496f76..419111d80 100644
--- a/scripts/sample_and_compress.py
+++ b/scripts/sample_and_compress.py
@@ -108,7 +108,7 @@ def _count_tokens_for_entry(entry: Dict) -> Tuple[Dict, int]:
if value:
try:
total += len(_TOKENIZER.encode(value))
- except:
+ except Exception:
# Fallback to character estimate
total += len(value) // 4
diff --git a/tests/test_checkpoint_resumption.py b/tests/test_checkpoint_resumption.py
index d7c88910f..095397212 100644
--- a/tests/test_checkpoint_resumption.py
+++ b/tests/test_checkpoint_resumption.py
@@ -23,7 +23,6 @@ import os
import shutil
import sys
import time
-import signal
from pathlib import Path
from typing import List, Dict, Any
import traceback
diff --git a/tests/test_delegate.py b/tests/test_delegate.py
index fea5cc9f6..811940a02 100644
--- a/tests/test_delegate.py
+++ b/tests/test_delegate.py
@@ -12,7 +12,6 @@ Run with: python -m pytest tests/test_delegate.py -v
import json
import os
import sys
-import time
import unittest
from unittest.mock import MagicMock, patch
diff --git a/tests/test_temperature_fix.py b/tests/test_temperature_fix.py
index bab2ed282..c04086520 100644
--- a/tests/test_temperature_fix.py
+++ b/tests/test_temperature_fix.py
@@ -84,7 +84,7 @@ Create a markdown summary that captures all key information in a well-organized,
max_tokens=4000
)
print(f"✅ SUCCESS")
- except:
+ except Exception:
print(f"❌ FAILED")
await asyncio.sleep(0.5)
@@ -101,7 +101,7 @@ Create a markdown summary that captures all key information in a well-organized,
max_tokens=4000
)
print(f"✅ SUCCESS")
- except:
+ except Exception:
print(f"❌ FAILED")
if __name__ == "__main__":
diff --git a/tests/test_web_tools.py b/tests/test_web_tools.py
index 3214ee283..ed48c4928 100644
--- a/tests/test_web_tools.py
+++ b/tests/test_web_tools.py
@@ -21,7 +21,7 @@ import sys
import os
import argparse
from datetime import datetime
-from typing import List, Dict, Any
+from typing import List
# Import the web tools to test (updated path after moving tools/)
from tools.web_tools import (
diff --git a/tools/browser_tool.py b/tools/browser_tool.py
index e691bedab..2814f3bb0 100644
--- a/tools/browser_tool.py
+++ b/tools/browser_tool.py
@@ -57,6 +57,7 @@ import time
import requests
from typing import Dict, Any, Optional, List
from pathlib import Path
+from hermes_constants import OPENROUTER_CHAT_URL
# Try to import httpx for async LLM calls
try:
@@ -821,7 +822,7 @@ Provide a concise summary focused on interactive elements and key content."""
try:
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.post(
- "https://openrouter.ai/api/v1/chat/completions",
+ OPENROUTER_CHAT_URL,
headers={
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
@@ -1324,7 +1325,7 @@ Focus on answering the user's specific question."""
async def analyze_screenshot():
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(
- "https://openrouter.ai/api/v1/chat/completions",
+ OPENROUTER_CHAT_URL,
headers={
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
@@ -1374,7 +1375,7 @@ Focus on answering the user's specific question."""
else:
# Fallback: use synchronous requests
response = requests.post(
- "https://openrouter.ai/api/v1/chat/completions",
+ OPENROUTER_CHAT_URL,
headers={
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
diff --git a/tools/code_execution_tool.py b/tools/code_execution_tool.py
index db7d23d94..ca51b2ecf 100644
--- a/tools/code_execution_tool.py
+++ b/tools/code_execution_tool.py
@@ -345,7 +345,7 @@ def execute_code(
# --- Set up temp directory with hermes_tools.py and script.py ---
tmpdir = tempfile.mkdtemp(prefix="hermes_sandbox_")
- sock_path = f"/tmp/hermes_rpc_{uuid.uuid4().hex}.sock"
+ sock_path = os.path.join(tempfile.gettempdir(), f"hermes_rpc_{uuid.uuid4().hex}.sock")
tool_call_log: list = []
tool_call_counter = [0] # mutable so the RPC thread can increment
diff --git a/tools/fuzzy_match.py b/tools/fuzzy_match.py
index 796072ff9..bc8e34403 100644
--- a/tools/fuzzy_match.py
+++ b/tools/fuzzy_match.py
@@ -446,33 +446,3 @@ def _map_normalized_positions(original: str, normalized: str,
original_matches.append((orig_start, min(orig_end, len(original))))
return original_matches
-
-
-# =============================================================================
-# Utility Functions
-# =============================================================================
-
-def find_best_match(content: str, pattern: str) -> Optional[Tuple[int, int, str]]:
- """
- Find the best match for a pattern and return the strategy name.
-
- Returns:
- Tuple of (start, end, strategy_name) or None if no match
- """
- strategies = [
- ("exact", _strategy_exact),
- ("line_trimmed", _strategy_line_trimmed),
- ("whitespace_normalized", _strategy_whitespace_normalized),
- ("indentation_flexible", _strategy_indentation_flexible),
- ("escape_normalized", _strategy_escape_normalized),
- ("trimmed_boundary", _strategy_trimmed_boundary),
- ("block_anchor", _strategy_block_anchor),
- ("context_aware", _strategy_context_aware),
- ]
-
- for strategy_name, strategy_fn in strategies:
- matches = strategy_fn(content, pattern)
- if matches:
- return (matches[0][0], matches[0][1], strategy_name)
-
- return None
diff --git a/tools/mixture_of_agents_tool.py b/tools/mixture_of_agents_tool.py
index 73703269b..37fe5f02f 100644
--- a/tools/mixture_of_agents_tool.py
+++ b/tools/mixture_of_agents_tool.py
@@ -53,8 +53,8 @@ import datetime
from pathlib import Path
from typing import Dict, Any, List, Optional
from openai import AsyncOpenAI
+from hermes_constants import OPENROUTER_BASE_URL
-# Initialize OpenRouter API client lazily (only when needed)
_openrouter_client = None
def _get_openrouter_client():
@@ -66,7 +66,7 @@ def _get_openrouter_client():
raise ValueError("OPENROUTER_API_KEY environment variable not set")
_openrouter_client = AsyncOpenAI(
api_key=api_key,
- base_url="https://openrouter.ai/api/v1"
+ base_url=OPENROUTER_BASE_URL
)
return _openrouter_client
diff --git a/tools/session_search_tool.py b/tools/session_search_tool.py
index 4ba686241..bac2f710f 100644
--- a/tools/session_search_tool.py
+++ b/tools/session_search_tool.py
@@ -23,6 +23,7 @@ import logging
from typing import Dict, Any, List, Optional
from openai import AsyncOpenAI
+from hermes_constants import OPENROUTER_BASE_URL
SUMMARIZER_MODEL = "google/gemini-3-flash-preview"
MAX_SESSION_CHARS = 100_000
@@ -40,7 +41,7 @@ def _get_client() -> AsyncOpenAI:
raise ValueError("OPENROUTER_API_KEY not set")
_summarizer_client = AsyncOpenAI(
api_key=api_key,
- base_url="https://openrouter.ai/api/v1",
+ base_url=OPENROUTER_BASE_URL,
)
return _summarizer_client
diff --git a/tools/skills_guard.py b/tools/skills_guard.py
index 485f44e78..8403855f4 100644
--- a/tools/skills_guard.py
+++ b/tools/skills_guard.py
@@ -29,6 +29,8 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import List, Tuple
+from hermes_constants import OPENROUTER_BASE_URL
+
# ---------------------------------------------------------------------------
# Hardcoded trust configuration
@@ -941,7 +943,7 @@ def llm_audit_skill(skill_path: Path, static_result: ScanResult,
return static_result
client = OpenAI(
- base_url="https://openrouter.ai/api/v1",
+ base_url=OPENROUTER_BASE_URL,
api_key=api_key,
)
response = client.chat.completions.create(
@@ -1037,11 +1039,6 @@ def _get_configured_model() -> str:
return ""
-def check_guard_requirements() -> Tuple[bool, str]:
- """Check if the guard module can operate. Always returns True (no external deps)."""
- return True, "Skills Guard ready"
-
-
# ---------------------------------------------------------------------------
# Internal helpers
# ---------------------------------------------------------------------------
diff --git a/tools/terminal_tool.py b/tools/terminal_tool.py
index 6c93aff76..a28bfcc23 100644
--- a/tools/terminal_tool.py
+++ b/tools/terminal_tool.py
@@ -212,7 +212,7 @@ def _check_disk_usage_warning():
if f.is_file():
try:
total_bytes += f.stat().st_size
- except:
+ except OSError:
pass
total_gb = total_bytes / (1024 ** 3)
@@ -341,7 +341,7 @@ def _prompt_dangerous_approval(command: str, description: str, timeout_seconds:
def get_input():
try:
result["choice"] = input(" Choice [o/s/a/D]: ").strip().lower()
- except:
+ except (EOFError, OSError):
result["choice"] = ""
thread = threading.Thread(target=get_input, daemon=True)
@@ -894,7 +894,7 @@ class _SingularityEnvironment:
"""Cleanup on destruction."""
try:
self.cleanup()
- except:
+ except Exception:
pass
@@ -1022,13 +1022,13 @@ class _SSHEnvironment:
cmd = ["ssh", "-o", f"ControlPath={self.control_socket}", "-O", "exit",
f"{self.user}@{self.host}"]
subprocess.run(cmd, capture_output=True, timeout=5)
- except:
+ except (OSError, subprocess.SubprocessError):
pass
# Remove socket file
try:
self.control_socket.unlink()
- except:
+ except OSError:
pass
def stop(self):
@@ -1039,7 +1039,7 @@ class _SSHEnvironment:
"""Cleanup on destruction."""
try:
self.cleanup()
- except:
+ except Exception:
pass
@@ -1112,7 +1112,7 @@ class _DockerEnvironment:
"""Cleanup on destruction."""
try:
self.cleanup()
- except:
+ except Exception:
pass
@@ -1189,7 +1189,7 @@ class _ModalEnvironment:
"""Cleanup on destruction."""
try:
self.cleanup()
- except:
+ except Exception:
pass
@@ -1504,7 +1504,7 @@ def get_active_environments_info() -> Dict[str, Any]:
try:
size = sum(f.stat().st_size for f in Path(path).rglob('*') if f.is_file())
total_size += size
- except:
+ except OSError:
pass
info["total_disk_usage_mb"] = round(total_size / (1024 * 1024), 2)
@@ -1532,7 +1532,7 @@ def cleanup_all_environments():
try:
shutil.rmtree(path, ignore_errors=True)
print(f"[Terminal Cleanup] Removed orphaned: {path}")
- except:
+ except OSError:
pass
if not os.getenv("HERMES_QUIET") and cleaned > 0:
@@ -1669,7 +1669,6 @@ def terminal_tool(
# This prevents parallel tasks from overwriting each other's files
# In CLI mode (HERMES_QUIET), use the cwd directly without subdirectories
if env_type == "local" and not os.getenv("HERMES_QUIET"):
- import uuid
with _env_lock:
if effective_task_id not in _task_workdirs:
task_workdir = Path(cwd) / f"hermes-{effective_task_id}-{uuid.uuid4().hex[:8]}"
@@ -1944,7 +1943,7 @@ def check_terminal_requirements() -> bool:
if __name__ == "__main__":
- """Simple test when run directly."""
+ # Simple test when run directly
print("Terminal Tool Module (mini-swe-agent backend)")
print("=" * 50)
diff --git a/tools/transcription_tools.py b/tools/transcription_tools.py
index 01992cde9..6ecc8b2fb 100644
--- a/tools/transcription_tools.py
+++ b/tools/transcription_tools.py
@@ -99,8 +99,3 @@ def transcribe_audio(file_path: str, model: Optional[str] = None) -> dict:
"transcript": "",
"error": str(e),
}
-
-
-def check_stt_requirements() -> bool:
- """Check if OpenAI API key is available for speech-to-text."""
- return bool(os.getenv("HERMES_OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY"))
diff --git a/tools/vision_tools.py b/tools/vision_tools.py
index 7c383f3e4..b345fcf8d 100644
--- a/tools/vision_tools.py
+++ b/tools/vision_tools.py
@@ -36,9 +36,9 @@ import base64
from pathlib import Path
from typing import Dict, Any, Optional
from openai import AsyncOpenAI
-import httpx # Use httpx for async HTTP requests
+import httpx
+from hermes_constants import OPENROUTER_BASE_URL
-# Initialize OpenRouter API client lazily (only when needed)
_openrouter_client = None
def _get_openrouter_client():
@@ -50,7 +50,7 @@ def _get_openrouter_client():
raise ValueError("OPENROUTER_API_KEY environment variable not set")
_openrouter_client = AsyncOpenAI(
api_key=api_key,
- base_url="https://openrouter.ai/api/v1"
+ base_url=OPENROUTER_BASE_URL
)
return _openrouter_client
diff --git a/tools/web_tools.py b/tools/web_tools.py
index e5fe72a9b..9ad121e1e 100644
--- a/tools/web_tools.py
+++ b/tools/web_tools.py
@@ -50,6 +50,7 @@ from pathlib import Path
from typing import List, Dict, Any, Optional
from firecrawl import Firecrawl
from openai import AsyncOpenAI
+from hermes_constants import OPENROUTER_BASE_URL
# Initialize Firecrawl client lazily (only when needed)
# This prevents import errors when FIRECRAWL_API_KEY is not set
@@ -77,7 +78,7 @@ def _get_summarizer_client():
raise ValueError("OPENROUTER_API_KEY environment variable not set")
_summarizer_client = AsyncOpenAI(
api_key=api_key,
- base_url="https://openrouter.ai/api/v1"
+ base_url=OPENROUTER_BASE_URL
)
return _summarizer_client
diff --git a/toolsets.py b/toolsets.py
index 2a2daa7e9..771ad25f5 100644
--- a/toolsets.py
+++ b/toolsets.py
@@ -24,7 +24,6 @@ Usage:
"""
from typing import List, Dict, Any, Set, Optional
-import json
# Core toolset definitions
@@ -162,7 +161,7 @@ TOOLSETS = {
"safe": {
"description": "Safe toolkit without terminal access",
"tools": ["mixture_of_agents"],
- "includes": ["web", "vision", "creative"]
+ "includes": ["web", "vision", "image_gen"]
},
# ==========================================================================
@@ -587,50 +586,31 @@ def print_toolset_tree(name: str, indent: int = 0) -> None:
if __name__ == "__main__":
- """
- Demo and testing of the toolsets system
- """
- print("🎯 Toolsets System Demo")
+ print("Toolsets System Demo")
print("=" * 60)
- # Show all available toolsets
- print("\n📦 Available Toolsets:")
+ print("\nAvailable Toolsets:")
print("-" * 40)
for name, toolset in get_all_toolsets().items():
info = get_toolset_info(name)
- composite = "📂" if info["is_composite"] else "🔧"
- print(f"{composite} {name:20} - {toolset['description']}")
- print(f" Tools: {len(info['resolved_tools'])} total")
+ composite = "[composite]" if info["is_composite"] else "[leaf]"
+ print(f" {composite} {name:20} - {toolset['description']}")
+ print(f" Tools: {len(info['resolved_tools'])} total")
-
- # Demo toolset resolution
- print("\n🔍 Toolset Resolution Examples:")
+ print("\nToolset Resolution Examples:")
print("-" * 40)
-
- examples = ["research", "development", "full_stack", "minimal", "safe"]
- for name in examples:
+ for name in ["web", "terminal", "safe", "debugging"]:
tools = resolve_toolset(name)
- print(f"\n{name}:")
- print(f" Resolved to {len(tools)} tools: {', '.join(sorted(tools))}")
+ print(f"\n {name}:")
+ print(f" Resolved to {len(tools)} tools: {', '.join(sorted(tools))}")
- # Show toolset composition tree
- print("\n🌳 Toolset Composition Tree:")
+ print("\nMultiple Toolset Resolution:")
print("-" * 40)
- print("\nExample: 'content_creation' toolset:")
- print_toolset_tree("content_creation")
+ combined = resolve_multiple_toolsets(["web", "vision", "terminal"])
+ print(f" Combining ['web', 'vision', 'terminal']:")
+ print(f" Result: {', '.join(sorted(combined))}")
- print("\nExample: 'full_stack' toolset:")
- print_toolset_tree("full_stack")
-
- # Demo multiple toolset resolution
- print("\n🔗 Multiple Toolset Resolution:")
- print("-" * 40)
- combined = resolve_multiple_toolsets(["minimal", "vision", "reasoning"])
- print(f"Combining ['minimal', 'vision', 'reasoning']:")
- print(f" Result: {', '.join(sorted(combined))}")
-
- # Demo custom toolset creation
- print("\n➕ Custom Toolset Creation:")
+ print("\nCustom Toolset Creation:")
print("-" * 40)
create_custom_toolset(
name="my_custom",
@@ -638,8 +618,7 @@ if __name__ == "__main__":
tools=["web_search"],
includes=["terminal", "vision"]
)
-
custom_info = get_toolset_info("my_custom")
- print(f"Created 'my_custom' toolset:")
- print(f" Description: {custom_info['description']}")
- print(f" Resolved tools: {', '.join(custom_info['resolved_tools'])}")
+ print(f" Created 'my_custom' toolset:")
+ print(f" Description: {custom_info['description']}")
+ print(f" Resolved tools: {', '.join(custom_info['resolved_tools'])}")
diff --git a/trajectory_compressor.py b/trajectory_compressor.py
index 9717f037d..dedae1ade 100644
--- a/trajectory_compressor.py
+++ b/trajectory_compressor.py
@@ -44,6 +44,7 @@ from datetime import datetime
import fire
from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn, TimeElapsedColumn, TimeRemainingColumn
from rich.console import Console
+from hermes_constants import OPENROUTER_BASE_URL
# Load environment variables
from dotenv import load_dotenv
@@ -70,7 +71,7 @@ class CompressionConfig:
# Summarization (OpenRouter)
summarization_model: str = "google/gemini-3-flash-preview"
- base_url: str = "https://openrouter.ai/api/v1"
+ base_url: str = OPENROUTER_BASE_URL
api_key_env: str = "OPENROUTER_API_KEY"
temperature: float = 0.3
max_retries: int = 3