2026-02-21 20:22:33 -08:00
|
|
|
"""Send Message Tool -- cross-channel messaging via platform APIs.
|
|
|
|
|
|
|
|
|
|
Sends a message to a user or channel on any connected messaging platform
|
2026-02-22 20:44:15 -08:00
|
|
|
(Telegram, Discord, Slack). Supports listing available targets and resolving
|
|
|
|
|
human-friendly channel names to IDs. Works in both CLI and gateway contexts.
|
2026-02-21 20:22:33 -08:00
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
import json
|
|
|
|
|
import logging
|
2026-02-22 20:44:15 -08:00
|
|
|
import os
|
2026-03-11 09:15:34 +01:00
|
|
|
import re
|
2026-03-14 06:31:52 -07:00
|
|
|
import ssl
|
feat: add Signal messenger gateway platform (#405)
Complete Signal adapter using signal-cli daemon HTTP API.
Based on PR #268 by ibhagwan, rebuilt on current main with bug fixes.
Architecture:
- SSE streaming for inbound messages with exponential backoff (2s→60s)
- JSON-RPC 2.0 for outbound (send, typing, attachments, contacts)
- Health monitor detects stale SSE connections (120s threshold)
- Phone number redaction in all logs and global redact.py
Features:
- DM and group message support with separate access policies
- DM policies: pairing (default), allowlist, open
- Group policies: disabled (default), allowlist, open
- Attachment download with magic-byte type detection
- Typing indicators (8s refresh interval)
- 100MB attachment size limit, 8000 char message limit
- E.164 phone + UUID allowlist support
Integration:
- Platform.SIGNAL enum in gateway/config.py
- Signal in _is_user_authorized() allowlist maps (gateway/run.py)
- Adapter factory in _create_adapter() (gateway/run.py)
- user_id_alt/chat_id_alt fields in SessionSource for UUIDs
- send_message tool support via httpx JSON-RPC (not aiohttp)
- Interactive setup wizard in 'hermes gateway setup'
- Connectivity testing during setup (pings /api/v1/check)
- signal-cli detection and install guidance
Bug fixes from PR #268:
- Timestamp reads from envelope_data (not outer wrapper)
- Uses httpx consistently (not aiohttp in send_message tool)
- SIGNAL_DEBUG scoped to signal logger (not root)
- extract_images regex NOT modified (preserves group numbering)
- pairing.py NOT modified (no cross-platform side effects)
- No dual authorization (adapter defers to run.py for user auth)
- Wildcard uses set membership ('*' in set, not list equality)
- .zip default for PK magic bytes (not .docx)
No new Python dependencies — uses httpx (already core).
External requirement: signal-cli daemon (user-installed).
Tests: 30 new tests covering config, init, helpers, session source,
phone redaction, authorization, and send_message integration.
Co-authored-by: ibhagwan <ibhagwan@users.noreply.github.com>
2026-03-08 20:20:35 -07:00
|
|
|
import time
|
2026-02-21 20:22:33 -08:00
|
|
|
|
|
|
|
|
logger = logging.getLogger(__name__)

# Telegram target: "<chat_id>" or "<chat_id>:<thread_id>" (forum topic).
# The chat id may be negative (supergroups). Group 1 = chat id,
# group 2 = optional thread id.
_TELEGRAM_TOPIC_TARGET_RE = re.compile(r"^\s*(-?\d+)(?::(\d+))?\s*$")

# Feishu target: an id with an oc_/ou_/on_/chat_/open_ prefix, optionally
# followed by ":<thread_id>". Group 1 = chat id, group 2 = optional thread id.
_FEISHU_TARGET_RE = re.compile(r"^\s*((?:oc|ou|on|chat|open)_[-A-Za-z0-9]+)(?::([-A-Za-z0-9_]+))?\s*$")

# Extension buckets used by _describe_media_for_mirror() to label
# media-only messages in session-mirror summaries.
_IMAGE_EXTS = {".jpg", ".jpeg", ".png", ".webp", ".gif"}

_VIDEO_EXTS = {".mp4", ".mov", ".avi", ".mkv", ".3gp"}

_AUDIO_EXTS = {".ogg", ".opus", ".mp3", ".wav", ".m4a"}

# Voice-note formats (subset of audio); only used when the attachment is
# explicitly flagged as voice.
_VOICE_EXTS = {".ogg", ".opus"}
|
2026-03-11 11:39:07 -07:00
|
|
|
|
2026-02-21 20:22:33 -08:00
|
|
|
|
|
|
|
|
# Tool schema advertised to the model: a name, a model-facing description,
# and a JSON-Schema "parameters" object describing the accepted arguments.
SEND_MESSAGE_SCHEMA = {
    "name": "send_message",
    "description": (
        "Send a message to a connected messaging platform, or list available targets.\n\n"
        "IMPORTANT: When the user asks to send to a specific channel or person "
        "(not just a bare platform name), call send_message(action='list') FIRST to see "
        "available targets, then send to the correct one.\n"
        "If the user just says a platform name like 'send to telegram', send directly "
        "to the home channel without listing first."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": ["send", "list"],
                "description": "Action to perform. 'send' (default) sends a message. 'list' returns all available channels/contacts across connected platforms."
            },
            "target": {
                "type": "string",
                "description": "Delivery target. Format: 'platform' (uses home channel), 'platform:#channel-name', 'platform:chat_id', or Telegram topic 'telegram:chat_id:thread_id'. Examples: 'telegram', 'telegram:-1001234567890:17585', 'discord:#bot-home', 'slack:#engineering', 'signal:+15551234567'"
            },
            "message": {
                "type": "string",
                "description": "The message text to send"
            }
        },
        # Nothing is schema-required: 'action' defaults to 'send', and
        # target/message presence is validated at call time in _handle_send().
        "required": []
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def send_message_tool(args, **kw):
    """Entry point for the send_message tool: dispatch on the 'action' argument."""
    # 'send' is the default when the model omits the action field entirely.
    if args.get("action", "send") == "list":
        return _handle_list()
    return _handle_send(args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _handle_list():
|
|
|
|
|
"""Return formatted list of available messaging targets."""
|
|
|
|
|
try:
|
|
|
|
|
from gateway.channel_directory import format_directory_for_display
|
|
|
|
|
return json.dumps({"targets": format_directory_for_display()})
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return json.dumps({"error": f"Failed to load channel directory: {e}"})
|
2026-02-21 20:22:33 -08:00
|
|
|
|
2026-02-22 20:44:15 -08:00
|
|
|
|
|
|
|
|
def _handle_send(args) -> str:
    """Send a message to a platform target.

    Args:
        args: Tool-call arguments; reads 'target' and 'message'.

    Returns:
        A JSON string: either the platform send result (possibly annotated
        with 'note'/'mirrored' keys) or an {'error': ...} payload.
    """
    target = args.get("target", "")
    message = args.get("message", "")
    # Both fields are schema-optional, so presence is enforced here.
    if not target or not message:
        return json.dumps({"error": "Both 'target' and 'message' are required when action='send'"})

    # Target grammar: 'platform' or 'platform:ref'. The ref may itself contain
    # ':' (Telegram topics, Feishu threads) -- _parse_target_ref handles that.
    parts = target.split(":", 1)
    platform_name = parts[0].strip().lower()
    target_ref = parts[1].strip() if len(parts) > 1 else None
    chat_id = None
    thread_id = None

    if target_ref:
        # is_explicit: the ref is already a concrete id, no directory lookup needed.
        chat_id, thread_id, is_explicit = _parse_target_ref(platform_name, target_ref)
    else:
        is_explicit = False

    # Resolve human-friendly channel names to numeric IDs
    if target_ref and not is_explicit:
        try:
            from gateway.channel_directory import resolve_channel_name
            resolved = resolve_channel_name(platform_name, target_ref)
            if resolved:
                chat_id, thread_id, _ = _parse_target_ref(platform_name, resolved)
            else:
                return json.dumps({
                    "error": f"Could not resolve '{target_ref}' on {platform_name}. "
                             f"Use send_message(action='list') to see available targets."
                })
        except Exception:
            # Directory unavailable (import/IO failure): suggest a numeric id
            # rather than crashing the tool call.
            return json.dumps({
                "error": f"Could not resolve '{target_ref}' on {platform_name}. "
                         f"Try using a numeric channel ID instead."
            })

    # Bail out before any network work if the user interrupted the agent.
    from tools.interrupt import is_interrupted

    if is_interrupted():
        return json.dumps({"error": "Interrupted"})

    try:
        from gateway.config import load_gateway_config, Platform
        config = load_gateway_config()
    except Exception as e:
        return json.dumps({"error": f"Failed to load gateway config: {e}"})

    # Map the user-facing platform token to the Platform enum member.
    platform_map = {
        "telegram": Platform.TELEGRAM,
        "discord": Platform.DISCORD,
        "slack": Platform.SLACK,
        "whatsapp": Platform.WHATSAPP,
        "signal": Platform.SIGNAL,
        "matrix": Platform.MATRIX,
        "mattermost": Platform.MATTERMOST,
        "homeassistant": Platform.HOMEASSISTANT,
        "dingtalk": Platform.DINGTALK,
        "feishu": Platform.FEISHU,
        "wecom": Platform.WECOM,
        "email": Platform.EMAIL,
        "sms": Platform.SMS,
    }
    platform = platform_map.get(platform_name)
    if not platform:
        avail = ", ".join(platform_map.keys())
        return json.dumps({"error": f"Unknown platform: {platform_name}. Available: {avail}"})

    pconfig = config.platforms.get(platform)
    if not pconfig or not pconfig.enabled:
        return json.dumps({"error": f"Platform '{platform_name}' is not configured. Set up credentials in ~/.hermes/config.yaml or environment variables."})

    from gateway.platforms.base import BasePlatformAdapter

    # Split inline media references out of the text; the cleaned text is what
    # actually gets sent, alongside the extracted media files.
    media_files, cleaned_message = BasePlatformAdapter.extract_media(message)
    # Mirror text falls back to a media description when the message is media-only.
    mirror_text = cleaned_message.strip() or _describe_media_for_mirror(media_files)

    used_home_channel = False
    if not chat_id:
        # No explicit/resolved target: fall back to the platform's home channel.
        home = config.get_home_channel(platform)
        if home:
            chat_id = home.chat_id
            used_home_channel = True
        else:
            return json.dumps({
                "error": f"No home channel set for {platform_name} to determine where to send the message. "
                         f"Either specify a channel directly with '{platform_name}:CHANNEL_NAME', "
                         f"or set a home channel via: hermes config set {platform_name.upper()}_HOME_CHANNEL <channel_id>"
            })

    # Cron runs auto-deliver their final response; skip a redundant explicit
    # send to the exact same target.
    duplicate_skip = _maybe_skip_cron_duplicate_send(platform_name, chat_id, thread_id)
    if duplicate_skip:
        return json.dumps(duplicate_skip)

    try:
        from model_tools import _run_async
        result = _run_async(
            _send_to_platform(
                platform,
                pconfig,
                chat_id,
                cleaned_message,
                thread_id=thread_id,
                media_files=media_files,
            )
        )
        if used_home_channel and isinstance(result, dict) and result.get("success"):
            # Tell the model where the message actually landed.
            result["note"] = f"Sent to {platform_name} home channel (chat_id: {chat_id})"

        # Mirror the sent message into the target's gateway session
        if isinstance(result, dict) and result.get("success") and mirror_text:
            try:
                from gateway.mirror import mirror_to_session
                source_label = os.getenv("HERMES_SESSION_PLATFORM", "cli")
                if mirror_to_session(platform_name, chat_id, mirror_text, source_label=source_label, thread_id=thread_id):
                    result["mirrored"] = True
            except Exception:
                # Mirroring is best-effort; the send itself already succeeded.
                pass

        return json.dumps(result)
    except Exception as e:
        return json.dumps({"error": f"Send failed: {e}"})
|
|
|
|
|
|
|
|
|
|
|
2026-03-11 09:15:34 +01:00
|
|
|
def _parse_target_ref(platform_name: str, target_ref: str):
    """Split a target reference into (chat_id, thread_id, is_explicit).

    is_explicit is True when the ref is already a concrete id and needs no
    channel-directory resolution.
    """
    # Platforms whose targets may carry a thread/topic suffix get a dedicated
    # pattern; everything else falls through to the bare numeric-id check.
    pattern = {
        "telegram": _TELEGRAM_TOPIC_TARGET_RE,
        "feishu": _FEISHU_TARGET_RE,
    }.get(platform_name)
    if pattern is not None:
        m = pattern.fullmatch(target_ref)
        if m is not None:
            return m.group(1), m.group(2), True
    # A bare (optionally negative) numeric id is explicit on any platform.
    if target_ref.lstrip("-").isdigit():
        return target_ref, None, True
    # Anything else is a human-friendly name that needs directory resolution.
    return None, None, False
|
|
|
|
|
|
|
|
|
|
|
2026-03-14 04:01:46 -07:00
|
|
|
def _describe_media_for_mirror(media_files):
    """Summarize a media-only message for the session-mirror transcript."""
    if not media_files:
        return ""
    if len(media_files) != 1:
        return f"[Sent {len(media_files)} media attachments]"

    # Single attachment: classify it by extension (and the voice flag).
    media_path, is_voice = media_files[0]
    ext = os.path.splitext(media_path)[1].lower()
    if is_voice and ext in _VOICE_EXTS:
        return "[Sent voice message]"
    for exts, label in (
        (_IMAGE_EXTS, "[Sent image attachment]"),
        (_VIDEO_EXTS, "[Sent video attachment]"),
        (_AUDIO_EXTS, "[Sent audio attachment]"),
    ):
        if ext in exts:
            return label
    return "[Sent document attachment]"
|
|
|
|
|
|
|
|
|
|
|
2026-03-14 19:07:50 -07:00
|
|
|
def _get_cron_auto_delivery_target():
|
|
|
|
|
"""Return the cron scheduler's auto-delivery target for the current run, if any."""
|
|
|
|
|
platform = os.getenv("HERMES_CRON_AUTO_DELIVER_PLATFORM", "").strip().lower()
|
|
|
|
|
chat_id = os.getenv("HERMES_CRON_AUTO_DELIVER_CHAT_ID", "").strip()
|
|
|
|
|
if not platform or not chat_id:
|
|
|
|
|
return None
|
|
|
|
|
thread_id = os.getenv("HERMES_CRON_AUTO_DELIVER_THREAD_ID", "").strip() or None
|
|
|
|
|
return {
|
|
|
|
|
"platform": platform,
|
|
|
|
|
"chat_id": chat_id,
|
|
|
|
|
"thread_id": thread_id,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _maybe_skip_cron_duplicate_send(platform_name: str, chat_id: str, thread_id: str | None):
    """Suppress a send_message that duplicates the cron scheduler's own delivery.

    Returns a success/skipped payload when the requested target matches the
    scheduler's auto-delivery target exactly, otherwise None (proceed to send).
    """
    auto = _get_cron_auto_delivery_target()
    if not auto:
        return None
    # All three coordinates must match; chat ids are compared as strings.
    if auto["platform"] != platform_name:
        return None
    if str(auto["chat_id"]) != str(chat_id):
        return None
    if auto.get("thread_id") != thread_id:
        return None

    label_parts = [platform_name, f"{chat_id}"]
    if thread_id is not None:
        label_parts.append(f"{thread_id}")
    target_label = ":".join(label_parts)

    return {
        "success": True,
        "skipped": True,
        "reason": "cron_auto_delivery_duplicate_target",
        "target": target_label,
        "note": (
            f"Skipped send_message to {target_label}. This cron job will already auto-deliver "
            "its final response to that same target. Put the intended user-facing content in "
            "your final response instead, or use a different target if you want an additional message."
        ),
    }
|
|
|
|
|
|
|
|
|
|
|
2026-03-14 04:01:46 -07:00
|
|
|
async def _send_to_platform(platform, pconfig, chat_id, message, thread_id=None, media_files=None):
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
"""Route a message to the appropriate platform sender.
|
|
|
|
|
|
|
|
|
|
Long messages are automatically chunked to fit within platform limits
|
|
|
|
|
using the same smart-splitting algorithm as the gateway adapters
|
|
|
|
|
(preserves code-block boundaries, adds part indicators).
|
|
|
|
|
"""
|
2026-02-21 20:22:33 -08:00
|
|
|
from gateway.config import Platform
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
from gateway.platforms.base import BasePlatformAdapter
|
|
|
|
|
from gateway.platforms.telegram import TelegramAdapter
|
|
|
|
|
from gateway.platforms.discord import DiscordAdapter
|
|
|
|
|
from gateway.platforms.slack import SlackAdapter
|
2026-03-14 04:01:46 -07:00
|
|
|
|
feat(gateway): add Feishu/Lark platform support (#3817)
Adds Feishu (ByteDance's enterprise messaging platform) as a gateway
platform adapter with full feature parity: WebSocket + webhook transports,
message batching, dedup, rate limiting, rich post/card content parsing,
media handling (images/audio/files/video), group @mention gating,
reaction routing, and interactive card button support.
Cherry-picked from PR #1793 by penwyp with:
- Moved to current main (PR was 458 commits behind)
- Fixed _send_with_retry shadowing BasePlatformAdapter method (renamed to
_feishu_send_with_retry to avoid signature mismatch crash)
- Fixed import structure: aiohttp/websockets imported independently of
lark_oapi so they remain available when SDK is missing
- Fixed get_hermes_home import (hermes_constants, not hermes_cli.config)
- Added skip decorators for tests requiring lark_oapi SDK
- All 16 integration points added surgically to current main
New dependency: lark-oapi>=1.5.3,<2 (optional, pip install hermes-agent[feishu])
Fixes #1788
Co-authored-by: penwyp <penwyp@users.noreply.github.com>
2026-03-29 18:17:42 -07:00
|
|
|
# Feishu adapter import is optional (requires lark-oapi)
|
|
|
|
|
try:
|
|
|
|
|
from gateway.platforms.feishu import FeishuAdapter
|
|
|
|
|
_feishu_available = True
|
|
|
|
|
except ImportError:
|
|
|
|
|
_feishu_available = False
|
|
|
|
|
|
2026-03-14 04:01:46 -07:00
|
|
|
media_files = media_files or []
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
|
|
|
|
|
# Platform message length limits (from adapter class attributes)
|
|
|
|
|
_MAX_LENGTHS = {
|
|
|
|
|
Platform.TELEGRAM: TelegramAdapter.MAX_MESSAGE_LENGTH,
|
|
|
|
|
Platform.DISCORD: DiscordAdapter.MAX_MESSAGE_LENGTH,
|
|
|
|
|
Platform.SLACK: SlackAdapter.MAX_MESSAGE_LENGTH,
|
|
|
|
|
}
|
feat(gateway): add Feishu/Lark platform support (#3817)
Adds Feishu (ByteDance's enterprise messaging platform) as a gateway
platform adapter with full feature parity: WebSocket + webhook transports,
message batching, dedup, rate limiting, rich post/card content parsing,
media handling (images/audio/files/video), group @mention gating,
reaction routing, and interactive card button support.
Cherry-picked from PR #1793 by penwyp with:
- Moved to current main (PR was 458 commits behind)
- Fixed _send_with_retry shadowing BasePlatformAdapter method (renamed to
_feishu_send_with_retry to avoid signature mismatch crash)
- Fixed import structure: aiohttp/websockets imported independently of
lark_oapi so they remain available when SDK is missing
- Fixed get_hermes_home import (hermes_constants, not hermes_cli.config)
- Added skip decorators for tests requiring lark_oapi SDK
- All 16 integration points added surgically to current main
New dependency: lark-oapi>=1.5.3,<2 (optional, pip install hermes-agent[feishu])
Fixes #1788
Co-authored-by: penwyp <penwyp@users.noreply.github.com>
2026-03-29 18:17:42 -07:00
|
|
|
if _feishu_available:
|
|
|
|
|
_MAX_LENGTHS[Platform.FEISHU] = FeishuAdapter.MAX_MESSAGE_LENGTH
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
|
|
|
|
|
# Smart-chunk the message to fit within platform limits.
|
|
|
|
|
# For short messages or platforms without a known limit this is a no-op.
|
|
|
|
|
max_len = _MAX_LENGTHS.get(platform)
|
|
|
|
|
if max_len:
|
|
|
|
|
chunks = BasePlatformAdapter.truncate_message(message, max_len)
|
|
|
|
|
else:
|
|
|
|
|
chunks = [message]
|
|
|
|
|
|
|
|
|
|
# --- Telegram: special handling for media attachments ---
|
2026-02-21 20:22:33 -08:00
|
|
|
if platform == Platform.TELEGRAM:
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
last_result = None
|
|
|
|
|
for i, chunk in enumerate(chunks):
|
|
|
|
|
is_last = (i == len(chunks) - 1)
|
|
|
|
|
result = await _send_telegram(
|
|
|
|
|
pconfig.token,
|
|
|
|
|
chat_id,
|
|
|
|
|
chunk,
|
|
|
|
|
media_files=media_files if is_last else [],
|
|
|
|
|
thread_id=thread_id,
|
|
|
|
|
)
|
|
|
|
|
if isinstance(result, dict) and result.get("error"):
|
|
|
|
|
return result
|
|
|
|
|
last_result = result
|
|
|
|
|
return last_result
|
|
|
|
|
|
|
|
|
|
# --- Non-Telegram platforms ---
|
2026-03-14 04:01:46 -07:00
|
|
|
if media_files and not message.strip():
|
|
|
|
|
return {
|
|
|
|
|
"error": (
|
|
|
|
|
f"send_message MEDIA delivery is currently only supported for telegram; "
|
|
|
|
|
f"target {platform.value} had only media attachments"
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
warning = None
|
|
|
|
|
if media_files:
|
|
|
|
|
warning = (
|
|
|
|
|
f"MEDIA attachments were omitted for {platform.value}; "
|
|
|
|
|
"native send_message media delivery is currently only supported for telegram"
|
|
|
|
|
)
|
|
|
|
|
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
last_result = None
|
|
|
|
|
for chunk in chunks:
|
|
|
|
|
if platform == Platform.DISCORD:
|
|
|
|
|
result = await _send_discord(pconfig.token, chat_id, chunk)
|
|
|
|
|
elif platform == Platform.SLACK:
|
|
|
|
|
result = await _send_slack(pconfig.token, chat_id, chunk)
|
2026-03-17 15:31:13 +00:00
|
|
|
elif platform == Platform.WHATSAPP:
|
|
|
|
|
result = await _send_whatsapp(pconfig.extra, chat_id, chunk)
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
elif platform == Platform.SIGNAL:
|
|
|
|
|
result = await _send_signal(pconfig.extra, chat_id, chunk)
|
|
|
|
|
elif platform == Platform.EMAIL:
|
|
|
|
|
result = await _send_email(pconfig.extra, chat_id, chunk)
|
feat: add SMS (Twilio) platform adapter
Add SMS as a first-class messaging platform via the Twilio API.
Shares credentials with the existing telephony skill — same
TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, TWILIO_PHONE_NUMBER env vars.
Adapter (gateway/platforms/sms.py):
- aiohttp webhook server for inbound (Twilio form-encoded POSTs)
- Twilio REST API with Basic auth for outbound
- Markdown stripping, smart chunking at 1600 chars
- Echo loop prevention, phone number redaction in logs
Integration (13 files):
- gateway config, run, channel_directory
- agent prompt_builder (SMS platform hint)
- cron scheduler, cronjob tools
- send_message_tool (_send_sms via Twilio API)
- toolsets (hermes-sms + hermes-gateway)
- gateway setup wizard, status display
- pyproject.toml (sms optional extra)
- 21 tests
Docs:
- website/docs/user-guide/messaging/sms.md (full setup guide)
- Updated messaging index (architecture, toolsets, security, links)
- Updated environment-variables.md reference
Inspired by PR #1575 (@sunsakis), rewritten for Twilio.
2026-03-17 03:14:53 -07:00
|
|
|
elif platform == Platform.SMS:
|
|
|
|
|
result = await _send_sms(pconfig.api_key, chat_id, chunk)
|
fix(tools): implement send_message routing for Matrix, Mattermost, HomeAssistant, DingTalk (#3796)
* fix(tools): implement send_message routing for Matrix, Mattermost, HomeAssistant, DingTalk
Matrix, Mattermost, HomeAssistant, and DingTalk were present in
platform_map but fell through to the "not yet implemented" else branch,
causing send_message tool calls to silently fail on these platforms.
Add four async sender functions:
- _send_mattermost: POST /api/v4/posts via Mattermost REST API
- _send_matrix: PUT /_matrix/client/v3/rooms/.../send via Matrix CS API
- _send_homeassistant: POST /api/services/notify/notify via HA REST API
- _send_dingtalk: POST to session webhook URL
Add routing in _send_to_platform() and 17 unit tests covering success,
HTTP errors, missing config, env var fallback, and Matrix txn_id uniqueness.
* fix: pass platform tokens explicitly to Mattermost/Matrix/HA senders
The original PR passed pconfig.extra to sender functions, but tokens
live at pconfig.token (not in extra). This caused the senders to always
fall through to env var lookup instead of using the gateway-resolved
token.
Changes:
- Mattermost/Matrix/HA: accept token as first arg, matching the
Telegram/Discord/Slack sender pattern
- DingTalk: add DINGTALK_WEBHOOK_URL env var fallback + docstring
explaining the session-webhook vs robot-webhook difference
- Tests updated for new signatures + new DingTalk env var test
---------
Co-authored-by: sprmn24 <oncuevtv@gmail.com>
2026-03-29 15:17:46 -07:00
|
|
|
elif platform == Platform.MATTERMOST:
|
|
|
|
|
result = await _send_mattermost(pconfig.token, pconfig.extra, chat_id, chunk)
|
|
|
|
|
elif platform == Platform.MATRIX:
|
|
|
|
|
result = await _send_matrix(pconfig.token, pconfig.extra, chat_id, chunk)
|
|
|
|
|
elif platform == Platform.HOMEASSISTANT:
|
|
|
|
|
result = await _send_homeassistant(pconfig.token, pconfig.extra, chat_id, chunk)
|
|
|
|
|
elif platform == Platform.DINGTALK:
|
|
|
|
|
result = await _send_dingtalk(pconfig.extra, chat_id, chunk)
|
feat(gateway): add Feishu/Lark platform support (#3817)
Adds Feishu (ByteDance's enterprise messaging platform) as a gateway
platform adapter with full feature parity: WebSocket + webhook transports,
message batching, dedup, rate limiting, rich post/card content parsing,
media handling (images/audio/files/video), group @mention gating,
reaction routing, and interactive card button support.
Cherry-picked from PR #1793 by penwyp with:
- Moved to current main (PR was 458 commits behind)
- Fixed _send_with_retry shadowing BasePlatformAdapter method (renamed to
_feishu_send_with_retry to avoid signature mismatch crash)
- Fixed import structure: aiohttp/websockets imported independently of
lark_oapi so they remain available when SDK is missing
- Fixed get_hermes_home import (hermes_constants, not hermes_cli.config)
- Added skip decorators for tests requiring lark_oapi SDK
- All 16 integration points added surgically to current main
New dependency: lark-oapi>=1.5.3,<2 (optional, pip install hermes-agent[feishu])
Fixes #1788
Co-authored-by: penwyp <penwyp@users.noreply.github.com>
2026-03-29 18:17:42 -07:00
|
|
|
elif platform == Platform.FEISHU:
|
|
|
|
|
result = await _send_feishu(pconfig, chat_id, chunk, thread_id=thread_id)
|
2026-03-29 21:29:13 -07:00
|
|
|
elif platform == Platform.WECOM:
|
|
|
|
|
result = await _send_wecom(pconfig.extra, chat_id, chunk)
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
else:
|
|
|
|
|
result = {"error": f"Direct sending not yet implemented for {platform.value}"}
|
2026-03-14 04:01:46 -07:00
|
|
|
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
if isinstance(result, dict) and result.get("error"):
|
|
|
|
|
return result
|
|
|
|
|
last_result = result
|
|
|
|
|
|
|
|
|
|
if warning and isinstance(last_result, dict) and last_result.get("success"):
|
|
|
|
|
warnings = list(last_result.get("warnings", []))
|
2026-03-14 04:01:46 -07:00
|
|
|
warnings.append(warning)
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
last_result["warnings"] = warnings
|
|
|
|
|
return last_result
|
2026-02-21 20:22:33 -08:00
|
|
|
|
|
|
|
|
|
2026-03-14 04:01:46 -07:00
|
|
|
async def _send_telegram(token, chat_id, message, media_files=None, thread_id=None):
    """Send via Telegram Bot API (one-shot, no polling needed).

    Applies markdown→MarkdownV2 formatting (same as the gateway adapter)
    so that bold, links, and headers render correctly. If the message
    already contains HTML tags, it is sent with ``parse_mode='HTML'``
    instead, bypassing MarkdownV2 conversion.

    Args:
        token: Telegram bot token.
        chat_id: Target chat id; converted with ``int()`` before sending.
        message: Text body (may be empty when only media is attached).
        media_files: Optional list of ``(path, is_voice)`` tuples sent after
            the text; missing/failed files become warnings, not errors.
        thread_id: Optional forum-topic/thread id; converted with ``int()``.

    Returns:
        On success: dict with ``success``, ``platform``, ``chat_id`` and the
        ``message_id`` of the *last* message sent (text or media), plus a
        ``warnings`` list if any media failed. Otherwise ``{"error": ...}``
        (with ``warnings`` when available).
    """
    try:
        from telegram import Bot
        from telegram.constants import ParseMode

        # Auto-detect HTML tags — if present, skip MarkdownV2 and send as HTML.
        # Inspired by github.com/ashaney — PR #1568.
        _has_html = bool(re.search(r'<[a-zA-Z/][^>]*>', message))

        if _has_html:
            formatted = message
            send_parse_mode = ParseMode.HTML
        else:
            # Reuse the gateway adapter's format_message for markdown→MarkdownV2
            try:
                from gateway.platforms.telegram import TelegramAdapter, _strip_mdv2
                # __new__ bypasses __init__: we only need the formatting
                # method, not a fully-wired live adapter.
                _adapter = TelegramAdapter.__new__(TelegramAdapter)
                formatted = _adapter.format_message(message)
            except Exception:
                # Fallback: send as-is if formatting unavailable
                formatted = message
            send_parse_mode = ParseMode.MARKDOWN_V2

        bot = Bot(token=token)
        int_chat_id = int(chat_id)
        media_files = media_files or []
        thread_kwargs = {}
        if thread_id is not None:
            thread_kwargs["message_thread_id"] = int(thread_id)

        # Tracks the most recent successfully sent message (text or media);
        # staying None means nothing was deliverable.
        last_msg = None
        warnings = []

        # Skip the text send entirely when the body is blank (media-only call).
        if formatted.strip():
            try:
                last_msg = await bot.send_message(
                    chat_id=int_chat_id, text=formatted,
                    parse_mode=send_parse_mode, **thread_kwargs
                )
            except Exception as md_error:
                # Parse failed, fall back to plain text
                if "parse" in str(md_error).lower() or "markdown" in str(md_error).lower() or "html" in str(md_error).lower():
                    logger.warning("Parse mode %s failed in _send_telegram, falling back to plain text: %s", send_parse_mode, md_error)
                    if not _has_html:
                        # MarkdownV2 path: strip the escaping we added so the
                        # plain-text resend is readable.
                        try:
                            from gateway.platforms.telegram import _strip_mdv2
                            plain = _strip_mdv2(formatted)
                        except Exception:
                            plain = message
                    else:
                        # HTML path: resend the original body untouched.
                        plain = message
                    last_msg = await bot.send_message(
                        chat_id=int_chat_id, text=plain,
                        parse_mode=None, **thread_kwargs
                    )
                else:
                    # Not a formatting problem (e.g. bad chat id) — surface it.
                    raise

        # Media are best-effort: each failure is recorded as a warning and the
        # remaining files are still attempted.
        for media_path, is_voice in media_files:
            if not os.path.exists(media_path):
                warning = f"Media file not found, skipping: {media_path}"
                logger.warning(warning)
                warnings.append(warning)
                continue

            # Dispatch on file extension; sets like _IMAGE_EXTS are
            # module-level constants defined elsewhere in this file.
            ext = os.path.splitext(media_path)[1].lower()
            try:
                with open(media_path, "rb") as f:
                    if ext in _IMAGE_EXTS:
                        last_msg = await bot.send_photo(
                            chat_id=int_chat_id, photo=f, **thread_kwargs
                        )
                    elif ext in _VIDEO_EXTS:
                        last_msg = await bot.send_video(
                            chat_id=int_chat_id, video=f, **thread_kwargs
                        )
                    elif ext in _VOICE_EXTS and is_voice:
                        # Voice note only when the caller flagged it as voice;
                        # otherwise voice-capable formats fall through below.
                        last_msg = await bot.send_voice(
                            chat_id=int_chat_id, voice=f, **thread_kwargs
                        )
                    elif ext in _AUDIO_EXTS:
                        last_msg = await bot.send_audio(
                            chat_id=int_chat_id, audio=f, **thread_kwargs
                        )
                    else:
                        # Unknown extension: generic document upload.
                        last_msg = await bot.send_document(
                            chat_id=int_chat_id, document=f, **thread_kwargs
                        )
            except Exception as e:
                warning = f"Failed to send media {media_path}: {e}"
                logger.error(warning)
                warnings.append(warning)

        # Nothing went out at all (blank text and every media file failed).
        if last_msg is None:
            error = "No deliverable text or media remained after processing MEDIA tags"
            if warnings:
                return {"error": error, "warnings": warnings}
            return {"error": error}

        result = {
            "success": True,
            "platform": "telegram",
            "chat_id": chat_id,
            "message_id": str(last_msg.message_id),
        }
        if warnings:
            result["warnings"] = warnings
        return result
    except ImportError:
        return {"error": "python-telegram-bot not installed. Run: pip install python-telegram-bot"}
    except Exception as e:
        return {"error": f"Telegram send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def _send_discord(token, chat_id, message):
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
"""Send a single message via Discord REST API (no websocket client needed).
|
|
|
|
|
|
|
|
|
|
Chunking is handled by _send_to_platform() before this is called.
|
|
|
|
|
"""
|
2026-02-21 20:22:33 -08:00
|
|
|
try:
|
|
|
|
|
import aiohttp
|
|
|
|
|
except ImportError:
|
|
|
|
|
return {"error": "aiohttp not installed. Run: pip install aiohttp"}
|
|
|
|
|
try:
|
|
|
|
|
url = f"https://discord.com/api/v10/channels/{chat_id}/messages"
|
|
|
|
|
headers = {"Authorization": f"Bot {token}", "Content-Type": "application/json"}
|
2026-03-26 11:58:11 +03:00
|
|
|
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as session:
|
fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
* fix: prevent infinite 400 failure loop on context overflow (#1630)
When a gateway session exceeds the model's context window, Anthropic may
return a generic 400 invalid_request_error with just 'Error' as the
message. This bypassed the phrase-based context-length detection,
causing the agent to treat it as a non-retryable client error. Worse,
the failed user message was still persisted to the transcript, making
the session even larger on each attempt — creating an infinite loop.
Three-layer fix:
1. run_agent.py — Fallback heuristic: when a 400 error has a very short
generic message AND the session is large (>40% of context or >80
messages), treat it as a probable context overflow and trigger
compression instead of aborting.
2. run_agent.py + gateway/run.py — Don't persist failed messages:
when the agent returns failed=True before generating any response,
skip writing the user's message to the transcript/DB. This prevents
the session from growing on each failure.
3. gateway/run.py — Smarter error messages: detect context-overflow
failures and suggest /compact or /reset specifically, instead of a
generic 'try again' that will fail identically.
* fix(skills): detect prompt injection patterns and block cache file reads
Adds two security layers to prevent prompt injection via skills hub
cache files (#1558):
1. read_file: blocks direct reads of ~/.hermes/skills/.hub/ directory
(index-cache, catalog files). The 3.5MB clawhub_catalog_v1.json
was the original injection vector — untrusted skill descriptions
in the catalog contained adversarial text that the model executed.
2. skill_view: warns when skills are loaded from outside the trusted
~/.hermes/skills/ directory, and detects common injection patterns
in skill content ("ignore previous instructions", "<system>", etc.).
Cherry-picked from PR #1562 by ygd58.
* fix(tools): chunk long messages in send_message_tool before dispatch (#1552)
Long messages sent via send_message tool or cron delivery silently
failed when exceeding platform limits. Gateway adapters handle this
via truncate_message(), but the standalone senders in send_message_tool
bypassed that entirely.
- Apply truncate_message() chunking in _send_to_platform() before
dispatching to individual platform senders
- Remove naive message[i:i+2000] character split in _send_discord()
in favor of centralized smart splitting
- Attach media files to last chunk only for Telegram
- Add regression tests for chunking and media placement
Cherry-picked from PR #1557 by llbn.
---------
Co-authored-by: buray <ygd58@users.noreply.github.com>
Co-authored-by: lbn <llbn@users.noreply.github.com>
2026-03-17 01:52:43 -07:00
|
|
|
async with session.post(url, headers=headers, json={"content": message}) as resp:
|
|
|
|
|
if resp.status not in (200, 201):
|
|
|
|
|
body = await resp.text()
|
|
|
|
|
return {"error": f"Discord API error ({resp.status}): {body}"}
|
|
|
|
|
data = await resp.json()
|
|
|
|
|
return {"success": True, "platform": "discord", "chat_id": chat_id, "message_id": data.get("id")}
|
2026-02-21 20:22:33 -08:00
|
|
|
except Exception as e:
|
|
|
|
|
return {"error": f"Discord send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def _send_slack(token, chat_id, message):
|
|
|
|
|
"""Send via Slack Web API."""
|
|
|
|
|
try:
|
|
|
|
|
import aiohttp
|
|
|
|
|
except ImportError:
|
|
|
|
|
return {"error": "aiohttp not installed. Run: pip install aiohttp"}
|
|
|
|
|
try:
|
|
|
|
|
url = "https://slack.com/api/chat.postMessage"
|
|
|
|
|
headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
|
2026-03-26 11:58:11 +03:00
|
|
|
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as session:
|
2026-02-21 20:22:33 -08:00
|
|
|
async with session.post(url, headers=headers, json={"channel": chat_id, "text": message}) as resp:
|
|
|
|
|
data = await resp.json()
|
|
|
|
|
if data.get("ok"):
|
|
|
|
|
return {"success": True, "platform": "slack", "chat_id": chat_id, "message_id": data.get("ts")}
|
|
|
|
|
return {"error": f"Slack API error: {data.get('error', 'unknown')}"}
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return {"error": f"Slack send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
2026-03-17 15:31:13 +00:00
|
|
|
async def _send_whatsapp(extra, chat_id, message):
|
|
|
|
|
"""Send via the local WhatsApp bridge HTTP API."""
|
|
|
|
|
try:
|
|
|
|
|
import aiohttp
|
|
|
|
|
except ImportError:
|
|
|
|
|
return {"error": "aiohttp not installed. Run: pip install aiohttp"}
|
|
|
|
|
try:
|
|
|
|
|
bridge_port = extra.get("bridge_port", 3000)
|
|
|
|
|
async with aiohttp.ClientSession() as session:
|
|
|
|
|
async with session.post(
|
|
|
|
|
f"http://localhost:{bridge_port}/send",
|
|
|
|
|
json={"chatId": chat_id, "message": message},
|
|
|
|
|
timeout=aiohttp.ClientTimeout(total=30),
|
|
|
|
|
) as resp:
|
|
|
|
|
if resp.status == 200:
|
|
|
|
|
data = await resp.json()
|
|
|
|
|
return {
|
|
|
|
|
"success": True,
|
|
|
|
|
"platform": "whatsapp",
|
|
|
|
|
"chat_id": chat_id,
|
|
|
|
|
"message_id": data.get("messageId"),
|
|
|
|
|
}
|
|
|
|
|
body = await resp.text()
|
|
|
|
|
return {"error": f"WhatsApp bridge error ({resp.status}): {body}"}
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return {"error": f"WhatsApp send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
feat: add Signal messenger gateway platform (#405)
Complete Signal adapter using signal-cli daemon HTTP API.
Based on PR #268 by ibhagwan, rebuilt on current main with bug fixes.
Architecture:
- SSE streaming for inbound messages with exponential backoff (2s→60s)
- JSON-RPC 2.0 for outbound (send, typing, attachments, contacts)
- Health monitor detects stale SSE connections (120s threshold)
- Phone number redaction in all logs and global redact.py
Features:
- DM and group message support with separate access policies
- DM policies: pairing (default), allowlist, open
- Group policies: disabled (default), allowlist, open
- Attachment download with magic-byte type detection
- Typing indicators (8s refresh interval)
- 100MB attachment size limit, 8000 char message limit
- E.164 phone + UUID allowlist support
Integration:
- Platform.SIGNAL enum in gateway/config.py
- Signal in _is_user_authorized() allowlist maps (gateway/run.py)
- Adapter factory in _create_adapter() (gateway/run.py)
- user_id_alt/chat_id_alt fields in SessionSource for UUIDs
- send_message tool support via httpx JSON-RPC (not aiohttp)
- Interactive setup wizard in 'hermes gateway setup'
- Connectivity testing during setup (pings /api/v1/check)
- signal-cli detection and install guidance
Bug fixes from PR #268:
- Timestamp reads from envelope_data (not outer wrapper)
- Uses httpx consistently (not aiohttp in send_message tool)
- SIGNAL_DEBUG scoped to signal logger (not root)
- extract_images regex NOT modified (preserves group numbering)
- pairing.py NOT modified (no cross-platform side effects)
- No dual authorization (adapter defers to run.py for user auth)
- Wildcard uses set membership ('*' in set, not list equality)
- .zip default for PK magic bytes (not .docx)
No new Python dependencies — uses httpx (already core).
External requirement: signal-cli daemon (user-installed).
Tests: 30 new tests covering config, init, helpers, session source,
phone redaction, authorization, and send_message integration.
Co-authored-by: ibhagwan <ibhagwan@users.noreply.github.com>
2026-03-08 20:20:35 -07:00
|
|
|
async def _send_signal(extra, chat_id, message):
|
|
|
|
|
"""Send via signal-cli JSON-RPC API."""
|
|
|
|
|
try:
|
|
|
|
|
import httpx
|
|
|
|
|
except ImportError:
|
|
|
|
|
return {"error": "httpx not installed"}
|
|
|
|
|
try:
|
|
|
|
|
http_url = extra.get("http_url", "http://127.0.0.1:8080").rstrip("/")
|
|
|
|
|
account = extra.get("account", "")
|
|
|
|
|
if not account:
|
|
|
|
|
return {"error": "Signal account not configured"}
|
|
|
|
|
|
|
|
|
|
params = {"account": account, "message": message}
|
|
|
|
|
if chat_id.startswith("group:"):
|
|
|
|
|
params["groupId"] = chat_id[6:]
|
|
|
|
|
else:
|
|
|
|
|
params["recipient"] = [chat_id]
|
|
|
|
|
|
|
|
|
|
payload = {
|
|
|
|
|
"jsonrpc": "2.0",
|
|
|
|
|
"method": "send",
|
|
|
|
|
"params": params,
|
|
|
|
|
"id": f"send_{int(time.time() * 1000)}",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
async with httpx.AsyncClient(timeout=30.0) as client:
|
|
|
|
|
resp = await client.post(f"{http_url}/api/v1/rpc", json=payload)
|
|
|
|
|
resp.raise_for_status()
|
|
|
|
|
data = resp.json()
|
|
|
|
|
if "error" in data:
|
|
|
|
|
return {"error": f"Signal RPC error: {data['error']}"}
|
|
|
|
|
return {"success": True, "platform": "signal", "chat_id": chat_id}
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return {"error": f"Signal send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
feat: add email gateway platform (IMAP/SMTP)
Allow users to interact with Hermes by sending and receiving emails.
Uses IMAP polling for incoming messages and SMTP for replies with
proper threading (In-Reply-To, References headers).
Integrates with all 14 gateway extension points: config, adapter
factory, authorization, send_message tool, cron delivery, toolsets,
prompt hints, channel directory, setup wizard, status display, and
env example.
65 tests covering config, parsing, dispatch, threading, IMAP fetch,
SMTP send, attachments, and all integration points.
2026-03-10 03:15:38 +03:00
|
|
|
async def _send_email(extra, chat_id, message):
|
|
|
|
|
"""Send via SMTP (one-shot, no persistent connection needed)."""
|
|
|
|
|
import smtplib
|
|
|
|
|
from email.mime.text import MIMEText
|
|
|
|
|
|
|
|
|
|
address = extra.get("address") or os.getenv("EMAIL_ADDRESS", "")
|
|
|
|
|
password = os.getenv("EMAIL_PASSWORD", "")
|
|
|
|
|
smtp_host = extra.get("smtp_host") or os.getenv("EMAIL_SMTP_HOST", "")
|
|
|
|
|
smtp_port = int(os.getenv("EMAIL_SMTP_PORT", "587"))
|
|
|
|
|
|
|
|
|
|
if not all([address, password, smtp_host]):
|
|
|
|
|
return {"error": "Email not configured (EMAIL_ADDRESS, EMAIL_PASSWORD, EMAIL_SMTP_HOST required)"}
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
msg = MIMEText(message, "plain", "utf-8")
|
|
|
|
|
msg["From"] = address
|
|
|
|
|
msg["To"] = chat_id
|
|
|
|
|
msg["Subject"] = "Hermes Agent"
|
|
|
|
|
|
|
|
|
|
server = smtplib.SMTP(smtp_host, smtp_port)
|
2026-03-14 06:31:52 -07:00
|
|
|
server.starttls(context=ssl.create_default_context())
|
feat: add email gateway platform (IMAP/SMTP)
Allow users to interact with Hermes by sending and receiving emails.
Uses IMAP polling for incoming messages and SMTP for replies with
proper threading (In-Reply-To, References headers).
Integrates with all 14 gateway extension points: config, adapter
factory, authorization, send_message tool, cron delivery, toolsets,
prompt hints, channel directory, setup wizard, status display, and
env example.
65 tests covering config, parsing, dispatch, threading, IMAP fetch,
SMTP send, attachments, and all integration points.
2026-03-10 03:15:38 +03:00
|
|
|
server.login(address, password)
|
|
|
|
|
server.send_message(msg)
|
|
|
|
|
server.quit()
|
|
|
|
|
return {"success": True, "platform": "email", "chat_id": chat_id}
|
|
|
|
|
except Exception as e:
|
|
|
|
|
return {"error": f"Email send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
feat: add SMS (Twilio) platform adapter
Add SMS as a first-class messaging platform via the Twilio API.
Shares credentials with the existing telephony skill — same
TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, TWILIO_PHONE_NUMBER env vars.
Adapter (gateway/platforms/sms.py):
- aiohttp webhook server for inbound (Twilio form-encoded POSTs)
- Twilio REST API with Basic auth for outbound
- Markdown stripping, smart chunking at 1600 chars
- Echo loop prevention, phone number redaction in logs
Integration (13 files):
- gateway config, run, channel_directory
- agent prompt_builder (SMS platform hint)
- cron scheduler, cronjob tools
- send_message_tool (_send_sms via Twilio API)
- toolsets (hermes-sms + hermes-gateway)
- gateway setup wizard, status display
- pyproject.toml (sms optional extra)
- 21 tests
Docs:
- website/docs/user-guide/messaging/sms.md (full setup guide)
- Updated messaging index (architecture, toolsets, security, links)
- Updated environment-variables.md reference
Inspired by PR #1575 (@sunsakis), rewritten for Twilio.
2026-03-17 03:14:53 -07:00
|
|
|
async def _send_sms(auth_token, chat_id, message):
    """Send a single SMS via Twilio REST API.

    Uses HTTP Basic auth (Account SID : Auth Token) and a form-encoded POST
    to the Twilio Messages endpoint. Chunking is handled by
    _send_to_platform() before this is called.

    Args:
        auth_token: Twilio auth token (resolved by the caller; no env fallback
            here for the token itself).
        chat_id: Destination phone number (Twilio "To" field).
        message: Message body; markdown is stripped before sending because
            SMS renders it as literal characters.

    Returns:
        ``{"success": True, ...}`` with the Twilio message SID on success,
        or ``{"error": ...}`` on any failure.
    """
    try:
        import aiohttp
    except ImportError:
        return {"error": "aiohttp not installed. Run: pip install aiohttp"}

    import base64

    account_sid = os.getenv("TWILIO_ACCOUNT_SID", "")
    from_number = os.getenv("TWILIO_PHONE_NUMBER", "")
    if not account_sid or not auth_token or not from_number:
        return {"error": "SMS not configured (TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, TWILIO_PHONE_NUMBER required)"}

    # Strip markdown — SMS renders it as literal characters
    message = _sms_strip_markdown(message)

    try:
        # Twilio uses HTTP Basic auth: "AccountSID:AuthToken", base64-encoded.
        creds = f"{account_sid}:{auth_token}"
        encoded = base64.b64encode(creds.encode("ascii")).decode("ascii")
        url = f"https://api.twilio.com/2010-04-01/Accounts/{account_sid}/Messages.json"
        headers = {"Authorization": f"Basic {encoded}"}

        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as session:
            form_data = aiohttp.FormData()
            form_data.add_field("From", from_number)
            form_data.add_field("To", chat_id)
            form_data.add_field("Body", message)

            async with session.post(url, data=form_data, headers=headers) as resp:
                body = await resp.json()
                if resp.status >= 400:
                    error_msg = body.get("message", str(body))
                    return {"error": f"Twilio API error ({resp.status}): {error_msg}"}
                msg_sid = body.get("sid", "")
                return {"success": True, "platform": "sms", "chat_id": chat_id, "message_id": msg_sid}
    except Exception as e:
        return {"error": f"SMS send failed: {e}"}


def _sms_strip_markdown(message):
    """Strip common markdown syntax so it doesn't render literally in SMS.

    Handles bold/italic (* and _), fenced and inline code, ATX headings,
    links (kept as their visible text), and collapses 3+ blank lines.
    """
    message = re.sub(r"\*\*(.+?)\*\*", r"\1", message, flags=re.DOTALL)
    message = re.sub(r"\*(.+?)\*", r"\1", message, flags=re.DOTALL)
    message = re.sub(r"__(.+?)__", r"\1", message, flags=re.DOTALL)
    message = re.sub(r"_(.+?)_", r"\1", message, flags=re.DOTALL)
    message = re.sub(r"```[a-z]*\n?", "", message)
    message = re.sub(r"`(.+?)`", r"\1", message)
    message = re.sub(r"^#{1,6}\s+", "", message, flags=re.MULTILINE)
    # [text](url) -> text
    message = re.sub(r"\[([^\]]+)\]\([^\)]+\)", r"\1", message)
    message = re.sub(r"\n{3,}", "\n\n", message)
    return message.strip()
|
|
|
|
|
|
|
|
|
|
|
fix(tools): implement send_message routing for Matrix, Mattermost, HomeAssistant, DingTalk (#3796)
* fix(tools): implement send_message routing for Matrix, Mattermost, HomeAssistant, DingTalk
Matrix, Mattermost, HomeAssistant, and DingTalk were present in
platform_map but fell through to the "not yet implemented" else branch,
causing send_message tool calls to silently fail on these platforms.
Add four async sender functions:
- _send_mattermost: POST /api/v4/posts via Mattermost REST API
- _send_matrix: PUT /_matrix/client/v3/rooms/.../send via Matrix CS API
- _send_homeassistant: POST /api/services/notify/notify via HA REST API
- _send_dingtalk: POST to session webhook URL
Add routing in _send_to_platform() and 17 unit tests covering success,
HTTP errors, missing config, env var fallback, and Matrix txn_id uniqueness.
* fix: pass platform tokens explicitly to Mattermost/Matrix/HA senders
The original PR passed pconfig.extra to sender functions, but tokens
live at pconfig.token (not in extra). This caused the senders to always
fall through to env var lookup instead of using the gateway-resolved
token.
Changes:
- Mattermost/Matrix/HA: accept token as first arg, matching the
Telegram/Discord/Slack sender pattern
- DingTalk: add DINGTALK_WEBHOOK_URL env var fallback + docstring
explaining the session-webhook vs robot-webhook difference
- Tests updated for new signatures + new DingTalk env var test
---------
Co-authored-by: sprmn24 <oncuevtv@gmail.com>
2026-03-29 15:17:46 -07:00
|
|
|
async def _send_mattermost(token, extra, chat_id, message):
    """Post a message to a Mattermost channel via the v4 REST API.

    Token resolution order: gateway-provided ``token`` arg, then the
    MATTERMOST_TOKEN env var. Server URL comes from the platform's extra
    config (``url``) or MATTERMOST_URL.
    """
    try:
        import aiohttp
    except ImportError:
        return {"error": "aiohttp not installed. Run: pip install aiohttp"}

    try:
        server = (extra.get("url") or os.getenv("MATTERMOST_URL", "")).rstrip("/")
        auth = token if token else os.getenv("MATTERMOST_TOKEN", "")
        if not (server and auth):
            return {"error": "Mattermost not configured (MATTERMOST_URL, MATTERMOST_TOKEN required)"}

        endpoint = f"{server}/api/v4/posts"
        request_headers = {"Authorization": f"Bearer {auth}", "Content-Type": "application/json"}
        payload = {"channel_id": chat_id, "message": message}
        timeout = aiohttp.ClientTimeout(total=30)

        async with aiohttp.ClientSession(timeout=timeout) as http:
            async with http.post(endpoint, headers=request_headers, json=payload) as resp:
                if resp.status in (200, 201):
                    data = await resp.json()
                    return {"success": True, "platform": "mattermost", "chat_id": chat_id, "message_id": data.get("id")}
                body = await resp.text()
                return {"error": f"Mattermost API error ({resp.status}): {body}"}
    except Exception as e:
        return {"error": f"Mattermost send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def _send_matrix(token, extra, chat_id, message):
    """Send a text message via the Matrix Client-Server API.

    Issues a PUT to ``/_matrix/client/v3/rooms/{room}/send/m.room.message/{txn}``
    with an ``m.text`` body. Homeserver comes from the platform's extra config
    (``homeserver``) or MATRIX_HOMESERVER; the access token from the ``token``
    arg or MATRIX_ACCESS_TOKEN.

    Args:
        token: Matrix access token (gateway-resolved; env fallback applied).
        extra: Platform extra-config dict.
        chat_id: Matrix room ID.
        message: Plain-text message body.

    Returns:
        ``{"success": True, ...}`` with the event_id on success, or
        ``{"error": ...}`` on any failure.
    """
    try:
        import aiohttp
    except ImportError:
        return {"error": "aiohttp not installed. Run: pip install aiohttp"}

    # Local import: `time` is not among this module's top-of-file imports,
    # so a module-level reference would raise NameError at call time.
    import time

    try:
        homeserver = (extra.get("homeserver") or os.getenv("MATRIX_HOMESERVER", "")).rstrip("/")
        token = token or os.getenv("MATRIX_ACCESS_TOKEN", "")
        if not homeserver or not token:
            return {"error": "Matrix not configured (MATRIX_HOMESERVER, MATRIX_ACCESS_TOKEN required)"}

        # Transaction IDs must be unique per access token; a millisecond
        # timestamp suffices for this one-shot, sequential sender.
        txn_id = f"hermes_{int(time.time() * 1000)}"
        url = f"{homeserver}/_matrix/client/v3/rooms/{chat_id}/send/m.room.message/{txn_id}"
        headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}

        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as session:
            async with session.put(url, headers=headers, json={"msgtype": "m.text", "body": message}) as resp:
                if resp.status not in (200, 201):
                    body = await resp.text()
                    return {"error": f"Matrix API error ({resp.status}): {body}"}
                data = await resp.json()
                return {"success": True, "platform": "matrix", "chat_id": chat_id, "message_id": data.get("event_id")}
    except Exception as e:
        return {"error": f"Matrix send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def _send_homeassistant(token, extra, chat_id, message):
    """Deliver a notification through Home Assistant's generic notify service.

    POSTs to ``/api/services/notify/notify`` with the message and target.
    URL comes from the platform's extra config (``url``) or HASS_URL; the
    long-lived access token from the ``token`` arg or HASS_TOKEN.
    """
    try:
        import aiohttp
    except ImportError:
        return {"error": "aiohttp not installed. Run: pip install aiohttp"}

    try:
        base = (extra.get("url") or os.getenv("HASS_URL", "")).rstrip("/")
        access_token = token or os.getenv("HASS_TOKEN", "")
        if not base or not access_token:
            return {"error": "Home Assistant not configured (HASS_URL, HASS_TOKEN required)"}

        endpoint = f"{base}/api/services/notify/notify"
        request_headers = {"Authorization": f"Bearer {access_token}", "Content-Type": "application/json"}
        payload = {"message": message, "target": chat_id}

        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=30)) as http:
            async with http.post(endpoint, headers=request_headers, json=payload) as resp:
                if resp.status in (200, 201):
                    return {"success": True, "platform": "homeassistant", "chat_id": chat_id}
                detail = await resp.text()
                return {"error": f"Home Assistant API error ({resp.status}): {detail}"}
    except Exception as e:
        return {"error": f"Home Assistant send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
async def _send_dingtalk(extra, chat_id, message):
    """Send a text message through a DingTalk robot webhook.

    Note: The gateway's DingTalk adapter uses per-session webhook URLs taken
    from incoming messages (dingtalk-stream SDK). Cross-platform send_message
    delivery instead relies on a static robot webhook, configured via the
    ``DINGTALK_WEBHOOK_URL`` env var or ``webhook_url`` in the platform's
    extra config.
    """
    try:
        import httpx
    except ImportError:
        return {"error": "httpx not installed"}

    try:
        target = extra.get("webhook_url") or os.getenv("DINGTALK_WEBHOOK_URL", "")
        if not target:
            return {"error": "DingTalk not configured. Set DINGTALK_WEBHOOK_URL env var or webhook_url in dingtalk platform extra config."}

        payload = {"msgtype": "text", "text": {"content": message}}
        async with httpx.AsyncClient(timeout=30.0) as client:
            response = await client.post(target, json=payload)
            response.raise_for_status()
            result = response.json()

        # DingTalk signals API-level failures in the JSON body, not HTTP status.
        if result.get("errcode", 0):
            return {"error": f"DingTalk API error: {result.get('errmsg', 'unknown')}"}
        return {"success": True, "platform": "dingtalk", "chat_id": chat_id}
    except Exception as e:
        return {"error": f"DingTalk send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
2026-03-29 21:29:13 -07:00
|
|
|
async def _send_wecom(extra, chat_id, message):
    """Relay one message through the WeCom adapter's WebSocket send pipeline.

    A short-lived adapter is created, connected, used for a single send, and
    torn down again — send_message calls are one-shot, not long-lived sessions.
    """
    try:
        from gateway.platforms.wecom import WeComAdapter, check_wecom_requirements

        if not check_wecom_requirements():
            return {"error": "WeCom requirements not met. Need aiohttp + WECOM_BOT_ID/SECRET."}
    except ImportError:
        return {"error": "WeCom adapter not available."}

    try:
        from gateway.config import PlatformConfig

        wecom = WeComAdapter(PlatformConfig(extra=extra))
        if not await wecom.connect():
            reason = wecom.fatal_error_message or "unknown error"
            return {"error": f"WeCom: failed to connect — {reason}"}
        try:
            outcome = await wecom.send(chat_id, message)
        finally:
            # Always release the WebSocket, whether the send succeeded or raised.
            await wecom.disconnect()
        if not outcome.success:
            return {"error": f"WeCom send failed: {outcome.error}"}
        return {"success": True, "platform": "wecom", "chat_id": chat_id, "message_id": outcome.message_id}
    except Exception as e:
        return {"error": f"WeCom send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
feat(gateway): add Feishu/Lark platform support (#3817)
Adds Feishu (ByteDance's enterprise messaging platform) as a gateway
platform adapter with full feature parity: WebSocket + webhook transports,
message batching, dedup, rate limiting, rich post/card content parsing,
media handling (images/audio/files/video), group @mention gating,
reaction routing, and interactive card button support.
Cherry-picked from PR #1793 by penwyp with:
- Moved to current main (PR was 458 commits behind)
- Fixed _send_with_retry shadowing BasePlatformAdapter method (renamed to
_feishu_send_with_retry to avoid signature mismatch crash)
- Fixed import structure: aiohttp/websockets imported independently of
lark_oapi so they remain available when SDK is missing
- Fixed get_hermes_home import (hermes_constants, not hermes_cli.config)
- Added skip decorators for tests requiring lark_oapi SDK
- All 16 integration points added surgically to current main
New dependency: lark-oapi>=1.5.3,<2 (optional, pip install hermes-agent[feishu])
Fixes #1788
Co-authored-by: penwyp <penwyp@users.noreply.github.com>
2026-03-29 18:17:42 -07:00
|
|
|
async def _send_feishu(pconfig, chat_id, message, media_files=None, thread_id=None):
    """Send via Feishu/Lark using the adapter's send pipeline.

    Sends the text body first (if non-empty), then each media file in order,
    failing fast on the first unsuccessful send. Media type is chosen by file
    extension against the module-level extension sets.

    Args:
        pconfig: Platform config passed straight to FeishuAdapter.
        chat_id: Feishu chat to deliver to.
        message: Text body; skipped when blank/whitespace-only.
        media_files: Optional list of (path, is_voice) tuples.
        thread_id: Optional thread to reply in (passed via send metadata).

    Returns:
        ``{"success": True, ...}`` with the last message_id on success, or
        ``{"error": ...}`` on any failure.
    """
    try:
        from gateway.platforms.feishu import FeishuAdapter, FEISHU_AVAILABLE

        if not FEISHU_AVAILABLE:
            return {"error": "Feishu dependencies not installed. Run: pip install 'hermes-agent[feishu]'"}

        from gateway.platforms.feishu import FEISHU_DOMAIN, LARK_DOMAIN
    except ImportError:
        return {"error": "Feishu dependencies not installed. Run: pip install 'hermes-agent[feishu]'"}

    media_files = media_files or []

    try:
        adapter = FeishuAdapter(pconfig)
        # Pick the API domain by the adapter's configured brand; anything
        # other than "lark" uses the Feishu (China) domain.
        # NOTE(review): _domain_name appears to be set from adapter config —
        # confirm against gateway/platforms/feishu.py.
        domain_name = getattr(adapter, "_domain_name", "feishu")
        domain = FEISHU_DOMAIN if domain_name != "lark" else LARK_DOMAIN
        # Build the SDK client manually since we bypass adapter.connect().
        adapter._client = adapter._build_lark_client(domain)
        metadata = {"thread_id": thread_id} if thread_id else None

        last_result = None
        # Text first, so media attachments appear after the message body.
        if message.strip():
            last_result = await adapter.send(chat_id, message, metadata=metadata)
            if not last_result.success:
                return {"error": f"Feishu send failed: {last_result.error}"}

        for media_path, is_voice in media_files:
            if not os.path.exists(media_path):
                return {"error": f"Media file not found: {media_path}"}

            # Dispatch on file extension; audio is sent as voice regardless
            # of the is_voice flag, anything unrecognized goes as a document.
            ext = os.path.splitext(media_path)[1].lower()
            if ext in _IMAGE_EXTS:
                last_result = await adapter.send_image_file(chat_id, media_path, metadata=metadata)
            elif ext in _VIDEO_EXTS:
                last_result = await adapter.send_video(chat_id, media_path, metadata=metadata)
            elif ext in _VOICE_EXTS and is_voice:
                last_result = await adapter.send_voice(chat_id, media_path, metadata=metadata)
            elif ext in _AUDIO_EXTS:
                last_result = await adapter.send_voice(chat_id, media_path, metadata=metadata)
            else:
                last_result = await adapter.send_document(chat_id, media_path, metadata=metadata)

            # Fail fast on the first media item that doesn't deliver.
            if not last_result.success:
                return {"error": f"Feishu media send failed: {last_result.error}"}

        # Neither text nor media produced a send — nothing was delivered.
        if last_result is None:
            return {"error": "No deliverable text or media remained after processing MEDIA tags"}

        return {
            "success": True,
            "platform": "feishu",
            "chat_id": chat_id,
            "message_id": last_result.message_id,
        }
    except Exception as e:
        return {"error": f"Feishu send failed: {e}"}
|
|
|
|
|
|
|
|
|
|
|
2026-02-22 20:44:15 -08:00
|
|
|
def _check_send_message():
    """Availability gate for the send_message tool.

    Immediately available inside any non-local messaging session; otherwise
    available only while the gateway process is running.
    """
    session_platform = os.getenv("HERMES_SESSION_PLATFORM", "")
    if session_platform not in ("", "local"):
        return True
    try:
        from gateway.status import is_gateway_running

        return is_gateway_running()
    except Exception:
        # Gateway package missing or status check failed — tool unavailable.
        return False
|
|
|
|
|
|
|
|
|
|
|
2026-02-21 20:22:33 -08:00
|
|
|
# --- Registry ---

from tools.registry import registry

# Register send_message as an agent tool. Schema and handler are defined
# earlier in this module; check_fn gates availability so the tool only
# appears when a messaging-platform session or a running gateway can
# actually deliver the message.
registry.register(
    name="send_message",
    toolset="messaging",
    schema=SEND_MESSAGE_SCHEMA,
    handler=send_message_tool,
    check_fn=_check_send_message,
    emoji="📨",
)
|