2026-02-21 22:31:43 -08:00
|
|
|
"""System prompt assembly -- identity, platform hints, skills index, context files.
|
|
|
|
|
|
|
|
|
|
All functions are stateless. AIAgent._build_system_prompt() calls these to
|
|
|
|
|
assemble pieces, then combines them with memory and ephemeral prompts.
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
import logging
|
|
|
|
|
import os
|
|
|
|
|
import re
|
|
|
|
|
from pathlib import Path
|
|
|
|
|
from typing import Optional
|
|
|
|
|
|
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
Harden agent attack surface: scan writes to memory, skills, cron, and context files
The security scanner (skills_guard.py) was only wired into the hub install path.
All other write paths to persistent state — skills created by the agent, memory
entries, cron prompts, and context files — bypassed it entirely. This closes
those gaps:
- file_operations: deny-list blocks writes to ~/.ssh, ~/.aws, ~/.hermes/.env, etc.
- code_execution_tool: filter secret env vars from sandbox child process
- skill_manager_tool: wire scan_skill() into create/edit/patch/write_file with rollback
- skills_guard: add "agent-created" trust level (same policy as community)
- memory_tool: scan content for injection/exfil before system prompt injection
- prompt_builder: scan AGENTS.md, .cursorrules, SOUL.md for prompt injection
- cronjob_tools: scan cron prompts for critical threats before scheduling
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-25 23:43:15 -05:00
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
# Context file scanning — detect prompt injection in AGENTS.md, .cursorrules,
|
|
|
|
|
# SOUL.md before they get injected into the system prompt.
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
# (regex, identifier) pairs matched case-insensitively against context file
# content by _scan_context_content(). The identifier is a short label that
# ends up in the warning log and the "[BLOCKED: ...]" placeholder.
_CONTEXT_THREAT_PATTERNS = [
    # Classic instruction-override phrasing.
    (r'ignore\s+(previous|all|above|prior)\s+instructions', "prompt_injection"),
    # Attempts to make the agent hide its actions from the user.
    (r'do\s+not\s+tell\s+the\s+user', "deception_hide"),
    (r'system\s+prompt\s+override', "sys_prompt_override"),
    (r'disregard\s+(your|all|any)\s+(instructions|rules|guidelines)', "disregard_rules"),
    (r'act\s+as\s+(if|though)\s+you\s+(have\s+no|don\'t\s+have)\s+(restrictions|limits|rules)', "bypass_restrictions"),
    # Payloads hidden from human review: HTML comments and invisible divs.
    (r'<!--[^>]*(?:ignore|override|system|secret|hidden)[^>]*-->', "html_comment_injection"),
    (r'<\s*div\s+style\s*=\s*["\'].*display\s*:\s*none', "hidden_div"),
    # Indirect execution and secret exfiltration patterns.
    (r'translate\s+.*\s+into\s+.*\s+and\s+(execute|run|eval)', "translate_execute"),
    (r'curl\s+[^\n]*\$\{?\w*(KEY|TOKEN|SECRET|PASSWORD|CREDENTIAL|API)', "exfil_curl"),
    (r'cat\s+[^\n]*(\.env|credentials|\.netrc|\.pgpass)', "read_secrets"),
]

# Zero-width characters (ZWSP, ZWNJ, ZWJ, word joiner, BOM/ZWNBSP) and
# bidirectional embedding/override controls (LRE, RLE, PDF, LRO, RLO) —
# their mere presence in a context file is treated as a finding, since they
# can smuggle text past human review.
_CONTEXT_INVISIBLE_CHARS = {
    '\u200b', '\u200c', '\u200d', '\u2060', '\ufeff',
    '\u202a', '\u202b', '\u202c', '\u202d', '\u202e',
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _scan_context_content(content: str, filename: str) -> str:
    """Scan context file content for prompt-injection indicators.

    Args:
        content: Raw text of a context file (AGENTS.md, .cursorrules, SOUL.md).
        filename: Display name used in the warning log and block placeholder.

    Returns:
        ``content`` unchanged when clean; otherwise a short ``[BLOCKED: ...]``
        placeholder so the suspicious text never reaches the system prompt.
    """
    findings: list[str] = []

    # Invisible/bidi characters can hide instructions from human reviewers.
    for char in _CONTEXT_INVISIBLE_CHARS:
        if char in content:
            findings.append(f"invisible unicode U+{ord(char):04X}")

    # Known injection/exfiltration phrasings (case-insensitive).
    for pattern, pid in _CONTEXT_THREAT_PATTERNS:
        if re.search(pattern, content, re.IGNORECASE):
            findings.append(pid)

    if findings:
        logger.warning("Context file %s blocked: %s", filename, ", ".join(findings))
        # Bug fix: the placeholder previously hard-coded "(unknown)" instead of
        # interpolating the actual filename, despite being an f-string.
        return f"[BLOCKED: {filename} contained potential prompt injection ({', '.join(findings)}). Content not loaded.]"

    return content
|
|
|
|
|
|
2026-02-21 22:31:43 -08:00
|
|
|
# =========================================================================
|
|
|
|
|
# Constants
|
|
|
|
|
# =========================================================================
|
|
|
|
|
|
|
|
|
|
# Base persona used when no custom identity is configured. Combined with
# memory and ephemeral prompts by AIAgent._build_system_prompt().
DEFAULT_AGENT_IDENTITY = (
    "You are Hermes Agent, an intelligent AI assistant created by Nous Research. "
    "You are helpful, knowledgeable, and direct. You assist users with a wide "
    "range of tasks including answering questions, writing and editing code, "
    "analyzing information, creative work, and executing actions via your tools. "
    "You communicate clearly, admit uncertainty when appropriate, and prioritize "
    "being genuinely useful over being verbose unless otherwise directed below. "
    "Be targeted and efficient in your exploration and investigations."
)

# Teaches the model what belongs in persistent memory (compact, durable
# facts) versus what should be recalled via session_search instead.
MEMORY_GUIDANCE = (
    "You have persistent memory across sessions. Save durable facts using the memory "
    "tool: user preferences, environment details, tool quirks, and stable conventions. "
    "Memory is injected into every turn, so keep it compact. Do NOT save task progress, "
    "session outcomes, or completed-work logs to memory; use session_search to recall "
    "those from past transcripts."
)

# Nudges the model to search past transcripts before asking the user to
# repeat themselves.
SESSION_SEARCH_GUIDANCE = (
    "When the user references something from a past conversation or you suspect "
    "relevant cross-session context exists, use session_search to recall it before "
    "asking them to repeat themselves."
)

# Encourages capturing non-trivial workflows as reusable skills.
SKILLS_GUIDANCE = (
    "After completing a complex task (5+ tool calls), fixing a tricky error, "
    "or discovering a non-trivial workflow, consider saving the approach as a "
    "skill with skill_manage so you can reuse it next time."
)
|
|
|
|
|
|
2026-02-21 22:31:43 -08:00
|
|
|
# Per-platform system prompt snippets keyed by adapter name. Each entry tells
# the model how output renders on that surface and how to attach media via
# the MEDIA:/absolute/path convention.
# NOTE(review): the phrase "markdown format  and" (double space) in several
# entries looks like an inline example was lost in an earlier edit — confirm.
PLATFORM_HINTS = {
    "whatsapp": (
        "You are on a text messaging communication platform, WhatsApp. "
        "Please do not use markdown as it does not render. "
        "You can send media files natively: to deliver a file to the user, "
        "include MEDIA:/absolute/path/to/file in your response. The file "
        "will be sent as a native WhatsApp attachment — images (.jpg, .png, "
        ".webp) appear as photos, videos (.mp4, .mov) play inline, and other "
        "files arrive as downloadable documents. You can also include image "
        "URLs in markdown format  and they will be sent as photos."
    ),
    "telegram": (
        "You are on a text messaging communication platform, Telegram. "
        "Please do not use markdown as it does not render. "
        "You can send media files natively: to deliver a file to the user, "
        "include MEDIA:/absolute/path/to/file in your response. Images "
        "(.png, .jpg, .webp) appear as photos, audio (.ogg) sends as voice "
        "bubbles, and videos (.mp4) play inline. You can also include image "
        "URLs in markdown format  and they will be sent as native photos."
    ),
    "discord": (
        "You are in a Discord server or group chat communicating with your user. "
        "You can send media files natively: include MEDIA:/absolute/path/to/file "
        "in your response. Images (.png, .jpg, .webp) are sent as photo "
        "attachments, audio as file attachments. You can also include image URLs "
        "in markdown format  and they will be sent as attachments."
    ),
    "slack": (
        "You are in a Slack workspace communicating with your user. "
        "You can send media files natively: include MEDIA:/absolute/path/to/file "
        "in your response. Images (.png, .jpg, .webp) are uploaded as photo "
        "attachments, audio as file attachments. You can also include image URLs "
        "in markdown format  and they will be uploaded as attachments."
    ),
    "signal": (
        "You are on a text messaging communication platform, Signal. "
        "Please do not use markdown as it does not render. "
        "You can send media files natively: to deliver a file to the user, "
        "include MEDIA:/absolute/path/to/file in your response. Images "
        "(.png, .jpg, .webp) appear as photos, audio as attachments, and other "
        "files arrive as downloadable documents. You can also include image "
        "URLs in markdown format  and they will be sent as photos."
    ),
    "email": (
        "You are communicating via email. Write clear, well-structured responses "
        "suitable for email. Use plain text formatting (no markdown). "
        "Keep responses concise but complete. You can send file attachments — "
        "include MEDIA:/absolute/path/to/file in your response. The subject line "
        "is preserved for threading. Do not include greetings or sign-offs unless "
        "contextually appropriate."
    ),
    # Not a chat surface: cron results are auto-delivered, so the hint stops
    # the model from double-sending with send_message.
    "cron": (
        "You are running as a scheduled cron job. Your final response is automatically "
        "delivered to the job's configured destination, so do not use send_message to "
        "send to that same target again. If you want the user to receive something in "
        "the scheduled destination, put it directly in your final response. Use "
        "send_message only for additional or different targets."
    ),
    "cli": (
        "You are a CLI AI Agent. Try not to use markdown but simple text "
        "renderable inside a terminal."
    ),
}
|
|
|
|
|
|
|
|
|
|
# Per-file character budget for context files injected into the system
# prompt, and the head/tail split _truncate_content() applies when a file
# exceeds it (70% head + 20% tail; the rest is headroom for the marker).
CONTEXT_FILE_MAX_CHARS = 20_000
CONTEXT_TRUNCATE_HEAD_RATIO = 0.7
CONTEXT_TRUNCATE_TAIL_RATIO = 0.2
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# =========================================================================
|
|
|
|
|
# Skills index
|
|
|
|
|
# =========================================================================
|
|
|
|
|
|
2026-03-13 03:14:04 -07:00
|
|
|
def _parse_skill_file(skill_file: Path) -> tuple[bool, dict, str]:
    """Read a SKILL.md once and return platform compatibility, frontmatter, and description.

    Returns (is_compatible, frontmatter, description). On any error, returns
    (True, {}, "") to err on the side of showing the skill.
    """
    try:
        from tools.skills_tool import _parse_frontmatter, skill_matches_platform

        # Only the header matters here; skip reading huge skill bodies.
        header = skill_file.read_text(encoding="utf-8")[:2000]
        meta, _body = _parse_frontmatter(header)

        if not skill_matches_platform(meta):
            return False, {}, ""

        # Normalize the description: strip whitespace and stray quotes, then
        # cap at 60 chars with an ellipsis.
        summary = str(meta.get("description", "") or "").strip().strip("'\"")
        if len(summary) > 60:
            summary = summary[:57] + "..."

        return True, meta, summary
    except Exception as exc:
        logger.debug("Failed to parse skill file %s: %s", skill_file, exc)
        return True, {}, ""
|
2026-03-07 00:47:54 -08:00
|
|
|
|
|
|
|
|
|
2026-03-09 23:13:39 +03:00
|
|
|
def _read_skill_conditions(skill_file: Path) -> dict:
    """Extract conditional activation fields from SKILL.md frontmatter."""
    try:
        from tools.skills_tool import _parse_frontmatter

        header = skill_file.read_text(encoding="utf-8")[:2000]
        meta, _ = _parse_frontmatter(header)
        hermes_meta = meta.get("metadata", {}).get("hermes", {})
        # Each field defaults to an empty list so callers can iterate freely.
        condition_keys = (
            "fallback_for_toolsets",
            "requires_toolsets",
            "fallback_for_tools",
            "requires_tools",
        )
        return {key: hermes_meta.get(key, []) for key in condition_keys}
    except Exception as exc:
        logger.debug("Failed to read skill conditions from %s: %s", skill_file, exc)
        return {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _skill_should_show(
|
|
|
|
|
conditions: dict,
|
|
|
|
|
available_tools: "set[str] | None",
|
|
|
|
|
available_toolsets: "set[str] | None",
|
|
|
|
|
) -> bool:
|
|
|
|
|
"""Return False if the skill's conditional activation rules exclude it."""
|
|
|
|
|
if available_tools is None and available_toolsets is None:
|
|
|
|
|
return True # No filtering info — show everything (backward compat)
|
|
|
|
|
|
|
|
|
|
at = available_tools or set()
|
|
|
|
|
ats = available_toolsets or set()
|
|
|
|
|
|
|
|
|
|
# fallback_for: hide when the primary tool/toolset IS available
|
|
|
|
|
for ts in conditions.get("fallback_for_toolsets", []):
|
|
|
|
|
if ts in ats:
|
|
|
|
|
return False
|
|
|
|
|
for t in conditions.get("fallback_for_tools", []):
|
|
|
|
|
if t in at:
|
|
|
|
|
return False
|
|
|
|
|
|
|
|
|
|
# requires: hide when a required tool/toolset is NOT available
|
|
|
|
|
for ts in conditions.get("requires_toolsets", []):
|
|
|
|
|
if ts not in ats:
|
|
|
|
|
return False
|
|
|
|
|
for t in conditions.get("requires_tools", []):
|
|
|
|
|
if t not in at:
|
|
|
|
|
return False
|
|
|
|
|
|
|
|
|
|
return True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def build_skills_system_prompt(
    available_tools: "set[str] | None" = None,
    available_toolsets: "set[str] | None" = None,
) -> str:
    """Build a compact skill index for the system prompt.

    Scans ~/.hermes/skills/ for SKILL.md files grouped by category.
    Includes per-skill descriptions from frontmatter so the model can
    match skills by meaning, not just name.
    Filters out skills incompatible with the current OS platform.

    Args:
        available_tools: Tool names currently enabled; used for
            conditional skill activation. None disables filtering.
        available_toolsets: Toolset names currently enabled; same role.

    Returns:
        A "## Skills (mandatory)" prompt section, or "" when no skills
        directory exists or no skill survives filtering.
    """
    hermes_home = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes"))
    skills_dir = hermes_home / "skills"

    if not skills_dir.exists():
        return ""

    # Collect skills with descriptions, grouped by category.
    # Each entry: (skill_name, description)
    # Supports sub-categories: skills/mlops/training/axolotl/SKILL.md
    # -> category "mlops/training", skill "axolotl"
    skills_by_category: dict[str, list[tuple[str, str]]] = {}
    for skill_file in skills_dir.rglob("SKILL.md"):
        is_compatible, _, desc = _parse_skill_file(skill_file)
        if not is_compatible:
            continue
        # Skip skills whose conditional activation rules exclude them
        conditions = _read_skill_conditions(skill_file)
        if not _skill_should_show(conditions, available_tools, available_toolsets):
            continue
        rel_path = skill_file.relative_to(skills_dir)
        parts = rel_path.parts
        if len(parts) >= 2:
            # Category is everything between skills_dir and the skill folder
            # e.g. parts = ("mlops", "training", "axolotl", "SKILL.md")
            # → category = "mlops/training", skill_name = "axolotl"
            # e.g. parts = ("github", "github-auth", "SKILL.md")
            # → category = "github", skill_name = "github-auth"
            skill_name = parts[-2]
            category = "/".join(parts[:-2]) if len(parts) > 2 else parts[0]
        else:
            # SKILL.md directly under skills_dir (no category folder).
            category = "general"
            skill_name = skill_file.parent.name
        skills_by_category.setdefault(category, []).append((skill_name, desc))

    if not skills_by_category:
        return ""

    # Read category-level descriptions from each category's DESCRIPTION.md.
    # Only the exact category path is checked (parent directories are not).
    category_descriptions: dict[str, str] = {}
    for category in skills_by_category:
        cat_path = Path(category)
        desc_file = skills_dir / cat_path / "DESCRIPTION.md"
        if desc_file.exists():
            try:
                content = desc_file.read_text(encoding="utf-8")
                # Pull "description:" out of the YAML-style frontmatter block.
                match = re.search(r"^---\s*\n.*?description:\s*(.+?)\s*\n.*?^---", content, re.MULTILINE | re.DOTALL)
                if match:
                    category_descriptions[category] = match.group(1).strip()
            except Exception as e:
                logger.debug("Could not read skill description %s: %s", desc_file, e)

    # Render the two-level index: category header, then its skills.
    index_lines = []
    for category in sorted(skills_by_category.keys()):
        cat_desc = category_descriptions.get(category, "")
        if cat_desc:
            index_lines.append(f"  {category}: {cat_desc}")
        else:
            index_lines.append(f"  {category}:")
        # Deduplicate and sort skills within each category
        seen = set()
        for name, desc in sorted(skills_by_category[category], key=lambda x: x[0]):
            if name in seen:
                continue
            seen.add(name)
            if desc:
                index_lines.append(f"    - {name}: {desc}")
            else:
                index_lines.append(f"    - {name}")

    return (
        "## Skills (mandatory)\n"
        "Before replying, scan the skills below. If one clearly matches your task, "
        "load it with skill_view(name) and follow its instructions. "
        "If a skill has issues, fix it with skill_manage(action='patch').\n"
        "\n"
        "<available_skills>\n"
        + "\n".join(index_lines) + "\n"
        "</available_skills>\n"
        "\n"
        "If none match, proceed normally without loading a skill."
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# =========================================================================
|
|
|
|
|
# Context files (SOUL.md, AGENTS.md, .cursorrules)
|
|
|
|
|
# =========================================================================
|
|
|
|
|
|
|
|
|
|
def _truncate_content(content: str, filename: str, max_chars: int = CONTEXT_FILE_MAX_CHARS) -> str:
    """Head/tail truncation with a marker in the middle.

    Args:
        content: Full file text.
        filename: Display name embedded in the truncation marker.
        max_chars: Budget; content at or under this length passes through.

    Returns:
        ``content`` unchanged when it fits; otherwise the head (70% of the
        budget) and tail (20%) joined by a marker telling the model how to
        read the full file.
    """
    if len(content) <= max_chars:
        return content
    head_chars = int(max_chars * CONTEXT_TRUNCATE_HEAD_RATIO)
    tail_chars = int(max_chars * CONTEXT_TRUNCATE_TAIL_RATIO)
    head = content[:head_chars]
    tail = content[-tail_chars:]
    # Bug fix: the marker previously hard-coded "(unknown)" and never used
    # the filename parameter.
    marker = (
        f"\n\n[...truncated {filename}: kept {head_chars}+{tail_chars} of "
        f"{len(content)} chars. Use file tools to read the full file.]\n\n"
    )
    return head + marker + tail
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def build_context_files_prompt(cwd: Optional[str] = None) -> str:
    """Discover and load project context files for the system prompt.

    Discovery rules:
      * AGENTS.md — recursive under *cwd*, but only when a top-level
        AGENTS.md/agents.md exists (avoids honoring agent instructions from
        random subtrees). Hidden dirs and dependency/venv dirs are pruned.
      * .cursorrules and .cursor/rules/*.mdc — from *cwd* only.
      * SOUL.md — from HERMES_HOME only.

    Each file is security-scanned via ``_scan_context_content`` (prompt
    injection detection) before inclusion, and each group is capped via
    ``_truncate_content``. Unreadable files are skipped with a debug log.

    Args:
        cwd: Directory to search. Defaults to the current working directory.

    Returns:
        A "# Project Context" prompt section, or "" when no context files
        are found.
    """
    if cwd is None:
        cwd = os.getcwd()

    cwd_path = Path(cwd).resolve()
    sections = []

    # --- AGENTS.md (hierarchical, recursive) -----------------------------
    top_level_agents = None
    for name in ["AGENTS.md", "agents.md"]:
        candidate = cwd_path / name
        if candidate.exists():
            top_level_agents = candidate
            break

    if top_level_agents:
        agents_files = []
        for root, dirs, files in os.walk(cwd_path):
            # Prune in place so os.walk does not descend into hidden or
            # dependency directories.
            dirs[:] = [d for d in dirs if not d.startswith('.') and d not in ('node_modules', '__pycache__', 'venv', '.venv')]
            for f in files:
                if f.lower() == "agents.md":
                    agents_files.append(Path(root) / f)
        # Shallowest first. Bug fix: tie-break on the path string so the
        # ordering is deterministic — os.walk order among same-depth files
        # is platform/filesystem dependent.
        agents_files.sort(key=lambda p: (len(p.parts), str(p)))

        total_agents_content = ""
        for agents_path in agents_files:
            try:
                content = agents_path.read_text(encoding="utf-8").strip()
                if content:
                    rel_path = agents_path.relative_to(cwd_path)
                    # Scan for prompt injection before this reaches the
                    # system prompt.
                    content = _scan_context_content(content, str(rel_path))
                    total_agents_content += f"## {rel_path}\n\n{content}\n\n"
            except Exception as e:
                logger.debug("Could not read %s: %s", agents_path, e)

        if total_agents_content:
            total_agents_content = _truncate_content(total_agents_content, "AGENTS.md")
            sections.append(total_agents_content)

    # --- .cursorrules and .cursor/rules/*.mdc ----------------------------
    cursorrules_content = ""
    cursorrules_file = cwd_path / ".cursorrules"
    if cursorrules_file.exists():
        try:
            content = cursorrules_file.read_text(encoding="utf-8").strip()
            if content:
                content = _scan_context_content(content, ".cursorrules")
                cursorrules_content += f"## .cursorrules\n\n{content}\n\n"
        except Exception as e:
            logger.debug("Could not read .cursorrules: %s", e)

    cursor_rules_dir = cwd_path / ".cursor" / "rules"
    if cursor_rules_dir.exists() and cursor_rules_dir.is_dir():
        mdc_files = sorted(cursor_rules_dir.glob("*.mdc"))
        for mdc_file in mdc_files:
            try:
                content = mdc_file.read_text(encoding="utf-8").strip()
                if content:
                    content = _scan_context_content(content, f".cursor/rules/{mdc_file.name}")
                    cursorrules_content += f"## .cursor/rules/{mdc_file.name}\n\n{content}\n\n"
            except Exception as e:
                logger.debug("Could not read %s: %s", mdc_file, e)

    if cursorrules_content:
        cursorrules_content = _truncate_content(cursorrules_content, ".cursorrules")
        sections.append(cursorrules_content)

    # --- SOUL.md from HERMES_HOME only -----------------------------------
    # Best-effort: make sure HERMES_HOME exists before probing for SOUL.md.
    # Local import keeps this module free of a hard dependency on the CLI.
    try:
        from hermes_cli.config import ensure_hermes_home
        ensure_hermes_home()
    except Exception as e:
        logger.debug("Could not ensure HERMES_HOME before loading SOUL.md: %s", e)

    soul_path = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes")) / "SOUL.md"
    if soul_path.exists():
        try:
            content = soul_path.read_text(encoding="utf-8").strip()
            if content:
                content = _scan_context_content(content, "SOUL.md")
                content = _truncate_content(content, "SOUL.md")
                sections.append(content)
        except Exception as e:
            logger.debug("Could not read SOUL.md from %s: %s", soul_path, e)

    if not sections:
        return ""
    return "# Project Context\n\nThe following project context files have been loaded and should be followed:\n\n" + "\n".join(sections)
|