Compare commits

..

4 Commits

Author SHA1 Message Date
Timmy
f12e1e69a7 poka-yoke: add contextual skill-manager errors
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 25s
2026-04-14 15:29:33 -04:00
3f525dd5a1 fix: Add rich error context to skill management (#624)
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 21s
2026-04-14 18:25:42 +00:00
a5902d5666 fix: Add tests for error context improvements (#624) 2026-04-14 18:25:16 +00:00
8657ea47ad fix: Update tools/skill_manager_tool.py with rich error context (#624) 2026-04-14 18:17:21 +00:00
5 changed files with 395 additions and 311 deletions

View File

@@ -41,42 +41,6 @@ from agent.model_metadata import is_local_endpoint
logger = logging.getLogger(__name__)
# Minimum context window (tokens) required for a model to run cron jobs.
# Models below this threshold are rejected at job startup.
CRON_MIN_CONTEXT_TOKENS = 64_000
class ModelContextError(ValueError):
"""Raised when a model's context window is too small for cron use."""
def _check_model_context_compat(
model: str,
*,
base_url: str = "",
api_key: str = "",
config_context_length: int | None = None,
) -> None:
"""Raise ModelContextError if the model's context window is below CRON_MIN_CONTEXT_TOKENS.
    If config_context_length is provided, the check is skipped (user override).
Detection failures are non-fatal (fail-open) — the job proceeds.
"""
if config_context_length is not None:
return
try:
from agent.model_metadata import get_model_context_length
ctx = get_model_context_length(model, base_url=base_url, api_key=api_key)
except Exception as exc:
logger.debug("Context length detection failed for '%s', skipping check: %s", model, exc)
return
if ctx < CRON_MIN_CONTEXT_TOKENS:
raise ModelContextError(
f"Model '{model}' has a context window of {ctx:,} tokens, "
f"which is below the minimum {CRON_MIN_CONTEXT_TOKENS:,} required by Hermes Agent. "
f"To override, set model.context_length in config.yaml."
)
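# Usage sketch (illustrative only, mirroring the behavior described in the docstring):
#   _check_model_context_compat("llama3", config_context_length=32_000)  # user override -> check skipped
#   _check_model_context_compat("llama3")  # detected window below 64,000 -> raises ModelContextError
#   # if detection itself fails, the error is logged at DEBUG and the job proceeds (fail-open)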
# =====================================================================
# Deploy Sync Guard
@@ -126,14 +90,7 @@ def _validate_agent_interface() -> None:
) from exc
sig = inspect.signature(AIAgent.__init__)
params = sig.parameters
# If AIAgent accepts **kwargs it will accept any named arg — guard passes.
if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()):
_agent_interface_validated = True
logger.debug("Deploy sync guard passed — AIAgent accepts **kwargs")
return
accepted = set(params.keys()) - {"self"}
accepted = set(sig.parameters.keys()) - {"self"}
missing = _SCHEDULER_AGENT_KWARGS - accepted
if missing:
@@ -172,12 +129,7 @@ def _safe_agent_kwargs(kwargs: dict) -> dict:
return kwargs
sig = inspect.signature(AIAgent.__init__)
params = sig.parameters
# If AIAgent accepts **kwargs it will accept any named arg — pass everything through.
if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()):
return kwargs
accepted = set(params.keys()) - {"self"}
accepted = set(sig.parameters.keys()) - {"self"}
safe = {}
dropped = []
@@ -593,49 +545,7 @@ def _run_job_script(script_path: str) -> tuple[bool, str]:
return False, f"Script execution failed: {exc}"
_PROVIDER_ALIASES = {
"ollama": {"ollama", "localhost:11434"},
"anthropic": {"anthropic", "claude"},
"nous": {"nous", "mimo"},
"openrouter": {"openrouter"},
"openai": {"openai", "gpt"},
"gemini": {"gemini", "google"},
}
_CLOUD_PREFIXES = frozenset({"nous", "openrouter", "anthropic", "openai", "zai", "kimi", "gemini", "minimax"})
def _classify_runtime(provider: str, model: str) -> str:
"""Return 'cloud', 'local', or 'unknown' based on provider/model hints."""
p = (provider or "").strip().lower()
m = (model or "").strip().lower()
if p and p not in ("ollama", "local"):
return "cloud"
if "/" in m and m.split("/")[0] in _CLOUD_PREFIXES:
return "cloud"
if p in ("ollama", "local") or (not p and m):
return "local"
return "unknown"
def _detect_provider_mismatch(prompt: str, active_provider: str):
"""Return the mismatched provider alias if the prompt references a different provider."""
if not active_provider or not prompt:
return None
pl = prompt.lower()
al = active_provider.lower().strip()
active_group = next(
(g for g, aliases in _PROVIDER_ALIASES.items() if al in aliases or al.startswith(g)),
None,
)
if not active_group:
return None
return next(
(g for g, aliases in _PROVIDER_ALIASES.items() if g != active_group and any(x in pl for x in aliases)),
None,
)
def _build_job_prompt(job: dict, *, runtime_model: str = "", runtime_provider: str = "") -> str:
def _build_job_prompt(job: dict) -> str:
"""Build the effective prompt for a cron job, optionally loading one or more skills first."""
prompt = job.get("prompt", "")
skills = job.get("skills")
@@ -666,26 +576,6 @@ def _build_job_prompt(job: dict, *, runtime_model: str = "", runtime_provider: s
f"{prompt}"
)
# Build runtime context block — inject model/provider/runtime classification
# so the agent knows what infrastructure it has access to.
# Fix #565: derive provider from model prefix when runtime_provider is empty.
_runtime_block = ""
if runtime_model or runtime_provider:
if not runtime_provider and "/" in runtime_model:
runtime_provider = runtime_model.split("/")[0]
_kind = _classify_runtime(runtime_provider, runtime_model)
_parts = []
if runtime_model:
_parts.append(f"MODEL: {runtime_model}")
if runtime_provider:
_parts.append(f"PROVIDER: {runtime_provider}")
if _kind == "local":
_parts.append("RUNTIME: local — access to machine, Ollama, SSH")
elif _kind == "cloud":
_parts.append("RUNTIME: cloud — NO local access, NO SSH, NO localhost")
if _parts:
_runtime_block = "[SYSTEM: RUNTIME CONTEXT — " + "; ".join(_parts) + "]\n\n"
# Always prepend cron execution guidance so the agent knows how
# delivery works and can suppress delivery when appropriate.
cron_hint = (
@@ -707,7 +597,7 @@ def _build_job_prompt(job: dict, *, runtime_model: str = "", runtime_provider: s
"\"[SCRIPT_FAILED]: forge.alexanderwhitestone.com timed out\" "
"\"[SCRIPT_FAILED]: script exited with code 1\".]\\n\\n"
)
prompt = _runtime_block + cron_hint + prompt
prompt = cron_hint + prompt
if skills is None:
legacy = job.get("skill")
skills = [legacy] if legacy else []
@@ -777,23 +667,7 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
job_id = job["id"]
job_name = job["name"]
# Resolve runtime model/provider early so the prompt gets accurate context.
_runtime_model = job.get("model") or os.getenv("HERMES_MODEL") or ""
_runtime_provider = os.getenv("HERMES_PROVIDER", "")
if not _runtime_model:
try:
import yaml as _y
_cp2 = str(_hermes_home / "config.yaml")
if os.path.exists(_cp2):
with open(_cp2) as _f:
_ce = _y.safe_load(_f) or {}
_mc = _ce.get("model", {})
_runtime_model = _mc if isinstance(_mc, str) else (_mc.get("default", "") if isinstance(_mc, dict) else "")
except Exception:
pass
prompt = _build_job_prompt(job, runtime_model=_runtime_model, runtime_provider=_runtime_provider)
prompt = _build_job_prompt(job)
origin = _resolve_origin(job)
_cron_session_id = f"cron_{job_id}_{_hermes_now().strftime('%Y%m%d_%H%M%S')}"
@@ -905,14 +779,6 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
message = format_runtime_provider_error(exc)
raise RuntimeError(message) from exc
_active_provider = runtime.get("provider", "") or ""
_mismatch = _detect_provider_mismatch(job.get("prompt", ""), _active_provider)
if _mismatch:
logger.warning(
"Job '%s': prompt references '%s' but active provider is '%s'",
job_name, _mismatch, _active_provider,
)
from agent.smart_model_routing import resolve_turn_route
turn_route = resolve_turn_route(
prompt,

View File

@@ -7,7 +7,7 @@ from unittest.mock import AsyncMock, patch, MagicMock
import pytest
from cron.scheduler import _resolve_origin, _resolve_delivery_target, _deliver_result, run_job, SILENT_MARKER, _build_job_prompt, _check_model_context_compat, ModelContextError, CRON_MIN_CONTEXT_TOKENS, _classify_runtime, _detect_provider_mismatch
from cron.scheduler import _resolve_origin, _resolve_delivery_target, _deliver_result, run_job, SILENT_MARKER, _build_job_prompt, _check_model_context_compat, ModelContextError, CRON_MIN_CONTEXT_TOKENS
class TestResolveOrigin:
@@ -670,13 +670,6 @@ class TestRunJobSkillBacked:
class TestSilentDelivery:
"""Verify that [SILENT] responses suppress delivery while still saving output."""
@pytest.fixture(autouse=True)
def _isolate_lock(self, tmp_path):
"""Give each test its own tick lock file to prevent parallel test contention."""
with patch("cron.scheduler._LOCK_FILE", tmp_path / ".tick.lock"), \
patch("cron.scheduler._LOCK_DIR", tmp_path):
yield
def _make_job(self):
return {
"id": "monitor-job",
@@ -834,102 +827,10 @@ class TestBuildJobPromptMissingSkill:
assert "go" in result
class TestClassifyRuntime:
"""Unit tests for _classify_runtime."""
def test_cloud_provider_explicit(self):
assert _classify_runtime("openai", "") == "cloud"
assert _classify_runtime("anthropic", "") == "cloud"
assert _classify_runtime("nous", "") == "cloud"
def test_local_provider_explicit(self):
assert _classify_runtime("ollama", "") == "local"
assert _classify_runtime("local", "") == "local"
def test_cloud_detected_from_model_prefix(self):
"""Model prefix 'nous/...' should be classified as cloud even with no provider."""
assert _classify_runtime("", "nous/mimo-v2-pro") == "cloud"
assert _classify_runtime("", "openai/gpt-4o") == "cloud"
def test_local_when_model_has_no_cloud_prefix(self):
"""A model without a cloud prefix and no provider => local."""
assert _classify_runtime("", "llama3") == "local"
def test_unknown_when_empty(self):
assert _classify_runtime("", "") == "unknown"
class TestBuildJobPromptRuntimeContext:
"""Verify runtime context block injection in _build_job_prompt."""
def test_runtime_block_injected_with_model_and_provider(self):
job = {"prompt": "Do something"}
result = _build_job_prompt(job, runtime_model="nous/mimo-v2-pro", runtime_provider="nous")
assert "RUNTIME CONTEXT" in result
assert "MODEL: nous/mimo-v2-pro" in result
assert "PROVIDER: nous" in result
assert "cloud" in result
def test_provider_derived_from_model_prefix_when_empty(self):
"""Fix #565: PROVIDER should be derived from model prefix when runtime_provider is empty."""
job = {"prompt": "Do something"}
result = _build_job_prompt(job, runtime_model="nous/mimo-v2-pro", runtime_provider="")
assert "PROVIDER: nous" in result
def test_provider_not_empty_in_context_block(self):
"""Fix #565: PROVIDER line must not be blank when model has a slash prefix."""
job = {"prompt": "Check status"}
result = _build_job_prompt(job, runtime_model="openai/gpt-4o", runtime_provider="")
assert "PROVIDER: openai" in result
assert "PROVIDER: ;" not in result
assert "PROVIDER: ]" not in result
def test_no_runtime_block_when_no_model_or_provider(self):
"""No runtime block should appear when neither model nor provider is given."""
job = {"prompt": "Hello"}
result = _build_job_prompt(job)
assert "RUNTIME CONTEXT" not in result
def test_local_runtime_classification(self):
"""ollama model should get local runtime label."""
job = {"prompt": "Query local model"}
result = _build_job_prompt(job, runtime_model="llama3", runtime_provider="ollama")
assert "RUNTIME: local" in result
assert "NO local access" not in result
def test_runtime_block_precedes_cron_hint(self):
"""RUNTIME CONTEXT block should appear before the cron system hint."""
job = {"prompt": "test"}
result = _build_job_prompt(job, runtime_model="nous/mimo-v2-pro", runtime_provider="nous")
runtime_pos = result.index("RUNTIME CONTEXT")
cron_pos = result.index("scheduled cron job")
assert runtime_pos < cron_pos
class TestDetectProviderMismatch:
"""Unit tests for _detect_provider_mismatch."""
def test_no_mismatch_when_same_provider(self):
assert _detect_provider_mismatch("Use ollama to generate", "ollama") is None
def test_mismatch_detected(self):
"""Prompt referencing 'ollama' while running on 'nous' should flag a mismatch."""
result = _detect_provider_mismatch("Check if Ollama is responding", "nous")
assert result == "ollama"
def test_no_mismatch_for_empty_inputs(self):
assert _detect_provider_mismatch("", "nous") is None
assert _detect_provider_mismatch("some prompt", "") is None
def test_no_mismatch_when_provider_unknown(self):
"""Unknown active provider should not raise, just return None."""
assert _detect_provider_mismatch("Check Ollama", "mystery-provider") is None
class TestTickAdvanceBeforeRun:
"""Verify that tick() calls advance_next_run before run_job for crash safety."""
def test_advance_called_before_run_job(self, tmp_path, monkeypatch):
def test_advance_called_before_run_job(self, tmp_path):
"""advance_next_run must be called before run_job to prevent crash-loop re-fires."""
call_order = []
@@ -954,9 +855,7 @@ class TestTickAdvanceBeforeRun:
patch("cron.scheduler.run_job", side_effect=fake_run_job), \
patch("cron.scheduler.save_job_output", return_value=tmp_path / "out.md"), \
patch("cron.scheduler.mark_job_run"), \
patch("cron.scheduler._deliver_result"), \
patch("cron.scheduler._LOCK_FILE", tmp_path / ".tick.lock"), \
patch("cron.scheduler._LOCK_DIR", tmp_path):
patch("cron.scheduler._deliver_result"):
from cron.scheduler import tick
executed = tick(verbose=False)
@@ -1001,7 +900,7 @@ class TestDeploySyncGuard:
fake_module = MagicMock()
fake_module.AIAgent = FakeAIAgent
with pytest.raises(RuntimeError, match=r"(?s)missing params:.*tool_choice"):
with pytest.raises(RuntimeError, match="Missing parameters: tool_choice"):
with patch.dict("sys.modules", {"run_agent": fake_module}):
sched_mod._validate_agent_interface()
finally:

View File

@@ -0,0 +1,152 @@
"""
Tests for improved error messages in skill_manager_tool (issue #624).
Verifies that error messages include file paths, context, and suggestions.
"""
import pytest
from pathlib import Path
from unittest.mock import patch, MagicMock
from tools.skill_manager_tool import _format_error, _edit_skill, _patch_skill, skill_manage
class TestFormatError:
"""Test the _format_error helper function."""
def test_basic_error(self):
"""Test basic error formatting."""
result = _format_error("Something went wrong")
assert result["success"] is False
assert "Something went wrong" in result["error"]
assert result["skill_name"] is None
assert result["file_path"] is None
def test_with_skill_name(self):
"""Test error with skill name."""
result = _format_error("Failed", skill_name="test-skill")
assert "test-skill" in result["error"]
assert result["skill_name"] == "test-skill"
def test_with_file_path(self):
"""Test error with file path."""
result = _format_error("Failed", file_path="/path/to/SKILL.md")
assert "/path/to/SKILL.md" in result["error"]
assert result["file_path"] == "/path/to/SKILL.md"
def test_with_suggestion(self):
"""Test error with suggestion."""
result = _format_error("Failed", suggestion="Try again")
assert "Suggestion: Try again" in result["error"]
assert result["suggestion"] == "Try again"
def test_with_context(self):
"""Test error with context dict."""
result = _format_error("Failed", context={"line": 5, "found": "x"})
assert "line: 5" in result["error"]
assert "found: x" in result["error"]
def test_all_fields(self):
"""Test error with all fields."""
result = _format_error(
"Pattern match failed",
skill_name="my-skill",
file_path="/skills/my-skill/SKILL.md",
suggestion="Check whitespace",
context={"expected": "foo", "found": "bar"}
)
assert "Pattern match failed" in result["error"]
assert "Skill: my-skill" in result["error"]
assert "File: /skills/my-skill/SKILL.md" in result["error"]
assert "Suggestion: Check whitespace" in result["error"]
assert "expected: foo" in result["error"]
class TestEditSkillErrors:
"""Test improved error messages in _edit_skill."""
@patch('tools.skill_manager_tool._find_skill')
def test_skill_not_found(self, mock_find):
"""Test skill not found error includes suggestion."""
mock_find.return_value = None
# Provide valid content with frontmatter so it passes validation
valid_content = """---
name: test
description: Test skill
---
Body content here.
"""
result = _edit_skill("nonexistent", valid_content)
assert result["success"] is False
assert "nonexistent" in result["error"]
assert "skills_list()" in result.get("suggestion", "")
@patch('tools.skill_manager_tool._find_skill')
def test_yaml_parse_error_includes_file_path_and_line_number(self, mock_find, tmp_path):
"""Invalid YAML should report target file path and parser line information."""
skill_dir = tmp_path / "my-skill"
skill_dir.mkdir()
(skill_dir / "SKILL.md").write_text("old", encoding="utf-8")
mock_find.return_value = {"path": skill_dir}
bad_content = """---
name: my-skill
description: [broken
---
Body.
"""
result = _edit_skill("my-skill", bad_content)
assert result["success"] is False
assert str(skill_dir / "SKILL.md") in result["error"]
assert "line" in result["error"].lower()
class TestPatchSkillErrors:
"""Test improved error messages in _patch_skill."""
def test_old_string_required(self):
"""Test old_string required error includes suggestion."""
result = _patch_skill("test-skill", None, "new")
assert result["success"] is False
assert "old_string is required" in result["error"]
assert "suggestion" in result
def test_new_string_required(self):
"""Test new_string required error includes suggestion."""
result = _patch_skill("test-skill", "old", None)
assert result["success"] is False
assert "new_string is required" in result["error"]
assert "suggestion" in result
@patch('tools.skill_manager_tool._find_skill')
def test_skill_not_found(self, mock_find):
"""Test skill not found error includes suggestion."""
mock_find.return_value = None
result = _patch_skill("nonexistent", "old", "new")
assert result["success"] is False
assert "nonexistent" in result["error"]
assert "skills_list()" in result.get("suggestion", "")
@patch('tools.skill_manager_tool._find_skill')
def test_pattern_match_error_includes_state_info(self, mock_find, tmp_path):
"""Patch failures should include file path and patch state info."""
skill_dir = tmp_path / "state-skill"
skill_dir.mkdir()
target = skill_dir / "SKILL.md"
target.write_text("---\nname: state-skill\ndescription: desc\n---\n\nBody content here.\n", encoding="utf-8")
mock_find.return_value = {"path": skill_dir}
result = _patch_skill("state-skill", "missing pattern", "new text", replace_all=False)
assert result["success"] is False
assert str(target) in result["error"]
assert "replace_all" in result["error"]
assert "False" in result["error"]
class TestSkillManageEntryPoint:
def test_patch_missing_old_string_returns_json_error(self):
result = skill_manage(action="patch", name="demo-skill", old_string="", new_string="x")
assert isinstance(result, str)
assert "old_string is required" in result
if __name__ == "__main__":
pytest.main([__file__, "-v"])

View File

@@ -21,6 +21,18 @@ from typing import Callable, Dict, List, Optional, Set
logger = logging.getLogger(__name__)
def tool_error(message: str, success: bool = False, **extra) -> str:
"""Return a standardized JSON error payload for tool handlers.
Many tools import this helper directly from the registry module.
Keeping it here avoids circular helper imports and ensures a consistent
error envelope across tools.
"""
payload = {"success": success, "error": message}
payload.update(extra)
return json.dumps(payload, ensure_ascii=False)
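# Usage sketch (illustrative; the "hint" keyword below is a hypothetical extra field):
#   tool_error("file_path is required for 'write_file'.", hint="references/api-guide.md")
#   -> {"success": false, "error": "file_path is required for 'write_file'.", "hint": "references/api-guide.md"}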
class ToolEntry:
"""Metadata for a single registered tool."""

View File

@@ -40,7 +40,7 @@ import shutil
import tempfile
from pathlib import Path
from hermes_constants import get_hermes_home
from typing import Dict, Any, Optional
from typing import Dict, Any, Optional, Tuple
logger = logging.getLogger(__name__)
@@ -53,6 +53,57 @@ except ImportError:
_GUARD_AVAILABLE = False
def _format_error(error_msg: str, skill_name: str = None, file_path: str = None,
suggestion: str = None, context: dict = None) -> dict:
"""Format an error response with rich context for debugging.
Args:
error_msg: The primary error message
skill_name: Name of the skill being operated on
file_path: Path to the file that failed
suggestion: Suggested action to fix the issue
context: Additional context dict (e.g., {'line': 5, 'found': 'x', 'expected': 'y'})
Returns:
Formatted error dict with success=False
"""
parts = [error_msg]
if skill_name:
parts.append(f"Skill: {skill_name}")
if file_path:
parts.append(f"File: {file_path}")
if context:
for key, value in context.items():
parts.append(f"{key}: {value}")
if suggestion:
parts.append(f"Suggestion: {suggestion}")
return {
"success": False,
"error": " | ".join(parts),
"skill_name": skill_name,
"file_path": file_path,
"suggestion": suggestion,
}
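# Illustrative example of the envelope this produces (all values hypothetical):
#   _format_error("Pattern match failed", skill_name="demo", file_path="/skills/demo/SKILL.md",
#                 context={"found": "x"}, suggestion="Check whitespace")
#   -> {"success": False,
#       "error": "Pattern match failed | Skill: demo | File: /skills/demo/SKILL.md | found: x | Suggestion: Check whitespace",
#       "skill_name": "demo", "file_path": "/skills/demo/SKILL.md", "suggestion": "Check whitespace"}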
def _get_skill_file_path(skill_name: str, file_path: str = None) -> str:
"""Get the full file path for error messages."""
existing = _find_skill(skill_name)
if not existing:
return f"(skill '{skill_name}' not found)"
skill_dir = existing["path"]
if file_path:
return str(skill_dir / file_path)
return str(skill_dir / "SKILL.md")
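# Illustrative calls (paths hypothetical; the real prefix depends on the skills directory):
#   _get_skill_file_path("demo")                        -> ".../skills/demo/SKILL.md"
#   _get_skill_file_path("demo", "references/notes.md") -> ".../skills/demo/references/notes.md"
#   _get_skill_file_path("missing")                     -> "(skill 'missing' not found)"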
def _security_scan_skill(skill_dir: Path) -> Optional[str]:
"""Scan a skill directory after write. Returns error string if blocked, else None."""
if not _GUARD_AVAILABLE:
@@ -92,11 +143,6 @@ VALID_NAME_RE = re.compile(r'^[a-z0-9][a-z0-9._-]*$')
ALLOWED_SUBDIRS = {"references", "templates", "scripts", "assets"}
def check_skill_manage_requirements() -> bool:
"""Skill management has no external requirements -- always available."""
return True
# =============================================================================
# Validation helpers
# =============================================================================
@@ -140,43 +186,58 @@ def _validate_category(category: Optional[str]) -> Optional[str]:
return None
def _validate_frontmatter(content: str) -> Optional[str]:
"""
Validate that SKILL.md content has proper frontmatter with required fields.
Returns error message or None if valid.
def _validate_frontmatter_details(content: str) -> Tuple[Optional[str], Optional[dict]]:
"""Validate SKILL.md frontmatter and return optional structured context.
Returns:
(error_message, context_dict) where both are None when valid.
"""
if not content.strip():
return "Content cannot be empty."
return "Content cannot be empty.", None
if not content.startswith("---"):
return "SKILL.md must start with YAML frontmatter (---). See existing skills for format."
return "SKILL.md must start with YAML frontmatter (---). See existing skills for format.", None
end_match = re.search(r'\n---\s*\n', content[3:])
if not end_match:
return "SKILL.md frontmatter is not closed. Ensure you have a closing '---' line."
return "SKILL.md frontmatter is not closed. Ensure you have a closing '---' line.", None
yaml_content = content[3:end_match.start() + 3]
try:
parsed = yaml.safe_load(yaml_content)
except yaml.YAMLError as e:
return f"YAML frontmatter parse error: {e}"
context = {}
problem_mark = getattr(e, "problem_mark", None)
if problem_mark is not None:
context["line"] = problem_mark.line + 1
context["column"] = problem_mark.column + 1
return f"YAML frontmatter parse error: {e}", (context or None)
if not isinstance(parsed, dict):
return "Frontmatter must be a YAML mapping (key: value pairs)."
return "Frontmatter must be a YAML mapping (key: value pairs). Check for syntax errors in the YAML.", None
if "name" not in parsed:
return "Frontmatter must include 'name' field."
return "Frontmatter must include 'name' field.", None
if "description" not in parsed:
return "Frontmatter must include 'description' field."
return "Frontmatter must include 'description' field.", None
if len(str(parsed["description"])) > MAX_DESCRIPTION_LENGTH:
return f"Description exceeds {MAX_DESCRIPTION_LENGTH} characters."
return f"Description exceeds {MAX_DESCRIPTION_LENGTH} characters.", None
body = content[end_match.end() + 3:].strip()
if not body:
return "SKILL.md must have content after the frontmatter (instructions, procedures, etc.)."
return "SKILL.md must have content after the frontmatter (instructions, procedures, etc.).", None
return None
return None, None
def _validate_frontmatter(content: str) -> Optional[str]:
"""
Validate that SKILL.md content has proper frontmatter with required fields.
Returns error message or None if valid.
"""
err, _context = _validate_frontmatter_details(content)
return err
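# Illustrative calls (line/column values come from PyYAML's problem_mark when it is available):
#   err, ctx = _validate_frontmatter_details("---\nname: x\ndescription: [broken\n---\nBody.\n")
#   # err -> "YAML frontmatter parse error: ...", ctx -> {"line": N, "column": M} (1-based) or None
#   err, ctx = _validate_frontmatter_details("---\nname: x\ndescription: ok\n---\nBody.\n")
#   # err is None and ctx is None for valid content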
def _validate_content_size(content: str, label: str = "SKILL.md") -> Optional[str]:
@@ -210,7 +271,15 @@ def _find_skill(name: str) -> Optional[Dict[str, Any]]:
{"path": Path} or None.
"""
from agent.skill_utils import get_all_skills_dirs
for skills_dir in get_all_skills_dirs():
candidate_dirs = []
if isinstance(SKILLS_DIR, Path):
candidate_dirs.append(SKILLS_DIR)
for extra_dir in get_all_skills_dirs():
if extra_dir not in candidate_dirs:
candidate_dirs.append(extra_dir)
for skills_dir in candidate_dirs:
if not skills_dir.exists():
continue
for skill_md in skills_dir.rglob("SKILL.md"):
@@ -224,13 +293,15 @@ def _validate_file_path(file_path: str) -> Optional[str]:
Validate a file path for write_file/remove_file.
Must be under an allowed subdirectory and not escape the skill dir.
"""
from tools.path_security import has_traversal_component
if not file_path:
return "file_path is required."
normalized = Path(file_path)
# Prevent path traversal
if ".." in normalized.parts:
if has_traversal_component(file_path):
return "Path traversal ('..') is not allowed."
# Must be under an allowed subdirectory
@@ -245,6 +316,17 @@ def _validate_file_path(file_path: str) -> Optional[str]:
return None
def _resolve_skill_target(skill_dir: Path, file_path: str) -> Tuple[Optional[Path], Optional[str]]:
"""Resolve a supporting-file path and ensure it stays within the skill directory."""
from tools.path_security import validate_within_dir
target = skill_dir / file_path
error = validate_within_dir(target, skill_dir)
if error:
return None, error
return target, None
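# Illustrative usage (assumes validate_within_dir returns an error string for escaping paths, else None):
#   _resolve_skill_target(Path("/skills/demo"), "references/notes.md")  -> (Path(".../references/notes.md"), None)
#   _resolve_skill_target(Path("/skills/demo"), "../outside/SKILL.md")  -> (None, "<error from validate_within_dir>")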
def _atomic_write_text(file_path: Path, content: str, encoding: str = "utf-8") -> None:
"""
Atomically write text content to a file.
@@ -292,10 +374,19 @@ def _create_skill(name: str, content: str, category: str = None) -> Dict[str, An
if err:
return {"success": False, "error": err}
skill_dir = _resolve_skill_dir(name, category)
skill_md = skill_dir / "SKILL.md"
# Validate content
err = _validate_frontmatter(content)
err, context = _validate_frontmatter_details(content)
if err:
return {"success": False, "error": err}
return _format_error(
err,
skill_name=name,
file_path=str(skill_md),
context=context,
suggestion="Fix the YAML frontmatter before creating the skill."
)
err = _validate_content_size(content)
if err:
@@ -304,24 +395,29 @@ def _create_skill(name: str, content: str, category: str = None) -> Dict[str, An
# Check for name collisions across all directories
existing = _find_skill(name)
if existing:
return {
"success": False,
"error": f"A skill named '{name}' already exists at {existing['path']}."
}
return _format_error(
f"A skill named '{name}' already exists",
skill_name=name,
file_path=str(existing['path']),
suggestion="Use skill_manage(action='edit') to update the existing skill or choose a different name"
)
# Create the skill directory
skill_dir = _resolve_skill_dir(name, category)
skill_dir.mkdir(parents=True, exist_ok=True)
# Write SKILL.md atomically
skill_md = skill_dir / "SKILL.md"
_atomic_write_text(skill_md, content)
# Security scan — roll back on block
scan_error = _security_scan_skill(skill_dir)
if scan_error:
shutil.rmtree(skill_dir, ignore_errors=True)
return {"success": False, "error": scan_error}
return _format_error(
scan_error,
skill_name=name,
file_path=str(skill_dir),
suggestion="Review the security scan report and fix flagged issues"
)
result = {
"success": True,
@@ -340,19 +436,30 @@ def _create_skill(name: str, content: str, category: str = None) -> Dict[str, An
def _edit_skill(name: str, content: str) -> Dict[str, Any]:
"""Replace the SKILL.md of any existing skill (full rewrite)."""
err = _validate_frontmatter(content)
existing = _find_skill(name)
if not existing:
return _format_error(
f"Skill '{name}' not found",
skill_name=name,
suggestion="Use skills_list() to see available skills or skill_manage(action='create') to create it"
)
skill_md = existing["path"] / "SKILL.md"
err, context = _validate_frontmatter_details(content)
if err:
return {"success": False, "error": err}
return _format_error(
err,
skill_name=name,
file_path=str(skill_md),
context=context,
suggestion="Fix the YAML frontmatter before updating the skill."
)
err = _validate_content_size(content)
if err:
return {"success": False, "error": err}
existing = _find_skill(name)
if not existing:
return {"success": False, "error": f"Skill '{name}' not found. Use skills_list() to see available skills."}
skill_md = existing["path"] / "SKILL.md"
# Back up original content for rollback
original_content = skill_md.read_text(encoding="utf-8") if skill_md.exists() else None
_atomic_write_text(skill_md, content)
@@ -362,7 +469,12 @@ def _edit_skill(name: str, content: str) -> Dict[str, Any]:
if scan_error:
if original_content is not None:
_atomic_write_text(skill_md, original_content)
return {"success": False, "error": scan_error}
return _format_error(
scan_error,
skill_name=name,
file_path=str(skill_md),
suggestion="Review the security scan report and fix flagged issues"
)
return {
"success": True,
@@ -384,13 +496,23 @@ def _patch_skill(
Requires a unique match unless replace_all is True.
"""
if not old_string:
return {"success": False, "error": "old_string is required for 'patch'."}
return _format_error(
"old_string is required for 'patch'",
suggestion="Provide the text to find in the skill file. Use skill_manage(action='edit') for full rewrites."
)
if new_string is None:
return {"success": False, "error": "new_string is required for 'patch'. Use an empty string to delete matched text."}
return _format_error(
"new_string is required for 'patch'",
suggestion="Provide the replacement text. Use empty string '' to delete the matched text."
)
existing = _find_skill(name)
if not existing:
return {"success": False, "error": f"Skill '{name}' not found."}
return _format_error(
f"Skill '{name}' not found",
skill_name=name,
suggestion="Use skills_list() to see available skills"
)
skill_dir = existing["path"]
@@ -399,13 +521,20 @@ def _patch_skill(
err = _validate_file_path(file_path)
if err:
return {"success": False, "error": err}
target = skill_dir / file_path
target, err = _resolve_skill_target(skill_dir, file_path)
if err:
return {"success": False, "error": err}
else:
# Patching SKILL.md
target = skill_dir / "SKILL.md"
if not target.exists():
return {"success": False, "error": f"File not found: {target.relative_to(skill_dir)}"}
return _format_error(
f"File not found: {target.relative_to(skill_dir)}",
skill_name=name,
file_path=str(target),
suggestion=f"Check the file path. Available files in skill: {list(skill_dir.glob('**/*'))}"
)
content = target.read_text(encoding="utf-8")
@@ -421,11 +550,18 @@ def _patch_skill(
if match_error:
# Show a short preview of the file so the model can self-correct
preview = content[:500] + ("..." if len(content) > 500 else "")
return {
"success": False,
"error": match_error,
"file_preview": preview,
}
return _format_error(
f"Pattern match failed: {match_error}",
skill_name=name,
file_path=str(target),
context={
"replace_all": replace_all,
"target_exists": target.exists(),
"content_chars": len(content),
"file_preview": preview[:200] + "..." if len(preview) > 200 else preview,
},
suggestion="Check for whitespace differences, indentation, or escaping issues in old_string"
)
# Check size limit on the result
target_label = "SKILL.md" if not file_path else file_path
@@ -435,12 +571,15 @@ def _patch_skill(
# If patching SKILL.md, validate frontmatter is still intact
if not file_path:
err = _validate_frontmatter(new_content)
err, validation_context = _validate_frontmatter_details(new_content)
if err:
return {
"success": False,
"error": f"Patch would break SKILL.md structure: {err}",
}
return _format_error(
f"Patch would break SKILL.md structure: {err}",
skill_name=name,
file_path=str(target),
context=validation_context,
suggestion="Ensure the patch doesn't corrupt YAML frontmatter (--- delimiters and key: value format)"
)
original_content = content # for rollback
_atomic_write_text(target, new_content)
@@ -461,7 +600,11 @@ def _delete_skill(name: str) -> Dict[str, Any]:
"""Delete a skill."""
existing = _find_skill(name)
if not existing:
return {"success": False, "error": f"Skill '{name}' not found."}
return _format_error(
f"Skill '{name}' not found",
skill_name=name,
suggestion="Use skills_list() to see available skills"
)
skill_dir = existing["path"]
shutil.rmtree(skill_dir)
@@ -503,9 +646,15 @@ def _write_file(name: str, file_path: str, file_content: str) -> Dict[str, Any]:
existing = _find_skill(name)
if not existing:
return {"success": False, "error": f"Skill '{name}' not found. Create it first with action='create'."}
return _format_error(
f"Skill '{name}' not found",
skill_name=name,
suggestion="Use skills_list() to see available skills"
)
target = existing["path"] / file_path
target, err = _resolve_skill_target(existing["path"], file_path)
if err:
return {"success": False, "error": err}
target.parent.mkdir(parents=True, exist_ok=True)
# Back up for rollback
original_content = target.read_text(encoding="utf-8") if target.exists() else None
@@ -535,10 +684,16 @@ def _remove_file(name: str, file_path: str) -> Dict[str, Any]:
existing = _find_skill(name)
if not existing:
return {"success": False, "error": f"Skill '{name}' not found."}
return _format_error(
f"Skill '{name}' not found",
skill_name=name,
suggestion="Use skills_list() to see available skills"
)
skill_dir = existing["path"]
target = skill_dir / file_path
target, err = _resolve_skill_target(skill_dir, file_path)
if err:
return {"success": False, "error": err}
if not target.exists():
# List what's actually there for the model to see
available = []
@@ -589,19 +744,19 @@ def skill_manage(
"""
if action == "create":
if not content:
return json.dumps({"success": False, "error": "content is required for 'create'. Provide the full SKILL.md text (frontmatter + body)."}, ensure_ascii=False)
return tool_error("content is required for 'create'. Provide the full SKILL.md text (frontmatter + body).", success=False)
result = _create_skill(name, content, category)
elif action == "edit":
if not content:
return json.dumps({"success": False, "error": "content is required for 'edit'. Provide the full updated SKILL.md text."}, ensure_ascii=False)
return tool_error("content is required for 'edit'. Provide the full updated SKILL.md text.", success=False)
result = _edit_skill(name, content)
elif action == "patch":
if not old_string:
return json.dumps({"success": False, "error": "old_string is required for 'patch'. Provide the text to find."}, ensure_ascii=False)
return tool_error("old_string is required for 'patch'. Provide the text to find.", success=False)
if new_string is None:
return json.dumps({"success": False, "error": "new_string is required for 'patch'. Use empty string to delete matched text."}, ensure_ascii=False)
return tool_error("new_string is required for 'patch'. Use empty string to delete matched text.", success=False)
result = _patch_skill(name, old_string, new_string, file_path, replace_all)
elif action == "delete":
@@ -609,14 +764,14 @@ def skill_manage(
elif action == "write_file":
if not file_path:
return json.dumps({"success": False, "error": "file_path is required for 'write_file'. Example: 'references/api-guide.md'"}, ensure_ascii=False)
return tool_error("file_path is required for 'write_file'. Example: 'references/api-guide.md'", success=False)
if file_content is None:
return json.dumps({"success": False, "error": "file_content is required for 'write_file'."}, ensure_ascii=False)
return tool_error("file_content is required for 'write_file'.", success=False)
result = _write_file(name, file_path, file_content)
elif action == "remove_file":
if not file_path:
return json.dumps({"success": False, "error": "file_path is required for 'remove_file'."}, ensure_ascii=False)
return tool_error("file_path is required for 'remove_file'.", success=False)
result = _remove_file(name, file_path)
else:
@@ -727,7 +882,7 @@ SKILL_MANAGE_SCHEMA = {
# --- Registry ---
from tools.registry import registry
from tools.registry import registry, tool_error
registry.register(
name="skill_manage",