Compare commits
1 Commits
burn/priva
...
burn/skill
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a973c2d1f6 |
@@ -1,426 +0,0 @@
|
||||
"""Privacy filter for remote API calls — PII redaction before wire transit.
|
||||
|
||||
Strips personally identifiable information (PII) from messages before they
|
||||
leave the local machine and hit a remote LLM provider. Designed to sit
|
||||
between the message list and the API client so local model routing can
|
||||
bypass it entirely.
|
||||
|
||||
Sensitive categories detected:
|
||||
- Email addresses
|
||||
- Phone numbers (E.164 and common formats)
|
||||
- Physical addresses / private file paths
|
||||
- Crypto wallet addresses (Bitcoin, Ethereum, generic EVM)
|
||||
- SSN / government ID patterns
|
||||
- Real names (opt-in via config)
|
||||
|
||||
Integration point: call ``filter_messages()`` on the ``api_messages`` list
|
||||
inside ``_build_api_kwargs()`` or just before ``_interruptible_api_call()``
|
||||
when the active provider is a remote endpoint (not localhost).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Configuration — snapshot at import time
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_ENV = os.getenv
|
||||
|
||||
#: If True, privacy filtering is enabled by default. Can be toggled via
|
||||
#: ``HERMES_PRIVACY_FILTER=0`` to disable.
|
||||
_PRIVACY_FILTER_ENABLED: bool = _ENV("HERMES_PRIVACY_FILTER", "").lower() not in (
|
||||
"0",
|
||||
"false",
|
||||
"no",
|
||||
"off",
|
||||
)
|
||||
|
||||
#: If True, filter is on even when the provider looks local (for testing).
|
||||
_FORCE_FILTER: bool = _ENV("HERMES_PRIVACY_FILTER_FORCE", "").lower() in (
|
||||
"1",
|
||||
"true",
|
||||
"yes",
|
||||
"on",
|
||||
)
|
||||
|
||||
#: Tokens shorter than this are fully masked; longer ones get prefix+suffix.
|
||||
_MASK_THRESHOLD = 8
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Pattern catalogue — PII and sensitive data detectors
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#: RFC 5322-lite email pattern (covers 99% of real addresses).
#: Group 1 = local part, group 2 = domain.  The lookbehind/lookahead stop a
#: match from starting or ending inside a longer address-like token.
_EMAIL_RE = re.compile(
    r"""(?<![A-Za-z0-9._%+\-])"""
    r"""([A-Za-z0-9._%+\-]+)@([A-Za-z0-9.\-]+\.[A-Za-z]{2,})"""
    r"""(?![A-Za-z0-9._%+\-])"""
)

#: E.164 phone numbers: +1… through +9…, 7-15 digits total.
#: _PHONE_US_RE additionally catches common US formats like
#: (555) 123-4567 and 555-123-4567.
_PHONE_E164_RE = re.compile(r"(\+[1-9]\d{6,14})(?![\d])")
_PHONE_US_RE = re.compile(
    r"""(?:\+?1[\s.-]?)?"""  # optional country code
    r"""(?:\(?[2-9]\d{2}\)?[\s.-]?)"""  # area code
    r"""(?:[2-9]\d{2}[\s.-]?)"""  # exchange
    r"""(?:\d{4})"""  # subscriber
    r"""(?![\d])"""
)

#: US Social Security Number: XXX-XX-XXXX, excluding invalid 000/666/9xx
#: area numbers, 00 group, and 0000 serial.  Separator is a dash or a
#: single whitespace character (so "123 45 6789" also matches).
_SSN_RE = re.compile(
    r"""(?<!\d)"""
    r"""(?!000|666|9\d{2})\d{3}"""
    r"""[\s-]"""
    r"""(?!00)\d{2}"""
    r"""[\s-]"""
    r"""(?!0000)\d{4}"""
    r"""(?!\d)"""
)

#: Crypto wallet addresses.
#: Bitcoin: legacy base58 starting with 1 or 3 (26-36 chars total) or
#: bech32 starting with bc1 (28-52 chars total, per the quantifiers below).
_BITCOIN_RE = re.compile(r"\b([13][a-km-zA-HJ-NP-Z1-9]{25,35}|bc1[a-zA-HJ-NP-Z0-9]{25,49})\b")
#: Ethereum / EVM: 0x + exactly 40 hex chars.
_ETHEREUM_RE = re.compile(r"\b(0x[a-fA-F0-9]{40})\b")
#: Fallback for long 0x-prefixed hex (>= 32 hex chars) that is not exactly
#: the 40-char Ethereum shape.  Plain git hashes lack the "0x" prefix and
#: are therefore not caught here.
_GENERIC_WALLET_RE = re.compile(r"\b(0x[a-fA-F0-9]{32,})\b")

#: Unix home paths: /home/user, /Users/username, /root — plus any number
#: of trailing path segments.
_UNIX_HOME_PATH_RE = re.compile(
    r"""(?:/home/[\w.\-]+|/Users/[\w.\-]+|/root)(?:/[\w.\-]+)*"""
)
#: Windows user profile paths: C:\Users\username (any drive letter,
#: case-insensitive).
_WIN_HOME_PATH_RE = re.compile(
    r"""[A-Z]:\\Users\\[\w.\-]+(?:\\[\w.\-]+)*""", re.IGNORECASE
)

#: PEM-style private key blocks (SSH, GPG, RSA, …) — the entire
#: BEGIN…END span is matched, including newlines.
_PRIVATE_KEY_BLOCK_RE = re.compile(
    r"""-----BEGIN[A-Z ]*PRIVATE KEY-----[\s\S]*?-----END[A-Z ]*PRIVATE KEY-----"""
)

#: Common "name:" patterns in structured input (YAML, JSON, form data).
#: Only matches when followed by a plausible 2+ word capitalized name.
#: NOTE(review): defined but not referenced by _FILTER_RULES or
#: _SENSITIVE_DETECTION_RULES — the module docstring says real-name
#: redaction is opt-in via config; confirm the wiring before relying on it.
_NAME_FIELD_RE = re.compile(
    r"""(?:\"name\"\s*:\s*\"|name:\s*)([A-Z][a-z]+(?:\s+[A-Z][a-z]+)+)"""
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Masking helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _mask_value(value: str, visible: int = 4) -> str:
    """Mask *value*, keeping at most *visible* characters at each end.

    Tokens at or below ``_MASK_THRESHOLD`` chars are fully replaced —
    showing any part of a short token would leak too large a fraction.
    NOTE(review): currently unreferenced within this module.
    """
    if len(value) <= _MASK_THRESHOLD:
        return "[REDACTED]"
    # Never reveal more than a quarter of the token, but always at least 2 chars.
    keep = min(visible, len(value) // 4)
    if keep < 2:
        keep = 2
    return value[:keep] + "…" + value[-keep:]
|
||||
|
||||
|
||||
def _mask_email(m: re.Match) -> str:
|
||||
user, domain = m.group(1), m.group(2)
|
||||
masked_user = user[0] + "…" if len(user) > 1 else "…"
|
||||
return f"{masked_user}@{domain}"
|
||||
|
||||
|
||||
def _mask_phone(m: re.Match) -> str:
|
||||
raw = m.group(0)
|
||||
digits = re.sub(r"\D", "", raw)
|
||||
if len(digits) <= 6:
|
||||
return "[REDACTED-PHONE]"
|
||||
return f"+{'*' * (len(digits) - 4)}{digits[-4:]}"
|
||||
|
||||
|
||||
def _mask_wallet(m: re.Match) -> str:
|
||||
addr = m.group(1)
|
||||
if addr.startswith("0x"):
|
||||
return f"0x{'*' * 6}…{addr[-4:]}"
|
||||
if addr.startswith("bc1"):
|
||||
return f"bc1{'*' * 4}…{addr[-4:]}"
|
||||
# Legacy Bitcoin
|
||||
return f"{addr[:4]}{'*' * 4}…{addr[-4:]}"
|
||||
|
||||
|
||||
def _mask_path(m: re.Match) -> str:
|
||||
raw = m.group(0)
|
||||
parts = raw.replace("\\", "/").split("/")
|
||||
if len(parts) >= 3:
|
||||
return f"{parts[0]}/{parts[1]}/[REDACTED-PATH]"
|
||||
return "[REDACTED-PATH]"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Core filtering — string level
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#: Ordered list of ``(compiled_pattern, replacement)`` pairs applied to every
#: string.  A replacement may be a plain string or a callable; both forms are
#: accepted directly by ``re.Pattern.sub``.  Order matters: the multi-line
#: private-key rule runs first, and the exact Ethereum rule runs before the
#: looser generic-hex rule so it gets first claim on 40-char addresses.
_FILTER_RULES: list[tuple[re.Pattern, Any]] = [
    # 1. Private key blocks — must run first (multi-line)
    (_PRIVATE_KEY_BLOCK_RE, "[REDACTED-PRIVATE-KEY]"),
    # 2. Emails
    (_EMAIL_RE, _mask_email),
    # 3. Phone numbers — E.164 first, then US format
    (_PHONE_E164_RE, _mask_phone),
    (_PHONE_US_RE, _mask_phone),
    # 4. SSN — produces e.g. "***-5**-6789": one middle digit plus the last
    #    four are kept ([-6:-5] is the second digit of the middle group).
    (_SSN_RE, lambda m: f"{'*' * 3}-{m.group(0)[-6:-5]}{'*' * 2}-{m.group(0)[-4:]}"),
    # 5. Crypto wallets — Bitcoin then Ethereum then generic
    (_BITCOIN_RE, _mask_wallet),
    (_ETHEREUM_RE, _mask_wallet),
    (_GENERIC_WALLET_RE, _mask_wallet),
    # 6. File paths with user dirs
    (_UNIX_HOME_PATH_RE, _mask_path),
    (_WIN_HOME_PATH_RE, _mask_path),
]
|
||||
|
||||
|
||||
def filter_text(text: str) -> str:
    """Apply all privacy filter rules to a single string.

    Safe for any string input — non-matching text passes through
    unchanged.  ``None`` is tolerated and normalized to the empty string.

    Rules are applied in ``_FILTER_RULES`` order; each rule rewrites the
    result of the previous one.
    """
    if text is None:
        return ""
    if not text:
        return text
    # ``re.Pattern.sub`` accepts either a replacement string or a callable,
    # so one call covers both rule styles.  (The original code had an
    # if/else on callable() whose two branches were byte-identical.)
    for pattern, replacement in _FILTER_RULES:
        text = pattern.sub(replacement, text)
    return text
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Detection — is this content sensitive?
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#: ``(category_name, pattern)`` pairs whose mere presence indicates
#: "route to local model only".  Category names are what
#: ``detect_sensitive`` / ``has_sensitive_content`` report to callers.
#: NOTE(review): _GENERIC_WALLET_RE and _NAME_FIELD_RE are not listed
#: here even though the former is in _FILTER_RULES — confirm whether
#: generic hex should also trigger local routing.
_SENSITIVE_DETECTION_RULES: list[tuple[str, re.Pattern]] = [
    ("email", _EMAIL_RE),
    ("phone", _PHONE_E164_RE),
    ("phone_us", _PHONE_US_RE),
    ("ssn", _SSN_RE),
    ("bitcoin_wallet", _BITCOIN_RE),
    ("ethereum_wallet", _ETHEREUM_RE),
    ("private_key", _PRIVATE_KEY_BLOCK_RE),
    ("user_path_unix", _UNIX_HOME_PATH_RE),
    ("user_path_win", _WIN_HOME_PATH_RE),
]
|
||||
|
||||
|
||||
def detect_sensitive(text: str) -> list[str]:
    """Return the sensitive categories found in *text*.

    An empty list means the text is safe for remote APIs (after
    filtering).  A non-empty list means the text *contains* PII — the
    caller should consider routing to a local model instead.  Category
    names come from ``_SENSITIVE_DETECTION_RULES``, in rule order.
    """
    if not text:
        return []
    return [name for name, pattern in _SENSITIVE_DETECTION_RULES if pattern.search(text)]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Message-level filtering
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _extract_text_from_content(content: Any) -> str:
|
||||
"""Extract plain text from OpenAI message content (str or list of parts)."""
|
||||
if content is None:
|
||||
return ""
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
if isinstance(content, list):
|
||||
parts = []
|
||||
for part in content:
|
||||
if isinstance(part, dict):
|
||||
if part.get("type") == "text":
|
||||
parts.append(part.get("text", ""))
|
||||
elif part.get("type") == "tool_result":
|
||||
# tool_result content can be nested
|
||||
inner = part.get("content", "")
|
||||
if isinstance(inner, str):
|
||||
parts.append(inner)
|
||||
elif isinstance(inner, list):
|
||||
for p in inner:
|
||||
if isinstance(p, dict) and p.get("type") == "text":
|
||||
parts.append(p.get("text", ""))
|
||||
elif isinstance(part, str):
|
||||
parts.append(part)
|
||||
return "\n".join(parts)
|
||||
return str(content)
|
||||
|
||||
|
||||
def _set_content_text(content: Any, filtered: str) -> Any:
    """Reconstruct message content with PII-filtered text.

    For ``None`` content, returns ``None``.  For plain-string (or
    atomic) content, *filtered* — the already-filtered text produced by
    the caller — replaces it wholesale.

    For multi-part list content, each ``text`` part and each string
    ``tool_result`` part is filtered individually via ``filter_text``.
    The pre-joined *filtered* string is deliberately NOT written into
    the parts: doing so (as the previous implementation did for the
    first text part) duplicated every sibling part's text into that
    part AND left second-and-later text parts holding their original,
    unfiltered text — a PII leak.

    NOTE(review): a ``tool_result`` whose inner content is a nested
    part *list* is still passed through unfiltered, matching prior
    behavior — confirm whether nested lists occur in practice.
    """
    if content is None:
        return None
    if isinstance(content, str):
        return filtered
    if not isinstance(content, list):
        return filtered

    result = []
    for part in content:
        if isinstance(part, dict) and part.get("type") == "text":
            result.append({**part, "text": filter_text(part.get("text", ""))})
        elif isinstance(part, dict) and part.get("type") == "tool_result":
            inner = part.get("content", "")
            if isinstance(inner, str):
                result.append({**part, "content": filter_text(inner)})
            else:
                result.append(part)
        else:
            # Non-text parts (images, etc.) pass through untouched.
            result.append(part)
    return result
|
||||
|
||||
|
||||
def filter_messages(messages: list[dict]) -> list[dict]:
    """Return a deep-copied message list with PII redacted.

    Each message's ``content`` field is filtered.  Tool call arguments —
    both a direct ``arguments`` key and the OpenAI
    ``tool_calls[i]["function"]["arguments"]`` form — are filtered as
    JSON strings.  ``name`` fields are left untouched (they are role
    labels, not PII).  The input list is never mutated.
    """
    if not messages:
        return messages

    redacted = copy.deepcopy(messages)
    for entry in redacted:
        if not isinstance(entry, dict):
            continue

        # Redact the message body, preserving its structural shape.
        if "content" in entry:
            plain = _extract_text_from_content(entry["content"])
            entry["content"] = _set_content_text(entry["content"], filter_text(plain))

        # Redact tool-call arguments (they arrive as JSON strings).
        calls = entry.get("tool_calls")
        if not isinstance(calls, list):
            continue
        for call in calls:
            if not isinstance(call, dict):
                continue
            direct = call.get("arguments")
            if isinstance(direct, str):
                call["arguments"] = filter_text(direct)
            fn = call.get("function")
            if isinstance(fn, dict) and isinstance(fn.get("arguments"), str):
                fn["arguments"] = filter_text(fn["arguments"])

    return redacted
|
||||
|
||||
|
||||
def has_sensitive_content(messages: list[dict]) -> list[str]:
    """Scan messages and return all sensitive categories found, sorted.

    Scans each message's content plus both tool-call argument forms.
    Returns an empty list if no PII is detected (safe for remote).
    """
    found: set[str] = set()
    for entry in messages:
        if not isinstance(entry, dict):
            continue
        body = _extract_text_from_content(entry.get("content", ""))
        found.update(detect_sensitive(body))

        calls = entry.get("tool_calls")
        if not isinstance(calls, list):
            continue
        for call in calls:
            if not isinstance(call, dict):
                continue
            direct = call.get("arguments", "")
            if isinstance(direct, str):
                found.update(detect_sensitive(direct))
            # OpenAI function format: call["function"]["arguments"]
            fn = call.get("function")
            if isinstance(fn, dict):
                nested = fn.get("arguments", "")
                if isinstance(nested, str):
                    found.update(detect_sensitive(nested))
    return sorted(found)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Provider routing helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
#: Substrings that mark a base URL as pointing at the local machine.
_LOCAL_PATTERNS = (
    "localhost",
    "127.0.0.1",
    "::1",
    "0.0.0.0",
)


def is_remote_provider(base_url: str) -> bool:
    """Return True if *base_url* points to a remote (non-local) provider.

    Matching is a case-insensitive substring test against
    ``_LOCAL_PATTERNS``; an empty/unset URL is treated as local.
    """
    if not base_url:
        # No URL configured — assume local so nothing gets filtered away.
        return False
    haystack = base_url.lower()
    for marker in _LOCAL_PATTERNS:
        if marker in haystack:
            return False
    return True
|
||||
|
||||
|
||||
def should_route_local(messages: list[dict], base_url: str) -> tuple[bool, list[str]]:
    """Decide whether messages should stay on local models.

    Returns ``(should_local, reasons)`` where *reasons* lists the
    sensitive categories detected.  A local *base_url*, or a disabled
    filter (and no force override), short-circuits to ``(False, [])``.
    """
    remote = is_remote_provider(base_url)
    filter_active = _PRIVACY_FILTER_ENABLED or _FORCE_FILTER
    if not remote or not filter_active:
        return False, []
    reasons = has_sensitive_content(messages)
    return bool(reasons), reasons
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Integration hook — drop-in replacement for the API call path
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def prepare_for_remote(messages: list[dict], base_url: str) -> tuple[list[dict], list[str]]:
    """Filter messages for a remote API call.

    Returns ``(filtered_messages, detected_categories)``.  When the
    endpoint is local, or the filter is disabled (and not forced), the
    ORIGINAL message list object is returned untouched with an empty
    category list — callers may rely on identity for the pass-through
    case.
    """
    if not is_remote_provider(base_url):
        return messages, []
    if not (_PRIVACY_FILTER_ENABLED or _FORCE_FILTER):
        return messages, []

    categories = has_sensitive_content(messages)
    if categories:
        logger.info(
            "PrivacyFilter: redacting %d sensitive category match(es) before remote call: %s",
            len(categories),
            ", ".join(categories),
        )
    return filter_messages(messages), categories
|
||||
@@ -1,415 +0,0 @@
|
||||
"""Tests for agent.privacy_filter — PII redaction for remote API calls."""
|
||||
|
||||
import os
|
||||
import pytest
|
||||
|
||||
# Ensure the filter is active for all tests
@pytest.fixture(autouse=True)
def _enable_filter(monkeypatch):
    """Force the privacy filter on regardless of the host environment.

    The module snapshots its env-derived flags at import time, so the
    module attributes must be patched directly — clearing the env var
    alone would not affect an already-imported module.
    """
    monkeypatch.delenv("HERMES_PRIVACY_FILTER", raising=False)
    monkeypatch.setattr("agent.privacy_filter._PRIVACY_FILTER_ENABLED", True)
    monkeypatch.setattr("agent.privacy_filter._FORCE_FILTER", True)
|
||||
|
||||
|
||||
from agent.privacy_filter import (
|
||||
filter_text,
|
||||
filter_messages,
|
||||
detect_sensitive,
|
||||
has_sensitive_content,
|
||||
is_remote_provider,
|
||||
should_route_local,
|
||||
prepare_for_remote,
|
||||
)
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# filter_text — string-level redaction
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
class TestEmailRedaction:
    """filter_text removes email addresses while keeping the domain visible."""

    def test_simple_email(self):
        result = filter_text("Contact me at alice@example.com for details.")
        assert "alice@example.com" not in result
        # Masked form: first char of the local part + ellipsis + full domain.
        assert "a…@example.com" in result

    def test_email_with_dots(self):
        result = filter_text("john.doe+work@corp.co.uk")
        assert "john.doe+work@corp.co.uk" not in result

    def test_multiple_emails(self):
        text = "CC: first@test.io and second@test.io"
        result = filter_text(text)
        assert "first@test.io" not in result
        assert "second@test.io" not in result

    def test_email_in_code_block(self):
        # Quotes/braces around the address must not defeat the pattern.
        text = "config: { email: 'dev@company.com' }"
        result = filter_text(text)
        assert "dev@company.com" not in result
|
||||
|
||||
|
||||
class TestPhoneRedaction:
    """filter_text masks E.164 and common US phone formats, keeping last 4 digits."""

    def test_e164_format(self):
        result = filter_text("Call me at +14155551234")
        assert "+14155551234" not in result
        assert "1234" in result  # last 4 visible

    def test_us_with_dashes(self):
        result = filter_text("Phone: 415-555-1234")
        assert "415-555-1234" not in result

    def test_us_with_parens(self):
        result = filter_text("Phone: (415) 555-1234")
        assert "415" not in result or "555-1234" not in result

    def test_international(self):
        result = filter_text("WhatsApp: +442071234567")
        assert "+442071234567" not in result

    def test_short_number_not_redacted(self):
        # 4-digit extension should pass through — too short for any phone rule.
        result = filter_text("Ext: 1234")
        assert "1234" in result
|
||||
|
||||
|
||||
class TestSSNRedaction:
    """filter_text masks SSN-shaped numbers (dash- or space-separated)."""

    def test_ssn(self):
        result = filter_text("SSN: 123-45-6789")
        assert "6789" in result or "[REDACTED" in result
        assert "123-45-6789" not in result

    def test_ssn_no_dashes(self):
        # Space-separated variant — the regex accepts [\s-] as separator.
        result = filter_text("123 45 6789")
        assert "123 45 6789" not in result
|
||||
|
||||
|
||||
class TestWalletRedaction:
    """filter_text masks Bitcoin (legacy + bech32) and Ethereum addresses."""

    def test_bitcoin_legacy(self):
        addr = "1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2"
        result = filter_text(f"Send to {addr}")
        assert addr not in result
        assert "1BvB" in result  # prefix preserved
        assert "NVN2" in result  # suffix preserved

    def test_bitcoin_bech32(self):
        addr = "bc1qxy2kgdygjrsqtzq2n0yrf2493p83kkfjhx0wlh"
        result = filter_text(f"Wallet: {addr}")
        assert addr not in result
        assert "bc1" in result

    def test_ethereum(self):
        addr = "0x742d35Cc6634C0532925a3b844Bc9e7595f8Ca39"
        result = filter_text(f"ETH: {addr}")
        assert addr not in result
        assert "0x" in result
        assert "Ca39" in result

    def test_multiple_wallets(self):
        btc = "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"
        eth = "0x0000000000000000000000000000000000000000"
        result = filter_text(f"{btc} and {eth}")
        assert btc not in result
        assert eth not in result
|
||||
|
||||
|
||||
class TestPathRedaction:
    """filter_text masks user-home paths but leaves system/relative paths alone."""

    def test_unix_home(self):
        result = filter_text("File at /home/alice/secrets/key.pem")
        assert "/home/alice/secrets" not in result
        assert "/home" in result

    def test_macos_home(self):
        result = filter_text("Path: /Users/bob/Documents/taxes.pdf")
        assert "/Users/bob/Documents" not in result

    def test_windows_path(self):
        result = filter_text("C:\\Users\\Charlie\\Desktop\\notes.txt")
        assert "Charlie" not in result

    def test_relative_path_unchanged(self):
        text = "File: ./src/main.py"
        result = filter_text(text)
        assert result == text

    def test_system_path_unchanged(self):
        text = "Binary at /usr/local/bin/python"
        assert filter_text(text) == text
|
||||
|
||||
|
||||
class TestPrivateKeyRedaction:
    """filter_text replaces whole PEM BEGIN/END private-key blocks."""

    def test_pem_key(self):
        key = "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASC\n-----END PRIVATE KEY-----"
        result = filter_text(f"Key: {key}")
        assert "MIIEvQIBADAN" not in result
        assert "[REDACTED" in result

    def test_rsa_key(self):
        # "RSA" in the header is covered by the [A-Z ]* part of the pattern.
        key = "-----BEGIN RSA PRIVATE KEY-----\ndata\n-----END RSA PRIVATE KEY-----"
        result = filter_text(key)
        assert "data" not in result
|
||||
|
||||
|
||||
class TestPassthrough:
    """Non-sensitive text must survive filter_text byte-for-byte."""

    def test_normal_text(self):
        text = "Hello, please write a function that sorts a list."
        assert filter_text(text) == text

    def test_code(self):
        text = "def hello():\n    print('world')\n    return 42"
        assert filter_text(text) == text

    def test_empty_string(self):
        assert filter_text("") == ""

    def test_none(self):
        # None is normalized to the empty string rather than raising.
        assert filter_text(None) == ""

    def test_technical_discussion(self):
        text = "The model uses CUDA 12.1 with tensor cores for FP16."
        assert filter_text(text) == text

    def test_api_url_unchanged(self):
        text = "Connect to https://api.openai.com/v1/chat/completions"
        assert filter_text(text) == text
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# detect_sensitive — category detection
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
class TestDetection:
    """detect_sensitive reports category names without modifying text."""

    def test_no_pii(self):
        assert detect_sensitive("Hello world") == []

    def test_detects_email(self):
        cats = detect_sensitive("Email me at alice@example.com")
        assert "email" in cats

    def test_detects_phone(self):
        cats = detect_sensitive("Call +14155551234")
        assert "phone" in cats

    def test_detects_wallet(self):
        cats = detect_sensitive("My BTC: 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa")
        assert "bitcoin_wallet" in cats

    def test_detects_eth(self):
        addr = "0x742d35Cc6634C0532925a3b844Bc9e7595f8Ca39"
        cats = detect_sensitive(f"ETH addr: {addr}")
        assert "ethereum_wallet" in cats

    def test_detects_multiple(self):
        cats = detect_sensitive("alice@test.com +14155551234")
        assert "email" in cats
        assert "phone" in cats

    def test_empty(self):
        assert detect_sensitive("") == []

    def test_none(self):
        # None is falsy, so it short-circuits to the empty list.
        assert detect_sensitive(None) == []
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# filter_messages — message list level
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
class TestMessageFiltering:
    """filter_messages: deep-copied redaction across content shapes and tool calls."""

    def test_filters_content_string(self):
        messages = [
            {"role": "user", "content": "My email is bob@example.com, please remember it."}
        ]
        result = filter_messages(messages)
        assert "bob@example.com" not in result[0]["content"]
        # Original unchanged (deep copy)
        assert "bob@example.com" in messages[0]["content"]

    def test_filters_content_parts(self):
        # Multi-part content: only the text part is rewritten.
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Here's my SSN: 123-45-6789"},
                    {"type": "image_url", "image_url": {"url": "https://img.com/a.png"}},
                ],
            }
        ]
        result = filter_messages(messages)
        text_part = [p for p in result[0]["content"] if p.get("type") == "text"][0]
        assert "123-45-6789" not in text_part["text"]
        # Image URL untouched
        img_part = [p for p in result[0]["content"] if p.get("type") == "image_url"][0]
        assert img_part["image_url"]["url"] == "https://img.com/a.png"

    def test_filters_tool_call_arguments(self):
        # OpenAI function-call format: arguments live as a JSON string.
        messages = [
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_123",
                        "type": "function",
                        "function": {
                            "name": "send_email",
                            "arguments": '{"to": "alice@example.com", "body": "Hi Alice"}',
                        },
                    }
                ],
            }
        ]
        result = filter_messages(messages)
        args_str = result[0]["tool_calls"][0]["function"]["arguments"]
        assert "alice@example.com" not in args_str

    def test_preserves_system_message(self):
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"},
        ]
        result = filter_messages(messages)
        assert result[0]["content"] == "You are a helpful assistant."
        assert result[1]["content"] == "Hello!"

    def test_deep_copy_safety(self):
        original = [{"role": "user", "content": "test@example.com is my email"}]
        result = filter_messages(original)
        # Modifying result doesn't affect original
        result[0]["content"] = "modified"
        assert "test@example.com" in original[0]["content"]

    def test_handles_none_content(self):
        # None content (assistant message with only tool calls) stays None.
        messages = [{"role": "assistant", "content": None, "tool_calls": []}]
        result = filter_messages(messages)
        assert result[0]["content"] is None

    def test_handles_empty_messages(self):
        assert filter_messages([]) == []

    def test_preserves_tool_result_content(self):
        messages = [
            {
                "role": "tool",
                "content": "Found file at /usr/bin/secret but paths like /home/alice/x should be redacted",
                "tool_call_id": "call_123",
            }
        ]
        result = filter_messages(messages)
        assert "/home/alice" not in result[0]["content"]
        assert "/usr/bin" in result[0]["content"]  # system path preserved
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# has_sensitive_content — message-level detection
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
class TestHasSensitiveContent:
    """has_sensitive_content scans both message bodies and tool-call arguments."""

    def test_clean_messages(self):
        messages = [{"role": "user", "content": "Write me a poem"}]
        assert has_sensitive_content(messages) == []

    def test_email_detected(self):
        messages = [{"role": "user", "content": "email me at a@b.com"}]
        cats = has_sensitive_content(messages)
        assert "email" in cats

    def test_tool_args_scanned(self):
        # PII hidden inside function-call arguments must still be detected.
        messages = [
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "function": {
                            "name": "search",
                            "arguments": '{"query": "user +14155551234"}',
                        }
                    }
                ],
            }
        ]
        cats = has_sensitive_content(messages)
        assert "phone" in cats
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# Provider routing
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
class TestProviderRouting:
    """is_remote_provider / should_route_local endpoint classification."""

    def test_remote_openai(self):
        assert is_remote_provider("https://api.openai.com/v1") is True

    def test_remote_openrouter(self):
        assert is_remote_provider("https://openrouter.ai/api/v1") is True

    def test_local_localhost(self):
        assert is_remote_provider("http://localhost:11434/v1") is False

    def test_local_127(self):
        assert is_remote_provider("http://127.0.0.1:8080/v1") is False

    def test_empty_assumes_local(self):
        # Unset base URL is treated as local (nothing leaves the machine).
        assert is_remote_provider("") is False

    def test_route_local_with_pii(self):
        messages = [{"role": "user", "content": "My email: a@b.com"}]
        should, reasons = should_route_local(messages, "https://api.openai.com/v1")
        assert should is True
        assert "email" in reasons

    def test_no_route_without_pii(self):
        messages = [{"role": "user", "content": "Hello!"}]
        should, reasons = should_route_local(messages, "https://api.openai.com/v1")
        assert should is False

    def test_no_route_for_local_provider(self):
        # Already local — no re-routing needed even with PII present.
        messages = [{"role": "user", "content": "Email: a@b.com"}]
        should, reasons = should_route_local(messages, "http://localhost:11434/v1")
        assert should is False
|
||||
|
||||
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
# prepare_for_remote — integration hook
|
||||
# ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
class TestPrepareForRemote:
    """prepare_for_remote — the end-to-end integration hook."""

    def test_filters_remote_with_pii(self):
        messages = [
            {"role": "user", "content": "Send to alice@test.com, wallet 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"},
        ]
        result, cats = prepare_for_remote(messages, "https://api.openai.com/v1")
        assert "alice@test.com" not in result[0]["content"]
        assert "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa" not in result[0]["content"]
        assert "email" in cats
        assert "bitcoin_wallet" in cats

    def test_passes_through_local(self):
        messages = [{"role": "user", "content": "Email: a@b.com"}]
        result, cats = prepare_for_remote(messages, "http://localhost:11434/v1")
        # Identity pass-through: the untouched input list object comes back.
        assert result is messages  # same object
        assert cats == []

    def test_passes_through_clean_remote(self):
        messages = [{"role": "user", "content": "Sort this list"}]
        result, cats = prepare_for_remote(messages, "https://api.openai.com/v1")
        assert cats == []
        assert result[0]["content"] == "Sort this list"

    def test_realistic_conversation(self):
        """Full conversation with mixed sensitive and safe messages."""
        messages = [
            {"role": "system", "content": "You are a helpful coding assistant."},
            {"role": "user", "content": "Help me write a Python HTTP server."},
            {"role": "assistant", "content": "Here's a simple example:\n```python\nimport http.server\n```"},
            {"role": "user", "content": "Great! Now deploy it to my server at /home/deploy/app. My email is admin@mycompany.com"},
        ]
        result, cats = prepare_for_remote(messages, "https://api.openai.com/v1")
        # Safe messages unchanged
        assert result[0]["content"] == messages[0]["content"]
        assert result[1]["content"] == messages[1]["content"]
        # Sensitive message filtered
        assert "admin@mycompany.com" not in result[3]["content"]
        assert "/home/deploy" not in result[3]["content"]
        assert "email" in cats
        assert "user_path_unix" in cats
|
||||
298
tests/test_skill_manager_pokayoke.py
Normal file
298
tests/test_skill_manager_pokayoke.py
Normal file
@@ -0,0 +1,298 @@
|
||||
"""Tests for poka-yoke skill edit revert and validate action."""
|
||||
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.fixture()
def isolated_skills_dir(tmp_path, monkeypatch):
    """Point SKILLS_DIR at a temp directory for test isolation.

    Both modules that hold a SKILLS_DIR reference are patched so neither
    reads the real skills tree during a test.
    """
    skills_dir = tmp_path / "skills"
    skills_dir.mkdir()
    monkeypatch.setattr("tools.skill_manager_tool.SKILLS_DIR", skills_dir)
    monkeypatch.setattr("tools.skills_tool.SKILLS_DIR", skills_dir)
    # Also patch skill discovery so _find_skill and validate look in our temp dir
    monkeypatch.setattr(
        "agent.skill_utils.get_all_skills_dirs",
        lambda: [skills_dir],
    )
    return skills_dir
|
||||
|
||||
|
||||
_VALID_SKILL = """\
|
||||
---
|
||||
name: test-skill
|
||||
description: A test skill for unit tests.
|
||||
---
|
||||
|
||||
# Test Skill
|
||||
|
||||
Instructions here.
|
||||
"""
|
||||
|
||||
|
||||
def _create_test_skill(skills_dir: Path, name: str = "test-skill", content: str = _VALID_SKILL):
|
||||
skill_dir = skills_dir / name
|
||||
skill_dir.mkdir(parents=True, exist_ok=True)
|
||||
(skill_dir / "SKILL.md").write_text(content)
|
||||
return skill_dir
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _edit_skill revert on failure
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestEditRevert:
|
||||
def test_edit_preserves_original_on_invalid_frontmatter(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
bad_content = "---\nname: test-skill\n---\n" # missing description
|
||||
result = json.loads(skill_manage("edit", "test-skill", content=bad_content))
|
||||
assert result["success"] is False
|
||||
assert "Original file preserved" in result["error"]
|
||||
# Original should be untouched
|
||||
original = (isolated_skills_dir / "test-skill" / "SKILL.md").read_text()
|
||||
assert "A test skill" in original
|
||||
|
||||
def test_edit_preserves_original_on_empty_body(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
bad_content = "---\nname: test-skill\ndescription: ok\n---\n"
|
||||
result = json.loads(skill_manage("edit", "test-skill", content=bad_content))
|
||||
assert result["success"] is False
|
||||
assert "Original file preserved" in result["error"]
|
||||
original = (isolated_skills_dir / "test-skill" / "SKILL.md").read_text()
|
||||
assert "Instructions here" in original
|
||||
|
||||
def test_edit_reverts_on_write_error(self, isolated_skills_dir, monkeypatch):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
|
||||
def boom(*a, **kw):
|
||||
raise OSError("disk full")
|
||||
|
||||
monkeypatch.setattr("tools.skill_manager_tool._atomic_write_text", boom)
|
||||
result = json.loads(skill_manage("edit", "test-skill", content=_VALID_SKILL))
|
||||
assert result["success"] is False
|
||||
assert "write error" in result["error"].lower()
|
||||
assert "Original file preserved" in result["error"]
|
||||
|
||||
def test_edit_reverts_on_security_scan_block(self, isolated_skills_dir, monkeypatch):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
monkeypatch.setattr(
|
||||
"tools.skill_manager_tool._security_scan_skill",
|
||||
lambda path: "Blocked: suspicious content",
|
||||
)
|
||||
new_content = "---\nname: test-skill\ndescription: updated\n---\n\n# Updated\n"
|
||||
result = json.loads(skill_manage("edit", "test-skill", content=new_content))
|
||||
assert result["success"] is False
|
||||
assert "Original file preserved" in result["error"]
|
||||
original = (isolated_skills_dir / "test-skill" / "SKILL.md").read_text()
|
||||
assert "A test skill" in original
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _patch_skill revert on failure
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestPatchRevert:
|
||||
def test_patch_preserves_original_on_no_match(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
result = json.loads(skill_manage(
|
||||
"patch", "test-skill",
|
||||
old_string="NONEXISTENT_TEXT",
|
||||
new_string="replacement",
|
||||
))
|
||||
assert result["success"] is False
|
||||
assert "Original file preserved" in result["error"]
|
||||
original = (isolated_skills_dir / "test-skill" / "SKILL.md").read_text()
|
||||
assert "Instructions here" in original
|
||||
|
||||
def test_patch_preserves_original_on_broken_frontmatter(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
# Patch that would remove the frontmatter closing ---
|
||||
result = json.loads(skill_manage(
|
||||
"patch", "test-skill",
|
||||
old_string="description: A test skill for unit tests.",
|
||||
new_string="", # removing description
|
||||
))
|
||||
assert result["success"] is False
|
||||
assert "Original file preserved" in result["error"]
|
||||
original = (isolated_skills_dir / "test-skill" / "SKILL.md").read_text()
|
||||
assert "A test skill" in original
|
||||
|
||||
def test_patch_reverts_on_write_error(self, isolated_skills_dir, monkeypatch):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
|
||||
def boom(*a, **kw):
|
||||
raise OSError("disk full")
|
||||
|
||||
monkeypatch.setattr("tools.skill_manager_tool._atomic_write_text", boom)
|
||||
result = json.loads(skill_manage(
|
||||
"patch", "test-skill",
|
||||
old_string="Instructions here.",
|
||||
new_string="New instructions.",
|
||||
))
|
||||
assert result["success"] is False
|
||||
assert "write error" in result["error"].lower()
|
||||
assert "Original file preserved" in result["error"]
|
||||
|
||||
def test_patch_reverts_on_security_scan_block(self, isolated_skills_dir, monkeypatch):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
monkeypatch.setattr(
|
||||
"tools.skill_manager_tool._security_scan_skill",
|
||||
lambda path: "Blocked: malicious code",
|
||||
)
|
||||
result = json.loads(skill_manage(
|
||||
"patch", "test-skill",
|
||||
old_string="Instructions here.",
|
||||
new_string="New instructions.",
|
||||
))
|
||||
assert result["success"] is False
|
||||
assert "Original file preserved" in result["error"]
|
||||
original = (isolated_skills_dir / "test-skill" / "SKILL.md").read_text()
|
||||
assert "Instructions here" in original
|
||||
|
||||
def test_patch_successful_writes_new_content(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
result = json.loads(skill_manage(
|
||||
"patch", "test-skill",
|
||||
old_string="Instructions here.",
|
||||
new_string="Updated instructions.",
|
||||
))
|
||||
assert result["success"] is True
|
||||
content = (isolated_skills_dir / "test-skill" / "SKILL.md").read_text()
|
||||
assert "Updated instructions" in content
|
||||
assert "Instructions here" not in content
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _write_file revert on failure
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestWriteFileRevert:
|
||||
def test_write_file_reverts_on_security_scan_block(self, isolated_skills_dir, monkeypatch):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
monkeypatch.setattr(
|
||||
"tools.skill_manager_tool._security_scan_skill",
|
||||
lambda path: "Blocked: malicious",
|
||||
)
|
||||
result = json.loads(skill_manage(
|
||||
"write_file", "test-skill",
|
||||
file_path="references/notes.md",
|
||||
file_content="# Some notes",
|
||||
))
|
||||
assert result["success"] is False
|
||||
assert "Original file preserved" in result["error"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# validate action
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestValidateAction:
|
||||
def test_validate_passes_on_good_skill(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
result = json.loads(skill_manage("validate", "test-skill"))
|
||||
assert result["success"] is True
|
||||
assert result["errors"] == 0
|
||||
assert result["results"][0]["valid"] is True
|
||||
|
||||
def test_validate_finds_missing_description(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
bad = "---\nname: bad-skill\n---\n\nBody here.\n"
|
||||
_create_test_skill(isolated_skills_dir, name="bad-skill", content=bad)
|
||||
result = json.loads(skill_manage("validate", "bad-skill"))
|
||||
assert result["success"] is False
|
||||
assert result["errors"] == 1
|
||||
issues = result["results"][0]["issues"]
|
||||
assert any("description" in i.lower() for i in issues)
|
||||
|
||||
def test_validate_finds_empty_body(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
empty_body = "---\nname: empty-skill\ndescription: test\n---\n"
|
||||
_create_test_skill(isolated_skills_dir, name="empty-skill", content=empty_body)
|
||||
result = json.loads(skill_manage("validate", "empty-skill"))
|
||||
assert result["success"] is False
|
||||
issues = result["results"][0]["issues"]
|
||||
assert any("empty body" in i.lower() for i in issues)
|
||||
|
||||
def test_validate_all_skills(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
_create_test_skill(isolated_skills_dir, name="good-1")
|
||||
_create_test_skill(isolated_skills_dir, name="good-2")
|
||||
bad = "---\nname: bad\n---\n\nBody.\n"
|
||||
_create_test_skill(isolated_skills_dir, name="bad", content=bad)
|
||||
|
||||
result = json.loads(skill_manage("validate", ""))
|
||||
assert result["total"] == 3
|
||||
assert result["errors"] == 1
|
||||
|
||||
def test_validate_nonexistent_skill(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage
|
||||
|
||||
result = json.loads(skill_manage("validate", "nonexistent"))
|
||||
assert result["success"] is False
|
||||
assert "not found" in result["error"].lower()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Modification log
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestModificationLog:
|
||||
def test_edit_logs_on_success(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage, _MOD_LOG_FILE
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
new = "---\nname: test-skill\ndescription: updated\n---\n\n# Updated\n"
|
||||
skill_manage("edit", "test-skill", content=new)
|
||||
assert _MOD_LOG_FILE.exists()
|
||||
lines = _MOD_LOG_FILE.read_text().strip().split("\n")
|
||||
entry = json.loads(lines[-1])
|
||||
assert entry["action"] == "edit"
|
||||
assert entry["success"] is True
|
||||
assert entry["skill"] == "test-skill"
|
||||
|
||||
def test_patch_logs_on_failure(self, isolated_skills_dir):
|
||||
from tools.skill_manager_tool import skill_manage, _MOD_LOG_FILE
|
||||
|
||||
_create_test_skill(isolated_skills_dir)
|
||||
monkeypatch = None # just use no-match to trigger failure
|
||||
skill_manage(
|
||||
"patch", "test-skill",
|
||||
old_string="NONEXISTENT",
|
||||
new_string="replacement",
|
||||
)
|
||||
# Failure before write — no log entry expected since file never changed
|
||||
# But the failure path in patch returns early before logging
|
||||
# (the log only fires on write-side errors, not match errors)
|
||||
# This is correct behavior — no write happened, nothing to log
|
||||
@@ -44,6 +44,51 @@ from typing import Dict, Any, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Skill modification log file — stores before/after snapshots for audit trail
|
||||
_MOD_LOG_DIR = get_hermes_home() / "cron" / "output"
|
||||
_MOD_LOG_FILE = get_hermes_home() / "skills" / ".modification_log.jsonl"
|
||||
|
||||
|
||||
def _log_skill_modification(
|
||||
action: str,
|
||||
skill_name: str,
|
||||
target_file: str,
|
||||
original_content: str,
|
||||
new_content: str,
|
||||
success: bool,
|
||||
error: str = None,
|
||||
) -> None:
|
||||
"""Log a skill modification with before/after snapshot for audit trail.
|
||||
|
||||
Appends JSONL entries to ~/.hermes/skills/.modification_log.jsonl.
|
||||
Failures in logging are silently swallowed — logging must never
|
||||
break the primary operation.
|
||||
"""
|
||||
try:
|
||||
import time
|
||||
entry = {
|
||||
"timestamp": time.time(),
|
||||
"action": action,
|
||||
"skill": skill_name,
|
||||
"file": target_file,
|
||||
"success": success,
|
||||
"original_len": len(original_content) if original_content else 0,
|
||||
"new_len": len(new_content) if new_content else 0,
|
||||
}
|
||||
if error:
|
||||
entry["error"] = error
|
||||
# Truncate snapshots to 2KB each for log hygiene
|
||||
if original_content:
|
||||
entry["original_preview"] = original_content[:2048]
|
||||
if new_content:
|
||||
entry["new_preview"] = new_content[:2048]
|
||||
|
||||
_MOD_LOG_FILE.parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(_MOD_LOG_FILE, "a", encoding="utf-8") as f:
|
||||
f.write(json.dumps(entry, ensure_ascii=False) + "\n")
|
||||
except Exception:
|
||||
logger.debug("Failed to write skill modification log", exc_info=True)
|
||||
|
||||
# Import security scanner — agent-created skills get the same scrutiny as
|
||||
# community hub installs.
|
||||
try:
|
||||
@@ -339,31 +384,45 @@ def _create_skill(name: str, content: str, category: str = None) -> Dict[str, An
|
||||
|
||||
|
||||
def _edit_skill(name: str, content: str) -> Dict[str, Any]:
|
||||
"""Replace the SKILL.md of any existing skill (full rewrite)."""
|
||||
"""Replace the SKILL.md of any existing skill (full rewrite).
|
||||
|
||||
Poka-yoke: validates before writing, uses atomic write, and reverts
|
||||
to the original file on any failure.
|
||||
"""
|
||||
err = _validate_frontmatter(content)
|
||||
if err:
|
||||
return {"success": False, "error": err}
|
||||
return {"success": False, "error": f"Edit failed: {err} Original file preserved."}
|
||||
|
||||
err = _validate_content_size(content)
|
||||
if err:
|
||||
return {"success": False, "error": err}
|
||||
return {"success": False, "error": f"Edit failed: {err} Original file preserved."}
|
||||
|
||||
existing = _find_skill(name)
|
||||
if not existing:
|
||||
return {"success": False, "error": f"Skill '{name}' not found. Use skills_list() to see available skills."}
|
||||
|
||||
skill_md = existing["path"] / "SKILL.md"
|
||||
# Back up original content for rollback
|
||||
# Snapshot original for rollback
|
||||
original_content = skill_md.read_text(encoding="utf-8") if skill_md.exists() else None
|
||||
_atomic_write_text(skill_md, content)
|
||||
|
||||
try:
|
||||
_atomic_write_text(skill_md, content)
|
||||
except Exception as exc:
|
||||
_log_skill_modification("edit", name, "SKILL.md", original_content, content, False, str(exc))
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Edit failed: write error: {exc}. Original file preserved.",
|
||||
}
|
||||
|
||||
# Security scan — roll back on block
|
||||
scan_error = _security_scan_skill(existing["path"])
|
||||
if scan_error:
|
||||
if original_content is not None:
|
||||
_atomic_write_text(skill_md, original_content)
|
||||
return {"success": False, "error": scan_error}
|
||||
_log_skill_modification("edit", name, "SKILL.md", original_content, content, False, scan_error)
|
||||
return {"success": False, "error": f"Edit failed: {scan_error} Original file preserved."}
|
||||
|
||||
_log_skill_modification("edit", name, "SKILL.md", original_content, content, True)
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"Skill '{name}' updated.",
|
||||
@@ -380,6 +439,9 @@ def _patch_skill(
|
||||
) -> Dict[str, Any]:
|
||||
"""Targeted find-and-replace within a skill file.
|
||||
|
||||
Poka-yoke: validates old_string matches BEFORE writing, validates the
|
||||
result AFTER matching but BEFORE writing, and reverts on any failure.
|
||||
|
||||
Defaults to SKILL.md. Use file_path to patch a supporting file instead.
|
||||
Requires a unique match unless replace_all is True.
|
||||
"""
|
||||
@@ -423,7 +485,7 @@ def _patch_skill(
|
||||
preview = content[:500] + ("..." if len(content) > 500 else "")
|
||||
return {
|
||||
"success": False,
|
||||
"error": match_error,
|
||||
"error": f"Patch failed: {match_error} Original file preserved.",
|
||||
"file_preview": preview,
|
||||
}
|
||||
|
||||
@@ -431,7 +493,7 @@ def _patch_skill(
|
||||
target_label = "SKILL.md" if not file_path else file_path
|
||||
err = _validate_content_size(new_content, label=target_label)
|
||||
if err:
|
||||
return {"success": False, "error": err}
|
||||
return {"success": False, "error": f"Patch failed: {err} Original file preserved."}
|
||||
|
||||
# If patching SKILL.md, validate frontmatter is still intact
|
||||
if not file_path:
|
||||
@@ -439,18 +501,27 @@ def _patch_skill(
|
||||
if err:
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Patch would break SKILL.md structure: {err}",
|
||||
"error": f"Patch failed: would break SKILL.md structure: {err} Original file preserved.",
|
||||
}
|
||||
|
||||
original_content = content # for rollback
|
||||
_atomic_write_text(target, new_content)
|
||||
try:
|
||||
_atomic_write_text(target, new_content)
|
||||
except Exception as exc:
|
||||
_log_skill_modification("patch", name, target_label, original_content, new_content, False, str(exc))
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Patch failed: write error: {exc}. Original file preserved.",
|
||||
}
|
||||
|
||||
# Security scan — roll back on block
|
||||
scan_error = _security_scan_skill(skill_dir)
|
||||
if scan_error:
|
||||
_atomic_write_text(target, original_content)
|
||||
return {"success": False, "error": scan_error}
|
||||
_log_skill_modification("patch", name, target_label, original_content, new_content, False, scan_error)
|
||||
return {"success": False, "error": f"Patch failed: {scan_error} Original file preserved."}
|
||||
|
||||
_log_skill_modification("patch", name, target_label, original_content, new_content, True)
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"Patched {'SKILL.md' if not file_path else file_path} in skill '{name}' ({match_count} replacement{'s' if match_count > 1 else ''}).",
|
||||
@@ -478,7 +549,10 @@ def _delete_skill(name: str) -> Dict[str, Any]:
|
||||
|
||||
|
||||
def _write_file(name: str, file_path: str, file_content: str) -> Dict[str, Any]:
|
||||
"""Add or overwrite a supporting file within any skill directory."""
|
||||
"""Add or overwrite a supporting file within any skill directory.
|
||||
|
||||
Poka-yoke: reverts to original on failure.
|
||||
"""
|
||||
err = _validate_file_path(file_path)
|
||||
if err:
|
||||
return {"success": False, "error": err}
|
||||
@@ -499,7 +573,7 @@ def _write_file(name: str, file_path: str, file_content: str) -> Dict[str, Any]:
|
||||
}
|
||||
err = _validate_content_size(file_content, label=file_path)
|
||||
if err:
|
||||
return {"success": False, "error": err}
|
||||
return {"success": False, "error": f"Write failed: {err} Original file preserved."}
|
||||
|
||||
existing = _find_skill(name)
|
||||
if not existing:
|
||||
@@ -507,9 +581,17 @@ def _write_file(name: str, file_path: str, file_content: str) -> Dict[str, Any]:
|
||||
|
||||
target = existing["path"] / file_path
|
||||
target.parent.mkdir(parents=True, exist_ok=True)
|
||||
# Back up for rollback
|
||||
# Snapshot for rollback
|
||||
original_content = target.read_text(encoding="utf-8") if target.exists() else None
|
||||
_atomic_write_text(target, file_content)
|
||||
|
||||
try:
|
||||
_atomic_write_text(target, file_content)
|
||||
except Exception as exc:
|
||||
_log_skill_modification("write_file", name, file_path, original_content, file_content, False, str(exc))
|
||||
return {
|
||||
"success": False,
|
||||
"error": f"Write failed: {exc}. Original file preserved.",
|
||||
}
|
||||
|
||||
# Security scan — roll back on block
|
||||
scan_error = _security_scan_skill(existing["path"])
|
||||
@@ -518,8 +600,10 @@ def _write_file(name: str, file_path: str, file_content: str) -> Dict[str, Any]:
|
||||
_atomic_write_text(target, original_content)
|
||||
else:
|
||||
target.unlink(missing_ok=True)
|
||||
return {"success": False, "error": scan_error}
|
||||
_log_skill_modification("write_file", name, file_path, original_content, file_content, False, scan_error)
|
||||
return {"success": False, "error": f"Write failed: {scan_error} Original file preserved."}
|
||||
|
||||
_log_skill_modification("write_file", name, file_path, original_content, file_content, True)
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"File '{file_path}' written to skill '{name}'.",
|
||||
@@ -554,6 +638,8 @@ def _remove_file(name: str, file_path: str) -> Dict[str, Any]:
|
||||
"available_files": available if available else None,
|
||||
}
|
||||
|
||||
# Snapshot for potential undo
|
||||
removed_content = target.read_text(encoding="utf-8")
|
||||
target.unlink()
|
||||
|
||||
# Clean up empty subdirectories
|
||||
@@ -561,12 +647,96 @@ def _remove_file(name: str, file_path: str) -> Dict[str, Any]:
|
||||
if parent != skill_dir and parent.exists() and not any(parent.iterdir()):
|
||||
parent.rmdir()
|
||||
|
||||
_log_skill_modification("remove_file", name, file_path, removed_content, None, True)
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"File '{file_path}' removed from skill '{name}'.",
|
||||
}
|
||||
|
||||
|
||||
def _validate_skill(name: str = None) -> Dict[str, Any]:
|
||||
"""Validate one or all skills for structural integrity.
|
||||
|
||||
Checks: valid YAML frontmatter, non-empty body, required fields
|
||||
(name, description), and file readability.
|
||||
|
||||
Pass name=None to validate all skills.
|
||||
"""
|
||||
from agent.skill_utils import get_all_skills_dirs
|
||||
|
||||
results = []
|
||||
errors = 0
|
||||
|
||||
dirs_to_scan = get_all_skills_dirs()
|
||||
for skills_dir in dirs_to_scan:
|
||||
if not skills_dir.exists():
|
||||
continue
|
||||
for skill_md in skills_dir.rglob("SKILL.md"):
|
||||
skill_name = skill_md.parent.name
|
||||
if name and skill_name != name:
|
||||
continue
|
||||
|
||||
issues = []
|
||||
try:
|
||||
content = skill_md.read_text(encoding="utf-8")
|
||||
except Exception as exc:
|
||||
issues.append(f"Cannot read file: {exc}")
|
||||
results.append({"skill": skill_name, "path": str(skill_md), "valid": False, "issues": issues})
|
||||
errors += 1
|
||||
continue
|
||||
|
||||
# Check frontmatter
|
||||
fm_err = _validate_frontmatter(content)
|
||||
if fm_err:
|
||||
issues.append(fm_err)
|
||||
|
||||
# Check YAML parse and required fields
|
||||
if content.startswith("---"):
|
||||
import re as _re
|
||||
end_match = _re.search(r'\n---\s*\n', content[3:])
|
||||
if end_match:
|
||||
yaml_content = content[3:end_match.start() + 3]
|
||||
try:
|
||||
parsed = yaml.safe_load(yaml_content)
|
||||
if isinstance(parsed, dict):
|
||||
if not parsed.get("name"):
|
||||
issues.append("Missing 'name' in frontmatter")
|
||||
if not parsed.get("description"):
|
||||
issues.append("Missing 'description' in frontmatter")
|
||||
else:
|
||||
issues.append("Frontmatter is not a YAML mapping")
|
||||
except yaml.YAMLError as e:
|
||||
issues.append(f"YAML parse error: {e}")
|
||||
else:
|
||||
issues.append("Frontmatter not properly closed")
|
||||
else:
|
||||
issues.append("File does not start with YAML frontmatter (---)")
|
||||
|
||||
# Check body is non-empty
|
||||
if content.startswith("---"):
|
||||
import re as _re
|
||||
end_match = _re.search(r'\n---\s*\n', content[3:])
|
||||
if end_match:
|
||||
body = content[end_match.end() + 3:].strip()
|
||||
if not body:
|
||||
issues.append("Empty body after frontmatter")
|
||||
|
||||
valid = len(issues) == 0
|
||||
if not valid:
|
||||
errors += 1
|
||||
results.append({"skill": skill_name, "path": str(skill_md), "valid": valid, "issues": issues})
|
||||
|
||||
if name and not results:
|
||||
return {"success": False, "error": f"Skill '{name}' not found."}
|
||||
|
||||
return {
|
||||
"success": errors == 0,
|
||||
"total": len(results),
|
||||
"errors": errors,
|
||||
"results": results,
|
||||
}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Main entry point
|
||||
# =============================================================================
|
||||
@@ -619,8 +789,11 @@ def skill_manage(
|
||||
return json.dumps({"success": False, "error": "file_path is required for 'remove_file'."}, ensure_ascii=False)
|
||||
result = _remove_file(name, file_path)
|
||||
|
||||
elif action == "validate":
|
||||
result = _validate_skill(name if name else None)
|
||||
|
||||
else:
|
||||
result = {"success": False, "error": f"Unknown action '{action}'. Use: create, edit, patch, delete, write_file, remove_file"}
|
||||
result = {"success": False, "error": f"Unknown action '{action}'. Use: create, edit, patch, delete, write_file, remove_file, validate"}
|
||||
|
||||
if result.get("success"):
|
||||
try:
|
||||
@@ -638,38 +811,40 @@ def skill_manage(
|
||||
|
||||
SKILL_MANAGE_SCHEMA = {
|
||||
"name": "skill_manage",
|
||||
"description": (
|
||||
"Manage skills (create, update, delete). Skills are your procedural "
|
||||
"memory — reusable approaches for recurring task types. "
|
||||
"New skills go to ~/.hermes/skills/; existing skills can be modified wherever they live.\n\n"
|
||||
"Actions: create (full SKILL.md + optional category), "
|
||||
"patch (old_string/new_string — preferred for fixes), "
|
||||
"edit (full SKILL.md rewrite — major overhauls only), "
|
||||
"delete, write_file, remove_file.\n\n"
|
||||
"Create when: complex task succeeded (5+ calls), errors overcome, "
|
||||
"user-corrected approach worked, non-trivial workflow discovered, "
|
||||
"or user asks you to remember a procedure.\n"
|
||||
"Update when: instructions stale/wrong, OS-specific failures, "
|
||||
"missing steps or pitfalls found during use. "
|
||||
"If you used a skill and hit issues not covered by it, patch it immediately.\n\n"
|
||||
"After difficult/iterative tasks, offer to save as a skill. "
|
||||
"Skip for simple one-offs. Confirm with user before creating/deleting.\n\n"
|
||||
"Good skills: trigger conditions, numbered steps with exact commands, "
|
||||
"pitfalls section, verification steps. Use skill_view() to see format examples."
|
||||
),
|
||||
"description": (
|
||||
"Manage skills (create, update, delete, validate). Skills are your procedural "
|
||||
"memory \u2014 reusable approaches for recurring task types. "
|
||||
"New skills go to ~/.hermes/skills/; existing skills can be modified wherever they live.\n\n"
|
||||
"Actions: create (full SKILL.md + optional category), "
|
||||
"patch (old_string/new_string \u2014 preferred for fixes), "
|
||||
"edit (full SKILL.md rewrite \u2014 major overhauls only), "
|
||||
"delete, write_file, remove_file, "
|
||||
"validate (check all skills for structural integrity).\n\n"
|
||||
"Create when: complex task succeeded (5+ calls), errors overcome, "
|
||||
"user-corrected approach worked, non-trivial workflow discovered, "
|
||||
"or user asks you to remember a procedure.\n"
|
||||
"Update when: instructions stale/wrong, OS-specific failures, "
|
||||
"missing steps or pitfalls found during use. "
|
||||
"If you used a skill and hit issues not covered by it, patch it immediately.\n\n"
|
||||
"After difficult/iterative tasks, offer to save as a skill. "
|
||||
"Skip for simple one-offs. Confirm with user before creating/deleting.\n\n"
|
||||
"Good skills: trigger conditions, numbered steps with exact commands, "
|
||||
"pitfalls section, verification steps. Use skill_view() to see format examples."
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"action": {
|
||||
"type": "string",
|
||||
"enum": ["create", "patch", "edit", "delete", "write_file", "remove_file"],
|
||||
"enum": ["create", "patch", "edit", "delete", "write_file", "remove_file", "validate"],
|
||||
"description": "The action to perform."
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Skill name (lowercase, hyphens/underscores, max 64 chars). "
|
||||
"Must match an existing skill for patch/edit/delete/write_file/remove_file."
|
||||
"Required for create/patch/edit/delete/write_file/remove_file. "
|
||||
"Optional for validate: omit to check all skills, provide to check one."
|
||||
)
|
||||
},
|
||||
"content": {
|
||||
|
||||
Reference in New Issue
Block a user