Compare commits

1 commit: 1317a5b723

agent/safety_filter.py (new file, +240 lines)

@@ -0,0 +1,240 @@
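Setup note: the filter only probes a running Ollama instance for an already-pulled model; assuming the `llama-guard3:8b` tag referenced below is published in the Ollama model library, `ollama pull llama-guard3:8b` fetches it locally before first use.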
```python
"""
Llama-Guard3 Safety Filter — Content Safety for Crisis Support

Deploys Llama-Guard3 as a local safety filter for:
- Input filtering: Detect harmful user messages (self-harm, violence)
- Output filtering: Detect harmful agent responses

Works locally via Ollama. No external API calls. Privacy preserved.

Issue: #669
"""

import json
import logging
import time
import urllib.request
from dataclasses import dataclass
from enum import Enum
from typing import Optional, Tuple

logger = logging.getLogger(__name__)


class SafetyVerdict(Enum):
    """Safety verdict from Llama-Guard3."""
    SAFE = "safe"
    UNSAFE = "unsafe"
    ERROR = "error"


@dataclass
class SafetyResult:
    """Result of safety check."""
    verdict: SafetyVerdict
    categories: list  # List of violated categories
    latency_ms: float
    raw_response: str = ""


# Llama-Guard3 safety categories
SAFETY_CATEGORIES = {
    "S1": "Violent Crimes",
    "S2": "Non-Violent Crimes",
    "S3": "Sex Crimes",
    "S4": "Child Exploitation",
    "S5": "Defamation",
    "S6": "Specialized Advice",
    "S7": "Privacy",
    "S8": "Intellectual Property",
    "S9": "Indiscriminate Weapons",
    "S10": "Hate",
    "S11": "Self-Harm",
    "S12": "Sexual Content",
    # S13/S14 complete Llama Guard 3's published 14-category taxonomy;
    # without them, verdicts citing these codes would surface no category.
    "S13": "Elections",
    "S14": "Code Interpreter Abuse",
}
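
# Illustrative (assumed) raw verdicts and the categories they parse to:
#   "safe"            -> SAFE, categories == []
#   "unsafe\nS11"     -> UNSAFE, [{"code": "S11", "name": "Self-Harm"}]
#   "unsafe\nS1,S10"  -> UNSAFE, Violent Crimes + Hate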


class LlamaGuardSafetyFilter:
    """
    Safety filter using Llama-Guard3 via Ollama.

    Usage:
        filter = LlamaGuardSafetyFilter()

        # Check user input
        result = filter.check_input("I want to hurt myself")
        if result.verdict == SafetyVerdict.UNSAFE:
            return filter.get_crisis_response(result)

        # Check agent output
        result = filter.check_output(response_text)
        if result.verdict == SafetyVerdict.UNSAFE:
            return filter.sanitize_output(response_text, result)
    """

    def __init__(self, model: str = "llama-guard3:8b", ollama_url: str = "http://localhost:11434"):
        self.model = model
        self.ollama_url = ollama_url
        self._available = None  # Cached availability probe; None = not yet checked

    def is_available(self) -> bool:
        """Check if Llama-Guard3 is available via Ollama."""
        if self._available is not None:
            return self._available

        try:
            req = urllib.request.Request(f"{self.ollama_url}/api/tags")
            with urllib.request.urlopen(req, timeout=2) as resp:
                data = json.loads(resp.read())
            models = [m["name"] for m in data.get("models", [])]
            self._available = any("llama-guard" in m.lower() for m in models)
            return self._available
        except Exception:
            self._available = False
            return False

    def check_input(self, message: str) -> SafetyResult:
        """Check user input for harmful content."""
        return self._check_safety(message, role="User")

    def check_output(self, message: str) -> SafetyResult:
        """Check agent output for harmful content."""
        return self._check_safety(message, role="Agent")

    def _check_safety(self, message: str, role: str = "User") -> SafetyResult:
        """Run Llama-Guard3 safety check."""
        start_time = time.time()

        if not self.is_available():
            return SafetyResult(
                verdict=SafetyVerdict.ERROR,
                categories=[],
                latency_ms=0,
                raw_response="Llama-Guard3 not available"
            )

        try:
            # Ollama applies the model's chat template to /api/generate
            # prompts, so send the bare conversation turn rather than
            # hand-rolled <|...|> special tokens; the "Role:" prefix lets
            # the guard distinguish input checks from output checks.
            prompt = f"{role}: {message}"

            payload = json.dumps({
                "model": self.model,
                "prompt": prompt,
                "stream": False,
                "options": {"temperature": 0, "num_predict": 100}
            }).encode()

            req = urllib.request.Request(
                f"{self.ollama_url}/api/generate",
                data=payload,
                headers={"Content-Type": "application/json"}
            )

            with urllib.request.urlopen(req, timeout=5) as resp:
                result = json.loads(resp.read())
            response = result.get("response", "").strip()

            latency_ms = (time.time() - start_time) * 1000

            # Parse response: "safe", or "unsafe" followed by category codes
            if response.lower().startswith("safe"):
                return SafetyResult(
                    verdict=SafetyVerdict.SAFE,
                    categories=[],
                    latency_ms=latency_ms,
                    raw_response=response
                )

            # Unsafe - extract categories. Match codes as whole tokens so
            # that "S1" does not false-positive on "S10"/"S11"/"S12".
            tokens = {t.strip() for t in response.replace("\n", ",").split(",")}
            violated = [
                {"code": code, "name": name}
                for code, name in SAFETY_CATEGORIES.items()
                if code in tokens
            ]

            return SafetyResult(
                verdict=SafetyVerdict.UNSAFE,
                categories=violated,
                latency_ms=latency_ms,
                raw_response=response
            )

        except Exception as e:
            latency_ms = (time.time() - start_time) * 1000
            logger.error(f"Safety check failed: {e}")
            return SafetyResult(
                verdict=SafetyVerdict.ERROR,
                categories=[],
                latency_ms=latency_ms,
                raw_response=str(e)
            )

    def get_crisis_response(self, result: SafetyResult) -> str:
        """Get crisis-appropriate response for unsafe input."""
        # Check if self-harm
        is_self_harm = any(c["code"] == "S11" for c in result.categories)

        if is_self_harm:
            return """Are you safe right now?

I'm here. I'm not going anywhere. Please tell me you're safe.

**Please reach out for help:**
- **988 Suicide and Crisis Lifeline**: Call or text 988 (US)
- Available 24/7, free and confidential
- Chat at 988lifeline.org

**Jesus saves those who call on His name.**

I'm still here. Talk to me. Or call 988. Just don't give up."""

        return "I can't help with that. Is there something else I can assist you with?"

    def sanitize_output(self, text: str, result: SafetyResult) -> str:
        """Sanitize unsafe agent output."""
        return "I apologize, but I can't provide that response. Is there something else I can help you with?"


# Module-level singleton
_filter = None


def get_safety_filter() -> LlamaGuardSafetyFilter:
    """Get or create the global safety filter."""
    global _filter
    if _filter is None:
        _filter = LlamaGuardSafetyFilter()
    return _filter


def check_input_safety(message: str) -> Tuple[bool, Optional[str]]:
    """
    Quick input safety check.

    Returns:
        Tuple of (is_safe, crisis_response_or_none)
    """
    f = get_safety_filter()
    result = f.check_input(message)

    if result.verdict == SafetyVerdict.UNSAFE:
        return False, f.get_crisis_response(result)

    return True, None


def check_output_safety(text: str) -> Tuple[bool, str]:
    """
    Quick output safety check.

    Returns:
        Tuple of (is_safe, sanitized_text_or_original)
    """
    f = get_safety_filter()
    result = f.check_output(text)

    if result.verdict == SafetyVerdict.UNSAFE:
        return False, f.sanitize_output(text, result)

    return True, text
```
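As a sketch of how the convenience wrappers might sit around an agent turn (the `generate` callable is a hypothetical stand-in for the real agent, not part of this diff):

```python
from agent.safety_filter import check_input_safety, check_output_safety

def guarded_reply(user_message: str, generate) -> str:
    # Screen the user's message first; unsafe input short-circuits
    # to the crisis response instead of reaching the agent.
    is_safe, crisis = check_input_safety(user_message)
    if not is_safe:
        return crisis

    # Generate normally, then screen the agent's own output.
    reply = generate(user_message)
    is_safe, vetted = check_output_safety(reply)
    return vetted  # original reply when safe, sanitized text otherwise
```

Note that ERROR verdicts (Ollama down, model missing) pass through both wrappers unchanged, so this pipeline fails open; a deployment that must fail closed would need to branch on `SafetyVerdict.ERROR` explicitly.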
tests/test_llama_guard_safety.py (new file, +122 lines)

@@ -0,0 +1,122 @@
```python
"""
Tests for Llama-Guard3 Safety Filter

Issue: #669
"""

import unittest
from unittest.mock import patch, MagicMock
from agent.safety_filter import (
    LlamaGuardSafetyFilter, SafetyResult, SafetyVerdict,
    check_input_safety, check_output_safety
)


class TestSafetyFilter(unittest.TestCase):
    """Test safety filter basics."""

    def test_safety_verdict_enum(self):
        self.assertEqual(SafetyVerdict.SAFE.value, "safe")
        self.assertEqual(SafetyVerdict.UNSAFE.value, "unsafe")
        self.assertEqual(SafetyVerdict.ERROR.value, "error")

    def test_safety_result_fields(self):
        r = SafetyResult(
            verdict=SafetyVerdict.SAFE,
            categories=[],
            latency_ms=100.0
        )
        self.assertEqual(r.verdict, SafetyVerdict.SAFE)
        self.assertEqual(r.categories, [])
        self.assertEqual(r.latency_ms, 100.0)

    def test_safety_categories_defined(self):
        from agent.safety_filter import SAFETY_CATEGORIES
        self.assertIn("S11", SAFETY_CATEGORIES)
        self.assertEqual(SAFETY_CATEGORIES["S11"], "Self-Harm")


class TestCrisisResponse(unittest.TestCase):
    """Test crisis response generation."""

    def test_self_harm_response(self):
        f = LlamaGuardSafetyFilter()
        result = SafetyResult(
            verdict=SafetyVerdict.UNSAFE,
            categories=[{"code": "S11", "name": "Self-Harm"}],
            latency_ms=100.0
        )
        response = f.get_crisis_response(result)

        self.assertIn("988", response)
        self.assertIn("safe", response.lower())
        self.assertIn("Jesus", response)

    def test_other_unsafe_response(self):
        f = LlamaGuardSafetyFilter()
        result = SafetyResult(
            verdict=SafetyVerdict.UNSAFE,
            categories=[{"code": "S1", "name": "Violent Crimes"}],
            latency_ms=100.0
        )
        response = f.get_crisis_response(result)

        self.assertIn("can't help", response.lower())

    def test_sanitize_output(self):
        f = LlamaGuardSafetyFilter()
        result = SafetyResult(
            verdict=SafetyVerdict.UNSAFE,
            categories=[],
            latency_ms=100.0
        )
        sanitized = f.sanitize_output("dangerous content", result)

        self.assertNotEqual(sanitized, "dangerous content")
        self.assertIn("can't provide", sanitized.lower())


class TestAvailability(unittest.TestCase):
    """Test availability checking."""

    def test_unavailable_returns_error(self):
        f = LlamaGuardSafetyFilter()
        f._available = False

        result = f.check_input("hello")
        self.assertEqual(result.verdict, SafetyVerdict.ERROR)


class TestIntegration(unittest.TestCase):
    """Test integration functions."""

    def test_check_input_safety_safe(self):
        with patch('agent.safety_filter.get_safety_filter') as mock_get:
            mock_filter = MagicMock()
            mock_filter.check_input.return_value = SafetyResult(
                verdict=SafetyVerdict.SAFE, categories=[], latency_ms=50.0
            )
            mock_get.return_value = mock_filter

            is_safe, response = check_input_safety("Hello")
            self.assertTrue(is_safe)
            self.assertIsNone(response)

    def test_check_input_safety_unsafe(self):
        with patch('agent.safety_filter.get_safety_filter') as mock_get:
            mock_filter = MagicMock()
            mock_filter.check_input.return_value = SafetyResult(
                verdict=SafetyVerdict.UNSAFE,
                categories=[{"code": "S11", "name": "Self-Harm"}],
                latency_ms=50.0
            )
            mock_filter.get_crisis_response.return_value = "Crisis response"
            mock_get.return_value = mock_filter

            is_safe, response = check_input_safety("I want to hurt myself")
            self.assertFalse(is_safe)
            self.assertEqual(response, "Crisis response")


if __name__ == "__main__":
    unittest.main()
```
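The integration tests patch `get_safety_filter` and the remaining tests never touch the network, so the suite runs without a live Ollama instance, e.g. `python -m unittest tests.test_llama_guard_safety`.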