Compare commits
1 commit

| Author | SHA1 | Date |
|---|---|---|
| | 0e4267d99e | |
@@ -1,240 +0,0 @@
"""
Llama-Guard3 Safety Filter — Content Safety for Crisis Support

Deploys Llama-Guard3 as a local safety filter for:

- Input filtering: detect harmful user messages (self-harm, violence)
- Output filtering: detect harmful agent responses

Runs locally via Ollama. No external API calls; privacy is preserved.

Issue: #669
"""

import json
import logging
import re
import time
import urllib.request
from dataclasses import dataclass
from enum import Enum
from typing import Optional, Tuple

logger = logging.getLogger(__name__)


class SafetyVerdict(Enum):
    """Safety verdict from Llama-Guard3."""
    SAFE = "safe"
    UNSAFE = "unsafe"
    ERROR = "error"


@dataclass
class SafetyResult:
    """Result of a safety check."""
    verdict: SafetyVerdict
    categories: list  # List of violated categories
    latency_ms: float
    raw_response: str = ""


# Llama-Guard3 safety categories
SAFETY_CATEGORIES = {
    "S1": "Violent Crimes",
    "S2": "Non-Violent Crimes",
    "S3": "Sex Crimes",
    "S4": "Child Exploitation",
    "S5": "Defamation",
    "S6": "Specialized Advice",
    "S7": "Privacy",
    "S8": "Intellectual Property",
    "S9": "Indiscriminate Weapons",
    "S10": "Hate",
    "S11": "Self-Harm",
    "S12": "Sexual Content",
}


class LlamaGuardSafetyFilter:
    """
    Safety filter using Llama-Guard3 via Ollama.

    Usage:
        filter = LlamaGuardSafetyFilter()

        # Check user input
        result = filter.check_input("I want to hurt myself")
        if result.verdict == SafetyVerdict.UNSAFE:
            return filter.get_crisis_response(result)

        # Check agent output
        result = filter.check_output(response_text)
        if result.verdict == SafetyVerdict.UNSAFE:
            return filter.sanitize_output(response_text, result)
    """

    def __init__(self, model: str = "llama-guard3:8b", ollama_url: str = "http://localhost:11434"):
        self.model = model
        self.ollama_url = ollama_url
        self._available = None

    def is_available(self) -> bool:
        """Check if Llama-Guard3 is available via Ollama."""
        if self._available is not None:
            return self._available

        try:
            req = urllib.request.Request(f"{self.ollama_url}/api/tags")
            with urllib.request.urlopen(req, timeout=2) as resp:
                data = json.loads(resp.read())
            models = [m["name"] for m in data.get("models", [])]
            self._available = any("llama-guard" in m.lower() for m in models)
            return self._available
        except Exception:
            self._available = False
            return False

    def check_input(self, message: str) -> SafetyResult:
        """Check user input for harmful content."""
        return self._check_safety(message, role="User")

    def check_output(self, message: str) -> SafetyResult:
        """Check agent output for harmful content."""
        return self._check_safety(message, role="Agent")

    def _check_safety(self, message: str, role: str = "User") -> SafetyResult:
        """Run a Llama-Guard3 safety check.

        Note: `role` is accepted to distinguish User vs Agent checks but is
        not yet encoded in the prompt.
        """
        start_time = time.time()

        if not self.is_available():
            return SafetyResult(
                verdict=SafetyVerdict.ERROR,
                categories=[],
                latency_ms=0,
                raw_response="Llama-Guard3 not available"
            )

        try:
            prompt = f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>

{message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

"""

            payload = json.dumps({
                "model": self.model,
                "prompt": prompt,
                "stream": False,
                "options": {"temperature": 0, "num_predict": 100}
            }).encode()

            req = urllib.request.Request(
                f"{self.ollama_url}/api/generate",
                data=payload,
                headers={"Content-Type": "application/json"}
            )

            with urllib.request.urlopen(req, timeout=5) as resp:
                result = json.loads(resp.read())
            response = result.get("response", "").strip()

            latency_ms = (time.time() - start_time) * 1000

            # Parse response
            if response.lower().startswith("safe"):
                return SafetyResult(
                    verdict=SafetyVerdict.SAFE,
                    categories=[],
                    latency_ms=latency_ms,
                    raw_response=response
                )

            # Unsafe - extract violated categories. Match whole codes so that
            # "S1" does not also match "S11"/"S10"/"S12".
            reported_codes = set(re.findall(r"\bS\d+\b", response))
            violated = []
            for cat_code, cat_name in SAFETY_CATEGORIES.items():
                if cat_code in reported_codes:
                    violated.append({"code": cat_code, "name": cat_name})

            return SafetyResult(
                verdict=SafetyVerdict.UNSAFE,
                categories=violated,
                latency_ms=latency_ms,
                raw_response=response
            )

        except Exception as e:
            latency_ms = (time.time() - start_time) * 1000
            logger.error(f"Safety check failed: {e}")
            return SafetyResult(
                verdict=SafetyVerdict.ERROR,
                categories=[],
                latency_ms=latency_ms,
                raw_response=str(e)
            )

    def get_crisis_response(self, result: SafetyResult) -> str:
        """Get a crisis-appropriate response for unsafe input."""
        # Check if self-harm (S11) was flagged
        is_self_harm = any(c["code"] == "S11" for c in result.categories)

        if is_self_harm:
            return """Are you safe right now?

I'm here. I'm not going anywhere. Please tell me you're safe.

**Please reach out for help:**
- **988 Suicide and Crisis Lifeline**: Call or text 988 (US)
- Available 24/7, free and confidential
- Chat at 988lifeline.org

**Jesus saves those who call on His name.**

I'm still here. Talk to me. Or call 988. Just don't give up."""

        return "I can't help with that. Is there something else I can assist you with?"

    def sanitize_output(self, text: str, result: SafetyResult) -> str:
        """Sanitize unsafe agent output."""
        return "I apologize, but I can't provide that response. Is there something else I can help you with?"


# Module-level singleton
_filter = None


def get_safety_filter() -> LlamaGuardSafetyFilter:
    """Get or create the global safety filter."""
    global _filter
    if _filter is None:
        _filter = LlamaGuardSafetyFilter()
    return _filter


def check_input_safety(message: str) -> Tuple[bool, Optional[str]]:
    """
    Quick input safety check.

    Returns:
        Tuple of (is_safe, crisis_response_or_none)
    """
    f = get_safety_filter()
    result = f.check_input(message)

    if result.verdict == SafetyVerdict.UNSAFE:
        return False, f.get_crisis_response(result)

    return True, None


def check_output_safety(text: str) -> Tuple[bool, str]:
    """
    Quick output safety check.

    Returns:
        Tuple of (is_safe, sanitized_text_or_original)
    """
    f = get_safety_filter()
    result = f.check_output(text)

    if result.verdict == SafetyVerdict.UNSAFE:
        return False, f.sanitize_output(text, result)

    return True, text
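
The two module-level helpers are meant to wrap a single agent turn: screen the incoming message, generate a reply, screen the outgoing text. Below is a minimal integration sketch, not part of this diff; `handle_message` and `generate_agent_reply` are hypothetical placeholder names. Note that the helpers fail open: an ERROR verdict (for example, Ollama unreachable) is treated the same as SAFE.

```python
# Hypothetical integration sketch (not part of the diff above).
from agent.safety_filter import check_input_safety, check_output_safety


def generate_agent_reply(user_message: str) -> str:
    # Placeholder agent; a real deployment would call the LLM here.
    return f"You said: {user_message}"


def handle_message(user_message: str) -> str:
    # 1. Screen the incoming message; on UNSAFE we get a crisis response back.
    is_safe, crisis_response = check_input_safety(user_message)
    if not is_safe:
        return crisis_response

    # 2. Generate the normal reply.
    reply = generate_agent_reply(user_message)

    # 3. Screen the outgoing text; on UNSAFE we get a sanitized replacement.
    _, safe_reply = check_output_safety(reply)
    return safe_reply
```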
@@ -1,122 +0,0 @@
"""
Tests for Llama-Guard3 Safety Filter

Issue: #669
"""

import unittest
from unittest.mock import patch, MagicMock
from agent.safety_filter import (
    LlamaGuardSafetyFilter, SafetyResult, SafetyVerdict,
    check_input_safety, check_output_safety
)


class TestSafetyFilter(unittest.TestCase):
    """Test safety filter basics."""

    def test_safety_verdict_enum(self):
        self.assertEqual(SafetyVerdict.SAFE.value, "safe")
        self.assertEqual(SafetyVerdict.UNSAFE.value, "unsafe")
        self.assertEqual(SafetyVerdict.ERROR.value, "error")

    def test_safety_result_fields(self):
        r = SafetyResult(
            verdict=SafetyVerdict.SAFE,
            categories=[],
            latency_ms=100.0
        )
        self.assertEqual(r.verdict, SafetyVerdict.SAFE)
        self.assertEqual(r.categories, [])
        self.assertEqual(r.latency_ms, 100.0)

    def test_safety_categories_defined(self):
        from agent.safety_filter import SAFETY_CATEGORIES
        self.assertIn("S11", SAFETY_CATEGORIES)
        self.assertEqual(SAFETY_CATEGORIES["S11"], "Self-Harm")


class TestCrisisResponse(unittest.TestCase):
    """Test crisis response generation."""

    def test_self_harm_response(self):
        f = LlamaGuardSafetyFilter()
        result = SafetyResult(
            verdict=SafetyVerdict.UNSAFE,
            categories=[{"code": "S11", "name": "Self-Harm"}],
            latency_ms=100.0
        )
        response = f.get_crisis_response(result)

        self.assertIn("988", response)
        self.assertIn("safe", response.lower())
        self.assertIn("Jesus", response)

    def test_other_unsafe_response(self):
        f = LlamaGuardSafetyFilter()
        result = SafetyResult(
            verdict=SafetyVerdict.UNSAFE,
            categories=[{"code": "S1", "name": "Violent Crimes"}],
            latency_ms=100.0
        )
        response = f.get_crisis_response(result)

        self.assertIn("can't help", response.lower())

    def test_sanitize_output(self):
        f = LlamaGuardSafetyFilter()
        result = SafetyResult(
            verdict=SafetyVerdict.UNSAFE,
            categories=[],
            latency_ms=100.0
        )
        sanitized = f.sanitize_output("dangerous content", result)

        self.assertNotEqual(sanitized, "dangerous content")
        self.assertIn("can't provide", sanitized.lower())


class TestAvailability(unittest.TestCase):
    """Test availability checking."""

    def test_unavailable_returns_error(self):
        f = LlamaGuardSafetyFilter()
        f._available = False

        result = f.check_input("hello")
        self.assertEqual(result.verdict, SafetyVerdict.ERROR)


class TestIntegration(unittest.TestCase):
    """Test integration functions."""

    def test_check_input_safety_safe(self):
        with patch('agent.safety_filter.get_safety_filter') as mock_get:
            mock_filter = MagicMock()
            mock_filter.check_input.return_value = SafetyResult(
                verdict=SafetyVerdict.SAFE, categories=[], latency_ms=50.0
            )
            mock_get.return_value = mock_filter

            is_safe, response = check_input_safety("Hello")
            self.assertTrue(is_safe)
            self.assertIsNone(response)

    def test_check_input_safety_unsafe(self):
        with patch('agent.safety_filter.get_safety_filter') as mock_get:
            mock_filter = MagicMock()
            mock_filter.check_input.return_value = SafetyResult(
                verdict=SafetyVerdict.UNSAFE,
                categories=[{"code": "S11", "name": "Self-Harm"}],
                latency_ms=50.0
            )
            mock_filter.get_crisis_response.return_value = "Crisis response"
            mock_get.return_value = mock_filter

            is_safe, response = check_input_safety("I want to hurt myself")
            self.assertFalse(is_safe)
            self.assertEqual(response, "Crisis response")


if __name__ == "__main__":
    unittest.main()
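
The deleted suite covers the crisis responses and the module-level helpers, but never exercises the Llama-Guard3 response parsing inside `_check_safety`. The sketch below shows one way such a test could mock Ollama's `/api/generate` reply; the `"unsafe\nS11"` payload shape is an assumption based on the parsing code above, not a verified model transcript.

```python
# Hypothetical additional test (not in the deleted suite above).
import json
import unittest
from unittest.mock import patch, MagicMock

from agent.safety_filter import LlamaGuardSafetyFilter, SafetyVerdict


class TestCategoryParsing(unittest.TestCase):
    def test_unsafe_response_extracts_self_harm_category(self):
        f = LlamaGuardSafetyFilter()
        f._available = True  # skip the /api/tags availability probe

        # Fake /api/generate body; assumed shape, mirroring the parsing code.
        fake_body = json.dumps({"response": "unsafe\nS11"}).encode()
        mock_resp = MagicMock()
        mock_resp.read.return_value = fake_body
        mock_resp.__enter__.return_value = mock_resp

        with patch("agent.safety_filter.urllib.request.urlopen", return_value=mock_resp):
            result = f.check_input("I want to hurt myself")

        self.assertEqual(result.verdict, SafetyVerdict.UNSAFE)
        self.assertIn({"code": "S11", "name": "Self-Harm"}, result.categories)
```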
tests/test_telegram_thread_routing.py (new file, 162 lines)
@@ -0,0 +1,162 @@
"""Tests for Telegram thread-aware session routing.

Verifies that messages in different threads/topics get independent
conversation histories.
"""

import pytest
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))

from gateway.session import build_session_key, SessionSource
from gateway.platforms.base import Platform


class TestThreadSessionKey:
    """Verify session keys include thread_id for isolation."""

    def test_dm_with_thread_gets_unique_key(self):
        source = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="123456",
            chat_type="dm",
            thread_id="100",
        )
        key = build_session_key(source)
        assert "123456" in key
        assert "100" in key
        assert key == "agent:main:telegram:dm:123456:100"

    def test_dm_without_thread_uses_chat_only(self):
        source = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="123456",
            chat_type="dm",
        )
        key = build_session_key(source)
        assert key == "agent:main:telegram:dm:123456"
        assert ":100" not in key

    def test_different_threads_different_keys(self):
        source_a = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="123456",
            chat_type="dm",
            thread_id="100",
        )
        source_b = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="123456",
            chat_type="dm",
            thread_id="200",
        )
        key_a = build_session_key(source_a)
        key_b = build_session_key(source_b)
        assert key_a != key_b

    def test_same_thread_same_key(self):
        source_a = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="123456",
            chat_type="dm",
            thread_id="100",
        )
        source_b = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="123456",
            chat_type="dm",
            thread_id="100",
        )
        assert build_session_key(source_a) == build_session_key(source_b)

    def test_group_with_thread_includes_thread(self):
        source = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="789",
            chat_type="group",
            thread_id="50",
            user_id="user1",
        )
        key = build_session_key(source)
        assert "789" in key
        assert "50" in key

    def test_group_without_thread_isolates_by_user(self):
        source = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="789",
            chat_type="group",
            user_id="user1",
        )
        key = build_session_key(source, group_sessions_per_user=True)
        assert "789" in key
        assert "user1" in key

    def test_group_thread_shared_across_users(self):
        """In threads, all participants share the same session by default."""
        source_a = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="789",
            chat_type="group",
            thread_id="50",
            user_id="user1",
        )
        source_b = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="789",
            chat_type="group",
            thread_id="50",
            user_id="user2",
        )
        key_a = build_session_key(source_a, thread_sessions_per_user=False)
        key_b = build_session_key(source_b, thread_sessions_per_user=False)
        assert key_a == key_b  # Shared session in thread

    def test_group_thread_per_user_when_enabled(self):
        """With thread_sessions_per_user=True, users get isolated sessions."""
        source_a = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="789",
            chat_type="group",
            thread_id="50",
            user_id="user1",
        )
        source_b = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="789",
            chat_type="group",
            thread_id="50",
            user_id="user2",
        )
        key_a = build_session_key(source_a, thread_sessions_per_user=True)
        key_b = build_session_key(source_b, thread_sessions_per_user=True)
        assert key_a != key_b


class TestSessionSourceSerialization:
    """Verify SessionSource round-trips correctly with thread_id."""

    def test_thread_id_preserved_in_dict(self):
        source = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="123",
            chat_type="dm",
            thread_id="456",
        )
        d = source.to_dict()
        assert d["thread_id"] == "456"
        restored = SessionSource.from_dict(d)
        assert restored.thread_id == "456"

    def test_none_thread_id_preserved(self):
        source = SessionSource(
            platform=Platform.TELEGRAM,
            chat_id="123",
            chat_type="dm",
        )
        d = source.to_dict()
        assert d.get("thread_id") is None
        restored = SessionSource.from_dict(d)
        assert restored.thread_id is None
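
`gateway/session.py` and `gateway/platforms/base.py` are not part of this diff. For orientation only, here is a minimal, hypothetical sketch of `SessionSource` and `build_session_key` that would satisfy the assertions above, including the `agent:main:<platform>:<chat_type>:<chat_id>[:<thread_id>][:<user_id>]` key shape; the real modules may differ.

```python
# Hypothetical sketch inferred from the tests above; not the actual gateway code.
from dataclasses import dataclass, asdict
from enum import Enum
from typing import Optional


class Platform(Enum):
    TELEGRAM = "telegram"


@dataclass
class SessionSource:
    platform: Platform
    chat_id: str
    chat_type: str  # "dm" or "group"
    thread_id: Optional[str] = None
    user_id: Optional[str] = None

    def to_dict(self) -> dict:
        d = asdict(self)
        d["platform"] = self.platform.value
        return d

    @classmethod
    def from_dict(cls, d: dict) -> "SessionSource":
        return cls(
            platform=Platform(d["platform"]),
            chat_id=d["chat_id"],
            chat_type=d["chat_type"],
            thread_id=d.get("thread_id"),
            user_id=d.get("user_id"),
        )


def build_session_key(
    source: SessionSource,
    group_sessions_per_user: bool = False,
    thread_sessions_per_user: bool = False,
) -> str:
    parts = ["agent", "main", source.platform.value, source.chat_type, source.chat_id]
    if source.thread_id is not None:
        parts.append(source.thread_id)
        # Inside a thread, all participants share one session unless opted out.
        if thread_sessions_per_user and source.user_id:
            parts.append(source.user_id)
    elif group_sessions_per_user and source.user_id:
        parts.append(source.user_id)
    return ":".join(parts)
```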