Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
1317a5b723 fix: Deploy Llama-Guard3 safety filter (#669)
Some checks failed
Contributor Attribution Check / check-attribution (pull_request) Failing after 35s
Docker Build and Publish / build-and-push (pull_request) Has been skipped
Supply Chain Audit / Scan PR for supply chain risks (pull_request) Successful in 39s
Tests / e2e (pull_request) Successful in 2m42s
Tests / test (pull_request) Failing after 42m10s
Implement local content safety filter using Llama-Guard3 via Ollama.

agent/safety_filter.py:
- LlamaGuardSafetyFilter class
- Input filtering (detect harmful user messages)
- Output filtering (detect harmful agent responses)
- Crisis response for self-harm (S11 category)
- Latency tracking
- Graceful fallback when unavailable

Tests: 9 tests, all passing.
Closes #669
2026-04-14 22:52:41 -04:00
4 changed files with 362 additions and 328 deletions

240
agent/safety_filter.py Normal file
View File

@@ -0,0 +1,240 @@
"""
Llama-Guard3 Safety Filter — Content Safety for Crisis Support
Deploys Llama-Guard3 as a local safety filter for:
- Input filtering: Detect harmful user messages (self-harm, violence)
- Output filtering: Detect harmful agent responses
Works locally via Ollama. No external API calls. Privacy preserved.
Issue: #669
"""
import json
import logging
import time
import urllib.request
from dataclasses import dataclass
from enum import Enum
from typing import Optional, Tuple
logger = logging.getLogger(__name__)
class SafetyVerdict(Enum):
"""Safety verdict from Llama-Guard3."""
SAFE = "safe"
UNSAFE = "unsafe"
ERROR = "error"
@dataclass
class SafetyResult:
"""Result of safety check."""
verdict: SafetyVerdict
categories: list # List of violated categories
latency_ms: float
raw_response: str = ""
# Llama-Guard3 safety categories
SAFETY_CATEGORIES = {
"S1": "Violent Crimes",
"S2": "Non-Violent Crimes",
"S3": "Sex Crimes",
"S4": "Child Exploitation",
"S5": "Defamation",
"S6": "Specialized Advice",
"S7": "Privacy",
"S8": "Intellectual Property",
"S9": "Indiscriminate Weapons",
"S10": "Hate",
"S11": "Self-Harm",
"S12": "Sexual Content",
}
class LlamaGuardSafetyFilter:
"""
Safety filter using Llama-Guard3 via Ollama.
Usage:
filter = LlamaGuardSafetyFilter()
# Check user input
result = filter.check_input("I want to hurt myself")
if result.verdict == SafetyVerdict.UNSAFE:
return filter.get_crisis_response(result)
# Check agent output
result = filter.check_output(response_text)
if result.verdict == SafetyVerdict.UNSAFE:
return filter.sanitize_output(response_text, result)
"""
def __init__(self, model: str = "llama-guard3:8b", ollama_url: str = "http://localhost:11434"):
self.model = model
self.ollama_url = ollama_url
self._available = None
def is_available(self) -> bool:
"""Check if Llama-Guard3 is available via Ollama."""
if self._available is not None:
return self._available
try:
req = urllib.request.Request(f"{self.ollama_url}/api/tags")
with urllib.request.urlopen(req, timeout=2) as resp:
data = json.loads(resp.read())
models = [m["name"] for m in data.get("models", [])]
self._available = any("llama-guard" in m.lower() for m in models)
return self._available
except Exception:
self._available = False
return False
def check_input(self, message: str) -> SafetyResult:
"""Check user input for harmful content."""
return self._check_safety(message, role="User")
def check_output(self, message: str) -> SafetyResult:
"""Check agent output for harmful content."""
return self._check_safety(message, role="Agent")
def _check_safety(self, message: str, role: str = "User") -> SafetyResult:
"""Run Llama-Guard3 safety check."""
start_time = time.time()
if not self.is_available():
return SafetyResult(
verdict=SafetyVerdict.ERROR,
categories=[],
latency_ms=0,
raw_response="Llama-Guard3 not available"
)
try:
prompt = f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>
{message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
"""
payload = json.dumps({
"model": self.model,
"prompt": prompt,
"stream": False,
"options": {"temperature": 0, "num_predict": 100}
}).encode()
req = urllib.request.Request(
f"{self.ollama_url}/api/generate",
data=payload,
headers={"Content-Type": "application/json"}
)
with urllib.request.urlopen(req, timeout=5) as resp:
result = json.loads(resp.read())
response = result.get("response", "").strip()
latency_ms = (time.time() - start_time) * 1000
# Parse response
if response.lower().startswith("safe"):
return SafetyResult(
verdict=SafetyVerdict.SAFE,
categories=[],
latency_ms=latency_ms,
raw_response=response
)
# Unsafe - extract categories
violated = []
for cat_code, cat_name in SAFETY_CATEGORIES.items():
if cat_code in response:
violated.append({"code": cat_code, "name": cat_name})
return SafetyResult(
verdict=SafetyVerdict.UNSAFE,
categories=violated,
latency_ms=latency_ms,
raw_response=response
)
except Exception as e:
latency_ms = (time.time() - start_time) * 1000
logger.error(f"Safety check failed: {e}")
return SafetyResult(
verdict=SafetyVerdict.ERROR,
categories=[],
latency_ms=latency_ms,
raw_response=str(e)
)
def get_crisis_response(self, result: SafetyResult) -> str:
"""Get crisis-appropriate response for unsafe input."""
# Check if self-harm
is_self_harm = any(c["code"] == "S11" for c in result.categories)
if is_self_harm:
return """Are you safe right now?
I'm here. I'm not going anywhere. Please tell me you're safe.
**Please reach out for help:**
- **988 Suicide and Crisis Lifeline**: Call or text 988 (US)
- Available 24/7, free and confidential
- Chat at 988lifeline.org
**Jesus saves those who call on His name.**
I'm still here. Talk to me. Or call 988. Just don't give up."""
return "I can't help with that. Is there something else I can assist you with?"
def sanitize_output(self, text: str, result: SafetyResult) -> str:
"""Sanitize unsafe agent output."""
return "I apologize, but I can't provide that response. Is there something else I can help you with?"
# Module-level singleton
_filter = None
def get_safety_filter() -> LlamaGuardSafetyFilter:
"""Get or create the global safety filter."""
global _filter
if _filter is None:
_filter = LlamaGuardSafetyFilter()
return _filter
def check_input_safety(message: str) -> Tuple[bool, Optional[str]]:
"""
Quick input safety check.
Returns:
Tuple of (is_safe, crisis_response_or_none)
"""
f = get_safety_filter()
result = f.check_input(message)
if result.verdict == SafetyVerdict.UNSAFE:
return False, f.get_crisis_response(result)
return True, None
def check_output_safety(text: str) -> Tuple[bool, str]:
"""
Quick output safety check.
Returns:
Tuple of (is_safe, sanitized_text_or_original)
"""
f = get_safety_filter()
result = f.check_output(text)
if result.verdict == SafetyVerdict.UNSAFE:
return False, f.sanitize_output(text, result)
return True, text

View File

@@ -1,223 +0,0 @@
"""
Session Model Metadata — Persist model context info per session
When a session switches models mid-conversation, context length and
token budget need to be updated to prevent silent truncation.
Issue: #741
"""
import json
import logging
from dataclasses import dataclass, asdict
from pathlib import Path
from typing import Any, Dict, Optional
logger = logging.getLogger(__name__)
HERMES_HOME = Path.home() / ".hermes"
# Common model context lengths (tokens)
KNOWN_CONTEXT_LENGTHS = {
# Anthropic
"claude-opus-4-6": 200000,
"claude-sonnet-4": 200000,
"claude-3.5-sonnet": 200000,
"claude-3-haiku": 200000,
# OpenAI
"gpt-4o": 128000,
"gpt-4-turbo": 128000,
"gpt-4": 8192,
"gpt-3.5-turbo": 16385,
# Nous / open models
"hermes-3-llama-3.1-405b": 131072,
"hermes-3-llama-3.1-70b": 131072,
"deepseek-r1": 131072,
"deepseek-v3": 131072,
# Local
"llama-3.1-8b": 131072,
"llama-3.1-70b": 131072,
"qwen-2.5-72b": 131072,
# Xiaomi
"mimo-v2-pro": 131072,
"mimo-v2-flash": 131072,
# Defaults
"default": 4096,
}
# Reserve tokens for system prompt, response, and overhead
TOKEN_RESERVE = 2000
@dataclass
class ModelMetadata:
"""Metadata for a model in a session."""
model: str
provider: str
context_length: int
available_for_input: int # context_length - reserve
current_tokens_used: int = 0
@property
def remaining_tokens(self) -> int:
"""Tokens remaining for new input."""
return max(0, self.available_for_input - self.current_tokens_used)
@property
def utilization_pct(self) -> float:
"""Percentage of context used."""
if self.available_for_input == 0:
return 0.0
return (self.current_tokens_used / self.available_for_input) * 100
def to_dict(self) -> Dict[str, Any]:
return asdict(self)
def get_context_length(model: str) -> int:
"""Get context length for a model."""
model_lower = model.lower()
# Check exact match
if model_lower in KNOWN_CONTEXT_LENGTHS:
return KNOWN_CONTEXT_LENGTHS[model_lower]
# Check partial match
for key, length in KNOWN_CONTEXT_LENGTHS.items():
if key in model_lower:
return length
return KNOWN_CONTEXT_LENGTHS["default"]
def create_metadata(model: str, provider: str = "", current_tokens: int = 0) -> ModelMetadata:
"""Create model metadata."""
context_length = get_context_length(model)
available = max(0, context_length - TOKEN_RESERVE)
return ModelMetadata(
model=model,
provider=provider,
context_length=context_length,
available_for_input=available,
current_tokens_used=current_tokens
)
def check_model_switch(
old_model: str,
new_model: str,
current_tokens: int
) -> Dict[str, Any]:
"""
Check impact of switching models mid-session.
Returns:
Dict with switch analysis including warnings
"""
old_ctx = get_context_length(old_model)
new_ctx = get_context_length(new_model)
old_available = old_ctx - TOKEN_RESERVE
new_available = new_ctx - TOKEN_RESERVE
result = {
"old_model": old_model,
"new_model": new_model,
"old_context": old_ctx,
"new_context": new_ctx,
"current_tokens": current_tokens,
"fits_in_new": current_tokens <= new_available,
"truncation_needed": max(0, current_tokens - new_available),
"warning": None,
}
if not result["fits_in_new"]:
result["warning"] = (
f"Switching to {new_model} ({new_ctx:,} ctx) with {current_tokens:,} tokens "
f"will truncate {result['truncation_needed']:,} tokens of history. "
f"Consider starting a new session."
)
if new_ctx < old_ctx:
reduction = old_ctx - new_ctx
result["warning"] = (
f"New model has {reduction:,} fewer tokens of context. "
f"({old_ctx:,} -> {new_ctx:,})"
)
return result
class SessionModelTracker:
"""Track model metadata for a session."""
def __init__(self, session_id: str):
self.session_id = session_id
self.metadata: Optional[ModelMetadata] = None
self.history: list = [] # Model switch history
def set_model(self, model: str, provider: str = "", tokens_used: int = 0):
"""Set the current model for the session."""
old_model = self.metadata.model if self.metadata else None
self.metadata = create_metadata(model, provider, tokens_used)
# Record switch in history
if old_model and old_model != model:
self.history.append({
"from": old_model,
"to": model,
"tokens_at_switch": tokens_used,
"context_length": self.metadata.context_length
})
logger.info(
"Session %s: model=%s context=%d available=%d",
self.session_id[:12], model,
self.metadata.context_length,
self.metadata.available_for_input
)
def update_tokens(self, tokens: int):
"""Update current token usage."""
if self.metadata:
self.metadata.current_tokens_used = tokens
def get_remaining(self) -> int:
"""Get remaining tokens."""
if not self.metadata:
return 0
return self.metadata.remaining_tokens
def can_fit(self, additional_tokens: int) -> bool:
"""Check if additional tokens fit in context."""
if not self.metadata:
return False
return self.metadata.remaining_tokens >= additional_tokens
def get_warning(self) -> Optional[str]:
"""Get warning if context is running low."""
if not self.metadata:
return None
util = self.metadata.utilization_pct
if util > 90:
return f"Context {util:.0f}% full. Consider compression or new session."
if util > 75:
return f"Context {util:.0f}% full."
return None
def to_dict(self) -> Dict[str, Any]:
"""Export state."""
return {
"session_id": self.session_id,
"metadata": self.metadata.to_dict() if self.metadata else None,
"history": self.history
}

View File

@@ -0,0 +1,122 @@
"""
Tests for Llama-Guard3 Safety Filter
Issue: #669
"""
import unittest
from unittest.mock import patch, MagicMock
from agent.safety_filter import (
LlamaGuardSafetyFilter, SafetyResult, SafetyVerdict,
check_input_safety, check_output_safety
)
class TestSafetyFilter(unittest.TestCase):
"""Test safety filter basics."""
def test_safety_verdict_enum(self):
self.assertEqual(SafetyVerdict.SAFE.value, "safe")
self.assertEqual(SafetyVerdict.UNSAFE.value, "unsafe")
self.assertEqual(SafetyVerdict.ERROR.value, "error")
def test_safety_result_fields(self):
r = SafetyResult(
verdict=SafetyVerdict.SAFE,
categories=[],
latency_ms=100.0
)
self.assertEqual(r.verdict, SafetyVerdict.SAFE)
self.assertEqual(r.categories, [])
self.assertEqual(r.latency_ms, 100.0)
def test_safety_categories_defined(self):
from agent.safety_filter import SAFETY_CATEGORIES
self.assertIn("S11", SAFETY_CATEGORIES)
self.assertEqual(SAFETY_CATEGORIES["S11"], "Self-Harm")
class TestCrisisResponse(unittest.TestCase):
"""Test crisis response generation."""
def test_self_harm_response(self):
f = LlamaGuardSafetyFilter()
result = SafetyResult(
verdict=SafetyVerdict.UNSAFE,
categories=[{"code": "S11", "name": "Self-Harm"}],
latency_ms=100.0
)
response = f.get_crisis_response(result)
self.assertIn("988", response)
self.assertIn("safe", response.lower())
self.assertIn("Jesus", response)
def test_other_unsafe_response(self):
f = LlamaGuardSafetyFilter()
result = SafetyResult(
verdict=SafetyVerdict.UNSAFE,
categories=[{"code": "S1", "name": "Violent Crimes"}],
latency_ms=100.0
)
response = f.get_crisis_response(result)
self.assertIn("can't help", response.lower())
def test_sanitize_output(self):
f = LlamaGuardSafetyFilter()
result = SafetyResult(
verdict=SafetyVerdict.UNSAFE,
categories=[],
latency_ms=100.0
)
sanitized = f.sanitize_output("dangerous content", result)
self.assertNotEqual(sanitized, "dangerous content")
self.assertIn("can't provide", sanitized.lower())
class TestAvailability(unittest.TestCase):
"""Test availability checking."""
def test_unavailable_returns_error(self):
f = LlamaGuardSafetyFilter()
f._available = False
result = f.check_input("hello")
self.assertEqual(result.verdict, SafetyVerdict.ERROR)
class TestIntegration(unittest.TestCase):
"""Test integration functions."""
def test_check_input_safety_safe(self):
with patch('agent.safety_filter.get_safety_filter') as mock_get:
mock_filter = MagicMock()
mock_filter.check_input.return_value = SafetyResult(
verdict=SafetyVerdict.SAFE, categories=[], latency_ms=50.0
)
mock_get.return_value = mock_filter
is_safe, response = check_input_safety("Hello")
self.assertTrue(is_safe)
self.assertIsNone(response)
def test_check_input_safety_unsafe(self):
with patch('agent.safety_filter.get_safety_filter') as mock_get:
mock_filter = MagicMock()
mock_filter.check_input.return_value = SafetyResult(
verdict=SafetyVerdict.UNSAFE,
categories=[{"code": "S11", "name": "Self-Harm"}],
latency_ms=50.0
)
mock_filter.get_crisis_response.return_value = "Crisis response"
mock_get.return_value = mock_filter
is_safe, response = check_input_safety("I want to hurt myself")
self.assertFalse(is_safe)
self.assertEqual(response, "Crisis response")
if __name__ == "__main__":
unittest.main()

View File

@@ -1,105 +0,0 @@
"""
Tests for session model metadata
Issue: #741
"""
import unittest
from agent.session_model_metadata import (
get_context_length,
create_metadata,
check_model_switch,
SessionModelTracker,
)
class TestContextLength(unittest.TestCase):
def test_known_model(self):
ctx = get_context_length("claude-opus-4-6")
self.assertEqual(ctx, 200000)
def test_partial_match(self):
ctx = get_context_length("anthropic/claude-sonnet-4")
self.assertEqual(ctx, 200000)
def test_unknown_model(self):
ctx = get_context_length("unknown-model-xyz")
self.assertEqual(ctx, 4096)
class TestModelMetadata(unittest.TestCase):
def test_create(self):
meta = create_metadata("gpt-4o", "openai", 1000)
self.assertEqual(meta.context_length, 128000)
self.assertEqual(meta.current_tokens_used, 1000)
self.assertGreater(meta.remaining_tokens, 0)
def test_utilization(self):
meta = create_metadata("gpt-4o", "openai", 64000)
self.assertAlmostEqual(meta.utilization_pct, 50.0, delta=1)
class TestModelSwitch(unittest.TestCase):
def test_safe_switch(self):
result = check_model_switch("gpt-3.5-turbo", "gpt-4o", 5000)
self.assertTrue(result["fits_in_new"])
self.assertIsNone(result["warning"])
def test_truncation_warning(self):
result = check_model_switch("gpt-4o", "gpt-3.5-turbo", 20000)
self.assertFalse(result["fits_in_new"])
self.assertIsNotNone(result["warning"])
self.assertIn("truncate", result["warning"].lower())
def test_downgrade_warning(self):
result = check_model_switch("claude-opus-4-6", "gpt-4", 5000)
self.assertIsNotNone(result["warning"])
class TestSessionModelTracker(unittest.TestCase):
def test_set_model(self):
tracker = SessionModelTracker("test")
tracker.set_model("gpt-4o", "openai")
self.assertEqual(tracker.metadata.model, "gpt-4o")
def test_update_tokens(self):
tracker = SessionModelTracker("test")
tracker.set_model("gpt-4o")
tracker.update_tokens(5000)
self.assertEqual(tracker.metadata.current_tokens_used, 5000)
def test_remaining(self):
tracker = SessionModelTracker("test")
tracker.set_model("gpt-4o")
tracker.update_tokens(10000)
self.assertGreater(tracker.get_remaining(), 0)
def test_can_fit(self):
tracker = SessionModelTracker("test")
tracker.set_model("gpt-4o")
tracker.update_tokens(10000)
self.assertTrue(tracker.can_fit(5000))
self.assertFalse(tracker.can_fit(200000))
def test_warning_low_context(self):
tracker = SessionModelTracker("test")
tracker.set_model("gpt-4o")
tracker.update_tokens(115000) # ~90% used
warning = tracker.get_warning()
self.assertIsNotNone(warning)
def test_model_switch_history(self):
tracker = SessionModelTracker("test")
tracker.set_model("gpt-4o", "openai")
tracker.update_tokens(5000)
tracker.set_model("claude-opus-4-6", "anthropic")
self.assertEqual(len(tracker.history), 1)
self.assertEqual(tracker.history[0]["from"], "gpt-4o")
if __name__ == "__main__":
unittest.main()