Compare commits
2 Commits
fix/132
...
burn/101-1
| Author | SHA1 | Date | |
|---|---|---|---|
| 7a08fb764a | |||
| 4b022f0df4 |
409
crisis/ab_testing.py
Normal file
409
crisis/ab_testing.py
Normal file
@@ -0,0 +1,409 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Crisis Detection A/B Testing Framework
|
||||
|
||||
Allows testing different crisis detection algorithms with:
|
||||
- Feature flags for algorithm A vs B
|
||||
- Logging of which variant triggered for each event
|
||||
- Metrics: false positive rate, detection latency per variant
|
||||
- Statistical significance testing
|
||||
|
||||
Usage:
|
||||
from crisis.ab_testing import ABTestManager, Variant
|
||||
|
||||
manager = ABTestManager()
|
||||
result = manager.detect_with_variant("I'm feeling hopeless")
|
||||
print(result.variant, result.detection_result)
|
||||
|
||||
# Get metrics
|
||||
metrics = manager.get_metrics()
|
||||
print(metrics.false_positive_rate_a, metrics.false_positive_rate_b)
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from datetime import datetime, timezone
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple
|
||||
from uuid import uuid4
|
||||
|
||||
from .detect import detect_crisis, CrisisDetectionResult
|
||||
|
||||
|
||||
class Variant(Enum):
    """Identifies which detection algorithm handled a request."""

    A = "A"  # Control arm: the current production algorithm
    B = "B"  # Treatment arm: the candidate algorithm under test
|
||||
|
||||
@dataclass
class ABTestConfig:
    """Settings that control how the A/B test behaves."""

    # Master switch; when False every request is routed to variant A.
    enabled: bool = True
    # Fraction of traffic sent to variant B (0.5 means an even split).
    variant_b_percentage: float = 0.5
    # Event log path; falls back to ~/.the-door/ab_test_logs.jsonl.
    log_file: Optional[str] = None
    # Metrics output path; falls back to ~/.the-door/ab_metrics.json.
    metrics_file: Optional[str] = None
    # RNG seed for reproducible assignment behavior.
    seed: Optional[int] = None
|
||||
|
||||
|
||||
@dataclass
class DetectionEvent:
    """One detection request, tagged with its variant assignment."""

    event_id: str
    timestamp: str
    variant: str
    text_hash: str  # Only a hash of the input is kept, for privacy
    detected_level: str
    detected_score: float
    indicators: List[str]
    detection_latency_ms: float
    is_false_positive: Optional[bool] = None  # Stays None until manually labeled
    user_feedback: Optional[str] = None
|
||||
|
||||
|
||||
@dataclass
class VariantMetrics:
    """Aggregated results for one arm of the test."""

    total_detections: int = 0
    true_positives: int = 0
    false_positives: int = 0
    false_negatives: int = 0  # Only populated via manual labeling
    avg_latency_ms: float = 0.0
    p50_latency_ms: float = 0.0
    p95_latency_ms: float = 0.0
    p99_latency_ms: float = 0.0
    # Count of events per detected level (e.g. {"HIGH": 3}).
    level_distribution: Dict[str, int] = field(default_factory=dict)
    # How often each indicator string appeared.
    indicator_frequency: Dict[str, int] = field(default_factory=dict)
|
||||
|
||||
|
||||
@dataclass
class ABTestMetrics:
    """Full snapshot of an A/B test: both arms plus significance."""

    test_id: str
    start_time: str
    end_time: Optional[str] = None
    variant_a: VariantMetrics = field(default_factory=VariantMetrics)
    variant_b: VariantMetrics = field(default_factory=VariantMetrics)
    sample_size_a: int = 0
    sample_size_b: int = 0
    # Chi-squared statistic when enough samples exist (not a p-value).
    statistical_significance: Optional[float] = None
|
||||
|
||||
|
||||
class ABTestManager:
    """Runs an A/B test over crisis detection algorithms.

    Assigns each input to a variant, times the detection, logs a
    privacy-preserving event (only a hash of the text is stored), and
    aggregates per-variant metrics including latency percentiles and
    false-positive counts for manually labeled events.
    """

    def __init__(self, config: Optional[ABTestConfig] = None):
        """Create a manager.

        Args:
            config: Test configuration; defaults are used when omitted.
        """
        self.config = config or ABTestConfig()
        self.test_id = str(uuid4())[:8]
        self.events: List[DetectionEvent] = []

        # Default storage directory for logs and metrics.
        home = Path.home() / ".the-door"
        home.mkdir(exist_ok=True)

        self.log_file = Path(self.config.log_file or home / "ab_test_logs.jsonl")
        self.metrics_file = Path(self.config.metrics_file or home / "ab_metrics.json")

        # Seed the global RNG when reproducibility is requested.
        if self.config.seed is not None:
            random.seed(self.config.seed)

    def _assign_variant(self, text: str) -> Variant:
        """Assign a variant deterministically from the text content.

        Fix 1: a variant set via force_variant() was stored but never
        consulted, so forcing Variant.B had no effect; it now takes
        precedence over everything else.

        Fix 2: the builtin hash() is salted per process for strings
        (PYTHONHASHSEED), so assignments were only stable within a single
        run. A SHA-256 digest yields the same bucket in every process.
        """
        forced = getattr(self, "_forced_variant", None)
        if forced is not None:
            return forced

        if not self.config.enabled:
            return Variant.A

        import hashlib

        # Stable bucket in [0, 100): same text -> same variant, always.
        bucket = int(hashlib.sha256(text.encode()).hexdigest(), 16) % 100
        threshold = int(self.config.variant_b_percentage * 100)
        return Variant.B if bucket < threshold else Variant.A

    def _detect_variant_a(self, text: str) -> CrisisDetectionResult:
        """Variant A: the current production algorithm (control)."""
        return detect_crisis(text)

    def _detect_variant_b(self, text: str) -> CrisisDetectionResult:
        """Variant B: alternative detection algorithm (treatment).

        Placeholder implementation: reuses the base algorithm but upgrades
        high-confidence results to CRITICAL when a critical indicator
        pattern matches. In practice this would be a completely different
        algorithm.
        """
        result = detect_crisis(text)

        # Variant B is more aggressive on high-confidence detections.
        if result.score > 0.7 and result.level != "CRITICAL":
            from .detect import CRITICAL_INDICATORS
            import re

            for pattern in CRITICAL_INDICATORS:
                if re.search(pattern, text, re.IGNORECASE):
                    # Critical indicator present: escalate the result.
                    return CrisisDetectionResult(
                        level="CRITICAL",
                        score=result.score,
                        indicators=result.indicators,
                        matched_patterns=result.matched_patterns,
                        recommended_action="immediate_intervention"
                    )

        return result

    def detect_with_variant(self, text: str, user_id: Optional[str] = None) -> Tuple[Variant, CrisisDetectionResult, float]:
        """Run crisis detection under the A/B test.

        Args:
            text: The message to analyze.
            user_id: Optional caller identifier (currently unused in logs).

        Returns:
            Tuple of (variant, detection_result, latency_ms).
        """
        if not self.config.enabled:
            # Testing disabled: honor a forced variant if one was set
            # (previously force_variant(Variant.B) still ran variant A),
            # otherwise fall back to the control algorithm. No event is
            # logged on this path, matching prior behavior.
            variant = getattr(self, "_forced_variant", Variant.A)
            start = time.time()
            if variant == Variant.B:
                result = self._detect_variant_b(text)
            else:
                result = self._detect_variant_a(text)
            latency = (time.time() - start) * 1000
            return variant, result, latency

        variant = self._assign_variant(text)

        # Time only the detection call itself.
        start = time.time()
        if variant == Variant.A:
            result = self._detect_variant_a(text)
        else:
            result = self._detect_variant_b(text)
        latency_ms = (time.time() - start) * 1000

        self._log_event(variant, text, result, latency_ms, user_id)

        return variant, result, latency_ms

    def _log_event(self, variant: Variant, text: str, result: CrisisDetectionResult,
                   latency_ms: float, user_id: Optional[str] = None):
        """Record a detection event in memory and append it to the log file.

        Only a SHA-256 prefix of the text is persisted — never the text.
        """
        import hashlib

        text_hash = hashlib.sha256(text.encode()).hexdigest()[:16]

        event = DetectionEvent(
            event_id=str(uuid4())[:8],
            timestamp=datetime.now(timezone.utc).isoformat(),
            variant=variant.value,
            text_hash=text_hash,
            detected_level=result.level,
            detected_score=result.score,
            indicators=result.indicators[:5],  # Cap stored indicators
            detection_latency_ms=round(latency_ms, 2),
        )

        self.events.append(event)

        # Best-effort append; logging must never break detection.
        try:
            with open(self.log_file, "a") as f:
                f.write(json.dumps(asdict(event)) + "\n")
        except Exception:
            pass

    def label_event(self, event_id: str, is_false_positive: bool, feedback: Optional[str] = None):
        """Attach a true/false-positive label (and optional feedback) to an event."""
        for event in self.events:
            if event.event_id == event_id:
                event.is_false_positive = is_false_positive
                event.user_feedback = feedback
                break

        # Rewrite the log so the label persists.
        self._save_events()

    def _save_events(self):
        """Rewrite the full event log file (best effort)."""
        try:
            with open(self.log_file, "w") as f:
                for event in self.events:
                    f.write(json.dumps(asdict(event)) + "\n")
        except Exception:
            pass

    def get_metrics(self) -> ABTestMetrics:
        """Calculate, persist, and return metrics for both variants."""
        metrics = ABTestMetrics(
            test_id=self.test_id,
            start_time=self.events[0].timestamp if self.events else datetime.now(timezone.utc).isoformat(),
            end_time=datetime.now(timezone.utc).isoformat(),
        )

        a_events = [e for e in self.events if e.variant == "A"]
        b_events = [e for e in self.events if e.variant == "B"]

        metrics.sample_size_a = len(a_events)
        metrics.sample_size_b = len(b_events)

        if a_events:
            metrics.variant_a = self._calculate_variant_metrics(a_events)
        if b_events:
            metrics.variant_b = self._calculate_variant_metrics(b_events)

        # Only attempt a significance estimate with a reasonable sample.
        if len(a_events) >= 30 and len(b_events) >= 30:
            metrics.statistical_significance = self._calculate_significance(a_events, b_events)

        self._save_metrics(metrics)

        return metrics

    def _calculate_variant_metrics(self, events: List[DetectionEvent]) -> VariantMetrics:
        """Aggregate metrics for one variant's events.

        Percentiles use the nearest-rank method: the smallest sorted value
        covering pct% of the samples. The previous int(n * q) index was
        off by one (e.g. the median of 10 samples picked the 6th value
        rather than the 5th).
        """
        if not events:
            return VariantMetrics()

        latencies = sorted(e.detection_latency_ms for e in events)
        n = len(latencies)

        def rank(pct: int) -> int:
            # ceil(n * pct / 100) - 1, clamped to a valid index.
            return min(n - 1, max(0, (n * pct + 99) // 100 - 1))

        # Count of events per detected level.
        level_dist: Dict[str, int] = {}
        for e in events:
            level_dist[e.detected_level] = level_dist.get(e.detected_level, 0) + 1

        # Frequency of each indicator string.
        indicator_freq: Dict[str, int] = {}
        for e in events:
            for ind in e.indicators:
                indicator_freq[ind] = indicator_freq.get(ind, 0) + 1

        # True/false positives only count manually labeled events.
        labeled = [e for e in events if e.is_false_positive is not None]
        fp_count = sum(1 for e in labeled if e.is_false_positive)
        tp_count = sum(1 for e in labeled if not e.is_false_positive)

        return VariantMetrics(
            total_detections=len(events),
            true_positives=tp_count,
            false_positives=fp_count,
            avg_latency_ms=sum(latencies) / n,
            p50_latency_ms=latencies[rank(50)],
            p95_latency_ms=latencies[rank(95)],
            p99_latency_ms=latencies[rank(99)],
            level_distribution=level_dist,
            indicator_frequency=dict(sorted(indicator_freq.items(), key=lambda x: -x[1])[:10]),
        )

    def _calculate_significance(self, a_events: List[DetectionEvent],
                                b_events: List[DetectionEvent]) -> Optional[float]:
        """Return a chi-squared statistic over the level distributions.

        NOTE: this is the raw chi-squared value, NOT a p-value (scipy is
        unavailable here). Higher values indicate a larger difference
        between the two variants' level distributions.
        """
        try:
            a_levels = {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "NONE": 0}
            b_levels = {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "NONE": 0}

            for e in a_events:
                a_levels[e.detected_level] = a_levels.get(e.detected_level, 0) + 1
            for e in b_events:
                b_levels[e.detected_level] = b_levels.get(e.detected_level, 0) + 1

            total_a = len(a_events)
            total_b = len(b_events)

            if total_a == 0 or total_b == 0:
                return None

            # Expected cell counts assume both arms share one distribution.
            chi_sq = 0
            for level in a_levels:
                expected_a = (a_levels[level] + b_levels[level]) * total_a / (total_a + total_b)
                expected_b = (a_levels[level] + b_levels[level]) * total_b / (total_a + total_b)

                if expected_a > 0:
                    chi_sq += (a_levels[level] - expected_a) ** 2 / expected_a
                if expected_b > 0:
                    chi_sq += (b_levels[level] - expected_b) ** 2 / expected_b

            return chi_sq

        except Exception:
            return None

    def _save_metrics(self, metrics: ABTestMetrics):
        """Persist metrics as JSON (best effort)."""
        try:
            with open(self.metrics_file, "w") as f:
                json.dump(asdict(metrics), f, indent=2)
        except Exception:
            pass

    def get_variant_distribution(self) -> Dict[str, int]:
        """Return how many recorded events went to each variant."""
        dist = {"A": 0, "B": 0}
        for event in self.events:
            dist[event.variant] = dist.get(event.variant, 0) + 1
        return dist

    def force_variant(self, variant: Variant):
        """Force all subsequent detections to use a specific variant.

        Disables normal assignment; undone by reset().
        """
        self.config.enabled = False
        self._forced_variant = variant

    def reset(self):
        """Clear events, re-enable the test, and drop any forced variant."""
        self.events = []
        self.config.enabled = True
        if hasattr(self, '_forced_variant'):
            delattr(self, '_forced_variant')
|
||||
|
||||
|
||||
# Convenience function for easy integration
|
||||
_default_manager = None
|
||||
|
||||
def get_ab_manager() -> ABTestManager:
    """Get the default A/B test manager instance.

    Lazily creates a single module-level ABTestManager on first call and
    returns the same instance thereafter (process-wide singleton).
    """
    global _default_manager
    if _default_manager is None:
        _default_manager = ABTestManager()
    return _default_manager
|
||||
|
||||
def detect_with_ab(text: str, user_id: Optional[str] = None) -> dict:
    """Run crisis detection through the shared A/B test manager.

    Returns:
        dict with the assigned variant, the detection payload, the
        detection latency in ms, and the current test id.
    """
    manager = get_ab_manager()
    variant, result, latency = manager.detect_with_variant(text, user_id)

    detection = {
        "level": result.level,
        "score": result.score,
        "indicators": result.indicators,
        "recommended_action": result.recommended_action,
    }
    return {
        "variant": variant.value,
        "detection": detection,
        "latency_ms": round(latency, 2),
        "test_id": manager.test_id,
    }
|
||||
@@ -1,155 +0,0 @@
|
||||
"""
|
||||
Image Content Screening for Self-Harm Indicators
|
||||
|
||||
Screens images sent in chat for self-harm content using vision models.
|
||||
Privacy-preserving: analyzes in-memory, discards after scoring. No image retention.
|
||||
"""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger("the-door.image_screening")
|
||||
|
||||
|
||||
class RiskLevel(str, Enum):
    """Screening outcome severity, ordered from benign to urgent."""

    SAFE = "safe"
    CONCERNING = "concerning"
    CRITICAL = "critical"
|
||||
|
||||
|
||||
@dataclass
class ImageScreenResult:
    """Outcome of screening a single image."""

    risk_level: RiskLevel  # Overall severity bucket
    confidence: float      # Model confidence, expected in [0, 1]
    categories: list       # Indicator categories reported by the model
    reasoning: str         # Model's free-text justification
    model_used: str        # Identifier of the backend that produced this

    @property
    def needs_crisis_response(self):
        """True when the image warrants an immediate crisis response."""
        return self.risk_level == RiskLevel.CRITICAL

    @property
    def needs_followup(self):
        """True when the image should at least trigger a follow-up."""
        return self.risk_level in (RiskLevel.CONCERNING, RiskLevel.CRITICAL)

    def to_dict(self):
        """Serialize to a JSON-friendly dict (confidence rounded to 3 dp)."""
        return {
            "risk_level": self.risk_level.value,
            "confidence": round(self.confidence, 3),
            "categories": self.categories,
            "reasoning": self.reasoning,
            "model_used": self.model_used,
        }
|
||||
|
||||
|
||||
SCREENING_PROMPT = """Analyze this image for indicators of self-harm or crisis. Look for:
|
||||
|
||||
1. Self-harm wounds or injuries
|
||||
2. Medication/pills in concerning quantities
|
||||
3. Farewell-type imagery (empty rooms, items laid out, goodbye notes)
|
||||
4. Screenshots of crisis-related searches
|
||||
5. Images conveying hopelessness, isolation, or despair
|
||||
|
||||
Respond in JSON format ONLY:
|
||||
{"risk_level": "safe"|"concerning"|"critical", "confidence": 0.0-1.0, "categories": [], "reasoning": ""}
|
||||
|
||||
CRITICAL: imminent self-harm indicators
|
||||
CONCERNING: ambiguous but worrying
|
||||
SAFE: no indicators detected"""
|
||||
|
||||
|
||||
def _analyze_with_ollama(image_b64, model="gemma3:4b"):
    """Ask a local Ollama vision model to screen a base64-encoded image.

    Returns an ImageScreenResult, or None when the server is unreachable,
    the request times out, or the reply contains no parseable JSON object.
    """
    try:
        import urllib.request
        payload = json.dumps({
            "model": model,
            "messages": [{
                "role": "user",
                "content": SCREENING_PROMPT,
                "images": [image_b64],
            }],
            "stream": False,
            "options": {"temperature": 0.1},  # Low temperature for stable JSON
        }).encode()
        req = urllib.request.Request(
            "http://localhost:11434/api/chat",
            data=payload,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        # Fix: close the HTTP response (previously leaked the connection).
        with urllib.request.urlopen(req, timeout=30) as resp:
            data = json.loads(resp.read())
        content = data.get("message", {}).get("content", "")
        # The model may wrap the JSON in prose; extract the outermost {...}.
        json_start = content.find("{")
        json_end = content.rfind("}") + 1
        if json_start == -1 or json_end <= json_start:
            return None
        result = json.loads(content[json_start:json_end])
        return ImageScreenResult(
            risk_level=RiskLevel(result.get("risk_level", "safe")),
            confidence=float(result.get("confidence", 0.5)),
            categories=result.get("categories", []),
            reasoning=result.get("reasoning", ""),
            model_used=f"ollama:{model}",
        )
    except Exception as e:
        # Any failure degrades to the caller's fallback path.
        logger.warning(f"Ollama vision analysis failed: {e}")
        return None
|
||||
|
||||
|
||||
def _analyze_fallback(image_bytes):
    """Return a conservative result when no vision model is available.

    Defaults to SAFE but with deliberately low confidence (0.2) and an
    "unanalyzed" category, so downstream consumers can tell the image was
    never actually inspected.
    """
    return ImageScreenResult(
        risk_level=RiskLevel.SAFE,
        confidence=0.2,
        categories=["unanalyzed"],
        reasoning="No vision model available. Defaulting to safe with low confidence.",
        model_used="fallback:heuristic",
    )
|
||||
|
||||
|
||||
def screen_image(image_data, use_vision_model=True, model="gemma3:4b"):
    """Screen an image for self-harm indicators.

    Accepts raw bytes or a base64 string; analysis happens entirely in
    memory and nothing is retained afterwards.
    """
    # Normalize input into both raw bytes and a base64 string.
    if isinstance(image_data, bytes):
        raw = image_data
        encoded = base64.b64encode(image_data).decode()
    else:
        encoded = image_data
        raw = base64.b64decode(encoded)

    if use_vision_model:
        verdict = _analyze_with_ollama(encoded, model)
        if verdict:
            logger.info(f"Image screened: {verdict.risk_level.value} (conf: {verdict.confidence:.2f})")
            if verdict.needs_crisis_response:
                logger.warning(f"CRITICAL image: {verdict.reasoning}")
            return verdict

    # Vision model unavailable or returned nothing usable.
    return _analyze_fallback(raw)
|
||||
|
||||
|
||||
def handle_chat_image(image_data):
    """Handle an image from chat; return an action dict for the gateway."""
    screened = screen_image(image_data)

    # Pick the supportive response text matching the severity, if any.
    response_text = None
    if screened.risk_level == RiskLevel.CRITICAL:
        response_text = (
            "I noticed something concerning in the image you shared. "
            "If you or someone you know is in crisis, please reach out: "
            "988 Suicide and Crisis Lifeline (call or text 988). "
            "You are not alone."
        )
    elif screened.risk_level == RiskLevel.CONCERNING:
        response_text = (
            "I want to check in \u2014 how are you doing? "
            "If you need to talk to someone, the 988 Lifeline is available 24/7."
        )

    return {
        "result": screened.to_dict(),
        "show_crisis_overlay": screened.needs_crisis_response,
        "log_event": screened.needs_followup,
        "response_text": response_text,
    }
|
||||
453
tests/test_crisis_ab_testing.py
Normal file
453
tests/test_crisis_ab_testing.py
Normal file
@@ -0,0 +1,453 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Tests for Crisis Detection A/B Testing Framework.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
# Add crisis module to path
|
||||
import sys
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
|
||||
|
||||
from crisis.ab_testing import (
|
||||
ABTestManager,
|
||||
ABTestConfig,
|
||||
Variant,
|
||||
DetectionEvent,
|
||||
VariantMetrics,
|
||||
ABTestMetrics,
|
||||
get_ab_manager,
|
||||
detect_with_ab,
|
||||
)
|
||||
from crisis.detect import CrisisDetectionResult
|
||||
|
||||
|
||||
class TestABTestConfig:
    """Tests for the A/B test configuration dataclass."""

    def test_default_config(self):
        """Defaults: enabled, 50/50 split, no explicit file paths."""
        cfg = ABTestConfig()
        assert cfg.enabled is True
        assert cfg.variant_b_percentage == 0.5
        assert cfg.log_file is None
        assert cfg.metrics_file is None

    def test_custom_config(self):
        """Every field can be overridden through the constructor."""
        cfg = ABTestConfig(
            enabled=False,
            variant_b_percentage=0.3,
            log_file="/tmp/test.log",
            metrics_file="/tmp/metrics.json",
            seed=42,
        )
        assert cfg.enabled is False
        assert cfg.variant_b_percentage == 0.3
        assert cfg.log_file == "/tmp/test.log"
        assert cfg.seed == 42
|
||||
|
||||
|
||||
class TestVariant:
    """Tests for the variant enum."""

    def test_values(self):
        """Members serialize to their single-letter names."""
        assert Variant.A.value == "A"
        assert Variant.B.value == "B"

    def test_comparison(self):
        """A member equals itself and differs from the other arm."""
        assert Variant.A != Variant.B
        assert Variant.A == Variant.A
|
||||
|
||||
|
||||
class TestDetectionEvent:
    """Tests for the detection event dataclass."""

    def test_creation(self):
        """Required fields are stored; label fields default to None."""
        evt = DetectionEvent(
            event_id="test-123",
            timestamp="2026-01-01T00:00:00Z",
            variant="A",
            text_hash="abc123",
            detected_level="HIGH",
            detected_score=0.8,
            indicators=["despair"],
            detection_latency_ms=5.2,
        )
        assert evt.event_id == "test-123"
        assert evt.variant == "A"
        assert evt.detected_level == "HIGH"
        assert evt.is_false_positive is None

    def test_to_dict(self):
        """The event's __dict__ exposes the logged field names."""
        evt = DetectionEvent(
            event_id="test-123",
            timestamp="2026-01-01T00:00:00Z",
            variant="B",
            text_hash="def456",
            detected_level="CRITICAL",
            detected_score=0.95,
            indicators=["suicide"],
            detection_latency_ms=3.1,
        )
        payload = evt.__dict__
        assert "event_id" in payload
        assert "variant" in payload
|
||||
|
||||
|
||||
class TestABTestManager:
    """Tests for the A/B test manager.

    Fixes in this revision:
      - The class defined test_variant_distribution TWICE; the second
        definition shadowed the first, so the assignment-distribution test
        never ran. The second is renamed test_get_variant_distribution.
      - A stray `assert detect_with_ab` (asserting a function object is
        truthy — always passes) was removed from that test.
    """

    def test_initialization(self, tmp_path):
        """A fresh manager is enabled, has a test id, and no events."""
        config = ABTestConfig(
            log_file=str(tmp_path / "test.log"),
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        assert manager.config.enabled is True
        assert manager.test_id is not None
        assert len(manager.events) == 0

    def test_variant_assignment_deterministic(self):
        """Same text should always get the same variant."""
        manager = ABTestManager(ABTestConfig(seed=42))

        text = "I feel hopeless about everything"
        variant1 = manager._assign_variant(text)
        variant2 = manager._assign_variant(text)

        assert variant1 == variant2

    def test_variant_distribution(self):
        """Assignment should split roughly 50/50 across many inputs."""
        manager = ABTestManager(ABTestConfig(seed=42))

        variants = []
        for i in range(100):
            text = f"Test message number {i}"
            variants.append(manager._assign_variant(text))

        a_count = sum(1 for v in variants if v == Variant.A)
        b_count = sum(1 for v in variants if v == Variant.B)

        # Roughly 50/50 (within 20% tolerance).
        assert 30 <= a_count <= 70
        assert 30 <= b_count <= 70

    def test_disabled_ab_testing(self, tmp_path):
        """When disabled, every assignment is variant A."""
        config = ABTestConfig(
            enabled=False,
            log_file=str(tmp_path / "test.log")
        )
        manager = ABTestManager(config)

        for i in range(10):
            text = f"Test message {i}"
            assert manager._assign_variant(text) == Variant.A

    @patch('crisis.ab_testing.detect_crisis')
    def test_detect_with_variant_a(self, mock_detect, tmp_path):
        """Detection runs the control algorithm when the test is disabled."""
        mock_detect.return_value = CrisisDetectionResult(
            level="HIGH",
            score=0.8,
            indicators=["despair"],
            matched_patterns=[],
            recommended_action="provide_resources"
        )

        config = ABTestConfig(
            enabled=False,  # Force variant A
            log_file=str(tmp_path / "test.log")
        )
        manager = ABTestManager(config)

        variant, result, latency = manager.detect_with_variant("I'm feeling hopeless")

        assert variant == Variant.A
        assert result.level == "HIGH"
        assert latency >= 0

    @patch('crisis.ab_testing.detect_crisis')
    def test_detect_with_variant_b(self, mock_detect, tmp_path):
        """With a 100% B split, detection eventually routes to variant B."""
        mock_detect.return_value = CrisisDetectionResult(
            level="MEDIUM",
            score=0.75,
            indicators=["no hope"],
            matched_patterns=[],
            recommended_action="provide_resources"
        )

        config = ABTestConfig(
            variant_b_percentage=1.0,  # Always variant B
            log_file=str(tmp_path / "test.log")
        )
        manager = ABTestManager(config)

        for i in range(20):
            text = f"Test message {i}"
            variant, result, latency = manager.detect_with_variant(text)
            if variant == Variant.B:
                break

        assert variant == Variant.B

    def test_event_logging(self, tmp_path):
        """Each detection appends a JSON line to the log file."""
        log_file = tmp_path / "test.jsonl"
        config = ABTestConfig(
            log_file=str(log_file),
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        with patch('crisis.ab_testing.detect_crisis') as mock_detect:
            mock_detect.return_value = CrisisDetectionResult(
                level="LOW",
                score=0.3,
                indicators=[],
                matched_patterns=[],
                recommended_action="none"
            )
            manager.detect_with_variant("Test message")

        assert log_file.exists()
        with open(log_file) as f:
            lines = f.readlines()
        assert len(lines) >= 1

        entry = json.loads(lines[0])
        assert "event_id" in entry
        assert "variant" in entry
        assert "detected_level" in entry

    def test_label_event(self, tmp_path):
        """Labeling an event updates its false-positive flag and feedback."""
        config = ABTestConfig(
            log_file=str(tmp_path / "test.log"),
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        event = DetectionEvent(
            event_id="test-123",
            timestamp="2026-01-01T00:00:00Z",
            variant="A",
            text_hash="abc123",
            detected_level="HIGH",
            detected_score=0.8,
            indicators=["despair"],
            detection_latency_ms=5.0
        )
        manager.events.append(event)

        manager.label_event("test-123", is_false_positive=True, feedback="Not actually crisis")

        assert event.is_false_positive is True
        assert event.user_feedback == "Not actually crisis"

    def test_get_metrics_empty(self, tmp_path):
        """Metrics on an empty manager report zero samples."""
        config = ABTestConfig(
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        metrics = manager.get_metrics()
        assert metrics.sample_size_a == 0
        assert metrics.sample_size_b == 0
        assert metrics.variant_a.total_detections == 0

    def test_get_metrics_with_events(self, tmp_path):
        """Metrics aggregate latency and level data for both variants."""
        config = ABTestConfig(
            log_file=str(tmp_path / "test.log"),
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        for i in range(10):
            event = DetectionEvent(
                event_id=f"event-{i}",
                timestamp="2026-01-01T00:00:00Z",
                variant="A" if i % 2 == 0 else "B",
                text_hash=f"hash-{i}",
                detected_level="HIGH" if i % 3 == 0 else "MEDIUM",
                detected_score=0.7 + (i % 3) * 0.1,
                indicators=["despair"] if i % 2 == 0 else [],
                detection_latency_ms=3.0 + i * 0.5
            )
            # Label a subset so positive/negative counting has data.
            if i % 4 == 0:
                event.is_false_positive = True
            elif i % 4 == 1:
                event.is_false_positive = False

            manager.events.append(event)

        metrics = manager.get_metrics()

        assert metrics.sample_size_a > 0
        assert metrics.sample_size_b > 0

        assert metrics.variant_a.avg_latency_ms > 0
        assert metrics.variant_b.avg_latency_ms > 0

        assert len(metrics.variant_a.level_distribution) > 0

    def test_get_variant_distribution(self, tmp_path):
        """get_variant_distribution() counts recorded events per variant."""
        config = ABTestConfig()
        manager = ABTestManager(config)

        # Three events in A, two in B.
        for i in range(5):
            event = DetectionEvent(
                event_id=f"event-{i}",
                timestamp="2026-01-01T00:00:00Z",
                variant="A" if i < 3 else "B",
                text_hash=f"hash-{i}",
                detected_level="LOW",
                detected_score=0.5,
                indicators=[],
                detection_latency_ms=2.0
            )
            manager.events.append(event)

        dist = manager.get_variant_distribution()
        assert dist["A"] == 3
        assert dist["B"] == 2

    def test_force_variant(self, tmp_path):
        """After forcing, every assignment is the forced variant."""
        config = ABTestConfig()
        manager = ABTestManager(config)

        manager.force_variant(Variant.B)

        for i in range(5):
            text = f"Test message {i}"
            assert manager._assign_variant(text) == Variant.B

    def test_reset(self, tmp_path):
        """Reset clears recorded events and re-enables the test."""
        config = ABTestConfig(
            log_file=str(tmp_path / "test.log"),
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        for i in range(3):
            event = DetectionEvent(
                event_id=f"event-{i}",
                timestamp="2026-01-01T00:00:00Z",
                variant="A",
                text_hash=f"hash-{i}",
                detected_level="LOW",
                detected_score=0.5,
                indicators=[],
                detection_latency_ms=2.0
            )
            manager.events.append(event)

        assert len(manager.events) == 3

        manager.reset()

        assert len(manager.events) == 0
        assert manager.config.enabled is True
|
||||
|
||||
|
||||
class TestConvenienceFunctions:
    """Tests for the module-level convenience helpers."""

    def test_get_ab_manager(self):
        """The accessor hands back a usable ABTestManager instance."""
        assert isinstance(get_ab_manager(), ABTestManager)

    @patch('crisis.ab_testing.detect_crisis')
    def test_detect_with_ab(self, mock_detect):
        """detect_with_ab wraps the detector and adds A/B bookkeeping."""
        mock_detect.return_value = CrisisDetectionResult(
            level="HIGH",
            score=0.8,
            indicators=["despair"],
            matched_patterns=[],
            recommended_action="provide_resources",
        )

        result = detect_with_ab("I'm feeling hopeless")

        # Every piece of A/B metadata must appear in the returned payload.
        for key in ("variant", "detection", "latency_ms", "test_id"):
            assert key in result
        # The detector's verdict is passed through unchanged.
        assert result["detection"]["level"] == "HIGH"
|
||||
|
||||
|
||||
class TestMetricsCalculation:
    """Edge cases of per-variant metrics calculation."""

    def test_percentile_calculation(self, tmp_path):
        """Percentiles and the mean follow from known latencies 1..10 ms."""
        manager = ABTestManager(ABTestConfig())

        # Ten variant-A events carrying latencies 1.0, 2.0, ..., 10.0 ms.
        events = [
            DetectionEvent(
                event_id=f"event-{n}",
                timestamp="2026-01-01T00:00:00Z",
                variant="A",
                text_hash=f"hash-{n}",
                detected_level="LOW",
                detected_score=0.5,
                indicators=[],
                detection_latency_ms=float(n + 1),
            )
            for n in range(10)
        ]

        metrics = manager._calculate_variant_metrics(events)

        assert metrics.p50_latency_ms == 5.0   # median of 1..10
        assert metrics.p95_latency_ms == 10.0  # 95th percentile == max here
        assert metrics.avg_latency_ms == 5.5   # arithmetic mean of 1..10

    def test_empty_variant_metrics(self, tmp_path):
        """Metrics over an empty event list come back zeroed out."""
        manager = ABTestManager(ABTestConfig())

        metrics = manager._calculate_variant_metrics([])

        assert metrics.total_detections == 0
        assert metrics.avg_latency_ms == 0
        assert metrics.level_distribution == {}
|
||||
@@ -1,84 +0,0 @@
|
||||
"""Tests for image content screening module."""
|
||||
|
||||
import json
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
from image_screening import (
|
||||
RiskLevel,
|
||||
ImageScreenResult,
|
||||
screen_image,
|
||||
handle_chat_image,
|
||||
_analyze_fallback,
|
||||
)
|
||||
|
||||
|
||||
class TestImageScreenResult:
    """Behaviour of the ImageScreenResult value object per risk level."""

    def test_safe_result(self):
        # SAFE: no crisis response, no follow-up, serializes as "safe".
        outcome = ImageScreenResult(
            risk_level=RiskLevel.SAFE, confidence=0.95,
            categories=[], reasoning="No indicators", model_used="test"
        )
        assert not outcome.needs_crisis_response
        assert not outcome.needs_followup
        assert outcome.to_dict()["risk_level"] == "safe"

    def test_critical_result(self):
        # CRITICAL: both the crisis response and the follow-up fire.
        outcome = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.9,
            categories=["wounds"], reasoning="Detected", model_used="test"
        )
        assert outcome.needs_crisis_response
        assert outcome.needs_followup

    def test_concerning_result(self):
        # CONCERNING: follow-up only, no full crisis response.
        outcome = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Ambiguous", model_used="test"
        )
        assert not outcome.needs_crisis_response
        assert outcome.needs_followup
|
||||
|
||||
|
||||
class TestScreenImage:
    """screen_image behaviour when the vision model is disabled."""

    def test_fallback_returns_safe(self):
        # The heuristic fallback defaults to SAFE at low confidence.
        screened = screen_image(b"fake_image_data", use_vision_model=False)
        assert screened.risk_level == RiskLevel.SAFE
        assert screened.model_used == "fallback:heuristic"
        assert screened.confidence < 0.5

    def test_base64_input(self):
        # Base64-encoded payloads are accepted as well as raw bytes.
        import base64
        encoded = base64.b64encode(b"fake").decode()
        screened = screen_image(encoded, use_vision_model=False)
        assert screened.risk_level == RiskLevel.SAFE
|
||||
|
||||
|
||||
class TestHandleChatImage:
    """handle_chat_image routing for each screening outcome."""

    def test_safe_image_no_overlay(self):
        # Safe images: no overlay and no canned response text.
        action = handle_chat_image(b"safe_image")
        assert not action["show_crisis_overlay"]
        assert action["response_text"] is None

    @patch("image_screening._analyze_with_ollama")
    def test_critical_image_shows_overlay(self, mock_ollama):
        # CRITICAL verdicts surface the overlay, a 988 hotline mention,
        # and an event-log entry.
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.95,
            categories=["wounds"], reasoning="Self-harm detected",
            model_used="ollama:gemma3:4b"
        )

        action = handle_chat_image(b"concerning_image")

        assert action["show_crisis_overlay"]
        assert "988" in action["response_text"]
        assert action["log_event"]

    @patch("image_screening._analyze_with_ollama")
    def test_concerning_image_followup(self, mock_ollama):
        # CONCERNING verdicts log and check in, but skip the overlay.
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Empty room",
            model_used="ollama:gemma3:4b"
        )

        action = handle_chat_image(b"maybe_concerning")

        assert not action["show_crisis_overlay"]
        assert action["log_event"]
        assert "check in" in action["response_text"]
|
||||
Reference in New Issue
Block a user