Compare commits
1 Commits
fix/59
...
burn/101-1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
722feae199 |
387
crisis/ab_test.py
Normal file
387
crisis/ab_test.py
Normal file
@@ -0,0 +1,387 @@
|
||||
"""
|
||||
Crisis Detection A/B Testing Framework for the-door.
|
||||
|
||||
Provides feature-flagged A/B testing for crisis detection algorithms.
|
||||
Variant A: Current canonical detector (crisis/detect.py)
|
||||
Variant B: Enhanced detector with contextual scoring (configurable)
|
||||
|
||||
Logs which variant triggered for each event and tracks metrics:
|
||||
- False positive rate per variant
|
||||
- Detection latency per variant
|
||||
- Detection distribution per variant
|
||||
"""
|
||||
|
||||
import json
|
||||
import time
|
||||
import hashlib
|
||||
import os
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from typing import Dict, List, Optional, Callable, Any
|
||||
from datetime import datetime, timezone
|
||||
from enum import Enum
|
||||
|
||||
from crisis.detect import detect_crisis as detect_crisis_variant_a, CrisisDetectionResult
|
||||
|
||||
|
||||
class Variant(Enum):
    """Identifies which arm of the A/B test handled an event."""

    # Control arm: the current canonical detector.
    A = "A"
    # Treatment arm: the enhanced detector.
    B = "B"
|
||||
|
||||
|
||||
@dataclass
class ABTestConfig:
    """Configuration for A/B test."""
    enabled: bool = True  # Master switch; when False, detect() always runs variant A
    variant_b_percentage: float = 0.5  # Fraction of traffic routed to variant B (0.0-1.0)
    seed: Optional[str] = None  # Optional salt prepended to the text hash for deterministic assignment
    log_file: str = "crisis_ab_test.jsonl"  # JSONL event log, one DetectionEvent per line
    metrics_file: str = "crisis_ab_metrics.json"  # Path for the aggregate-metrics snapshot
|
||||
|
||||
|
||||
@dataclass
class DetectionEvent:
    """Single detection event for logging."""
    event_id: str  # "<epoch-ms>-<4-digit hash>" identifier built in detect()
    timestamp: str  # ISO-8601 UTC timestamp of the detection
    text_hash: str  # Hash of input text (privacy-preserving); truncated sha256
    variant: str  # "A" or "B"
    level: str  # Detection level: NONE/LOW/MEDIUM/HIGH/CRITICAL
    score: float  # Score reported by the detector
    indicators: List[str]  # Matched indicator patterns (detect() caps this at 5)
    latency_ms: float  # Wall-clock detection latency in milliseconds
    metadata: Dict[str, Any] = field(default_factory=dict)  # Caller-supplied context
|
||||
|
||||
|
||||
@dataclass
class VariantMetrics:
    """Metrics for a single variant."""
    variant: str  # "A" or "B"
    total_events: int = 0  # Events routed to this variant
    # Count of events per detection level for this variant.
    detections_by_level: Dict[str, int] = field(default_factory=lambda: {
        "CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "NONE": 0
    })
    avg_latency_ms: float = 0.0  # Maintained as total_latency_ms / total_events
    total_latency_ms: float = 0.0  # Running latency sum
    false_positives: int = 0  # Requires human labeling
    true_positives: int = 0  # Requires human labeling
    false_positive_rate: Optional[float] = None  # Calculated when labels available
|
||||
|
||||
|
||||
@dataclass
class ABTestMetrics:
    """Aggregate metrics for A/B test."""
    # default_factory gives each instance its own VariantMetrics objects.
    variant_a: VariantMetrics = field(default_factory=lambda: VariantMetrics("A"))
    variant_b: VariantMetrics = field(default_factory=lambda: VariantMetrics("B"))
    start_time: str = ""  # ISO-8601 UTC; set when the tester is constructed
    end_time: str = ""  # Refreshed on every get_metrics() snapshot
    total_events: int = 0  # Events across both variants
    disagreements: int = 0  # Cases where variants disagree
|
||||
|
||||
|
||||
class CrisisABTester:
    """
    A/B testing framework for crisis detection algorithms.

    Usage:
        tester = CrisisABTester()
        result = tester.detect("I feel hopeless")
        # Returns CrisisDetectionResult from assigned variant
        # Logs event and updates metrics
    """

    def __init__(self, config: Optional[ABTestConfig] = None):
        """Initialize with *config* (defaults to ABTestConfig()) and resume any saved metrics."""
        self.config = config or ABTestConfig()
        self.metrics = ABTestMetrics(start_time=datetime.now(timezone.utc).isoformat())
        self._variant_b_detector: Optional[Callable] = None  # pluggable variant-B implementation
        self._event_log: List[DetectionEvent] = []  # in-memory mirror of events logged this session

        # Load existing metrics if file exists
        if os.path.exists(self.config.metrics_file):
            self._load_metrics()

    def set_variant_b_detector(self, detector: Callable[[str], CrisisDetectionResult]) -> None:
        """Set the detector function for variant B."""
        self._variant_b_detector = detector

    def _assign_variant(self, text: str) -> Variant:
        """
        Assign a variant deterministically from a hash of the normalized text.

        The same text (optionally salted with config.seed) always maps to the
        same variant, so repeated/identical inputs get consistent treatment.
        Returns Variant.A unconditionally when the test is disabled.
        """
        if not self.config.enabled:
            return Variant.A

        # Use text hash for deterministic assignment
        hash_input = text.strip().lower()
        if self.config.seed:
            hash_input = self.config.seed + hash_input

        # md5 is used only for bucketing here, not for security.
        hash_val = int(hashlib.md5(hash_input.encode()).hexdigest(), 16)
        normalized = (hash_val % 1000) / 1000.0  # 0.0 to 0.999

        return Variant.B if normalized < self.config.variant_b_percentage else Variant.A

    def _get_variant_b_result(self, text: str) -> CrisisDetectionResult:
        """
        Get detection result from variant B.

        Uses the detector registered via set_variant_b_detector() when present;
        otherwise falls back to a built-in enhanced detector that mirrors
        variant A for CRITICAL/HIGH/LOW but triggers MEDIUM on a single match.
        """
        if self._variant_b_detector:
            return self._variant_b_detector(text)

        # Default variant B: enhanced contextual scoring
        # More sensitive to MEDIUM indicators (requires only 1 instead of 2)
        # Local import avoids exposing these internals at module import time.
        from crisis.detect import _find_indicators, ACTIONS, SCORES

        text_lower = text.lower()
        matches = _find_indicators(text_lower)

        if not matches:
            return CrisisDetectionResult(level="NONE", score=0.0)

        # CRITICAL and HIGH: same as variant A
        for tier in ("CRITICAL", "HIGH"):
            if matches[tier]:
                tier_matches = matches[tier]
                patterns = [m["pattern"] for m in tier_matches]
                return CrisisDetectionResult(
                    level=tier,
                    indicators=patterns,
                    recommended_action=ACTIONS[tier],
                    score=SCORES[tier],
                    matches=tier_matches,
                )

        # MEDIUM: variant B requires only 1 indicator (vs 2 in variant A)
        if matches["MEDIUM"]:
            tier_matches = matches["MEDIUM"]
            patterns = [m["pattern"] for m in tier_matches]
            return CrisisDetectionResult(
                level="MEDIUM",
                indicators=patterns,
                recommended_action=ACTIONS["MEDIUM"],
                score=SCORES["MEDIUM"],
                matches=tier_matches,
            )

        # LOW: handled the same way as the tiers above.
        if matches["LOW"]:
            tier_matches = matches["LOW"]
            patterns = [m["pattern"] for m in tier_matches]
            return CrisisDetectionResult(
                level="LOW",
                indicators=patterns,
                recommended_action=ACTIONS["LOW"],
                score=SCORES["LOW"],
                matches=tier_matches,
            )

        return CrisisDetectionResult(level="NONE", score=0.0)

    def detect(self, text: str, metadata: Optional[Dict] = None) -> CrisisDetectionResult:
        """
        Run A/B test detection.

        Args:
            text: Input text to analyze
            metadata: Optional metadata to attach to event log

        Returns:
            CrisisDetectionResult from assigned variant
        """
        if not self.config.enabled:
            # Disabled: bypass assignment, event logging, and metrics entirely.
            return detect_crisis_variant_a(text)

        variant = self._assign_variant(text)
        start_time = time.perf_counter()

        if variant == Variant.A:
            result = detect_crisis_variant_a(text)
        else:
            result = self._get_variant_b_result(text)

        latency_ms = (time.perf_counter() - start_time) * 1000

        # Log event
        event = DetectionEvent(
            # NOTE(review): hash(text) is salted per process (PYTHONHASHSEED),
            # so event_id is unique-ish but not reproducible across runs.
            event_id=f"{int(time.time() * 1000)}-{hash(text) % 10000:04d}",
            timestamp=datetime.now(timezone.utc).isoformat(),
            text_hash=hashlib.sha256(text.encode()).hexdigest()[:16],
            variant=variant.value,
            level=result.level,
            score=result.score,
            indicators=result.indicators[:5],  # Limit for privacy
            latency_ms=round(latency_ms, 3),
            metadata=metadata or {}
        )
        self._event_log.append(event)
        self._log_event(event)

        # Update metrics
        self._update_metrics(variant, result, latency_ms)

        return result

    def _log_event(self, event: DetectionEvent) -> None:
        """Append event to JSONL log file (best-effort; failures only warn)."""
        try:
            with open(self.config.log_file, "a") as f:
                f.write(json.dumps(asdict(event)) + "\n")
        except Exception as e:
            # Deliberately non-fatal: logging must not break detection.
            print(f"Warning: Could not log A/B test event: {e}")

    def _update_metrics(self, variant: Variant, result: CrisisDetectionResult, latency_ms: float) -> None:
        """Update running metrics for the variant that handled this event."""
        vm = self.metrics.variant_a if variant == Variant.A else self.metrics.variant_b

        vm.total_events += 1
        # .get() tolerates levels outside the five pre-seeded buckets.
        vm.detections_by_level[result.level] = vm.detections_by_level.get(result.level, 0) + 1
        vm.total_latency_ms += latency_ms
        vm.avg_latency_ms = vm.total_latency_ms / vm.total_events

        self.metrics.total_events += 1

    def compare_results(self, text: str) -> Dict[str, CrisisDetectionResult]:
        """
        Run both variants and return both results (for analysis).
        Does not log to A/B test metrics.
        """
        result_a = detect_crisis_variant_a(text)
        result_b = self._get_variant_b_result(text)
        return {"A": result_a, "B": result_b}

    def get_disagreement_rate(self) -> float:
        """
        Calculate disagreement rate from logged events.
        Requires running detect() for same texts with both variants.
        """
        if not self._event_log:
            return 0.0

        # Group by text_hash
        by_text: Dict[str, Dict[str, str]] = {}
        for event in self._event_log:
            if event.text_hash not in by_text:
                by_text[event.text_hash] = {}
            by_text[event.text_hash][event.variant] = event.level

        # Only texts seen by BOTH variants can disagree.
        disagreements = sum(
            1 for variants in by_text.values()
            if "A" in variants and "B" in variants and variants["A"] != variants["B"]
        )

        return disagreements / len(by_text) if by_text else 0.0

    def get_metrics(self) -> ABTestMetrics:
        """Get current metrics snapshot (refreshes end_time and disagreements)."""
        self.metrics.end_time = datetime.now(timezone.utc).isoformat()
        # Scales the per-text disagreement rate by total events (approximation).
        self.metrics.disagreements = int(self.get_disagreement_rate() * self.metrics.total_events)
        return self.metrics

    def save_metrics(self) -> None:
        """Save metrics to JSON file (best-effort; failures only warn)."""
        try:
            with open(self.config.metrics_file, "w") as f:
                json.dump(asdict(self.get_metrics()), f, indent=2)
        except Exception as e:
            print(f"Warning: Could not save A/B test metrics: {e}")

    def _load_metrics(self) -> None:
        """Load metrics from JSON file, reconstructing the dataclasses."""
        try:
            with open(self.config.metrics_file, "r") as f:
                data = json.load(f)
            # Reconstruct metrics from saved data
            if "variant_a" in data:
                self.metrics.variant_a = VariantMetrics(**data["variant_a"])
            if "variant_b" in data:
                self.metrics.variant_b = VariantMetrics(**data["variant_b"])
            self.metrics.total_events = data.get("total_events", 0)
            self.metrics.disagreements = data.get("disagreements", 0)
        except Exception as e:
            # Corrupt/missing file: keep the fresh in-memory metrics.
            print(f"Warning: Could not load A/B test metrics: {e}")

    def label_event(self, event_id: str, is_true_positive: bool) -> None:
        """
        Label an event as true/false positive (requires human review).
        Updates false positive rate metrics and persists them.

        Raises:
            ValueError: if event_id is not in this session's in-memory log.
        """
        for event in self._event_log:
            if event.event_id == event_id:
                vm = self.metrics.variant_a if event.variant == "A" else self.metrics.variant_b
                if is_true_positive:
                    vm.true_positives += 1
                else:
                    vm.false_positives += 1

                # Recalculate false positive rate
                total_labelled = vm.true_positives + vm.false_positives
                if total_labelled > 0:
                    vm.false_positive_rate = vm.false_positives / total_labelled

                self.save_metrics()
                return

        raise ValueError(f"Event {event_id} not found")

    def get_report(self) -> str:
        """Generate human-readable A/B test report."""
        m = self.get_metrics()

        lines = [
            "=" * 60,
            "CRISIS DETECTION A/B TEST REPORT",
            "=" * 60,
            f"Period: {m.start_time} to {m.end_time}",
            f"Total Events: {m.total_events}",
            f"Disagreements: {m.disagreements}",
            "",
            "VARIANT A (Control - Current Detector):",
            f"  Events: {m.variant_a.total_events}",
            f"  Avg Latency: {m.variant_a.avg_latency_ms:.3f} ms",
            f"  Detection Distribution:",
        ]

        for level, count in m.variant_a.detections_by_level.items():
            # Guard against division by zero when a variant saw no traffic.
            pct = (count / m.variant_a.total_events * 100) if m.variant_a.total_events else 0
            lines.append(f"    {level}: {count} ({pct:.1f}%)")

        if m.variant_a.false_positive_rate is not None:
            lines.append(f"  False Positive Rate: {m.variant_a.false_positive_rate:.1%}")

        lines.extend([
            "",
            "VARIANT B (Treatment - Enhanced Detector):",
            f"  Events: {m.variant_b.total_events}",
            f"  Avg Latency: {m.variant_b.avg_latency_ms:.3f} ms",
            f"  Detection Distribution:",
        ])

        for level, count in m.variant_b.detections_by_level.items():
            pct = (count / m.variant_b.total_events * 100) if m.variant_b.total_events else 0
            lines.append(f"    {level}: {count} ({pct:.1f}%)")

        if m.variant_b.false_positive_rate is not None:
            lines.append(f"  False Positive Rate: {m.variant_b.false_positive_rate:.1%}")

        lines.append("=" * 60)

        return "\n".join(lines)
|
||||
|
||||
|
||||
# ── Module-level convenience ──────────────────────────────────────

_default_tester: Optional[CrisisABTester] = None


def get_ab_tester(config: Optional[ABTestConfig] = None) -> CrisisABTester:
    """Return the process-wide A/B tester, creating it lazily on first use."""
    global _default_tester
    tester = _default_tester
    if tester is None:
        tester = CrisisABTester(config)
        _default_tester = tester
    return tester
|
||||
|
||||
|
||||
def detect_crisis_ab(text: str, metadata: Optional[Dict] = None) -> CrisisDetectionResult:
    """Run A/B-tested crisis detection through the shared default tester."""
    tester = get_ab_tester()
    return tester.detect(text, metadata)
|
||||
@@ -680,7 +680,7 @@ html, body {
|
||||
|
||||
<!-- Footer -->
|
||||
<footer id="footer">
|
||||
<a href="/about.html" aria-label="About The Door">about</a>
|
||||
<a href="/about" aria-label="About The Door">about</a>
|
||||
<button id="safety-plan-btn" aria-label="Open My Safety Plan">my safety plan</button>
|
||||
<button id="clear-chat-btn" aria-label="Clear chat history">clear chat</button>
|
||||
</footer>
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
import pathlib
|
||||
import unittest
|
||||
|
||||
|
||||
ROOT = pathlib.Path(__file__).resolve().parents[1]
|
||||
INDEX_HTML = ROOT / 'index.html'
|
||||
ABOUT_HTML = ROOT / 'about.html'
|
||||
|
||||
|
||||
class TestAboutLink(unittest.TestCase):
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.html = INDEX_HTML.read_text(encoding='utf-8')
|
||||
|
||||
def test_about_page_exists(self):
|
||||
self.assertTrue(ABOUT_HTML.exists(), 'about.html should exist for static serving')
|
||||
|
||||
def test_footer_about_link_targets_static_about_html(self):
|
||||
self.assertIn('href="/about.html"', self.html)
|
||||
self.assertNotIn('href="/about"', self.html)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
357
tests/test_crisis_ab.py
Normal file
357
tests/test_crisis_ab.py
Normal file
@@ -0,0 +1,357 @@
|
||||
"""
|
||||
Tests for Crisis Detection A/B Testing Framework.
|
||||
"""
|
||||
|
||||
import unittest
|
||||
import os
|
||||
import json
|
||||
import tempfile
|
||||
import shutil
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
# Import from the crisis module
|
||||
import sys
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from crisis.ab_test import (
|
||||
CrisisABTester,
|
||||
ABTestConfig,
|
||||
Variant,
|
||||
DetectionEvent,
|
||||
VariantMetrics,
|
||||
ABTestMetrics,
|
||||
detect_crisis_ab,
|
||||
)
|
||||
from crisis.detect import CrisisDetectionResult
|
||||
|
||||
|
||||
class TestABTestConfig(unittest.TestCase):
    """Validate ABTestConfig defaults and explicit overrides."""

    def test_default_config(self):
        cfg = ABTestConfig()
        self.assertTrue(cfg.enabled)
        self.assertEqual(cfg.variant_b_percentage, 0.5)
        self.assertIsNone(cfg.seed)
        self.assertEqual(cfg.log_file, "crisis_ab_test.jsonl")
        self.assertEqual(cfg.metrics_file, "crisis_ab_metrics.json")

    def test_custom_config(self):
        cfg = ABTestConfig(
            enabled=False,
            variant_b_percentage=0.3,
            seed="test-seed",
            log_file="custom.jsonl",
            metrics_file="custom.json",
        )
        self.assertFalse(cfg.enabled)
        self.assertEqual(cfg.variant_b_percentage, 0.3)
        self.assertEqual(cfg.seed, "test-seed")
|
||||
|
||||
|
||||
class TestVariantAssignment(unittest.TestCase):
    """Exercise the deterministic variant-assignment logic."""

    def test_deterministic_assignment(self):
        """Same text should always get same variant with same seed."""
        tester = CrisisABTester(ABTestConfig(seed="test-seed"))

        message = "I feel hopeless today"
        first = tester._assign_variant(message)
        second = tester._assign_variant(message)

        self.assertEqual(first, second)

    def test_assignment_distribution(self):
        """With 50% split, roughly half should go to each variant."""
        tester = CrisisABTester(ABTestConfig(seed="test-seed"))

        assignments = [
            tester._assign_variant(f"test message {i}") for i in range(100)
        ]
        count_a = sum(1 for v in assignments if v == Variant.A)
        count_b = sum(1 for v in assignments if v == Variant.B)

        # Should be roughly 50/50 (allow some variance)
        self.assertGreater(count_a, 30)
        self.assertGreater(count_b, 30)

    def test_disabled_returns_a(self):
        """When disabled, should always return variant A."""
        tester = CrisisABTester(ABTestConfig(enabled=False))

        for i in range(10):
            self.assertEqual(tester._assign_variant(f"test {i}"), Variant.A)
|
||||
|
||||
|
||||
class TestDetection(unittest.TestCase):
    """Test A/B detection logic."""

    def setUp(self):
        # Fresh temp directory per test keeps the JSONL log and metrics
        # snapshot isolated from the working directory and other tests.
        self.temp_dir = tempfile.mkdtemp()
        self.log_file = os.path.join(self.temp_dir, "test_ab.jsonl")
        self.metrics_file = os.path.join(self.temp_dir, "test_metrics.json")
        self.config = ABTestConfig(
            seed="test-seed",
            log_file=self.log_file,
            metrics_file=self.metrics_file
        )
        self.tester = CrisisABTester(self.config)

    def tearDown(self):
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    def test_detect_returns_result(self):
        """detect() should return CrisisDetectionResult."""
        result = self.tester.detect("I feel sad")
        self.assertIsInstance(result, CrisisDetectionResult)
        self.assertIn(result.level, ["NONE", "LOW", "MEDIUM", "HIGH", "CRITICAL"])

    def test_detect_critical_text(self):
        """Critical text should be detected regardless of variant."""
        result = self.tester.detect("I want to kill myself")
        self.assertEqual(result.level, "CRITICAL")

    def test_detect_none_text(self):
        """Non-crisis text should return NONE."""
        result = self.tester.detect("The weather is nice today")
        self.assertEqual(result.level, "NONE")

    def test_disabled_uses_variant_a(self):
        """When disabled, should use variant A only."""
        config = ABTestConfig(enabled=False, log_file=self.log_file, metrics_file=self.metrics_file)
        tester = CrisisABTester(config)

        result = tester.detect("I feel hopeless")
        self.assertIsInstance(result, CrisisDetectionResult)

    def test_logs_events(self):
        """detect() should log events to JSONL file."""
        self.tester.detect("I feel sad")
        self.tester.detect("I feel happy")

        self.assertTrue(os.path.exists(self.log_file))
        with open(self.log_file) as f:
            lines = f.readlines()
        self.assertEqual(len(lines), 2)

        # Each JSONL line round-trips to a dict carrying the core event fields.
        event = json.loads(lines[0])
        self.assertIn("event_id", event)
        self.assertIn("variant", event)
        self.assertIn("level", event)

    def test_updates_metrics(self):
        """detect() should update metrics."""
        self.tester.detect("I feel hopeless")  # Should trigger detection
        self.tester.detect("Hello world")  # Should not trigger

        metrics = self.tester.get_metrics()
        self.assertEqual(metrics.total_events, 2)

    def test_variant_b_more_sensitive(self):
        """Variant B should be more sensitive to MEDIUM indicators."""
        # Create tester that always assigns variant B
        config = ABTestConfig(
            variant_b_percentage=1.0,  # 100% to B
            seed="test-seed",
            log_file=self.log_file,
            metrics_file=self.metrics_file
        )
        tester_b = CrisisABTester(config)

        # Single MEDIUM indicator - variant A would return LOW, variant B returns MEDIUM
        result_b = tester_b.detect("I feel worthless")

        # Compare with variant A
        config_a = ABTestConfig(
            variant_b_percentage=0.0,  # 0% to B (100% to A)
            seed="test-seed",
            log_file=self.log_file + ".a",
            metrics_file=self.metrics_file + ".a"
        )
        tester_a = CrisisABTester(config_a)
        result_a = tester_a.detect("I feel worthless")

        # Variant B should be at least as sensitive
        level_order = {"NONE": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 3, "CRITICAL": 4}
        self.assertGreaterEqual(
            level_order[result_b.level],
            level_order[result_a.level]
        )
|
||||
|
||||
|
||||
class TestCompareResults(unittest.TestCase):
    """Test compare_results functionality."""

    def test_compare_returns_both_variants(self):
        """compare_results() returns one CrisisDetectionResult per variant."""
        tester = CrisisABTester()
        results = tester.compare_results("I feel hopeless")

        self.assertIn("A", results)
        self.assertIn("B", results)
        self.assertIsInstance(results["A"], CrisisDetectionResult)
        self.assertIsInstance(results["B"], CrisisDetectionResult)

    def test_compare_does_not_log(self):
        """compare_results should not log to A/B test metrics."""
        temp_dir = tempfile.mkdtemp()
        # Bug fix: the original called shutil.rmtree(temp_dir) only after the
        # assertion, leaking the temp directory whenever the assertion failed.
        # addCleanup guarantees removal regardless of outcome.
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
        log_file = os.path.join(temp_dir, "test.jsonl")
        metrics_file = os.path.join(temp_dir, "metrics.json")

        config = ABTestConfig(log_file=log_file, metrics_file=metrics_file)
        tester = CrisisABTester(config)

        tester.compare_results("I feel sad")

        # Log file should not exist (no events logged)
        self.assertFalse(os.path.exists(log_file))
|
||||
|
||||
|
||||
class TestMetrics(unittest.TestCase):
    """Test metrics tracking."""

    def setUp(self):
        # Per-test temp paths keep log/metrics files isolated from CWD.
        self.temp_dir = tempfile.mkdtemp()
        self.log_file = os.path.join(self.temp_dir, "test.jsonl")
        self.metrics_file = os.path.join(self.temp_dir, "metrics.json")
        self.config = ABTestConfig(
            seed="test-seed",
            log_file=self.log_file,
            metrics_file=self.metrics_file
        )

    def tearDown(self):
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    def test_metrics_track_events(self):
        tester = CrisisABTester(self.config)

        for i in range(10):
            tester.detect(f"test message {i}")

        metrics = tester.get_metrics()
        self.assertEqual(metrics.total_events, 10)

    def test_metrics_track_levels(self):
        tester = CrisisABTester(self.config)

        tester.detect("I want to kill myself")  # CRITICAL
        tester.detect("I feel hopeless")  # HIGH or MEDIUM
        tester.detect("Hello world")  # NONE

        # Every event lands in exactly one level bucket across both variants.
        metrics = tester.get_metrics()
        total_detections = sum(metrics.variant_a.detections_by_level.values())
        total_detections += sum(metrics.variant_b.detections_by_level.values())
        self.assertEqual(total_detections, 3)

    def test_save_and_load_metrics(self):
        tester = CrisisABTester(self.config)

        for i in range(5):
            tester.detect(f"test {i}")

        tester.save_metrics()
        self.assertTrue(os.path.exists(self.metrics_file))

        # Create new tester that loads saved metrics
        tester2 = CrisisABTester(self.config)
        self.assertEqual(tester2.metrics.total_events, 5)
|
||||
|
||||
|
||||
class TestEventLabeling(unittest.TestCase):
    """Test event labeling for false positive tracking."""

    def setUp(self):
        # Isolated temp paths: label_event() persists metrics via save_metrics().
        self.temp_dir = tempfile.mkdtemp()
        self.log_file = os.path.join(self.temp_dir, "test.jsonl")
        self.metrics_file = os.path.join(self.temp_dir, "metrics.json")
        self.config = ABTestConfig(
            seed="test-seed",
            log_file=self.log_file,
            metrics_file=self.metrics_file
        )
        self.tester = CrisisABTester(self.config)

    def tearDown(self):
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    def test_label_event_updates_metrics(self):
        self.tester.detect("I feel hopeless")

        event_id = self.tester._event_log[0].event_id
        self.tester.label_event(event_id, is_true_positive=False)

        metrics = self.tester.get_metrics()
        # Find which variant was assigned
        variant = self.tester._event_log[0].variant
        vm = metrics.variant_a if variant == "A" else metrics.variant_b
        # One label, and it was a false positive => rate is exactly 1.0.
        self.assertEqual(vm.false_positives, 1)
        self.assertEqual(vm.false_positive_rate, 1.0)

    def test_label_nonexistent_event_raises(self):
        with self.assertRaises(ValueError):
            self.tester.label_event("nonexistent-id", is_true_positive=True)
|
||||
|
||||
|
||||
class TestReport(unittest.TestCase):
    """Test report generation."""

    def setUp(self):
        # Bug fix: the original used the default ABTestConfig, so detect()
        # appended events to crisis_ab_test.jsonl in the current working
        # directory, and a pre-existing crisis_ab_metrics.json would be
        # loaded by the constructor and break the "Total Events: 5" check.
        # Route all output to a throwaway temp directory instead.
        temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
        self.config = ABTestConfig(
            log_file=os.path.join(temp_dir, "test.jsonl"),
            metrics_file=os.path.join(temp_dir, "metrics.json"),
        )

    def test_report_format(self):
        """Report names both variants and carries the exact event count."""
        tester = CrisisABTester(self.config)

        for i in range(5):
            tester.detect(f"test message {i}")

        report = tester.get_report()

        self.assertIn("CRISIS DETECTION A/B TEST REPORT", report)
        self.assertIn("VARIANT A", report)
        self.assertIn("VARIANT B", report)
        self.assertIn("Total Events: 5", report)
|
||||
|
||||
|
||||
class TestConvenienceFunction(unittest.TestCase):
    """Test module-level convenience function."""

    def test_detect_crisis_ab(self):
        outcome = detect_crisis_ab("I feel sad")
        self.assertIsInstance(outcome, CrisisDetectionResult)

    def test_detect_crisis_ab_with_metadata(self):
        outcome = detect_crisis_ab("I feel sad", metadata={"source": "test"})
        self.assertIsInstance(outcome, CrisisDetectionResult)
|
||||
|
||||
|
||||
class TestCustomVariantBDetector(unittest.TestCase):
    """Test custom variant B detector."""

    def test_custom_detector(self):
        """Should use custom detector when set."""
        # Bug fix: the original used the default ABTestConfig, so this test
        # appended an event to crisis_ab_test.jsonl in the working directory.
        # Route log/metrics output to a temp directory instead.
        temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

        def custom_detector(text: str) -> CrisisDetectionResult:
            return CrisisDetectionResult(
                level="HIGH",
                indicators=["custom"],
                score=0.9
            )

        config = ABTestConfig(
            variant_b_percentage=1.0,  # 100% of traffic to variant B
            log_file=os.path.join(temp_dir, "test.jsonl"),
            metrics_file=os.path.join(temp_dir, "metrics.json"),
        )
        tester = CrisisABTester(config)
        tester.set_variant_b_detector(custom_detector)

        result = tester.detect("Hello world")
        self.assertEqual(result.level, "HIGH")
        self.assertEqual(result.indicators, ["custom"])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
Reference in New Issue
Block a user