Compare commits: fix/136 ... burn/101-1 (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 7a08fb764a |  |
|  | 4b022f0df4 |  |
@@ -1,5 +1,22 @@
-"""Crisis detection and metrics module."""
-
-from .metrics import get_metrics_summary, get_metrics_report
-
-__all__ = ["get_metrics_summary", "get_metrics_report"]
+"""
+Crisis detection and response system for the-door.
+
+Stands between a broken man and a machine that would tell him to die.
+"""
+
+from .detect import detect_crisis, CrisisDetectionResult, format_result, get_urgency_emoji
+from .response import process_message, generate_response, CrisisResponse
+from .gateway import check_crisis, get_system_prompt, format_gateway_response
+
+__all__ = [
+    "detect_crisis",
+    "CrisisDetectionResult",
+    "process_message",
+    "generate_response",
+    "CrisisResponse",
+    "check_crisis",
+    "get_system_prompt",
+    "format_result",
+    "format_gateway_response",
+    "get_urgency_emoji",
+]
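The hunk above only changes what the package re-exports. For orientation, a minimal sketch of the new import surface in use; the call shape of detect_crisis and the result fields are taken from crisis/ab_testing.py further down in this diff, not from code shown here, so treat the details as assumptions:

# Sketch only, not part of this commit.
from crisis import detect_crisis

result = detect_crisis("I can't keep going")
print(result.level, result.score, result.recommended_action)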
crisis/ab_testing.py (new file, 409 lines)
@@ -0,0 +1,409 @@
#!/usr/bin/env python3
"""
Crisis Detection A/B Testing Framework

Allows testing different crisis detection algorithms with:
- Feature flags for algorithm A vs B
- Logging of which variant triggered for each event
- Metrics: false positive rate, detection latency per variant
- Statistical significance testing

Usage:
    from crisis.ab_testing import ABTestManager, Variant

    manager = ABTestManager()
    variant, result, latency_ms = manager.detect_with_variant("I'm feeling hopeless")
    print(variant, result.level)

    # Get metrics
    metrics = manager.get_metrics()
    print(metrics.variant_a.false_positives, metrics.variant_b.false_positives)
"""

import json
import math
import os
import random
import time
from dataclasses import dataclass, field, asdict
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
from uuid import uuid4

from .detect import detect_crisis, CrisisDetectionResult


class Variant(Enum):
    """A/B test variants for crisis detection."""
    A = "A"  # Control: current algorithm
    B = "B"  # Treatment: alternative algorithm


@dataclass
class ABTestConfig:
    """Configuration for A/B testing."""
    enabled: bool = True
    variant_b_percentage: float = 0.5  # 50% split
    log_file: Optional[str] = None  # Default: ~/.the-door/ab_test_logs.jsonl
    metrics_file: Optional[str] = None  # Default: ~/.the-door/ab_metrics.json
    seed: Optional[int] = None  # For reproducible assignments


@dataclass
class DetectionEvent:
    """A single detection event with variant assignment."""
    event_id: str
    timestamp: str
    variant: str
    text_hash: str  # Hash of input text for privacy
    detected_level: str
    detected_score: float
    indicators: List[str]
    detection_latency_ms: float
    is_false_positive: Optional[bool] = None  # Null until labeled
    user_feedback: Optional[str] = None


@dataclass
class VariantMetrics:
    """Metrics for a single variant."""
    total_detections: int = 0
    true_positives: int = 0
    false_positives: int = 0
    false_negatives: int = 0  # Requires manual labeling
    avg_latency_ms: float = 0.0
    p50_latency_ms: float = 0.0
    p95_latency_ms: float = 0.0
    p99_latency_ms: float = 0.0
    level_distribution: Dict[str, int] = field(default_factory=dict)
    indicator_frequency: Dict[str, int] = field(default_factory=dict)


@dataclass
class ABTestMetrics:
    """Complete A/B test metrics."""
    test_id: str
    start_time: str
    end_time: Optional[str] = None
    variant_a: VariantMetrics = field(default_factory=VariantMetrics)
    variant_b: VariantMetrics = field(default_factory=VariantMetrics)
    sample_size_a: int = 0
    sample_size_b: int = 0
    statistical_significance: Optional[float] = None  # p-value if calculable


class ABTestManager:
    """Manages A/B testing for crisis detection algorithms."""

    def __init__(self, config: Optional[ABTestConfig] = None):
        self.config = config or ABTestConfig()
        self.test_id = str(uuid4())[:8]
        self.events: List[DetectionEvent] = []

        # Set up file paths
        home = Path.home() / ".the-door"
        home.mkdir(exist_ok=True)

        self.log_file = Path(self.config.log_file or home / "ab_test_logs.jsonl")
        self.metrics_file = Path(self.config.metrics_file or home / "ab_metrics.json")

        # Initialize RNG
        if self.config.seed is not None:
            random.seed(self.config.seed)

    def _assign_variant(self, text: str) -> Variant:
        """Assign a variant based on text hash for deterministic assignment."""
        # A forced variant (see force_variant) takes precedence, since
        # force_variant also disables the normal A/B split.
        forced = getattr(self, "_forced_variant", None)
        if forced is not None:
            return forced

        if not self.config.enabled:
            return Variant.A

        # Use hash of text for consistent assignment (note: hash() is salted
        # per process unless PYTHONHASHSEED is set, so assignments are only
        # stable within a single run)
        text_hash = hash(text) % 100
        threshold = int(self.config.variant_b_percentage * 100)

        if text_hash < threshold:
            return Variant.B
        return Variant.A

    def _detect_variant_a(self, text: str) -> CrisisDetectionResult:
        """Variant A: Current algorithm (control)."""
        return detect_crisis(text)

    def _detect_variant_b(self, text: str) -> CrisisDetectionResult:
        """Variant B: Alternative detection algorithm.

        This is a placeholder - in practice, you'd implement a different
        detection algorithm here. For now, we'll use the same algorithm
        but with different sensitivity settings.
        """
        # Example: Variant B could use different thresholds or additional patterns
        result = detect_crisis(text)

        # For demonstration: adjust sensitivity based on confidence score
        # In practice, this would be a completely different algorithm
        if result.score > 0.7 and result.level != "CRITICAL":
            # Variant B is more sensitive to high-confidence detections
            from .detect import CRITICAL_INDICATORS
            import re

            for pattern in CRITICAL_INDICATORS:
                if re.search(pattern, text, re.IGNORECASE):
                    # Upgrade to CRITICAL if we find critical indicators
                    return CrisisDetectionResult(
                        level="CRITICAL",
                        score=result.score,
                        indicators=result.indicators,
                        matched_patterns=result.matched_patterns,
                        recommended_action="immediate_intervention"
                    )

        return result

    def detect_with_variant(self, text: str, user_id: Optional[str] = None) -> Tuple[Variant, CrisisDetectionResult, float]:
        """
        Run crisis detection with A/B testing.

        Returns:
            Tuple of (variant, detection_result, latency_ms)
        """
        if not self.config.enabled and getattr(self, "_forced_variant", None) is None:
            # A/B testing disabled and no variant forced: run the control
            # algorithm directly, without logging an event.
            start = time.time()
            result = self._detect_variant_a(text)
            latency = (time.time() - start) * 1000
            return Variant.A, result, latency

        # Assign variant
        variant = self._assign_variant(text)

        # Run detection with timing
        start = time.time()
        if variant == Variant.A:
            result = self._detect_variant_a(text)
        else:
            result = self._detect_variant_b(text)
        latency_ms = (time.time() - start) * 1000

        # Log event
        self._log_event(variant, text, result, latency_ms, user_id)

        return variant, result, latency_ms

    def _log_event(self, variant: Variant, text: str, result: CrisisDetectionResult,
                   latency_ms: float, user_id: Optional[str] = None):
        """Log a detection event."""
        import hashlib

        # Hash text for privacy (don't log actual crisis text)
        text_hash = hashlib.sha256(text.encode()).hexdigest()[:16]

        event = DetectionEvent(
            event_id=str(uuid4())[:8],
            timestamp=datetime.now(timezone.utc).isoformat(),
            variant=variant.value,
            text_hash=text_hash,
            detected_level=result.level,
            detected_score=result.score,
            indicators=result.indicators[:5],  # Limit for storage
            detection_latency_ms=round(latency_ms, 2),
        )

        self.events.append(event)

        # Append to log file
        try:
            with open(self.log_file, "a") as f:
                f.write(json.dumps(asdict(event)) + "\n")
        except Exception:
            pass  # Don't fail on logging errors

    def label_event(self, event_id: str, is_false_positive: bool, feedback: Optional[str] = None):
        """Label an event as true/false positive for metrics calculation."""
        for event in self.events:
            if event.event_id == event_id:
                event.is_false_positive = is_false_positive
                event.user_feedback = feedback
                break

        # Update log file
        self._save_events()

    def _save_events(self):
        """Save all events to log file."""
        try:
            with open(self.log_file, "w") as f:
                for event in self.events:
                    f.write(json.dumps(asdict(event)) + "\n")
        except Exception:
            pass

    def get_metrics(self) -> ABTestMetrics:
        """Calculate metrics for both variants."""
        metrics = ABTestMetrics(
            test_id=self.test_id,
            start_time=self.events[0].timestamp if self.events else datetime.now(timezone.utc).isoformat(),
            end_time=datetime.now(timezone.utc).isoformat(),
        )

        # Separate events by variant
        a_events = [e for e in self.events if e.variant == "A"]
        b_events = [e for e in self.events if e.variant == "B"]

        metrics.sample_size_a = len(a_events)
        metrics.sample_size_b = len(b_events)

        # Calculate variant A metrics
        if a_events:
            metrics.variant_a = self._calculate_variant_metrics(a_events)

        # Calculate variant B metrics
        if b_events:
            metrics.variant_b = self._calculate_variant_metrics(b_events)

        # Calculate statistical significance if we have enough data
        if len(a_events) >= 30 and len(b_events) >= 30:
            metrics.statistical_significance = self._calculate_significance(a_events, b_events)

        # Save metrics to file
        self._save_metrics(metrics)

        return metrics

    def _calculate_variant_metrics(self, events: List[DetectionEvent]) -> VariantMetrics:
        """Calculate metrics for a single variant."""
        if not events:
            return VariantMetrics()

        # Latency statistics
        latencies = [e.detection_latency_ms for e in events]
        latencies.sort()

        n = len(latencies)
        # Nearest-rank percentile indices: ceil(n * q) - 1, so the median of
        # 10 sorted values is the 5th value, matching the percentile tests.
        p50_idx = max(math.ceil(n * 0.5) - 1, 0)
        p95_idx = max(math.ceil(n * 0.95) - 1, 0)
        p99_idx = max(math.ceil(n * 0.99) - 1, 0)

        # Level distribution
        level_dist = {}
        for e in events:
            level_dist[e.detected_level] = level_dist.get(e.detected_level, 0) + 1

        # Indicator frequency
        indicator_freq = {}
        for e in events:
            for ind in e.indicators:
                indicator_freq[ind] = indicator_freq.get(ind, 0) + 1

        # False positive rate (only for labeled events)
        labeled = [e for e in events if e.is_false_positive is not None]
        fp_count = sum(1 for e in labeled if e.is_false_positive)
        tp_count = sum(1 for e in labeled if not e.is_false_positive)

        return VariantMetrics(
            total_detections=len(events),
            true_positives=tp_count,
            false_positives=fp_count,
            avg_latency_ms=sum(latencies) / n,
            p50_latency_ms=latencies[p50_idx] if n > 0 else 0,
            p95_latency_ms=latencies[p95_idx] if n > 0 else 0,
            p99_latency_ms=latencies[p99_idx] if n > 0 else 0,
            level_distribution=level_dist,
            indicator_frequency=dict(sorted(indicator_freq.items(), key=lambda x: -x[1])[:10]),
        )

    def _calculate_significance(self, a_events: List[DetectionEvent],
                                b_events: List[DetectionEvent]) -> Optional[float]:
        """Calculate a chi-squared statistic for the difference between variants (a p-value stand-in)."""
        try:
            # Count detections at each level for each variant
            a_levels = {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "NONE": 0}
            b_levels = {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "NONE": 0}

            for e in a_events:
                a_levels[e.detected_level] = a_levels.get(e.detected_level, 0) + 1
            for e in b_events:
                b_levels[e.detected_level] = b_levels.get(e.detected_level, 0) + 1

            # Simple chi-squared test for level distribution difference
            # This is a simplified version - in production you'd use scipy.stats.chi2_contingency
            total_a = len(a_events)
            total_b = len(b_events)

            if total_a == 0 or total_b == 0:
                return None

            # Calculate expected frequencies
            chi_sq = 0
            for level in a_levels:
                expected_a = (a_levels[level] + b_levels[level]) * total_a / (total_a + total_b)
                expected_b = (a_levels[level] + b_levels[level]) * total_b / (total_a + total_b)

                if expected_a > 0:
                    chi_sq += (a_levels[level] - expected_a) ** 2 / expected_a
                if expected_b > 0:
                    chi_sq += (b_levels[level] - expected_b) ** 2 / expected_b

            # Return chi-squared value (not p-value, as we don't have scipy)
            # Higher values indicate more significant difference
            return chi_sq

        except Exception:
            return None

    def _save_metrics(self, metrics: ABTestMetrics):
        """Save metrics to file."""
        try:
            with open(self.metrics_file, "w") as f:
                json.dump(asdict(metrics), f, indent=2)
        except Exception:
            pass

    def get_variant_distribution(self) -> Dict[str, int]:
        """Get current distribution of events across variants."""
        dist = {"A": 0, "B": 0}
        for event in self.events:
            dist[event.variant] = dist.get(event.variant, 0) + 1
        return dist

    def force_variant(self, variant: Variant):
        """Force all subsequent detections to use a specific variant."""
        self.config.enabled = False
        self._forced_variant = variant

    def reset(self):
        """Reset the A/B test."""
        self.events = []
        self.config.enabled = True
        if hasattr(self, '_forced_variant'):
            delattr(self, '_forced_variant')


# Convenience function for easy integration
_default_manager = None


def get_ab_manager() -> ABTestManager:
    """Get the default A/B test manager instance."""
    global _default_manager
    if _default_manager is None:
        _default_manager = ABTestManager()
    return _default_manager


def detect_with_ab(text: str, user_id: Optional[str] = None) -> dict:
    """
    Detect crisis with A/B testing.

    Returns dict with variant, detection result, and metrics.
    """
    manager = get_ab_manager()
    variant, result, latency = manager.detect_with_variant(text, user_id)

    return {
        "variant": variant.value,
        "detection": {
            "level": result.level,
            "score": result.score,
            "indicators": result.indicators,
            "recommended_action": result.recommended_action,
        },
        "latency_ms": round(latency, 2),
        "test_id": manager.test_id,
    }
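As its comments note, _calculate_significance returns a raw chi-squared statistic rather than a real p-value. If scipy were available, the same per-level counts could be converted into an actual p-value; a minimal sketch under that assumption (not part of this commit), which skips levels with no observations so the contingency table stays valid:

# Sketch only, not part of this commit: assumes scipy is installed.
from typing import Optional
from scipy.stats import chi2_contingency

def level_counts_p_value(a_levels: dict, b_levels: dict) -> Optional[float]:
    """p-value for the difference in level distributions between variants."""
    levels = [lvl for lvl in a_levels if a_levels[lvl] + b_levels.get(lvl, 0) > 0]
    if len(levels) < 2:
        return None  # a chi-squared test needs at least two non-empty levels
    table = [
        [a_levels[lvl] for lvl in levels],
        [b_levels.get(lvl, 0) for lvl in levels],
    ]
    chi2, p_value, dof, expected = chi2_contingency(table)
    return p_value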
@@ -1,161 +0,0 @@
#!/usr/bin/env python3
"""
Crisis Metrics CLI — View crisis detection health metrics.

Usage:
    python3 -m crisis.metrics --summary   # weekly report
    python3 -m crisis.metrics --json      # raw JSON export
    python3 -m crisis.metrics --today     # today only
"""

import argparse
import json
import sys
import time
from datetime import datetime, timedelta
from pathlib import Path

# Metrics file location
METRICS_FILE = Path.home() / ".the-door" / "crisis_metrics.json"


def load_metrics():
    """Load metrics from file."""
    if not METRICS_FILE.exists():
        return {"detections": [], "stats": {}}

    try:
        with open(METRICS_FILE) as f:
            return json.load(f)
    except (json.JSONDecodeError, IOError):
        return {"detections": [], "stats": {}}


def get_metrics_summary(days=7):
    """Get metrics summary for the last N days."""
    data = load_metrics()
    detections = data.get("detections", [])

    cutoff = time.time() - (days * 86400)
    recent = [d for d in detections if d.get("timestamp", 0) > cutoff]

    if not recent:
        return {
            "period_days": days,
            "total_detections": 0,
            "by_severity": {},
            "by_source": {},
            "avg_response_time": 0,
        }

    by_severity = {}
    by_source = {}
    total_response_time = 0
    response_count = 0

    for d in recent:
        severity = d.get("severity", "unknown")
        source = d.get("source", "unknown")

        by_severity[severity] = by_severity.get(severity, 0) + 1
        by_source[source] = by_source.get(source, 0) + 1

        if "response_time_ms" in d:
            total_response_time += d["response_time_ms"]
            response_count += 1

    return {
        "period_days": days,
        "total_detections": len(recent),
        "by_severity": by_severity,
        "by_source": by_source,
        "avg_response_time_ms": total_response_time / response_count if response_count else 0,
        "first_detection": recent[0].get("timestamp"),
        "last_detection": recent[-1].get("timestamp"),
    }


def get_metrics_report(days=7):
    """Generate a human-readable metrics report."""
    summary = get_metrics_summary(days)

    lines = []
    lines.append("=" * 50)
    lines.append("CRISIS DETECTION METRICS")
    lines.append(f"Period: Last {days} days")
    lines.append("=" * 50)
    lines.append("")

    total = summary["total_detections"]
    lines.append(f"Total detections: {total}")
    lines.append("")

    if total > 0:
        lines.append("By severity:")
        for sev, count in sorted(summary["by_severity"].items()):
            pct = (count / total) * 100
            bar = "█" * int(pct / 5)
            lines.append(f"  {sev:12} {count:4} ({pct:5.1f}%) {bar}")
        lines.append("")

        lines.append("By source:")
        for src, count in sorted(summary["by_source"].items()):
            lines.append(f"  {src:20} {count:4}")
        lines.append("")

    avg_ms = summary.get("avg_response_time_ms", 0)
    lines.append(f"Avg response time: {avg_ms:.0f}ms")

    first = summary.get("first_detection")
    last = summary.get("last_detection")
    if first and last:
        first_dt = datetime.fromtimestamp(first)
        last_dt = datetime.fromtimestamp(last)
        lines.append(f"First detection: {first_dt.strftime('%Y-%m-%d %H:%M')}")
        lines.append(f"Last detection: {last_dt.strftime('%Y-%m-%d %H:%M')}")
    else:
        lines.append("No crisis detections in this period.")

    lines.append("")
    lines.append("=" * 50)

    return "\n".join(lines)


def main():
    parser = argparse.ArgumentParser(
        description="Crisis Detection Metrics CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s --summary     Weekly summary report
  %(prog)s --today       Today only
  %(prog)s --json        Raw JSON export
  %(prog)s --days 30     Last 30 days
        """,
    )

    parser.add_argument("--summary", action="store_true", help="Show summary report")
    parser.add_argument("--json", action="store_true", dest="json_output", help="Output as JSON")
    parser.add_argument("--today", action="store_true", help="Today only (1 day)")
    parser.add_argument("--days", type=int, default=7, help="Number of days (default: 7)")
    parser.add_argument("--metrics-file", type=str, help="Custom metrics file path")

    args = parser.parse_args()

    if args.metrics_file:
        global METRICS_FILE
        METRICS_FILE = Path(args.metrics_file)

    days = 1 if args.today else args.days

    if args.json_output:
        summary = get_metrics_summary(days)
        print(json.dumps(summary, indent=2, default=str))
    else:
        report = get_metrics_report(days)
        print(report)


if __name__ == "__main__":
    main()
tests/test_crisis_ab_testing.py (new file, 453 lines)
@@ -0,0 +1,453 @@
#!/usr/bin/env python3
"""
Tests for Crisis Detection A/B Testing Framework.
"""

import json
import os
import tempfile
from pathlib import Path
from unittest.mock import patch, MagicMock

import pytest

# Add crisis module to path
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

from crisis.ab_testing import (
    ABTestManager,
    ABTestConfig,
    Variant,
    DetectionEvent,
    VariantMetrics,
    ABTestMetrics,
    get_ab_manager,
    detect_with_ab,
)
from crisis.detect import CrisisDetectionResult


class TestABTestConfig:
    """Test A/B test configuration."""

    def test_default_config(self):
        config = ABTestConfig()
        assert config.enabled is True
        assert config.variant_b_percentage == 0.5
        assert config.log_file is None
        assert config.metrics_file is None

    def test_custom_config(self):
        config = ABTestConfig(
            enabled=False,
            variant_b_percentage=0.3,
            log_file="/tmp/test.log",
            metrics_file="/tmp/metrics.json",
            seed=42
        )
        assert config.enabled is False
        assert config.variant_b_percentage == 0.3
        assert config.log_file == "/tmp/test.log"
        assert config.seed == 42


class TestVariant:
    """Test variant enum."""

    def test_values(self):
        assert Variant.A.value == "A"
        assert Variant.B.value == "B"

    def test_comparison(self):
        assert Variant.A != Variant.B
        assert Variant.A == Variant.A


class TestDetectionEvent:
    """Test detection event dataclass."""

    def test_creation(self):
        event = DetectionEvent(
            event_id="test-123",
            timestamp="2026-01-01T00:00:00Z",
            variant="A",
            text_hash="abc123",
            detected_level="HIGH",
            detected_score=0.8,
            indicators=["despair"],
            detection_latency_ms=5.2
        )
        assert event.event_id == "test-123"
        assert event.variant == "A"
        assert event.detected_level == "HIGH"
        assert event.is_false_positive is None

    def test_to_dict(self):
        event = DetectionEvent(
            event_id="test-123",
            timestamp="2026-01-01T00:00:00Z",
            variant="B",
            text_hash="def456",
            detected_level="CRITICAL",
            detected_score=0.95,
            indicators=["suicide"],
            detection_latency_ms=3.1
        )
        d = event.__dict__
        assert "event_id" in d
        assert "variant" in d


class TestABTestManager:
    """Test A/B test manager."""

    def test_initialization(self, tmp_path):
        config = ABTestConfig(
            log_file=str(tmp_path / "test.log"),
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        assert manager.config.enabled is True
        assert manager.test_id is not None
        assert len(manager.events) == 0

    def test_variant_assignment_deterministic(self):
        """Same text should always get same variant."""
        manager = ABTestManager(ABTestConfig(seed=42))

        text = "I feel hopeless about everything"
        variant1 = manager._assign_variant(text)
        variant2 = manager._assign_variant(text)

        assert variant1 == variant2

    def test_variant_distribution(self):
        """Test that variant distribution is roughly 50/50."""
        manager = ABTestManager(ABTestConfig(seed=42))

        variants = []
        for i in range(100):
            text = f"Test message number {i}"
            variant = manager._assign_variant(text)
            variants.append(variant)

        a_count = sum(1 for v in variants if v == Variant.A)
        b_count = sum(1 for v in variants if v == Variant.B)

        # Should be roughly 50/50 (within 20% tolerance)
        assert 30 <= a_count <= 70
        assert 30 <= b_count <= 70

    def test_disabled_ab_testing(self, tmp_path):
        """When disabled, should always use variant A."""
        config = ABTestConfig(
            enabled=False,
            log_file=str(tmp_path / "test.log")
        )
        manager = ABTestManager(config)

        for i in range(10):
            text = f"Test message {i}"
            variant = manager._assign_variant(text)
            assert variant == Variant.A

    @patch('crisis.ab_testing.detect_crisis')
    def test_detect_with_variant_a(self, mock_detect, tmp_path):
        """Test detection with variant A (control)."""
        mock_detect.return_value = CrisisDetectionResult(
            level="HIGH",
            score=0.8,
            indicators=["despair"],
            matched_patterns=[],
            recommended_action="provide_resources"
        )

        config = ABTestConfig(
            enabled=False,  # Force variant A
            log_file=str(tmp_path / "test.log")
        )
        manager = ABTestManager(config)

        variant, result, latency = manager.detect_with_variant("I'm feeling hopeless")

        assert variant == Variant.A
        assert result.level == "HIGH"
        assert latency >= 0

    @patch('crisis.ab_testing.detect_crisis')
    def test_detect_with_variant_b(self, mock_detect, tmp_path):
        """Test detection with variant B (treatment)."""
        mock_detect.return_value = CrisisDetectionResult(
            level="MEDIUM",
            score=0.75,
            indicators=["no hope"],
            matched_patterns=[],
            recommended_action="provide_resources"
        )

        config = ABTestConfig(
            variant_b_percentage=1.0,  # Always variant B
            log_file=str(tmp_path / "test.log")
        )
        manager = ABTestManager(config)

        # Use text that hashes to variant B
        for i in range(20):
            text = f"Test message {i}"
            variant, result, latency = manager.detect_with_variant(text)
            if variant == Variant.B:
                break

        assert variant == Variant.B

    def test_event_logging(self, tmp_path):
        """Test that events are logged to file."""
        log_file = tmp_path / "test.jsonl"
        config = ABTestConfig(
            log_file=str(log_file),
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        # Mock detection
        with patch('crisis.ab_testing.detect_crisis') as mock_detect:
            mock_detect.return_value = CrisisDetectionResult(
                level="LOW",
                score=0.3,
                indicators=[],
                matched_patterns=[],
                recommended_action="none"
            )

            manager.detect_with_variant("Test message")

        # Check log file exists and has content
        assert log_file.exists()
        with open(log_file) as f:
            lines = f.readlines()
            assert len(lines) >= 1

            # Parse log entry
            entry = json.loads(lines[0])
            assert "event_id" in entry
            assert "variant" in entry
            assert "detected_level" in entry

    def test_label_event(self, tmp_path):
        """Test labeling events as false positives."""
        config = ABTestConfig(
            log_file=str(tmp_path / "test.log"),
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        # Create a mock event
        event = DetectionEvent(
            event_id="test-123",
            timestamp="2026-01-01T00:00:00Z",
            variant="A",
            text_hash="abc123",
            detected_level="HIGH",
            detected_score=0.8,
            indicators=["despair"],
            detection_latency_ms=5.0
        )
        manager.events.append(event)

        # Label it
        manager.label_event("test-123", is_false_positive=True, feedback="Not actually crisis")

        # Check labeling
        assert event.is_false_positive is True
        assert event.user_feedback == "Not actually crisis"

    def test_get_metrics_empty(self, tmp_path):
        """Test metrics with no events."""
        config = ABTestConfig(
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        metrics = manager.get_metrics()
        assert metrics.sample_size_a == 0
        assert metrics.sample_size_b == 0
        assert metrics.variant_a.total_detections == 0

    def test_get_metrics_with_events(self, tmp_path):
        """Test metrics calculation with events."""
        config = ABTestConfig(
            log_file=str(tmp_path / "test.log"),
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        # Add some mock events
        for i in range(10):
            event = DetectionEvent(
                event_id=f"event-{i}",
                timestamp="2026-01-01T00:00:00Z",
                variant="A" if i % 2 == 0 else "B",
                text_hash=f"hash-{i}",
                detected_level="HIGH" if i % 3 == 0 else "MEDIUM",
                detected_score=0.7 + (i % 3) * 0.1,
                indicators=["despair"] if i % 2 == 0 else [],
                detection_latency_ms=3.0 + i * 0.5
            )
            # Label some as false positives
            if i % 4 == 0:
                event.is_false_positive = True
            elif i % 4 == 1:
                event.is_false_positive = False

            manager.events.append(event)

        metrics = manager.get_metrics()

        # Check we have events in both variants
        assert metrics.sample_size_a > 0
        assert metrics.sample_size_b > 0

        # Check latency calculations
        assert metrics.variant_a.avg_latency_ms > 0
        assert metrics.variant_b.avg_latency_ms > 0

        # Check level distribution
        assert len(metrics.variant_a.level_distribution) > 0

    def test_variant_distribution(self, tmp_path):
        """Test getting variant distribution."""
        config = ABTestConfig()
        manager = ABTestManager(config)

        # Add events
        for i in range(5):
            event = DetectionEvent(
                event_id=f"event-{i}",
                timestamp="2026-01-01T00:00:00Z",
                variant="A" if i < 3 else "B",
                text_hash=f"hash-{i}",
                detected_level="LOW",
                detected_score=0.5,
                indicators=[],
                detection_latency_ms=2.0
            )
            manager.events.append(event)

        dist = manager.get_variant_distribution()
        assert dist["A"] == 3
        assert dist["B"] == 2

    def test_force_variant(self, tmp_path):
        """Test forcing a specific variant."""
        config = ABTestConfig()
        manager = ABTestManager(config)

        manager.force_variant(Variant.B)

        # After forcing, all should be variant B
        for i in range(5):
            text = f"Test message {i}"
            variant = manager._assign_variant(text)
            assert variant == Variant.B

    def test_reset(self, tmp_path):
        """Test resetting the A/B test."""
        config = ABTestConfig(
            log_file=str(tmp_path / "test.log"),
            metrics_file=str(tmp_path / "metrics.json")
        )
        manager = ABTestManager(config)

        # Add some events
        for i in range(3):
            event = DetectionEvent(
                event_id=f"event-{i}",
                timestamp="2026-01-01T00:00:00Z",
                variant="A",
                text_hash=f"hash-{i}",
                detected_level="LOW",
                detected_score=0.5,
                indicators=[],
                detection_latency_ms=2.0
            )
            manager.events.append(event)

        assert len(manager.events) == 3

        # Reset
        manager.reset()

        assert len(manager.events) == 0
        assert manager.config.enabled is True


class TestConvenienceFunctions:
    """Test convenience functions."""

    def test_get_ab_manager(self):
        """Test getting default manager."""
        manager = get_ab_manager()
        assert isinstance(manager, ABTestManager)

    @patch('crisis.ab_testing.detect_crisis')
    def test_detect_with_ab(self, mock_detect):
        """Test convenience detection function."""
        mock_detect.return_value = CrisisDetectionResult(
            level="HIGH",
            score=0.8,
            indicators=["despair"],
            matched_patterns=[],
            recommended_action="provide_resources"
        )

        result = detect_with_ab("I'm feeling hopeless")

        assert "variant" in result
        assert "detection" in result
        assert "latency_ms" in result
        assert "test_id" in result
        assert result["detection"]["level"] == "HIGH"


class TestMetricsCalculation:
    """Test metrics calculation edge cases."""

    def test_percentile_calculation(self, tmp_path):
        """Test that percentiles are calculated correctly."""
        config = ABTestConfig()
        manager = ABTestManager(config)

        # Create events with known latencies
        latencies = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
        events = []
        for i, lat in enumerate(latencies):
            events.append(DetectionEvent(
                event_id=f"event-{i}",
                timestamp="2026-01-01T00:00:00Z",
                variant="A",
                text_hash=f"hash-{i}",
                detected_level="LOW",
                detected_score=0.5,
                indicators=[],
                detection_latency_ms=lat
            ))

        metrics = manager._calculate_variant_metrics(events)

        assert metrics.p50_latency_ms == 5.0  # 50th percentile
        assert metrics.p95_latency_ms == 10.0  # 95th percentile (last element)
        assert metrics.avg_latency_ms == 5.5  # Average

    def test_empty_variant_metrics(self, tmp_path):
        """Test metrics with no events."""
        config = ABTestConfig()
        manager = ABTestManager(config)

        metrics = manager._calculate_variant_metrics([])

        assert metrics.total_detections == 0
        assert metrics.avg_latency_ms == 0
        assert metrics.level_distribution == {}
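One caveat the tests only cover indirectly: _assign_variant buckets on Python's built-in hash(), which is salted per process, so the determinism checked in test_variant_assignment_deterministic only holds within a single run. A sketch of a process-stable alternative using the same 0-99 bucketing; this is an illustration, not part of this commit:

# Sketch only, not part of this commit: a process-stable bucketing helper.
import hashlib

def stable_bucket(text: str) -> int:
    """Map text to a bucket in [0, 100) that is identical across runs."""
    digest = hashlib.sha256(text.encode("utf-8")).digest()
    return int.from_bytes(digest[:8], "big") % 100

# Hypothetical drop-in for the `hash(text) % 100` line in _assign_variant:
# text_hash = stable_bucket(text)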