Compare commits

..

2 Commits

Author SHA1 Message Date
65d6fc6119 test: add A/B testing framework tests (#101)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 7s
Smoke Test / smoke (pull_request) Successful in 10s
2026-04-15 03:58:27 +00:00
70d04cdbfd feat: add crisis detection A/B test framework (#101) 2026-04-15 03:58:26 +00:00
3 changed files with 281 additions and 311 deletions

152
crisis/ab_testing.py Normal file
View File

@@ -0,0 +1,152 @@
"""
A/B Test Framework for Crisis Detection in the-door.
Allows running two crisis detection variants side-by-side with
logged outcomes for comparison. No PII stored — only variant labels,
levels, and timing.
Usage:
from crisis.ab_testing import ABTestCrisisDetector
detector = ABTestCrisisDetector(variant_a=detect_v1, variant_b=detect_v2)
result, variant = detector.detect("I feel hopeless")
# result: CrisisDetectionResult
# variant: "A" or "B"
# Get comparison metrics
stats = detector.get_stats()
# {"A": {"count": 100, "avg_latency_ms": 2.3, ...}, "B": {...}}
"""
import os
import random
import time
from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Tuple
from .detect import CrisisDetectionResult
# ── Feature flag ───────────────────────────────────────────────
def _get_variant_override() -> Optional[str]:
    """Return "A" or "B" when the CRISIS_AB_VARIANT env var forces a variant.

    Case-insensitive; any other value (or an unset variable) yields None.
    Intended for testing/debugging only.
    """
    forced = os.environ.get("CRISIS_AB_VARIANT", "").upper()
    return forced if forced in {"A", "B"} else None
@dataclass
class VariantRecord:
    """Single detection event record — no PII, only metadata.

    One record is appended per ABTestCrisisDetector.detect() call; these
    records are the raw data aggregated by get_stats().
    """
    variant: str          # variant label the call was routed to (e.g. "A" or "B")
    level: str            # crisis level reported by the detection result
    latency_ms: float     # wall-clock duration of the detection call, in milliseconds
    indicator_count: int  # number of indicators on the result (the indicators themselves are not stored)
class ABTestCrisisDetector:
    """
    A/B test wrapper for crisis detection.

    Routes calls to variant A or B based on a configurable split,
    logs outcomes for comparison, and provides aggregate stats.
    Records contain no PII — only variant label, level, latency,
    and indicator count.
    """

    def __init__(
        self,
        variant_a: Callable[[str], CrisisDetectionResult],
        variant_b: Callable[[str], CrisisDetectionResult],
        split: float = 0.5,
        variant_a_name: str = "A",
        variant_b_name: str = "B",
    ):
        """
        Args:
            variant_a: First detection function
            variant_b: Second detection function
            split: Probability of selecting variant A (0.0 to 1.0)
            variant_a_name: Label for variant A in records and reports
            variant_b_name: Label for variant B in records and reports
        """
        self.variant_a = variant_a
        self.variant_b = variant_b
        self.split = split
        self.variant_a_name = variant_a_name
        self.variant_b_name = variant_b_name
        self.records: List[VariantRecord] = []

    def _select_variant(self) -> str:
        """Select a variant label based on split and optional env override."""
        # The env override is always spelled "A"/"B"; map it onto the
        # configured labels so custom variant names still honor it.
        override = _get_variant_override()
        if override == "A":
            return self.variant_a_name
        if override == "B":
            return self.variant_b_name
        return self.variant_a_name if random.random() < self.split else self.variant_b_name

    def detect(self, text: str) -> Tuple[CrisisDetectionResult, str]:
        """
        Run detection on the selected variant and log the result.

        Args:
            text: Message text passed through to the variant function.

        Returns:
            (CrisisDetectionResult, variant_label)
        """
        variant = self._select_variant()
        fn = self.variant_a if variant == self.variant_a_name else self.variant_b
        start = time.perf_counter()
        result = fn(text)
        latency_ms = (time.perf_counter() - start) * 1000
        # Log record (no PII — only label, level, timing, indicator count)
        self.records.append(
            VariantRecord(
                variant=variant,
                level=result.level,
                latency_ms=latency_ms,
                indicator_count=len(result.indicators),
            )
        )
        return result, variant

    def get_stats(self) -> Dict[str, dict]:
        """
        Get per-variant comparison statistics.

        Returns dict with variant labels as keys:
            {
                "A": {"count": 100, "avg_latency_ms": 2.3, "levels": {...}},
                "B": {"count": 95, "avg_latency_ms": 3.1, "levels": {...}}
            }
        A variant with no recorded calls reports only {"count": 0}.
        """
        stats: Dict[str, dict] = {}
        # Aggregate under the configured labels; these were previously
        # hard-coded to "A"/"B", which broke reporting whenever custom
        # variant names were supplied to __init__.
        for label in (self.variant_a_name, self.variant_b_name):
            recs = [r for r in self.records if r.variant == label]
            if not recs:
                stats[label] = {"count": 0}
                continue
            latencies = [r.latency_ms for r in recs]
            levels: Dict[str, int] = {}
            for r in recs:
                levels[r.level] = levels.get(r.level, 0) + 1
            stats[label] = {
                "count": len(recs),
                "avg_latency_ms": round(sum(latencies) / len(latencies), 2),
                "max_latency_ms": round(max(latencies), 2),
                "min_latency_ms": round(min(latencies), 2),
                "levels": levels,
                "avg_indicators": round(
                    sum(r.indicator_count for r in recs) / len(recs), 2
                ),
            }
        return stats

    def reset(self) -> None:
        """Clear all records. For testing."""
        self.records.clear()

View File

@@ -1,311 +0,0 @@
#!/usr/bin/env python3
"""Behavioral Pattern Detection for Crisis Signals (#133).
Detects crisis risk from session-level behavioral patterns:
- Message frequency (increasing urgency = rapid-fire messages)
- Time-of-day (late-night messages correlate with crisis risk)
- Withdrawal (decreasing communication after engagement)
- Escalation (crisis indicators getting stronger over time)
Usage:
from crisis.behavioral import analyze_session, BehavioralSignal
signals = analyze_session(messages)
for sig in signals:
if sig.risk_level == "HIGH":
# Escalate to crisis protocol
pass
"""
import math
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Optional
@dataclass
class Message:
    """A single message in a session.

    Input unit for the behavioral analyzers, which read the timestamp,
    role, crisis_score, and the length of the content.
    """
    timestamp: datetime  # when the message was sent
    content: str  # message text (analyzers only use its length)
    crisis_score: float = 0.0  # 0.0-1.0 from text detector
    role: str = "user"  # "user" or "assistant"
@dataclass
class BehavioralSignal:
    """A detected behavioral pattern indicating crisis risk.

    Produced by the _analyze_* helpers and aggregated by analyze_session().
    """
    signal_type: str  # "frequency", "time", "withdrawal", "escalation"
    risk_level: str  # "LOW", "MEDIUM", "HIGH"
    description: str  # human-readable summary of the detected pattern
    evidence: list = field(default_factory=list)  # supporting detail strings
    score: float = 0.0  # 0.0-1.0
# ── Configuration ─────────────────────────────────────────────────────────────
# Message frequency thresholds (messages per hour).
# _analyze_frequency emits MEDIUM at >=FREQ_ELEVATED and HIGH at >=FREQ_HIGH.
FREQ_NORMAL = 6  # below this is considered normal — NOTE(review): not referenced by any analyzer
FREQ_ELEVATED = 15  # >=15/hr triggers a MEDIUM frequency signal
FREQ_HIGH = 30  # >=30/hr triggers a HIGH frequency signal
# Time-of-day risk windows (hours in 24h format)
HIGH_RISK_HOURS = set(range(1, 5))  # hours 1-4, i.e. 1AM-4:59AM
ELEVATED_RISK_HOURS = set(range(22, 24)) | set(range(5, 7))  # 10PM-11:59PM and 5AM-6:59AM
# Withdrawal: messages/day trend
WITHDRAWAL_THRESHOLD = 0.3  # NOTE(review): unused — _analyze_withdrawal hard-codes its own ratios (0.5/0.7, 1.5/2.0)
# Escalation: crisis score trend
ESCALATION_WINDOW = 5  # Look at last N scored user messages
# ── Frequency Analysis ────────────────────────────────────────────────────────
def _analyze_frequency(messages: list[Message]) -> Optional[BehavioralSignal]:
    """Flag rapid-fire user messaging, an urgency indicator.

    Converts the time span of the last (up to) 10 user messages into a
    messages-per-hour rate; returns a HIGH signal at >=FREQ_HIGH, a MEDIUM
    signal at >=FREQ_ELEVATED, otherwise None.
    """
    if len(messages) < 3:
        return None
    from_user = [m for m in messages if m.role == "user"]
    if len(from_user) < 3:
        return None
    window = from_user[-10:]  # most recent user messages
    if len(window) < 2:
        return None
    span_s = (window[-1].timestamp - window[0].timestamp).total_seconds()
    if span_s <= 0:
        return None  # clock skew / identical timestamps: rate undefined
    rate = len(window) / (span_s / 3600)
    if rate < FREQ_ELEVATED:
        return None
    evidence = [f"Last {len(window)} messages in {span_s/60:.0f} minutes"]
    if rate >= FREQ_HIGH:
        return BehavioralSignal(
            signal_type="frequency",
            risk_level="HIGH",
            description=f"Very rapid messaging: {rate:.0f} messages/hour",
            evidence=evidence,
            score=min(1.0, rate / FREQ_HIGH),
        )
    return BehavioralSignal(
        signal_type="frequency",
        risk_level="MEDIUM",
        description=f"Elevated messaging rate: {rate:.0f} messages/hour",
        evidence=evidence,
        score=rate / FREQ_HIGH,
    )
# ── Time-of-Day Analysis ─────────────────────────────────────────────────────
def _analyze_time(messages: list[Message]) -> Optional[BehavioralSignal]:
    """Flag late-night / off-hours messaging, which correlates with crisis risk.

    Examines the last (up to) 5 messages of any role; 3+ inside
    HIGH_RISK_HOURS yields HIGH, else 3+ inside ELEVATED_RISK_HOURS yields
    MEDIUM, otherwise None.
    """
    if not messages:
        return None
    window = messages[-5:]
    in_high = [m for m in window if m.timestamp.hour in HIGH_RISK_HOURS]
    in_elevated = [m for m in window if m.timestamp.hour in ELEVATED_RISK_HOURS]
    if len(in_high) >= 3:
        return BehavioralSignal(
            signal_type="time",
            risk_level="HIGH",
            description=f"Late-night messaging pattern: {len(in_high)}/5 messages between 1-4 AM",
            evidence=[f"Message at {m.timestamp.strftime('%H:%M')}" for m in in_high],
            score=len(in_high) / len(window),
        )
    if len(in_elevated) >= 3:
        return BehavioralSignal(
            signal_type="time",
            risk_level="MEDIUM",
            description=f"Off-hours messaging: {len(in_elevated)}/5 messages in elevated-risk window",
            evidence=[f"Message at {m.timestamp.strftime('%H:%M')}" for m in in_elevated],
            score=len(in_elevated) / len(window) * 0.5,
        )
    return None
# ── Withdrawal Detection ──────────────────────────────────────────────────────
def _analyze_withdrawal(messages: list[Message]) -> Optional[BehavioralSignal]:
    """Flag communication withdrawal (decreasing engagement).

    Splits the user's messages into an earlier and a later half, then
    compares average message length (engagement proxy) and average gap
    between consecutive messages. Shorter AND much sparser -> HIGH;
    somewhat shorter OR somewhat sparser -> MEDIUM; otherwise None.
    Requires at least 10 user messages.
    """
    from_user = [m for m in messages if m.role == "user"]
    if len(from_user) < 10:
        return None
    half = len(from_user) // 2
    earlier, later = from_user[:half], from_user[half:]

    def mean_length(msgs):
        # Average content length as an engagement proxy.
        return sum(len(m.content) for m in msgs) / len(msgs)

    def mean_gap(msgs):
        # Average seconds between consecutive messages; 0 if fewer than two.
        if len(msgs) < 2:
            return 0
        deltas = [
            (b.timestamp - a.timestamp).total_seconds()
            for a, b in zip(msgs, msgs[1:])
        ]
        return sum(deltas) / len(deltas)

    len_before, len_after = mean_length(earlier), mean_length(later)
    gap_before, gap_after = mean_gap(earlier), mean_gap(later)
    # Ratios default to 1.0 (no change) when the denominator is degenerate.
    length_ratio = len_after / len_before if len_before > 0 else 1.0
    gap_ratio = gap_after / gap_before if gap_before > 0 else 1.0
    if length_ratio < 0.5 and gap_ratio > 2.0:
        return BehavioralSignal(
            signal_type="withdrawal",
            risk_level="HIGH",
            description="Significant withdrawal: messages shorter and less frequent",
            evidence=[
                f"Message length: {len_before:.0f} -> {len_after:.0f} chars ({length_ratio:.0%})",
                f"Message gap: {gap_before/60:.0f}min -> {gap_after/60:.0f}min ({gap_ratio:.1f}x)",
            ],
            score=min(1.0, (1 - length_ratio) * 0.5 + (gap_ratio - 1) * 0.25),
        )
    if length_ratio < 0.7 or gap_ratio > 1.5:
        return BehavioralSignal(
            signal_type="withdrawal",
            risk_level="MEDIUM",
            description="Moderate withdrawal: engagement decreasing",
            evidence=[
                f"Message length: {len_before:.0f} -> {len_after:.0f} chars",
                f"Message gap: {gap_before/60:.0f}min -> {gap_after/60:.0f}min",
            ],
            score=(1 - length_ratio) * 0.3 + (gap_ratio - 1) * 0.15,
        )
    return None
# ── Escalation Detection ─────────────────────────────────────────────────────
def _analyze_escalation(messages: list[Message]) -> Optional[BehavioralSignal]:
    """Flag rising crisis scores across the most recent scored user messages.

    Considers only user messages with crisis_score > 0; requires at least
    ESCALATION_WINDOW of them. Compares the average score of the earlier
    half of the window against the later half.
    """
    scored = [m for m in messages if m.role == "user" and m.crisis_score > 0]
    if len(scored) < ESCALATION_WINDOW:
        return None
    scores = [m.crisis_score for m in scored[-ESCALATION_WINDOW:]]
    if len(scores) < 3:
        return None
    # Crude trend test: later-half average vs earlier-half average.
    mid = len(scores) // 2
    early_avg = sum(scores[:mid]) / mid
    late_avg = sum(scores[mid:]) / (len(scores) - mid)
    evidence = [f"Score {i+1}: {s:.2f}" for i, s in enumerate(scores)]
    if late_avg > early_avg * 1.5 and late_avg > 0.5:
        return BehavioralSignal(
            signal_type="escalation",
            risk_level="HIGH",
            description=f"Crisis escalation detected: scores rising from {early_avg:.2f} to {late_avg:.2f}",
            evidence=evidence,
            score=min(1.0, late_avg),
        )
    if late_avg > early_avg * 1.2 and late_avg > 0.3:
        return BehavioralSignal(
            signal_type="escalation",
            risk_level="MEDIUM",
            description=f"Mild escalation: scores trending up",
            evidence=evidence,
            score=late_avg * 0.5,
        )
    return None
# ── Combined Analysis ─────────────────────────────────────────────────────────
def analyze_session(messages: list[Message]) -> list[BehavioralSignal]:
    """Analyze a session for behavioral crisis signals.

    Args:
        messages: List of Message objects with timestamps, content, and crisis scores.

    Returns:
        List of BehavioralSignal objects, sorted by risk level (HIGH first)
        and, within a level, by descending score.
    """
    analyzers = (
        _analyze_frequency,
        _analyze_time,
        _analyze_withdrawal,
        _analyze_escalation,
    )
    signals = [sig for analyze in analyzers if (sig := analyze(messages))]
    # HIGH first, then MEDIUM, then LOW; unrecognized levels sink to the end.
    precedence = {"HIGH": 0, "MEDIUM": 1, "LOW": 2}
    signals.sort(key=lambda s: (precedence.get(s.risk_level, 9), -s.score))
    return signals
def get_session_risk_level(signals: list[BehavioralSignal]) -> str:
    """Collapse behavioral signals into one overall session risk label.

    Returns the strongest risk level present ("HIGH" beats "MEDIUM" beats
    "LOW"), or "NONE" when no signals were detected.
    """
    if not signals:
        return "NONE"
    present = {s.risk_level for s in signals}
    for level in ("HIGH", "MEDIUM"):
        if level in present:
            return level
    return "LOW"
# ── Self-Test ─────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    from datetime import timedelta

    now = datetime.now(timezone.utc)

    # Test: rapid-fire messaging — 20 user messages one minute apart
    # (oldest first), so the recent window's rate far exceeds FREQ_HIGH.
    rapid_msgs = [
        Message(timestamp=now - timedelta(minutes=i), content="help me", role="user")
        for i in range(20, 0, -1)
    ]
    signals = analyze_session(rapid_msgs)
    print(f"Rapid-fire: {[s.signal_type + ':' + s.risk_level for s in signals]}")
    assert any(s.signal_type == "frequency" for s in signals), "Should detect frequency"

    # Test: late-night — all five messages at 2 AM fall in HIGH_RISK_HOURS.
    late_msgs = [
        Message(timestamp=now.replace(hour=2, minute=i*5), content="cant sleep", role="user")
        for i in range(5)
    ]
    signals = analyze_session(late_msgs)
    print(f"Late-night: {[s.signal_type + ':' + s.risk_level for s in signals]}")
    assert any(s.signal_type == "time" for s in signals), "Should detect time"

    # Test: escalation — scores must RISE toward the most recent message.
    # BUG FIX: the previous fixture used crisis_score=0.1 + i*0.15 with i
    # counting DOWN from 5, so the oldest message carried the highest score
    # and scores decreased over time ([0.85..0.25]); _analyze_escalation
    # never fired and this assertion always failed. (5 - i) makes the
    # scores ascend ([0.10..0.70]), which triggers a HIGH escalation.
    esc_msgs = [
        Message(timestamp=now - timedelta(minutes=i*10), content="feeling bad",
                role="user", crisis_score=0.1 + (5 - i)*0.15)
        for i in range(5, 0, -1)
    ]
    signals = analyze_session(esc_msgs)
    print(f"Escalation: {[s.signal_type + ':' + s.risk_level for s in signals]}")
    assert any(s.signal_type == "escalation" for s in signals), "Should detect escalation"

    print("\nAll self-tests passed!")

129
tests/test_ab_testing.py Normal file
View File

@@ -0,0 +1,129 @@
"""
Tests for crisis/ab_testing.py — A/B test framework for crisis detection.
Verifies variant selection, logging, stats aggregation, and env override.
"""
import os
from unittest.mock import patch
import pytest
from crisis.ab_testing import ABTestCrisisDetector
from crisis.detect import CrisisDetectionResult, detect_crisis
def _make_variant(level: str):
    """Return a stub detection function that always reports the given level."""
    def _detect(text: str) -> CrisisDetectionResult:
        # Ignores the input text; fixed level plus one synthetic indicator.
        return CrisisDetectionResult(level=level, indicators=[f"mock_{level}"])
    return _detect
class TestABTestCrisisDetector:
    """A/B test framework unit tests."""

    def setup_method(self):
        """Ensure no env override is active before each test."""
        os.environ.pop("CRISIS_AB_VARIANT", None)

    def teardown_method(self):
        """Remove any override a test set.

        BUG FIX: the env-override tests set CRISIS_AB_VARIANT but never
        removed it; setup_method only cleans before tests in THIS class,
        so the override leaked into any test module run afterwards.
        """
        os.environ.pop("CRISIS_AB_VARIANT", None)

    def test_returns_result_and_variant(self):
        """detect() returns a (result, variant_label) pair."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        result, variant = detector.detect("test message")
        assert isinstance(result, CrisisDetectionResult)
        assert variant in ("A", "B")

    def test_records_are_logged(self):
        """Each detect() call appends one record with variant and level."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        # Force variant A
        with patch.object(detector, "_select_variant", return_value="A"):
            detector.detect("test")
        assert len(detector.records) == 1
        assert detector.records[0].variant == "A"
        assert detector.records[0].level == "LOW"

    def test_stats_empty(self):
        """With no calls, both variants report count 0."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        stats = detector.get_stats()
        assert stats["A"]["count"] == 0
        assert stats["B"]["count"] == 0

    def test_stats_with_data(self):
        """Stats split counts, latency, and level histograms per variant."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        # Force 5 A and 3 B
        with patch.object(detector, "_select_variant", side_effect=["A"] * 5 + ["B"] * 3):
            for _ in range(8):
                detector.detect("test")
        stats = detector.get_stats()
        assert stats["A"]["count"] == 5
        assert stats["B"]["count"] == 3
        assert "avg_latency_ms" in stats["A"]
        assert stats["A"]["levels"]["LOW"] == 5
        assert stats["B"]["levels"]["HIGH"] == 3

    def test_env_override_a(self):
        """CRISIS_AB_VARIANT=A forces every call onto variant A."""
        os.environ["CRISIS_AB_VARIANT"] = "A"
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        for _ in range(10):
            result, variant = detector.detect("test")
            assert variant == "A"
            assert result.level == "LOW"

    def test_env_override_b(self):
        """Override is case-insensitive: 'b' forces variant B."""
        os.environ["CRISIS_AB_VARIANT"] = "b"
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        for _ in range(10):
            result, variant = detector.detect("test")
            assert variant == "B"
            assert result.level == "HIGH"

    def test_reset_clears_records(self):
        """reset() drops all accumulated records."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        detector.detect("test")
        detector.detect("test")
        assert len(detector.records) == 2
        detector.reset()
        assert len(detector.records) == 0

    def test_split_respected(self):
        """With split=1.0, always get variant A."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
            split=1.0,
        )
        for _ in range(10):
            _, variant = detector.detect("test")
            assert variant == "A"

    def test_with_real_detector(self):
        """Integration test using actual detect_crisis as both variants."""
        detector = ABTestCrisisDetector(
            variant_a=detect_crisis,
            variant_b=detect_crisis,
        )
        result, variant = detector.detect("I want to kill myself")
        assert result.level == "CRITICAL"
        assert variant in ("A", "B")