Compare commits

..

2 Commits

Author SHA1 Message Date
65d6fc6119 test: add A/B testing framework tests (#101)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 7s
Smoke Test / smoke (pull_request) Successful in 10s
2026-04-15 03:58:27 +00:00
70d04cdbfd feat: add crisis detection A/B test framework (#101) 2026-04-15 03:58:26 +00:00
4 changed files with 281 additions and 239 deletions

152
crisis/ab_testing.py Normal file
View File

@@ -0,0 +1,152 @@
"""
A/B Test Framework for Crisis Detection in the-door.
Allows running two crisis detection variants side-by-side with
logged outcomes for comparison. No PII stored — only variant labels,
levels, and timing.
Usage:
from crisis.ab_testing import ABTestCrisisDetector
detector = ABTestCrisisDetector(variant_a=detect_v1, variant_b=detect_v2)
result, variant = detector.detect("I feel hopeless")
# result: CrisisDetectionResult
# variant: "A" or "B"
# Get comparison metrics
stats = detector.get_stats()
# {"A": {"count": 100, "avg_latency_ms": 2.3, ...}, "B": {...}}
"""
import os
import random
import time
from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Tuple
from .detect import CrisisDetectionResult
# ── Feature flag ───────────────────────────────────────────────
def _get_variant_override() -> Optional[str]:
"""Check for environment variable override (testing/debugging)."""
val = os.environ.get("CRISIS_AB_VARIANT", "").upper()
if val in ("A", "B"):
return val
return None
@dataclass
class VariantRecord:
    """Single detection event record — no PII, only metadata."""
    # Variant label the call was routed to ("A" or "B").
    variant: str
    # Crisis level string taken from the detection result.
    level: str
    # Wall-clock duration of the detection call, in milliseconds.
    latency_ms: float
    # Number of indicators the detection result reported.
    indicator_count: int
class ABTestCrisisDetector:
    """
    A/B test wrapper for crisis detection.

    Routes calls to variant A or B based on configurable split,
    logs outcomes for comparison, and provides aggregate stats.
    Records only metadata (variant, level, latency, indicator count) —
    the screened text itself is never stored.
    """

    def __init__(
        self,
        variant_a: Callable[[str], CrisisDetectionResult],
        variant_b: Callable[[str], CrisisDetectionResult],
        split: float = 0.5,
        variant_a_name: str = "A",
        variant_b_name: str = "B",
    ):
        """
        Args:
            variant_a: First detection function
            variant_b: Second detection function
            split: Probability of selecting variant A (0.0 to 1.0)
            variant_a_name: Label for variant A in reports
            variant_b_name: Label for variant B in reports

        Raises:
            ValueError: If split is outside the [0.0, 1.0] range.
        """
        # Fail fast: an out-of-range split would silently skew routing
        # (e.g. split=1.5 would send ALL traffic to variant A).
        if not 0.0 <= split <= 1.0:
            raise ValueError(f"split must be in [0.0, 1.0], got {split!r}")
        self.variant_a = variant_a
        self.variant_b = variant_b
        self.split = split
        # NOTE(review): these labels are stored but records and get_stats()
        # currently key on the fixed labels "A"/"B" — reports do not use
        # the custom names yet. Kept for interface compatibility.
        self.variant_a_name = variant_a_name
        self.variant_b_name = variant_b_name
        self.records: List[VariantRecord] = []

    def _select_variant(self) -> str:
        """Select variant based on split and optional env override."""
        override = _get_variant_override()
        if override:
            return override
        return "A" if random.random() < self.split else "B"

    def detect(self, text: str) -> Tuple[CrisisDetectionResult, str]:
        """
        Run detection on the selected variant and log the result.

        Args:
            text: Message text to screen (not retained — only metadata).

        Returns:
            (CrisisDetectionResult, variant_label)
        """
        variant = self._select_variant()
        fn = self.variant_a if variant == "A" else self.variant_b
        start = time.perf_counter()
        result = fn(text)
        latency_ms = (time.perf_counter() - start) * 1000
        # Log record (no PII — only level, timing, count)
        self.records.append(
            VariantRecord(
                variant=variant,
                level=result.level,
                latency_ms=latency_ms,
                indicator_count=len(result.indicators),
            )
        )
        return result, variant

    def get_stats(self) -> Dict[str, dict]:
        """
        Get per-variant comparison statistics.

        Returns dict with variant labels as keys:
            {
                "A": {"count": 100, "avg_latency_ms": 2.3, "levels": {...}},
                "B": {"count": 95, "avg_latency_ms": 3.1, "levels": {...}}
            }

        A variant with no recorded calls yields only {"count": 0}.
        """
        stats = {}
        for label in ("A", "B"):
            recs = [r for r in self.records if r.variant == label]
            if not recs:
                stats[label] = {"count": 0}
                continue
            latencies = [r.latency_ms for r in recs]
            # Histogram of crisis levels observed for this variant.
            levels = {}
            for r in recs:
                levels[r.level] = levels.get(r.level, 0) + 1
            stats[label] = {
                "count": len(recs),
                "avg_latency_ms": round(sum(latencies) / len(latencies), 2),
                "max_latency_ms": round(max(latencies), 2),
                "min_latency_ms": round(min(latencies), 2),
                "levels": levels,
                "avg_indicators": round(
                    sum(r.indicator_count for r in recs) / len(recs), 2
                ),
            }
        return stats

    def reset(self) -> None:
        """Clear all records. For testing."""
        self.records.clear()

View File

@@ -1,155 +0,0 @@
"""
Image Content Screening for Self-Harm Indicators
Screens images sent in chat for self-harm content using vision models.
Privacy-preserving: analyzes in-memory, discards after scoring. No image retention.
"""
import base64
import json
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional
logger = logging.getLogger("the-door.image_screening")
class RiskLevel(str, Enum):
    # str mixin: members compare equal to and serialize as plain strings,
    # so .value round-trips through the JSON model response cleanly.
    SAFE = "safe"              # no indicators detected
    CONCERNING = "concerning"  # ambiguous but worrying
    CRITICAL = "critical"      # imminent self-harm indicators
@dataclass
class ImageScreenResult:
    """Outcome of screening one image for self-harm indicators."""
    risk_level: RiskLevel
    confidence: float
    categories: list
    reasoning: str
    model_used: str

    @property
    def needs_crisis_response(self):
        """Only a CRITICAL rating warrants an immediate crisis response."""
        return self.risk_level == RiskLevel.CRITICAL

    @property
    def needs_followup(self):
        """CONCERNING and CRITICAL results should both be logged for follow-up."""
        return self.risk_level in (RiskLevel.CONCERNING, RiskLevel.CRITICAL)

    def to_dict(self):
        """Serialize to a JSON-friendly dict (confidence rounded to 3 dp)."""
        payload = {"risk_level": self.risk_level.value}
        payload["confidence"] = round(self.confidence, 3)
        payload["categories"] = self.categories
        payload["reasoning"] = self.reasoning
        payload["model_used"] = self.model_used
        return payload
# Prompt sent to the vision model. It demands a JSON-only reply so the
# response can be parsed mechanically (see the {...} extraction in
# _analyze_with_ollama) without free-text handling.
SCREENING_PROMPT = """Analyze this image for indicators of self-harm or crisis. Look for:
1. Self-harm wounds or injuries
2. Medication/pills in concerning quantities
3. Farewell-type imagery (empty rooms, items laid out, goodbye notes)
4. Screenshots of crisis-related searches
5. Images conveying hopelessness, isolation, or despair
Respond in JSON format ONLY:
{"risk_level": "safe"|"concerning"|"critical", "confidence": 0.0-1.0, "categories": [], "reasoning": ""}
CRITICAL: imminent self-harm indicators
CONCERNING: ambiguous but worrying
SAFE: no indicators detected"""
def _analyze_with_ollama(image_b64, model="gemma3:4b"):
    """Screen a base64-encoded image via a local Ollama vision model.

    Args:
        image_b64: Base64-encoded image payload.
        model: Ollama model tag to query.

    Returns:
        ImageScreenResult on success, or None when the request fails or
        the model's reply contains no parseable JSON (caller falls back).
    """
    try:
        import urllib.request
        payload = json.dumps({
            "model": model,
            "messages": [{
                "role": "user",
                "content": SCREENING_PROMPT,
                "images": [image_b64],
            }],
            "stream": False,
            # Low temperature for deterministic, conservative scoring.
            "options": {"temperature": 0.1},
        }).encode()
        req = urllib.request.Request(
            "http://localhost:11434/api/chat",
            data=payload,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        # Fix: close the HTTP response deterministically — the original
        # never closed it, leaking the connection on every call.
        with urllib.request.urlopen(req, timeout=30) as resp:
            data = json.loads(resp.read())
        content = data.get("message", {}).get("content", "")
        # Extract the first {...} span; models sometimes wrap JSON in prose.
        json_start = content.find("{")
        json_end = content.rfind("}") + 1
        if json_start == -1 or json_end <= json_start:
            return None
        result = json.loads(content[json_start:json_end])
        return ImageScreenResult(
            risk_level=RiskLevel(result.get("risk_level", "safe")),
            confidence=float(result.get("confidence", 0.5)),
            categories=result.get("categories", []),
            reasoning=result.get("reasoning", ""),
            model_used=f"ollama:{model}",
        )
    except Exception as e:
        # Best-effort by design: any failure (network, JSON parse, bad enum
        # value) degrades to None so the caller uses the heuristic fallback.
        logger.warning("Ollama vision analysis failed: %s", e)
        return None
def _analyze_fallback(image_bytes):
    """Conservative default used when no vision model is available.

    Returns a SAFE result with deliberately low confidence so downstream
    consumers can tell "analyzed and safe" apart from "not analyzed".
    """
    unanalyzed = ImageScreenResult(
        risk_level=RiskLevel.SAFE,
        confidence=0.2,
        categories=["unanalyzed"],
        reasoning="No vision model available. Defaulting to safe with low confidence.",
        model_used="fallback:heuristic",
    )
    return unanalyzed
def screen_image(image_data, use_vision_model=True, model="gemma3:4b"):
    """Screen image for self-harm indicators. Analyzes in-memory, no retention."""
    # Normalize input: accept raw bytes or an already-base64-encoded string,
    # keeping both representations (the fallback receives the raw bytes).
    if not isinstance(image_data, bytes):
        image_b64 = image_data
        image_data = base64.b64decode(image_b64)
    else:
        image_b64 = base64.b64encode(image_data).decode()

    result = _analyze_with_ollama(image_b64, model) if use_vision_model else None
    if result is None:
        return _analyze_fallback(image_data)

    logger.info(f"Image screened: {result.risk_level.value} (conf: {result.confidence:.2f})")
    if result.needs_crisis_response:
        logger.warning(f"CRITICAL image: {result.reasoning}")
    return result
def handle_chat_image(image_data):
    """Handle image from chat. Returns action dict for gateway."""
    result = screen_image(image_data)

    # Pick a canned reply for the two elevated risk levels; None means the
    # gateway should not inject any response text.
    response_text = None
    if result.risk_level == RiskLevel.CRITICAL:
        response_text = (
            "I noticed something concerning in the image you shared. "
            "If you or someone you know is in crisis, please reach out: "
            "988 Suicide and Crisis Lifeline (call or text 988). "
            "You are not alone."
        )
    elif result.risk_level == RiskLevel.CONCERNING:
        response_text = (
            "I want to check in \u2014 how are you doing? "
            "If you need to talk to someone, the 988 Lifeline is available 24/7."
        )

    return {
        "result": result.to_dict(),
        "show_crisis_overlay": result.needs_crisis_response,
        "log_event": result.needs_followup,
        "response_text": response_text,
    }

129
tests/test_ab_testing.py Normal file
View File

@@ -0,0 +1,129 @@
"""
Tests for crisis/ab_testing.py — A/B test framework for crisis detection.
Verifies variant selection, logging, stats aggregation, and env override.
"""
import os
from unittest.mock import patch
import pytest
from crisis.ab_testing import ABTestCrisisDetector
from crisis.detect import CrisisDetectionResult, detect_crisis
def _make_variant(level: str):
    """Create a mock detection function that returns a fixed level."""
    def stub(text: str) -> CrisisDetectionResult:
        # Ignores the input text — always reports the canned level with a
        # single synthetic indicator so indicator_count is predictable.
        return CrisisDetectionResult(level=level, indicators=[f"mock_{level}"])
    return stub
class TestABTestCrisisDetector:
    """A/B test framework unit tests."""

    def setup_method(self):
        """Ensure no env override leaks into a test."""
        os.environ.pop("CRISIS_AB_VARIANT", None)

    def teardown_method(self):
        """Remove any override a test set.

        Fix: test_env_override_a/b set CRISIS_AB_VARIANT and previously
        left it in the process environment after the class finished,
        which could silently force a variant in any later test run in
        the same process.
        """
        os.environ.pop("CRISIS_AB_VARIANT", None)

    def test_returns_result_and_variant(self):
        """detect() returns the result object plus an A/B label."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        result, variant = detector.detect("test message")
        assert isinstance(result, CrisisDetectionResult)
        assert variant in ("A", "B")

    def test_records_are_logged(self):
        """Each detect() call appends one metadata record."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        # Force variant A so the record contents are deterministic.
        with patch.object(detector, "_select_variant", return_value="A"):
            detector.detect("test")
        assert len(detector.records) == 1
        assert detector.records[0].variant == "A"
        assert detector.records[0].level == "LOW"

    def test_stats_empty(self):
        """With no traffic, both variants report count 0."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        stats = detector.get_stats()
        assert stats["A"]["count"] == 0
        assert stats["B"]["count"] == 0

    def test_stats_with_data(self):
        """Aggregates are split per variant with level histograms."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        # Force 5 A and 3 B
        with patch.object(detector, "_select_variant", side_effect=["A"] * 5 + ["B"] * 3):
            for _ in range(8):
                detector.detect("test")
        stats = detector.get_stats()
        assert stats["A"]["count"] == 5
        assert stats["B"]["count"] == 3
        assert "avg_latency_ms" in stats["A"]
        assert stats["A"]["levels"]["LOW"] == 5
        assert stats["B"]["levels"]["HIGH"] == 3

    def test_env_override_a(self):
        """CRISIS_AB_VARIANT=A routes every call to variant A."""
        os.environ["CRISIS_AB_VARIANT"] = "A"
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        for _ in range(10):
            result, variant = detector.detect("test")
            assert variant == "A"
            assert result.level == "LOW"

    def test_env_override_b(self):
        """Override is case-insensitive: 'b' forces variant B."""
        os.environ["CRISIS_AB_VARIANT"] = "b"
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        for _ in range(10):
            result, variant = detector.detect("test")
            assert variant == "B"
            assert result.level == "HIGH"

    def test_reset_clears_records(self):
        """reset() drops all accumulated records."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
        )
        detector.detect("test")
        detector.detect("test")
        assert len(detector.records) == 2
        detector.reset()
        assert len(detector.records) == 0

    def test_split_respected(self):
        """With split=1.0, always get variant A."""
        detector = ABTestCrisisDetector(
            variant_a=_make_variant("LOW"),
            variant_b=_make_variant("HIGH"),
            split=1.0,
        )
        for _ in range(10):
            _, variant = detector.detect("test")
            assert variant == "A"

    def test_with_real_detector(self):
        """Integration test using actual detect_crisis as both variants."""
        detector = ABTestCrisisDetector(
            variant_a=detect_crisis,
            variant_b=detect_crisis,
        )
        result, variant = detector.detect("I want to kill myself")
        assert result.level == "CRITICAL"
        assert variant in ("A", "B")

View File

@@ -1,84 +0,0 @@
"""Tests for image content screening module."""
import json
from unittest.mock import patch, MagicMock
from image_screening import (
RiskLevel,
ImageScreenResult,
screen_image,
handle_chat_image,
_analyze_fallback,
)
class TestImageScreenResult:
    """Property behavior of ImageScreenResult at each risk level."""

    @staticmethod
    def _build(level, conf, cats, why):
        # Shared factory so each test only states what varies.
        return ImageScreenResult(
            risk_level=level, confidence=conf,
            categories=cats, reasoning=why, model_used="test",
        )

    def test_safe_result(self):
        res = self._build(RiskLevel.SAFE, 0.95, [], "No indicators")
        assert not res.needs_crisis_response
        assert not res.needs_followup
        assert res.to_dict()["risk_level"] == "safe"

    def test_critical_result(self):
        res = self._build(RiskLevel.CRITICAL, 0.9, ["wounds"], "Detected")
        assert res.needs_crisis_response
        assert res.needs_followup

    def test_concerning_result(self):
        res = self._build(RiskLevel.CONCERNING, 0.6, ["isolation"], "Ambiguous")
        assert not res.needs_crisis_response
        assert res.needs_followup
class TestScreenImage:
    """screen_image with the vision model disabled (fallback path only)."""

    def test_fallback_returns_safe(self):
        # Raw bytes in, heuristic fallback out: SAFE with low confidence.
        screened = screen_image(b"fake_image_data", use_vision_model=False)
        assert screened.risk_level == RiskLevel.SAFE
        assert screened.model_used == "fallback:heuristic"
        assert screened.confidence < 0.5

    def test_base64_input(self):
        # Base64 string input is accepted and decoded internally.
        import base64
        encoded = base64.b64encode(b"fake").decode()
        screened = screen_image(encoded, use_vision_model=False)
        assert screened.risk_level == RiskLevel.SAFE
class TestHandleChatImage:
    """End-to-end action dict produced by handle_chat_image."""

    def test_safe_image_no_overlay(self):
        """Unanalyzable bytes fall back to SAFE: no overlay, no canned reply."""
        action = handle_chat_image(b"safe_image")
        assert not action["show_crisis_overlay"]
        assert action["response_text"] is None

    @patch("image_screening._analyze_with_ollama")
    def test_critical_image_shows_overlay(self, mock_ollama):
        """CRITICAL result triggers the overlay, the 988 message, and logging."""
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.95,
            categories=["wounds"], reasoning="Self-harm detected",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"concerning_image")
        assert action["show_crisis_overlay"]
        assert "988" in action["response_text"]
        assert action["log_event"]

    @patch("image_screening._analyze_with_ollama")
    def test_concerning_image_followup(self, mock_ollama):
        """CONCERNING result logs for follow-up but does not show the overlay."""
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Empty room",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"maybe_concerning")
        assert not action["show_crisis_overlay"]
        assert action["log_event"]
        assert "check in" in action["response_text"]