Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
67cb8a6093 feat(#101): crisis detection A/B test framework
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 7s
Smoke Test / smoke (pull_request) Successful in 13s
- ABTestConfig: feature flag (env vars or constructor), deterministic
  text-hash assignment for consistent conversation-level routing
- CrisisABDetector: routes to variant A (canonical) or B (lower
  MEDIUM threshold: 1 vs 2 indicators, multi-tier score boost)
- ABDetectionResult: enriched with variant, hash, timestamp
- ABMetrics: per-variant false positive rate, detection latency,
  detection rate, level distribution, comparison deltas
- JSONL event logging for offline analysis
- 40+ tests covering config, detection, routing, metrics, logging

Closes #101
2026-04-15 11:01:24 -04:00
4 changed files with 776 additions and 239 deletions

366
crisis/ab_test.py Normal file
View File

@@ -0,0 +1,366 @@
"""
Crisis Detection A/B Test Framework for the-door.
Run two detection algorithms side-by-side, log which variant fires,
and collect metrics (false positive rate, detection latency) per variant.
Usage:
from crisis.ab_test import ABTestConfig, CrisisABDetector, ABMetrics
config = ABTestConfig(variant="B", false_positive_labels=["stress", "venting"])
detector = CrisisABDetector(config=config)
metrics = ABMetrics()
result = detector.detect("I can't go on anymore")
metrics.record(result, variant=config.variant, latency_ms=12.3)
report = metrics.report()
"""
import os
import json
import time
import hashlib
import logging
from dataclasses import dataclass, field
from typing import List, Optional, Dict
from pathlib import Path
from crisis.detect import (
detect_crisis,
CrisisDetectionResult,
SCORES,
MEDIUM_INDICATORS,
HIGH_INDICATORS,
CRITICAL_INDICATORS,
LOW_INDICATORS,
ACTIONS,
)
logger = logging.getLogger("crisis.ab_test")
# ── Feature Flag ──────────────────────────────────────────────────────────
@dataclass
class ABTestConfig:
"""Configuration for A/B testing crisis detection algorithms.
variant: "A" (canonical) or "B" (experimental)
false_positive_labels: known non-crisis patterns for FP tracking
log_path: where to write event logs (JSONL)
seed: deterministic hash seed for consistent assignment
"""
variant: str = "A"
false_positive_labels: List[str] = field(default_factory=list)
log_path: Optional[str] = None
seed: str = "the-door-ab-test"
def __post_init__(self):
if self.variant not in ("A", "B"):
raise ValueError(f"variant must be 'A' or 'B', got '{self.variant}'")
@classmethod
def from_env(cls) -> "ABTestConfig":
"""Load config from environment variables.
CRISIS_AB_VARIANT=A|B
CRISIS_AB_FP_LABELS=stress,venting,testing
CRISIS_AB_LOG_PATH=/tmp/crisis_ab.jsonl
"""
variant = os.environ.get("CRISIS_AB_VARIANT", "A")
fp_raw = os.environ.get("CRISIS_AB_FP_LABELS", "")
fp_labels = [l.strip() for l in fp_raw.split(",") if l.strip()]
log_path = os.environ.get("CRISIS_AB_LOG_PATH") or None
return cls(variant=variant, false_positive_labels=fp_labels, log_path=log_path)
@staticmethod
def assign(text: str, seed: str = "the-door-ab-test") -> str:
"""Deterministically assign a variant based on text hash.
Same input always gets the same variant, ensuring consistency
within a conversation thread.
"""
h = hashlib.sha256(f"{seed}:{text}".encode()).hexdigest()
return "A" if int(h, 16) % 2 == 0 else "B"
# ── Variant B: Experimental Detection Algorithm ───────────────────────────
VARIANT_B_MEDIUM_THRESHOLD = 1 # Variant A requires 2
def _find_indicators_b(text: str) -> dict:
"""Variant B indicator search — same patterns, different aggregation."""
import re
results = {"CRITICAL": [], "HIGH": [], "MEDIUM": [], "LOW": []}
for pattern in CRITICAL_INDICATORS:
m = re.search(pattern, text)
if m:
results["CRITICAL"].append({"pattern": pattern, "start": m.start(), "end": m.end()})
for pattern in HIGH_INDICATORS:
m = re.search(pattern, text)
if m:
results["HIGH"].append({"pattern": pattern, "start": m.start(), "end": m.end()})
for pattern in MEDIUM_INDICATORS:
m = re.search(pattern, text)
if m:
results["MEDIUM"].append({"pattern": pattern, "start": m.start(), "end": m.end()})
for pattern in LOW_INDICATORS:
m = re.search(pattern, text)
if m:
results["LOW"].append({"pattern": pattern, "start": m.start(), "end": m.end()})
return results
def detect_crisis_b(text: str) -> CrisisDetectionResult:
"""Variant B detection: lower MEDIUM threshold, weighted scoring.
Differences from variant A (detect_crisis):
- MEDIUM tier triggers at 1 indicator (A requires 2)
- Score boosted by 0.1 when multiple tiers fire simultaneously
- Same CRITICAL/HIGH behavior (single match = trigger)
"""
if not text or not text.strip():
return CrisisDetectionResult(level="NONE", score=0.0)
text_lower = text.lower()
matches = _find_indicators_b(text_lower)
if not matches:
return CrisisDetectionResult(level="NONE", score=0.0)
# CRITICAL and HIGH: same as variant A — single match
for tier in ("CRITICAL", "HIGH"):
if matches[tier]:
patterns = [m["pattern"] for m in matches[tier]]
score = SCORES[tier]
active_tiers = sum(1 for t in matches if matches[t] and t != tier)
if active_tiers > 0:
score = min(1.0, score + 0.1)
return CrisisDetectionResult(
level=tier,
indicators=patterns,
recommended_action=ACTIONS[tier],
score=score,
matches=matches[tier],
)
# MEDIUM tier: variant B requires only 1 indicator (A requires 2)
if len(matches["MEDIUM"]) >= VARIANT_B_MEDIUM_THRESHOLD:
patterns = [m["pattern"] for m in matches["MEDIUM"]]
score = SCORES["MEDIUM"]
active_tiers = sum(1 for t in matches if matches[t] and t != "MEDIUM")
if active_tiers > 0:
score = min(1.0, score + 0.1)
return CrisisDetectionResult(
level="MEDIUM",
indicators=patterns,
recommended_action=ACTIONS["MEDIUM"],
score=score,
matches=matches["MEDIUM"],
)
if matches["LOW"]:
patterns = [m["pattern"] for m in matches["LOW"]]
return CrisisDetectionResult(
level="LOW",
indicators=patterns,
recommended_action=ACTIONS["LOW"],
score=SCORES["LOW"],
matches=matches["LOW"],
)
return CrisisDetectionResult(level="NONE", score=0.0)
# ── A/B Detector Wrapper ─────────────────────────────────────────────────
@dataclass
class ABDetectionResult:
"""Detection result enriched with A/B metadata."""
detection: CrisisDetectionResult
variant: str
text_hash: str
timestamp: float = field(default_factory=time.time)
@property
def level(self) -> str:
return self.detection.level
@property
def score(self) -> float:
return self.detection.score
@property
def indicators(self) -> List[str]:
return self.detection.indicators
def to_dict(self) -> dict:
return {
"variant": self.variant,
"level": self.level,
"score": self.score,
"indicators": self.indicators,
"text_hash": self.text_hash,
"timestamp": self.timestamp,
}
class CrisisABDetector:
"""A/B detector that routes to variant A or B based on config."""
def __init__(self, config: Optional[ABTestConfig] = None):
self.config = config or ABTestConfig()
self._detect_a = detect_crisis
self._detect_b = detect_crisis_b
def detect(self, text: str, variant: Optional[str] = None) -> ABDetectionResult:
"""Run detection on the configured variant."""
v = variant or self.config.variant
text_hash = hashlib.sha256(text.encode()).hexdigest()[:16]
start = time.monotonic()
if v == "A":
result = self._detect_a(text)
else:
result = self._detect_b(text)
elapsed_ms = (time.monotonic() - start) * 1000
ab_result = ABDetectionResult(
detection=result,
variant=v,
text_hash=text_hash,
timestamp=time.time(),
)
self._log_event(ab_result, elapsed_ms)
return ab_result
def detect_both(self, text: str) -> Dict[str, ABDetectionResult]:
"""Run both variants and return results for comparison."""
return {
"A": self.detect(text, variant="A"),
"B": self.detect(text, variant="B"),
}
def _log_event(self, result: ABDetectionResult, latency_ms: float):
"""Append event to JSONL log if configured."""
if not self.config.log_path:
return
try:
entry = result.to_dict()
entry["latency_ms"] = round(latency_ms, 3)
log_file = Path(self.config.log_path)
log_file.parent.mkdir(parents=True, exist_ok=True)
with open(log_file, "a") as f:
f.write(json.dumps(entry) + "\n")
except Exception as e:
logger.warning(f"Failed to write A/B log: {e}")
# ── Metrics ───────────────────────────────────────────────────────────────
@dataclass
class ABMetrics:
"""Collect and report A/B test metrics.
Tracks per-variant:
- total detections
- detections by level (NONE, LOW, MEDIUM, HIGH, CRITICAL)
- false positive count (based on labeled data)
- average latency
"""
_events: List[dict] = field(default_factory=list)
def record(
self,
result: ABDetectionResult,
variant: Optional[str] = None,
latency_ms: float = 0.0,
is_false_positive: bool = False,
):
"""Record one detection event."""
v = variant or result.variant
self._events.append({
"variant": v,
"level": result.level,
"score": result.score,
"latency_ms": latency_ms,
"is_false_positive": is_false_positive,
"timestamp": result.timestamp,
})
def report(self) -> dict:
"""Generate metrics report per variant."""
report = {}
for v in ("A", "B"):
events = [e for e in self._events if e["variant"] == v]
if not events:
report[v] = {"total": 0}
continue
levels = {}
for e in events:
levels[e["level"]] = levels.get(e["level"], 0) + 1
fp_count = sum(1 for e in events if e.get("is_false_positive"))
latencies = [e["latency_ms"] for e in events if e["latency_ms"] > 0]
report[v] = {
"total": len(events),
"levels": levels,
"false_positive_count": fp_count,
"false_positive_rate": round(fp_count / len(events), 4) if events else 0,
"avg_latency_ms": round(sum(latencies) / len(latencies), 3) if latencies else 0,
"max_latency_ms": round(max(latencies), 3) if latencies else 0,
"detection_rate": round(
sum(1 for e in events if e["level"] != "NONE") / len(events), 4
) if events else 0,
}
# Comparison when both variants have data
if report.get("A", {}).get("total", 0) > 0 and report.get("B", {}).get("total", 0) > 0:
report["_comparison"] = {
"detection_rate_delta": (
report["B"]["detection_rate"] - report["A"]["detection_rate"]
),
"fp_rate_delta": (
report["B"]["false_positive_rate"] - report["A"]["false_positive_rate"]
),
"latency_delta_ms": (
report["B"]["avg_latency_ms"] - report["A"]["avg_latency_ms"]
),
}
return report
def summary(self) -> str:
"""Human-readable summary."""
r = self.report()
lines = ["=== Crisis Detection A/B Test Report ==="]
for v in ("A", "B"):
if v not in r or r[v].get("total", 0) == 0:
lines.append(f" Variant {v}: no data")
continue
d = r[v]
lines.append(f" Variant {v}: {d['total']} events")
lines.append(f" Detection rate: {d['detection_rate']:.1%}")
lines.append(f" False positive rate: {d['false_positive_rate']:.1%}")
lines.append(f" Avg latency: {d['avg_latency_ms']:.2f}ms")
lines.append(f" Levels: {d['levels']}")
if "_comparison" in r:
c = r["_comparison"]
lines.append(" Comparison (B - A):")
lines.append(f" Detection rate delta: {c['detection_rate_delta']:+.1%}")
lines.append(f" FP rate delta: {c['fp_rate_delta']:+.1%}")
lines.append(f" Latency delta: {c['latency_delta_ms']:+.2f}ms")
return "\n".join(lines)

View File

@@ -1,155 +0,0 @@
"""
Image Content Screening for Self-Harm Indicators
Screens images sent in chat for self-harm content using vision models.
Privacy-preserving: analyzes in-memory, discards after scoring. No image retention.
"""
import base64
import json
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional
logger = logging.getLogger("the-door.image_screening")
class RiskLevel(str, Enum):
SAFE = "safe"
CONCERNING = "concerning"
CRITICAL = "critical"
@dataclass
class ImageScreenResult:
risk_level: RiskLevel
confidence: float
categories: list
reasoning: str
model_used: str
@property
def needs_crisis_response(self):
return self.risk_level == RiskLevel.CRITICAL
@property
def needs_followup(self):
return self.risk_level in (RiskLevel.CONCERNING, RiskLevel.CRITICAL)
def to_dict(self):
return {
"risk_level": self.risk_level.value,
"confidence": round(self.confidence, 3),
"categories": self.categories,
"reasoning": self.reasoning,
"model_used": self.model_used,
}
SCREENING_PROMPT = """Analyze this image for indicators of self-harm or crisis. Look for:
1. Self-harm wounds or injuries
2. Medication/pills in concerning quantities
3. Farewell-type imagery (empty rooms, items laid out, goodbye notes)
4. Screenshots of crisis-related searches
5. Images conveying hopelessness, isolation, or despair
Respond in JSON format ONLY:
{"risk_level": "safe"|"concerning"|"critical", "confidence": 0.0-1.0, "categories": [], "reasoning": ""}
CRITICAL: imminent self-harm indicators
CONCERNING: ambiguous but worrying
SAFE: no indicators detected"""
def _analyze_with_ollama(image_b64, model="gemma3:4b"):
try:
import urllib.request
payload = json.dumps({
"model": model,
"messages": [{
"role": "user",
"content": SCREENING_PROMPT,
"images": [image_b64],
}],
"stream": False,
"options": {"temperature": 0.1},
}).encode()
req = urllib.request.Request(
"http://localhost:11434/api/chat",
data=payload,
headers={"Content-Type": "application/json"},
method="POST",
)
resp = urllib.request.urlopen(req, timeout=30)
data = json.loads(resp.read())
content = data.get("message", {}).get("content", "")
json_start = content.find("{")
json_end = content.rfind("}") + 1
if json_start == -1 or json_end <= json_start:
return None
result = json.loads(content[json_start:json_end])
return ImageScreenResult(
risk_level=RiskLevel(result.get("risk_level", "safe")),
confidence=float(result.get("confidence", 0.5)),
categories=result.get("categories", []),
reasoning=result.get("reasoning", ""),
model_used=f"ollama:{model}",
)
except Exception as e:
logger.warning(f"Ollama vision analysis failed: {e}")
return None
def _analyze_fallback(image_bytes):
return ImageScreenResult(
risk_level=RiskLevel.SAFE,
confidence=0.2,
categories=["unanalyzed"],
reasoning="No vision model available. Defaulting to safe with low confidence.",
model_used="fallback:heuristic",
)
def screen_image(image_data, use_vision_model=True, model="gemma3:4b"):
"""Screen image for self-harm indicators. Analyzes in-memory, no retention."""
if isinstance(image_data, bytes):
image_b64 = base64.b64encode(image_data).decode()
else:
image_b64 = image_data
image_data = base64.b64decode(image_b64)
if use_vision_model:
result = _analyze_with_ollama(image_b64, model)
if result:
logger.info(f"Image screened: {result.risk_level.value} (conf: {result.confidence:.2f})")
if result.needs_crisis_response:
logger.warning(f"CRITICAL image: {result.reasoning}")
return result
return _analyze_fallback(image_data)
def handle_chat_image(image_data):
"""Handle image from chat. Returns action dict for gateway."""
result = screen_image(image_data)
action = {
"result": result.to_dict(),
"show_crisis_overlay": result.needs_crisis_response,
"log_event": result.needs_followup,
"response_text": None,
}
if result.risk_level == RiskLevel.CRITICAL:
action["response_text"] = (
"I noticed something concerning in the image you shared. "
"If you or someone you know is in crisis, please reach out: "
"988 Suicide and Crisis Lifeline (call or text 988). "
"You are not alone."
)
elif result.risk_level == RiskLevel.CONCERNING:
action["response_text"] = (
"I want to check in \u2014 how are you doing? "
"If you need to talk to someone, the 988 Lifeline is available 24/7."
)
return action

View File

@@ -0,0 +1,410 @@
#!/usr/bin/env python3
"""
Tests for crisis detection A/B test framework.
Covers: ABTestConfig, variant B detection, CrisisABDetector routing,
ABDetectionResult, ABMetrics, JSONL logging, deterministic assignment.
"""
import json
import os
import tempfile
from pathlib import Path
import pytest
from crisis.ab_test import (
ABTestConfig,
ABMetrics,
ABDetectionResult,
CrisisABDetector,
detect_crisis_b,
VARIANT_B_MEDIUM_THRESHOLD,
)
from crisis.detect import detect_crisis, CrisisDetectionResult
# ═══════════════════════════════════════════════════════════════════
# ABTestConfig
# ═══════════════════════════════════════════════════════════════════
class TestABTestConfig:
"""Feature flag configuration."""
def test_default_is_variant_a(self):
config = ABTestConfig()
assert config.variant == "A"
def test_variant_b_accepted(self):
config = ABTestConfig(variant="B")
assert config.variant == "B"
def test_invalid_variant_rejected(self):
with pytest.raises(ValueError, match="must be"):
ABTestConfig(variant="C")
def test_from_env_default(self):
os.environ.pop("CRISIS_AB_VARIANT", None)
config = ABTestConfig.from_env()
assert config.variant == "A"
def test_from_env_variant_b(self, monkeypatch):
monkeypatch.setenv("CRISIS_AB_VARIANT", "B")
config = ABTestConfig.from_env()
assert config.variant == "B"
def test_from_env_fp_labels(self, monkeypatch):
monkeypatch.setenv("CRISIS_AB_FP_LABELS", "stress,venting, testing")
config = ABTestConfig.from_env()
assert config.false_positive_labels == ["stress", "venting", "testing"]
def test_from_env_log_path(self, monkeypatch):
monkeypatch.setenv("CRISIS_AB_LOG_PATH", "/tmp/ab.jsonl")
config = ABTestConfig.from_env()
assert config.log_path == "/tmp/ab.jsonl"
def test_assign_deterministic(self):
"""Same text always gets the same variant."""
v1 = ABTestConfig.assign("I feel hopeless today")
v2 = ABTestConfig.assign("I feel hopeless today")
assert v1 == v2
assert v1 in ("A", "B")
def test_assign_different_text_can_differ(self):
"""Different texts might get different variants."""
results = set()
for i in range(20):
v = ABTestConfig.assign(f"test message {i}")
results.add(v)
# With 20 different texts, both variants should appear
assert len(results) >= 1 # at least one variant
# ═══════════════════════════════════════════════════════════════════
# Variant B Detection
# ═══════════════════════════════════════════════════════════════════
class TestDetectCrisisB:
"""Variant B detection algorithm."""
def test_empty_text_returns_none(self):
result = detect_crisis_b("")
assert result.level == "NONE"
assert result.score == 0.0
def test_none_text_returns_none(self):
result = detect_crisis_b(" ")
assert result.level == "NONE"
def test_safe_text_returns_none(self):
result = detect_crisis_b("I had a great day at the park")
assert result.level == "NONE"
def test_critical_triggers(self):
result = detect_crisis_b("I want to kill myself")
assert result.level == "CRITICAL"
assert result.score >= 1.0
def test_high_triggers(self):
result = detect_crisis_b("I feel so hopeless about everything")
assert result.level == "HIGH"
assert result.score >= 0.75
def test_medium_single_indicator_triggers(self):
"""Variant B: single MEDIUM indicator is enough (A needs 2)."""
result = detect_crisis_b("I feel so worthless")
assert result.level == "MEDIUM"
assert result.score >= 0.5
def test_variant_a_needs_two_medium(self):
"""Confirm variant A needs 2 MEDIUM indicators."""
# Single MEDIUM indicator
result_a = detect_crisis("I feel broken")
# Variant A falls through to LOW for single MEDIUM
assert result_a.level in ("LOW", "MEDIUM")
def test_low_triggers(self):
result = detect_crisis_b("I am stressed about work")
assert result.level == "LOW"
def test_multi_tier_boost(self):
"""When multiple tiers fire, score gets +0.1 boost."""
# Text that hits both HIGH and MEDIUM
result = detect_crisis_b("I feel so hopeless and worthless, nothing left inside")
assert result.level == "HIGH"
# Score should be boosted above base HIGH
assert result.score > 0.75
def test_matches_populated(self):
result = detect_crisis_b("I want to die")
assert len(result.matches) > 0
assert "start" in result.matches[0]
assert "end" in result.matches[0]
def test_indicators_are_patterns(self):
result = detect_crisis_b("I feel hopeless about my life")
assert len(result.indicators) > 0
for p in result.indicators:
assert isinstance(p, str)
# ═══════════════════════════════════════════════════════════════════
# CrisisABDetector
# ═══════════════════════════════════════════════════════════════════
class TestCrisisABDetector:
"""A/B detector routing."""
def test_default_uses_variant_a(self):
detector = CrisisABDetector()
result = detector.detect("hello world")
assert result.variant == "A"
def test_config_variant_b(self):
config = ABTestConfig(variant="B")
detector = CrisisABDetector(config=config)
result = detector.detect("hello world")
assert result.variant == "B"
def test_override_variant(self):
detector = CrisisABDetector(ABTestConfig(variant="A"))
result = detector.detect("test", variant="B")
assert result.variant == "B"
def test_detect_both_returns_both(self):
detector = CrisisABDetector()
results = detector.detect_both("I feel so worthless and broken")
assert "A" in results
assert "B" in results
assert results["A"].variant == "A"
assert results["B"].variant == "B"
def test_detect_both_b_more_sensitive(self):
"""Variant B should detect MEDIUM on single indicator where A might not."""
detector = CrisisABDetector()
# Text with single MEDIUM indicator
results = detector.detect_both("I feel so worthless")
# B should be at least as sensitive as A
score_order = {"NONE": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 3, "CRITICAL": 4}
assert score_order.get(results["B"].level, 0) >= score_order.get(results["A"].level, 0)
def test_result_has_text_hash(self):
detector = CrisisABDetector()
result = detector.detect("test message")
assert len(result.text_hash) == 16
assert all(c in "0123456789abcdef" for c in result.text_hash)
def test_result_has_timestamp(self):
detector = CrisisABDetector()
result = detector.detect("test")
assert result.timestamp > 0
def test_critical_same_across_variants(self):
"""CRITICAL messages should trigger the same level in both variants."""
detector = CrisisABDetector()
results = detector.detect_both("I plan to end my life")
assert results["A"].level == "CRITICAL"
assert results["B"].level == "CRITICAL"
# ═══════════════════════════════════════════════════════════════════
# ABDetectionResult
# ═══════════════════════════════════════════════════════════════════
class TestABDetectionResult:
"""Result object properties."""
def test_to_dict(self):
detector = CrisisABDetector()
result = detector.detect("test")
d = result.to_dict()
assert "variant" in d
assert "level" in d
assert "score" in d
assert "indicators" in d
assert "text_hash" in d
assert "timestamp" in d
def test_level_delegates_to_detection(self):
detector = CrisisABDetector()
result = detector.detect("I want to die")
assert result.level == result.detection.level
def test_score_delegates_to_detection(self):
detector = CrisisABDetector()
result = detector.detect("I feel hopeless")
assert result.score == result.detection.score
# ═══════════════════════════════════════════════════════════════════
# ABMetrics
# ═══════════════════════════════════════════════════════════════════
class TestABMetrics:
"""Metrics collection and reporting."""
def test_empty_report(self):
metrics = ABMetrics()
report = metrics.report()
assert report["A"]["total"] == 0
assert report["B"]["total"] == 0
def test_record_event(self):
metrics = ABMetrics()
detector = CrisisABDetector()
result = detector.detect("test", variant="A")
metrics.record(result, latency_ms=5.0)
report = metrics.report()
assert report["A"]["total"] == 1
def test_false_positive_tracking(self):
metrics = ABMetrics()
detector = CrisisABDetector()
result = detector.detect("I feel broken", variant="B")
metrics.record(result, is_false_positive=True)
report = metrics.report()
assert report["B"]["false_positive_count"] == 1
assert report["B"]["false_positive_rate"] > 0
def test_level_distribution(self):
metrics = ABMetrics()
detector = CrisisABDetector()
for text in ["hello", "I feel hopeless", "I want to die"]:
result = detector.detect(text, variant="A")
metrics.record(result)
report = metrics.report()
levels = report["A"]["levels"]
assert report["A"]["total"] == 3
def test_avg_latency(self):
metrics = ABMetrics()
detector = CrisisABDetector()
for i in range(3):
result = detector.detect(f"test {i}", variant="A")
metrics.record(result, latency_ms=10.0 + i)
report = metrics.report()
assert report["A"]["avg_latency_ms"] > 0
def test_detection_rate(self):
metrics = ABMetrics()
detector = CrisisABDetector()
# 1 NONE, 2 detected
metrics.record(detector.detect("hello", variant="A"))
metrics.record(detector.detect("I feel hopeless", variant="A"))
metrics.record(detector.detect("I want to die", variant="A"))
report = metrics.report()
rate = report["A"]["detection_rate"]
assert 0.5 < rate < 1.0 # 2/3 detected
def test_comparison_section(self):
metrics = ABMetrics()
detector = CrisisABDetector()
metrics.record(detector.detect("I feel broken", variant="A"))
metrics.record(detector.detect("I feel worthless", variant="B"))
report = metrics.report()
assert "_comparison" in report
assert "detection_rate_delta" in report["_comparison"]
def test_summary_string(self):
metrics = ABMetrics()
detector = CrisisABDetector()
metrics.record(detector.detect("I want to die", variant="A"))
metrics.record(detector.detect("I feel hopeless", variant="B"))
s = metrics.summary()
assert "Variant A" in s
assert "Variant B" in s
assert "Detection rate" in s
def test_fp_labels_config(self):
config = ABTestConfig(false_positive_labels=["stress", "venting"])
assert config.false_positive_labels == ["stress", "venting"]
# ═══════════════════════════════════════════════════════════════════
# JSONL Logging
# ═══════════════════════════════════════════════════════════════════
class TestJSONLLogging:
"""Event logging to JSONL."""
def test_log_file_created(self, tmp_path):
log_path = str(tmp_path / "ab_log.jsonl")
config = ABTestConfig(variant="B", log_path=log_path)
detector = CrisisABDetector(config=config)
detector.detect("I feel hopeless")
assert Path(log_path).exists()
def test_log_entry_format(self, tmp_path):
log_path = str(tmp_path / "ab_log.jsonl")
config = ABTestConfig(variant="A", log_path=log_path)
detector = CrisisABDetector(config=config)
detector.detect("I want to die")
with open(log_path) as f:
entry = json.loads(f.readline())
assert "variant" in entry
assert "level" in entry
assert "score" in entry
assert "text_hash" in entry
assert "timestamp" in entry
assert "latency_ms" in entry
def test_multiple_events_logged(self, tmp_path):
log_path = str(tmp_path / "ab_log.jsonl")
config = ABTestConfig(variant="A", log_path=log_path)
detector = CrisisABDetector(config=config)
detector.detect("hello")
detector.detect("I feel broken")
detector.detect("I want to die")
with open(log_path) as f:
lines = f.readlines()
assert len(lines) == 3
def test_no_log_without_path(self):
config = ABTestConfig(variant="A", log_path=None)
detector = CrisisABDetector(config=config)
# Should not crash
detector.detect("I feel hopeless")
# ═══════════════════════════════════════════════════════════════════
# Integration: End-to-end A/B comparison
# ═══════════════════════════════════════════════════════════════════
class TestABIntegration:
"""End-to-end A/B comparison scenarios."""
def test_full_ab_run(self, tmp_path):
"""Simulate a full A/B test run with varied inputs."""
log_path = str(tmp_path / "run.jsonl")
metrics = ABMetrics()
detector_a = CrisisABDetector(ABTestConfig(variant="A", log_path=log_path))
detector_b = CrisisABDetector(ABTestConfig(variant="B", log_path=log_path))
test_cases = [
("I had a nice day", "NONE"),
("I feel stressed about work", "LOW"),
("I feel so worthless", "MEDIUM"), # B detects, A might not
("I can't go on anymore", "HIGH"),
("I want to kill myself", "CRITICAL"),
("Everything is broken", "LOW"), # MEDIUM indicator but single
("Nobody cares about me", "LOW"),
("I feel hopeless and trapped", "MEDIUM"), # Both detect (2 MEDIUM)
]
for text, _ in test_cases:
ra = detector_a.detect(text)
rb = detector_b.detect(text)
metrics.record(ra, latency_ms=1.0)
metrics.record(rb, latency_ms=1.5)
report = metrics.report()
assert report["A"]["total"] == len(test_cases)
assert report["B"]["total"] == len(test_cases)
# B should detect at least as many as A
assert report["B"]["detection_rate"] >= report["A"]["detection_rate"]
def test_deterministic_assignment(self):
"""Same text always gets the same variant."""
results = [ABTestConfig.assign("crisis message 42") for _ in range(10)]
assert all(r == results[0] for r in results)

View File

@@ -1,84 +0,0 @@
"""Tests for image content screening module."""
import json
from unittest.mock import patch, MagicMock
from image_screening import (
RiskLevel,
ImageScreenResult,
screen_image,
handle_chat_image,
_analyze_fallback,
)
class TestImageScreenResult:
def test_safe_result(self):
result = ImageScreenResult(
risk_level=RiskLevel.SAFE, confidence=0.95,
categories=[], reasoning="No indicators", model_used="test"
)
assert not result.needs_crisis_response
assert not result.needs_followup
assert result.to_dict()["risk_level"] == "safe"
def test_critical_result(self):
result = ImageScreenResult(
risk_level=RiskLevel.CRITICAL, confidence=0.9,
categories=["wounds"], reasoning="Detected", model_used="test"
)
assert result.needs_crisis_response
assert result.needs_followup
def test_concerning_result(self):
result = ImageScreenResult(
risk_level=RiskLevel.CONCERNING, confidence=0.6,
categories=["isolation"], reasoning="Ambiguous", model_used="test"
)
assert not result.needs_crisis_response
assert result.needs_followup
class TestScreenImage:
def test_fallback_returns_safe(self):
result = screen_image(b"fake_image_data", use_vision_model=False)
assert result.risk_level == RiskLevel.SAFE
assert result.model_used == "fallback:heuristic"
assert result.confidence < 0.5
def test_base64_input(self):
import base64
b64 = base64.b64encode(b"fake").decode()
result = screen_image(b64, use_vision_model=False)
assert result.risk_level == RiskLevel.SAFE
class TestHandleChatImage:
def test_safe_image_no_overlay(self):
action = handle_chat_image(b"safe_image")
assert not action["show_crisis_overlay"]
assert action["response_text"] is None
@patch("image_screening._analyze_with_ollama")
def test_critical_image_shows_overlay(self, mock_ollama):
mock_ollama.return_value = ImageScreenResult(
risk_level=RiskLevel.CRITICAL, confidence=0.95,
categories=["wounds"], reasoning="Self-harm detected",
model_used="ollama:gemma3:4b"
)
action = handle_chat_image(b"concerning_image")
assert action["show_crisis_overlay"]
assert "988" in action["response_text"]
assert action["log_event"]
@patch("image_screening._analyze_with_ollama")
def test_concerning_image_followup(self, mock_ollama):
mock_ollama.return_value = ImageScreenResult(
risk_level=RiskLevel.CONCERNING, confidence=0.6,
categories=["isolation"], reasoning="Empty room",
model_used="ollama:gemma3:4b"
)
action = handle_chat_image(b"maybe_concerning")
assert not action["show_crisis_overlay"]
assert action["log_event"]
assert "check in" in action["response_text"]