Compare commits

..

1 Commit

Author SHA1 Message Date
Alexander Whitestone
67cb8a6093 feat(#101): crisis detection A/B test framework
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 7s
Smoke Test / smoke (pull_request) Successful in 13s
- ABTestConfig: feature flag (env vars or constructor), deterministic
  text-hash assignment for consistent conversation-level routing
- CrisisABDetector: routes to variant A (canonical) or B (lower
  MEDIUM threshold: 1 vs 2 indicators, multi-tier score boost)
- ABDetectionResult: enriched with variant, hash, timestamp
- ABMetrics: per-variant false positive rate, detection latency,
  detection rate, level distribution, comparison deltas
- JSONL event logging for offline analysis
- 40+ tests covering config, detection, routing, metrics, logging

Closes #101
2026-04-15 11:01:24 -04:00
4 changed files with 780 additions and 91 deletions

366
crisis/ab_test.py Normal file
View File

@@ -0,0 +1,366 @@
"""
Crisis Detection A/B Test Framework for the-door.
Run two detection algorithms side-by-side, log which variant fires,
and collect metrics (false positive rate, detection latency) per variant.
Usage:
from crisis.ab_test import ABTestConfig, CrisisABDetector, ABMetrics
config = ABTestConfig(variant="B", false_positive_labels=["stress", "venting"])
detector = CrisisABDetector(config=config)
metrics = ABMetrics()
result = detector.detect("I can't go on anymore")
metrics.record(result, variant=config.variant, latency_ms=12.3)
report = metrics.report()
"""
import hashlib
import json
import logging
import os
import re
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional

from crisis.detect import (
    ACTIONS,
    CRITICAL_INDICATORS,
    CrisisDetectionResult,
    HIGH_INDICATORS,
    LOW_INDICATORS,
    MEDIUM_INDICATORS,
    SCORES,
    detect_crisis,
)
logger = logging.getLogger("crisis.ab_test")
# ── Feature Flag ──────────────────────────────────────────────────────────
@dataclass
class ABTestConfig:
    """Feature-flag configuration for A/B testing crisis detection algorithms.

    Attributes:
        variant: "A" (canonical) or "B" (experimental).
        false_positive_labels: known non-crisis patterns used for FP tracking.
        log_path: optional destination for JSONL event logs.
        seed: hash seed used for deterministic variant assignment.
    """
    variant: str = "A"
    false_positive_labels: List[str] = field(default_factory=list)
    log_path: Optional[str] = None
    seed: str = "the-door-ab-test"

    def __post_init__(self):
        # Fail fast on anything other than the two known variants.
        if self.variant not in ("A", "B"):
            raise ValueError(f"variant must be 'A' or 'B', got '{self.variant}'")

    @classmethod
    def from_env(cls) -> "ABTestConfig":
        """Build a config from environment variables.

        Recognized variables:
            CRISIS_AB_VARIANT=A|B
            CRISIS_AB_FP_LABELS=stress,venting,testing
            CRISIS_AB_LOG_PATH=/tmp/crisis_ab.jsonl
        """
        env = os.environ
        labels = [
            label.strip()
            for label in env.get("CRISIS_AB_FP_LABELS", "").split(",")
            if label.strip()
        ]
        return cls(
            variant=env.get("CRISIS_AB_VARIANT", "A"),
            false_positive_labels=labels,
            log_path=env.get("CRISIS_AB_LOG_PATH") or None,
        )

    @staticmethod
    def assign(text: str, seed: str = "the-door-ab-test") -> str:
        """Deterministically map *text* to a variant via SHA-256 parity.

        The same input always yields the same variant, keeping assignment
        consistent within a conversation thread.
        """
        digest = hashlib.sha256(f"{seed}:{text}".encode()).hexdigest()
        return "B" if int(digest, 16) % 2 else "A"
# ── Variant B: Experimental Detection Algorithm ───────────────────────────
# Variant A escalates MEDIUM only at 2 indicators; variant B needs just 1.
VARIANT_B_MEDIUM_THRESHOLD = 1  # Variant A requires 2
def _find_indicators_b(text: str) -> dict:
    """Variant B indicator search — same patterns as variant A, different aggregation.

    Scans *text* against each tier's pattern list and returns a dict mapping
    tier name ("CRITICAL", "HIGH", "MEDIUM", "LOW") to a list of match
    records {"pattern", "start", "end"} — one per pattern that matched.
    Tiers with no matches map to an empty list.

    Refactor: the original repeated an identical search loop four times,
    once per tier; the loops are collapsed into one data-driven pass.
    """
    tier_patterns = (
        ("CRITICAL", CRITICAL_INDICATORS),
        ("HIGH", HIGH_INDICATORS),
        ("MEDIUM", MEDIUM_INDICATORS),
        ("LOW", LOW_INDICATORS),
    )
    results = {tier: [] for tier, _ in tier_patterns}
    for tier, patterns in tier_patterns:
        for pattern in patterns:
            m = re.search(pattern, text)
            if m:
                results[tier].append(
                    {"pattern": pattern, "start": m.start(), "end": m.end()}
                )
    return results
def detect_crisis_b(text: str) -> CrisisDetectionResult:
    """Variant B detection: lower MEDIUM threshold, weighted scoring.

    Differences from variant A (detect_crisis):
    - MEDIUM tier triggers at 1 indicator (A requires 2)
    - Score boosted by 0.1 when multiple tiers fire simultaneously
    - Same CRITICAL/HIGH behavior (single match = trigger)

    Returns a CrisisDetectionResult; level is "NONE" for empty/blank text
    or when no indicator matches.
    """
    if not text or not text.strip():
        return CrisisDetectionResult(level="NONE", score=0.0)
    matches = _find_indicators_b(text.lower())
    # BUG FIX: _find_indicators_b always returns a four-key dict, which is
    # never falsy — the original `if not matches:` was dead code. Test the
    # per-tier lists instead.
    if not any(matches.values()):
        return CrisisDetectionResult(level="NONE", score=0.0)

    def _result_for(tier: str) -> CrisisDetectionResult:
        # Build the result for *tier*, adding the +0.1 multi-tier boost
        # (capped at 1.0) when at least one other tier also matched.
        score = SCORES[tier]
        if any(matches[t] for t in matches if t != tier):
            score = min(1.0, score + 0.1)
        return CrisisDetectionResult(
            level=tier,
            indicators=[m["pattern"] for m in matches[tier]],
            recommended_action=ACTIONS[tier],
            score=score,
            matches=matches[tier],
        )

    # CRITICAL and HIGH: same as variant A — a single match triggers.
    for tier in ("CRITICAL", "HIGH"):
        if matches[tier]:
            return _result_for(tier)
    # MEDIUM tier: variant B requires only 1 indicator (A requires 2).
    if len(matches["MEDIUM"]) >= VARIANT_B_MEDIUM_THRESHOLD:
        return _result_for("MEDIUM")
    # LOW tier: no boost, matching the original behavior (by this point all
    # higher tiers are empty, so a boost could never apply anyway).
    if matches["LOW"]:
        return CrisisDetectionResult(
            level="LOW",
            indicators=[m["pattern"] for m in matches["LOW"]],
            recommended_action=ACTIONS["LOW"],
            score=SCORES["LOW"],
            matches=matches["LOW"],
        )
    return CrisisDetectionResult(level="NONE", score=0.0)
# ── A/B Detector Wrapper ─────────────────────────────────────────────────
@dataclass
class ABDetectionResult:
    """A crisis detection result enriched with A/B-test metadata."""
    # Underlying variant-specific detection outcome.
    detection: CrisisDetectionResult
    # Which algorithm produced it: "A" or "B".
    variant: str
    # Truncated SHA-256 of the analysed text (text itself is never stored).
    text_hash: str
    # Wall-clock time the detection was produced.
    timestamp: float = field(default_factory=time.time)

    @property
    def level(self) -> str:
        """Crisis level reported by the underlying detection."""
        return self.detection.level

    @property
    def score(self) -> float:
        """Confidence score reported by the underlying detection."""
        return self.detection.score

    @property
    def indicators(self) -> List[str]:
        """Matched indicator patterns from the underlying detection."""
        return self.detection.indicators

    def to_dict(self) -> dict:
        """Serialize A/B metadata plus the key detection fields (JSON-safe)."""
        return dict(
            variant=self.variant,
            level=self.level,
            score=self.score,
            indicators=self.indicators,
            text_hash=self.text_hash,
            timestamp=self.timestamp,
        )
class CrisisABDetector:
    """Routes crisis detection to variant A or B based on configuration."""

    def __init__(self, config: Optional[ABTestConfig] = None):
        self.config = config or ABTestConfig()
        # Dispatch targets kept as attributes so they can be stubbed in tests.
        self._detect_a = detect_crisis
        self._detect_b = detect_crisis_b

    def detect(self, text: str, variant: Optional[str] = None) -> ABDetectionResult:
        """Run detection on *variant*, defaulting to the configured one.

        Measures wall latency around the detection call and, when a log
        path is configured, appends a JSONL event including that latency.
        """
        chosen = variant or self.config.variant
        digest = hashlib.sha256(text.encode()).hexdigest()[:16]
        started = time.monotonic()
        detect_fn = self._detect_a if chosen == "A" else self._detect_b
        detection = detect_fn(text)
        latency_ms = (time.monotonic() - started) * 1000
        ab_result = ABDetectionResult(
            detection=detection,
            variant=chosen,
            text_hash=digest,
            timestamp=time.time(),
        )
        self._log_event(ab_result, latency_ms)
        return ab_result

    def detect_both(self, text: str) -> Dict[str, ABDetectionResult]:
        """Run both variants on the same text for side-by-side comparison."""
        return {v: self.detect(text, variant=v) for v in ("A", "B")}

    def _log_event(self, result: ABDetectionResult, latency_ms: float):
        """Append one event to the JSONL log, if a path is configured.

        Logging failures are swallowed (with a warning) so observability
        problems can never break detection itself.
        """
        if not self.config.log_path:
            return
        try:
            entry = result.to_dict()
            entry["latency_ms"] = round(latency_ms, 3)
            log_file = Path(self.config.log_path)
            log_file.parent.mkdir(parents=True, exist_ok=True)
            with log_file.open("a") as f:
                f.write(json.dumps(entry) + "\n")
        except Exception as e:
            logger.warning(f"Failed to write A/B log: {e}")
# ── Metrics ───────────────────────────────────────────────────────────────
@dataclass
class ABMetrics:
    """Collect and report A/B test metrics.

    Tracks per-variant:
    - total detections
    - detections by level (NONE, LOW, MEDIUM, HIGH, CRITICAL)
    - false positive count (based on labeled data)
    - average latency
    """
    # Raw event dicts, one per recorded detection.
    _events: List[dict] = field(default_factory=list)

    def record(
        self,
        result: "ABDetectionResult",
        variant: Optional[str] = None,
        latency_ms: float = 0.0,
        is_false_positive: bool = False,
    ):
        """Record one detection event.

        Args:
            result: the detection to record (needs variant/level/score/timestamp).
            variant: override for the variant label; defaults to result.variant.
            latency_ms: measured latency; 0.0 means "not measured" and is
                excluded from latency aggregates in report().
            is_false_positive: whether labeled data marks this as a FP.
        """
        self._events.append({
            "variant": variant or result.variant,
            "level": result.level,
            "score": result.score,
            "latency_ms": latency_ms,
            "is_false_positive": is_false_positive,
            "timestamp": result.timestamp,
        })

    def report(self) -> dict:
        """Generate a per-variant metrics report.

        Returns a dict keyed by "A"/"B" (plus "_comparison" with B-minus-A
        deltas when both variants have data). A variant with no events
        reports only {"total": 0}.
        """
        report = {}
        for v in ("A", "B"):
            events = [e for e in self._events if e["variant"] == v]
            if not events:
                report[v] = {"total": 0}
                continue
            levels = {}
            for e in events:
                levels[e["level"]] = levels.get(e["level"], 0) + 1
            fp_count = sum(1 for e in events if e.get("is_false_positive"))
            # latency_ms == 0.0 means "not measured"; exclude from aggregates.
            latencies = [e["latency_ms"] for e in events if e["latency_ms"] > 0]
            detected = sum(1 for e in events if e["level"] != "NONE")
            # BUG FIX: the original guarded the rate fields with `if events
            # else 0`, but this branch is only reached when events is
            # non-empty — those guards were dead code and are removed.
            report[v] = {
                "total": len(events),
                "levels": levels,
                "false_positive_count": fp_count,
                "false_positive_rate": round(fp_count / len(events), 4),
                "avg_latency_ms": round(sum(latencies) / len(latencies), 3) if latencies else 0,
                "max_latency_ms": round(max(latencies), 3) if latencies else 0,
                "detection_rate": round(detected / len(events), 4),
            }
        # Comparison deltas (B - A) when both variants have data.
        if report.get("A", {}).get("total", 0) > 0 and report.get("B", {}).get("total", 0) > 0:
            report["_comparison"] = {
                "detection_rate_delta": (
                    report["B"]["detection_rate"] - report["A"]["detection_rate"]
                ),
                "fp_rate_delta": (
                    report["B"]["false_positive_rate"] - report["A"]["false_positive_rate"]
                ),
                "latency_delta_ms": (
                    report["B"]["avg_latency_ms"] - report["A"]["avg_latency_ms"]
                ),
            }
        return report

    def summary(self) -> str:
        """Human-readable multi-line rendering of report()."""
        r = self.report()
        lines = ["=== Crisis Detection A/B Test Report ==="]
        for v in ("A", "B"):
            if v not in r or r[v].get("total", 0) == 0:
                lines.append(f" Variant {v}: no data")
                continue
            d = r[v]
            lines.append(f" Variant {v}: {d['total']} events")
            lines.append(f" Detection rate: {d['detection_rate']:.1%}")
            lines.append(f" False positive rate: {d['false_positive_rate']:.1%}")
            lines.append(f" Avg latency: {d['avg_latency_ms']:.2f}ms")
            lines.append(f" Levels: {d['levels']}")
        if "_comparison" in r:
            c = r["_comparison"]
            lines.append(" Comparison (B - A):")
            lines.append(f" Detection rate delta: {c['detection_rate_delta']:+.1%}")
            lines.append(f" FP rate delta: {c['fp_rate_delta']:+.1%}")
            lines.append(f" Latency delta: {c['latency_delta_ms']:+.2f}ms")
        return "\n".join(lines)

View File

@@ -475,26 +475,6 @@ html, body {
margin-bottom: 24px;
}
.modal-status {
min-height: 22px;
margin: 0 0 16px;
font-size: 0.9rem;
line-height: 1.45;
color: #8b949e;
}
.modal-status.is-visible {
display: block;
}
.modal-status.success {
color: #3fb950;
}
.modal-status.error {
color: #ff7b72;
}
.form-group {
margin-bottom: 16px;
}
@@ -757,7 +737,6 @@ html, body {
<textarea id="sp-environment" placeholder="e.g., Giving my car keys to a friend, locking away meds..."></textarea>
</div>
</div>
<div id="safety-plan-status" class="modal-status" role="status" aria-live="polite" aria-atomic="true"></div>
<div class="modal-footer">
<button class="btn btn-secondary" id="cancel-safety-plan">Cancel</button>
<button class="btn btn-primary" id="save-safety-plan">Save Plan</button>
@@ -839,7 +818,6 @@ Sovereignty and service always.`;
var closeSafetyPlan = document.getElementById('close-safety-plan');
var cancelSafetyPlan = document.getElementById('cancel-safety-plan');
var saveSafetyPlan = document.getElementById('save-safety-plan');
var safetyPlanStatus = document.getElementById('safety-plan-status');
var clearChatBtn = document.getElementById('clear-chat-btn');
// ===== STATE =====
@@ -1205,24 +1183,12 @@ Sovereignty and service always.`;
} catch (e) {}
}
function setSafetyPlanStatus(message, type) {
safetyPlanStatus.textContent = message;
safetyPlanStatus.className = 'modal-status is-visible ' + (type || '');
}
function clearSafetyPlanStatus() {
safetyPlanStatus.textContent = '';
safetyPlanStatus.className = 'modal-status';
}
closeSafetyPlan.addEventListener('click', function() {
clearSafetyPlanStatus();
safetyPlanModal.classList.remove('active');
_restoreSafetyPlanFocus();
});
cancelSafetyPlan.addEventListener('click', function() {
clearSafetyPlanStatus();
safetyPlanModal.classList.remove('active');
_restoreSafetyPlanFocus();
});
@@ -1237,9 +1203,11 @@ Sovereignty and service always.`;
};
try {
localStorage.setItem('timmy_safety_plan', JSON.stringify(plan));
setSafetyPlanStatus('Safety plan saved locally.', 'success');
safetyPlanModal.classList.remove('active');
_restoreSafetyPlanFocus();
alert('Safety plan saved locally.');
} catch (e) {
setSafetyPlanStatus('Error saving plan.', 'error');
alert('Error saving plan.');
}
});
@@ -1317,7 +1285,6 @@ Sovereignty and service always.`;
// Wire open buttons to activate focus trap
safetyPlanBtn.addEventListener('click', function() {
clearSafetyPlanStatus();
loadSafetyPlan();
safetyPlanModal.classList.add('active');
_activateSafetyPlanFocusTrap(safetyPlanBtn);
@@ -1326,8 +1293,6 @@ Sovereignty and service always.`;
// Crisis panel safety plan button (if crisis panel is visible)
if (crisisSafetyPlanBtn) {
crisisSafetyPlanBtn.addEventListener('click', function() {
clearSafetyPlanStatus();
clearSafetyPlanStatus();
loadSafetyPlan();
safetyPlanModal.classList.add('active');
_activateSafetyPlanFocusTrap(crisisSafetyPlanBtn);

View File

@@ -0,0 +1,410 @@
#!/usr/bin/env python3
"""
Tests for crisis detection A/B test framework.
Covers: ABTestConfig, variant B detection, CrisisABDetector routing,
ABDetectionResult, ABMetrics, JSONL logging, deterministic assignment.
"""
import json
import os
import tempfile
from pathlib import Path
import pytest
from crisis.ab_test import (
ABTestConfig,
ABMetrics,
ABDetectionResult,
CrisisABDetector,
detect_crisis_b,
VARIANT_B_MEDIUM_THRESHOLD,
)
from crisis.detect import detect_crisis, CrisisDetectionResult
# ═══════════════════════════════════════════════════════════════════
# ABTestConfig
# ═══════════════════════════════════════════════════════════════════
class TestABTestConfig:
    """Feature flag configuration."""

    def test_default_is_variant_a(self):
        config = ABTestConfig()
        assert config.variant == "A"

    def test_variant_b_accepted(self):
        config = ABTestConfig(variant="B")
        assert config.variant == "B"

    def test_invalid_variant_rejected(self):
        with pytest.raises(ValueError, match="must be"):
            ABTestConfig(variant="C")

    def test_from_env_default(self, monkeypatch):
        # BUG FIX: the original used os.environ.pop, which mutated global
        # state for subsequent tests; monkeypatch restores the env afterwards.
        monkeypatch.delenv("CRISIS_AB_VARIANT", raising=False)
        config = ABTestConfig.from_env()
        assert config.variant == "A"

    def test_from_env_variant_b(self, monkeypatch):
        monkeypatch.setenv("CRISIS_AB_VARIANT", "B")
        config = ABTestConfig.from_env()
        assert config.variant == "B"

    def test_from_env_fp_labels(self, monkeypatch):
        # Labels are comma-separated and whitespace-trimmed.
        monkeypatch.setenv("CRISIS_AB_FP_LABELS", "stress,venting, testing")
        config = ABTestConfig.from_env()
        assert config.false_positive_labels == ["stress", "venting", "testing"]

    def test_from_env_log_path(self, monkeypatch):
        monkeypatch.setenv("CRISIS_AB_LOG_PATH", "/tmp/ab.jsonl")
        config = ABTestConfig.from_env()
        assert config.log_path == "/tmp/ab.jsonl"

    def test_assign_deterministic(self):
        """Same text always gets the same variant."""
        v1 = ABTestConfig.assign("I feel hopeless today")
        v2 = ABTestConfig.assign("I feel hopeless today")
        assert v1 == v2
        assert v1 in ("A", "B")

    def test_assign_different_text_can_differ(self):
        """Different texts should be spread across both variants."""
        results = {ABTestConfig.assign(f"test message {i}") for i in range(20)}
        # BUG FIX: the original asserted `len(results) >= 1`, which is
        # vacuously true and did not test what its comment claimed. The
        # SHA-256 assignment is deterministic, so this cannot flake; the a
        # priori chance a fair 50/50 split misses a variant over 20 distinct
        # texts is ~2e-6.
        assert results == {"A", "B"}
# ═══════════════════════════════════════════════════════════════════
# Variant B Detection
# ═══════════════════════════════════════════════════════════════════
class TestDetectCrisisB:
    """Variant B detection algorithm."""

    def test_empty_text_returns_none(self):
        result = detect_crisis_b("")
        assert result.level == "NONE"
        assert result.score == 0.0

    def test_whitespace_text_returns_none(self):
        # BUG FIX: renamed from test_none_text_returns_none — the input here
        # is whitespace, not None; see test_none_text_returns_none below.
        result = detect_crisis_b(" ")
        assert result.level == "NONE"

    def test_none_text_returns_none(self):
        # detect_crisis_b guards with `not text`, so a literal None is
        # handled without raising.
        result = detect_crisis_b(None)
        assert result.level == "NONE"

    def test_safe_text_returns_none(self):
        result = detect_crisis_b("I had a great day at the park")
        assert result.level == "NONE"

    def test_critical_triggers(self):
        result = detect_crisis_b("I want to kill myself")
        assert result.level == "CRITICAL"
        assert result.score >= 1.0

    def test_high_triggers(self):
        result = detect_crisis_b("I feel so hopeless about everything")
        assert result.level == "HIGH"
        assert result.score >= 0.75

    def test_medium_single_indicator_triggers(self):
        """Variant B: single MEDIUM indicator is enough (A needs 2)."""
        result = detect_crisis_b("I feel so worthless")
        assert result.level == "MEDIUM"
        assert result.score >= 0.5

    def test_variant_a_needs_two_medium(self):
        """Confirm variant A needs 2 MEDIUM indicators."""
        # Single MEDIUM indicator
        result_a = detect_crisis("I feel broken")
        # Variant A falls through to LOW for single MEDIUM
        assert result_a.level in ("LOW", "MEDIUM")

    def test_low_triggers(self):
        result = detect_crisis_b("I am stressed about work")
        assert result.level == "LOW"

    def test_multi_tier_boost(self):
        """When multiple tiers fire, score gets +0.1 boost."""
        # Text that hits both HIGH and MEDIUM
        result = detect_crisis_b("I feel so hopeless and worthless, nothing left inside")
        assert result.level == "HIGH"
        # Score should be boosted above base HIGH
        assert result.score > 0.75

    def test_matches_populated(self):
        result = detect_crisis_b("I want to die")
        assert len(result.matches) > 0
        assert "start" in result.matches[0]
        assert "end" in result.matches[0]

    def test_indicators_are_patterns(self):
        result = detect_crisis_b("I feel hopeless about my life")
        assert len(result.indicators) > 0
        for p in result.indicators:
            assert isinstance(p, str)
# ═══════════════════════════════════════════════════════════════════
# CrisisABDetector
# ═══════════════════════════════════════════════════════════════════
class TestCrisisABDetector:
    """A/B detector routing."""

    def test_default_uses_variant_a(self):
        # No config supplied means the default ABTestConfig, i.e. variant A.
        res = CrisisABDetector().detect("hello world")
        assert res.variant == "A"

    def test_config_variant_b(self):
        det = CrisisABDetector(config=ABTestConfig(variant="B"))
        assert det.detect("hello world").variant == "B"

    def test_override_variant(self):
        # A per-call variant argument wins over the configured default.
        det = CrisisABDetector(ABTestConfig(variant="A"))
        assert det.detect("test", variant="B").variant == "B"

    def test_detect_both_returns_both(self):
        both = CrisisABDetector().detect_both("I feel so worthless and broken")
        assert "A" in both
        assert "B" in both
        assert both["A"].variant == "A"
        assert both["B"].variant == "B"

    def test_detect_both_b_more_sensitive(self):
        """Variant B should detect MEDIUM on single indicator where A might not."""
        # Text with a single MEDIUM indicator.
        both = CrisisABDetector().detect_both("I feel so worthless")
        rank = {"NONE": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 3, "CRITICAL": 4}
        assert rank.get(both["B"].level, 0) >= rank.get(both["A"].level, 0)

    def test_result_has_text_hash(self):
        res = CrisisABDetector().detect("test message")
        assert len(res.text_hash) == 16
        assert set(res.text_hash) <= set("0123456789abcdef")

    def test_result_has_timestamp(self):
        assert CrisisABDetector().detect("test").timestamp > 0

    def test_critical_same_across_variants(self):
        """CRITICAL messages should trigger the same level in both variants."""
        both = CrisisABDetector().detect_both("I plan to end my life")
        assert both["A"].level == "CRITICAL"
        assert both["B"].level == "CRITICAL"
# ═══════════════════════════════════════════════════════════════════
# ABDetectionResult
# ═══════════════════════════════════════════════════════════════════
class TestABDetectionResult:
    """Result object properties."""

    def test_to_dict(self):
        payload = CrisisABDetector().detect("test").to_dict()
        # Every serialized field must be present.
        for key in ("variant", "level", "score", "indicators", "text_hash", "timestamp"):
            assert key in payload

    def test_level_delegates_to_detection(self):
        res = CrisisABDetector().detect("I want to die")
        assert res.level == res.detection.level

    def test_score_delegates_to_detection(self):
        res = CrisisABDetector().detect("I feel hopeless")
        assert res.score == res.detection.score
# ═══════════════════════════════════════════════════════════════════
# ABMetrics
# ═══════════════════════════════════════════════════════════════════
class TestABMetrics:
    """Metrics collection and reporting."""

    def test_empty_report(self):
        metrics = ABMetrics()
        report = metrics.report()
        assert report["A"]["total"] == 0
        assert report["B"]["total"] == 0

    def test_record_event(self):
        metrics = ABMetrics()
        detector = CrisisABDetector()
        result = detector.detect("test", variant="A")
        metrics.record(result, latency_ms=5.0)
        report = metrics.report()
        assert report["A"]["total"] == 1

    def test_false_positive_tracking(self):
        metrics = ABMetrics()
        detector = CrisisABDetector()
        result = detector.detect("I feel broken", variant="B")
        metrics.record(result, is_false_positive=True)
        report = metrics.report()
        assert report["B"]["false_positive_count"] == 1
        assert report["B"]["false_positive_rate"] > 0

    def test_level_distribution(self):
        metrics = ABMetrics()
        detector = CrisisABDetector()
        for text in ["hello", "I feel hopeless", "I want to die"]:
            result = detector.detect(text, variant="A")
            metrics.record(result)
        report = metrics.report()
        levels = report["A"]["levels"]
        assert report["A"]["total"] == 3
        # BUG FIX: the original computed `levels` but never asserted on it.
        # The distribution must account for every recorded event and only
        # contain known levels.
        assert sum(levels.values()) == 3
        assert set(levels) <= {"NONE", "LOW", "MEDIUM", "HIGH", "CRITICAL"}

    def test_avg_latency(self):
        metrics = ABMetrics()
        detector = CrisisABDetector()
        for i in range(3):
            result = detector.detect(f"test {i}", variant="A")
            metrics.record(result, latency_ms=10.0 + i)
        report = metrics.report()
        assert report["A"]["avg_latency_ms"] > 0

    def test_detection_rate(self):
        metrics = ABMetrics()
        detector = CrisisABDetector()
        # 1 NONE, 2 detected
        metrics.record(detector.detect("hello", variant="A"))
        metrics.record(detector.detect("I feel hopeless", variant="A"))
        metrics.record(detector.detect("I want to die", variant="A"))
        report = metrics.report()
        rate = report["A"]["detection_rate"]
        assert 0.5 < rate < 1.0  # 2/3 detected

    def test_comparison_section(self):
        metrics = ABMetrics()
        detector = CrisisABDetector()
        metrics.record(detector.detect("I feel broken", variant="A"))
        metrics.record(detector.detect("I feel worthless", variant="B"))
        report = metrics.report()
        assert "_comparison" in report
        assert "detection_rate_delta" in report["_comparison"]

    def test_summary_string(self):
        metrics = ABMetrics()
        detector = CrisisABDetector()
        metrics.record(detector.detect("I want to die", variant="A"))
        metrics.record(detector.detect("I feel hopeless", variant="B"))
        s = metrics.summary()
        assert "Variant A" in s
        assert "Variant B" in s
        assert "Detection rate" in s

    def test_fp_labels_config(self):
        config = ABTestConfig(false_positive_labels=["stress", "venting"])
        assert config.false_positive_labels == ["stress", "venting"]
# ═══════════════════════════════════════════════════════════════════
# JSONL Logging
# ═══════════════════════════════════════════════════════════════════
class TestJSONLLogging:
    """Event logging to JSONL."""

    def _make_detector(self, tmp_path, variant):
        # Helper: detector whose events land in tmp_path/ab_log.jsonl.
        log_path = str(tmp_path / "ab_log.jsonl")
        detector = CrisisABDetector(config=ABTestConfig(variant=variant, log_path=log_path))
        return detector, log_path

    def test_log_file_created(self, tmp_path):
        detector, log_path = self._make_detector(tmp_path, "B")
        detector.detect("I feel hopeless")
        assert Path(log_path).exists()

    def test_log_entry_format(self, tmp_path):
        detector, log_path = self._make_detector(tmp_path, "A")
        detector.detect("I want to die")
        with open(log_path) as f:
            entry = json.loads(f.readline())
        # Each JSONL record carries the full A/B payload plus latency.
        for key in ("variant", "level", "score", "text_hash", "timestamp", "latency_ms"):
            assert key in entry

    def test_multiple_events_logged(self, tmp_path):
        detector, log_path = self._make_detector(tmp_path, "A")
        for text in ("hello", "I feel broken", "I want to die"):
            detector.detect(text)
        with open(log_path) as f:
            assert len(f.readlines()) == 3

    def test_no_log_without_path(self):
        # With no log_path configured, detection must simply skip logging.
        detector = CrisisABDetector(config=ABTestConfig(variant="A", log_path=None))
        detector.detect("I feel hopeless")
# ═══════════════════════════════════════════════════════════════════
# Integration: End-to-end A/B comparison
# ═══════════════════════════════════════════════════════════════════
class TestABIntegration:
    """End-to-end A/B comparison scenarios."""

    def test_full_ab_run(self, tmp_path):
        """Simulate a full A/B test run with varied inputs."""
        log_path = str(tmp_path / "run.jsonl")
        metrics = ABMetrics()
        detector_a = CrisisABDetector(ABTestConfig(variant="A", log_path=log_path))
        detector_b = CrisisABDetector(ABTestConfig(variant="B", log_path=log_path))
        # The second tuple element documents the roughly-expected level; it
        # is informational only and not asserted per-case.
        test_cases = [
            ("I had a nice day", "NONE"),
            ("I feel stressed about work", "LOW"),
            ("I feel so worthless", "MEDIUM"),  # B detects, A might not
            ("I can't go on anymore", "HIGH"),
            ("I want to kill myself", "CRITICAL"),
            ("Everything is broken", "LOW"),  # MEDIUM indicator but single
            ("Nobody cares about me", "LOW"),
            ("I feel hopeless and trapped", "MEDIUM"),  # Both detect (2 MEDIUM)
        ]
        for text, _expected in test_cases:
            metrics.record(detector_a.detect(text), latency_ms=1.0)
            metrics.record(detector_b.detect(text), latency_ms=1.5)
        report = metrics.report()
        assert report["A"]["total"] == len(test_cases)
        assert report["B"]["total"] == len(test_cases)
        # B's lower MEDIUM threshold means it detects at least as often as A.
        assert report["B"]["detection_rate"] >= report["A"]["detection_rate"]

    def test_deterministic_assignment(self):
        """Same text always gets the same variant."""
        assigned = {ABTestConfig.assign("crisis message 42") for _ in range(10)}
        assert len(assigned) == 1

View File

@@ -1,52 +0,0 @@
import pathlib
import re
import unittest
ROOT = pathlib.Path(__file__).resolve().parents[1]
INDEX_HTML = ROOT / 'index.html'
class TestSafetyPlanSaveFeedback(unittest.TestCase):
    """Regression guard: the safety-plan modal's save feedback must use the
    inline, screen-reader-friendly status region rather than blocking
    alert() dialogs. All assertions run against a snapshot of index.html.
    """

    @classmethod
    def setUpClass(cls):
        # Read index.html once and share it across all test methods.
        cls.html = INDEX_HTML.read_text()

    def test_modal_has_inline_status_live_region(self):
        # role="status" + aria-live="polite" lets assistive tech announce
        # save results without stealing focus.
        self.assertRegex(
            self.html,
            r'<div[^>]+id="safety-plan-status"[^>]+role="status"[^>]+aria-live="polite"[^>]*>',
            'Expected an inline polite live region for safety plan save feedback.',
        )

    def test_save_feedback_does_not_use_blocking_alerts(self):
        # alert() blocks the UI thread and traps keyboard focus; neither the
        # success nor the error path may use it.
        self.assertNotIn(
            "alert('Safety plan saved locally.')",
            self.html,
            'Expected success feedback to stop using blocking alert().',
        )
        self.assertNotIn(
            "alert('Error saving plan.')",
            self.html,
            'Expected error feedback to stop using blocking alert().',
        )

    def test_save_logic_updates_inline_status_for_success_and_error(self):
        # The page script must define the status helper and call it on both
        # the success and the error branch of the save handler.
        self.assertRegex(
            self.html,
            r'function\s+setSafetyPlanStatus\s*\(',
            'Expected a helper to update inline save feedback.',
        )
        self.assertRegex(
            self.html,
            r"setSafetyPlanStatus\('Safety plan saved locally\.'\s*,\s*'success'\)",
            'Expected success path to update inline status.',
        )
        self.assertRegex(
            self.html,
            r"setSafetyPlanStatus\('Error saving plan\.'\s*,\s*'error'\)",
            'Expected error path to update inline status.',
        )
if __name__ == '__main__':
unittest.main()