Compare commits
13 Commits
burn/37-17
...
fix/101
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7cef18fdcb | ||
|
|
706024e11e | ||
| d412939b4f | |||
| 07c582aa08 | |||
| 5f95dc1e39 | |||
| b1f3cac36d | |||
| 07b3f67845 | |||
| c22bbbaf65 | |||
| 543cb1d40f | |||
| 3cfd01815a | |||
| 5a7ba9f207 | |||
| 8ed8f20a17 | |||
| 9d7d26033e |
@@ -6,8 +6,9 @@ Stands between a broken man and a machine that would tell him to die.
|
||||
|
||||
from .detect import detect_crisis, CrisisDetectionResult, format_result, get_urgency_emoji
|
||||
from .response import process_message, generate_response, CrisisResponse
|
||||
from .gateway import check_crisis, get_system_prompt, format_gateway_response, get_metrics_summary, get_metrics_report
|
||||
from .metrics import record_detection, record_continuation, get_metrics
|
||||
from .gateway import check_crisis, get_system_prompt, format_gateway_response
|
||||
from .session_tracker import CrisisSessionTracker, SessionState, check_crisis_with_session
|
||||
from .ab_testing import ABTestCrisisDetector, VariantRecord
|
||||
|
||||
__all__ = [
|
||||
"detect_crisis",
|
||||
@@ -20,9 +21,9 @@ __all__ = [
|
||||
"format_result",
|
||||
"format_gateway_response",
|
||||
"get_urgency_emoji",
|
||||
"get_metrics_summary",
|
||||
"get_metrics_report",
|
||||
"record_detection",
|
||||
"record_continuation",
|
||||
"get_metrics",
|
||||
"CrisisSessionTracker",
|
||||
"SessionState",
|
||||
"check_crisis_with_session",
|
||||
"ABTestCrisisDetector",
|
||||
"VariantRecord",
|
||||
]
|
||||
|
||||
112
crisis/ab_testing.py
Normal file
112
crisis/ab_testing.py
Normal file
@@ -0,0 +1,112 @@
|
||||
"""A/B test framework for crisis detection in the-door."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from typing import Callable, Dict, List, Optional, Tuple
|
||||
|
||||
from .detect import CrisisDetectionResult
|
||||
|
||||
|
||||
def _get_variant_override() -> Optional[str]:
|
||||
"""Return env override for deterministic testing/debugging."""
|
||||
value = os.environ.get("CRISIS_AB_VARIANT", "").strip().upper()
|
||||
if value in {"A", "B"}:
|
||||
return value
|
||||
return None
|
||||
|
||||
|
||||
@dataclass
class VariantRecord:
    """Single crisis detection event record with no user text or PII."""

    # Which detector handled the message: "A" or "B".
    variant: str
    # Level label the detector produced (e.g. "LOW", "CRITICAL").
    level: str
    # Wall-clock duration of the detection call, in milliseconds.
    latency_ms: float
    # How many indicators fired — a count only, never the raw patterns.
    indicator_count: int
    # Human-review verdict; stays None until record_outcome() sets it.
    false_positive: Optional[bool] = None


class ABTestCrisisDetector:
    """Route crisis detection between two variants and collect comparison stats."""

    def __init__(
        self,
        variant_a: Callable[[str], CrisisDetectionResult],
        variant_b: Callable[[str], CrisisDetectionResult],
        split: float = 0.5,
    ):
        """Store the two detectors and the traffic split (clamped to [0, 1])."""
        self.variant_a = variant_a
        self.variant_b = variant_b
        self.split = max(0.0, min(1.0, float(split)))
        self.records: List[VariantRecord] = []

    def _select_variant(self) -> str:
        """Pick "A" or "B": the env override wins, else a weighted coin flip."""
        forced = _get_variant_override()
        if forced is not None:
            return forced
        if random.random() < self.split:
            return "A"
        return "B"

    def detect(self, text: str) -> Tuple[CrisisDetectionResult, str, int]:
        """Run one detection through the chosen variant, timing it.

        Returns (result, variant letter, record id) — the id can later be
        passed to record_outcome() to attach a review verdict.
        """
        chosen = self._select_variant()
        run = self.variant_a if chosen == "A" else self.variant_b

        started = time.perf_counter()
        outcome = run(text)
        elapsed_ms = 1000.0 * (time.perf_counter() - started)

        self.records.append(
            VariantRecord(
                variant=chosen,
                level=outcome.level,
                latency_ms=elapsed_ms,
                indicator_count=len(outcome.indicators),
            )
        )
        return outcome, chosen, len(self.records) - 1

    def record_outcome(self, record_id: int, *, false_positive: bool) -> None:
        """Attach a human-review verdict to a previously returned record id.

        Raises:
            IndexError: if record_id was never issued by detect().
        """
        if not 0 <= record_id < len(self.records):
            raise IndexError(f"Unknown record id: {record_id}")
        self.records[record_id].false_positive = bool(false_positive)

    def get_stats(self) -> Dict[str, dict]:
        """Summarize per-variant counts, latency, level mix, and FP rate.

        Variants with no traffic get a zeroed stub; false_positive_rate is
        None until at least one record has been reviewed.
        """
        summary: Dict[str, dict] = {}
        for name in ("A", "B"):
            bucket = [r for r in self.records if r.variant == name]
            if not bucket:
                summary[name] = {
                    "count": 0,
                    "reviewed_count": 0,
                    "false_positive_rate": None,
                }
                continue

            level_counts: Dict[str, int] = {}
            for r in bucket:
                level_counts[r.level] = level_counts.get(r.level, 0) + 1

            reviewed = [r for r in bucket if r.false_positive is not None]
            fp_rate = None
            if reviewed:
                flagged = sum(1 for r in reviewed if r.false_positive)
                fp_rate = round(flagged / len(reviewed), 4)

            latencies = [r.latency_ms for r in bucket]
            summary[name] = {
                "count": len(bucket),
                "avg_latency_ms": round(sum(latencies) / len(bucket), 4),
                "max_latency_ms": round(max(latencies), 4),
                "min_latency_ms": round(min(latencies), 4),
                "avg_indicator_count": round(
                    sum(r.indicator_count for r in bucket) / len(bucket), 4
                ),
                "levels": level_counts,
                "reviewed_count": len(reviewed),
                "false_positive_rate": fp_rate,
            }
        return summary

    def reset(self) -> None:
        """Discard all collected records."""
        self.records.clear()
|
||||
@@ -22,7 +22,7 @@ from .response import (
|
||||
get_system_prompt_modifier,
|
||||
CrisisResponse,
|
||||
)
|
||||
from .metrics import record_detection, get_summary, print_summary, record_continuation
|
||||
from .session_tracker import CrisisSessionTracker
|
||||
|
||||
|
||||
def check_crisis(text: str) -> dict:
|
||||
@@ -35,9 +35,6 @@ def check_crisis(text: str) -> dict:
|
||||
detection = detect_crisis(text)
|
||||
response = generate_response(detection)
|
||||
|
||||
# Record metrics (privacy-preserving — no message content stored)
|
||||
record_detection(detection.level, detection.indicators)
|
||||
|
||||
return {
|
||||
"level": detection.level,
|
||||
"score": detection.score,
|
||||
@@ -97,21 +94,6 @@ def format_gateway_response(text: str, pretty: bool = True) -> str:
|
||||
return json.dumps(result)
|
||||
|
||||
|
||||
# ── Metrics endpoint ─────────────────────────────────────────────
|
||||
|
||||
def get_metrics_summary() -> dict:
    """
    Return crisis detection metrics summary.
    Privacy-preserving: no PII, no message content, just counts.
    """
    # Thin wrapper over metrics.get_summary() so callers only import gateway.
    return get_summary()
|
||||
|
||||
|
||||
def get_metrics_report() -> str:
    """Return formatted weekly metrics report for stdout/logs."""
    # NOTE(review): despite its name, metrics.print_summary() returns the
    # formatted string rather than printing — this wrapper relies on that.
    return print_summary()
|
||||
|
||||
|
||||
# ── Quick test interface ────────────────────────────────────────
|
||||
|
||||
def _interactive():
|
||||
|
||||
@@ -1,270 +0,0 @@
|
||||
"""
|
||||
Crisis Detection Metrics for the-door.
|
||||
|
||||
Privacy-preserving analytics layer. Tracks:
|
||||
- Detection counts per level (CRITICAL, HIGH, MEDIUM, LOW)
|
||||
- Keyword firing frequency (pattern hashes, not raw text)
|
||||
- Time-based distribution (hourly buckets)
|
||||
- Post-intervention behavior
|
||||
|
||||
NO PII is stored — no message content, no user identifiers, no timestamps
|
||||
finer than hourly granularity.
|
||||
|
||||
Storage: JSON file at crisis_metrics.json (configurable path).
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import hashlib
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
|
||||
# ── Pattern hash helper ───────────────────────────────────────────
|
||||
|
||||
def _hash_pattern(pattern: str) -> str:
|
||||
"""Hash a regex pattern to avoid storing raw crisis text."""
|
||||
return hashlib.sha256(pattern.encode()).hexdigest()[:12]
|
||||
|
||||
|
||||
# ── Hour bucket helper ────────────────────────────────────────────
|
||||
|
||||
def _current_hour_key() -> str:
|
||||
"""Return current UTC hour as 'YYYY-MM-DDTHH'."""
|
||||
return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H")
|
||||
|
||||
|
||||
def _current_day_key() -> str:
|
||||
"""Return current UTC day as 'YYYY-MM-DD'."""
|
||||
return datetime.now(timezone.utc).strftime("%Y-%m-%d")
|
||||
|
||||
|
||||
# ── Metrics store ─────────────────────────────────────────────────
|
||||
|
||||
@dataclass
class CrisisMetrics:
    """In-memory metrics accumulator with JSON persistence.

    Stores only counts, hashed pattern ids, and hour/day buckets —
    never message content or user identifiers.
    """

    # Total detections per level
    detections_by_level: Dict[str, int] = field(default_factory=lambda: {
        "CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "NONE": 0
    })

    # Pattern hash -> count (tracks which indicators fire most)
    keyword_frequency: Dict[str, int] = field(default_factory=dict)

    # Hourly detection counts: "YYYY-MM-DDTHH" -> total
    hourly_counts: Dict[str, int] = field(default_factory=dict)

    # Daily detection counts: "YYYY-MM-DD" -> {level: count}
    daily_counts: Dict[str, Dict[str, int]] = field(default_factory=dict)

    # Total messages scanned (for false-positive estimation)
    total_scanned: int = 0

    # Total detections (non-NONE)
    total_detections: int = 0

    # Post-intervention tracking (session-scoped, reset on restart)
    interventions: int = 0
    continued_after_intervention: int = 0

    def record(self, level: str, indicators: List[str]) -> None:
        """Record a single detection event.

        Args:
            level: Severity label ("CRITICAL", "HIGH", "MEDIUM", "LOW", "NONE").
            indicators: Raw patterns that fired; only their hashes are kept.
        """
        self.total_scanned += 1

        # Level counts — .get() tolerates levels absent from the default dict
        self.detections_by_level[level] = self.detections_by_level.get(level, 0) + 1

        if level != "NONE":
            self.total_detections += 1

        # Hourly bucket
        hour = _current_hour_key()
        self.hourly_counts[hour] = self.hourly_counts.get(hour, 0) + 1

        # Daily bucket
        day = _current_day_key()
        if day not in self.daily_counts:
            self.daily_counts[day] = {}
        daily = self.daily_counts[day]
        daily[level] = daily.get(level, 0) + 1

        # Keyword frequency (hash patterns so no raw crisis text is stored)
        for pattern in indicators:
            h = _hash_pattern(pattern)
            self.keyword_frequency[h] = self.keyword_frequency.get(h, 0) + 1

        # Intervention tracking — only the two most severe levels count
        if level in ("CRITICAL", "HIGH"):
            self.interventions += 1

    def record_continuation(self) -> None:
        """Call when a user continues conversation after crisis intervention."""
        self.continued_after_intervention += 1

    def to_dict(self) -> dict:
        """Serialize to JSON-safe dict."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict) -> "CrisisMetrics":
        """Deserialize from dict; missing keys fall back to empty/zero."""
        return cls(
            detections_by_level=data.get("detections_by_level", {}),
            keyword_frequency=data.get("keyword_frequency", {}),
            hourly_counts=data.get("hourly_counts", {}),
            daily_counts=data.get("daily_counts", {}),
            total_scanned=data.get("total_scanned", 0),
            total_detections=data.get("total_detections", 0),
            interventions=data.get("interventions", 0),
            continued_after_intervention=data.get("continued_after_intervention", 0),
        )

    # ── Derived metrics ───────────────────────────────────────────

    @property
    def false_positive_estimate(self) -> float:
        """
        Estimate false positive rate.
        Heuristic: users who continue chatting after HIGH/CRITICAL intervention
        were likely not in true crisis. Returns 0.0 if no interventions.
        """
        if self.interventions == 0:
            return 0.0
        return self.continued_after_intervention / self.interventions

    @property
    def detection_rate(self) -> float:
        """Fraction of scanned messages that triggered any detection."""
        if self.total_scanned == 0:
            return 0.0
        return self.total_detections / self.total_scanned

    def top_keywords(self, n: int = 10) -> List[tuple]:
        """Return top N most-fired pattern hashes with counts."""
        # Negated count keeps the sort stable without reverse=True
        sorted_kw = sorted(self.keyword_frequency.items(), key=lambda x: -x[1])
        return sorted_kw[:n]

    def weekly_summary(self) -> dict:
        """Generate a 7-day summary from daily_counts.

        Covers today and the six preceding UTC days; days with no data
        contribute zero. "by_level" intentionally excludes NONE.
        """
        # Local import keeps the module's top-level import list unchanged
        from datetime import timedelta

        today = datetime.now(timezone.utc).date()
        summary = {
            "period_start": (today - timedelta(days=6)).isoformat(),
            "period_end": today.isoformat(),
            "total_detections": 0,
            "by_level": {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0},
            "daily_totals": {},
        }

        # Walk the window oldest-first so daily_totals iterates in date order
        for i in range(7):
            day = (today - timedelta(days=6 - i)).isoformat()
            day_data = self.daily_counts.get(day, {})
            day_total = sum(day_data.values())
            summary["daily_totals"][day] = day_total
            summary["total_detections"] += day_total
            for level in ("CRITICAL", "HIGH", "MEDIUM", "LOW"):
                summary["by_level"][level] += day_data.get(level, 0)

        summary["false_positive_estimate"] = self.false_positive_estimate
        summary["detection_rate"] = self.detection_rate
        summary["top_keywords"] = self.top_keywords(5)
        return summary

    def format_summary(self) -> str:
        """Human-readable weekly summary for stdout/log."""
        s = self.weekly_summary()
        lines = [
            "=== Crisis Detection Weekly Summary ===",
            f"Period: {s['period_start']} to {s['period_end']}",
            f"Total detections: {s['total_detections']}",
            "",
            "By level:",
        ]
        # '#' bar charts scale linearly with the raw count
        for level in ("CRITICAL", "HIGH", "MEDIUM", "LOW"):
            count = s["by_level"][level]
            bar = "#" * count
            lines.append(f" {level:10s} {count:4d} {bar}")

        lines.append("")
        lines.append("Daily trend:")
        for day, count in s["daily_totals"].items():
            bar = "#" * count
            lines.append(f" {day} {count:4d} {bar}")

        lines.append("")
        lines.append(f"Detection rate: {s['detection_rate']:.1%}")
        lines.append(f"False positive estimate: {s['false_positive_estimate']:.1%}")
        lines.append("")
        lines.append("Top indicators (hashed):")
        for h, count in s["top_keywords"]:
            lines.append(f" {h} {count:4d}x")

        return "\n".join(lines)
|
||||
|
||||
|
||||
# ── Persistence ───────────────────────────────────────────────────
|
||||
|
||||
_DEFAULT_PATH = os.path.join(os.path.dirname(__file__), "crisis_metrics.json")
|
||||
|
||||
|
||||
def load_metrics(path: str = _DEFAULT_PATH) -> CrisisMetrics:
    """Load metrics from JSON file, or return empty metrics if missing."""
    try:
        with open(path) as handle:
            payload = json.load(handle)
    except (FileNotFoundError, json.JSONDecodeError):
        # Missing or corrupt file: start fresh rather than crashing
        return CrisisMetrics()
    return CrisisMetrics.from_dict(payload)
|
||||
|
||||
|
||||
def save_metrics(metrics: CrisisMetrics, path: str = _DEFAULT_PATH) -> None:
    """Persist metrics to JSON file."""
    serialized = metrics.to_dict()
    with open(path, "w") as handle:
        json.dump(serialized, handle, indent=2)
|
||||
|
||||
|
||||
# ── Global singleton ──────────────────────────────────────────────
|
||||
|
||||
_metrics: Optional[CrisisMetrics] = None
|
||||
_metrics_path: str = _DEFAULT_PATH
|
||||
|
||||
|
||||
def get_metrics(path: Optional[str] = None) -> CrisisMetrics:
    """Get or initialize the global metrics singleton.

    Args:
        path: Optional override for the metrics JSON file path; remembered
            for subsequent saves. When falsy, the current path is kept.

    Returns:
        The shared CrisisMetrics instance, loaded from disk on first use.
    """
    # Fix: the default was annotated `path: str = None`, which violates
    # PEP 484 (implicit Optional). Runtime behavior is unchanged.
    global _metrics, _metrics_path
    if path:
        _metrics_path = path
    if _metrics is None:
        _metrics = load_metrics(_metrics_path)
    return _metrics
|
||||
|
||||
|
||||
def record_detection(level: str, indicators: List[str], path: Optional[str] = None) -> None:
    """Record a crisis detection event and persist.

    Args:
        level: Severity label for this message.
        indicators: Patterns that fired (hashed before storage — no raw text).
        path: Optional metrics file override, forwarded to get_metrics().
    """
    # Fix: annotation was `path: str = None` (implicit Optional, PEP 484).
    m = get_metrics(path)
    m.record(level, indicators)
    save_metrics(m, _metrics_path)
|
||||
|
||||
|
||||
def record_continuation(path: Optional[str] = None) -> None:
    """Record that a user continued after crisis intervention.

    Args:
        path: Optional metrics file override, forwarded to get_metrics().
    """
    # Fix: annotation was `path: str = None` (implicit Optional, PEP 484).
    m = get_metrics(path)
    m.record_continuation()
    save_metrics(m, _metrics_path)
|
||||
|
||||
|
||||
def get_summary(path: Optional[str] = None) -> dict:
    """Get the weekly summary as a dict.

    Args:
        path: Optional metrics file override, forwarded to get_metrics().
    """
    # Fix: annotation was `path: str = None` (implicit Optional, PEP 484).
    m = get_metrics(path)
    return m.weekly_summary()
|
||||
|
||||
|
||||
def print_summary(path: Optional[str] = None) -> str:
    """Get and format the weekly summary.

    Args:
        path: Optional metrics file override, forwarded to get_metrics().

    Returns:
        The formatted report string (the caller decides whether to print it).
    """
    # Fix: annotation was `path: str = None` (implicit Optional, PEP 484).
    m = get_metrics(path)
    return m.format_summary()
|
||||
259
crisis/session_tracker.py
Normal file
259
crisis/session_tracker.py
Normal file
@@ -0,0 +1,259 @@
|
||||
"""
|
||||
Session-level crisis tracking and escalation for the-door (P0 #35).
|
||||
|
||||
Tracks crisis detection across messages within a single conversation,
|
||||
detecting escalation and de-escalation patterns. Privacy-first: no
|
||||
persistence beyond the conversation session.
|
||||
|
||||
Each message is analyzed in isolation by detect.py, but this module
|
||||
maintains session state so the system can recognize patterns like:
|
||||
- "I'm fine" → "I'm struggling" → "I can't go on" (rapid escalation)
|
||||
- "I want to die" → "I'm calmer now" → "feeling better" (de-escalation)
|
||||
|
||||
Usage:
|
||||
from crisis.session_tracker import CrisisSessionTracker
|
||||
|
||||
tracker = CrisisSessionTracker()
|
||||
|
||||
# Feed each message's detection result
|
||||
state = tracker.record(detect_crisis("I'm having a tough day"))
|
||||
print(state.current_level) # "LOW"
|
||||
print(state.is_escalating) # False
|
||||
|
||||
state = tracker.record(detect_crisis("I feel hopeless"))
|
||||
print(state.is_escalating) # True (LOW → MEDIUM/HIGH in 2 messages)
|
||||
|
||||
# Get system prompt modifier
|
||||
modifier = tracker.get_session_modifier()
|
||||
# "User has escalated from LOW to HIGH over 2 messages."
|
||||
|
||||
# Reset for new session
|
||||
tracker.reset()
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional
|
||||
|
||||
from .detect import CrisisDetectionResult, SCORES
|
||||
|
||||
# Level ordering for comparison (higher = more severe)
|
||||
LEVEL_ORDER = {"NONE": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 3, "CRITICAL": 4}
|
||||
|
||||
|
||||
@dataclass
class SessionState:
    """Immutable snapshot of session crisis tracking state."""

    # Level of the most recent message ("NONE".."CRITICAL")
    current_level: str = "NONE"
    # Most severe level seen so far this session
    peak_level: str = "NONE"
    # Number of messages recorded this session
    message_count: int = 0
    # Per-message level labels, oldest first (a copy — safe to inspect)
    level_history: List[str] = field(default_factory=list)
    # True when the level climbed >= 2 tiers within the escalation window
    is_escalating: bool = False
    # True when a HIGH/CRITICAL peak is followed by sustained LOW/NONE
    is_deescalating: bool = False
    escalation_rate: float = 0.0  # levels gained per message
    consecutive_low_messages: int = 0  # for de-escalation tracking
|
||||
|
||||
|
||||
class CrisisSessionTracker:
    """
    Session-level crisis state tracker.

    Privacy-first: no database, no network calls, no cross-session
    persistence. State lives only in memory for the duration of
    a conversation, then is discarded on reset().
    """

    # Thresholds (from issue #35)
    ESCALATION_WINDOW = 3  # messages: LOW → HIGH in ≤3 messages = rapid escalation
    DEESCALATION_WINDOW = 5  # messages: need 5+ consecutive LOW messages after CRITICAL

    def __init__(self) -> None:
        # All state initialization lives in reset() so both paths agree
        self.reset()

    def reset(self) -> None:
        """Reset all session state. Call on new conversation."""
        self._current_level = "NONE"
        self._peak_level = "NONE"
        self._message_count = 0
        self._level_history: List[str] = []
        self._consecutive_low = 0

    @property
    def state(self) -> SessionState:
        """Return immutable snapshot of current session state."""
        is_escalating = self._detect_escalation()
        is_deescalating = self._detect_deescalation()
        rate = self._compute_escalation_rate()

        return SessionState(
            current_level=self._current_level,
            peak_level=self._peak_level,
            message_count=self._message_count,
            # Copy so callers cannot mutate the tracker's history
            level_history=list(self._level_history),
            is_escalating=is_escalating,
            is_deescalating=is_deescalating,
            escalation_rate=rate,
            consecutive_low_messages=self._consecutive_low,
        )

    def record(self, detection: CrisisDetectionResult) -> SessionState:
        """
        Record a crisis detection result for the current message.

        Only detection.level is consumed — no message text is retained.

        Returns updated SessionState.
        """
        level = detection.level
        self._message_count += 1
        self._level_history.append(level)

        # Update peak (unknown labels score 0 and never raise the peak)
        if LEVEL_ORDER.get(level, 0) > LEVEL_ORDER.get(self._peak_level, 0):
            self._peak_level = level

        # Track consecutive LOW/NONE messages for de-escalation
        if LEVEL_ORDER.get(level, 0) <= LEVEL_ORDER["LOW"]:
            self._consecutive_low += 1
        else:
            self._consecutive_low = 0

        self._current_level = level
        return self.state

    def _detect_escalation(self) -> bool:
        """
        Detect rapid escalation: LOW → HIGH within ESCALATION_WINDOW messages.

        Looks at the last N messages and checks if the level has climbed
        significantly (at least 2 tiers).
        """
        if len(self._level_history) < 2:
            return False

        window = self._level_history[-self.ESCALATION_WINDOW:]
        if len(window) < 2:
            return False

        first_level = window[0]
        last_level = window[-1]

        first_score = LEVEL_ORDER.get(first_level, 0)
        last_score = LEVEL_ORDER.get(last_level, 0)

        # Escalation = climbed at least 2 tiers in the window
        return (last_score - first_score) >= 2

    def _detect_deescalation(self) -> bool:
        """
        Detect de-escalation: was at CRITICAL/HIGH, now sustained LOW/NONE
        for DEESCALATION_WINDOW consecutive messages.
        """
        # Never de-escalating unless the session actually peaked at HIGH+
        if LEVEL_ORDER.get(self._peak_level, 0) < LEVEL_ORDER["HIGH"]:
            return False

        return self._consecutive_low >= self.DEESCALATION_WINDOW

    def _compute_escalation_rate(self) -> float:
        """
        Compute levels gained per message over the conversation.

        Positive = escalating, negative = de-escalating, 0 = stable.
        """
        if self._message_count < 2:
            return 0.0

        # Compares the very first message's level against the current one
        first = LEVEL_ORDER.get(self._level_history[0], 0)
        current = LEVEL_ORDER.get(self._current_level, 0)

        return (current - first) / (self._message_count - 1)

    def get_session_modifier(self) -> str:
        """
        Generate a system prompt modifier reflecting session-level crisis state.

        Returns empty string if no session context is relevant.
        """
        # One message is not a trend — nothing to report yet
        if self._message_count < 2:
            return ""

        s = self.state

        # Checked in priority order: escalation > de-escalation > past peak
        if s.is_escalating:
            return (
                f"User has escalated from {self._level_history[0]} to "
                f"{s.current_level} over {s.message_count} messages. "
                f"Peak crisis level this session: {s.peak_level}. "
                "Respond with heightened awareness. The trajectory is "
                "worsening — prioritize safety and connection."
            )

        if s.is_deescalating:
            return (
                f"User previously reached {s.peak_level} crisis level "
                f"but has been at {s.current_level} or below for "
                f"{s.consecutive_low_messages} consecutive messages. "
                "The situation appears to be stabilizing. Continue "
                "supportive engagement while remaining vigilant."
            )

        if s.peak_level in ("CRITICAL", "HIGH") and s.current_level not in ("CRITICAL", "HIGH"):
            return (
                f"User previously reached {s.peak_level} crisis level "
                f"this session (currently {s.current_level}). "
                "Continue with care and awareness of the earlier crisis."
            )

        return ""

    def get_ui_hints(self) -> dict:
        """
        Return UI hints based on session state for the frontend.

        These are advisory — the frontend decides what to show.
        """
        s = self.state

        hints = {
            "session_escalating": s.is_escalating,
            "session_deescalating": s.is_deescalating,
            "session_peak_level": s.peak_level,
            "session_message_count": s.message_count,
        }

        # Extra keys only appear while an escalation is in progress
        if s.is_escalating:
            hints["escalation_warning"] = True
            hints["suggested_action"] = (
                "User crisis level is rising across messages. "
                "Consider increasing intervention level."
            )

        return hints
|
||||
|
||||
|
||||
def check_crisis_with_session(
    text: str,
    tracker: CrisisSessionTracker,
) -> dict:
    """
    Convenience: detect crisis and update session state in one call.

    Returns combined single-message detection + session-level context.
    """
    # Imported locally to avoid a circular import at module load time
    from .detect import detect_crisis
    from .gateway import check_crisis

    combined = dict(check_crisis(text))
    state = tracker.record(detect_crisis(text))

    combined["session"] = {
        "current_level": state.current_level,
        "peak_level": state.peak_level,
        "message_count": state.message_count,
        "is_escalating": state.is_escalating,
        "is_deescalating": state.is_deescalating,
        "modifier": tracker.get_session_modifier(),
        "ui_hints": tracker.get_ui_hints(),
    }
    return combined
|
||||
@@ -680,7 +680,7 @@ html, body {
|
||||
|
||||
<!-- Footer -->
|
||||
<footer id="footer">
|
||||
<a href="/about" aria-label="About The Door">about</a>
|
||||
<a href="/about.html" aria-label="About The Door">about</a>
|
||||
<button id="safety-plan-btn" aria-label="Open My Safety Plan">my safety plan</button>
|
||||
<button id="clear-chat-btn" aria-label="Clear chat history">clear chat</button>
|
||||
</footer>
|
||||
@@ -808,6 +808,7 @@ Sovereignty and service always.`;
|
||||
var crisisPanel = document.getElementById('crisis-panel');
|
||||
var crisisOverlay = document.getElementById('crisis-overlay');
|
||||
var overlayDismissBtn = document.getElementById('overlay-dismiss-btn');
|
||||
var overlayCallLink = document.querySelector('.overlay-call');
|
||||
var statusDot = document.querySelector('.status-dot');
|
||||
var statusText = document.getElementById('status-text');
|
||||
|
||||
@@ -1050,7 +1051,8 @@ Sovereignty and service always.`;
|
||||
}
|
||||
}, 1000);
|
||||
|
||||
overlayDismissBtn.focus();
|
||||
// Focus the Call 988 link (always enabled) — disabled buttons cannot receive focus
|
||||
if (overlayCallLink) overlayCallLink.focus();
|
||||
}
|
||||
|
||||
// Register focus trap on document (always listening, gated by class check)
|
||||
|
||||
138
tests/test_ab_testing.py
Normal file
138
tests/test_ab_testing.py
Normal file
@@ -0,0 +1,138 @@
|
||||
"""Tests for crisis.ab_testing — A/B test framework for crisis detection (#101)."""
|
||||
|
||||
import os
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from crisis.ab_testing import ABTestCrisisDetector
|
||||
from crisis.detect import CrisisDetectionResult, detect_crisis
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def clear_variant_override():
    """Remove CRISIS_AB_VARIANT around each test, restoring it afterwards.

    Autouse so no test accidentally inherits a forced variant from the
    developer's shell environment.
    """
    old = os.environ.pop("CRISIS_AB_VARIANT", None)
    try:
        yield
    finally:
        if old is not None:
            os.environ["CRISIS_AB_VARIANT"] = old
        else:
            os.environ.pop("CRISIS_AB_VARIANT", None)
|
||||
|
||||
|
||||
def _make_variant(level: str, indicators=None):
    """Build a stub detector that always returns the given level/indicators."""
    indicators = indicators or [f"mock_{level.lower()}"]

    def fn(text: str) -> CrisisDetectionResult:
        # Fresh list per call so no test can mutate shared state
        return CrisisDetectionResult(level=level, indicators=list(indicators))

    return fn
|
||||
|
||||
|
||||
def test_detect_returns_result_variant_and_logged_record():
    """detect() returns (result, variant, record_id) and logs one record."""
    detector = ABTestCrisisDetector(
        variant_a=_make_variant("LOW"),
        variant_b=_make_variant("HIGH"),
    )

    # Pin the coin flip so the variant assertion is deterministic
    with patch.object(detector, "_select_variant", return_value="A"):
        result, variant, record_id = detector.detect("test message")

    assert isinstance(result, CrisisDetectionResult)
    assert variant == "A"
    assert record_id == 0
    assert len(detector.records) == 1
    assert detector.records[0].variant == "A"
    assert detector.records[0].level == "LOW"
|
||||
|
||||
|
||||
def test_env_override_forces_variant_b():
    """A lowercase env override still forces variant B (value is upcased)."""
    os.environ["CRISIS_AB_VARIANT"] = "b"
    detector = ABTestCrisisDetector(
        variant_a=_make_variant("LOW"),
        variant_b=_make_variant("HIGH"),
    )

    result, variant, _ = detector.detect("test")

    assert variant == "B"
    assert result.level == "HIGH"
|
||||
|
||||
|
||||
def test_get_stats_reports_latency_counts_and_level_breakdown():
    """get_stats() groups records per variant with counts, levels, latency."""
    detector = ABTestCrisisDetector(
        variant_a=_make_variant("LOW"),
        variant_b=_make_variant("CRITICAL"),
    )

    # Force a 2-vs-1 split between the variants
    with patch.object(detector, "_select_variant", side_effect=["A", "A", "B"]):
        detector.detect("first")
        detector.detect("second")
        detector.detect("third")

    stats = detector.get_stats()
    assert stats["A"]["count"] == 2
    assert stats["B"]["count"] == 1
    assert stats["A"]["levels"]["LOW"] == 2
    assert stats["B"]["levels"]["CRITICAL"] == 1
    assert "avg_latency_ms" in stats["A"]
    assert "avg_indicator_count" in stats["B"]
|
||||
|
||||
|
||||
def test_false_positive_rate_is_computed_from_reviewed_outcomes():
    """FP rate = flagged / reviewed, computed independently per variant."""
    detector = ABTestCrisisDetector(
        variant_a=_make_variant("LOW"),
        variant_b=_make_variant("HIGH"),
    )

    with patch.object(detector, "_select_variant", side_effect=["A", "A", "B"]):
        _, _, a0 = detector.detect("first")
        _, _, a1 = detector.detect("second")
        _, _, b0 = detector.detect("third")

    detector.record_outcome(a0, false_positive=True)
    detector.record_outcome(a1, false_positive=False)
    detector.record_outcome(b0, false_positive=False)

    stats = detector.get_stats()
    assert stats["A"]["reviewed_count"] == 2
    assert stats["A"]["false_positive_rate"] == 0.5  # 1 flagged of 2 reviewed
    assert stats["B"]["false_positive_rate"] == 0.0
|
||||
|
||||
|
||||
def test_record_outcome_rejects_unknown_record():
    """record_outcome() raises IndexError for an id that was never issued."""
    detector = ABTestCrisisDetector(
        variant_a=_make_variant("LOW"),
        variant_b=_make_variant("HIGH"),
    )

    with pytest.raises(IndexError):
        detector.record_outcome(99, false_positive=True)
|
||||
|
||||
|
||||
def test_reset_clears_records_and_stats():
    """reset() empties the record log; stats fall back to zeroed stubs."""
    detector = ABTestCrisisDetector(
        variant_a=_make_variant("LOW"),
        variant_b=_make_variant("HIGH"),
    )
    detector.detect("test")
    detector.reset()

    assert detector.records == []
    stats = detector.get_stats()
    assert stats["A"]["count"] == 0
    assert stats["B"]["count"] == 0
|
||||
|
||||
|
||||
def test_with_real_detector_integration():
    """End-to-end with the real detector: a critical phrase yields CRITICAL."""
    # Both variants are the real detector, so routing cannot change the level
    detector = ABTestCrisisDetector(
        variant_a=detect_crisis,
        variant_b=detect_crisis,
    )

    result, variant, record_id = detector.detect("I want to kill myself")

    assert result.level == "CRITICAL"
    assert variant in ("A", "B")
    assert record_id == 0
|
||||
@@ -1,263 +0,0 @@
|
||||
"""
|
||||
Tests for crisis detection metrics (issue #37).
|
||||
Verifies privacy-preserving analytics layer works correctly.
|
||||
"""
|
||||
|
||||
import unittest
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import tempfile
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from crisis.metrics import (
|
||||
CrisisMetrics,
|
||||
load_metrics,
|
||||
save_metrics,
|
||||
_hash_pattern,
|
||||
)
|
||||
|
||||
|
||||
class TestCrisisMetrics(unittest.TestCase):
|
||||
"""Test the CrisisMetrics dataclass and persistence."""
|
||||
|
||||
def setUp(self):
|
||||
self.metrics = CrisisMetrics()
|
||||
self.tmpfile = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
|
||||
self.tmpfile.close()
|
||||
|
||||
def tearDown(self):
|
||||
if os.path.exists(self.tmpfile.name):
|
||||
os.unlink(self.tmpfile.name)
|
||||
|
||||
def test_record_none(self):
|
||||
"""Recording NONE should increment scanned but not detections."""
|
||||
self.metrics.record("NONE", [])
|
||||
self.assertEqual(self.metrics.total_scanned, 1)
|
||||
self.assertEqual(self.metrics.total_detections, 0)
|
||||
self.assertEqual(self.metrics.detections_by_level["NONE"], 1)
|
||||
|
||||
def test_record_critical(self):
|
||||
"""Recording CRITICAL should increment all relevant counters."""
|
||||
pattern = r"\bkill\s*(my)?self\b"
|
||||
self.metrics.record("CRITICAL", [pattern])
|
||||
self.assertEqual(self.metrics.total_scanned, 1)
|
||||
self.assertEqual(self.metrics.total_detections, 1)
|
||||
self.assertEqual(self.metrics.detections_by_level["CRITICAL"], 1)
|
||||
self.assertEqual(self.metrics.interventions, 1)
|
||||
|
||||
# Keyword should be hashed, not raw
|
||||
h = _hash_pattern(pattern)
|
||||
self.assertEqual(self.metrics.keyword_frequency[h], 1)
|
||||
|
||||
def test_record_high_is_intervention(self):
|
||||
"""HIGH detections should count as interventions."""
|
||||
self.metrics.record("HIGH", [r"\bdespair\b"])
|
||||
self.assertEqual(self.metrics.interventions, 1)
|
||||
|
||||
def test_record_medium_not_intervention(self):
|
||||
"""MEDIUM detections should NOT count as interventions."""
|
||||
self.metrics.record("MEDIUM", [r"\bbroken\b", r"\bworthless\b"])
|
||||
self.assertEqual(self.metrics.interventions, 0)
|
||||
self.assertEqual(self.metrics.total_detections, 1)
|
||||
|
||||
def test_record_multiple(self):
|
||||
"""Multiple detections should accumulate."""
|
||||
self.metrics.record("CRITICAL", [r"\bpattern1\b"])
|
||||
self.metrics.record("HIGH", [r"\bpattern2\b"])
|
||||
self.metrics.record("NONE", [])
|
||||
self.metrics.record("LOW", [r"\bsad\b"])
|
||||
|
||||
self.assertEqual(self.metrics.total_scanned, 4)
|
||||
self.assertEqual(self.metrics.total_detections, 3)
|
||||
self.assertEqual(self.metrics.interventions, 2)
|
||||
|
||||
def test_continuation_tracking(self):
|
||||
"""Recording continuation should affect false positive estimate."""
|
||||
self.metrics.record("CRITICAL", [r"\bpattern\b"])
|
||||
self.assertEqual(self.metrics.false_positive_estimate, 0.0)
|
||||
|
||||
self.metrics.record_continuation()
|
||||
self.assertEqual(self.metrics.continued_after_intervention, 1)
|
||||
self.assertEqual(self.metrics.false_positive_estimate, 1.0)
|
||||
|
||||
def test_false_positive_estimate_zero_when_no_interventions(self):
|
||||
"""False positive rate should be 0.0 with no interventions."""
|
||||
self.assertEqual(self.metrics.false_positive_estimate, 0.0)
|
||||
|
||||
def test_detection_rate(self):
|
||||
"""Detection rate should be detections/scanned."""
|
||||
self.metrics.record("CRITICAL", [])
|
||||
self.metrics.record("NONE", [])
|
||||
self.metrics.record("NONE", [])
|
||||
self.assertEqual(self.metrics.detection_rate, 1.0 / 3.0)
|
||||
|
||||
def test_detection_rate_zero_when_no_scans(self):
|
||||
self.assertEqual(self.metrics.detection_rate, 0.0)
|
||||
|
||||
def test_hourly_bucket(self):
|
||||
"""Detections should be bucketed by hour."""
|
||||
self.metrics.record("HIGH", [])
|
||||
hour_key = self.metrics.hourly_counts
|
||||
self.assertEqual(len(hour_key), 1)
|
||||
for k, v in hour_key.items():
|
||||
self.assertRegex(k, r"^\d{4}-\d{2}-\d{2}T\d{2}$")
|
||||
self.assertEqual(v, 1)
|
||||
|
||||
def test_daily_bucket(self):
|
||||
"""Detections should be bucketed by day with level."""
|
||||
self.metrics.record("CRITICAL", [])
|
||||
self.metrics.record("HIGH", [])
|
||||
self.assertEqual(len(self.metrics.daily_counts), 1)
|
||||
for day, levels in self.metrics.daily_counts.items():
|
||||
self.assertRegex(day, r"^\d{4}-\d{2}-\d{2}$")
|
||||
self.assertEqual(levels["CRITICAL"], 1)
|
||||
self.assertEqual(levels["HIGH"], 1)
|
||||
|
||||
def test_top_keywords(self):
|
||||
"""top_keywords should return most frequent hashed patterns."""
|
||||
p1 = r"\bpattern_a\b"
|
||||
p2 = r"\bpattern_b\b"
|
||||
h1 = _hash_pattern(p1)
|
||||
h2 = _hash_pattern(p2)
|
||||
|
||||
for _ in range(5):
|
||||
self.metrics.record("HIGH", [p1])
|
||||
for _ in range(2):
|
||||
self.metrics.record("MEDIUM", [p2])
|
||||
|
||||
top = self.metrics.top_keywords(2)
|
||||
self.assertEqual(len(top), 2)
|
||||
self.assertEqual(top[0], (h1, 5))
|
||||
self.assertEqual(top[1], (h2, 2))
|
||||
|
||||
def test_weekly_summary_structure(self):
|
||||
"""Weekly summary should have expected keys."""
|
||||
self.metrics.record("CRITICAL", [])
|
||||
summary = self.metrics.weekly_summary()
|
||||
|
||||
self.assertIn("period_start", summary)
|
||||
self.assertIn("period_end", summary)
|
||||
self.assertIn("total_detections", summary)
|
||||
self.assertIn("by_level", summary)
|
||||
self.assertIn("daily_totals", summary)
|
||||
self.assertIn("false_positive_estimate", summary)
|
||||
self.assertIn("detection_rate", summary)
|
||||
self.assertIn("top_keywords", summary)
|
||||
|
||||
self.assertEqual(summary["total_detections"], 1)
|
||||
self.assertEqual(summary["by_level"]["CRITICAL"], 1)
|
||||
|
||||
def test_format_summary_returns_string(self):
|
||||
"""format_summary should return a non-empty string."""
|
||||
self.metrics.record("CRITICAL", [])
|
||||
result = self.metrics.format_summary()
|
||||
self.assertIsInstance(result, str)
|
||||
self.assertIn("CRITICAL", result)
|
||||
|
||||
def test_persistence_round_trip(self):
|
||||
"""Metrics should survive save/load cycle."""
|
||||
self.metrics.record("CRITICAL", [r"\btest\b"])
|
||||
self.metrics.record("HIGH", [])
|
||||
self.metrics.record_continuation()
|
||||
|
||||
save_metrics(self.metrics, self.tmpfile.name)
|
||||
loaded = load_metrics(self.tmpfile.name)
|
||||
|
||||
self.assertEqual(loaded.total_scanned, 2)
|
||||
self.assertEqual(loaded.total_detections, 2)
|
||||
self.assertEqual(loaded.detections_by_level["CRITICAL"], 1)
|
||||
self.assertEqual(loaded.detections_by_level["HIGH"], 1)
|
||||
self.assertEqual(loaded.interventions, 2)
|
||||
self.assertEqual(loaded.continued_after_intervention, 1)
|
||||
|
||||
def test_load_missing_file_returns_empty(self):
|
||||
"""Loading a nonexistent file should return empty metrics."""
|
||||
m = load_metrics("/tmp/nonexistent_metrics_999.json")
|
||||
self.assertEqual(m.total_scanned, 0)
|
||||
self.assertEqual(m.total_detections, 0)
|
||||
|
||||
def test_load_corrupt_file_returns_empty(self):
|
||||
"""Loading a corrupt JSON file should return empty metrics."""
|
||||
with open(self.tmpfile.name, "w") as f:
|
||||
f.write("NOT JSON {{{")
|
||||
m = load_metrics(self.tmpfile.name)
|
||||
self.assertEqual(m.total_scanned, 0)
|
||||
|
||||
def test_hash_pattern_consistent(self):
|
||||
"""Same pattern should always produce same hash."""
|
||||
h1 = _hash_pattern(r"\bkill\s*self\b")
|
||||
h2 = _hash_pattern(r"\bkill\s*self\b")
|
||||
self.assertEqual(h1, h2)
|
||||
|
||||
def test_hash_pattern_different(self):
|
||||
"""Different patterns should produce different hashes."""
|
||||
h1 = _hash_pattern(r"\bpattern_a\b")
|
||||
h2 = _hash_pattern(r"\bpattern_b\b")
|
||||
self.assertNotEqual(h1, h2)
|
||||
|
||||
|
||||
class TestMetricsGatewayIntegration(unittest.TestCase):
|
||||
"""Test that metrics are recorded through the gateway."""
|
||||
|
||||
def test_check_crisis_records_metrics(self):
|
||||
"""check_crisis should record metrics automatically."""
|
||||
import tempfile
|
||||
from crisis.metrics import get_metrics, _metrics
|
||||
|
||||
# Use a temp metrics file
|
||||
tmpfile = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
|
||||
tmpfile.close()
|
||||
|
||||
try:
|
||||
# Reset global singleton
|
||||
import crisis.metrics
|
||||
crisis.metrics._metrics = None
|
||||
crisis.metrics._metrics_path = tmpfile.name
|
||||
|
||||
from crisis.gateway import check_crisis
|
||||
|
||||
# A critical message
|
||||
check_crisis("I want to kill myself")
|
||||
|
||||
m = get_metrics()
|
||||
self.assertEqual(m.total_scanned, 1)
|
||||
self.assertEqual(m.detections_by_level["CRITICAL"], 1)
|
||||
|
||||
# A safe message
|
||||
check_crisis("I had a good day today")
|
||||
self.assertEqual(m.total_scanned, 2)
|
||||
self.assertEqual(m.detections_by_level["NONE"], 1)
|
||||
|
||||
finally:
|
||||
# Reset singleton
|
||||
crisis.metrics._metrics = None
|
||||
if os.path.exists(tmpfile.name):
|
||||
os.unlink(tmpfile.name)
|
||||
|
||||
|
||||
class TestNoPIIStorage(unittest.TestCase):
|
||||
"""Verify that no message content is stored in metrics."""
|
||||
|
||||
def test_no_text_in_serialized_metrics(self):
|
||||
"""Metrics to_dict should never contain message text."""
|
||||
from crisis.metrics import CrisisMetrics
|
||||
|
||||
m = CrisisMetrics()
|
||||
# Record with actual crisis text — only patterns should be hashed
|
||||
m.record("CRITICAL", [r"\bkill\s*self\b"])
|
||||
|
||||
serialized = json.dumps(m.to_dict())
|
||||
|
||||
# These strings should NOT appear in serialized metrics
|
||||
self.assertNotIn("kill myself", serialized)
|
||||
self.assertNotIn("I want to", serialized)
|
||||
self.assertNotIn("user", serialized.lower())
|
||||
|
||||
# Only hashed patterns should appear (short hex strings)
|
||||
self.assertIn(_hash_pattern(r"\bkill\s*self\b"), serialized)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
@@ -52,6 +52,34 @@ class TestCrisisOverlayFocusTrap(unittest.TestCase):
|
||||
'Expected overlay dismissal to restore focus to the prior target.',
|
||||
)
|
||||
|
||||
def test_overlay_initial_focus_targets_enabled_call_link(self):
|
||||
"""Overlay must focus the Call 988 link, not the disabled dismiss button."""
|
||||
# Find the showOverlay function body (up to the closing of the setInterval callback
|
||||
# and the focus call that follows)
|
||||
show_start = self.html.find('function showOverlay()')
|
||||
self.assertGreater(show_start, -1, "showOverlay function not found")
|
||||
# Find the focus call within showOverlay (before the next function registration)
|
||||
focus_section = self.html[show_start:show_start + 2000]
|
||||
self.assertIn(
|
||||
'overlayCallLink',
|
||||
focus_section,
|
||||
"Expected showOverlay to reference overlayCallLink for initial focus.",
|
||||
)
|
||||
# Ensure the old buggy pattern is gone
|
||||
focus_line_region = self.html[show_start + 800:show_start + 1200]
|
||||
self.assertNotIn(
|
||||
'overlayDismissBtn.focus()',
|
||||
focus_line_region,
|
||||
"showOverlay must not focus the disabled dismiss button.",
|
||||
)
|
||||
|
||||
def test_overlay_call_link_variable_is_declared(self):
|
||||
self.assertIn(
|
||||
"querySelector('.overlay-call')",
|
||||
self.html,
|
||||
"Expected a JS reference to the .overlay-call link element.",
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
@@ -50,6 +50,22 @@ class TestCrisisOfflinePage(unittest.TestCase):
|
||||
for phrase in required_phrases:
|
||||
self.assertIn(phrase, self.lower_html)
|
||||
|
||||
def test_no_external_resources(self):
|
||||
"""Offline page must work without any network — no external CSS/JS."""
|
||||
import re
|
||||
html = self.html
|
||||
# No https:// links (except tel: and sms: which are protocol links, not network)
|
||||
external_urls = re.findall(r'href=["\']https://|src=["\']https://', html)
|
||||
self.assertEqual(external_urls, [], 'Offline page must not load external resources')
|
||||
# CSS and JS must be inline
|
||||
self.assertIn('<style>', html, 'CSS must be inline')
|
||||
self.assertIn('<script>', html, 'JS must be inline')
|
||||
|
||||
def test_retry_button_present(self):
|
||||
"""User must be able to retry connection from offline page."""
|
||||
self.assertIn('retry-connection', self.html)
|
||||
self.assertIn('Retry connection', self.html)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
277
tests/test_session_tracker.py
Normal file
277
tests/test_session_tracker.py
Normal file
@@ -0,0 +1,277 @@
|
||||
"""
|
||||
Tests for crisis session tracking and escalation (P0 #35).
|
||||
|
||||
Covers: session_tracker.py
|
||||
Run with: python -m pytest tests/test_session_tracker.py -v
|
||||
"""
|
||||
|
||||
import unittest
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from crisis.detect import detect_crisis
|
||||
from crisis.session_tracker import (
|
||||
CrisisSessionTracker,
|
||||
SessionState,
|
||||
check_crisis_with_session,
|
||||
)
|
||||
|
||||
|
||||
class TestSessionState(unittest.TestCase):
|
||||
"""Test SessionState defaults."""
|
||||
|
||||
def test_default_state(self):
|
||||
s = SessionState()
|
||||
self.assertEqual(s.current_level, "NONE")
|
||||
self.assertEqual(s.peak_level, "NONE")
|
||||
self.assertEqual(s.message_count, 0)
|
||||
self.assertEqual(s.level_history, [])
|
||||
self.assertFalse(s.is_escalating)
|
||||
self.assertFalse(s.is_deescalating)
|
||||
|
||||
|
||||
class TestSessionTracking(unittest.TestCase):
|
||||
"""Test basic session state tracking."""
|
||||
|
||||
def setUp(self):
|
||||
self.tracker = CrisisSessionTracker()
|
||||
|
||||
def test_record_none_message(self):
|
||||
state = self.tracker.record(detect_crisis("Hello Timmy"))
|
||||
self.assertEqual(state.current_level, "NONE")
|
||||
self.assertEqual(state.message_count, 1)
|
||||
self.assertEqual(state.peak_level, "NONE")
|
||||
|
||||
def test_record_low_message(self):
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
state = self.tracker.record(detect_crisis("Having a rough day"))
|
||||
self.assertIn(state.current_level, ("LOW", "NONE"))
|
||||
self.assertEqual(state.message_count, 2)
|
||||
|
||||
def test_record_critical_updates_peak(self):
|
||||
self.tracker.record(detect_crisis("Having a rough day"))
|
||||
state = self.tracker.record(detect_crisis("I want to kill myself"))
|
||||
self.assertEqual(state.current_level, "CRITICAL")
|
||||
self.assertEqual(state.peak_level, "CRITICAL")
|
||||
|
||||
def test_peak_preserved_after_drop(self):
|
||||
"""Peak level should stay at the highest seen, even after de-escalation."""
|
||||
self.tracker.record(detect_crisis("I want to kill myself"))
|
||||
state = self.tracker.record(detect_crisis("I'm feeling a bit better"))
|
||||
self.assertEqual(state.peak_level, "CRITICAL")
|
||||
|
||||
def test_level_history(self):
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
self.tracker.record(detect_crisis("Having a rough day"))
|
||||
state = self.tracker.record(detect_crisis("I want to die"))
|
||||
self.assertEqual(len(state.level_history), 3)
|
||||
self.assertEqual(state.level_history[0], "NONE")
|
||||
self.assertEqual(state.level_history[2], "CRITICAL")
|
||||
|
||||
def test_reset_clears_state(self):
|
||||
self.tracker.record(detect_crisis("I want to kill myself"))
|
||||
self.tracker.reset()
|
||||
state = self.tracker.state
|
||||
self.assertEqual(state.current_level, "NONE")
|
||||
self.assertEqual(state.peak_level, "NONE")
|
||||
self.assertEqual(state.message_count, 0)
|
||||
self.assertEqual(state.level_history, [])
|
||||
|
||||
|
||||
class TestEscalationDetection(unittest.TestCase):
|
||||
"""Test escalation detection: LOW → HIGH in ≤3 messages."""
|
||||
|
||||
def setUp(self):
|
||||
self.tracker = CrisisSessionTracker()
|
||||
|
||||
def test_no_escalation_single_message(self):
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
self.assertFalse(self.tracker.state.is_escalating)
|
||||
|
||||
def test_no_escalation_stable(self):
|
||||
"""Two normal messages should not trigger escalation."""
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
state = self.tracker.record(detect_crisis("How are you?"))
|
||||
self.assertFalse(state.is_escalating)
|
||||
|
||||
def test_rapid_escalation_low_to_high(self):
|
||||
"""LOW → HIGH in 2 messages = rapid escalation."""
|
||||
self.tracker.record(detect_crisis("Having a rough day"))
|
||||
state = self.tracker.record(detect_crisis("I can't take this anymore, everything is pointless"))
|
||||
# Depending on detection, this could be HIGH or CRITICAL
|
||||
if state.current_level in ("HIGH", "CRITICAL"):
|
||||
self.assertTrue(state.is_escalating)
|
||||
|
||||
def test_rapid_escalation_three_messages(self):
|
||||
"""NONE → LOW → HIGH in 3 messages = escalation."""
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
self.tracker.record(detect_crisis("Having a rough day"))
|
||||
state = self.tracker.record(detect_crisis("I feel completely hopeless with no way out"))
|
||||
if state.current_level in ("HIGH", "CRITICAL"):
|
||||
self.assertTrue(state.is_escalating)
|
||||
|
||||
def test_escalation_rate(self):
|
||||
"""Rate should be positive when escalating."""
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
self.tracker.record(detect_crisis("I want to die"))
|
||||
state = self.tracker.state
|
||||
self.assertGreater(state.escalation_rate, 0)
|
||||
|
||||
|
||||
class TestDeescalationDetection(unittest.TestCase):
|
||||
"""Test de-escalation: sustained LOW after HIGH/CRITICAL."""
|
||||
|
||||
def setUp(self):
|
||||
self.tracker = CrisisSessionTracker()
|
||||
|
||||
def test_no_deescalation_without_prior_crisis(self):
|
||||
"""No de-escalation if never reached HIGH/CRITICAL."""
|
||||
for _ in range(6):
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
self.assertFalse(self.tracker.state.is_deescalating)
|
||||
|
||||
def test_deescalation_after_critical(self):
|
||||
"""5+ consecutive LOW/NONE messages after CRITICAL = de-escalation."""
|
||||
self.tracker.record(detect_crisis("I want to kill myself"))
|
||||
for _ in range(5):
|
||||
self.tracker.record(detect_crisis("I'm doing better today"))
|
||||
state = self.tracker.state
|
||||
if state.peak_level == "CRITICAL":
|
||||
self.assertTrue(state.is_deescalating)
|
||||
|
||||
def test_deescalation_after_high(self):
|
||||
"""5+ consecutive LOW/NONE messages after HIGH = de-escalation."""
|
||||
self.tracker.record(detect_crisis("I feel completely hopeless with no way out"))
|
||||
for _ in range(5):
|
||||
self.tracker.record(detect_crisis("Feeling okay"))
|
||||
state = self.tracker.state
|
||||
if state.peak_level == "HIGH":
|
||||
self.assertTrue(state.is_deescalating)
|
||||
|
||||
def test_interrupted_deescalation(self):
|
||||
"""De-escalation resets if a HIGH message interrupts."""
|
||||
self.tracker.record(detect_crisis("I want to kill myself"))
|
||||
for _ in range(3):
|
||||
self.tracker.record(detect_crisis("Doing better"))
|
||||
# Interrupt with another crisis
|
||||
self.tracker.record(detect_crisis("I feel hopeless again"))
|
||||
self.tracker.record(detect_crisis("Feeling okay now"))
|
||||
state = self.tracker.state
|
||||
# Should NOT be de-escalating yet (counter reset)
|
||||
self.assertFalse(state.is_deescalating)
|
||||
|
||||
|
||||
class TestSessionModifier(unittest.TestCase):
|
||||
"""Test system prompt modifier generation."""
|
||||
|
||||
def setUp(self):
|
||||
self.tracker = CrisisSessionTracker()
|
||||
|
||||
def test_no_modifier_for_single_message(self):
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
self.assertEqual(self.tracker.get_session_modifier(), "")
|
||||
|
||||
def test_no_modifier_for_stable_session(self):
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
self.tracker.record(detect_crisis("Good morning"))
|
||||
self.assertEqual(self.tracker.get_session_modifier(), "")
|
||||
|
||||
def test_escalation_modifier(self):
|
||||
"""Escalating session should produce a modifier."""
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
self.tracker.record(detect_crisis("I want to die"))
|
||||
modifier = self.tracker.get_session_modifier()
|
||||
if self.tracker.state.is_escalating:
|
||||
self.assertIn("escalated", modifier.lower())
|
||||
self.assertIn("NONE", modifier)
|
||||
self.assertIn("CRITICAL", modifier)
|
||||
|
||||
def test_deescalation_modifier(self):
|
||||
"""De-escalating session should mention stabilizing."""
|
||||
self.tracker.record(detect_crisis("I want to kill myself"))
|
||||
for _ in range(5):
|
||||
self.tracker.record(detect_crisis("I'm feeling okay"))
|
||||
modifier = self.tracker.get_session_modifier()
|
||||
if self.tracker.state.is_deescalating:
|
||||
self.assertIn("stabilizing", modifier.lower())
|
||||
|
||||
def test_prior_crisis_modifier(self):
|
||||
"""Past crisis should be noted even without active escalation."""
|
||||
self.tracker.record(detect_crisis("I want to die"))
|
||||
self.tracker.record(detect_crisis("Feeling a bit better"))
|
||||
modifier = self.tracker.get_session_modifier()
|
||||
# Should note the prior CRITICAL
|
||||
if modifier:
|
||||
self.assertIn("CRITICAL", modifier)
|
||||
|
||||
|
||||
class TestUIHints(unittest.TestCase):
|
||||
"""Test UI hint generation."""
|
||||
|
||||
def setUp(self):
|
||||
self.tracker = CrisisSessionTracker()
|
||||
|
||||
def test_ui_hints_structure(self):
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
hints = self.tracker.get_ui_hints()
|
||||
self.assertIn("session_escalating", hints)
|
||||
self.assertIn("session_deescalating", hints)
|
||||
self.assertIn("session_peak_level", hints)
|
||||
self.assertIn("session_message_count", hints)
|
||||
|
||||
def test_ui_hints_escalation_warning(self):
|
||||
"""Escalating session should have warning hint."""
|
||||
self.tracker.record(detect_crisis("Hello"))
|
||||
self.tracker.record(detect_crisis("I want to die"))
|
||||
hints = self.tracker.get_ui_hints()
|
||||
if hints["session_escalating"]:
|
||||
self.assertTrue(hints.get("escalation_warning"))
|
||||
self.assertIn("suggested_action", hints)
|
||||
|
||||
|
||||
class TestCheckCrisisWithSession(unittest.TestCase):
|
||||
"""Test the convenience function combining detection + session tracking."""
|
||||
|
||||
def test_returns_combined_data(self):
|
||||
tracker = CrisisSessionTracker()
|
||||
result = check_crisis_with_session("I want to die", tracker)
|
||||
self.assertIn("level", result)
|
||||
self.assertIn("session", result)
|
||||
self.assertIn("current_level", result["session"])
|
||||
self.assertIn("peak_level", result["session"])
|
||||
self.assertIn("modifier", result["session"])
|
||||
|
||||
def test_session_updates_across_calls(self):
|
||||
tracker = CrisisSessionTracker()
|
||||
check_crisis_with_session("Hello", tracker)
|
||||
result = check_crisis_with_session("I want to die", tracker)
|
||||
self.assertEqual(result["session"]["message_count"], 2)
|
||||
self.assertEqual(result["session"]["peak_level"], "CRITICAL")
|
||||
|
||||
|
||||
class TestPrivacy(unittest.TestCase):
|
||||
"""Verify privacy-first design principles."""
|
||||
|
||||
def test_no_persistence_mechanism(self):
|
||||
"""Session tracker should have no database, file, or network calls."""
|
||||
import inspect
|
||||
source = inspect.getsource(CrisisSessionTracker)
|
||||
# Should not import database, requests, or file I/O
|
||||
forbidden = ["sqlite", "requests", "urllib", "open(", "httpx", "aiohttp"]
|
||||
for word in forbidden:
|
||||
self.assertNotIn(word, source.lower(),
|
||||
f"Session tracker should not use {word} — privacy-first design")
|
||||
|
||||
def test_state_contained_in_memory(self):
|
||||
"""All state should be instance attributes, not module-level."""
|
||||
tracker = CrisisSessionTracker()
|
||||
tracker.record(detect_crisis("I want to die"))
|
||||
# New tracker should have clean state (no global contamination)
|
||||
fresh = CrisisSessionTracker()
|
||||
self.assertEqual(fresh.state.current_level, "NONE")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
Reference in New Issue
Block a user