Compare commits

..

13 Commits

Author SHA1 Message Date
Alexander Whitestone
44e0396a1f feat: add behavioral crisis pattern detection (#133)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 5s
Smoke Test / smoke (pull_request) Successful in 11s
2026-04-20 12:11:02 -04:00
Alexander Whitestone
ac2d230bc1 wip: add behavioral tracker regression tests 2026-04-20 12:03:08 -04:00
d412939b4f fix: footer /about link to point to static about.html
Fixes #59

The footer links to /about but the repo ships about.html. On a plain static server this results in a 404. Changed to /about.html so the link resolves correctly.
2026-04-17 05:37:40 +00:00
07c582aa08 Merge pull request 'fix: crisis overlay initial focus to enabled Call 988 link (#69)' (#126) from burn/69-1776264183 into main
Merge PR #126: fix: crisis overlay initial focus to enabled Call 988 link (#69)
2026-04-17 01:46:56 +00:00
5f95dc1e39 Merge pull request '[P3] Service worker: cache crisis resources for offline (#41)' (#122) from burn/41-1776264184 into main
Merge PR #122: [P3] Service worker: cache crisis resources for offline (#41)
2026-04-17 01:46:55 +00:00
b1f3cac36d Merge pull request 'feat: session-level crisis tracking and escalation (closes #35)' (#118) from door/issue-35 into main
Merge PR #118: feat: session-level crisis tracking and escalation (closes #35)
2026-04-17 01:46:53 +00:00
07b3f67845 fix: crisis overlay initial focus to enabled Call 988 link (#69)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 9s
Smoke Test / smoke (pull_request) Successful in 15s
2026-04-15 15:09:36 +00:00
c22bbbaf65 fix: crisis overlay initial focus to enabled Call 988 link (#69) 2026-04-15 15:09:32 +00:00
543cb1d40f test: add offline self-containment and retry button tests (#41)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 4s
Smoke Test / smoke (pull_request) Successful in 11s
2026-04-15 14:58:44 +00:00
3cfd01815a feat: session-level crisis tracking and escalation (closes #35)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 17s
Smoke Test / smoke (pull_request) Successful in 23s
2026-04-15 11:49:52 +00:00
5a7ba9f207 feat: session-level crisis tracking and escalation (closes #35) 2026-04-15 11:49:51 +00:00
8ed8f20a17 feat: session-level crisis tracking and escalation (closes #35) 2026-04-15 11:49:49 +00:00
9d7d26033e feat: session-level crisis tracking and escalation (closes #35) 2026-04-15 11:49:47 +00:00
11 changed files with 1064 additions and 561 deletions

View File

@@ -6,8 +6,9 @@ Stands between a broken man and a machine that would tell him to die.
from .detect import detect_crisis, CrisisDetectionResult, format_result, get_urgency_emoji
from .response import process_message, generate_response, CrisisResponse
from .gateway import check_crisis, get_system_prompt, format_gateway_response, get_metrics_summary, get_metrics_report
from .metrics import record_detection, record_continuation, get_metrics
from .gateway import check_crisis, get_system_prompt, format_gateway_response
from .behavioral import BehavioralTracker, BehavioralSignal
from .session_tracker import CrisisSessionTracker, SessionState, check_crisis_with_session
__all__ = [
"detect_crisis",
@@ -20,9 +21,9 @@ __all__ = [
"format_result",
"format_gateway_response",
"get_urgency_emoji",
"get_metrics_summary",
"get_metrics_report",
"record_detection",
"record_continuation",
"get_metrics",
"BehavioralTracker",
"BehavioralSignal",
"CrisisSessionTracker",
"SessionState",
"check_crisis_with_session",
]

304
crisis/behavioral.py Normal file
View File

@@ -0,0 +1,304 @@
"""Behavioral crisis pattern detection for the-door (#133).
Detects crisis risk from behavioral patterns, not just message content:
- message frequency spikes versus a 7-day rolling baseline
- late-night messaging (2-5 AM)
- withdrawal / isolation via a sharp drop from the recent daily baseline
- session length trend versus recent sessions
- return after long absence
- rising crisis-score trend across recent messages
Privacy-first:
- in-memory only
- no database
- no file I/O
- no network calls
"""
from __future__ import annotations
from collections import defaultdict
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from typing import Any
# Hours of the day treated as peak late-night risk (read from event timestamps,
# which record() coerces to UTC when naive — see BehavioralTracker.record).
HIGH_RISK_HOURS = {2, 3, 4}
# Adjacent off-hours treated as mildly elevated risk.
ELEVATED_RISK_HOURS = {1, 5}
# Number of prior days used for message-rate and withdrawal baselines.
ROLLING_BASELINE_DAYS = 7
# Silence gap (in days) that triggers the return-after-absence signal.
RETURN_AFTER_ABSENCE_DAYS = 7
@dataclass
class BehavioralEvent:
    """One recorded message event used as input to behavioral analysis."""

    session_id: str  # conversation/session this event belongs to
    timestamp: datetime  # timezone-aware; naive inputs are coerced to UTC by record()
    message_length: int  # character length of the message (clamped to >= 0 by record())
    crisis_score: float = 0.0  # per-message crisis score, clamped into [0, 1] by record()
    role: str = "user"  # only "user" events are analyzed; others are stored but ignored
@dataclass
class BehavioralSignal:
    """A single named behavioral risk signal with severity, evidence, and score."""

    signal_type: str
    risk_level: str
    description: str
    evidence: list[str] = field(default_factory=list)
    score: float = 0.0

    def as_dict(self) -> dict[str, Any]:
        """Return a JSON-safe snapshot; the evidence list is copied, not shared."""
        snapshot: dict[str, Any] = {}
        snapshot["signal_type"] = self.signal_type
        snapshot["risk_level"] = self.risk_level
        snapshot["description"] = self.description
        snapshot["evidence"] = list(self.evidence)
        snapshot["score"] = self.score
        return snapshot
class BehavioralTracker:
    """In-memory tracker for behavioral crisis signals.

    Privacy-first: all state lives in this object — no files, no network,
    no database. Feed events via record() and read analysis via
    get_risk_signals().
    """

    def __init__(self) -> None:
        # session_id -> events for that session, kept sorted by timestamp
        self._events_by_session: dict[str, list[BehavioralEvent]] = defaultdict(list)

    def record(
        self,
        session_id: str,
        timestamp: datetime,
        message_length: int,
        *,
        crisis_score: float = 0.0,
        role: str = "user",
    ) -> None:
        """Record one message event; naive timestamps are assumed to be UTC."""
        if timestamp.tzinfo is None:
            timestamp = timestamp.replace(tzinfo=timezone.utc)
        event = BehavioralEvent(
            session_id=session_id,
            timestamp=timestamp,
            # defensively clamp inputs: length >= 0, score within [0, 1]
            message_length=max(0, int(message_length)),
            crisis_score=max(0.0, min(1.0, float(crisis_score))),
            role=role,
        )
        self._events_by_session[session_id].append(event)
        # keep per-session events ordered so the positional windows below are valid
        self._events_by_session[session_id].sort(key=lambda item: item.timestamp)

    def get_risk_signals(self, session_id: str) -> dict[str, Any]:
        """Analyze one session's user messages and return aggregate risk data.

        Returns a dict with frequency_change, is_late_night,
        session_length_trend, withdrawal_detected, behavioral_score, and the
        individual signals (as dicts) sorted most severe first.
        """
        events = [event for event in self._events_by_session.get(session_id, []) if event.role == "user"]
        if not events:
            # neutral defaults for an unknown or empty session
            return {
                "frequency_change": 1.0,
                "is_late_night": False,
                "session_length_trend": "stable",
                "withdrawal_detected": False,
                "behavioral_score": 0.0,
                "signals": [],
            }
        signals: list[BehavioralSignal] = []
        frequency_change = self._compute_frequency_change(events)
        frequency_signal = self._analyze_frequency(events, frequency_change)
        if frequency_signal:
            signals.append(frequency_signal)
        time_signal = self._analyze_time(events)
        if time_signal:
            signals.append(time_signal)
        withdrawal_signal = self._analyze_withdrawal(session_id, events)
        if withdrawal_signal:
            signals.append(withdrawal_signal)
        absence_signal = self._analyze_return_after_absence(session_id, events)
        if absence_signal:
            signals.append(absence_signal)
        escalation_signal = self._analyze_escalation(events)
        if escalation_signal:
            signals.append(escalation_signal)
        session_length_trend = self._compute_session_length_trend(session_id, events)
        behavioral_score = self._compute_behavioral_score(signals)
        # most severe risk level first; ties broken by higher score
        risk_order = {"HIGH": 0, "MEDIUM": 1, "LOW": 2}
        signals.sort(key=lambda item: (risk_order.get(item.risk_level, 9), -item.score))
        return {
            "frequency_change": frequency_change,
            "is_late_night": any(item.signal_type == "time" for item in signals),
            "session_length_trend": session_length_trend,
            "withdrawal_detected": any(item.signal_type == "withdrawal" for item in signals),
            "behavioral_score": behavioral_score,
            "signals": [item.as_dict() for item in signals],
        }

    def _all_user_events(self) -> list[BehavioralEvent]:
        # All user-role events across every session, in chronological order.
        events: list[BehavioralEvent] = []
        for session_events in self._events_by_session.values():
            events.extend(event for event in session_events if event.role == "user")
        events.sort(key=lambda item: item.timestamp)
        return events

    def _daily_count_baseline(self, current_date) -> float:
        """Average user messages/day over the 7 days *before* current_date.

        Days with no activity count as zero, so the average reflects true
        volume rather than only active days.
        """
        events = self._all_user_events()
        counts: dict[Any, int] = {}
        # pre-seed the window so zero-activity days participate in the average
        for offset in range(1, ROLLING_BASELINE_DAYS + 1):
            counts[current_date - timedelta(days=offset)] = 0
        for event in events:
            event_date = event.timestamp.date()
            if event_date in counts:
                counts[event_date] += 1
        return sum(counts.values()) / ROLLING_BASELINE_DAYS

    def _compute_frequency_change(self, events: list[BehavioralEvent]) -> float:
        """Ratio of the last hour's message count to the rolling hourly baseline."""
        latest = events[-1].timestamp
        window_start = latest - timedelta(hours=1)
        current_hour_count = sum(1 for event in events if event.timestamp >= window_start)
        baseline_daily = self._daily_count_baseline(latest.date())
        # floor the baseline so users with little history don't divide by ~0
        baseline_hourly = max(baseline_daily / 24.0, 0.1)
        return round(current_hour_count / baseline_hourly, 2)

    def _analyze_frequency(self, events: list[BehavioralEvent], frequency_change: float) -> BehavioralSignal | None:
        """Signal a burst: >= 6 messages in the last hour at >= 3x baseline."""
        latest = events[-1].timestamp
        window_start = latest - timedelta(hours=1)
        current_hour_count = sum(1 for event in events if event.timestamp >= window_start)
        if current_hour_count >= 6 and frequency_change >= 3.0:
            level = "HIGH" if frequency_change >= 6.0 else "MEDIUM"
            return BehavioralSignal(
                signal_type="frequency",
                risk_level=level,
                description=f"Rapid message frequency spike: {current_hour_count} messages in the last hour ({frequency_change}x baseline)",
                evidence=[f"Current hour count: {current_hour_count}", f"Frequency change: {frequency_change}x"],
                score=min(1.0, frequency_change / 8.0),
            )
        return None

    def _analyze_time(self, events: list[BehavioralEvent]) -> BehavioralSignal | None:
        """Signal late-night activity based on the latest message's hour.

        NOTE(review): hours come from the stored timestamps (UTC unless the
        caller supplied another tzinfo) — confirm callers pass the user's
        local time if local-night semantics are intended.
        """
        latest = events[-1].timestamp
        hour = latest.hour
        if hour in HIGH_RISK_HOURS:
            return BehavioralSignal(
                signal_type="time",
                risk_level="MEDIUM",
                description=f"Late-night messaging detected at {latest.strftime('%H:%M')}",
                evidence=[f"Latest message timestamp: {latest.isoformat()}"],
                score=0.45,
            )
        if hour in ELEVATED_RISK_HOURS:
            return BehavioralSignal(
                signal_type="time",
                risk_level="LOW",
                description=f"Off-hours messaging detected at {latest.strftime('%H:%M')}",
                evidence=[f"Latest message timestamp: {latest.isoformat()}"],
                score=0.2,
            )
        return None

    def _analyze_withdrawal(self, session_id: str, events: list[BehavioralEvent]) -> BehavioralSignal | None:
        """Signal withdrawal: today's volume dropped sharply below baseline."""
        current_date = events[-1].timestamp.date()
        baseline_daily = self._daily_count_baseline(current_date)
        if baseline_daily < 3.0:
            # too little history for a drop to be meaningful
            return None
        current_day_count = sum(1 for event in events if event.timestamp.date() == current_date)
        current_avg_len = sum(event.message_length for event in events if event.timestamp.date() == current_date) / max(current_day_count, 1)
        # recent user events from *other* sessions, used for length comparison
        prior_events = [
            event
            for sid, session_events in self._events_by_session.items()
            if sid != session_id
            for event in session_events
            if event.role == "user" and event.timestamp.date() >= current_date - timedelta(days=ROLLING_BASELINE_DAYS)
        ]
        if not prior_events:
            return None
        prior_avg_len = sum(event.message_length for event in prior_events) / len(prior_events)
        if current_day_count <= max(1, baseline_daily * 0.3):
            score = 0.55 if current_day_count == 1 else 0.4
            # much shorter messages than usual strengthen the withdrawal reading
            if current_avg_len < prior_avg_len * 0.5:
                score += 0.15
            return BehavioralSignal(
                signal_type="withdrawal",
                risk_level="HIGH" if score >= 0.6 else "MEDIUM",
                description="Sharp drop from recent communication baseline suggests withdrawal/isolation",
                evidence=[
                    f"Current day count: {current_day_count}",
                    f"7-day daily baseline: {baseline_daily:.2f}",
                    f"Average message length: {current_avg_len:.1f} vs {prior_avg_len:.1f}",
                ],
                score=min(1.0, score),
            )
        return None

    def _analyze_return_after_absence(self, session_id: str, events: list[BehavioralEvent]) -> BehavioralSignal | None:
        """Signal a return after >= RETURN_AFTER_ABSENCE_DAYS of silence."""
        current_start = events[0].timestamp
        prior_events = [
            event
            for sid, session_events in self._events_by_session.items()
            if sid != session_id
            for event in session_events
            if event.role == "user" and event.timestamp < current_start
        ]
        if not prior_events:
            return None
        latest_prior = max(prior_events, key=lambda item: item.timestamp)
        gap = current_start - latest_prior.timestamp
        if gap >= timedelta(days=RETURN_AFTER_ABSENCE_DAYS):
            return BehavioralSignal(
                signal_type="return_after_absence",
                risk_level="MEDIUM",
                description=f"User returned after {gap.days} days of silence",
                evidence=[f"Last prior activity: {latest_prior.timestamp.isoformat()}"],
                # longer absences score higher; capped at 14 days -> 1.0
                score=min(1.0, gap.days / 14.0),
            )
        return None

    def _analyze_escalation(self, events: list[BehavioralEvent]) -> BehavioralSignal | None:
        """Signal a rising crisis-score trend across recent scored messages."""
        scored = [event for event in events if event.crisis_score > 0]
        if len(scored) < 3:
            return None
        recent = scored[-5:]
        # compare the first half's average crisis score with the second half's
        midpoint = max(1, len(recent) // 2)
        first_avg = sum(event.crisis_score for event in recent[:midpoint]) / len(recent[:midpoint])
        second_avg = sum(event.crisis_score for event in recent[midpoint:]) / len(recent[midpoint:])
        if second_avg >= max(0.4, first_avg * 1.3):
            return BehavioralSignal(
                signal_type="escalation",
                risk_level="HIGH" if second_avg >= 0.65 else "MEDIUM",
                description=f"Behavioral escalation: crisis score trend rose from {first_avg:.2f} to {second_avg:.2f}",
                evidence=[f"Recent crisis scores: {[round(event.crisis_score, 2) for event in recent]}"],
                score=min(1.0, second_avg),
            )
        return None

    def _compute_session_length_trend(self, session_id: str, events: list[BehavioralEvent]) -> str:
        """Classify this session's duration vs recent sessions: "increasing"/"decreasing"/"stable"."""
        current_duration = (events[-1].timestamp - events[0].timestamp).total_seconds()
        previous_durations: list[float] = []
        current_start = events[0].timestamp
        for sid, session_events in self._events_by_session.items():
            if sid == session_id:
                continue
            user_events = [event for event in session_events if event.role == "user"]
            if len(user_events) < 2:
                # a single message has no measurable duration
                continue
            if user_events[-1].timestamp < current_start - timedelta(days=ROLLING_BASELINE_DAYS):
                # ignore sessions older than the rolling baseline window
                continue
            previous_durations.append((user_events[-1].timestamp - user_events[0].timestamp).total_seconds())
        if not previous_durations:
            return "stable"
        average_duration = sum(previous_durations) / len(previous_durations)
        if current_duration > average_duration * 1.5:
            return "increasing"
        if current_duration < average_duration * 0.5:
            return "decreasing"
        return "stable"

    def _compute_behavioral_score(self, signals: list[BehavioralSignal]) -> float:
        """Overall score: strongest signal plus a small boost per extra signal."""
        if not signals:
            return 0.0
        max_score = max(signal.score for signal in signals)
        # each additional concurrent signal adds 0.05, capped at +0.2
        multi_signal_boost = min(0.2, 0.05 * (len(signals) - 1))
        return round(min(1.0, max_score + multi_signal_boost), 2)

View File

@@ -22,7 +22,7 @@ from .response import (
get_system_prompt_modifier,
CrisisResponse,
)
from .metrics import record_detection, get_summary, print_summary, record_continuation
from .session_tracker import CrisisSessionTracker
def check_crisis(text: str) -> dict:
@@ -35,9 +35,6 @@ def check_crisis(text: str) -> dict:
detection = detect_crisis(text)
response = generate_response(detection)
# Record metrics (privacy-preserving — no message content stored)
record_detection(detection.level, detection.indicators)
return {
"level": detection.level,
"score": detection.score,
@@ -97,21 +94,6 @@ def format_gateway_response(text: str, pretty: bool = True) -> str:
return json.dumps(result)
# ── Metrics endpoint ─────────────────────────────────────────────
def get_metrics_summary() -> dict:
"""
Return crisis detection metrics summary.
Privacy-preserving: no PII, no message content, just counts.
"""
return get_summary()
def get_metrics_report() -> str:
"""Return formatted weekly metrics report for stdout/logs."""
return print_summary()
# ── Quick test interface ────────────────────────────────────────
def _interactive():

View File

@@ -1,270 +0,0 @@
"""
Crisis Detection Metrics for the-door.
Privacy-preserving analytics layer. Tracks:
- Detection counts per level (CRITICAL, HIGH, MEDIUM, LOW)
- Keyword firing frequency (pattern hashes, not raw text)
- Time-based distribution (hourly buckets)
- Post-intervention behavior
NO PII is stored — no message content, no user identifiers, no timestamps
finer than hourly granularity.
Storage: JSON file at crisis_metrics.json (configurable path).
"""
import json
import os
import hashlib
import time
from datetime import datetime, timezone
from dataclasses import dataclass, field, asdict
from typing import Dict, List, Optional
# ── Pattern hash helper ───────────────────────────────────────────
def _hash_pattern(pattern: str) -> str:
"""Hash a regex pattern to avoid storing raw crisis text."""
return hashlib.sha256(pattern.encode()).hexdigest()[:12]
# ── Hour bucket helper ────────────────────────────────────────────
def _current_hour_key() -> str:
"""Return current UTC hour as 'YYYY-MM-DDTHH'."""
return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H")
def _current_day_key() -> str:
"""Return current UTC day as 'YYYY-MM-DD'."""
return datetime.now(timezone.utc).strftime("%Y-%m-%d")
# ── Metrics store ─────────────────────────────────────────────────
@dataclass
class CrisisMetrics:
    """In-memory metrics accumulator with JSON persistence.

    Privacy-preserving: stores only counts, hashed pattern ids, and hourly /
    daily buckets — never message content or user identifiers.
    """
    # Total detections per level
    detections_by_level: Dict[str, int] = field(default_factory=lambda: {
        "CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "NONE": 0
    })
    # Pattern hash -> count (tracks which indicators fire most)
    keyword_frequency: Dict[str, int] = field(default_factory=dict)
    # Hourly detection counts: "YYYY-MM-DDTHH" -> total
    hourly_counts: Dict[str, int] = field(default_factory=dict)
    # Daily detection counts: "YYYY-MM-DD" -> {level: count}
    daily_counts: Dict[str, Dict[str, int]] = field(default_factory=dict)
    # Total messages scanned (for false-positive estimation)
    total_scanned: int = 0
    # Total detections (non-NONE)
    total_detections: int = 0
    # Post-intervention tracking (session-scoped, reset on restart)
    interventions: int = 0
    continued_after_intervention: int = 0

    def record(self, level: str, indicators: List[str]) -> None:
        """Record a single detection event.

        `indicators` are the raw matched patterns; only their hashes are kept.
        """
        self.total_scanned += 1
        # Level counts
        self.detections_by_level[level] = self.detections_by_level.get(level, 0) + 1
        if level != "NONE":
            self.total_detections += 1
        # Hourly bucket
        hour = _current_hour_key()
        self.hourly_counts[hour] = self.hourly_counts.get(hour, 0) + 1
        # Daily bucket
        day = _current_day_key()
        if day not in self.daily_counts:
            self.daily_counts[day] = {}
        daily = self.daily_counts[day]
        daily[level] = daily.get(level, 0) + 1
        # Keyword frequency (hash patterns)
        for pattern in indicators:
            h = _hash_pattern(pattern)
            self.keyword_frequency[h] = self.keyword_frequency.get(h, 0) + 1
        # Intervention tracking
        if level in ("CRITICAL", "HIGH"):
            self.interventions += 1

    def record_continuation(self) -> None:
        """Call when a user continues conversation after crisis intervention."""
        self.continued_after_intervention += 1

    def to_dict(self) -> dict:
        """Serialize to JSON-safe dict."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict) -> "CrisisMetrics":
        """Deserialize from dict.

        NOTE(review): missing keys fall back to empty dicts rather than the
        level-seeded defaults; record() tolerates that via .get(level, 0).
        """
        return cls(
            detections_by_level=data.get("detections_by_level", {}),
            keyword_frequency=data.get("keyword_frequency", {}),
            hourly_counts=data.get("hourly_counts", {}),
            daily_counts=data.get("daily_counts", {}),
            total_scanned=data.get("total_scanned", 0),
            total_detections=data.get("total_detections", 0),
            interventions=data.get("interventions", 0),
            continued_after_intervention=data.get("continued_after_intervention", 0),
        )

    # ── Derived metrics ───────────────────────────────────────────
    @property
    def false_positive_estimate(self) -> float:
        """
        Estimate false positive rate.
        Heuristic: users who continue chatting after HIGH/CRITICAL intervention
        were likely not in true crisis. Returns 0.0 if no interventions.
        """
        if self.interventions == 0:
            return 0.0
        return self.continued_after_intervention / self.interventions

    @property
    def detection_rate(self) -> float:
        """Fraction of scanned messages that triggered any detection."""
        if self.total_scanned == 0:
            return 0.0
        return self.total_detections / self.total_scanned

    def top_keywords(self, n: int = 10) -> List[tuple]:
        """Return top N most-fired pattern hashes with counts, descending."""
        sorted_kw = sorted(self.keyword_frequency.items(), key=lambda x: -x[1])
        return sorted_kw[:n]

    def weekly_summary(self) -> dict:
        """Generate a 7-day summary (today inclusive) from daily_counts."""
        from datetime import timedelta
        today = datetime.now(timezone.utc).date()
        summary = {
            "period_start": (today - timedelta(days=6)).isoformat(),
            "period_end": today.isoformat(),
            "total_detections": 0,
            "by_level": {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0},
            "daily_totals": {},
        }
        for i in range(7):
            day = (today - timedelta(days=6 - i)).isoformat()
            day_data = self.daily_counts.get(day, {})
            day_total = sum(day_data.values())
            summary["daily_totals"][day] = day_total
            summary["total_detections"] += day_total
            for level in ("CRITICAL", "HIGH", "MEDIUM", "LOW"):
                summary["by_level"][level] += day_data.get(level, 0)
        summary["false_positive_estimate"] = self.false_positive_estimate
        summary["detection_rate"] = self.detection_rate
        summary["top_keywords"] = self.top_keywords(5)
        return summary

    def format_summary(self) -> str:
        """Human-readable weekly summary for stdout/log."""
        s = self.weekly_summary()
        lines = [
            "=== Crisis Detection Weekly Summary ===",
            f"Period: {s['period_start']} to {s['period_end']}",
            f"Total detections: {s['total_detections']}",
            "",
            "By level:",
        ]
        for level in ("CRITICAL", "HIGH", "MEDIUM", "LOW"):
            count = s["by_level"][level]
            # simple ASCII bar: one '#' per detection
            bar = "#" * count
            lines.append(f" {level:10s} {count:4d} {bar}")
        lines.append("")
        lines.append("Daily trend:")
        for day, count in s["daily_totals"].items():
            bar = "#" * count
            lines.append(f" {day} {count:4d} {bar}")
        lines.append("")
        lines.append(f"Detection rate: {s['detection_rate']:.1%}")
        lines.append(f"False positive estimate: {s['false_positive_estimate']:.1%}")
        lines.append("")
        lines.append("Top indicators (hashed):")
        for h, count in s["top_keywords"]:
            lines.append(f" {h} {count:4d}x")
        return "\n".join(lines)
# ── Persistence ───────────────────────────────────────────────────
# Metrics file lives next to this module by default.
_DEFAULT_PATH = os.path.join(os.path.dirname(__file__), "crisis_metrics.json")


def load_metrics(path: str = _DEFAULT_PATH) -> CrisisMetrics:
    """Load metrics from JSON file, or return empty metrics if missing.

    Corrupt JSON is treated like a missing file: start fresh rather than
    let an analytics failure take down the crisis pipeline.
    """
    try:
        with open(path) as f:
            data = json.load(f)
        return CrisisMetrics.from_dict(data)
    except (FileNotFoundError, json.JSONDecodeError):
        return CrisisMetrics()


def save_metrics(metrics: CrisisMetrics, path: str = _DEFAULT_PATH) -> None:
    """Persist metrics to JSON file (overwrites any existing file)."""
    with open(path, "w") as f:
        json.dump(metrics.to_dict(), f, indent=2)
# ── Global singleton ──────────────────────────────────────────────
# Module-level cache so every caller shares one metrics accumulator.
_metrics: Optional[CrisisMetrics] = None
_metrics_path: str = _DEFAULT_PATH


def get_metrics(path: Optional[str] = None) -> CrisisMetrics:
    """Get or initialize the global metrics singleton.

    Passing a path repoints the persistence file for all later saves.
    """
    global _metrics, _metrics_path
    if path:
        _metrics_path = path
    if _metrics is None:
        _metrics = load_metrics(_metrics_path)
    return _metrics


def record_detection(level: str, indicators: List[str], path: Optional[str] = None) -> None:
    """Record a crisis detection event and persist."""
    m = get_metrics(path)
    m.record(level, indicators)
    save_metrics(m, _metrics_path)


def record_continuation(path: Optional[str] = None) -> None:
    """Record that a user continued after crisis intervention."""
    m = get_metrics(path)
    m.record_continuation()
    save_metrics(m, _metrics_path)


def get_summary(path: Optional[str] = None) -> dict:
    """Get the weekly summary as a dict."""
    m = get_metrics(path)
    return m.weekly_summary()


def print_summary(path: Optional[str] = None) -> str:
    """Get and format the weekly summary."""
    m = get_metrics(path)
    return m.format_summary()

325
crisis/session_tracker.py Normal file
View File

@@ -0,0 +1,325 @@
"""
Session-level crisis tracking and escalation for the-door (P0 #35).
Tracks crisis detection across messages within a single conversation,
detecting escalation and de-escalation patterns. Privacy-first: no
persistence beyond the conversation session.
Each message is analyzed in isolation by detect.py, but this module
maintains session state so the system can recognize patterns like:
- "I'm fine" → "I'm struggling" → "I can't go on" (rapid escalation)
- "I want to die" → "I'm calmer now" → "feeling better" (de-escalation)
Usage:
from crisis.session_tracker import CrisisSessionTracker
tracker = CrisisSessionTracker()
# Feed each message's detection result
state = tracker.record(detect_crisis("I'm having a tough day"))
print(state.current_level) # "LOW"
print(state.is_escalating) # False
state = tracker.record(detect_crisis("I feel hopeless"))
print(state.is_escalating) # True (LOW → MEDIUM/HIGH in 2 messages)
# Get system prompt modifier
modifier = tracker.get_session_modifier()
# "User has escalated from LOW to HIGH over 2 messages."
# Reset for new session
tracker.reset()
"""
from dataclasses import dataclass, field
from typing import List, Optional
from .behavioral import BehavioralTracker
from .detect import CrisisDetectionResult, SCORES
# Level ordering for comparison (higher = more severe)
LEVEL_ORDER = {"NONE": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 3, "CRITICAL": 4}


@dataclass
class SessionState:
    """Immutable snapshot of session crisis tracking state."""
    current_level: str = "NONE"  # level of the most recent message
    peak_level: str = "NONE"  # most severe level seen this session
    message_count: int = 0  # total messages recorded this session
    level_history: List[str] = field(default_factory=list)  # level per message, in order
    is_escalating: bool = False
    is_deescalating: bool = False
    escalation_rate: float = 0.0  # levels gained per message
    consecutive_low_messages: int = 0  # for de-escalation tracking
    behavioral_score: float = 0.0  # aggregate from BehavioralTracker, 0..1
    behavioral_signals: List[dict] = field(default_factory=list)  # BehavioralSignal.as_dict() items
    frequency_change: float = 1.0  # last-hour volume vs rolling baseline
    is_late_night: bool = False
    session_length_trend: str = "stable"  # "increasing" / "decreasing" / "stable"
    withdrawal_detected: bool = False
class CrisisSessionTracker:
    """
    Session-level crisis state tracker.
    Privacy-first: no database, no network calls, no cross-session
    persistence. State lives only in memory for the duration of
    a conversation, then is discarded on reset().
    """
    # Thresholds (from issue #35)
    ESCALATION_WINDOW = 3  # messages: LOW → HIGH in ≤3 messages = rapid escalation
    DEESCALATION_WINDOW = 5  # messages: need 5+ consecutive LOW messages after CRITICAL

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset all session state. Call on new conversation."""
        self._current_level = "NONE"
        self._peak_level = "NONE"
        self._message_count = 0
        self._level_history: List[str] = []
        self._consecutive_low = 0
        # a fresh BehavioralTracker drops all prior behavioral events too
        self._behavioral_tracker = BehavioralTracker()
        self._behavioral_session_id = "current-session"

    @property
    def state(self) -> SessionState:
        """Return immutable snapshot of current session state."""
        is_escalating = self._detect_escalation()
        is_deescalating = self._detect_deescalation()
        rate = self._compute_escalation_rate()
        # merge in behavioral analysis for the single in-memory session
        behavioral = self._behavioral_tracker.get_risk_signals(self._behavioral_session_id)
        return SessionState(
            current_level=self._current_level,
            peak_level=self._peak_level,
            message_count=self._message_count,
            level_history=list(self._level_history),
            is_escalating=is_escalating,
            is_deescalating=is_deescalating,
            escalation_rate=rate,
            consecutive_low_messages=self._consecutive_low,
            behavioral_score=behavioral["behavioral_score"],
            behavioral_signals=behavioral["signals"],
            frequency_change=behavioral["frequency_change"],
            is_late_night=behavioral["is_late_night"],
            session_length_trend=behavioral["session_length_trend"],
            withdrawal_detected=behavioral["withdrawal_detected"],
        )

    def record(
        self,
        detection: CrisisDetectionResult,
        *,
        timestamp=None,
        message_length: int = 0,
        role: str = "user",
    ) -> SessionState:
        """
        Record a crisis detection result for the current message.
        Returns updated SessionState. `timestamp` defaults to now (UTC).
        """
        from datetime import datetime, timezone
        level = detection.level
        self._message_count += 1
        self._level_history.append(level)
        # Update peak
        if LEVEL_ORDER.get(level, 0) > LEVEL_ORDER.get(self._peak_level, 0):
            self._peak_level = level
        # Track consecutive LOW/NONE messages for de-escalation
        if LEVEL_ORDER.get(level, 0) <= LEVEL_ORDER["LOW"]:
            self._consecutive_low += 1
        else:
            self._consecutive_low = 0
        if role == "user":
            if timestamp is None:
                timestamp = datetime.now(timezone.utc)
            # only user messages feed the behavioral tracker
            self._behavioral_tracker.record(
                self._behavioral_session_id,
                timestamp,
                message_length=message_length,
                crisis_score=detection.score,
                role=role,
            )
        self._current_level = level
        return self.state

    def _detect_escalation(self) -> bool:
        """
        Detect rapid escalation: LOW → HIGH within ESCALATION_WINDOW messages.
        Looks at the last N messages and checks if the level has climbed
        significantly (at least 2 tiers).
        """
        if len(self._level_history) < 2:
            return False
        window = self._level_history[-self.ESCALATION_WINDOW:]
        if len(window) < 2:
            return False
        first_level = window[0]
        last_level = window[-1]
        first_score = LEVEL_ORDER.get(first_level, 0)
        last_score = LEVEL_ORDER.get(last_level, 0)
        # Escalation = climbed at least 2 tiers in the window
        return (last_score - first_score) >= 2

    def _detect_deescalation(self) -> bool:
        """
        Detect de-escalation: was at CRITICAL/HIGH, now sustained LOW/NONE
        for DEESCALATION_WINDOW consecutive messages.
        """
        if LEVEL_ORDER.get(self._peak_level, 0) < LEVEL_ORDER["HIGH"]:
            return False
        return self._consecutive_low >= self.DEESCALATION_WINDOW

    def _compute_escalation_rate(self) -> float:
        """
        Compute levels gained per message over the conversation.
        Positive = escalating, negative = de-escalating, 0 = stable.
        """
        if self._message_count < 2:
            return 0.0
        first = LEVEL_ORDER.get(self._level_history[0], 0)
        current = LEVEL_ORDER.get(self._current_level, 0)
        return (current - first) / (self._message_count - 1)

    def get_session_modifier(self) -> str:
        """
        Generate a system prompt modifier reflecting session-level crisis state.
        Returns empty string if no session context is relevant.
        """
        if self._message_count < 2:
            return ""
        s = self.state
        if s.is_escalating:
            return (
                f"User has escalated from {self._level_history[0]} to "
                f"{s.current_level} over {s.message_count} messages. "
                f"Peak crisis level this session: {s.peak_level}. "
                "Respond with heightened awareness. The trajectory is "
                "worsening — prioritize safety and connection."
            )
        if s.is_deescalating:
            return (
                f"User previously reached {s.peak_level} crisis level "
                f"but has been at {s.current_level} or below for "
                f"{s.consecutive_low_messages} consecutive messages. "
                "The situation appears to be stabilizing. Continue "
                "supportive engagement while remaining vigilant."
            )
        # Neither trend fired: still surface past-peak and behavioral context.
        notes = []
        if s.peak_level in ("CRITICAL", "HIGH") and s.current_level not in ("CRITICAL", "HIGH"):
            notes.append(
                f"User previously reached {s.peak_level} crisis level this session (currently {s.current_level}). "
                "Continue with care and awareness of the earlier crisis."
            )
        if s.behavioral_score >= 0.35 and s.behavioral_signals:
            signal_names = ", ".join(item["signal_type"] for item in s.behavioral_signals)
            notes.append(
                f"Behavioral risk signals detected this session: {signal_names}. "
                "Use the behavioral context to increase sensitivity and warmth."
            )
        return " ".join(notes)

    def get_ui_hints(self) -> dict:
        """
        Return UI hints based on session state for the frontend.
        These are advisory — the frontend decides what to show.
        """
        s = self.state
        hints = {
            "session_escalating": s.is_escalating,
            "session_deescalating": s.is_deescalating,
            "session_peak_level": s.peak_level,
            "session_message_count": s.message_count,
            "behavioral_score": s.behavioral_score,
            "is_late_night": s.is_late_night,
            "withdrawal_detected": s.withdrawal_detected,
            "session_length_trend": s.session_length_trend,
        }
        if s.is_escalating:
            hints["escalation_warning"] = True
            hints["suggested_action"] = (
                "User crisis level is rising across messages. "
                "Consider increasing intervention level."
            )
        if s.behavioral_score >= 0.5:
            hints["behavioral_warning"] = True
            # setdefault: don't overwrite the escalation suggestion above
            hints.setdefault(
                "suggested_action",
                "Behavioral risk patterns are active. Keep the response warm, grounded, and alert."
            )
        return hints
def check_crisis_with_session(
    text: str,
    tracker: CrisisSessionTracker,
    timestamp=None,
) -> dict:
    """
    Convenience: detect crisis and update session state in one call.
    Returns combined single-message detection + session-level context
    under the "session" key.
    """
    # Local imports avoid a circular dependency: gateway imports this module.
    from .detect import detect_crisis
    from .gateway import check_crisis
    single_result = check_crisis(text)
    # NOTE(review): check_crisis() already calls detect_crisis() internally,
    # so the text is analyzed twice per call — confirm this is acceptable.
    detection = detect_crisis(text)
    session_state = tracker.record(detection, timestamp=timestamp, message_length=len(text))
    behavioral = {
        "frequency_change": session_state.frequency_change,
        "is_late_night": session_state.is_late_night,
        "session_length_trend": session_state.session_length_trend,
        "withdrawal_detected": session_state.withdrawal_detected,
        "behavioral_score": session_state.behavioral_score,
        "signals": session_state.behavioral_signals,
    }
    return {
        **single_result,
        "session": {
            "current_level": session_state.current_level,
            "peak_level": session_state.peak_level,
            "message_count": session_state.message_count,
            "is_escalating": session_state.is_escalating,
            "is_deescalating": session_state.is_deescalating,
            "modifier": tracker.get_session_modifier(),
            "ui_hints": tracker.get_ui_hints(),
            "behavioral": behavioral,
        },
    }

View File

@@ -680,7 +680,7 @@ html, body {
<!-- Footer -->
<footer id="footer">
<a href="/about" aria-label="About The Door">about</a>
<a href="/about.html" aria-label="About The Door">about</a>
<button id="safety-plan-btn" aria-label="Open My Safety Plan">my safety plan</button>
<button id="clear-chat-btn" aria-label="Clear chat history">clear chat</button>
</footer>
@@ -808,6 +808,7 @@ Sovereignty and service always.`;
var crisisPanel = document.getElementById('crisis-panel');
var crisisOverlay = document.getElementById('crisis-overlay');
var overlayDismissBtn = document.getElementById('overlay-dismiss-btn');
var overlayCallLink = document.querySelector('.overlay-call');
var statusDot = document.querySelector('.status-dot');
var statusText = document.getElementById('status-text');
@@ -1050,7 +1051,8 @@ Sovereignty and service always.`;
}
}, 1000);
overlayDismissBtn.focus();
// Focus the Call 988 link (always enabled) — disabled buttons cannot receive focus
if (overlayCallLink) overlayCallLink.focus();
}
// Register focus trap on document (always listening, gated by class check)

View File

@@ -0,0 +1,101 @@
"""
Tests for behavioral crisis pattern detection (#133).
"""
import os
import sys
import unittest
from datetime import datetime, timedelta, timezone
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from crisis.session_tracker import CrisisSessionTracker, check_crisis_with_session
from crisis.behavioral import BehavioralTracker
class TestBehavioralTracker(unittest.TestCase):
    """Exercise BehavioralTracker risk signals: frequency, time-of-day, withdrawal."""

    def _seed_day(self, tracker, *, session_id, day, count, start_hour=10, message_length=48, crisis_score=0.0):
        """Record `count` messages on 2026-04-<day>, spaced ten minutes apart."""
        day_start = datetime(2026, 4, day, start_hour, 0, tzinfo=timezone.utc)
        for idx in range(count):
            stamp = day_start + timedelta(minutes=idx * 10)
            tracker.record(
                session_id,
                stamp,
                message_length=message_length,
                crisis_score=crisis_score,
            )

    def test_frequency_change_uses_seven_day_baseline(self):
        bt = BehavioralTracker()
        # One quiet week of baseline: two messages per day.
        for d in range(1, 8):
            self._seed_day(bt, session_id=f"baseline-{d}", day=d, count=2)
        # Day 8: a burst of eight messages within eight minutes.
        burst_start = datetime(2026, 4, 8, 14, 0, tzinfo=timezone.utc)
        for minute in range(8):
            bt.record(
                "current-session",
                burst_start + timedelta(minutes=minute),
                message_length=72,
                crisis_score=0.1,
            )
        summary = bt.get_risk_signals("current-session")
        self.assertGreater(summary["frequency_change"], 2.0)
        frequency_signals = [s for s in summary["signals"] if s["signal_type"] == "frequency"]
        self.assertTrue(frequency_signals)
        self.assertGreater(summary["behavioral_score"], 0.0)

    def test_late_night_messages_raise_flag(self):
        bt = BehavioralTracker()
        # Three messages between 02:15 and 02:29 UTC — deep night hours.
        first = datetime(2026, 4, 10, 2, 15, tzinfo=timezone.utc)
        for step in range(3):
            bt.record(
                "late-night",
                first + timedelta(minutes=step * 7),
                message_length=35,
                crisis_score=0.0,
            )
        summary = bt.get_risk_signals("late-night")
        self.assertTrue(summary["is_late_night"])
        time_signals = [s for s in summary["signals"] if s["signal_type"] == "time"]
        self.assertTrue(time_signals)

    def test_withdrawal_detected_after_large_drop_from_baseline(self):
        bt = BehavioralTracker()
        # Baseline week: ten long (80-char) messages every day.
        for d in range(1, 8):
            self._seed_day(bt, session_id=f"baseline-{d}", day=d, count=10, message_length=80)
        # Then a single short message — a sharp drop in engagement.
        bt.record(
            "withdrawal-session",
            datetime(2026, 4, 9, 11, 0, tzinfo=timezone.utc),
            message_length=18,
            crisis_score=0.0,
        )
        summary = bt.get_risk_signals("withdrawal-session")
        self.assertTrue(summary["withdrawal_detected"])
        withdrawal_signals = [s for s in summary["signals"] if s["signal_type"] == "withdrawal"]
        self.assertTrue(withdrawal_signals)
class TestBehavioralSessionIntegration(unittest.TestCase):
    """Behavioral signals must surface through check_crisis_with_session()."""

    def test_check_crisis_with_session_includes_behavioral_summary(self):
        tracker = CrisisSessionTracker()
        # 02:00 UTC — within the late-night window.
        start = datetime(2026, 4, 20, 2, 0, tzinfo=timezone.utc)
        check_crisis_with_session("can't sleep", tracker, timestamp=start)
        check_crisis_with_session("still here", tracker, timestamp=start + timedelta(minutes=1))
        result = check_crisis_with_session(
            "everything feels loud", tracker, timestamp=start + timedelta(minutes=2)
        )
        behavioral = result["session"]["behavioral"]
        expected_keys = (
            "frequency_change",
            "is_late_night",
            "session_length_trend",
            "withdrawal_detected",
            "behavioral_score",
        )
        for key in expected_keys:
            self.assertIn(key, behavioral)
        self.assertTrue(behavioral["is_late_night"])
        self.assertGreater(behavioral["behavioral_score"], 0.0)
# Allow running this test module directly: python tests/test_behavioral.py
if __name__ == '__main__':
    unittest.main()

View File

@@ -1,263 +0,0 @@
"""
Tests for crisis detection metrics (issue #37).
Verifies privacy-preserving analytics layer works correctly.
"""
import unittest
import sys
import os
import json
import tempfile
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from crisis.metrics import (
CrisisMetrics,
load_metrics,
save_metrics,
_hash_pattern,
)
class TestCrisisMetrics(unittest.TestCase):
"""Test the CrisisMetrics dataclass and persistence."""
def setUp(self):
self.metrics = CrisisMetrics()
self.tmpfile = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
self.tmpfile.close()
def tearDown(self):
if os.path.exists(self.tmpfile.name):
os.unlink(self.tmpfile.name)
def test_record_none(self):
"""Recording NONE should increment scanned but not detections."""
self.metrics.record("NONE", [])
self.assertEqual(self.metrics.total_scanned, 1)
self.assertEqual(self.metrics.total_detections, 0)
self.assertEqual(self.metrics.detections_by_level["NONE"], 1)
def test_record_critical(self):
"""Recording CRITICAL should increment all relevant counters."""
pattern = r"\bkill\s*(my)?self\b"
self.metrics.record("CRITICAL", [pattern])
self.assertEqual(self.metrics.total_scanned, 1)
self.assertEqual(self.metrics.total_detections, 1)
self.assertEqual(self.metrics.detections_by_level["CRITICAL"], 1)
self.assertEqual(self.metrics.interventions, 1)
# Keyword should be hashed, not raw
h = _hash_pattern(pattern)
self.assertEqual(self.metrics.keyword_frequency[h], 1)
def test_record_high_is_intervention(self):
"""HIGH detections should count as interventions."""
self.metrics.record("HIGH", [r"\bdespair\b"])
self.assertEqual(self.metrics.interventions, 1)
def test_record_medium_not_intervention(self):
"""MEDIUM detections should NOT count as interventions."""
self.metrics.record("MEDIUM", [r"\bbroken\b", r"\bworthless\b"])
self.assertEqual(self.metrics.interventions, 0)
self.assertEqual(self.metrics.total_detections, 1)
def test_record_multiple(self):
"""Multiple detections should accumulate."""
self.metrics.record("CRITICAL", [r"\bpattern1\b"])
self.metrics.record("HIGH", [r"\bpattern2\b"])
self.metrics.record("NONE", [])
self.metrics.record("LOW", [r"\bsad\b"])
self.assertEqual(self.metrics.total_scanned, 4)
self.assertEqual(self.metrics.total_detections, 3)
self.assertEqual(self.metrics.interventions, 2)
def test_continuation_tracking(self):
"""Recording continuation should affect false positive estimate."""
self.metrics.record("CRITICAL", [r"\bpattern\b"])
self.assertEqual(self.metrics.false_positive_estimate, 0.0)
self.metrics.record_continuation()
self.assertEqual(self.metrics.continued_after_intervention, 1)
self.assertEqual(self.metrics.false_positive_estimate, 1.0)
def test_false_positive_estimate_zero_when_no_interventions(self):
"""False positive rate should be 0.0 with no interventions."""
self.assertEqual(self.metrics.false_positive_estimate, 0.0)
def test_detection_rate(self):
"""Detection rate should be detections/scanned."""
self.metrics.record("CRITICAL", [])
self.metrics.record("NONE", [])
self.metrics.record("NONE", [])
self.assertEqual(self.metrics.detection_rate, 1.0 / 3.0)
def test_detection_rate_zero_when_no_scans(self):
self.assertEqual(self.metrics.detection_rate, 0.0)
def test_hourly_bucket(self):
"""Detections should be bucketed by hour."""
self.metrics.record("HIGH", [])
hour_key = self.metrics.hourly_counts
self.assertEqual(len(hour_key), 1)
for k, v in hour_key.items():
self.assertRegex(k, r"^\d{4}-\d{2}-\d{2}T\d{2}$")
self.assertEqual(v, 1)
def test_daily_bucket(self):
"""Detections should be bucketed by day with level."""
self.metrics.record("CRITICAL", [])
self.metrics.record("HIGH", [])
self.assertEqual(len(self.metrics.daily_counts), 1)
for day, levels in self.metrics.daily_counts.items():
self.assertRegex(day, r"^\d{4}-\d{2}-\d{2}$")
self.assertEqual(levels["CRITICAL"], 1)
self.assertEqual(levels["HIGH"], 1)
def test_top_keywords(self):
"""top_keywords should return most frequent hashed patterns."""
p1 = r"\bpattern_a\b"
p2 = r"\bpattern_b\b"
h1 = _hash_pattern(p1)
h2 = _hash_pattern(p2)
for _ in range(5):
self.metrics.record("HIGH", [p1])
for _ in range(2):
self.metrics.record("MEDIUM", [p2])
top = self.metrics.top_keywords(2)
self.assertEqual(len(top), 2)
self.assertEqual(top[0], (h1, 5))
self.assertEqual(top[1], (h2, 2))
def test_weekly_summary_structure(self):
"""Weekly summary should have expected keys."""
self.metrics.record("CRITICAL", [])
summary = self.metrics.weekly_summary()
self.assertIn("period_start", summary)
self.assertIn("period_end", summary)
self.assertIn("total_detections", summary)
self.assertIn("by_level", summary)
self.assertIn("daily_totals", summary)
self.assertIn("false_positive_estimate", summary)
self.assertIn("detection_rate", summary)
self.assertIn("top_keywords", summary)
self.assertEqual(summary["total_detections"], 1)
self.assertEqual(summary["by_level"]["CRITICAL"], 1)
def test_format_summary_returns_string(self):
"""format_summary should return a non-empty string."""
self.metrics.record("CRITICAL", [])
result = self.metrics.format_summary()
self.assertIsInstance(result, str)
self.assertIn("CRITICAL", result)
def test_persistence_round_trip(self):
"""Metrics should survive save/load cycle."""
self.metrics.record("CRITICAL", [r"\btest\b"])
self.metrics.record("HIGH", [])
self.metrics.record_continuation()
save_metrics(self.metrics, self.tmpfile.name)
loaded = load_metrics(self.tmpfile.name)
self.assertEqual(loaded.total_scanned, 2)
self.assertEqual(loaded.total_detections, 2)
self.assertEqual(loaded.detections_by_level["CRITICAL"], 1)
self.assertEqual(loaded.detections_by_level["HIGH"], 1)
self.assertEqual(loaded.interventions, 2)
self.assertEqual(loaded.continued_after_intervention, 1)
def test_load_missing_file_returns_empty(self):
"""Loading a nonexistent file should return empty metrics."""
m = load_metrics("/tmp/nonexistent_metrics_999.json")
self.assertEqual(m.total_scanned, 0)
self.assertEqual(m.total_detections, 0)
def test_load_corrupt_file_returns_empty(self):
"""Loading a corrupt JSON file should return empty metrics."""
with open(self.tmpfile.name, "w") as f:
f.write("NOT JSON {{{")
m = load_metrics(self.tmpfile.name)
self.assertEqual(m.total_scanned, 0)
def test_hash_pattern_consistent(self):
"""Same pattern should always produce same hash."""
h1 = _hash_pattern(r"\bkill\s*self\b")
h2 = _hash_pattern(r"\bkill\s*self\b")
self.assertEqual(h1, h2)
def test_hash_pattern_different(self):
"""Different patterns should produce different hashes."""
h1 = _hash_pattern(r"\bpattern_a\b")
h2 = _hash_pattern(r"\bpattern_b\b")
self.assertNotEqual(h1, h2)
class TestMetricsGatewayIntegration(unittest.TestCase):
"""Test that metrics are recorded through the gateway."""
def test_check_crisis_records_metrics(self):
"""check_crisis should record metrics automatically."""
import tempfile
from crisis.metrics import get_metrics, _metrics
# Use a temp metrics file
tmpfile = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
tmpfile.close()
try:
# Reset global singleton
import crisis.metrics
crisis.metrics._metrics = None
crisis.metrics._metrics_path = tmpfile.name
from crisis.gateway import check_crisis
# A critical message
check_crisis("I want to kill myself")
m = get_metrics()
self.assertEqual(m.total_scanned, 1)
self.assertEqual(m.detections_by_level["CRITICAL"], 1)
# A safe message
check_crisis("I had a good day today")
self.assertEqual(m.total_scanned, 2)
self.assertEqual(m.detections_by_level["NONE"], 1)
finally:
# Reset singleton
crisis.metrics._metrics = None
if os.path.exists(tmpfile.name):
os.unlink(tmpfile.name)
class TestNoPIIStorage(unittest.TestCase):
"""Verify that no message content is stored in metrics."""
def test_no_text_in_serialized_metrics(self):
"""Metrics to_dict should never contain message text."""
from crisis.metrics import CrisisMetrics
m = CrisisMetrics()
# Record with actual crisis text — only patterns should be hashed
m.record("CRITICAL", [r"\bkill\s*self\b"])
serialized = json.dumps(m.to_dict())
# These strings should NOT appear in serialized metrics
self.assertNotIn("kill myself", serialized)
self.assertNotIn("I want to", serialized)
self.assertNotIn("user", serialized.lower())
# Only hashed patterns should appear (short hex strings)
self.assertIn(_hash_pattern(r"\bkill\s*self\b"), serialized)
if __name__ == "__main__":
unittest.main()

View File

@@ -52,6 +52,34 @@ class TestCrisisOverlayFocusTrap(unittest.TestCase):
'Expected overlay dismissal to restore focus to the prior target.',
)
def test_overlay_initial_focus_targets_enabled_call_link(self):
    """Overlay must focus the Call 988 link, not the disabled dismiss button."""
    # Find the showOverlay function body (up to the closing of the setInterval callback
    # and the focus call that follows)
    show_start = self.html.find('function showOverlay()')
    self.assertGreater(show_start, -1, "showOverlay function not found")
    # Find the focus call within showOverlay (before the next function registration)
    # NOTE(review): the 2000-char window is a heuristic — it assumes the body of
    # showOverlay (through the focus call) fits inside it; confirm against the HTML.
    focus_section = self.html[show_start:show_start + 2000]
    self.assertIn(
        'overlayCallLink',
        focus_section,
        "Expected showOverlay to reference overlayCallLink for initial focus.",
    )
    # Ensure the old buggy pattern is gone
    # NOTE(review): magic offsets (800..1200) presumably bracket where the old
    # focus call sat; any edit to showOverlay can shift them and silently make
    # this check vacuous — consider scanning all of focus_section instead.
    focus_line_region = self.html[show_start + 800:show_start + 1200]
    self.assertNotIn(
        'overlayDismissBtn.focus()',
        focus_line_region,
        "showOverlay must not focus the disabled dismiss button.",
    )
def test_overlay_call_link_variable_is_declared(self):
    """The page's JS must look up the .overlay-call link element."""
    needle = "querySelector('.overlay-call')"
    message = "Expected a JS reference to the .overlay-call link element."
    self.assertIn(needle, self.html, message)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()

View File

@@ -50,6 +50,22 @@ class TestCrisisOfflinePage(unittest.TestCase):
for phrase in required_phrases:
self.assertIn(phrase, self.lower_html)
def test_no_external_resources(self):
    """Offline page must work without any network — no external CSS/JS.

    tel: and sms: links are fine: the OS dialer handles them, nothing is
    fetched over the network.
    """
    import re
    html = self.html
    # Fix: the original only matched https:// — an http:// stylesheet or
    # script is equally unreachable offline, so reject both schemes.
    external_urls = re.findall(r'(?:href|src)=["\']https?://', html)
    self.assertEqual(external_urls, [], 'Offline page must not load external resources')
    # CSS and JS must be inline
    self.assertIn('<style>', html, 'CSS must be inline')
    self.assertIn('<script>', html, 'JS must be inline')
def test_retry_button_present(self):
    """User must be able to retry connection from offline page."""
    # Both the element id and its visible label must be in the page.
    for expected in ('retry-connection', 'Retry connection'):
        self.assertIn(expected, self.html)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,277 @@
"""
Tests for crisis session tracking and escalation (P0 #35).
Covers: session_tracker.py
Run with: python -m pytest tests/test_session_tracker.py -v
"""
import unittest
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from crisis.detect import detect_crisis
from crisis.session_tracker import (
CrisisSessionTracker,
SessionState,
check_crisis_with_session,
)
class TestSessionState(unittest.TestCase):
    """A freshly constructed SessionState must be completely neutral."""

    def test_default_state(self):
        state = SessionState()
        self.assertEqual(state.current_level, "NONE")
        self.assertEqual(state.peak_level, "NONE")
        self.assertEqual(state.message_count, 0)
        self.assertEqual(state.level_history, [])
        # Neither trend flag may be set before any messages arrive.
        self.assertFalse(state.is_escalating)
        self.assertFalse(state.is_deescalating)
class TestSessionTracking(unittest.TestCase):
    """Basic bookkeeping: levels, peak, history, counts, reset."""

    def setUp(self):
        self.tracker = CrisisSessionTracker()

    def _feed(self, text):
        """Detect `text` and record it on the tracker; return the new state."""
        return self.tracker.record(detect_crisis(text))

    def test_record_none_message(self):
        state = self._feed("Hello Timmy")
        self.assertEqual(state.current_level, "NONE")
        self.assertEqual(state.message_count, 1)
        self.assertEqual(state.peak_level, "NONE")

    def test_record_low_message(self):
        self._feed("Hello")
        state = self._feed("Having a rough day")
        self.assertIn(state.current_level, ("LOW", "NONE"))
        self.assertEqual(state.message_count, 2)

    def test_record_critical_updates_peak(self):
        self._feed("Having a rough day")
        state = self._feed("I want to kill myself")
        self.assertEqual(state.current_level, "CRITICAL")
        self.assertEqual(state.peak_level, "CRITICAL")

    def test_peak_preserved_after_drop(self):
        """Peak level should stay at the highest seen, even after de-escalation."""
        self._feed("I want to kill myself")
        state = self._feed("I'm feeling a bit better")
        self.assertEqual(state.peak_level, "CRITICAL")

    def test_level_history(self):
        for text in ("Hello", "Having a rough day", "I want to die"):
            state = self._feed(text)
        self.assertEqual(len(state.level_history), 3)
        self.assertEqual(state.level_history[0], "NONE")
        self.assertEqual(state.level_history[2], "CRITICAL")

    def test_reset_clears_state(self):
        self._feed("I want to kill myself")
        self.tracker.reset()
        state = self.tracker.state
        self.assertEqual(state.current_level, "NONE")
        self.assertEqual(state.peak_level, "NONE")
        self.assertEqual(state.message_count, 0)
        self.assertEqual(state.level_history, [])
class TestEscalationDetection(unittest.TestCase):
    """Test escalation detection: LOW → HIGH in ≤3 messages."""

    def setUp(self):
        # Fresh tracker per test — escalation state accumulates across records.
        self.tracker = CrisisSessionTracker()

    def test_no_escalation_single_message(self):
        # A single message carries no trend to escalate from.
        self.tracker.record(detect_crisis("Hello"))
        self.assertFalse(self.tracker.state.is_escalating)

    def test_no_escalation_stable(self):
        """Two normal messages should not trigger escalation."""
        self.tracker.record(detect_crisis("Hello"))
        state = self.tracker.record(detect_crisis("How are you?"))
        self.assertFalse(state.is_escalating)

    def test_rapid_escalation_low_to_high(self):
        """LOW → HIGH in 2 messages = rapid escalation."""
        self.tracker.record(detect_crisis("Having a rough day"))
        state = self.tracker.record(detect_crisis("I can't take this anymore, everything is pointless"))
        # Depending on detection, this could be HIGH or CRITICAL
        # NOTE(review): the assertion is gated on the detector's output, so the
        # test silently asserts nothing if detection stays below HIGH — consider
        # pinning the expected detected level explicitly.
        if state.current_level in ("HIGH", "CRITICAL"):
            self.assertTrue(state.is_escalating)

    def test_rapid_escalation_three_messages(self):
        """NONE → LOW → HIGH in 3 messages = escalation."""
        self.tracker.record(detect_crisis("Hello"))
        self.tracker.record(detect_crisis("Having a rough day"))
        state = self.tracker.record(detect_crisis("I feel completely hopeless with no way out"))
        # NOTE(review): guarded assertion — vacuous if detection stays below HIGH.
        if state.current_level in ("HIGH", "CRITICAL"):
            self.assertTrue(state.is_escalating)

    def test_escalation_rate(self):
        """Rate should be positive when escalating."""
        self.tracker.record(detect_crisis("Hello"))
        self.tracker.record(detect_crisis("I want to die"))
        state = self.tracker.state
        self.assertGreater(state.escalation_rate, 0)
class TestDeescalationDetection(unittest.TestCase):
    """Test de-escalation: sustained LOW after HIGH/CRITICAL."""

    def setUp(self):
        self.tracker = CrisisSessionTracker()

    def test_no_deescalation_without_prior_crisis(self):
        """No de-escalation if never reached HIGH/CRITICAL."""
        for _ in range(6):
            self.tracker.record(detect_crisis("Hello"))
        self.assertFalse(self.tracker.state.is_deescalating)

    def test_deescalation_after_critical(self):
        """5+ consecutive LOW/NONE messages after CRITICAL = de-escalation."""
        self.tracker.record(detect_crisis("I want to kill myself"))
        for _ in range(5):
            self.tracker.record(detect_crisis("I'm doing better today"))
        state = self.tracker.state
        # NOTE(review): guarded assertion — vacuous unless the first message was
        # actually detected as CRITICAL.
        if state.peak_level == "CRITICAL":
            self.assertTrue(state.is_deescalating)

    def test_deescalation_after_high(self):
        """5+ consecutive LOW/NONE messages after HIGH = de-escalation."""
        self.tracker.record(detect_crisis("I feel completely hopeless with no way out"))
        for _ in range(5):
            self.tracker.record(detect_crisis("Feeling okay"))
        state = self.tracker.state
        # NOTE(review): guarded — only asserts when the peak landed exactly on HIGH.
        if state.peak_level == "HIGH":
            self.assertTrue(state.is_deescalating)

    def test_interrupted_deescalation(self):
        """De-escalation resets if a HIGH message interrupts."""
        self.tracker.record(detect_crisis("I want to kill myself"))
        for _ in range(3):
            self.tracker.record(detect_crisis("Doing better"))
        # Interrupt with another crisis
        self.tracker.record(detect_crisis("I feel hopeless again"))
        self.tracker.record(detect_crisis("Feeling okay now"))
        state = self.tracker.state
        # Should NOT be de-escalating yet (counter reset)
        self.assertFalse(state.is_deescalating)
class TestSessionModifier(unittest.TestCase):
    """Test system prompt modifier generation."""

    def setUp(self):
        self.tracker = CrisisSessionTracker()

    def test_no_modifier_for_single_message(self):
        # A single benign message should contribute nothing to the prompt.
        self.tracker.record(detect_crisis("Hello"))
        self.assertEqual(self.tracker.get_session_modifier(), "")

    def test_no_modifier_for_stable_session(self):
        # A calm two-message session should also yield an empty modifier.
        self.tracker.record(detect_crisis("Hello"))
        self.tracker.record(detect_crisis("Good morning"))
        self.assertEqual(self.tracker.get_session_modifier(), "")

    def test_escalation_modifier(self):
        """Escalating session should produce a modifier."""
        self.tracker.record(detect_crisis("Hello"))
        self.tracker.record(detect_crisis("I want to die"))
        modifier = self.tracker.get_session_modifier()
        # NOTE(review): assertions are gated on is_escalating, so this test is
        # vacuous when the session is not flagged as escalating.
        if self.tracker.state.is_escalating:
            self.assertIn("escalated", modifier.lower())
            self.assertIn("NONE", modifier)
            self.assertIn("CRITICAL", modifier)

    def test_deescalation_modifier(self):
        """De-escalating session should mention stabilizing."""
        self.tracker.record(detect_crisis("I want to kill myself"))
        for _ in range(5):
            self.tracker.record(detect_crisis("I'm feeling okay"))
        modifier = self.tracker.get_session_modifier()
        # NOTE(review): guarded — asserts nothing when de-escalation is not flagged.
        if self.tracker.state.is_deescalating:
            self.assertIn("stabilizing", modifier.lower())

    def test_prior_crisis_modifier(self):
        """Past crisis should be noted even without active escalation."""
        self.tracker.record(detect_crisis("I want to die"))
        self.tracker.record(detect_crisis("Feeling a bit better"))
        modifier = self.tracker.get_session_modifier()
        # Should note the prior CRITICAL
        if modifier:
            self.assertIn("CRITICAL", modifier)
class TestUIHints(unittest.TestCase):
    """Test UI hint generation."""

    def setUp(self):
        self.tracker = CrisisSessionTracker()

    def test_ui_hints_structure(self):
        # Every session — even a calm one — must expose the core hint keys.
        self.tracker.record(detect_crisis("Hello"))
        hints = self.tracker.get_ui_hints()
        self.assertIn("session_escalating", hints)
        self.assertIn("session_deescalating", hints)
        self.assertIn("session_peak_level", hints)
        self.assertIn("session_message_count", hints)

    def test_ui_hints_escalation_warning(self):
        """Escalating session should have warning hint."""
        self.tracker.record(detect_crisis("Hello"))
        self.tracker.record(detect_crisis("I want to die"))
        hints = self.tracker.get_ui_hints()
        # NOTE(review): guarded — asserts nothing unless escalation was flagged.
        if hints["session_escalating"]:
            self.assertTrue(hints.get("escalation_warning"))
            self.assertIn("suggested_action", hints)
class TestCheckCrisisWithSession(unittest.TestCase):
    """The convenience wrapper must merge detection output with session context."""

    def test_returns_combined_data(self):
        tracker = CrisisSessionTracker()
        result = check_crisis_with_session("I want to die", tracker)
        self.assertIn("level", result)
        self.assertIn("session", result)
        session = result["session"]
        for key in ("current_level", "peak_level", "modifier"):
            self.assertIn(key, session)

    def test_session_updates_across_calls(self):
        tracker = CrisisSessionTracker()
        check_crisis_with_session("Hello", tracker)
        second = check_crisis_with_session("I want to die", tracker)
        self.assertEqual(second["session"]["message_count"], 2)
        self.assertEqual(second["session"]["peak_level"], "CRITICAL")
class TestPrivacy(unittest.TestCase):
    """Verify privacy-first design principles."""

    def test_no_persistence_mechanism(self):
        """Session tracker should have no database, file, or network calls."""
        import inspect
        source = inspect.getsource(CrisisSessionTracker)
        # Should not import database, requests, or file I/O
        # NOTE(review): this is a plain substring scan of lowercased source —
        # "open(" would also match names like "reopen(" or text inside comments
        # and docstrings; treat a failure as a prompt for manual review, not
        # proof of actual I/O.
        forbidden = ["sqlite", "requests", "urllib", "open(", "httpx", "aiohttp"]
        for word in forbidden:
            self.assertNotIn(word, source.lower(),
                f"Session tracker should not use {word} — privacy-first design")

    def test_state_contained_in_memory(self):
        """All state should be instance attributes, not module-level."""
        tracker = CrisisSessionTracker()
        tracker.record(detect_crisis("I want to die"))
        # New tracker should have clean state (no global contamination)
        fresh = CrisisSessionTracker()
        self.assertEqual(fresh.state.current_level, "NONE")
# Allow running this test module directly: python -m pytest also works (see module docstring).
if __name__ == '__main__':
    unittest.main()