Compare commits

..

1 Commit

Author SHA1 Message Date
Alexander Whitestone
4d084654d8 feat: add crisis analytics metrics (#37)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 5s
Smoke Test / smoke (pull_request) Successful in 6s
2026-04-17 02:24:01 -04:00
7 changed files with 305 additions and 270 deletions

View File

@@ -6,8 +6,15 @@ Stands between a broken man and a machine that would tell him to die.
from .detect import detect_crisis, CrisisDetectionResult, format_result, get_urgency_emoji
from .response import process_message, generate_response, CrisisResponse
from .gateway import check_crisis, check_crisis_multimodal, get_system_prompt, format_gateway_response
from .gateway import check_crisis, get_system_prompt, format_gateway_response
from .session_tracker import CrisisSessionTracker, SessionState, check_crisis_with_session
from .metrics import (
build_metrics_event,
append_metrics_event,
load_metrics_events,
build_weekly_summary,
render_weekly_summary,
)
__all__ = [
"detect_crisis",
@@ -16,7 +23,6 @@ __all__ = [
"generate_response",
"CrisisResponse",
"check_crisis",
"check_crisis_multimodal",
"get_system_prompt",
"format_result",
"format_gateway_response",
@@ -24,4 +30,9 @@ __all__ = [
"CrisisSessionTracker",
"SessionState",
"check_crisis_with_session",
"build_metrics_event",
"append_metrics_event",
"load_metrics_events",
"build_weekly_summary",
"render_weekly_summary",
]

View File

@@ -2,21 +2,18 @@
Crisis Gateway Module for the-door.
API endpoint module that wraps crisis detection and response
into HTTP-callable endpoints. Integrates detect.py, unified_scorer.py, and response.py.
into HTTP-callable endpoints. Integrates detect.py and response.py.
Usage:
from crisis.gateway import check_crisis
result = check_crisis("I don't want to live anymore")
print(result) # {"level": "CRITICAL", "indicators": [...], "response": {...}}
"""
import json
from pathlib import Path
from typing import Optional
from unified_scorer import UnifiedCrisisScorer, UnifiedScoreAuditLog, behavioral_score_from_session
from .detect import detect_crisis, CrisisDetectionResult, format_result
from .compassion_router import router
from .response import (
@@ -26,9 +23,17 @@ from .response import (
CrisisResponse,
)
from .session_tracker import CrisisSessionTracker
from .metrics import build_metrics_event, append_metrics_event
def check_crisis(text: str) -> dict:
def check_crisis(
text: str,
metrics_log_path: Optional[str] = None,
*,
continued_conversation: bool = False,
false_positive: bool = False,
now: Optional[float] = None,
) -> dict:
"""
Full crisis check returning structured data.
@@ -38,7 +43,7 @@ def check_crisis(text: str) -> dict:
detection = detect_crisis(text)
response = generate_response(detection)
return {
result = {
"level": detection.level,
"score": detection.score,
"indicators": detection.indicators,
@@ -52,72 +57,21 @@ def check_crisis(text: str) -> dict:
"escalate": response.escalate,
}
def check_crisis_multimodal(
text: str,
*,
tracker: Optional[CrisisSessionTracker] = None,
voice_score: Optional[float] = None,
image_score: Optional[float] = None,
behavioral_score: Optional[float] = None,
audit_log_path: Optional[Path] = None,
weights: Optional[dict] = None,
) -> dict:
"""Combine text, voice, image, and behavioral signals into one crisis assessment."""
detection = detect_crisis(text)
session_state = tracker.record(detection) if tracker is not None else None
if behavioral_score is None and session_state is not None:
behavioral_score = behavioral_score_from_session(session_state)
scorer = UnifiedCrisisScorer(
weights=weights,
audit_log=UnifiedScoreAuditLog(audit_log_path) if audit_log_path else None,
metrics_event = build_metrics_event(
detection,
continued_conversation=continued_conversation,
false_positive=false_positive,
now=now,
)
assessment = scorer.score(
text_score=detection.score,
voice_score=voice_score,
image_score=image_score,
behavioral_score=behavioral_score,
source_text=text,
)
unified_detection = CrisisDetectionResult(
level=assessment.level.value,
indicators=detection.indicators,
recommended_action=detection.recommended_action,
score=assessment.combined_score,
matches=detection.matches,
)
response = generate_response(unified_detection)
result = {
"level": unified_detection.level,
"score": unified_detection.score,
"indicators": detection.indicators,
"recommended_action": unified_detection.recommended_action,
"timmy_message": response.timmy_message,
"ui": {
"show_crisis_panel": response.show_crisis_panel,
"show_overlay": response.show_overlay,
"provide_988": response.provide_988,
},
"escalate": response.escalate,
"unified": {
"level": assessment.level.value,
"combined_score": assessment.combined_score,
"weights": assessment.weights,
"modalities": assessment.modalities,
"present_modalities": assessment.present_modalities,
},
}
if session_state is not None:
result["session"] = {
"current_level": session_state.current_level,
"peak_level": session_state.peak_level,
"message_count": session_state.message_count,
"is_escalating": session_state.is_escalating,
"is_deescalating": session_state.is_deescalating,
}
if metrics_log_path:
metrics_event = append_metrics_event(
metrics_log_path,
detection,
continued_conversation=continued_conversation,
false_positive=false_positive,
now=now,
)
result["metrics_event"] = metrics_event
return result

166
crisis/metrics.py Normal file
View File

@@ -0,0 +1,166 @@
"""Privacy-preserving crisis analytics metrics for the-door.
Stores only timestamps, crisis levels, indicator categories, and operator
feedback flags. No raw message text or PII is persisted.
"""
from __future__ import annotations
import argparse
import json
import time
from collections import Counter
from pathlib import Path
from typing import Iterable
from .detect import CrisisDetectionResult, detect_crisis
# Canonical crisis levels; build_weekly_summary reports a count for each of
# these even when the count is zero.
LEVELS = ("NONE", "LOW", "MEDIUM", "HIGH", "CRITICAL")
def normalize_indicator(indicator: str) -> str:
    """Map an indicator to its privacy-safe keyword/category identifier.

    Currently the identity mapping; kept as a seam for future redaction.
    """
    return indicator


def build_metrics_event(
    detection: CrisisDetectionResult,
    *,
    continued_conversation: bool = False,
    false_positive: bool = False,
    now: float | None = None,
) -> dict:
    """Build a privacy-preserving metrics event from a detection result.

    Only the timestamp, crisis level, normalized indicator identifiers,
    and operator feedback flags are recorded — never the message text.
    `now` overrides the wall clock (useful for tests/replays).
    """
    if now is None:
        stamp = float(time.time())
    else:
        stamp = float(now)
    safe_indicators = [normalize_indicator(item) for item in detection.indicators]
    event = {
        "timestamp": stamp,
        "level": detection.level,
        "indicator_count": len(safe_indicators),
        "indicators": safe_indicators,
        "continued_conversation": bool(continued_conversation),
        "false_positive": bool(false_positive),
    }
    return event
def append_metrics_event(
    log_path: str | Path,
    detection: CrisisDetectionResult,
    *,
    continued_conversation: bool = False,
    false_positive: bool = False,
    now: float | None = None,
) -> dict:
    """Append one privacy-safe metrics event to a JSONL log and return it.

    The parent directory is created on first use so callers may point at a
    not-yet-existing log location.
    """
    event = build_metrics_event(
        detection,
        continued_conversation=continued_conversation,
        false_positive=false_positive,
        now=now,
    )
    target = Path(log_path)
    target.parent.mkdir(parents=True, exist_ok=True)
    with target.open("a", encoding="utf-8") as sink:
        sink.write(json.dumps(event) + "\n")
    return event
def load_metrics_events(log_path: str | Path) -> list[dict]:
    """Load every event from a JSONL log; a missing file yields [].

    Blank lines are skipped so logs tolerate trailing newlines.
    """
    path = Path(log_path)
    if not path.exists():
        return []
    raw_lines = path.read_text(encoding="utf-8").splitlines()
    return [json.loads(raw) for raw in raw_lines if raw.strip()]
def build_weekly_summary(
    events: Iterable[dict],
    *,
    now: float | None = None,
    window_days: int = 7,
) -> dict:
    """Aggregate metrics events from the trailing window into a summary.

    Reports per-level detection counts, the ten most common indicator
    keywords, an operator-flagged false-positive rate over non-NONE
    detections, and how many flagged users continued the conversation.
    """
    reference = float(time.time() if now is None else now)
    window_start = reference - window_days * 86400
    recent = [
        event for event in events
        if float(event.get("timestamp", 0)) >= window_start
    ]

    # Same members as module-level LEVELS; every level starts at zero.
    level_counts = dict.fromkeys(("NONE", "LOW", "MEDIUM", "HIGH", "CRITICAL"), 0)
    keywords: Counter[str] = Counter()
    flagged = []  # events whose level is anything other than NONE
    continued = 0

    for event in recent:
        level = event.get("level", "NONE")
        level_counts[level] = level_counts.get(level, 0) + 1
        keywords.update(event.get("indicators", []))
        if level == "NONE":
            continue
        flagged.append(event)
        if event.get("continued_conversation"):
            continued += 1

    flagged_fp = sum(1 for event in flagged if event.get("false_positive"))
    fp_rate = flagged_fp / len(flagged) if flagged else 0.0

    return {
        "window_days": window_days,
        "total_events": len(recent),
        "detections_per_level": level_counts,
        "most_common_keywords": [
            {"keyword": word, "count": count}
            for word, count in keywords.most_common(10)
        ],
        "false_positive_estimate": fp_rate,
        "continued_after_intervention": continued,
    }
def render_weekly_summary(summary: dict) -> str:
    """Serialize a weekly summary as human-readable, 2-space-indented JSON."""
    rendered = json.dumps(summary, indent=2)
    return rendered
def write_weekly_summary(path: str | Path, summary: dict) -> Path:
    """Write the summary as indented JSON plus a trailing newline to *path*.

    Parent directories are created as needed; returns the written Path.
    """
    destination = Path(path)
    destination.parent.mkdir(parents=True, exist_ok=True)
    # Identical output to render_weekly_summary(summary), inlined here.
    body = json.dumps(summary, indent=2) + "\n"
    destination.write_text(body, encoding="utf-8")
    return destination
def record_text_event(
    text: str,
    log_path: str | Path,
    *,
    continued_conversation: bool = False,
    false_positive: bool = False,
    now: float | None = None,
) -> dict:
    """Run crisis detection on *text* and append only the derived metrics.

    The raw text is used transiently for detection and is never persisted.
    """
    detection = detect_crisis(text)
    return append_metrics_event(
        log_path,
        detection,
        continued_conversation=continued_conversation,
        false_positive=false_positive,
        now=now,
    )
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: print (and optionally save) the weekly summary.

    Returns 0 on success; argparse handles usage errors itself.
    """
    parser = argparse.ArgumentParser(
        description="Privacy-preserving crisis metrics summary"
    )
    parser.add_argument("--log-path", required=True, help="JSONL event log path")
    parser.add_argument("--days", type=int, default=7, help="Summary window in days")
    parser.add_argument("--output", help="Optional file to write summary JSON")
    options = parser.parse_args(argv)

    summary = build_weekly_summary(
        load_metrics_events(options.log_path), window_days=options.days
    )
    print(render_weekly_summary(summary))
    if options.output:
        write_weekly_summary(options.output, summary)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,100 @@
"""Tests for privacy-preserving crisis metrics aggregation (issue #37)."""
from __future__ import annotations
import json
import os
import pathlib
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from crisis.detect import detect_crisis
from crisis.gateway import check_crisis
from crisis.metrics import (
append_metrics_event,
build_metrics_event,
build_weekly_summary,
load_metrics_events,
render_weekly_summary,
)
class TestMetricsEvent(unittest.TestCase):
    """build_metrics_event must capture signal without capturing the text."""

    def test_event_is_privacy_preserving(self):
        result = detect_crisis("I want to kill myself")
        event = build_metrics_event(
            result,
            continued_conversation=True,
            false_positive=False,
            now=1_700_000_000,
        )
        # Signal fields are present and correct...
        self.assertEqual(event["timestamp"], 1_700_000_000)
        self.assertEqual(event["level"], "CRITICAL")
        self.assertTrue(event["continued_conversation"])
        self.assertFalse(event["false_positive"])
        self.assertGreaterEqual(event["indicator_count"], 1)
        self.assertTrue(event["indicators"])
        # ...while the raw message appears nowhere in the event.
        self.assertNotIn("text", event)
        self.assertNotIn("message", event)
class TestMetricsLogAndSummary(unittest.TestCase):
    """Round-trip JSONL logging plus weekly aggregation."""

    def test_append_and_load_metrics_events(self):
        # Use a temp directory instead of a file in the CWD: a crash before
        # cleanup can no longer leave stray .jsonl files behind, and parallel
        # runs cannot collide on the same filename.
        import tempfile

        with tempfile.TemporaryDirectory() as tmp_dir:
            log_path = pathlib.Path(tmp_dir) / "events.jsonl"
            append_metrics_event(log_path, detect_crisis("I want to die"), now=1_700_000_000)
            events = load_metrics_events(log_path)
            self.assertEqual(len(events), 1)
            self.assertEqual(events[0]["level"], "CRITICAL")

    def test_weekly_summary_counts_levels_keywords_and_false_positives(self):
        # Four events inside the 7-day window: two CRITICAL (one operator-flagged
        # false positive), one LOW, one NONE.
        events = [
            build_metrics_event(detect_crisis("I want to die"), continued_conversation=True, false_positive=False, now=1_700_000_000),
            build_metrics_event(detect_crisis("I'm having a rough day"), continued_conversation=False, false_positive=False, now=1_700_000_100),
            build_metrics_event(detect_crisis("I want to die"), continued_conversation=False, false_positive=True, now=1_700_000_200),
            build_metrics_event(detect_crisis("Hello there"), continued_conversation=False, false_positive=False, now=1_700_000_300),
        ]
        summary = build_weekly_summary(events, now=1_700_000_400, window_days=7)
        self.assertEqual(summary["detections_per_level"]["CRITICAL"], 2)
        self.assertEqual(summary["detections_per_level"]["LOW"], 1)
        self.assertEqual(summary["detections_per_level"]["NONE"], 1)
        self.assertEqual(summary["continued_after_intervention"], 1)
        # Rate is over the 3 non-NONE detections, of which 1 was flagged.
        self.assertAlmostEqual(summary["false_positive_estimate"], 1 / 3, places=4)
        self.assertEqual(summary["most_common_keywords"][0]["count"], 2)

    def test_render_weekly_summary_mentions_required_metrics(self):
        events = [
            build_metrics_event(detect_crisis("I want to die"), continued_conversation=True, now=1_700_000_000),
            build_metrics_event(detect_crisis("I feel hopeless with no way out"), false_positive=True, now=1_700_000_100),
        ]
        summary = build_weekly_summary(events, now=1_700_000_200, window_days=7)
        rendered = render_weekly_summary(summary)
        # Every metric required by issue #37 must appear in the rendered report.
        self.assertIn("detections_per_level", rendered)
        self.assertIn("most_common_keywords", rendered)
        self.assertIn("false_positive_estimate", rendered)
        self.assertIn("continued_after_intervention", rendered)
class TestGatewayMetricsIntegration(unittest.TestCase):
    """check_crisis should surface the metrics event it builds."""

    def test_check_crisis_can_emit_metrics_event(self):
        outcome = check_crisis(
            "I want to die",
            metrics_log_path=None,
            continued_conversation=True,
            false_positive=False,
            now=1_700_000_000,
        )
        self.assertEqual(outcome["level"], "CRITICAL")
        self.assertIn("metrics_event", outcome)
        metrics = outcome["metrics_event"]
        self.assertEqual(metrics["timestamp"], 1_700_000_000)
        self.assertTrue(metrics["continued_conversation"])


if __name__ == "__main__":
    unittest.main()

View File

@@ -1,19 +0,0 @@
from crisis.gateway import check_crisis_multimodal
from crisis.session_tracker import CrisisSessionTracker
def test_multimodal_gateway_uses_unified_score_for_988_ui(tmp_path):
    """A high-severity multimodal message must trigger the 988 UI path."""
    session_tracker = CrisisSessionTracker()
    outcome = check_crisis_multimodal(
        "I want to kill myself tonight",
        tracker=session_tracker,
        voice_score=0.92,
        image_score=0.6,
        audit_log_path=tmp_path / "audit.jsonl",
    )
    unified = outcome["unified"]
    assert unified["level"] == "CRITICAL"
    assert outcome["ui"]["provide_988"] is True
    assert outcome["ui"]["show_overlay"] is True
    assert unified["modalities"]["voice"] == 0.92
    assert unified["modalities"]["behavioral"] >= 0.0

View File

@@ -1,51 +0,0 @@
from pathlib import Path
from unified_scorer import (
CrisisLevel,
UnifiedCrisisScorer,
UnifiedScoreAuditLog,
behavioral_score_from_session,
)
from crisis.session_tracker import SessionState
def test_unified_scorer_renormalizes_available_modalities_and_escalates():
    """Missing modalities redistribute weight; strong signals still escalate."""
    result = UnifiedCrisisScorer().score(
        text_score=1.0,
        voice_score=0.8,
        image_score=None,
        behavioral_score=0.7,
    )
    assert result.level is CrisisLevel.CRITICAL
    assert result.combined_score > 0.8
    assert result.present_modalities == ["text", "voice", "behavioral"]
def test_behavioral_score_rises_for_escalating_session_state():
    """An actively escalating session should score near the top of the range."""
    escalating = SessionState(
        current_level="HIGH",
        peak_level="CRITICAL",
        message_count=4,
        level_history=["LOW", "MEDIUM", "HIGH", "CRITICAL"],
        is_escalating=True,
        is_deescalating=False,
        escalation_rate=1.0,
        consecutive_low_messages=0,
    )
    assert behavioral_score_from_session(escalating) >= 0.8
def test_audit_log_persists_anonymized_score_entries(tmp_path):
    """Audit entries keep scores plus a fingerprint — never the raw text."""
    audit_path = tmp_path / "unified-score-audit.jsonl"
    scoring = UnifiedCrisisScorer(audit_log=UnifiedScoreAuditLog(audit_path))
    scoring.score(
        text_score=0.75,
        voice_score=0.2,
        image_score=0.1,
        behavioral_score=0.6,
        source_text="I feel trapped and hopeless",
    )
    entries = audit_path.read_text().strip().splitlines()
    assert len(entries) == 1
    record = entries[0]
    assert "trapped and hopeless" not in record
    assert '"text_fingerprint"' in record
    assert '"combined_score"' in record

View File

@@ -1,126 +0,0 @@
"""Unified multimodal crisis scoring for the-door."""
from __future__ import annotations
import hashlib
import json
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from crisis.session_tracker import SessionState
# Numeric score each crisis level contributes when folded into a combined score.
SCORE_BY_LEVEL = {"NONE": 0.0, "LOW": 0.25, "MEDIUM": 0.5, "HIGH": 0.75, "CRITICAL": 1.0}
# Ordinal rank used to compare severities (higher means more severe).
LEVEL_RANK = {"NONE": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 3, "CRITICAL": 4}
class CrisisLevel(Enum):
    """Severity levels produced by the unified scorer (values match names)."""
    NONE = "NONE"
    LOW = "LOW"
    MEDIUM = "MEDIUM"
    HIGH = "HIGH"
    CRITICAL = "CRITICAL"
# Relative contribution of each modality to the combined score; renormalized
# over whichever modalities are actually present for a given score() call.
DEFAULT_WEIGHTS: Dict[str, float] = {
    "text": 0.4,
    "voice": 0.25,
    "behavioral": 0.2,
    "image": 0.15,
}
@dataclass
class UnifiedAssessment:
    """Result of fusing per-modality scores into one crisis assessment."""
    level: CrisisLevel  # level derived from combined_score thresholds
    combined_score: float  # weighted average over present modalities
    weights: Dict[str, float]  # renormalized weights actually applied
    modalities: Dict[str, Optional[float]]  # raw per-modality inputs (None = absent)
    present_modalities: List[str]  # names of modalities that supplied a score
class UnifiedScoreAuditLog:
    """Append-only JSONL audit log that stores a text fingerprint, not text."""

    def __init__(self, path: Path | str):
        self.path = Path(path)

    def record(self, assessment: UnifiedAssessment, source_text: str = "") -> None:
        """Append one anonymized entry for *assessment* to the log file."""
        self.path.parent.mkdir(parents=True, exist_ok=True)
        if source_text:
            # Only a truncated SHA-256 digest of the text is persisted.
            digest = hashlib.sha256(source_text.encode("utf-8")).hexdigest()
            fingerprint = digest[:12]
        else:
            fingerprint = None
        entry = {
            "level": assessment.level.value,
            "combined_score": round(assessment.combined_score, 4),
            "weights": assessment.weights,
            "modalities": assessment.modalities,
            "present_modalities": assessment.present_modalities,
            "text_fingerprint": fingerprint,
        }
        with self.path.open("a", encoding="utf-8") as sink:
            sink.write(json.dumps(entry, sort_keys=True) + "\n")
class UnifiedCrisisScorer:
    """Fuse text/voice/image/behavioral scores into one weighted crisis level."""

    def __init__(self, weights: Optional[Dict[str, float]] = None, audit_log: Optional[UnifiedScoreAuditLog] = None):
        # Start from the defaults and overlay any caller-supplied weights.
        merged = dict(DEFAULT_WEIGHTS)
        merged.update(weights or {})
        self.weights = merged
        self.audit_log = audit_log

    def _normalize(self, modalities: Dict[str, Optional[float]]) -> Dict[str, float]:
        """Renormalize configured weights over the modalities that are present."""
        available = [name for name, value in modalities.items() if value is not None]
        if not available:
            return {}
        weight_sum = sum(self.weights[name] for name in available)
        return {name: self.weights[name] / weight_sum for name in available}

    def _level_for_score(self, score: float) -> CrisisLevel:
        """Map a combined score onto a level via fixed, exclusive thresholds."""
        bands = (
            (0.8, CrisisLevel.CRITICAL),
            (0.6, CrisisLevel.HIGH),
            (0.4, CrisisLevel.MEDIUM),
            (0.0, CrisisLevel.LOW),
        )
        for floor, level in bands:
            if score > floor:
                return level
        return CrisisLevel.NONE

    def score(
        self,
        *,
        text_score: Optional[float],
        voice_score: Optional[float] = None,
        image_score: Optional[float] = None,
        behavioral_score: Optional[float] = None,
        source_text: str = "",
    ) -> UnifiedAssessment:
        """Combine whichever modality scores are present into one assessment.

        Absent (None) modalities contribute nothing; the remaining weights
        are renormalized so present modalities always sum to one. If an
        audit log is attached, the assessment is recorded before returning.
        """
        inputs: Dict[str, Optional[float]] = {
            "text": text_score,
            "voice": voice_score,
            "behavioral": behavioral_score,
            "image": image_score,
        }
        normalized = self._normalize(inputs)
        combined = sum(
            (float(inputs[name]) * share for name, share in normalized.items()),
            0.0,
        )
        result = UnifiedAssessment(
            level=self._level_for_score(combined),
            combined_score=combined,
            weights=normalized,
            modalities=inputs,
            present_modalities=[name for name, value in inputs.items() if value is not None],
        )
        if self.audit_log:
            self.audit_log.record(result, source_text=source_text)
        return result
def behavioral_score_from_session(session: 'SessionState') -> float:
    """Derive a clamped 0..1 behavioral crisis score from session trends.

    The current level sets the base; a HIGH-or-worse peak and active
    escalation push the score up, active de-escalation pulls it down.
    """
    base = SCORE_BY_LEVEL.get(session.current_level, 0.0)
    bonus = 0.0
    if LEVEL_RANK.get(session.peak_level, 0) >= LEVEL_RANK["HIGH"]:
        bonus += 0.1
    if session.is_escalating:
        bonus += 0.15
    bonus += min(max(session.escalation_rate, 0.0), 1.0) * 0.1
    penalty = 0.15 if session.is_deescalating else 0.0
    return max(0.0, min(1.0, base + bonus - penalty))