Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
4d084654d8 feat: add crisis analytics metrics (#37)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 5s
Smoke Test / smoke (pull_request) Successful in 6s
2026-04-17 02:24:01 -04:00
6 changed files with 306 additions and 308 deletions

View File

@@ -8,6 +8,13 @@ from .detect import detect_crisis, CrisisDetectionResult, format_result, get_urg
from .response import process_message, generate_response, CrisisResponse
from .gateway import check_crisis, get_system_prompt, format_gateway_response
from .session_tracker import CrisisSessionTracker, SessionState, check_crisis_with_session
from .metrics import (
build_metrics_event,
append_metrics_event,
load_metrics_events,
build_weekly_summary,
render_weekly_summary,
)
__all__ = [
"detect_crisis",
@@ -23,4 +30,9 @@ __all__ = [
"CrisisSessionTracker",
"SessionState",
"check_crisis_with_session",
"build_metrics_event",
"append_metrics_event",
"load_metrics_events",
"build_weekly_summary",
"render_weekly_summary",
]

View File

@@ -23,9 +23,17 @@ from .response import (
CrisisResponse,
)
from .session_tracker import CrisisSessionTracker
from .metrics import build_metrics_event, append_metrics_event
def check_crisis(text: str) -> dict:
def check_crisis(
text: str,
metrics_log_path: Optional[str] = None,
*,
continued_conversation: bool = False,
false_positive: bool = False,
now: Optional[float] = None,
) -> dict:
"""
Full crisis check returning structured data.
@@ -35,7 +43,7 @@ def check_crisis(text: str) -> dict:
detection = detect_crisis(text)
response = generate_response(detection)
return {
result = {
"level": detection.level,
"score": detection.score,
"indicators": detection.indicators,
@@ -49,6 +57,23 @@ def check_crisis(text: str) -> dict:
"escalate": response.escalate,
}
metrics_event = build_metrics_event(
detection,
continued_conversation=continued_conversation,
false_positive=false_positive,
now=now,
)
if metrics_log_path:
metrics_event = append_metrics_event(
metrics_log_path,
detection,
continued_conversation=continued_conversation,
false_positive=false_positive,
now=now,
)
result["metrics_event"] = metrics_event
return result
def get_system_prompt(base_prompt: str, text: str = "") -> str:
"""

166
crisis/metrics.py Normal file
View File

@@ -0,0 +1,166 @@
"""Privacy-preserving crisis analytics metrics for the-door.
Stores only timestamps, crisis levels, indicator categories, and operator
feedback flags. No raw message text or PII is persisted.
"""
from __future__ import annotations
import argparse
import json
import time
from collections import Counter
from pathlib import Path
from typing import Iterable
from .detect import CrisisDetectionResult, detect_crisis
# Canonical crisis levels, ordered from least to most severe.
LEVELS = ("NONE", "LOW", "MEDIUM", "HIGH", "CRITICAL")


def normalize_indicator(indicator: str) -> str:
    """Map an indicator onto its stable, privacy-safe identifier.

    Currently the identity function; kept as the single hook where any
    future sanitization or bucketing of indicator names would live.
    """
    return indicator


def build_metrics_event(
    detection: CrisisDetectionResult,
    *,
    continued_conversation: bool = False,
    false_positive: bool = False,
    now: float | None = None,
) -> dict:
    """Build one privacy-preserving analytics event from *detection*.

    Only the crisis level, normalized indicator identifiers, and operator
    feedback flags are recorded -- never the message text itself.
    *now* overrides the event timestamp (seconds since epoch) for testing.
    """
    if now is None:
        now = time.time()
    safe_indicators = [normalize_indicator(raw) for raw in detection.indicators]
    return {
        "timestamp": float(now),
        "level": detection.level,
        "indicator_count": len(safe_indicators),
        "indicators": safe_indicators,
        "continued_conversation": bool(continued_conversation),
        "false_positive": bool(false_positive),
    }
def append_metrics_event(
    log_path: str | Path,
    detection: CrisisDetectionResult,
    *,
    continued_conversation: bool = False,
    false_positive: bool = False,
    now: float | None = None,
) -> dict:
    """Build a metrics event for *detection* and append it to a JSONL log.

    Returns the event dict that was written.
    """
    event = build_metrics_event(
        detection,
        continued_conversation=continued_conversation,
        false_positive=false_positive,
        now=now,
    )
    destination = Path(log_path)
    # Create the log directory on demand so callers never pre-provision it.
    destination.parent.mkdir(parents=True, exist_ok=True)
    with destination.open("a", encoding="utf-8") as log_file:
        log_file.write(json.dumps(event) + "\n")
    return event
def load_metrics_events(log_path: str | Path) -> list[dict]:
    """Load every JSONL event from *log_path*; a missing file yields []."""
    source = Path(log_path)
    if not source.exists():
        return []
    with source.open("r", encoding="utf-8") as log_file:
        # Blank lines are tolerated (e.g. a trailing newline at EOF).
        return [json.loads(line) for line in log_file if line.strip()]
def build_weekly_summary(
    events: Iterable[dict],
    *,
    now: float | None = None,
    window_days: int = 7,
) -> dict:
    """Aggregate events from the trailing *window_days* into a summary dict.

    Reports per-level detection counts, the ten most frequent indicator
    keywords, an operator-flagged false-positive rate over non-NONE
    detections, and how many flagged users kept talking afterwards.
    """
    reference = float(time.time() if now is None else now)
    cutoff = reference - window_days * 86400
    recent = [record for record in events if float(record.get("timestamp", 0)) >= cutoff]

    per_level = dict.fromkeys(LEVELS, 0)
    keyword_tally: Counter[str] = Counter()
    flagged = []
    continued = 0
    for record in recent:
        level = record.get("level", "NONE")
        # .get tolerates levels outside the canonical LEVELS tuple.
        per_level[level] = per_level.get(level, 0) + 1
        keyword_tally.update(record.get("indicators", []))
        if level == "NONE":
            continue
        flagged.append(record)
        if record.get("continued_conversation"):
            continued += 1

    # Only non-NONE detections enter the false-positive ratio.
    fp_rate = 0.0
    if flagged:
        fp_rate = sum(1 for record in flagged if record.get("false_positive")) / len(flagged)

    return {
        "window_days": window_days,
        "total_events": len(recent),
        "detections_per_level": per_level,
        "most_common_keywords": [
            {"keyword": keyword, "count": count}
            for keyword, count in keyword_tally.most_common(10)
        ],
        "false_positive_estimate": fp_rate,
        "continued_after_intervention": continued,
    }
def render_weekly_summary(summary: dict) -> str:
    """Serialize *summary* as human-readable, indented JSON."""
    return json.dumps(summary, indent=2)


def write_weekly_summary(path: str | Path, summary: dict) -> Path:
    """Write the rendered summary (with trailing newline) to *path*.

    Parent directories are created as needed; returns the output Path.
    """
    target = Path(path)
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_text(f"{render_weekly_summary(summary)}\n", encoding="utf-8")
    return target
def record_text_event(
    text: str,
    log_path: str | Path,
    *,
    continued_conversation: bool = False,
    false_positive: bool = False,
    now: float | None = None,
) -> dict:
    """Run crisis detection over *text* and log only the anonymized event.

    Convenience wrapper: the raw text is analyzed in memory but never
    persisted; only the resulting metrics event reaches the log.
    """
    return append_metrics_event(
        log_path,
        detect_crisis(text),
        continued_conversation=continued_conversation,
        false_positive=false_positive,
        now=now,
    )
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: print (and optionally write) the weekly summary.

    Returns a process exit status (always 0 on success).
    """
    parser = argparse.ArgumentParser(description="Privacy-preserving crisis metrics summary")
    parser.add_argument("--log-path", required=True, help="JSONL event log path")
    parser.add_argument("--days", type=int, default=7, help="Summary window in days")
    parser.add_argument("--output", help="Optional file to write summary JSON")
    options = parser.parse_args(argv)

    summary = build_weekly_summary(
        load_metrics_events(options.log_path),
        window_days=options.days,
    )
    print(render_weekly_summary(summary))
    if options.output:
        write_weekly_summary(options.output, summary)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -1,195 +1 @@
"""Crisis synthesizer — learn from anonymized crisis interactions.
This is deliberately simple and privacy-preserving. It does not train a model or
modify detection rules automatically. It only logs metadata, summarizes patterns,
and suggests human-reviewed keyword weight adjustments.
"""
from __future__ import annotations
import argparse
import json
import time
from collections import Counter, defaultdict
from pathlib import Path
from typing import Iterable
# Default on-disk location for the anonymized interaction log.
DEFAULT_LOG_PATH = Path.home() / ".the-door" / "crisis-interactions.jsonl"

# Canonical crisis levels, ordered from least to most severe.
LEVELS = ("NONE", "LOW", "MEDIUM", "HIGH", "CRITICAL")


def build_interaction_event(
    level: str,
    indicators: list[str],
    response_given: str,
    continued_conversation: bool,
    false_positive: bool,
    *,
    now: float | None = None,
) -> dict:
    """Assemble one anonymized interaction event.

    Captures metadata only: crisis level, indicator identifiers, which
    response persona was served, and operator feedback flags -- no message
    text. *now* overrides the timestamp (seconds since epoch) for testing.
    """
    timestamp = time.time() if now is None else now
    return {
        "timestamp": float(timestamp),
        "level": level,
        "indicators": list(indicators),
        "indicator_count": len(indicators),
        "response_given": response_given,
        "continued_conversation": bool(continued_conversation),
        "false_positive": bool(false_positive),
    }
def append_interaction_event(
    log_path: str | Path,
    *,
    level: str,
    indicators: list[str],
    response_given: str,
    continued_conversation: bool,
    false_positive: bool,
    now: float | None = None,
) -> dict:
    """Build an interaction event and append it as one JSONL line.

    Returns the event dict that was written.
    """
    event = build_interaction_event(
        level,
        indicators,
        response_given,
        continued_conversation,
        false_positive,
        now=now,
    )
    target = Path(log_path)
    # Create the log directory on demand so callers never pre-provision it.
    target.parent.mkdir(parents=True, exist_ok=True)
    with target.open("a", encoding="utf-8") as log_file:
        log_file.write(json.dumps(event) + "\n")
    return event
def load_interaction_events(log_path: str | Path) -> list[dict]:
    """Load every JSONL event from *log_path*; a missing file yields []."""
    source = Path(log_path)
    if not source.exists():
        return []
    with source.open("r", encoding="utf-8") as log_file:
        # Blank lines are tolerated (e.g. a trailing newline at EOF).
        return [json.loads(line) for line in log_file if line.strip()]
def summarize_keywords(events: Iterable[dict]) -> list[dict]:
    """Return the ten most common indicator keywords with their counts."""
    tally: Counter[str] = Counter()
    for record in events:
        tally.update(record.get("indicators", []))
    return [{"keyword": kw, "count": n} for kw, n in tally.most_common(10)]
def suggest_keyword_adjustments(events: Iterable[dict], *, min_observations: int = 5) -> list[dict]:
    """Suggest human-reviewed weight changes per keyword from observed outcomes.

    A keyword seen at least *min_observations* times with only false
    positives is suggested for "lower_weight"; one seen that often with only
    genuine detections for "raise_weight"; anything mixed stays "observe".
    Keywords below the observation threshold are omitted entirely.
    """
    per_keyword: dict[str, dict[str, int]] = {}
    for record in events:
        for kw in record.get("indicators", []):
            bucket = per_keyword.setdefault(kw, {
                "observations": 0,
                "true_positive_count": 0,
                "false_positive_count": 0,
                "continued_conversation_count": 0,
            })
            bucket["observations"] += 1
            outcome = "false_positive_count" if record.get("false_positive") else "true_positive_count"
            bucket[outcome] += 1
            if record.get("continued_conversation"):
                bucket["continued_conversation_count"] += 1

    suggestions: list[dict] = []
    for kw in sorted(per_keyword):
        bucket = per_keyword[kw]
        if bucket["observations"] < min_observations:
            continue
        fp = bucket["false_positive_count"]
        tp = bucket["true_positive_count"]
        if fp >= min_observations and tp == 0:
            adjustment = "lower_weight"
            rationale = "Observed only false positives across the sample window."
        elif tp >= min_observations and fp == 0:
            adjustment = "raise_weight"
            rationale = "Observed repeated genuine crises with no false positives."
        else:
            adjustment = "observe"
            rationale = "Mixed evidence; keep monitoring before changing weights."
        suggestions.append(
            {
                "keyword": kw,
                **bucket,
                "suggested_adjustment": adjustment,
                "rationale": rationale,
            }
        )
    return suggestions
def build_weekly_report(
    events: Iterable[dict],
    *,
    now: float | None = None,
    window_days: int = 7,
    min_observations: int = 3,
) -> dict:
    """Summarize the trailing *window_days* of interaction events.

    Includes per-level detection counts, top indicator keywords, an
    operator-flagged false-positive estimate over non-NONE detections,
    continuation counts, and human-reviewable keyword weight suggestions.
    """
    reference = float(time.time() if now is None else now)
    cutoff = reference - window_days * 86400
    recent = [record for record in events if float(record.get("timestamp", 0)) >= cutoff]

    per_level = dict.fromkeys(LEVELS, 0)
    flagged = []
    continued = 0
    for record in recent:
        level = record.get("level", "NONE")
        # .get tolerates levels outside the canonical LEVELS tuple.
        per_level[level] = per_level.get(level, 0) + 1
        if level == "NONE":
            continue
        flagged.append(record)
        if record.get("continued_conversation"):
            continued += 1

    # Only non-NONE detections enter the false-positive ratio.
    fp_rate = 0.0
    if flagged:
        fp_rate = sum(1 for record in flagged if record.get("false_positive")) / len(flagged)

    return {
        "window_days": window_days,
        "total_events": len(recent),
        "detections_per_level": per_level,
        "most_common_keywords": summarize_keywords(recent),
        "false_positive_estimate": fp_rate,
        "continued_after_intervention": continued,
        "keyword_weight_suggestions": suggest_keyword_adjustments(
            recent, min_observations=min_observations
        ),
    }
def render_weekly_report(summary: dict) -> str:
    """Serialize *summary* as human-readable, indented JSON."""
    return json.dumps(summary, indent=2)


def write_weekly_report(output_path: str | Path, summary: dict) -> Path:
    """Write the rendered report (with trailing newline) to *output_path*.

    Parent directories are created as needed; returns the output Path.
    """
    destination = Path(output_path)
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(f"{render_weekly_report(summary)}\n", encoding="utf-8")
    return destination
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: print (and optionally write) the weekly report.

    Returns a process exit status (always 0 on success).
    """
    parser = argparse.ArgumentParser(description="Summarize anonymized crisis interactions")
    parser.add_argument("--log-path", default=str(DEFAULT_LOG_PATH), help="JSONL crisis interaction log")
    parser.add_argument("--days", type=int, default=7, help="Lookback window in days")
    parser.add_argument("--min-observations", type=int, default=3, help="Minimum observations before suggesting keyword adjustments")
    parser.add_argument("--output", help="Optional file to write the weekly report JSON")
    options = parser.parse_args(argv)

    report = build_weekly_report(
        load_interaction_events(options.log_path),
        window_days=options.days,
        min_observations=options.min_observations,
    )
    print(render_weekly_report(report))
    if options.output:
        write_weekly_report(options.output, report)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
...

View File

@@ -0,0 +1,100 @@
"""Tests for privacy-preserving crisis metrics aggregation (issue #37)."""
from __future__ import annotations
import json
import os
import pathlib
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from crisis.detect import detect_crisis
from crisis.gateway import check_crisis
from crisis.metrics import (
append_metrics_event,
build_metrics_event,
build_weekly_summary,
load_metrics_events,
render_weekly_summary,
)
class TestMetricsEvent(unittest.TestCase):
    """build_metrics_event must capture metadata without any raw text."""

    def test_event_is_privacy_preserving(self):
        detection = detect_crisis("I want to kill myself")
        event = build_metrics_event(
            detection,
            continued_conversation=True,
            false_positive=False,
            now=1_700_000_000,
        )
        # Metadata fields are recorded faithfully...
        self.assertEqual(event["timestamp"], 1_700_000_000)
        self.assertEqual(event["level"], "CRITICAL")
        self.assertTrue(event["continued_conversation"])
        self.assertFalse(event["false_positive"])
        self.assertTrue(event["indicators"])
        self.assertGreaterEqual(event["indicator_count"], 1)
        # ...but the original message must never be persisted.
        self.assertNotIn("text", event)
        self.assertNotIn("message", event)
class TestMetricsLogAndSummary(unittest.TestCase):
    """Round-trip the JSONL log and verify weekly aggregation math."""

    def test_append_and_load_metrics_events(self):
        # Write into a throwaway directory instead of the CWD: the old
        # CWD-based log file polluted the working directory and could
        # collide across runs. This also matches the convention used by
        # the crisis_synthesizer test suite.
        import tempfile

        with tempfile.TemporaryDirectory() as tmp:
            log_path = pathlib.Path(tmp) / "metrics-events.jsonl"
            append_metrics_event(log_path, detect_crisis("I want to die"), now=1_700_000_000)
            events = load_metrics_events(log_path)
            self.assertEqual(len(events), 1)
            self.assertEqual(events[0]["level"], "CRITICAL")

    def test_weekly_summary_counts_levels_keywords_and_false_positives(self):
        events = [
            build_metrics_event(detect_crisis("I want to die"), continued_conversation=True, false_positive=False, now=1_700_000_000),
            build_metrics_event(detect_crisis("I'm having a rough day"), continued_conversation=False, false_positive=False, now=1_700_000_100),
            build_metrics_event(detect_crisis("I want to die"), continued_conversation=False, false_positive=True, now=1_700_000_200),
            build_metrics_event(detect_crisis("Hello there"), continued_conversation=False, false_positive=False, now=1_700_000_300),
        ]
        summary = build_weekly_summary(events, now=1_700_000_400, window_days=7)
        self.assertEqual(summary["detections_per_level"]["CRITICAL"], 2)
        self.assertEqual(summary["detections_per_level"]["LOW"], 1)
        self.assertEqual(summary["detections_per_level"]["NONE"], 1)
        self.assertEqual(summary["continued_after_intervention"], 1)
        # Only the three non-NONE detections enter the false-positive ratio.
        self.assertAlmostEqual(summary["false_positive_estimate"], 1 / 3, places=4)
        self.assertEqual(summary["most_common_keywords"][0]["count"], 2)

    def test_render_weekly_summary_mentions_required_metrics(self):
        events = [
            build_metrics_event(detect_crisis("I want to die"), continued_conversation=True, now=1_700_000_000),
            build_metrics_event(detect_crisis("I feel hopeless with no way out"), false_positive=True, now=1_700_000_100),
        ]
        summary = build_weekly_summary(events, now=1_700_000_200, window_days=7)
        rendered = render_weekly_summary(summary)
        for field in (
            "detections_per_level",
            "most_common_keywords",
            "false_positive_estimate",
            "continued_after_intervention",
        ):
            self.assertIn(field, rendered)
class TestGatewayMetricsIntegration(unittest.TestCase):
    """check_crisis should surface the metrics event alongside its result."""

    def test_check_crisis_can_emit_metrics_event(self):
        outcome = check_crisis(
            "I want to die",
            metrics_log_path=None,
            continued_conversation=True,
            false_positive=False,
            now=1_700_000_000,
        )
        self.assertEqual(outcome["level"], "CRITICAL")
        self.assertIn("metrics_event", outcome)
        emitted = outcome["metrics_event"]
        self.assertEqual(emitted["timestamp"], 1_700_000_000)
        self.assertTrue(emitted["continued_conversation"])


if __name__ == "__main__":
    unittest.main()

View File

@@ -1,111 +0,0 @@
"""Tests for evolution/crisis_synthesizer.py (issue #36)."""
from __future__ import annotations
import importlib.util
import json
import pathlib
import sys
import tempfile
import unittest
# Load evolution/crisis_synthesizer.py directly by file path so the tests do
# not depend on evolution/ being an importable package.
ROOT = pathlib.Path(__file__).resolve().parents[1]
SCRIPT = ROOT / 'evolution' / 'crisis_synthesizer.py'
spec = importlib.util.spec_from_file_location('crisis_synthesizer', str(SCRIPT))
mod = importlib.util.module_from_spec(spec)
# Register the module before exec_module so name lookups during execution
# (e.g. any self-referential imports) resolve against this instance.
sys.modules['crisis_synthesizer'] = mod
spec.loader.exec_module(mod)
class TestCrisisSynthesizerEvent(unittest.TestCase):
    """Interaction events must carry metadata only -- never message content."""

    def test_build_interaction_event_is_privacy_preserving(self):
        event = mod.build_interaction_event(
            level='CRITICAL',
            indicators=['want_to_die', 'no_way_out'],
            response_given='guardian',
            continued_conversation=True,
            false_positive=False,
            now=1700000000,
        )
        expected_fields = {
            'timestamp': 1700000000,
            'level': 'CRITICAL',
            'response_given': 'guardian',
            'indicators': ['want_to_die', 'no_way_out'],
        }
        for key, value in expected_fields.items():
            self.assertEqual(event[key], value)
        self.assertTrue(event['continued_conversation'])
        self.assertFalse(event['false_positive'])
        # No raw-text or identity-ish fields may ever appear in an event.
        for forbidden in ['text', 'message', 'content', 'ip', 'session_id', 'user_id']:
            self.assertNotIn(forbidden, event)
class TestCrisisSynthesizerStorage(unittest.TestCase):
    """Events appended to the JSONL log must round-trip through load."""

    def test_append_and_load_events_round_trip(self):
        with tempfile.TemporaryDirectory() as tmp:
            log_path = pathlib.Path(tmp) / 'crisis-events.jsonl'
            mod.append_interaction_event(
                log_path,
                level='HIGH',
                indicators=['hopeless'],
                response_given='companion',
                continued_conversation=False,
                false_positive=True,
                now=1700000100,
            )
            loaded = mod.load_interaction_events(log_path)
            self.assertEqual(len(loaded), 1)
            event = loaded[0]
            self.assertEqual(event['level'], 'HIGH')
            self.assertEqual(event['indicators'], ['hopeless'])
class TestCrisisSynthesizerSummary(unittest.TestCase):
    """Weekly report aggregates levels, keywords, and feedback correctly."""

    def test_weekly_report_contains_required_metrics(self):
        fixtures = [
            ('CRITICAL', ['want_to_die'], 'guardian', True, False),
            ('HIGH', ['hopeless'], 'companion', False, True),
            ('LOW', ['rough_day'], 'friend', False, False),
            ('CRITICAL', ['want_to_die'], 'guardian', False, False),
            ('NONE', [], 'friend', False, False),
        ]
        events = [
            mod.build_interaction_event(*fields, now=1700000000 + 100 * offset)
            for offset, fields in enumerate(fixtures)
        ]
        summary = mod.build_weekly_report(events, now=1700000500, window_days=7)
        for level, count in {'CRITICAL': 2, 'HIGH': 1, 'LOW': 1, 'NONE': 1}.items():
            self.assertEqual(summary['detections_per_level'][level], count)
        self.assertEqual(summary['continued_after_intervention'], 1)
        self.assertAlmostEqual(summary['false_positive_estimate'], 0.25)
        top_keyword = summary['most_common_keywords'][0]
        self.assertEqual(top_keyword['keyword'], 'want_to_die')
        self.assertEqual(top_keyword['count'], 2)
class TestCrisisSynthesizerSuggestions(unittest.TestCase):
    """Repeated outcomes should drive keyword weight suggestions."""

    def test_suggests_weight_adjustments_from_interactions(self):
        events = [
            mod.build_interaction_event('CRITICAL', ['want_to_die'], 'guardian', True, False, now=1700000000 + ts)
            for ts in range(3)
        ] + [
            mod.build_interaction_event('LOW', ['rough_day'], 'friend', False, True, now=1700000100 + ts)
            for ts in range(3)
        ]
        suggestions = mod.suggest_keyword_adjustments(events, min_observations=3)
        by_keyword = {entry['keyword']: entry for entry in suggestions}
        # All-genuine keyword should gain weight; all-false should lose it.
        self.assertEqual(by_keyword['want_to_die']['suggested_adjustment'], 'raise_weight')
        self.assertEqual(by_keyword['rough_day']['suggested_adjustment'], 'lower_weight')
class TestCrisisSynthesizerRendering(unittest.TestCase):
    """render_weekly_report must emit parseable JSON."""

    def test_render_weekly_report_outputs_json(self):
        summary = {
            'detections_per_level': {'NONE': 0, 'LOW': 1, 'MEDIUM': 0, 'HIGH': 0, 'CRITICAL': 0},
            'most_common_keywords': [{'keyword': 'rough_day', 'count': 1}],
            'false_positive_estimate': 0.0,
            'continued_after_intervention': 0,
            'keyword_weight_suggestions': [],
            'window_days': 7,
            'total_events': 1,
        }
        round_tripped = json.loads(mod.render_weekly_report(summary))
        self.assertEqual(round_tripped['window_days'], 7)
        self.assertEqual(round_tripped['most_common_keywords'][0]['keyword'], 'rough_day')


if __name__ == '__main__':
    unittest.main()