Compare commits
1 Commits
fix/37
...
feat/131-v
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4dc6819079 |
@@ -7,14 +7,6 @@ Stands between a broken man and a machine that would tell him to die.
|
||||
from .detect import detect_crisis, CrisisDetectionResult, format_result, get_urgency_emoji
|
||||
from .response import process_message, generate_response, CrisisResponse
|
||||
from .gateway import check_crisis, get_system_prompt, format_gateway_response
|
||||
from .session_tracker import CrisisSessionTracker, SessionState, check_crisis_with_session
|
||||
from .metrics import (
|
||||
build_metrics_event,
|
||||
append_metrics_event,
|
||||
load_metrics_events,
|
||||
build_weekly_summary,
|
||||
render_weekly_summary,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"detect_crisis",
|
||||
@@ -27,12 +19,4 @@ __all__ = [
|
||||
"format_result",
|
||||
"format_gateway_response",
|
||||
"get_urgency_emoji",
|
||||
"CrisisSessionTracker",
|
||||
"SessionState",
|
||||
"check_crisis_with_session",
|
||||
"build_metrics_event",
|
||||
"append_metrics_event",
|
||||
"load_metrics_events",
|
||||
"build_weekly_summary",
|
||||
"render_weekly_summary",
|
||||
]
|
||||
|
||||
@@ -22,18 +22,9 @@ from .response import (
|
||||
get_system_prompt_modifier,
|
||||
CrisisResponse,
|
||||
)
|
||||
from .session_tracker import CrisisSessionTracker
|
||||
from .metrics import build_metrics_event, append_metrics_event
|
||||
|
||||
|
||||
def check_crisis(
|
||||
text: str,
|
||||
metrics_log_path: Optional[str] = None,
|
||||
*,
|
||||
continued_conversation: bool = False,
|
||||
false_positive: bool = False,
|
||||
now: Optional[float] = None,
|
||||
) -> dict:
|
||||
def check_crisis(text: str) -> dict:
|
||||
"""
|
||||
Full crisis check returning structured data.
|
||||
|
||||
@@ -43,7 +34,7 @@ def check_crisis(
|
||||
detection = detect_crisis(text)
|
||||
response = generate_response(detection)
|
||||
|
||||
result = {
|
||||
return {
|
||||
"level": detection.level,
|
||||
"score": detection.score,
|
||||
"indicators": detection.indicators,
|
||||
@@ -57,23 +48,6 @@ def check_crisis(
|
||||
"escalate": response.escalate,
|
||||
}
|
||||
|
||||
metrics_event = build_metrics_event(
|
||||
detection,
|
||||
continued_conversation=continued_conversation,
|
||||
false_positive=false_positive,
|
||||
now=now,
|
||||
)
|
||||
if metrics_log_path:
|
||||
metrics_event = append_metrics_event(
|
||||
metrics_log_path,
|
||||
detection,
|
||||
continued_conversation=continued_conversation,
|
||||
false_positive=false_positive,
|
||||
now=now,
|
||||
)
|
||||
result["metrics_event"] = metrics_event
|
||||
return result
|
||||
|
||||
|
||||
def get_system_prompt(base_prompt: str, text: str = "") -> str:
|
||||
"""
|
||||
|
||||
@@ -1,166 +0,0 @@
|
||||
"""Privacy-preserving crisis analytics metrics for the-door.
|
||||
|
||||
Stores only timestamps, crisis levels, indicator categories, and operator
|
||||
feedback flags. No raw message text or PII is persisted.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import time
|
||||
from collections import Counter
|
||||
from pathlib import Path
|
||||
from typing import Iterable
|
||||
|
||||
from .detect import CrisisDetectionResult, detect_crisis
|
||||
|
||||
LEVELS = ("NONE", "LOW", "MEDIUM", "HIGH", "CRITICAL")
|
||||
|
||||
|
||||
def normalize_indicator(indicator: str) -> str:
    """Map a raw indicator to its stable, privacy-safe identifier.

    Currently the identity mapping; kept as the single hook where any
    future renaming or bucketing of indicator categories would live.
    """
    return indicator


def build_metrics_event(
    detection: CrisisDetectionResult,
    *,
    continued_conversation: bool = False,
    false_positive: bool = False,
    now: float | None = None,
) -> dict:
    """Build one privacy-preserving metrics record for a detection.

    Captures only the timestamp, crisis level, normalized indicator
    categories, and operator feedback flags — never the message text.
    *now* overrides the wall clock (useful for deterministic tests).
    """
    when = now if now is not None else time.time()
    normalized = [normalize_indicator(raw) for raw in detection.indicators]
    return {
        "timestamp": float(when),
        "level": detection.level,
        "indicator_count": len(normalized),
        "indicators": normalized,
        "continued_conversation": bool(continued_conversation),
        "false_positive": bool(false_positive),
    }
|
||||
|
||||
|
||||
def append_metrics_event(
    log_path: str | Path,
    detection: CrisisDetectionResult,
    *,
    continued_conversation: bool = False,
    false_positive: bool = False,
    now: float | None = None,
) -> dict:
    """Build a metrics event for *detection* and append it to a JSONL log.

    The log's parent directory is created on first use.  Returns the
    event dict that was written (one JSON object per line).
    """
    record = build_metrics_event(
        detection,
        continued_conversation=continued_conversation,
        false_positive=false_positive,
        now=now,
    )
    target = Path(log_path)
    target.parent.mkdir(parents=True, exist_ok=True)
    with target.open("a", encoding="utf-8") as sink:
        sink.write(json.dumps(record) + "\n")
    return record
|
||||
|
||||
|
||||
def load_metrics_events(log_path: str | Path) -> list[dict]:
    """Read every JSON event from a JSONL log; [] when the file is absent.

    Blank lines are skipped; each remaining line must be a JSON object.
    """
    source = Path(log_path)
    if not source.exists():
        return []
    return [
        json.loads(line)
        for line in source.read_text(encoding="utf-8").splitlines()
        if line.strip()
    ]
|
||||
|
||||
|
||||
def build_weekly_summary(
    events: Iterable[dict],
    *,
    now: float | None = None,
    window_days: int = 7,
) -> dict:
    """Aggregate recent events into an anonymous summary dict.

    Only events whose timestamp falls within the trailing *window_days*
    window (relative to *now*, defaulting to the current time) count.
    The false-positive estimate is computed over non-NONE detections only.
    """
    reference = float(time.time() if now is None else now)
    window_start = reference - window_days * 86400
    recent = [e for e in events if float(e.get("timestamp", 0)) >= window_start]

    per_level = dict.fromkeys(LEVELS, 0)
    keywords: Counter[str] = Counter()
    crisis_events = []  # every non-NONE detection inside the window
    continued = 0       # crisis detections where the user kept talking

    for entry in recent:
        tier = entry.get("level", "NONE")
        # .get() keeps unexpected level strings from raising.
        per_level[tier] = per_level.get(tier, 0) + 1
        keywords.update(entry.get("indicators", []))
        if tier != "NONE":
            crisis_events.append(entry)
            if entry.get("continued_conversation"):
                continued += 1

    flagged = sum(1 for entry in crisis_events if entry.get("false_positive"))
    fp_rate = flagged / len(crisis_events) if crisis_events else 0.0

    return {
        "window_days": window_days,
        "total_events": len(recent),
        "detections_per_level": per_level,
        "most_common_keywords": [
            {"keyword": word, "count": count}
            for word, count in keywords.most_common(10)
        ],
        "false_positive_estimate": fp_rate,
        "continued_after_intervention": continued,
    }
|
||||
|
||||
|
||||
def render_weekly_summary(summary: dict) -> str:
    """Serialize a weekly summary as human-readable, indented JSON."""
    return json.dumps(summary, indent=2)
|
||||
|
||||
|
||||
def write_weekly_summary(path: str | Path, summary: dict) -> Path:
    """Write the rendered summary (with a trailing newline) to *path*.

    Parent directories are created as needed; the resolved Path is returned.
    """
    destination = Path(path)
    destination.parent.mkdir(parents=True, exist_ok=True)
    rendered = render_weekly_summary(summary)
    destination.write_text(rendered + "\n", encoding="utf-8")
    return destination
|
||||
|
||||
|
||||
def record_text_event(
    text: str,
    log_path: str | Path,
    *,
    continued_conversation: bool = False,
    false_positive: bool = False,
    now: float | None = None,
) -> dict:
    """Run crisis detection on *text* and log only the anonymous event.

    The raw text is analyzed in memory and discarded; nothing but the
    privacy-safe metrics record reaches the JSONL log.
    """
    return append_metrics_event(
        log_path,
        detect_crisis(text),
        continued_conversation=continued_conversation,
        false_positive=false_positive,
        now=now,
    )
|
||||
|
||||
|
||||
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: print (and optionally write) a weekly summary.

    *argv* defaults to sys.argv[1:] via argparse; returns 0 on success.
    """
    parser = argparse.ArgumentParser(description="Privacy-preserving crisis metrics summary")
    parser.add_argument("--log-path", required=True, help="JSONL event log path")
    parser.add_argument("--days", type=int, default=7, help="Summary window in days")
    parser.add_argument("--output", help="Optional file to write summary JSON")
    options = parser.parse_args(argv)

    summary = build_weekly_summary(
        load_metrics_events(options.log_path), window_days=options.days
    )
    print(render_weekly_summary(summary))
    if options.output:
        write_weekly_summary(options.output, summary)
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
@@ -1,259 +0,0 @@
|
||||
"""
|
||||
Session-level crisis tracking and escalation for the-door (P0 #35).
|
||||
|
||||
Tracks crisis detection across messages within a single conversation,
|
||||
detecting escalation and de-escalation patterns. Privacy-first: no
|
||||
persistence beyond the conversation session.
|
||||
|
||||
Each message is analyzed in isolation by detect.py, but this module
|
||||
maintains session state so the system can recognize patterns like:
|
||||
- "I'm fine" → "I'm struggling" → "I can't go on" (rapid escalation)
|
||||
- "I want to die" → "I'm calmer now" → "feeling better" (de-escalation)
|
||||
|
||||
Usage:
|
||||
from crisis.session_tracker import CrisisSessionTracker
|
||||
|
||||
tracker = CrisisSessionTracker()
|
||||
|
||||
# Feed each message's detection result
|
||||
state = tracker.record(detect_crisis("I'm having a tough day"))
|
||||
print(state.current_level) # "LOW"
|
||||
print(state.is_escalating) # False
|
||||
|
||||
state = tracker.record(detect_crisis("I feel hopeless"))
|
||||
print(state.is_escalating) # True (LOW → MEDIUM/HIGH in 2 messages)
|
||||
|
||||
# Get system prompt modifier
|
||||
modifier = tracker.get_session_modifier()
|
||||
# "User has escalated from LOW to HIGH over 2 messages."
|
||||
|
||||
# Reset for new session
|
||||
tracker.reset()
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional
|
||||
|
||||
from .detect import CrisisDetectionResult, SCORES
|
||||
|
||||
# Level ordering for comparison (higher = more severe)
|
||||
LEVEL_ORDER = {"NONE": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 3, "CRITICAL": 4}
|
||||
|
||||
|
||||
@dataclass
class SessionState:
    """Point-in-time snapshot of a conversation's crisis tracking state.

    NOTE(review): not declared frozen, so instances are technically
    mutable; treat as read-only by convention — confirm before freezing.
    """

    current_level: str = "NONE"        # level of the most recent message
    peak_level: str = "NONE"           # highest level seen this session
    message_count: int = 0             # messages recorded so far
    level_history: List[str] = field(default_factory=list)  # one level per message, in order
    is_escalating: bool = False        # rapid upward trajectory detected
    is_deescalating: bool = False      # sustained calm after a crisis peak
    escalation_rate: float = 0.0       # levels gained per message
    consecutive_low_messages: int = 0  # trailing LOW/NONE run length
|
||||
|
||||
|
||||
class CrisisSessionTracker:
    """
    In-memory tracker of crisis levels across one conversation.

    Privacy-first: no database, no network calls, no cross-session
    persistence — everything lives in instance attributes and is
    discarded by reset().
    """

    # Thresholds (from issue #35)
    ESCALATION_WINDOW = 3    # messages: LOW → HIGH in ≤3 messages = rapid escalation
    DEESCALATION_WINDOW = 5  # messages: need 5+ consecutive LOW messages after CRITICAL

    def __init__(self):
        self.reset()

    def reset(self):
        """Discard all session state. Call whenever a new conversation starts."""
        self._current_level = "NONE"
        self._peak_level = "NONE"
        self._message_count = 0
        self._level_history: List[str] = []
        self._consecutive_low = 0

    @property
    def state(self) -> SessionState:
        """Build a fresh, detached SessionState snapshot from internal counters."""
        return SessionState(
            current_level=self._current_level,
            peak_level=self._peak_level,
            message_count=self._message_count,
            level_history=list(self._level_history),  # copy: snapshot must not alias
            is_escalating=self._detect_escalation(),
            is_deescalating=self._detect_deescalation(),
            escalation_rate=self._compute_escalation_rate(),
            consecutive_low_messages=self._consecutive_low,
        )

    def record(self, detection: CrisisDetectionResult) -> SessionState:
        """Fold one message's detection result into the session.

        Returns the updated SessionState snapshot.
        """
        tier = detection.level
        self._message_count += 1
        self._level_history.append(tier)

        # Peak only ever moves upward.
        if LEVEL_ORDER.get(tier, 0) > LEVEL_ORDER.get(self._peak_level, 0):
            self._peak_level = tier

        # A LOW/NONE message extends the calm streak; anything higher breaks it.
        if LEVEL_ORDER.get(tier, 0) <= LEVEL_ORDER["LOW"]:
            self._consecutive_low += 1
        else:
            self._consecutive_low = 0

        self._current_level = tier
        return self.state

    def _detect_escalation(self) -> bool:
        """True when the level climbed ≥2 tiers within the last ESCALATION_WINDOW messages."""
        if len(self._level_history) < 2:
            return False

        recent = self._level_history[-self.ESCALATION_WINDOW:]
        if len(recent) < 2:
            return False

        gained = LEVEL_ORDER.get(recent[-1], 0) - LEVEL_ORDER.get(recent[0], 0)
        return gained >= 2

    def _detect_deescalation(self) -> bool:
        """True when a HIGH/CRITICAL session has stayed LOW/NONE for the full window."""
        reached_crisis = LEVEL_ORDER.get(self._peak_level, 0) >= LEVEL_ORDER["HIGH"]
        return reached_crisis and self._consecutive_low >= self.DEESCALATION_WINDOW

    def _compute_escalation_rate(self) -> float:
        """Average levels gained per message since the first message.

        Positive = escalating, negative = de-escalating, 0 = stable.
        """
        if self._message_count < 2:
            return 0.0

        start = LEVEL_ORDER.get(self._level_history[0], 0)
        end = LEVEL_ORDER.get(self._current_level, 0)
        return (end - start) / (self._message_count - 1)

    def get_session_modifier(self) -> str:
        """System-prompt modifier describing the session trajectory.

        Returns '' when no session context is relevant (e.g. first message).
        """
        if self._message_count < 2:
            return ""

        snapshot = self.state

        if snapshot.is_escalating:
            return (
                f"User has escalated from {self._level_history[0]} to "
                f"{snapshot.current_level} over {snapshot.message_count} messages. "
                f"Peak crisis level this session: {snapshot.peak_level}. "
                "Respond with heightened awareness. The trajectory is "
                "worsening — prioritize safety and connection."
            )

        if snapshot.is_deescalating:
            return (
                f"User previously reached {snapshot.peak_level} crisis level "
                f"but has been at {snapshot.current_level} or below for "
                f"{snapshot.consecutive_low_messages} consecutive messages. "
                "The situation appears to be stabilizing. Continue "
                "supportive engagement while remaining vigilant."
            )

        # Past a serious peak but currently calm (without a full de-escalation).
        past_peak = snapshot.peak_level in ("CRITICAL", "HIGH")
        calm_now = snapshot.current_level not in ("CRITICAL", "HIGH")
        if past_peak and calm_now:
            return (
                f"User previously reached {snapshot.peak_level} crisis level "
                f"this session (currently {snapshot.current_level}). "
                "Continue with care and awareness of the earlier crisis."
            )

        return ""

    def get_ui_hints(self) -> dict:
        """Advisory flags for the frontend; it decides what (if anything) to show."""
        snapshot = self.state

        hints = {
            "session_escalating": snapshot.is_escalating,
            "session_deescalating": snapshot.is_deescalating,
            "session_peak_level": snapshot.peak_level,
            "session_message_count": snapshot.message_count,
        }

        if snapshot.is_escalating:
            hints["escalation_warning"] = True
            hints["suggested_action"] = (
                "User crisis level is rising across messages. "
                "Consider increasing intervention level."
            )

        return hints
|
||||
|
||||
|
||||
def check_crisis_with_session(
    text: str,
    tracker: CrisisSessionTracker,
) -> dict:
    """
    Convenience: run single-message crisis detection and fold the result
    into *tracker*, returning the per-message payload plus session context.

    NOTE(review): detect_crisis runs twice here (once inside check_crisis,
    once for the tracker); the local imports presumably avoid a circular
    import with .gateway — confirm before hoisting them.
    """
    from .detect import detect_crisis
    from .gateway import check_crisis

    base = check_crisis(text)
    session_state = tracker.record(detect_crisis(text))

    session_payload = {
        "current_level": session_state.current_level,
        "peak_level": session_state.peak_level,
        "message_count": session_state.message_count,
        "is_escalating": session_state.is_escalating,
        "is_deescalating": session_state.is_deescalating,
        "modifier": tracker.get_session_modifier(),
        "ui_hints": tracker.get_ui_hints(),
    }
    return {**base, "session": session_payload}
|
||||
@@ -680,7 +680,7 @@ html, body {
|
||||
|
||||
<!-- Footer -->
|
||||
<footer id="footer">
|
||||
<a href="/about.html" aria-label="About The Door">about</a>
|
||||
<a href="/about" aria-label="About The Door">about</a>
|
||||
<button id="safety-plan-btn" aria-label="Open My Safety Plan">my safety plan</button>
|
||||
<button id="clear-chat-btn" aria-label="Clear chat history">clear chat</button>
|
||||
</footer>
|
||||
@@ -808,7 +808,6 @@ Sovereignty and service always.`;
|
||||
var crisisPanel = document.getElementById('crisis-panel');
|
||||
var crisisOverlay = document.getElementById('crisis-overlay');
|
||||
var overlayDismissBtn = document.getElementById('overlay-dismiss-btn');
|
||||
var overlayCallLink = document.querySelector('.overlay-call');
|
||||
var statusDot = document.querySelector('.status-dot');
|
||||
var statusText = document.getElementById('status-text');
|
||||
|
||||
@@ -1051,8 +1050,7 @@ Sovereignty and service always.`;
|
||||
}
|
||||
}, 1000);
|
||||
|
||||
// Focus the Call 988 link (always enabled) — disabled buttons cannot receive focus
|
||||
if (overlayCallLink) overlayCallLink.focus();
|
||||
overlayDismissBtn.focus();
|
||||
}
|
||||
|
||||
// Register focus trap on document (always listening, gated by class check)
|
||||
|
||||
@@ -1,100 +0,0 @@
|
||||
"""Tests for privacy-preserving crisis metrics aggregation (issue #37)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import pathlib
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from crisis.detect import detect_crisis
|
||||
from crisis.gateway import check_crisis
|
||||
from crisis.metrics import (
|
||||
append_metrics_event,
|
||||
build_metrics_event,
|
||||
build_weekly_summary,
|
||||
load_metrics_events,
|
||||
render_weekly_summary,
|
||||
)
|
||||
|
||||
|
||||
class TestMetricsEvent(unittest.TestCase):
    """A metrics event must carry levels/flags but never the raw text."""

    def test_event_is_privacy_preserving(self):
        event = build_metrics_event(
            detect_crisis("I want to kill myself"),
            continued_conversation=True,
            false_positive=False,
            now=1_700_000_000,
        )
        # Anonymous fields are present and correct …
        self.assertEqual(event["timestamp"], 1_700_000_000)
        self.assertEqual(event["level"], "CRITICAL")
        self.assertTrue(event["continued_conversation"])
        self.assertFalse(event["false_positive"])
        # … and no message content leaked into the event.
        self.assertNotIn("text", event)
        self.assertNotIn("message", event)
        self.assertGreaterEqual(event["indicator_count"], 1)
        self.assertTrue(event["indicators"])
|
||||
|
||||
|
||||
class TestMetricsLogAndSummary(unittest.TestCase):
    """Round-trip the JSONL log and aggregate it into a weekly summary."""

    def test_append_and_load_metrics_events(self):
        log_file = pathlib.Path(self._testMethodName).with_suffix(".jsonl")
        try:
            append_metrics_event(log_file, detect_crisis("I want to die"), now=1_700_000_000)
            loaded = load_metrics_events(log_file)
            self.assertEqual(len(loaded), 1)
            self.assertEqual(loaded[0]["level"], "CRITICAL")
        finally:
            # Always remove the scratch log, even on assertion failure.
            if log_file.exists():
                log_file.unlink()

    def test_weekly_summary_counts_levels_keywords_and_false_positives(self):
        # (text, continued_conversation, false_positive, timestamp)
        samples = [
            ("I want to die", True, False, 1_700_000_000),
            ("I'm having a rough day", False, False, 1_700_000_100),
            ("I want to die", False, True, 1_700_000_200),
            ("Hello there", False, False, 1_700_000_300),
        ]
        events = [
            build_metrics_event(
                detect_crisis(text),
                continued_conversation=continued,
                false_positive=flagged,
                now=stamp,
            )
            for text, continued, flagged, stamp in samples
        ]
        summary = build_weekly_summary(events, now=1_700_000_400, window_days=7)

        self.assertEqual(summary["detections_per_level"]["CRITICAL"], 2)
        self.assertEqual(summary["detections_per_level"]["LOW"], 1)
        self.assertEqual(summary["detections_per_level"]["NONE"], 1)
        self.assertEqual(summary["continued_after_intervention"], 1)
        self.assertAlmostEqual(summary["false_positive_estimate"], 1 / 3, places=4)
        self.assertEqual(summary["most_common_keywords"][0]["count"], 2)

    def test_render_weekly_summary_mentions_required_metrics(self):
        events = [
            build_metrics_event(detect_crisis("I want to die"), continued_conversation=True, now=1_700_000_000),
            build_metrics_event(detect_crisis("I feel hopeless with no way out"), false_positive=True, now=1_700_000_100),
        ]
        rendered = render_weekly_summary(
            build_weekly_summary(events, now=1_700_000_200, window_days=7)
        )
        for required_key in (
            "detections_per_level",
            "most_common_keywords",
            "false_positive_estimate",
            "continued_after_intervention",
        ):
            self.assertIn(required_key, rendered)
|
||||
|
||||
|
||||
class TestGatewayMetricsIntegration(unittest.TestCase):
    """check_crisis() should surface the metrics event it builds."""

    def test_check_crisis_can_emit_metrics_event(self):
        outcome = check_crisis(
            "I want to die",
            metrics_log_path=None,  # no log file: event is returned but not persisted
            continued_conversation=True,
            false_positive=False,
            now=1_700_000_000,
        )
        self.assertEqual(outcome["level"], "CRITICAL")
        self.assertIn("metrics_event", outcome)
        self.assertEqual(outcome["metrics_event"]["timestamp"], 1_700_000_000)
        self.assertTrue(outcome["metrics_event"]["continued_conversation"])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
@@ -52,34 +52,6 @@ class TestCrisisOverlayFocusTrap(unittest.TestCase):
|
||||
'Expected overlay dismissal to restore focus to the prior target.',
|
||||
)
|
||||
|
||||
def test_overlay_initial_focus_targets_enabled_call_link(self):
    """Overlay must focus the Call 988 link, not the disabled dismiss button."""
    # Locate the showOverlay() implementation in the page source.
    start = self.html.find('function showOverlay()')
    self.assertGreater(start, -1, "showOverlay function not found")
    # The initial-focus call lives within roughly the first 2000 chars of the body
    # (before the next function registration).
    body = self.html[start:start + 2000]
    self.assertIn(
        'overlayCallLink',
        body,
        "Expected showOverlay to reference overlayCallLink for initial focus.",
    )
    # The old buggy focus target must be gone from the focus region.
    region = self.html[start + 800:start + 1200]
    self.assertNotIn(
        'overlayDismissBtn.focus()',
        region,
        "showOverlay must not focus the disabled dismiss button.",
    )
|
||||
|
||||
def test_overlay_call_link_variable_is_declared(self):
    """The page JS must bind the .overlay-call element to a variable."""
    self.assertIn(
        "querySelector('.overlay-call')",
        self.html,
        "Expected a JS reference to the .overlay-call link element.",
    )
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
@@ -50,22 +50,6 @@ class TestCrisisOfflinePage(unittest.TestCase):
|
||||
for phrase in required_phrases:
|
||||
self.assertIn(phrase, self.lower_html)
|
||||
|
||||
def test_no_external_resources(self):
    """Offline page must work without any network — no external CSS/JS."""
    import re
    page = self.html
    # tel:/sms: protocol links are fine; https:// fetches are not.
    fetched = re.findall(r'href=["\']https://|src=["\']https://', page)
    self.assertEqual(fetched, [], 'Offline page must not load external resources')
    # Styling and behaviour must ship inline with the document.
    self.assertIn('<style>', page, 'CSS must be inline')
    self.assertIn('<script>', page, 'JS must be inline')
|
||||
|
||||
def test_retry_button_present(self):
    """User must be able to retry connection from offline page."""
    # Both the element id and its visible label must be present.
    for needle in ('retry-connection', 'Retry connection'):
        self.assertIn(needle, self.html)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
@@ -1,277 +0,0 @@
|
||||
"""
|
||||
Tests for crisis session tracking and escalation (P0 #35).
|
||||
|
||||
Covers: session_tracker.py
|
||||
Run with: python -m pytest tests/test_session_tracker.py -v
|
||||
"""
|
||||
|
||||
import unittest
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from crisis.detect import detect_crisis
|
||||
from crisis.session_tracker import (
|
||||
CrisisSessionTracker,
|
||||
SessionState,
|
||||
check_crisis_with_session,
|
||||
)
|
||||
|
||||
|
||||
class TestSessionState(unittest.TestCase):
    """A freshly constructed SessionState starts fully zeroed."""

    def test_default_state(self):
        fresh = SessionState()
        self.assertEqual(fresh.current_level, "NONE")
        self.assertEqual(fresh.peak_level, "NONE")
        self.assertEqual(fresh.message_count, 0)
        self.assertEqual(fresh.level_history, [])
        self.assertFalse(fresh.is_escalating)
        self.assertFalse(fresh.is_deescalating)
|
||||
|
||||
|
||||
class TestSessionTracking(unittest.TestCase):
    """Basic per-message session state bookkeeping."""

    def setUp(self):
        self.tracker = CrisisSessionTracker()

    def test_record_none_message(self):
        snapshot = self.tracker.record(detect_crisis("Hello Timmy"))
        self.assertEqual(snapshot.current_level, "NONE")
        self.assertEqual(snapshot.message_count, 1)
        self.assertEqual(snapshot.peak_level, "NONE")

    def test_record_low_message(self):
        self.tracker.record(detect_crisis("Hello"))
        snapshot = self.tracker.record(detect_crisis("Having a rough day"))
        self.assertIn(snapshot.current_level, ("LOW", "NONE"))
        self.assertEqual(snapshot.message_count, 2)

    def test_record_critical_updates_peak(self):
        self.tracker.record(detect_crisis("Having a rough day"))
        snapshot = self.tracker.record(detect_crisis("I want to kill myself"))
        self.assertEqual(snapshot.current_level, "CRITICAL")
        self.assertEqual(snapshot.peak_level, "CRITICAL")

    def test_peak_preserved_after_drop(self):
        """Peak level should stay at the highest seen, even after de-escalation."""
        self.tracker.record(detect_crisis("I want to kill myself"))
        snapshot = self.tracker.record(detect_crisis("I'm feeling a bit better"))
        self.assertEqual(snapshot.peak_level, "CRITICAL")

    def test_level_history(self):
        for message in ("Hello", "Having a rough day"):
            self.tracker.record(detect_crisis(message))
        snapshot = self.tracker.record(detect_crisis("I want to die"))
        self.assertEqual(len(snapshot.level_history), 3)
        self.assertEqual(snapshot.level_history[0], "NONE")
        self.assertEqual(snapshot.level_history[2], "CRITICAL")

    def test_reset_clears_state(self):
        self.tracker.record(detect_crisis("I want to kill myself"))
        self.tracker.reset()
        snapshot = self.tracker.state
        self.assertEqual(snapshot.current_level, "NONE")
        self.assertEqual(snapshot.peak_level, "NONE")
        self.assertEqual(snapshot.message_count, 0)
        self.assertEqual(snapshot.level_history, [])
|
||||
|
||||
|
||||
class TestEscalationDetection(unittest.TestCase):
    """Escalation detection: LOW → HIGH in ≤3 messages."""

    def setUp(self):
        self.tracker = CrisisSessionTracker()

    def test_no_escalation_single_message(self):
        self.tracker.record(detect_crisis("Hello"))
        self.assertFalse(self.tracker.state.is_escalating)

    def test_no_escalation_stable(self):
        """Two normal messages should not trigger escalation."""
        self.tracker.record(detect_crisis("Hello"))
        snapshot = self.tracker.record(detect_crisis("How are you?"))
        self.assertFalse(snapshot.is_escalating)

    def test_rapid_escalation_low_to_high(self):
        """LOW → HIGH in 2 messages = rapid escalation."""
        self.tracker.record(detect_crisis("Having a rough day"))
        snapshot = self.tracker.record(
            detect_crisis("I can't take this anymore, everything is pointless")
        )
        # Depending on detection, this could be HIGH or CRITICAL
        if snapshot.current_level in ("HIGH", "CRITICAL"):
            self.assertTrue(snapshot.is_escalating)

    def test_rapid_escalation_three_messages(self):
        """NONE → LOW → HIGH in 3 messages = escalation."""
        for message in ("Hello", "Having a rough day"):
            self.tracker.record(detect_crisis(message))
        snapshot = self.tracker.record(
            detect_crisis("I feel completely hopeless with no way out")
        )
        if snapshot.current_level in ("HIGH", "CRITICAL"):
            self.assertTrue(snapshot.is_escalating)

    def test_escalation_rate(self):
        """Rate should be positive when escalating."""
        self.tracker.record(detect_crisis("Hello"))
        self.tracker.record(detect_crisis("I want to die"))
        self.assertGreater(self.tracker.state.escalation_rate, 0)
|
||||
|
||||
|
||||
class TestDeescalationDetection(unittest.TestCase):
    """De-escalation: sustained LOW after HIGH/CRITICAL."""

    def setUp(self):
        self.tracker = CrisisSessionTracker()

    def _record_many(self, message, count):
        # Helper: feed the same message *count* times.
        for _ in range(count):
            self.tracker.record(detect_crisis(message))

    def test_no_deescalation_without_prior_crisis(self):
        """No de-escalation if never reached HIGH/CRITICAL."""
        self._record_many("Hello", 6)
        self.assertFalse(self.tracker.state.is_deescalating)

    def test_deescalation_after_critical(self):
        """5+ consecutive LOW/NONE messages after CRITICAL = de-escalation."""
        self.tracker.record(detect_crisis("I want to kill myself"))
        self._record_many("I'm doing better today", 5)
        snapshot = self.tracker.state
        if snapshot.peak_level == "CRITICAL":
            self.assertTrue(snapshot.is_deescalating)

    def test_deescalation_after_high(self):
        """5+ consecutive LOW/NONE messages after HIGH = de-escalation."""
        self.tracker.record(detect_crisis("I feel completely hopeless with no way out"))
        self._record_many("Feeling okay", 5)
        snapshot = self.tracker.state
        if snapshot.peak_level == "HIGH":
            self.assertTrue(snapshot.is_deescalating)

    def test_interrupted_deescalation(self):
        """De-escalation resets if a HIGH message interrupts."""
        self.tracker.record(detect_crisis("I want to kill myself"))
        self._record_many("Doing better", 3)
        # Interrupt with another crisis
        self.tracker.record(detect_crisis("I feel hopeless again"))
        self.tracker.record(detect_crisis("Feeling okay now"))
        # The interruption reset the calm-streak counter, so not de-escalating yet.
        self.assertFalse(self.tracker.state.is_deescalating)
|
||||
|
||||
|
||||
class TestSessionModifier(unittest.TestCase):
    """System-prompt modifier generation from session state."""

    def setUp(self):
        self.tracker = CrisisSessionTracker()

    def _feed(self, *texts):
        """Record each message through detection, in order."""
        for text in texts:
            self.tracker.record(detect_crisis(text))

    def test_no_modifier_for_single_message(self):
        self._feed("Hello")
        self.assertEqual(self.tracker.get_session_modifier(), "")

    def test_no_modifier_for_stable_session(self):
        self._feed("Hello", "Good morning")
        self.assertEqual(self.tracker.get_session_modifier(), "")

    def test_escalation_modifier(self):
        """An escalating session should yield a descriptive modifier."""
        self._feed("Hello", "I want to die")
        modifier = self.tracker.get_session_modifier()
        if self.tracker.state.is_escalating:
            self.assertIn("escalated", modifier.lower())
            self.assertIn("NONE", modifier)
            self.assertIn("CRITICAL", modifier)

    def test_deescalation_modifier(self):
        """A de-escalating session should mention stabilizing."""
        self._feed("I want to kill myself")
        self._feed(*["I'm feeling okay"] * 5)
        modifier = self.tracker.get_session_modifier()
        if self.tracker.state.is_deescalating:
            self.assertIn("stabilizing", modifier.lower())

    def test_prior_crisis_modifier(self):
        """A past CRITICAL should surface even without active escalation."""
        self._feed("I want to die", "Feeling a bit better")
        modifier = self.tracker.get_session_modifier()
        # When any modifier is produced, the prior CRITICAL must be noted.
        if modifier:
            self.assertIn("CRITICAL", modifier)
||||
class TestUIHints(unittest.TestCase):
    """UI hint dictionary generation."""

    def setUp(self):
        self.tracker = CrisisSessionTracker()

    def test_ui_hints_structure(self):
        """Every session-level key must be present in the hints dict."""
        self.tracker.record(detect_crisis("Hello"))
        hints = self.tracker.get_ui_hints()
        expected_keys = (
            "session_escalating",
            "session_deescalating",
            "session_peak_level",
            "session_message_count",
        )
        for key in expected_keys:
            self.assertIn(key, hints)

    def test_ui_hints_escalation_warning(self):
        """An escalating session should carry a warning hint and an action."""
        for text in ("Hello", "I want to die"):
            self.tracker.record(detect_crisis(text))
        hints = self.tracker.get_ui_hints()
        if hints["session_escalating"]:
            self.assertTrue(hints.get("escalation_warning"))
            self.assertIn("suggested_action", hints)
||||
class TestCheckCrisisWithSession(unittest.TestCase):
    """Convenience wrapper combining detection with session tracking."""

    def test_returns_combined_data(self):
        """Result carries both detection fields and a session sub-dict."""
        tracker = CrisisSessionTracker()
        result = check_crisis_with_session("I want to die", tracker)
        self.assertIn("level", result)
        self.assertIn("session", result)
        session = result["session"]
        for key in ("current_level", "peak_level", "modifier"):
            self.assertIn(key, session)

    def test_session_updates_across_calls(self):
        """The shared tracker accumulates state between calls."""
        tracker = CrisisSessionTracker()
        check_crisis_with_session("Hello", tracker)
        result = check_crisis_with_session("I want to die", tracker)
        session = result["session"]
        self.assertEqual(session["message_count"], 2)
        self.assertEqual(session["peak_level"], "CRITICAL")
||||
class TestPrivacy(unittest.TestCase):
    """Privacy-first design: no persistence, no cross-instance state."""

    def test_no_persistence_mechanism(self):
        """The tracker source must not reference databases, HTTP, or file I/O."""
        import inspect

        source = inspect.getsource(CrisisSessionTracker).lower()
        for word in ("sqlite", "requests", "urllib", "open(", "httpx", "aiohttp"):
            self.assertNotIn(
                word, source,
                f"Session tracker should not use {word} — privacy-first design")

    def test_state_contained_in_memory(self):
        """State lives on the instance; a brand-new tracker starts clean."""
        tracker = CrisisSessionTracker()
        tracker.record(detect_crisis("I want to die"))
        # A fresh instance must show no contamination from the first one.
        fresh = CrisisSessionTracker()
        self.assertEqual(fresh.state.current_level, "NONE")
||||
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
||||
134
tests/test_voice_analysis.py
Normal file
134
tests/test_voice_analysis.py
Normal file
@@ -0,0 +1,134 @@
|
||||
"""Tests for voice message distress analysis (#131)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
|
||||
from voice_analysis import (
|
||||
VoiceAnalysisResult,
|
||||
compute_speech_rate,
|
||||
compute_distress_score,
|
||||
DISTRESS_THRESHOLDS,
|
||||
NORMAL_SPEECH_RATE,
|
||||
NORMAL_PITCH_VAR,
|
||||
)
|
||||
|
||||
|
||||
class TestDistressScore:
    """Distress score computation from paralinguistic features."""

    @staticmethod
    def _score(**overrides):
        """Score a baseline 'normal' voice profile with selected overrides."""
        features = {
            "speech_rate": 140,
            "pitch_variability": 50,
            "silence_ratio": 0.15,
            "volume_db": -20,
        }
        features.update(overrides)
        return compute_distress_score(**features)

    def test_normal_speech_no_distress(self):
        """All-normal features produce no signals and near-zero score."""
        score, signals = self._score()
        assert score < 0.1
        assert not signals

    def test_slow_speech_detected(self):
        score, signals = self._score(speech_rate=60)
        assert score > 0.1
        assert any("slow" in s for s in signals)

    def test_monotone_detected(self):
        score, signals = self._score(pitch_variability=10)
        assert score > 0.1
        assert any("monotone" in s for s in signals)

    def test_long_pauses_detected(self):
        score, signals = self._score(silence_ratio=0.50)
        assert score > 0.1
        assert any("pause" in s for s in signals)

    def test_quiet_voice_detected(self):
        score, signals = self._score(volume_db=-45)
        assert score > 0.1
        assert any("quiet" in s for s in signals)

    def test_multiple_signals_compound(self):
        """Several severe features together should produce multiple signals."""
        score, signals = self._score(
            speech_rate=50,
            pitch_variability=5,
            silence_ratio=0.55,
            volume_db=-50,
        )
        assert score > 0.5
        assert len(signals) >= 3

    def test_max_score_is_1(self):
        """Even pathological input must stay within the 0-1 range."""
        score, _ = self._score(
            speech_rate=0,
            pitch_variability=0,
            silence_ratio=1.0,
            volume_db=-100,
        )
        assert score <= 1.0
||||
class TestSpeechRate:
    """Speech-rate (words-per-minute) computation."""

    def test_normal_rate(self):
        """100 words spread over one 60-second segment is ~100 wpm."""
        segments = [{"start": 0.0, "end": 60.0, "text": "x"}]
        transcript = "word " * 100
        assert abs(compute_speech_rate(transcript, segments) - 100) < 5

    def test_empty_transcript(self):
        assert compute_speech_rate("", []) == 0.0

    def test_no_segments(self):
        assert compute_speech_rate("hello world", []) == 0.0
||||
class TestDistressThresholds:
    """Threshold configuration sanity checks."""

    def test_thresholds_ordered(self):
        """Buckets must be strictly increasing: low < medium < high."""
        low, medium, high = (
            DISTRESS_THRESHOLDS[k] for k in ("low", "medium", "high")
        )
        assert low < medium < high

    def test_low_is_03(self):
        assert DISTRESS_THRESHOLDS["low"] == 0.3

    def test_high_is_10(self):
        assert DISTRESS_THRESHOLDS["high"] == 1.0
|
||||
class TestVoiceAnalysisResult:
    """Result data structure construction."""

    def test_creation(self):
        """All fields can be supplied and read back."""
        result = VoiceAnalysisResult(
            transcript="hello",
            speech_rate_wpm=120.0,
            pitch_mean_hz=150.0,
            pitch_variability=40.0,
            silence_ratio=0.2,
            volume_db=-20.0,
            volume_variability=5.0,
            duration_seconds=10.0,
            distress_score=0.1,
            distress_level="low",
            distress_signals=[],
        )
        assert result.transcript == "hello"
        assert result.distress_level == "low"
        assert not result.distress_signals
||||
356
voice_analysis.py
Normal file
356
voice_analysis.py
Normal file
@@ -0,0 +1,356 @@
|
||||
"""Voice message distress analysis — paralinguistic features (#131).
|
||||
|
||||
Analyzes audio (OGG/MP3/WAV) for distress signals using audio
|
||||
features extracted without a neural model — pure DSP analysis.
|
||||
|
||||
Signals detected:
|
||||
- Speech rate (words per minute from timestamps)
|
||||
- Pitch variability (F0 std deviation — monotone = depression indicator)
|
||||
- Silence ratio (long pauses)
|
||||
- Volume dynamics (drops, tremor proxy)
|
||||
|
||||
Uses whisper for transcription + word timestamps. All other features
|
||||
are computed from raw audio via librosa.
|
||||
|
||||
Refs: #131 — Epic #102 (Multimodal Crisis Detection)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
from dataclasses import dataclass, asdict
|
||||
from pathlib import Path
|
||||
from typing import Optional, List, Dict, Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class VoiceAnalysisResult:
    """Result of voice message paralinguistic analysis.

    Produced by analyze_voice_message(); bundles the transcript, the raw
    paralinguistic measurements, and the derived distress assessment for
    one audio file. Failed extraction stages degrade to ""/0.0 defaults.
    """
    transcript: str  # whisper transcript; "" when transcription failed
    speech_rate_wpm: float  # words per minute (0.0 when unknown)
    pitch_mean_hz: float  # mean F0 in Hz (0.0 when pitch tracking failed)
    pitch_variability: float  # F0 standard deviation (low = monotone)
    silence_ratio: float  # fraction of audio that is silence (0-1)
    volume_db: float  # mean volume in dB (0.0 when extraction failed)
    volume_variability: float  # volume std deviation in dB
    duration_seconds: float  # total audio duration
    distress_score: float  # 0-1 composite score (max of signal scores)
    distress_level: str  # "low", "medium", "high"
    distress_signals: List[str]  # human-readable detected indicators
|
||||
# Distress thresholds — score-to-level bucketing used by analyze_voice_message():
#   score >= "high" (1.0)   -> "high" (only a maxed-out signal reaches this)
#   score >= "medium" (0.7) -> "medium"
#   otherwise               -> "low"
DISTRESS_THRESHOLDS = {
    "low": 0.3,
    "medium": 0.7,
    "high": 1.0,
}

# Paralinguistic "normal" ranges — fixed heuristics (nothing is learned);
# compute_distress_score() flags deviations outside these bands.
NORMAL_SPEECH_RATE = (100, 180)  # words per minute
NORMAL_PITCH_VAR = (20, 80)  # F0 std deviation in Hz
NORMAL_SILENCE_RATIO = (0.05, 0.35)  # fraction of silence
NORMAL_VOLUME_DB = (-30, -10)  # dB range
|
||||
|
||||
def _ensure_whisper():
|
||||
"""Check if whisper is available."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["whisper", "--help"],
|
||||
capture_output=True, text=True, timeout=5,
|
||||
)
|
||||
return True
|
||||
except (FileNotFoundError, subprocess.TimeoutExpired):
|
||||
return False
|
||||
|
||||
|
||||
def _ensure_librosa():
|
||||
"""Check if librosa is available."""
|
||||
try:
|
||||
import librosa
|
||||
return True
|
||||
except ImportError:
|
||||
return False
|
||||
|
||||
|
||||
def transcribe_with_timestamps(audio_path: str) -> Dict[str, Any]:
    """Transcribe audio using whisper and extract word-level timestamps.

    Returns dict with 'text' and 'segments' (list of {start, end, text}).
    Falls back to subprocess whisper if Python whisper not available.
    On total failure, returns {"text": "", "segments": []} rather than raising.
    """
    # Preferred path: the Python whisper package, if installed.
    try:
        import whisper
        model = whisper.load_model("base")
        result = model.transcribe(audio_path, word_timestamps=True)
        return {
            "text": result["text"],
            "segments": [
                {"start": s["start"], "end": s["end"], "text": s["text"]}
                for s in result.get("segments", [])
            ],
        }
    except ImportError:
        pass

    # Fallback: subprocess whisper
    # NamedTemporaryFile is used only to obtain a unique path inside the
    # temp directory; the placeholder file itself is removed in `finally`.
    with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as f:
        json_out = f.name

    try:
        subprocess.run(
            ["whisper", audio_path, "--model", "base", "--output_format", "json",
             "--output_dir", os.path.dirname(json_out)],
            capture_output=True, text=True, timeout=120,
        )

        # Whisper outputs to <filename>.json in output_dir
        base = Path(audio_path).stem
        whisper_out = Path(os.path.dirname(json_out)) / f"{base}.json"

        if whisper_out.exists():
            with open(whisper_out) as f:
                data = json.load(f)
            os.unlink(whisper_out)
            return {
                "text": data.get("text", ""),
                "segments": [
                    {"start": s["start"], "end": s["end"], "text": s["text"]}
                    for s in data.get("segments", [])
                ],
            }
    except Exception as e:
        # Best-effort: log and fall through to the empty result below.
        logger.warning("Whisper transcription failed: %s", e)
    finally:
        # Remove the placeholder temp file created above.
        if os.path.exists(json_out):
            os.unlink(json_out)

    return {"text": "", "segments": []}
||||
|
||||
def extract_audio_features(audio_path: str) -> Dict[str, float]:
    """Extract paralinguistic features from raw audio using librosa.

    Returns dict with pitch, volume, and silence metrics. Every failure
    mode (librosa missing, unreadable file, pitch-tracking error) degrades
    to zeroed values rather than raising, so callers always get a dict.
    """
    try:
        import librosa
        import numpy as np
    except ImportError:
        logger.warning("librosa not available — returning defaults")
        return {
            "pitch_mean_hz": 0.0, "pitch_variability": 0.0,
            "silence_ratio": 0.0, "volume_db": 0.0, "volume_variability": 0.0,
            "duration_seconds": 0.0,
        }

    try:
        # sr=None keeps the file's native sample rate (no resampling).
        y, sr = librosa.load(audio_path, sr=None)
    except Exception as e:
        logger.warning("Failed to load audio %s: %s", audio_path, e)
        return {
            "pitch_mean_hz": 0.0, "pitch_variability": 0.0,
            "silence_ratio": 0.0, "volume_db": 0.0, "volume_variability": 0.0,
            "duration_seconds": 0.0,
        }

    duration = len(y) / sr

    # Pitch (F0) estimation using pyin; 50-500 Hz band for speech F0.
    try:
        f0, voiced_flag, _ = librosa.pyin(y, fmin=50, fmax=500, sr=sr)
        # pyin emits NaN for unvoiced frames — keep only voiced frames.
        f0_voiced = f0[~np.isnan(f0)]
        if len(f0_voiced) > 0:
            pitch_mean = float(np.mean(f0_voiced))
            pitch_var = float(np.std(f0_voiced))
        else:
            pitch_mean = 0.0
            pitch_var = 0.0
    except Exception:
        # Pitch tracking is best-effort; 0.0 signals "unknown" downstream.
        pitch_mean = 0.0
        pitch_var = 0.0

    # Volume (RMS energy), expressed in dB relative to the loudest frame.
    rms = librosa.feature.rms(y=y)[0]
    volume_db = float(librosa.amplitude_to_db(rms, ref=np.max).mean())
    volume_var = float(librosa.amplitude_to_db(rms, ref=np.max).std())

    # Silence ratio: fraction of samples outside the non-silent intervals
    # found by librosa.effects.split (frames quieter than top_db=30 below peak).
    try:
        intervals = librosa.effects.split(y, top_db=30)
        speech_samples = sum(end - start for start, end in intervals)
        silence_ratio = 1.0 - (speech_samples / len(y)) if len(y) > 0 else 0.0
    except Exception:
        silence_ratio = 0.0

    return {
        "pitch_mean_hz": round(pitch_mean, 1),
        "pitch_variability": round(pitch_var, 1),
        "silence_ratio": round(silence_ratio, 3),
        "volume_db": round(volume_db, 1),
        "volume_variability": round(volume_var, 1),
        "duration_seconds": round(duration, 2),
    }
|
||||
def compute_speech_rate(transcript: str, segments: List[dict]) -> float:
    """Compute words per minute from transcript and segment timestamps.

    Returns 0.0 when the transcript is empty, when no segments are
    available, or when the timestamps span a non-positive duration.
    """
    word_count = len(transcript.split())
    if word_count == 0 or not segments:
        return 0.0

    # Span from the earliest segment start to the latest segment end.
    earliest = min(seg["start"] for seg in segments)
    latest = max(seg["end"] for seg in segments)
    span_minutes = (latest - earliest) / 60.0
    if span_minutes <= 0:
        return 0.0

    return round(word_count / span_minutes, 1)
||||
|
||||
def compute_distress_score(
    speech_rate: float,
    pitch_variability: float,
    silence_ratio: float,
    volume_db: float,
) -> tuple[float, List[str]]:
    """Compute composite distress score from paralinguistic features.

    Returns (score, signals): score in [0, 1] plus a list of detected
    distress indicators. The composite is the MAX of the per-feature
    scores — one severe signal is enough to flag the message.
    """
    signals: List[str] = []
    component_scores: List[float] = []

    def flag(label: str, severity: float) -> None:
        """Record one detected indicator with its severity clamped to 1.0."""
        signals.append(label)
        component_scores.append(min(1.0, severity))

    # Speech rate: outside the normal band in either direction is concerning.
    slow_limit, fast_limit = NORMAL_SPEECH_RATE
    if speech_rate > 0:  # 0 means "unknown" — skip this feature entirely
        if speech_rate < slow_limit:
            flag(f"very_slow_speech ({speech_rate:.0f} wpm)",
                 (slow_limit - speech_rate) / 50)
        elif speech_rate > fast_limit:
            flag(f"very_fast_speech ({speech_rate:.0f} wpm)",
                 (speech_rate - fast_limit) / 80)
        else:
            component_scores.append(0.0)

    # Pitch variability: low = monotone (depression indicator).
    monotone_limit = NORMAL_PITCH_VAR[0]
    if pitch_variability > 0:  # 0 means pitch tracking failed — skip
        if pitch_variability < monotone_limit:
            flag(f"monotone_voice (F0_var={pitch_variability:.0f}Hz)",
                 (monotone_limit - pitch_variability) / monotone_limit)
        else:
            component_scores.append(0.0)

    # Silence ratio: high = long pauses.
    silence_limit = NORMAL_SILENCE_RATIO[1]
    if silence_ratio > silence_limit:
        flag(f"long_pauses (silence={silence_ratio:.0%})",
             (silence_ratio - silence_limit) / 0.4)
    else:
        component_scores.append(0.0)

    # Volume: very quiet voice.
    quiet_limit = NORMAL_VOLUME_DB[0]
    if volume_db < quiet_limit:
        flag(f"very_quiet ({volume_db:.0f}dB)",
             abs(volume_db - quiet_limit) / 20)
    else:
        component_scores.append(0.0)

    # One severe signal dominates: take the max, not the mean.
    composite = max(component_scores) if component_scores else 0.0
    return round(composite, 3), signals
|
||||
def analyze_voice_message(audio_path: str) -> VoiceAnalysisResult:
    """Analyze a voice message for distress signals.

    Pipeline: transcribe with timestamps (whisper) -> extract audio
    features (librosa) -> compute speech rate -> score distress ->
    bucket the score into a level via DISTRESS_THRESHOLDS.

    Args:
        audio_path: Path to audio file (OGG, MP3, WAV).

    Returns:
        VoiceAnalysisResult with all paralinguistic features.
    """
    transcription = transcribe_with_timestamps(audio_path)
    features = extract_audio_features(audio_path)
    wpm = compute_speech_rate(transcription["text"], transcription["segments"])

    score, signals = compute_distress_score(
        speech_rate=wpm,
        pitch_variability=features["pitch_variability"],
        silence_ratio=features["silence_ratio"],
        volume_db=features["volume_db"],
    )

    # Bucket the continuous score into a coarse level.
    if score >= DISTRESS_THRESHOLDS["high"]:
        level = "high"
    elif score >= DISTRESS_THRESHOLDS["medium"]:
        level = "medium"
    else:
        level = "low"

    return VoiceAnalysisResult(
        transcript=transcription["text"],
        speech_rate_wpm=wpm,
        pitch_mean_hz=features["pitch_mean_hz"],
        pitch_variability=features["pitch_variability"],
        silence_ratio=features["silence_ratio"],
        volume_db=features["volume_db"],
        volume_variability=features["volume_variability"],
        duration_seconds=features["duration_seconds"],
        distress_score=score,
        distress_level=level,
        distress_signals=signals,
    )
|
||||
def main():
    """CLI entry point: analyze one audio file and print a report.

    With ``--json`` the full VoiceAnalysisResult is dumped as JSON;
    otherwise a short human-readable summary is printed. Exits with
    status 1 when the audio file does not exist.
    """
    import argparse
    import sys  # BUG FIX: sys was used below but never imported by the module

    parser = argparse.ArgumentParser(description="Voice message distress analysis")
    parser.add_argument("audio", help="Path to audio file")
    parser.add_argument("--json", action="store_true")
    args = parser.parse_args()

    if not os.path.exists(args.audio):
        # Previously raised NameError here because `sys` was undefined.
        print(f"File not found: {args.audio}", file=sys.stderr)
        sys.exit(1)

    result = analyze_voice_message(args.audio)

    if args.json:
        print(json.dumps(asdict(result), indent=2))
    else:
        print(f"Transcript: {result.transcript[:100]}...")
        print(f"Speech rate: {result.speech_rate_wpm} wpm")
        print(f"Pitch: {result.pitch_mean_hz} Hz (variability: {result.pitch_variability})")
        print(f"Silence: {result.silence_ratio:.0%}")
        print(f"Volume: {result.volume_db} dB")
        print(f"Distress: {result.distress_score:.2f} ({result.distress_level})")
        if result.distress_signals:
            print(f"Signals: {', '.join(result.distress_signals)}")
||||
|
||||
# Allow direct CLI use: python voice_analysis.py <audio> [--json]
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user