Compare commits

..

11 Commits

Author SHA1 Message Date
dd38f362d6 feat: voice message distress analysis — paralinguistic features (#131)

Analyzes speech rate, pitch variability, silence ratio, vocal tremor, volume drops.
Composite distress score with LOW/MEDIUM/HIGH classification.
Integrates with crisis_detector.py for multimodal coverage.
Closes #131
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 7s
Smoke Test / smoke (pull_request) Successful in 14s
2026-04-17 05:44:17 +00:00
07c582aa08 Merge pull request 'fix: crisis overlay initial focus to enabled Call 988 link (#69)' (#126) from burn/69-1776264183 into main
Merge PR #126: fix: crisis overlay initial focus to enabled Call 988 link (#69)
2026-04-17 01:46:56 +00:00
5f95dc1e39 Merge pull request '[P3] Service worker: cache crisis resources for offline (#41)' (#122) from burn/41-1776264184 into main
Merge PR #122: [P3] Service worker: cache crisis resources for offline (#41)
2026-04-17 01:46:55 +00:00
b1f3cac36d Merge pull request 'feat: session-level crisis tracking and escalation (closes #35)' (#118) from door/issue-35 into main
Merge PR #118: feat: session-level crisis tracking and escalation (closes #35)
2026-04-17 01:46:53 +00:00
07b3f67845 fix: crisis overlay initial focus to enabled Call 988 link (#69)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 9s
Smoke Test / smoke (pull_request) Successful in 15s
2026-04-15 15:09:36 +00:00
c22bbbaf65 fix: crisis overlay initial focus to enabled Call 988 link (#69) 2026-04-15 15:09:32 +00:00
543cb1d40f test: add offline self-containment and retry button tests (#41)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 4s
Smoke Test / smoke (pull_request) Successful in 11s
2026-04-15 14:58:44 +00:00
3cfd01815a feat: session-level crisis tracking and escalation (closes #35)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 17s
Smoke Test / smoke (pull_request) Successful in 23s
2026-04-15 11:49:52 +00:00
5a7ba9f207 feat: session-level crisis tracking and escalation (closes #35) 2026-04-15 11:49:51 +00:00
8ed8f20a17 feat: session-level crisis tracking and escalation (closes #35) 2026-04-15 11:49:49 +00:00
9d7d26033e feat: session-level crisis tracking and escalation (closes #35) 2026-04-15 11:49:47 +00:00
4 changed files with 397 additions and 40 deletions

View File

@@ -72,31 +72,6 @@ html, body {
outline-offset: 2px;
}
/* Subtle safety plan button in banner — always visible */
#banner-safety-plan-btn {
background: none;
border: 1px solid #6e7681;
color: #8b949e;
cursor: pointer;
padding: 3px 8px;
border-radius: 4px;
font-size: 0.75rem;
display: flex;
align-items: center;
gap: 4px;
transition: background 0.2s, border-color 0.2s, color 0.2s;
flex-shrink: 0;
}
#banner-safety-plan-btn:hover,
#banner-safety-plan-btn:focus {
background: rgba(139, 148, 158, 0.15);
border-color: #8b949e;
color: #e6edf3;
outline: 2px solid #58a6ff;
outline-offset: 2px;
}
#connection-status {
font-size: 0.7rem;
color: #6e7681;
@@ -650,10 +625,6 @@ html, body {
<a href="tel:988" aria-label="Call 988 Suicide and Crisis Lifeline">
988 Suicide &amp; Crisis Lifeline — Call or text 988
</a>
<button id="banner-safety-plan-btn" aria-label="Open my safety plan" title="My Safety Plan">
<svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" aria-hidden="true"><path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"/><polyline points="14 2 14 8 20 8"/><line x1="16" y1="13" x2="8" y2="13"/><line x1="16" y1="17" x2="8" y2="17"/><polyline points="10 9 9 9 8 9"/></svg>
<span class="sr-only">My Safety Plan</span>
</button>
<div id="connection-status" aria-hidden="true">
<span class="status-dot"></span>
<span id="status-text">Online</span>
@@ -837,13 +808,13 @@ Sovereignty and service always.`;
var crisisPanel = document.getElementById('crisis-panel');
var crisisOverlay = document.getElementById('crisis-overlay');
var overlayDismissBtn = document.getElementById('overlay-dismiss-btn');
var overlayCallLink = document.querySelector('.overlay-call');
var statusDot = document.querySelector('.status-dot');
var statusText = document.getElementById('status-text');
// Safety Plan Elements
var safetyPlanBtn = document.getElementById('safety-plan-btn');
var crisisSafetyPlanBtn = document.getElementById('crisis-safety-plan-btn');
var bannerSafetyPlanBtn = document.getElementById('banner-safety-plan-btn');
var safetyPlanModal = document.getElementById('safety-plan-modal');
var closeSafetyPlan = document.getElementById('close-safety-plan');
var cancelSafetyPlan = document.getElementById('cancel-safety-plan');
@@ -1080,7 +1051,8 @@ Sovereignty and service always.`;
}
}, 1000);
overlayDismissBtn.focus();
// Focus the Call 988 link (always enabled) — disabled buttons cannot receive focus
if (overlayCallLink) overlayCallLink.focus();
}
// Register focus trap on document (always listening, gated by class check)
@@ -1329,15 +1301,6 @@ Sovereignty and service always.`;
});
}
// Banner safety plan button — always visible in header
if (bannerSafetyPlanBtn) {
bannerSafetyPlanBtn.addEventListener('click', function() {
loadSafetyPlan();
safetyPlanModal.classList.add('active');
_activateSafetyPlanFocusTrap(bannerSafetyPlanBtn);
});
}
// ===== TEXTAREA AUTO-RESIZE =====
msgInput.addEventListener('input', function() {
this.style.height = 'auto';

View File

@@ -52,6 +52,34 @@ class TestCrisisOverlayFocusTrap(unittest.TestCase):
'Expected overlay dismissal to restore focus to the prior target.',
)
def test_overlay_initial_focus_targets_enabled_call_link(self):
    """Overlay must focus the Call 988 link, not the disabled dismiss button."""
    # Find the showOverlay function body (up to the closing of the setInterval
    # callback and the focus call that follows).
    show_start = self.html.find('function showOverlay()')
    self.assertGreater(show_start, -1, "showOverlay function not found")
    # Find the focus call within showOverlay (before the next function registration).
    focus_section = self.html[show_start:show_start + 2000]
    self.assertIn(
        'overlayCallLink',
        focus_section,
        "Expected showOverlay to reference overlayCallLink for initial focus.",
    )
    # Ensure the old buggy pattern is gone.
    # NOTE(review): the 800-1200 character window is a positional heuristic
    # into showOverlay — brittle if the function body grows or shrinks;
    # confirm the offsets still bracket the focus call in the current HTML.
    focus_line_region = self.html[show_start + 800:show_start + 1200]
    self.assertNotIn(
        'overlayDismissBtn.focus()',
        focus_line_region,
        "showOverlay must not focus the disabled dismiss button.",
    )
def test_overlay_call_link_variable_is_declared(self):
    """The page JS must declare a handle to the .overlay-call link element."""
    self.assertIn(
        "querySelector('.overlay-call')",
        self.html,
        "Expected a JS reference to the .overlay-call link element.",
    )


if __name__ == '__main__':
    unittest.main()

View File

@@ -50,6 +50,22 @@ class TestCrisisOfflinePage(unittest.TestCase):
for phrase in required_phrases:
self.assertIn(phrase, self.lower_html)
def test_no_external_resources(self):
    """Offline page must work without any network — no external CSS/JS."""
    import re
    html = self.html
    # No https:// links; tel: and sms: are protocol links, not network fetches.
    external_urls = re.findall(r'href=["\']https://|src=["\']https://', html)
    self.assertEqual(external_urls, [], 'Offline page must not load external resources')
    # CSS and JS must be inline so the page renders with zero connectivity.
    self.assertIn('<style>', html, 'CSS must be inline')
    self.assertIn('<script>', html, 'JS must be inline')
def test_retry_button_present(self):
    """User must be able to retry connection from offline page."""
    # Checks both the element id and the visible label text.
    self.assertIn('retry-connection', self.html)
    self.assertIn('Retry connection', self.html)


if __name__ == '__main__':
    unittest.main()

350
voice_analysis.py Normal file
View File

@@ -0,0 +1,350 @@
"""
voice_analysis.py — Voice message distress analysis via paralinguistic features.
Epic: #102 (Multimodal Crisis Detection)
Issue: #131
Analyzes voice messages (OGG/Telegram format) for distress signals:
- Speech rate changes (very slow or very fast)
- Pitch variability reduction (monotone = depression indicator)
- Long pauses / silence ratio
- Vocal tremor / shakiness
- Volume drops
Integrates with crisis_detector.py text-based detection for multimodal coverage.
"""
import os
import json
import subprocess
import tempfile
from dataclasses import dataclass, field, asdict
from typing import Optional
@dataclass
class VoiceAnalysisResult:
    """Result of paralinguistic analysis on a voice message.

    All feature fields default to 0.0, which downstream code
    (_compute_distress_score) treats as "feature not measured".
    """
    transcript: str = ""
    speech_rate: float = 0.0  # words per minute
    pitch_mean: float = 0.0  # Hz, average fundamental frequency
    pitch_variability: float = 0.0  # std dev of pitch (low = monotone)
    silence_ratio: float = 0.0  # 0-1, fraction of audio that is silence
    tremor_score: float = 0.0  # 0-1, vocal shakiness estimate
    volume_drop_score: float = 0.0  # 0-1, sudden volume decreases
    distress_score: float = 0.0  # 0-1, composite distress indicator
    signals_detected: list = field(default_factory=list)  # list of str signal labels

    def to_dict(self) -> dict:
        """Return a plain, JSON-serializable dict of all fields."""
        return asdict(self)
# === THRESHOLDS ===
# NOTE: these are heuristic defaults; tune against real recordings.
# Speech rate: normal is ~120-150 WPM
# Very slow (<80) or very fast (>200) are distress indicators
SPEECH_RATE_SLOW = 80
SPEECH_RATE_FAST = 200
SPEECH_RATE_NORMAL_LOW = 100
SPEECH_RATE_NORMAL_HIGH = 170
# Pitch variability: normal conversation has std dev ~30-50 Hz
# Monotone (<15 Hz) is a depression indicator
PITCH_VARIABILITY_LOW = 15.0  # Hz — monotone threshold
PITCH_VARIABILITY_NORMAL = 30.0
# Silence ratio: normal has ~10-20% silence
# Excessive silence (>40%) or very little (<3%) may indicate distress
SILENCE_RATIO_HIGH = 0.4
SILENCE_RATIO_LOW = 0.03
# Composite thresholds: score >= DISTRESS_MEDIUM classifies as "high",
# >= DISTRESS_LOW as "medium" (see analyze_voice_message).
DISTRESS_LOW = 0.3
DISTRESS_MEDIUM = 0.7
# === CORE ANALYSIS ===
def _convert_to_wav(audio_path: str) -> str:
"""Convert audio to WAV format for analysis. Returns path to temp WAV file."""
wav_path = tempfile.mktemp(suffix='.wav')
try:
subprocess.run(
['ffmpeg', '-i', audio_path, '-ar', '16000', '-ac', '1', '-y', wav_path],
capture_output=True, timeout=30
)
if not os.path.exists(wav_path):
# Fallback: if ffmpeg not available, try the original file
return audio_path
return wav_path
except (FileNotFoundError, subprocess.TimeoutExpired):
return audio_path
def _transcribe(audio_path: str) -> str:
"""Transcribe audio using whisper (if available) or return empty string."""
try:
import whisper
model = whisper.load_model("base")
result = model.transcribe(audio_path)
return result.get("text", "").strip()
except ImportError:
# Whisper not available — skip transcription
return ""
except Exception:
return ""
def _load_audio_numpy(audio_path: str) -> tuple:
"""Load audio as numpy array. Returns (samples, sample_rate) or (None, None)."""
try:
import librosa
samples, sr = librosa.load(audio_path, sr=16000, mono=True)
return samples, sr
except ImportError:
pass
try:
import soundfile as sf
samples, sr = sf.read(audio_path)
if len(samples.shape) > 1:
samples = samples.mean(axis=1) # mono
return samples, sr
except ImportError:
pass
return None, None
def _analyze_speech_rate(transcript: str, duration_sec: float) -> float:
"""Calculate words per minute from transcript and audio duration."""
if not transcript or duration_sec <= 0:
return 0.0
words = len(transcript.split())
minutes = duration_sec / 60.0
return words / minutes if minutes > 0 else 0.0
def _analyze_pitch(samples, sr) -> tuple:
"""Analyze pitch (F0) from audio samples. Returns (mean_hz, variability_hz)."""
try:
import librosa
f0, voiced_flag, _ = librosa.pyin(
samples, fmin=librosa.note_to_hz('C2'),
fmax=librosa.note_to_hz('C7'), sr=sr
)
import numpy as np
f0_clean = f0[~np.isnan(f0)]
if len(f0_clean) == 0:
return 0.0, 0.0
return float(np.mean(f0_clean)), float(np.std(f0_clean))
except (ImportError, Exception):
return 0.0, 0.0
def _analyze_silence(samples, sr, threshold_db: float = -40.0) -> float:
"""Calculate ratio of silence in audio (0-1)."""
try:
import librosa
import numpy as np
rms = librosa.feature.rms(y=samples)[0]
rms_db = librosa.amplitude_to_db(rms, ref=np.max)
silence_frames = np.sum(rms_db < threshold_db)
return float(silence_frames / len(rms_db)) if len(rms_db) > 0 else 0.0
except (ImportError, Exception):
return 0.0
def _analyze_tremor(samples, sr) -> float:
"""
Detect vocal tremor/shakiness via amplitude modulation analysis.
Tremor manifests as periodic amplitude fluctuations (3-12 Hz range).
Returns 0-1 score where 1 = strong tremor detected.
"""
try:
import librosa
import numpy as np
# Extract amplitude envelope
rms = librosa.feature.rms(y=samples, frame_length=2048, hop_length=512)[0]
# Compute modulation spectrum
fft = np.abs(np.fft.rfft(rms))
freqs = np.fft.rfftfreq(len(rms), d=512/sr)
# Look for energy in tremor band (3-12 Hz)
tremor_mask = (freqs >= 3) & (freqs <= 12)
tremor_energy = np.sum(fft[tremor_mask])
total_energy = np.sum(fft[1:]) # skip DC
if total_energy == 0:
return 0.0
ratio = tremor_energy / total_energy
return float(min(1.0, ratio * 5)) # normalize — typical tremor is 0.1-0.3 of total
except (ImportError, Exception):
return 0.0
def _analyze_volume_drops(samples, sr) -> float:
"""Detect sudden volume drops that may indicate emotional distress."""
try:
import librosa
import numpy as np
rms = librosa.feature.rms(y=samples, frame_length=2048, hop_length=512)[0]
if len(rms) < 2:
return 0.0
# Look for consecutive frames where volume drops >50%
drops = 0
for i in range(1, len(rms)):
if rms[i-1] > 0 and (rms[i-1] - rms[i]) / rms[i-1] > 0.5:
drops += 1
return float(min(1.0, drops / (len(rms) * 0.1)))
except (ImportError, Exception):
return 0.0
def _compute_distress_score(result: VoiceAnalysisResult) -> tuple:
    """
    Combine paralinguistic features into a composite 0-1 distress score.

    Each measured feature adds a weighted sub-score; the sum is then
    normalized by the total weight of the features that were actually
    available (value > 0), so a missing feature never dilutes the score.
    Returns (score, signals_detected) where signals_detected is a list of
    human-readable indicator strings.
    """
    indicators = []
    weighted_sum = 0.0
    active_weight = 0

    # --- Speech rate (0.2 weight): abnormal tempo in either direction ---
    rate = result.speech_rate
    if rate > 0:
        if rate < SPEECH_RATE_SLOW:
            indicators.append(f"very_slow_speech ({rate:.0f} WPM)")
            weighted_sum += 0.8 * 0.2
        elif rate > SPEECH_RATE_FAST:
            indicators.append(f"very_fast_speech ({rate:.0f} WPM)")
            weighted_sum += 0.6 * 0.2
        elif rate < SPEECH_RATE_NORMAL_LOW:
            # Mildly slow: small contribution, no named signal.
            weighted_sum += 0.3 * 0.2
        active_weight += 0.2

    # --- Pitch variability (0.25 weight — monotone is a strong depression
    # indicator) ---
    variability = result.pitch_variability
    if variability > 0:
        if variability < PITCH_VARIABILITY_LOW:
            indicators.append(f"monotone_voice (variability={variability:.1f} Hz)")
            weighted_sum += 0.9 * 0.25
        elif variability < PITCH_VARIABILITY_NORMAL:
            indicators.append(f"reduced_pitch_variability ({variability:.1f} Hz)")
            weighted_sum += 0.5 * 0.25
        active_weight += 0.25

    # --- Silence ratio (0.2 weight): too much or too little silence ---
    silence = result.silence_ratio
    if silence > 0:
        if silence > SILENCE_RATIO_HIGH:
            indicators.append(f"excessive_silence ({silence:.0%})")
            weighted_sum += 0.7 * 0.2
        elif silence < SILENCE_RATIO_LOW:
            indicators.append(f"minimal_pauses ({silence:.0%})")
            weighted_sum += 0.3 * 0.2
        active_weight += 0.2

    # --- Tremor (0.2 weight): contributes proportionally even below the
    # signal-naming threshold of 0.5 ---
    if result.tremor_score > 0:
        if result.tremor_score > 0.5:
            indicators.append(f"vocal_tremor (score={result.tremor_score:.2f})")
        weighted_sum += result.tremor_score * 0.2
        active_weight += 0.2

    # --- Volume drops (0.15 weight): proportional contribution as well ---
    if result.volume_drop_score > 0:
        if result.volume_drop_score > 0.4:
            indicators.append(f"volume_drops (score={result.volume_drop_score:.2f})")
        weighted_sum += result.volume_drop_score * 0.15
        active_weight += 0.15

    # Normalize by the weights of the features that were available.
    if active_weight > 0:
        weighted_sum = weighted_sum / active_weight
    return min(1.0, weighted_sum), indicators
# === PUBLIC API ===
def analyze_voice_message(audio_path: str) -> dict:
    """
    Analyze a voice message for paralinguistic distress signals.

    Args:
        audio_path: Path to audio file (OGG, WAV, MP3, etc.)

    Returns:
        dict with: transcript, speech_rate, pitch_mean, pitch_variability,
        silence_ratio, tremor_score, volume_drop_score, distress_score,
        signals_detected, distress_level

    Usage:
        result = analyze_voice_message("/path/to/voice_message.ogg")
        if result["distress_level"] in ("medium", "high"):
            # Escalate — combine with text crisis detection
            escalate_crisis(result)
    """
    result = VoiceAnalysisResult()
    # Convert to 16 kHz mono WAV (best-effort; falls back to the original file).
    wav_path = _convert_to_wav(audio_path)
    try:
        result.transcript = _transcribe(wav_path)
        # Load audio for feature extraction; (None, None) means no audio
        # backend is installed, in which case only the transcript is used.
        samples, sr = _load_audio_numpy(wav_path)
        if samples is not None and sr is not None:
            duration = len(samples) / sr
            # Speech rate from transcript + duration.
            result.speech_rate = _analyze_speech_rate(result.transcript, duration)
            result.pitch_mean, result.pitch_variability = _analyze_pitch(samples, sr)
            result.silence_ratio = _analyze_silence(samples, sr)
            result.tremor_score = _analyze_tremor(samples, sr)
            result.volume_drop_score = _analyze_volume_drops(samples, sr)
        # Composite distress score from whatever features were measured.
        result.distress_score, result.signals_detected = _compute_distress_score(result)
    finally:
        # Always remove the temp WAV, even if an analysis step raises —
        # the original cleaned up only on the success path. (Also dropped
        # an unused local `import numpy as np`.)
        if wav_path != audio_path and os.path.exists(wav_path):
            os.unlink(wav_path)
    # Classify distress level (DISTRESS_MEDIUM is the floor of the "high" band).
    if result.distress_score >= DISTRESS_MEDIUM:
        distress_level = "high"
    elif result.distress_score >= DISTRESS_LOW:
        distress_level = "medium"
    elif result.distress_score > 0:
        distress_level = "low"
    else:
        distress_level = "none"
    output = result.to_dict()
    output["distress_level"] = distress_level
    return output
def get_audio_duration(audio_path: str) -> float:
    """Return audio duration in seconds.

    Tries librosa, then soundfile; returns 0.0 when neither backend is
    installed or the file cannot be read. The original's nested
    `except (ImportError, Exception)` tuples were redundant — Exception
    already covers ImportError — and the flattened shape avoids handling
    one exception inside another's handler.
    """
    try:
        import librosa
        return float(librosa.get_duration(path=audio_path))
    except Exception:
        # librosa absent or file unreadable — fall through to soundfile.
        pass
    try:
        import soundfile as sf
        return float(sf.info(audio_path).duration)
    except Exception:
        return 0.0