Compare commits
3 Commits
fix/136-cr
...
fix/134
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
622cac0654 | ||
|
|
1cd1dd3288 | ||
| d412939b4f |
10
Makefile
10
Makefile
@@ -12,7 +12,7 @@ VPS := alexanderwhitestone.com
|
||||
DOMAIN := alexanderwhitestone.com
|
||||
DEPLOY_DIR := deploy
|
||||
|
||||
.PHONY: help deploy deploy-bash check ssl push service metrics
|
||||
.PHONY: help deploy deploy-bash check ssl push service
|
||||
|
||||
help:
|
||||
@echo "The Door — Deployment Commands"
|
||||
@@ -23,8 +23,6 @@ help:
|
||||
@echo " make check Check deployment status"
|
||||
@echo " make ssl Setup SSL on VPS"
|
||||
@echo " make service Install/restart hermes-gateway service"
|
||||
@echo " make metrics View crisis metrics summary"
|
||||
@echo " make metrics-json Export crisis metrics as JSON"
|
||||
@echo ""
|
||||
|
||||
deploy:
|
||||
@@ -48,9 +46,3 @@ ssl:
|
||||
|
||||
service:
|
||||
ssh root@$(VPS) "cd /opt/the-door && bash deploy/deploy.sh --service"
|
||||
|
||||
metrics:
|
||||
python3 -m crisis.metrics --summary
|
||||
|
||||
metrics-json:
|
||||
python3 -m crisis.metrics --json
|
||||
|
||||
@@ -6,9 +6,8 @@ Stands between a broken man and a machine that would tell him to die.
|
||||
|
||||
from .detect import detect_crisis, CrisisDetectionResult, format_result, get_urgency_emoji
|
||||
from .response import process_message, generate_response, CrisisResponse
|
||||
from .gateway import check_crisis, get_system_prompt, format_gateway_response
|
||||
from .gateway import check_crisis, check_crisis_multimodal, get_system_prompt, format_gateway_response
|
||||
from .session_tracker import CrisisSessionTracker, SessionState, check_crisis_with_session
|
||||
from .metrics import CrisisMetrics, AggregateMetrics
|
||||
|
||||
__all__ = [
|
||||
"detect_crisis",
|
||||
@@ -17,6 +16,7 @@ __all__ = [
|
||||
"generate_response",
|
||||
"CrisisResponse",
|
||||
"check_crisis",
|
||||
"check_crisis_multimodal",
|
||||
"get_system_prompt",
|
||||
"format_result",
|
||||
"format_gateway_response",
|
||||
@@ -24,6 +24,4 @@ __all__ = [
|
||||
"CrisisSessionTracker",
|
||||
"SessionState",
|
||||
"check_crisis_with_session",
|
||||
"CrisisMetrics",
|
||||
"AggregateMetrics",
|
||||
]
|
||||
|
||||
@@ -2,18 +2,21 @@
|
||||
Crisis Gateway Module for the-door.
|
||||
|
||||
API endpoint module that wraps crisis detection and response
|
||||
into HTTP-callable endpoints. Integrates detect.py and response.py.
|
||||
into HTTP-callable endpoints. Integrates detect.py, unified_scorer.py, and response.py.
|
||||
|
||||
Usage:
|
||||
from crisis.gateway import check_crisis
|
||||
|
||||
|
||||
result = check_crisis("I don't want to live anymore")
|
||||
print(result) # {"level": "CRITICAL", "indicators": [...], "response": {...}}
|
||||
"""
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from unified_scorer import UnifiedCrisisScorer, UnifiedScoreAuditLog, behavioral_score_from_session
|
||||
|
||||
from .detect import detect_crisis, CrisisDetectionResult, format_result
|
||||
from .compassion_router import router
|
||||
from .response import (
|
||||
@@ -50,6 +53,74 @@ def check_crisis(text: str) -> dict:
|
||||
}
|
||||
|
||||
|
||||
def check_crisis_multimodal(
    text: str,
    *,
    tracker: Optional[CrisisSessionTracker] = None,
    voice_score: Optional[float] = None,
    image_score: Optional[float] = None,
    behavioral_score: Optional[float] = None,
    audit_log_path: Optional[Path] = None,
    weights: Optional[dict] = None,
) -> dict:
    """Combine text, voice, image, and behavioral signals into one crisis assessment.

    Args:
        text: The user message to analyze (text detection always runs).
        tracker: Optional session tracker; when given, this detection is
            recorded and the session trajectory is folded into the result.
        voice_score: Optional 0..1 crisis score from a voice analyzer.
        image_score: Optional 0..1 crisis score from an image analyzer.
        behavioral_score: Optional 0..1 behavioral score; when omitted and a
            tracker is present, it is derived from the session state.
        audit_log_path: When set, each unified score is appended to a JSONL
            audit log at this path.
        weights: Optional per-modality weight overrides for the unified scorer.

    Returns:
        A dict shaped like check_crisis()'s result, extended with a "unified"
        section (fused level/score, applied weights, modality inputs) and,
        when a tracker was supplied, a "session" trajectory section.
    """
    detection = detect_crisis(text)
    session_state = tracker.record(detection) if tracker is not None else None
    # Derive a behavioral signal from session trajectory when the caller
    # did not supply one explicitly.
    if behavioral_score is None and session_state is not None:
        behavioral_score = behavioral_score_from_session(session_state)

    scorer = UnifiedCrisisScorer(
        weights=weights,
        audit_log=UnifiedScoreAuditLog(audit_log_path) if audit_log_path else None,
    )
    assessment = scorer.score(
        text_score=detection.score,
        voice_score=voice_score,
        image_score=image_score,
        behavioral_score=behavioral_score,
        source_text=text,
    )

    # Re-wrap the detection with the unified level/score so the downstream
    # response generator reacts to the fused assessment, not text alone.
    unified_detection = CrisisDetectionResult(
        level=assessment.level.value,
        indicators=detection.indicators,
        recommended_action=detection.recommended_action,
        score=assessment.combined_score,
        matches=detection.matches,
    )
    response = generate_response(unified_detection)

    result = {
        "level": unified_detection.level,
        "score": unified_detection.score,
        "indicators": detection.indicators,
        "recommended_action": unified_detection.recommended_action,
        "timmy_message": response.timmy_message,
        "ui": {
            "show_crisis_panel": response.show_crisis_panel,
            "show_overlay": response.show_overlay,
            "provide_988": response.provide_988,
        },
        "escalate": response.escalate,
        "unified": {
            "level": assessment.level.value,
            "combined_score": assessment.combined_score,
            "weights": assessment.weights,
            "modalities": assessment.modalities,
            "present_modalities": assessment.present_modalities,
        },
    }
    # Session trajectory is only reportable when a tracker was provided.
    if session_state is not None:
        result["session"] = {
            "current_level": session_state.current_level,
            "peak_level": session_state.peak_level,
            "message_count": session_state.message_count,
            "is_escalating": session_state.is_escalating,
            "is_deescalating": session_state.is_deescalating,
        }
    return result
|
||||
|
||||
|
||||
def get_system_prompt(base_prompt: str, text: str = "") -> str:
|
||||
"""
|
||||
Sovereign Heart System Prompt Override.
|
||||
|
||||
@@ -1,244 +0,0 @@
|
||||
"""
|
||||
crisis/metrics.py — Aggregate crisis detection metrics.
|
||||
|
||||
Tracks session-level crisis data for aggregate reporting.
|
||||
Privacy-first: stores only aggregate counts, never user content.
|
||||
|
||||
Usage:
|
||||
from crisis.metrics import CrisisMetrics
|
||||
|
||||
metrics = CrisisMetrics()
|
||||
metrics.record_session(tracker.state)
|
||||
summary = metrics.get_summary()
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
METRICS_DIR = Path.home() / ".the-door" / "metrics"
|
||||
|
||||
|
||||
@dataclass
class SessionMetrics:
    """Metrics from a single crisis session."""
    # Unix timestamp (seconds) when the session was recorded.
    timestamp: float
    # Crisis level at the end of the session (e.g. "NONE".."CRITICAL").
    current_level: str
    # Highest crisis level observed during the session.
    peak_level: str
    # Number of messages tracked in the session.
    message_count: int
    # Whether the session was trending upward in severity.
    was_escalating: bool
    # Whether the session was trending downward in severity.
    was_deescalating: bool
    # Escalation rate reported by the session tracker.
    escalation_rate: float
    # Whether the crisis overlay UI was shown during the session.
    triggered_overlay: bool = False
    # Whether the 988 lifeline resource was surfaced.
    showed_988: bool = False
|
||||
|
||||
|
||||
@dataclass
class AggregateMetrics:
    """Aggregate metrics across sessions."""
    # Total sessions folded into this aggregate.
    total_sessions: int = 0
    # Total messages across all aggregated sessions.
    total_messages: int = 0

    # Level distribution, keyed by each session's peak level.
    level_counts: Dict[str, int] = field(default_factory=lambda: {
        "NONE": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0, "CRITICAL": 0
    })

    # Escalation tracking
    escalating_sessions: int = 0
    deescalating_sessions: int = 0

    # Safety interventions
    overlay_triggers: int = 0
    # Count of sessions in which the 988 lifeline was shown.
    ninety_eight_show: int = 0

    # Time window: Unix timestamps of earliest/latest recorded session.
    period_start: Optional[float] = None
    period_end: Optional[float] = None
|
||||
|
||||
|
||||
class CrisisMetrics:
    """
    Aggregate crisis metrics with local JSON persistence.

    Privacy-first: stores only aggregate counts per day.
    Never stores user messages, content, or identifying info.
    """

    def __init__(self, metrics_dir: Optional[Path] = None):
        # Daily JSONL files live under this directory (created on demand).
        self.metrics_dir = metrics_dir or METRICS_DIR
        self.metrics_dir.mkdir(parents=True, exist_ok=True)
        self._buffer: List[SessionMetrics] = []

    def record_session(self, session_state, triggered_overlay: bool = False,
                       showed_988: bool = False):
        """Record a session's metrics.

        Accepts either a live SessionState (converted to SessionMetrics
        here) or a pre-built SessionMetrics instance.
        """
        # Imported lazily — presumably to avoid an import cycle with
        # session_tracker (NOTE(review): confirm).
        from .session_tracker import SessionState

        if isinstance(session_state, SessionState):
            sm = SessionMetrics(
                timestamp=time.time(),
                current_level=session_state.current_level,
                peak_level=session_state.peak_level,
                message_count=session_state.message_count,
                was_escalating=session_state.is_escalating,
                was_deescalating=session_state.is_deescalating,
                escalation_rate=session_state.escalation_rate,
                triggered_overlay=triggered_overlay,
                showed_988=showed_988,
            )
        else:
            # Caller already built a SessionMetrics — store it as-is.
            sm = session_state

        self._buffer.append(sm)
        self._flush()

    def _flush(self):
        """Write buffered sessions to daily file."""
        if not self._buffer:
            return

        # One append-only file per UTC day, e.g. "2024-01-31.jsonl".
        today = datetime.utcnow().strftime("%Y-%m-%d")
        filepath = self.metrics_dir / f"{today}.jsonl"

        with open(filepath, 'a') as f:
            for sm in self._buffer:
                f.write(json.dumps(asdict(sm)) + '\n')

        self._buffer.clear()

    def _load_day(self, date_str: str) -> List[SessionMetrics]:
        """Load sessions for a specific day ("YYYY-MM-DD"); [] if absent."""
        filepath = self.metrics_dir / f"{date_str}.jsonl"
        if not filepath.exists():
            return []

        sessions = []
        with open(filepath) as f:
            for line in f:
                # Skip blank lines so a trailing newline is harmless.
                if line.strip():
                    data = json.loads(line)
                    sessions.append(SessionMetrics(**data))
        return sessions

    def get_summary(self, days: int = 7) -> AggregateMetrics:
        """Get aggregate metrics for the last N days (UTC, today inclusive)."""
        agg = AggregateMetrics()

        now = datetime.utcnow()
        for i in range(days):
            date = (now - timedelta(days=i)).strftime("%Y-%m-%d")
            sessions = self._load_day(date)

            for sm in sessions:
                agg.total_sessions += 1
                agg.total_messages += sm.message_count

                # Level counts (use peak level)
                level = sm.peak_level
                agg.level_counts[level] = agg.level_counts.get(level, 0) + 1

                if sm.was_escalating:
                    agg.escalating_sessions += 1
                if sm.was_deescalating:
                    agg.deescalating_sessions += 1
                if sm.triggered_overlay:
                    agg.overlay_triggers += 1
                if sm.showed_988:
                    agg.ninety_eight_show += 1

                # Time window: track earliest and latest timestamps seen.
                if agg.period_start is None or sm.timestamp < agg.period_start:
                    agg.period_start = sm.timestamp
                if agg.period_end is None or sm.timestamp > agg.period_end:
                    agg.period_end = sm.timestamp

        return agg

    def get_report(self, days: int = 7) -> str:
        """Generate human-readable metrics report."""
        agg = self.get_summary(days)

        lines = []
        lines.append("=" * 50)
        lines.append(" CRISIS METRICS REPORT")
        lines.append(f" Last {days} days")
        if agg.period_start:
            start = datetime.fromtimestamp(agg.period_start).strftime("%Y-%m-%d %H:%M")
            lines.append(f" Period: {start} → now")
        lines.append("=" * 50)

        lines.append(f"\n Sessions: {agg.total_sessions}")
        lines.append(f" Messages tracked: {agg.total_messages}")

        lines.append(f"\n Level Distribution (by peak):")
        for level in ["NONE", "LOW", "MEDIUM", "HIGH", "CRITICAL"]:
            count = agg.level_counts.get(level, 0)
            pct = (count / agg.total_sessions * 100) if agg.total_sessions > 0 else 0
            # One bar glyph per 5 percentage points.
            bar = "█" * int(pct / 5)
            lines.append(f" {level:<10} {count:>5} ({pct:>5.1f}%) {bar}")

        lines.append(f"\n Escalations: {agg.escalating_sessions}")
        lines.append(f" De-escalations: {agg.deescalating_sessions}")
        lines.append(f" Overlay triggers: {agg.overlay_triggers}")
        lines.append(f" 988 shown: {agg.ninety_eight_show}")

        if agg.total_sessions > 0:
            escalation_rate = agg.escalating_sessions / agg.total_sessions * 100
            lines.append(f"\n Escalation rate: {escalation_rate:.1f}%")

        lines.append("=" * 50)

        return "\n".join(lines)

    def get_json(self, days: int = 7) -> str:
        """Export metrics as JSON."""
        agg = self.get_summary(days)
        return json.dumps(asdict(agg), indent=2)
|
||||
|
||||
|
||||
def main():
    """CLI entry point for crisis metrics.

    Flags: --summary / default text report, --json export,
    --days N window size, --demo to seed random data.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Crisis Detection Metrics")
    parser.add_argument("--summary", action="store_true", help="Show summary report")
    parser.add_argument("--json", action="store_true", help="JSON export")
    parser.add_argument("--days", type=int, default=7, help="Days to include")
    parser.add_argument("--demo", action="store_true", help="Generate demo data")
    args = parser.parse_args()

    metrics = CrisisMetrics()

    if args.demo:
        # Seed the store with 50 random sessions for demos/screenshots.
        import random
        levels = ["NONE", "LOW", "MEDIUM", "HIGH", "CRITICAL"]
        for i in range(50):
            from .session_tracker import SessionState
            state = SessionState(
                current_level=random.choice(levels),
                peak_level=random.choice(levels),
                message_count=random.randint(1, 20),
                is_escalating=random.random() > 0.7,
                is_deescalating=random.random() > 0.8,
                escalation_rate=random.random(),
            )
            metrics.record_session(
                state,
                triggered_overlay=random.random() > 0.8,
                showed_988=random.random() > 0.7,
            )
        print("Generated 50 demo sessions.")

    # --json takes precedence; otherwise print the text report.
    if args.json:
        print(metrics.get_json(args.days))
    else:
        print(metrics.get_report(args.days))


if __name__ == "__main__":
    main()
|
||||
@@ -680,7 +680,7 @@ html, body {
|
||||
|
||||
<!-- Footer -->
|
||||
<footer id="footer">
|
||||
<a href="/about" aria-label="About The Door">about</a>
|
||||
<a href="/about.html" aria-label="About The Door">about</a>
|
||||
<button id="safety-plan-btn" aria-label="Open My Safety Plan">my safety plan</button>
|
||||
<button id="clear-chat-btn" aria-label="Clear chat history">clear chat</button>
|
||||
</footer>
|
||||
|
||||
@@ -1,118 +0,0 @@
|
||||
"""
|
||||
Tests for crisis/metrics.py — Aggregate crisis metrics.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
|
||||
import sys
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from crisis.metrics import CrisisMetrics, SessionMetrics, AggregateMetrics
|
||||
|
||||
|
||||
class TestCrisisMetrics(unittest.TestCase):
    """Exercises CrisisMetrics persistence, aggregation, and reporting."""

    def setUp(self):
        # Isolated metrics directory per test; removed in tearDown.
        self.tmpdir = tempfile.mkdtemp()
        self.metrics = CrisisMetrics(Path(self.tmpdir))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def test_record_session_creates_file(self):
        """Recording one session creates a single daily JSONL file."""
        sm = SessionMetrics(
            timestamp=1700000000,
            current_level="LOW",
            peak_level="MEDIUM",
            message_count=5,
            was_escalating=True,
            was_deescalating=False,
            escalation_rate=0.5,
        )
        self.metrics.record_session(sm)

        files = list(Path(self.tmpdir).glob("*.jsonl"))
        self.assertEqual(len(files), 1)

    def test_record_session_writes_jsonl(self):
        """Each session is persisted as one JSON object per line."""
        sm = SessionMetrics(
            timestamp=1700000000,
            current_level="HIGH",
            peak_level="CRITICAL",
            message_count=10,
            was_escalating=True,
            was_deescalating=False,
            escalation_rate=1.0,
            triggered_overlay=True,
            showed_988=True,
        )
        self.metrics.record_session(sm)

        files = list(Path(self.tmpdir).glob("*.jsonl"))
        with open(files[0]) as f:
            data = json.loads(f.readline())
        self.assertEqual(data['peak_level'], 'CRITICAL')
        self.assertTrue(data['triggered_overlay'])

    def test_get_summary_empty(self):
        """Summary over an empty store is all zeros."""
        agg = self.metrics.get_summary(days=7)
        self.assertEqual(agg.total_sessions, 0)
        self.assertEqual(agg.total_messages, 0)

    def test_get_summary_with_data(self):
        """Summary counts sessions, messages, and escalations."""
        for level in ["LOW", "MEDIUM", "HIGH"]:
            sm = SessionMetrics(
                timestamp=1700000000,
                current_level=level,
                peak_level=level,
                message_count=3,
                was_escalating=level != "LOW",
                was_deescalating=False,
                escalation_rate=0.5,
            )
            self.metrics.record_session(sm)

        agg = self.metrics.get_summary(days=1)
        self.assertEqual(agg.total_sessions, 3)
        self.assertEqual(agg.total_messages, 9)
        # Only MEDIUM and HIGH were flagged escalating above.
        self.assertEqual(agg.escalating_sessions, 2)

    def test_get_report_returns_string(self):
        """get_report produces a readable text report."""
        sm = SessionMetrics(
            timestamp=1700000000,
            current_level="LOW",
            peak_level="LOW",
            message_count=5,
            was_escalating=False,
            was_deescalating=False,
            escalation_rate=0.0,
        )
        self.metrics.record_session(sm)

        report = self.metrics.get_report(days=1)
        self.assertIn("CRISIS METRICS REPORT", report)
        self.assertIn("Sessions:", report)

    def test_get_json_returns_valid(self):
        """get_json produces parseable JSON with aggregate fields."""
        sm = SessionMetrics(
            timestamp=1700000000,
            current_level="MEDIUM",
            peak_level="MEDIUM",
            message_count=3,
            was_escalating=False,
            was_deescalating=False,
            escalation_rate=0.0,
        )
        self.metrics.record_session(sm)

        json_str = self.metrics.get_json(days=1)
        data = json.loads(json_str)
        self.assertEqual(data['total_sessions'], 1)


if __name__ == "__main__":
    unittest.main()
|
||||
19
tests/test_unified_gateway.py
Normal file
19
tests/test_unified_gateway.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from crisis.gateway import check_crisis_multimodal
|
||||
from crisis.session_tracker import CrisisSessionTracker
|
||||
|
||||
|
||||
def test_multimodal_gateway_uses_unified_score_for_988_ui(tmp_path):
    """CRITICAL multimodal input must surface 988 resources and the overlay."""
    tracker = CrisisSessionTracker()
    result = check_crisis_multimodal(
        "I want to kill myself tonight",
        tracker=tracker,
        voice_score=0.92,
        image_score=0.6,
        audit_log_path=tmp_path / "audit.jsonl",
    )

    # The fused (unified) level should drive the safety UI.
    assert result["unified"]["level"] == "CRITICAL"
    assert result["ui"]["provide_988"] is True
    assert result["ui"]["show_overlay"] is True
    # Raw modality inputs are echoed back in the unified section.
    assert result["unified"]["modalities"]["voice"] == 0.92
    # Behavioral score is derived automatically from the tracker session.
    assert result["unified"]["modalities"]["behavioral"] >= 0.0
|
||||
51
tests/test_unified_scorer.py
Normal file
51
tests/test_unified_scorer.py
Normal file
@@ -0,0 +1,51 @@
|
||||
from pathlib import Path
|
||||
|
||||
from unified_scorer import (
|
||||
CrisisLevel,
|
||||
UnifiedCrisisScorer,
|
||||
UnifiedScoreAuditLog,
|
||||
behavioral_score_from_session,
|
||||
)
|
||||
from crisis.session_tracker import SessionState
|
||||
|
||||
|
||||
def test_unified_scorer_renormalizes_available_modalities_and_escalates():
    """With image absent, weights renormalize over text/voice/behavioral."""
    scorer = UnifiedCrisisScorer()
    assessment = scorer.score(
        text_score=1.0,
        voice_score=0.8,
        image_score=None,
        behavioral_score=0.7,
    )

    # High scores across the present modalities should clear the 0.8
    # CRITICAL threshold despite the missing image signal.
    assert assessment.level is CrisisLevel.CRITICAL
    assert assessment.combined_score > 0.8
    assert assessment.present_modalities == ["text", "voice", "behavioral"]
|
||||
|
||||
|
||||
def test_behavioral_score_rises_for_escalating_session_state():
    """An escalating session that peaked at CRITICAL yields a high score."""
    session = SessionState(
        current_level="HIGH",
        peak_level="CRITICAL",
        message_count=4,
        level_history=["LOW", "MEDIUM", "HIGH", "CRITICAL"],
        is_escalating=True,
        is_deescalating=False,
        escalation_rate=1.0,
        consecutive_low_messages=0,
    )

    assert behavioral_score_from_session(session) >= 0.8
|
||||
|
||||
|
||||
def test_audit_log_persists_anonymized_score_entries(tmp_path):
    """Audit entries carry a fingerprint, never the raw user text."""
    log_path = tmp_path / "unified-score-audit.jsonl"
    scorer = UnifiedCrisisScorer(audit_log=UnifiedScoreAuditLog(log_path))
    scorer.score(text_score=0.75, voice_score=0.2, image_score=0.1, behavioral_score=0.6, source_text="I feel trapped and hopeless")

    lines = log_path.read_text().strip().splitlines()
    assert len(lines) == 1
    entry = lines[0]
    # Privacy: raw text must never appear in the persisted entry.
    assert "trapped and hopeless" not in entry
    assert '"text_fingerprint"' in entry
    assert '"combined_score"' in entry
|
||||
126
unified_scorer.py
Normal file
126
unified_scorer.py
Normal file
@@ -0,0 +1,126 @@
|
||||
"""Unified multimodal crisis scoring for the-door."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crisis.session_tracker import SessionState
|
||||
|
||||
|
||||
# Numeric midpoint score for each discrete crisis level (0..1 scale).
SCORE_BY_LEVEL = {"NONE": 0.0, "LOW": 0.25, "MEDIUM": 0.5, "HIGH": 0.75, "CRITICAL": 1.0}
# Ordinal rank used to compare level severity (higher = more severe).
LEVEL_RANK = {"NONE": 0, "LOW": 1, "MEDIUM": 2, "HIGH": 3, "CRITICAL": 4}
|
||||
|
||||
|
||||
class CrisisLevel(Enum):
    """Discrete crisis-severity ladder, least to most severe."""
    NONE = "NONE"
    LOW = "LOW"
    MEDIUM = "MEDIUM"
    HIGH = "HIGH"
    CRITICAL = "CRITICAL"
|
||||
|
||||
|
||||
# Default relative weight of each modality in the unified score.
# Text dominates; weights are renormalized over whichever modalities
# are actually present for a given assessment.
DEFAULT_WEIGHTS: Dict[str, float] = {
    "text": 0.4,
    "voice": 0.25,
    "behavioral": 0.2,
    "image": 0.15,
}
|
||||
|
||||
|
||||
@dataclass
class UnifiedAssessment:
    """Result of fusing per-modality crisis scores into one assessment."""
    # Discrete level derived from combined_score thresholds.
    level: CrisisLevel
    # Weighted 0..1 score over the present modalities.
    combined_score: float
    # Renormalized weights actually applied (present modalities only).
    weights: Dict[str, float]
    # Raw per-modality inputs; None marks an absent modality.
    modalities: Dict[str, Optional[float]]
    # Names of modalities that contributed a score.
    present_modalities: List[str]
|
||||
|
||||
|
||||
class UnifiedScoreAuditLog:
    """Append-only JSONL log of unified crisis assessments.

    Privacy-first: persists only scores, weights, and a short SHA-256
    fingerprint of the source text — never the text itself.
    """

    def __init__(self, path: Path | str):
        # Accept either a string or a Path; normalize immediately.
        self.path = Path(path)

    def record(self, assessment: UnifiedAssessment, source_text: str = "") -> None:
        """Append one assessment as a single JSON line (creates parent dirs)."""
        self.path.parent.mkdir(parents=True, exist_ok=True)
        digest = None
        if source_text:
            # 12 hex chars of SHA-256: enough to correlate, not to recover.
            digest = hashlib.sha256(source_text.encode("utf-8")).hexdigest()[:12]
        entry = {
            "level": assessment.level.value,
            "combined_score": round(assessment.combined_score, 4),
            "weights": assessment.weights,
            "modalities": assessment.modalities,
            "present_modalities": assessment.present_modalities,
            "text_fingerprint": digest,
        }
        serialized = json.dumps(entry, sort_keys=True)
        with self.path.open("a", encoding="utf-8") as handle:
            handle.write(serialized + "\n")
|
||||
|
||||
|
||||
class UnifiedCrisisScorer:
    """Fuse per-modality crisis scores into one weighted assessment."""

    def __init__(self, weights: Optional[Dict[str, float]] = None, audit_log: Optional[UnifiedScoreAuditLog] = None):
        # Start from the defaults, then layer caller overrides on top.
        merged = dict(DEFAULT_WEIGHTS)
        if weights:
            merged.update(weights)
        self.weights = merged
        self.audit_log = audit_log

    def _normalize(self, modalities: Dict[str, Optional[float]]) -> Dict[str, float]:
        """Renormalize configured weights over the modalities that are present."""
        available = [name for name, value in modalities.items() if value is not None]
        if not available:
            return {}
        mass = sum(self.weights[name] for name in available)
        return {name: self.weights[name] / mass for name in available}

    def _level_for_score(self, score: float) -> CrisisLevel:
        """Map a combined 0..1 score onto the discrete crisis ladder."""
        ladder = (
            (0.8, CrisisLevel.CRITICAL),
            (0.6, CrisisLevel.HIGH),
            (0.4, CrisisLevel.MEDIUM),
            (0.0, CrisisLevel.LOW),
        )
        for cutoff, level in ladder:
            if score > cutoff:
                return level
        return CrisisLevel.NONE

    def score(
        self,
        *,
        text_score: Optional[float],
        voice_score: Optional[float] = None,
        image_score: Optional[float] = None,
        behavioral_score: Optional[float] = None,
        source_text: str = "",
    ) -> UnifiedAssessment:
        """Score the given modalities; absent (None) ones are excluded and
        the remaining weights renormalized. Records to the audit log if set."""
        readings: Dict[str, Optional[float]] = {
            "text": text_score,
            "voice": voice_score,
            "behavioral": behavioral_score,
            "image": image_score,
        }
        shares = self._normalize(readings)
        combined = sum(float(readings[name]) * share for name, share in shares.items())
        result = UnifiedAssessment(
            level=self._level_for_score(combined),
            combined_score=combined,
            weights=shares,
            modalities=readings,
            present_modalities=[name for name, value in readings.items() if value is not None],
        )
        if self.audit_log:
            self.audit_log.record(result, source_text=source_text)
        return result
|
||||
|
||||
|
||||
def behavioral_score_from_session(session: 'SessionState') -> float:
    """Derive a 0..1 behavioral-risk score from a session's trajectory.

    Base is the current level's score; a high peak and active escalation
    add risk, active de-escalation subtracts it. Result is clamped to [0, 1].
    """
    score = SCORE_BY_LEVEL.get(session.current_level, 0.0)
    # A peak at HIGH or above leaves residual risk even after levels drop.
    if LEVEL_RANK.get(session.peak_level, 0) >= LEVEL_RANK["HIGH"]:
        score += 0.1
    if session.is_escalating:
        score += 0.15
    # Faster escalation contributes up to 0.1 more (rate clamped to [0, 1]).
    score += 0.1 * min(max(session.escalation_rate, 0.0), 1.0)
    if session.is_deescalating:
        score -= 0.15
    return min(1.0, max(0.0, score))
|
||||
Reference in New Issue
Block a user