Compare commits
1 Commits
fix/130-be
...
fix/132
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0ab2626ef2 |
@@ -1,311 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Behavioral Pattern Detection for Crisis Signals (#133).
|
||||
|
||||
Detects crisis risk from session-level behavioral patterns:
|
||||
- Message frequency (increasing urgency = rapid-fire messages)
|
||||
- Time-of-day (late-night messages correlate with crisis risk)
|
||||
- Withdrawal (decreasing communication after engagement)
|
||||
- Escalation (crisis indicators getting stronger over time)
|
||||
|
||||
Usage:
|
||||
from crisis.behavioral import analyze_session, BehavioralSignal
|
||||
|
||||
signals = analyze_session(messages)
|
||||
for sig in signals:
|
||||
if sig.risk_level == "HIGH":
|
||||
# Escalate to crisis protocol
|
||||
pass
|
||||
"""
|
||||
|
||||
import math
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A single message in a session."""

    # When the message was sent. The analyzers below compare consecutive
    # timestamps, so session lists are presumably chronological — verify
    # against callers.
    timestamp: datetime
    # Raw message text; its length is used as an engagement proxy.
    content: str
    crisis_score: float = 0.0  # 0.0-1.0 from text detector
    role: str = "user"  # "user" or "assistant"
|
||||
|
||||
|
||||
@dataclass
class BehavioralSignal:
    """A detected behavioral pattern indicating crisis risk."""

    signal_type: str  # "frequency", "time", "withdrawal", "escalation"
    risk_level: str  # "LOW", "MEDIUM", "HIGH"
    # Human-readable summary of what was detected.
    description: str
    # Supporting strings (timestamps, counts, score series) for review/audit.
    evidence: list = field(default_factory=list)
    score: float = 0.0  # 0.0-1.0
|
||||
|
||||
|
||||
# ── Configuration ─────────────────────────────────────────────────────────────

# Message frequency thresholds (messages per hour).
# NOTE(review): FREQ_NORMAL is never referenced by _analyze_frequency, which
# only compares against FREQ_ELEVATED and FREQ_HIGH; kept as module API.
FREQ_NORMAL = 6  # <6/hr = normal
FREQ_ELEVATED = 15  # 6-15/hr = elevated
FREQ_HIGH = 30  # >30/hr = high urgency

# Time-of-day risk windows (hours in 24h format)
HIGH_RISK_HOURS = set(range(1, 5))  # 1AM-4AM
ELEVATED_RISK_HOURS = set(range(22, 24)) | set(range(5, 7))  # 10PM-12AM, 5AM-7AM

# Withdrawal: messages/day trend
# NOTE(review): WITHDRAWAL_THRESHOLD is never referenced by
# _analyze_withdrawal, which uses hard-coded ratio cutoffs
# (length 0.5/0.7, gap 2.0/1.5) instead — confirm intent.
WITHDRAWAL_THRESHOLD = 0.3  # Current day < 30% of average = withdrawal

# Escalation: crisis score trend
ESCALATION_WINDOW = 5  # Look at last N messages
|
||||
|
||||
|
||||
# ── Frequency Analysis ────────────────────────────────────────────────────────
|
||||
|
||||
def _analyze_frequency(messages: list[Message]) -> Optional[BehavioralSignal]:
    """Flag bursts of rapid user messaging as an urgency indicator.

    Computes the message rate over the last 10 user messages and returns a
    HIGH signal at >= FREQ_HIGH msgs/hour, MEDIUM at >= FREQ_ELEVATED,
    otherwise None. Requires at least 3 user messages and a positive time
    span between the first and last message of the window.
    """
    if len(messages) < 3:
        return None

    from_user = [msg for msg in messages if msg.role == "user"]
    if len(from_user) < 3:
        return None

    window = from_user[-10:]  # rate is measured over the most recent activity
    if len(window) < 2:
        return None

    span_sec = (window[-1].timestamp - window[0].timestamp).total_seconds()
    if span_sec <= 0:
        # Identical timestamps (or unsorted input): no rate can be computed.
        return None

    rate = len(window) / (span_sec / 3600)
    if rate < FREQ_ELEVATED:
        return None

    proof = [f"Last {len(window)} messages in {span_sec/60:.0f} minutes"]
    if rate >= FREQ_HIGH:
        return BehavioralSignal(
            signal_type="frequency",
            risk_level="HIGH",
            description=f"Very rapid messaging: {rate:.0f} messages/hour",
            evidence=proof,
            score=min(1.0, rate / FREQ_HIGH),
        )
    return BehavioralSignal(
        signal_type="frequency",
        risk_level="MEDIUM",
        description=f"Elevated messaging rate: {rate:.0f} messages/hour",
        evidence=proof,
        score=rate / FREQ_HIGH,  # < 1.0 here since rate < FREQ_HIGH
    )
|
||||
|
||||
|
||||
# ── Time-of-Day Analysis ─────────────────────────────────────────────────────
|
||||
|
||||
def _analyze_time(messages: list[Message]) -> Optional[BehavioralSignal]:
    """Detect late-night messaging (correlates with crisis risk).

    Counts how many of the last 5 messages (any role) fall in
    HIGH_RISK_HOURS (1-4 AM) or ELEVATED_RISK_HOURS; three or more in
    either window yields a signal.

    NOTE(review): hours are read straight off the stored timestamps —
    assumes they are already in the user's local timezone; verify callers.

    Returns:
        A HIGH or MEDIUM "time" signal, or None.
    """
    if not messages:
        return None

    # Check most recent messages (may be fewer than 5 in a short session).
    recent = messages[-5:]
    late_night_count = sum(1 for m in recent if m.timestamp.hour in HIGH_RISK_HOURS)
    elevated_count = sum(1 for m in recent if m.timestamp.hour in ELEVATED_RISK_HOURS)

    if late_night_count >= 3:
        return BehavioralSignal(
            signal_type="time",
            risk_level="HIGH",
            # Bug fix: the denominator was hard-coded as "/5" even when the
            # window holds only 3 or 4 messages.
            description=(
                f"Late-night messaging pattern: "
                f"{late_night_count}/{len(recent)} messages between 1-4 AM"
            ),
            evidence=[f"Message at {m.timestamp.strftime('%H:%M')}"
                      for m in recent if m.timestamp.hour in HIGH_RISK_HOURS],
            score=late_night_count / len(recent),
        )
    elif elevated_count >= 3:
        return BehavioralSignal(
            signal_type="time",
            risk_level="MEDIUM",
            description=(
                f"Off-hours messaging: "
                f"{elevated_count}/{len(recent)} messages in elevated-risk window"
            ),
            evidence=[f"Message at {m.timestamp.strftime('%H:%M')}"
                      for m in recent if m.timestamp.hour in ELEVATED_RISK_HOURS],
            score=elevated_count / len(recent) * 0.5,  # halved: weaker correlate
        )
    return None
|
||||
|
||||
|
||||
# ── Withdrawal Detection ──────────────────────────────────────────────────────
|
||||
|
||||
def _analyze_withdrawal(messages: list[Message]) -> Optional[BehavioralSignal]:
    """Detect communication withdrawal (decreasing engagement).

    Splits the user's messages into first and second halves and compares two
    engagement proxies: average message length (shorter = disengaging) and
    average gap between consecutive messages (longer = disengaging).
    Requires at least 10 user messages; assumes chronological order.

    Returns:
        HIGH when messages are both much shorter (<50% of earlier length)
        and much sparser (>2x the earlier gap); MEDIUM on either moderate
        trend; otherwise None.
    """
    user_msgs = [m for m in messages if m.role == "user"]
    if len(user_msgs) < 10:
        return None

    # Split into first half and second half.
    mid = len(user_msgs) // 2
    first_half = user_msgs[:mid]
    second_half = user_msgs[mid:]

    # Average message length as engagement proxy.
    first_avg_len = sum(len(m.content) for m in first_half) / len(first_half)
    second_avg_len = sum(len(m.content) for m in second_half) / len(second_half)

    def avg_gap(msgs: list[Message]) -> float:
        """Mean seconds between consecutive messages (0 if fewer than 2)."""
        if len(msgs) < 2:
            return 0
        gaps = [(msgs[i + 1].timestamp - msgs[i].timestamp).total_seconds()
                for i in range(len(msgs) - 1)]
        return sum(gaps) / len(gaps)

    first_gap = avg_gap(first_half)
    second_gap = avg_gap(second_half)

    # Ratio > 1.0 means the second half is longer/sparser; neutral (1.0)
    # when the baseline is zero and the ratio is undefined.
    length_ratio = second_avg_len / first_avg_len if first_avg_len > 0 else 1.0
    gap_ratio = second_gap / first_gap if first_gap > 0 else 1.0

    # Withdrawal = shorter messages AND longer gaps.
    if length_ratio < 0.5 and gap_ratio > 2.0:
        return BehavioralSignal(
            signal_type="withdrawal",
            risk_level="HIGH",
            description="Significant withdrawal: messages shorter and less frequent",
            evidence=[
                f"Message length: {first_avg_len:.0f} -> {second_avg_len:.0f} chars ({length_ratio:.0%})",
                f"Message gap: {first_gap/60:.0f}min -> {second_gap/60:.0f}min ({gap_ratio:.1f}x)",
            ],
            score=min(1.0, (1 - length_ratio) * 0.5 + (gap_ratio - 1) * 0.25),
        )
    elif length_ratio < 0.7 or gap_ratio > 1.5:
        # Bug fix: the raw formula could go negative (length_ratio just under
        # 0.7 while the gap shrinks) or exceed 1.0 (very large gap_ratio);
        # scores are documented as 0.0-1.0, so clamp into range.
        raw_score = (1 - length_ratio) * 0.3 + (gap_ratio - 1) * 0.15
        return BehavioralSignal(
            signal_type="withdrawal",
            risk_level="MEDIUM",
            description="Moderate withdrawal: engagement decreasing",
            evidence=[
                f"Message length: {first_avg_len:.0f} -> {second_avg_len:.0f} chars",
                f"Message gap: {first_gap/60:.0f}min -> {second_gap/60:.0f}min",
            ],
            score=max(0.0, min(1.0, raw_score)),
        )
    return None
|
||||
|
||||
|
||||
# ── Escalation Detection ─────────────────────────────────────────────────────
|
||||
|
||||
def _analyze_escalation(messages: list[Message]) -> Optional[BehavioralSignal]:
    """Detect a rising crisis-score trend across recent scored user messages.

    Compares the average score of the earlier half of the last
    ESCALATION_WINDOW scored messages against the later half; a marked
    increase yields a HIGH or MEDIUM signal.
    """
    scored = [m for m in messages if m.role == "user" and m.crisis_score > 0]
    if len(scored) < ESCALATION_WINDOW:
        return None

    scores = [m.crisis_score for m in scored[-ESCALATION_WINDOW:]]
    if len(scores) < 3:
        # Defensive: unreachable while ESCALATION_WINDOW >= 3.
        return None

    # Simple trend check: later-half average vs. earlier-half average.
    half = len(scores) // 2
    early_avg = sum(scores[:half]) / half
    late_avg = sum(scores[half:]) / (len(scores) - half)

    trail = [f"Score {i+1}: {s:.2f}" for i, s in enumerate(scores)]

    if late_avg > early_avg * 1.5 and late_avg > 0.5:
        return BehavioralSignal(
            signal_type="escalation",
            risk_level="HIGH",
            description=f"Crisis escalation detected: scores rising from {early_avg:.2f} to {late_avg:.2f}",
            evidence=trail,
            score=min(1.0, late_avg),
        )
    if late_avg > early_avg * 1.2 and late_avg > 0.3:
        return BehavioralSignal(
            signal_type="escalation",
            risk_level="MEDIUM",
            description="Mild escalation: scores trending up",
            evidence=trail,
            score=late_avg * 0.5,
        )
    return None
|
||||
|
||||
|
||||
# ── Combined Analysis ─────────────────────────────────────────────────────────
|
||||
|
||||
def analyze_session(messages: list[Message]) -> list[BehavioralSignal]:
    """Analyze a session for behavioral crisis signals.

    Args:
        messages: Message objects (chronological) carrying timestamps,
            content, and per-message crisis scores.

    Returns:
        BehavioralSignal list ordered HIGH -> MEDIUM -> LOW, ties broken
        by descending score.
    """
    detectors = (
        _analyze_frequency,
        _analyze_time,
        _analyze_withdrawal,
        _analyze_escalation,
    )
    signals = [sig for detect in detectors if (sig := detect(messages))]

    # HIGH first, then MEDIUM, then LOW; unknown levels sink to the end.
    rank = {"HIGH": 0, "MEDIUM": 1, "LOW": 2}
    signals.sort(key=lambda s: (rank.get(s.risk_level, 9), -s.score))

    return signals
|
||||
|
||||
|
||||
def get_session_risk_level(signals: list[BehavioralSignal]) -> str:
    """Collapse behavioral signals into one overall session risk level.

    Returns "NONE" for an empty list, otherwise the highest level present
    ("HIGH" > "MEDIUM" > "LOW").
    """
    if not signals:
        return "NONE"
    levels = {sig.risk_level for sig in signals}
    for level in ("HIGH", "MEDIUM"):
        if level in levels:
            return level
    return "LOW"
|
||||
|
||||
|
||||
# ── Self-Test ─────────────────────────────────────────────────────────────────
|
||||
|
||||
if __name__ == "__main__":
    from datetime import timedelta

    now = datetime.now(timezone.utc)

    # Test: rapid-fire messaging — 20 user messages over ~19 minutes; the
    # last 10 span 9 minutes (~67/hr), well above FREQ_HIGH.
    rapid_msgs = [
        Message(timestamp=now - timedelta(minutes=i), content="help me", role="user")
        for i in range(20, 0, -1)
    ]
    signals = analyze_session(rapid_msgs)
    print(f"Rapid-fire: {[s.signal_type + ':' + s.risk_level for s in signals]}")
    assert any(s.signal_type == "frequency" for s in signals), "Should detect frequency"

    # Test: late-night — 5 messages between 02:00 and 02:20 (HIGH_RISK_HOURS).
    late_msgs = [
        Message(timestamp=now.replace(hour=2, minute=i*5), content="cant sleep", role="user")
        for i in range(5)
    ]
    signals = analyze_session(late_msgs)
    print(f"Late-night: {[s.signal_type + ':' + s.risk_level for s in signals]}")
    assert any(s.signal_type == "time" for s in signals), "Should detect time"

    # Test: escalation. Messages are built oldest-first (i counts DOWN), so
    # crisis_score must grow as i shrinks. Bug fix: the original used
    # `0.1 + i*0.15`, which produced a chronologically FALLING series
    # (0.85 -> 0.25); _analyze_escalation could never fire and the assert
    # below always failed. `(6 - i)` yields 0.25 -> 0.85 rising.
    esc_msgs = [
        Message(timestamp=now - timedelta(minutes=i*10), content="feeling bad",
                role="user", crisis_score=0.1 + (6 - i) * 0.15)
        for i in range(5, 0, -1)
    ]
    signals = analyze_session(esc_msgs)
    print(f"Escalation: {[s.signal_type + ':' + s.risk_level for s in signals]}")
    assert any(s.signal_type == "escalation" for s in signals), "Should detect escalation"

    print("\nAll self-tests passed!")
|
||||
155
image_screening.py
Normal file
155
image_screening.py
Normal file
@@ -0,0 +1,155 @@
|
||||
"""
|
||||
Image Content Screening for Self-Harm Indicators
|
||||
|
||||
Screens images sent in chat for self-harm content using vision models.
|
||||
Privacy-preserving: analyzes in-memory, discards after scoring. No image retention.
|
||||
"""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger("the-door.image_screening")
|
||||
|
||||
|
||||
class RiskLevel(str, Enum):
    """Screening verdict for an image.

    Subclasses str so members compare equal to their string values and
    serialize cleanly via .value in to_dict().
    """
    SAFE = "safe"  # no self-harm indicators detected
    CONCERNING = "concerning"  # ambiguous but worrying; warrants follow-up
    CRITICAL = "critical"  # imminent self-harm indicators; crisis response
|
||||
|
||||
|
||||
@dataclass
class ImageScreenResult:
    """Outcome of screening one image for self-harm indicators."""

    # Overall verdict (safe / concerning / critical).
    risk_level: RiskLevel
    # Model-reported confidence, 0.0-1.0.
    confidence: float
    # Indicator categories named by the model — presumably strings, but the
    # JSON payload is not validated; verify model output shape.
    categories: list
    # Free-text rationale from the model (or the fallback explanation).
    reasoning: str
    # Provenance tag, e.g. "ollama:gemma3:4b" or "fallback:heuristic".
    model_used: str

    @property
    def needs_crisis_response(self) -> bool:
        """True only for CRITICAL results (show crisis resources now)."""
        return self.risk_level == RiskLevel.CRITICAL

    @property
    def needs_followup(self) -> bool:
        """True for CONCERNING or CRITICAL results (log / check in)."""
        return self.risk_level in (RiskLevel.CONCERNING, RiskLevel.CRITICAL)

    def to_dict(self) -> dict:
        """JSON-serializable form; confidence rounded to 3 decimal places."""
        return {
            "risk_level": self.risk_level.value,
            "confidence": round(self.confidence, 3),
            "categories": self.categories,
            "reasoning": self.reasoning,
            "model_used": self.model_used,
        }
|
||||
|
||||
|
||||
SCREENING_PROMPT = """Analyze this image for indicators of self-harm or crisis. Look for:
|
||||
|
||||
1. Self-harm wounds or injuries
|
||||
2. Medication/pills in concerning quantities
|
||||
3. Farewell-type imagery (empty rooms, items laid out, goodbye notes)
|
||||
4. Screenshots of crisis-related searches
|
||||
5. Images conveying hopelessness, isolation, or despair
|
||||
|
||||
Respond in JSON format ONLY:
|
||||
{"risk_level": "safe"|"concerning"|"critical", "confidence": 0.0-1.0, "categories": [], "reasoning": ""}
|
||||
|
||||
CRITICAL: imminent self-harm indicators
|
||||
CONCERNING: ambiguous but worrying
|
||||
SAFE: no indicators detected"""
|
||||
|
||||
|
||||
def _analyze_with_ollama(image_b64, model="gemma3:4b"):
    """Score an image with a local Ollama vision model.

    Args:
        image_b64: Base64-encoded image payload.
        model: Ollama model tag to request.

    Returns:
        ImageScreenResult on success, or None when the server is
        unreachable, the response is not parseable JSON, or any other
        failure occurs — callers then fall back to _analyze_fallback.
    """
    try:
        import urllib.request

        payload = json.dumps({
            "model": model,
            "messages": [{
                "role": "user",
                "content": SCREENING_PROMPT,
                "images": [image_b64],
            }],
            "stream": False,
            "options": {"temperature": 0.1},  # near-deterministic output
        }).encode()
        req = urllib.request.Request(
            "http://localhost:11434/api/chat",
            data=payload,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        # Bug fix: use a context manager so the HTTP response (and its
        # socket) is closed — the original leaked it on the happy path.
        with urllib.request.urlopen(req, timeout=30) as resp:
            data = json.loads(resp.read())
        content = data.get("message", {}).get("content", "")

        # Extract the first {...} span: models often wrap the JSON in
        # prose or markdown fences despite the JSON-only instruction.
        json_start = content.find("{")
        json_end = content.rfind("}") + 1
        if json_start == -1 or json_end <= json_start:
            return None
        result = json.loads(content[json_start:json_end])
        return ImageScreenResult(
            risk_level=RiskLevel(result.get("risk_level", "safe")),
            confidence=float(result.get("confidence", 0.5)),
            categories=result.get("categories", []),
            reasoning=result.get("reasoning", ""),
            model_used=f"ollama:{model}",
        )
    except Exception as e:
        # Broad catch is deliberate: any failure must degrade to the
        # fallback path rather than break chat handling. Lazy %s args
        # avoid formatting when the warning level is disabled.
        logger.warning("Ollama vision analysis failed: %s", e)
        return None
|
||||
|
||||
|
||||
def _analyze_fallback(image_bytes):
    """Conservative default when no vision model is available.

    Returns SAFE with deliberately low confidence and an "unanalyzed"
    category, so downstream consumers can tell "screened and safe" apart
    from "never actually analyzed".
    """
    verdict = ImageScreenResult(
        risk_level=RiskLevel.SAFE,
        confidence=0.2,
        categories=["unanalyzed"],
        reasoning="No vision model available. Defaulting to safe with low confidence.",
        model_used="fallback:heuristic",
    )
    return verdict
|
||||
|
||||
|
||||
def screen_image(image_data, use_vision_model=True, model="gemma3:4b"):
    """Screen image for self-harm indicators. Analyzes in-memory, no retention."""
    # Normalize the input into both representations: base64 text for the
    # model API and raw bytes for the fallback analyzer.
    if isinstance(image_data, bytes):
        encoded = base64.b64encode(image_data).decode()
        raw = image_data
    else:
        encoded = image_data
        raw = base64.b64decode(encoded)

    if not use_vision_model:
        return _analyze_fallback(raw)

    verdict = _analyze_with_ollama(encoded, model)
    if verdict is None:
        # Model unavailable or response unparseable: degrade gracefully.
        return _analyze_fallback(raw)

    logger.info(f"Image screened: {verdict.risk_level.value} (conf: {verdict.confidence:.2f})")
    if verdict.needs_crisis_response:
        logger.warning(f"CRITICAL image: {verdict.reasoning}")
    return verdict
|
||||
|
||||
|
||||
def handle_chat_image(image_data):
    """Handle image from chat. Returns action dict for gateway."""
    result = screen_image(image_data)

    # Choose the user-facing message (if any) before assembling the action.
    response_text = None
    if result.risk_level == RiskLevel.CRITICAL:
        response_text = (
            "I noticed something concerning in the image you shared. "
            "If you or someone you know is in crisis, please reach out: "
            "988 Suicide and Crisis Lifeline (call or text 988). "
            "You are not alone."
        )
    elif result.risk_level == RiskLevel.CONCERNING:
        response_text = (
            "I want to check in \u2014 how are you doing? "
            "If you need to talk to someone, the 988 Lifeline is available 24/7."
        )

    return {
        "result": result.to_dict(),
        "show_crisis_overlay": result.needs_crisis_response,
        "log_event": result.needs_followup,
        "response_text": response_text,
    }
|
||||
84
tests/test_image_screening.py
Normal file
84
tests/test_image_screening.py
Normal file
@@ -0,0 +1,84 @@
|
||||
"""Tests for image content screening module."""
|
||||
|
||||
import json
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
from image_screening import (
|
||||
RiskLevel,
|
||||
ImageScreenResult,
|
||||
screen_image,
|
||||
handle_chat_image,
|
||||
_analyze_fallback,
|
||||
)
|
||||
|
||||
|
||||
class TestImageScreenResult:
    """Property and serialization behavior of ImageScreenResult."""

    def test_safe_result(self):
        # SAFE triggers neither crisis response nor follow-up.
        result = ImageScreenResult(
            risk_level=RiskLevel.SAFE, confidence=0.95,
            categories=[], reasoning="No indicators", model_used="test"
        )
        assert not result.needs_crisis_response
        assert not result.needs_followup
        assert result.to_dict()["risk_level"] == "safe"

    def test_critical_result(self):
        # CRITICAL triggers both the crisis response and follow-up.
        result = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.9,
            categories=["wounds"], reasoning="Detected", model_used="test"
        )
        assert result.needs_crisis_response
        assert result.needs_followup

    def test_concerning_result(self):
        # CONCERNING warrants follow-up only, not the crisis overlay.
        result = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Ambiguous", model_used="test"
        )
        assert not result.needs_crisis_response
        assert result.needs_followup
|
||||
|
||||
|
||||
class TestScreenImage:
    """screen_image input handling and fallback path (no vision model)."""

    def test_fallback_returns_safe(self):
        # With the vision model disabled, the heuristic fallback returns
        # SAFE at low confidence.
        result = screen_image(b"fake_image_data", use_vision_model=False)
        assert result.risk_level == RiskLevel.SAFE
        assert result.model_used == "fallback:heuristic"
        assert result.confidence < 0.5

    def test_base64_input(self):
        # A base64 string input is accepted and decoded internally.
        import base64
        b64 = base64.b64encode(b"fake").decode()
        result = screen_image(b64, use_vision_model=False)
        assert result.risk_level == RiskLevel.SAFE
|
||||
|
||||
|
||||
class TestHandleChatImage:
    """Gateway action dicts produced by handle_chat_image."""

    # Bug fix: the original version did not patch the model call, so it
    # attempted a real HTTP request to localhost:11434 (up to a 30 s
    # timeout) and only passed via the exception fallback. Patching to
    # return None makes the test hermetic while exercising the same
    # SAFE-fallback path.
    @patch("image_screening._analyze_with_ollama")
    def test_safe_image_no_overlay(self, mock_ollama):
        mock_ollama.return_value = None  # no model available -> SAFE fallback
        action = handle_chat_image(b"safe_image")
        assert not action["show_crisis_overlay"]
        assert action["response_text"] is None

    @patch("image_screening._analyze_with_ollama")
    def test_critical_image_shows_overlay(self, mock_ollama):
        # CRITICAL -> overlay shown, event logged, 988 resources in reply.
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.95,
            categories=["wounds"], reasoning="Self-harm detected",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"concerning_image")
        assert action["show_crisis_overlay"]
        assert "988" in action["response_text"]
        assert action["log_event"]

    @patch("image_screening._analyze_with_ollama")
    def test_concerning_image_followup(self, mock_ollama):
        # CONCERNING -> no overlay, but logged with a gentle check-in reply.
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Empty room",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"maybe_concerning")
        assert not action["show_crisis_overlay"]
        assert action["log_event"]
        assert "check in" in action["response_text"]
|
||||
Reference in New Issue
Block a user