Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
0ab2626ef2 feat: image content screening for self-harm indicators (closes #132)
All checks were successful
Sanity Checks / sanity-test (pull_request) Successful in 4s
Smoke Test / smoke (pull_request) Successful in 10s
2026-04-15 12:02:22 -04:00
4 changed files with 239 additions and 144 deletions

155
image_screening.py Normal file
View File

@@ -0,0 +1,155 @@
"""
Image Content Screening for Self-Harm Indicators
Screens images sent in chat for self-harm content using vision models.
Privacy-preserving: analyzes in-memory, discards after scoring. No image retention.
"""
import base64
import json
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional
logger = logging.getLogger("the-door.image_screening")
class RiskLevel(str, Enum):
    """Screening outcome for an image.

    Subclasses ``str`` so members compare equal to their string values and
    ``.value`` drops straight into JSON payloads (see to_dict() below).
    """
    SAFE = "safe"            # no indicators detected
    CONCERNING = "concerning"  # ambiguous but worrying; warrants follow-up
    CRITICAL = "critical"    # imminent self-harm indicators; crisis response
@dataclass
class ImageScreenResult:
    """Result of screening a single image.

    Holds only the model's verdict — never the image itself — so it is
    safe to log and serialize (the module's privacy contract).
    """
    risk_level: RiskLevel
    confidence: float   # model-reported confidence; assumed to be in [0, 1] — TODO confirm upstream
    categories: list    # indicator categories named by the model (e.g. "wounds")
    reasoning: str      # model's free-text justification for the verdict
    model_used: str     # provenance tag, e.g. "ollama:gemma3:4b" or "fallback:heuristic"
    @property
    def needs_crisis_response(self) -> bool:
        # Only CRITICAL triggers the immediate crisis overlay.
        return self.risk_level == RiskLevel.CRITICAL
    @property
    def needs_followup(self) -> bool:
        # Anything above SAFE should at least be logged for follow-up.
        return self.risk_level in (RiskLevel.CONCERNING, RiskLevel.CRITICAL)
    def to_dict(self) -> dict:
        """Serialize to a JSON-safe dict (confidence rounded to 3 places)."""
        return {
            "risk_level": self.risk_level.value,
            "confidence": round(self.confidence, 3),
            "categories": self.categories,
            "reasoning": self.reasoning,
            "model_used": self.model_used,
        }
# Prompt sent verbatim to the vision model. It demands a JSON-only reply so
# the caller can extract and parse the outermost {...} object; the three
# risk_level values must match the RiskLevel enum above.
SCREENING_PROMPT = """Analyze this image for indicators of self-harm or crisis. Look for:
1. Self-harm wounds or injuries
2. Medication/pills in concerning quantities
3. Farewell-type imagery (empty rooms, items laid out, goodbye notes)
4. Screenshots of crisis-related searches
5. Images conveying hopelessness, isolation, or despair
Respond in JSON format ONLY:
{"risk_level": "safe"|"concerning"|"critical", "confidence": 0.0-1.0, "categories": [], "reasoning": ""}
CRITICAL: imminent self-harm indicators
CONCERNING: ambiguous but worrying
SAFE: no indicators detected"""
def _analyze_with_ollama(image_b64, model="gemma3:4b"):
    """Screen an image via a local Ollama vision model.

    Args:
        image_b64: Base64-encoded image payload.
        model: Ollama model tag to query.

    Returns:
        ImageScreenResult on success, or None on any failure (network error,
        non-JSON reply, unknown risk level) so the caller can fall back.
    """
    try:
        # Local import keeps module import cheap when the model path is unused.
        import urllib.request
        payload = json.dumps({
            "model": model,
            "messages": [{
                "role": "user",
                "content": SCREENING_PROMPT,
                "images": [image_b64],
            }],
            "stream": False,
            # Low temperature: screening verdicts should be as repeatable as possible.
            "options": {"temperature": 0.1},
        }).encode()
        req = urllib.request.Request(
            "http://localhost:11434/api/chat",
            data=payload,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        # Context manager closes the HTTP response; the original leaked it.
        with urllib.request.urlopen(req, timeout=30) as resp:
            data = json.loads(resp.read())
        content = data.get("message", {}).get("content", "")
        # Models sometimes wrap the JSON in prose; extract the outermost object.
        json_start = content.find("{")
        json_end = content.rfind("}") + 1
        if json_start == -1 or json_end <= json_start:
            return None
        result = json.loads(content[json_start:json_end])
        return ImageScreenResult(
            risk_level=RiskLevel(result.get("risk_level", "safe")),
            confidence=float(result.get("confidence", 0.5)),
            categories=result.get("categories", []),
            reasoning=result.get("reasoning", ""),
            model_used=f"ollama:{model}",
        )
    except Exception as e:
        # Deliberately broad: any failure degrades to None and the caller's
        # safe fallback, rather than crashing the chat pipeline.
        logger.warning(f"Ollama vision analysis failed: {e}")
        return None
def _analyze_fallback(image_bytes):
    """Conservative default used when no vision model is reachable.

    The image is left unanalyzed: we report SAFE, but with deliberately low
    confidence and an "unanalyzed" category so downstream consumers can tell
    that nothing was actually inspected.
    """
    return ImageScreenResult(
        model_used="fallback:heuristic",
        risk_level=RiskLevel.SAFE,
        confidence=0.2,
        categories=["unanalyzed"],
        reasoning="No vision model available. Defaulting to safe with low confidence.",
    )
def screen_image(image_data, use_vision_model=True, model="gemma3:4b"):
    """Screen image for self-harm indicators. Analyzes in-memory, no retention."""
    # Normalize: accept raw bytes or a base64 string, keep both forms on hand.
    if isinstance(image_data, bytes):
        raw_bytes = image_data
        encoded = base64.b64encode(raw_bytes).decode()
    else:
        encoded = image_data
        raw_bytes = base64.b64decode(encoded)
    if use_vision_model:
        screened = _analyze_with_ollama(encoded, model)
        if screened:
            logger.info(f"Image screened: {screened.risk_level.value} (conf: {screened.confidence:.2f})")
            if screened.needs_crisis_response:
                logger.warning(f"CRITICAL image: {screened.reasoning}")
            return screened
    # Model disabled or unavailable: low-confidence safe default.
    return _analyze_fallback(raw_bytes)
def handle_chat_image(image_data):
    """Handle image from chat. Returns action dict for gateway."""
    result = screen_image(image_data)
    # Pick the user-facing message first; None means no intervention needed.
    response_text = None
    if result.risk_level == RiskLevel.CRITICAL:
        response_text = (
            "I noticed something concerning in the image you shared. "
            "If you or someone you know is in crisis, please reach out: "
            "988 Suicide and Crisis Lifeline (call or text 988). "
            "You are not alone."
        )
    elif result.risk_level == RiskLevel.CONCERNING:
        response_text = (
            "I want to check in \u2014 how are you doing? "
            "If you need to talk to someone, the 988 Lifeline is available 24/7."
        )
    return {
        "result": result.to_dict(),
        "show_crisis_overlay": result.needs_crisis_response,
        "log_event": result.needs_followup,
        "response_text": response_text,
    }

View File

@@ -423,35 +423,6 @@ html, body {
fill: currentColor;
}
/* Chat safety plan button — always visible, subtle */
#chat-safety-plan-btn {
flex-shrink: 0;
width: 44px;
height: 44px;
background: transparent;
color: #8b949e;
border: 1px solid #30363d;
border-radius: 12px;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
transition: background 0.2s, color 0.2s, border-color 0.2s;
-webkit-appearance: none;
}
#chat-safety-plan-btn:hover,
#chat-safety-plan-btn:focus {
background: #161b22;
color: #58a6ff;
border-color: #58a6ff;
outline: 2px solid #58a6ff;
outline-offset: 2px;
}
#chat-safety-plan-btn svg {
width: 20px;
height: 20px;
}
/* ===== MODALS ===== */
.modal-overlay {
position: fixed;
@@ -704,9 +675,6 @@ html, body {
<button id="send-btn" type="button" aria-label="Send message" disabled>
<svg viewBox="0 0 24 24" aria-hidden="true"><path d="M2.01 21L23 12 2.01 3 2 10l15 2-15 2z"/></svg>
</button>
<button id="chat-safety-plan-btn" type="button" aria-label="Open My Safety Plan" title="My Safety Plan">
<svg viewBox="0 0 24 24" aria-hidden="true" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M12 22s8-4 8-10V5l-8-3-8 3v7c0 6 8 10 8 10z"/></svg>
</button>
</div>
</div>
@@ -845,7 +813,6 @@ Sovereignty and service always.`;
// Safety Plan Elements
var safetyPlanBtn = document.getElementById('safety-plan-btn');
var chatSafetyPlanBtn = document.getElementById('chat-safety-plan-btn');
var crisisSafetyPlanBtn = document.getElementById('crisis-safety-plan-btn');
var safetyPlanModal = document.getElementById('safety-plan-modal');
var closeSafetyPlan = document.getElementById('close-safety-plan');
@@ -1323,15 +1290,6 @@ Sovereignty and service always.`;
_activateSafetyPlanFocusTrap(safetyPlanBtn);
});
// Chat input area safety plan button — always visible (#38)
if (chatSafetyPlanBtn) {
chatSafetyPlanBtn.addEventListener('click', function() {
loadSafetyPlan();
safetyPlanModal.classList.add('active');
_activateSafetyPlanFocusTrap(chatSafetyPlanBtn);
});
}
// Crisis panel safety plan button (if crisis panel is visible)
if (crisisSafetyPlanBtn) {
crisisSafetyPlanBtn.addEventListener('click', function() {

View File

@@ -0,0 +1,84 @@
"""Tests for image content screening module."""
import json
from unittest.mock import patch, MagicMock
from image_screening import (
RiskLevel,
ImageScreenResult,
screen_image,
handle_chat_image,
_analyze_fallback,
)
class TestImageScreenResult:
    """Derived properties and serialization of ImageScreenResult."""

    def test_safe_result(self):
        res = ImageScreenResult(
            risk_level=RiskLevel.SAFE,
            confidence=0.95,
            categories=[],
            reasoning="No indicators",
            model_used="test",
        )
        assert not res.needs_crisis_response
        assert not res.needs_followup
        assert res.to_dict()["risk_level"] == "safe"

    def test_critical_result(self):
        res = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL,
            confidence=0.9,
            categories=["wounds"],
            reasoning="Detected",
            model_used="test",
        )
        assert res.needs_crisis_response
        assert res.needs_followup

    def test_concerning_result(self):
        res = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING,
            confidence=0.6,
            categories=["isolation"],
            reasoning="Ambiguous",
            model_used="test",
        )
        assert not res.needs_crisis_response
        assert res.needs_followup
class TestScreenImage:
    """screen_image() input handling with the vision model disabled."""

    def test_fallback_returns_safe(self):
        screened = screen_image(b"fake_image_data", use_vision_model=False)
        assert screened.risk_level == RiskLevel.SAFE
        assert screened.model_used == "fallback:heuristic"
        assert screened.confidence < 0.5

    def test_base64_input(self):
        import base64
        encoded = base64.b64encode(b"fake").decode()
        screened = screen_image(encoded, use_vision_model=False)
        assert screened.risk_level == RiskLevel.SAFE
class TestHandleChatImage:
    """Gateway action dicts produced by handle_chat_image()."""

    @patch("image_screening._analyze_with_ollama")
    def test_safe_image_no_overlay(self, mock_ollama):
        # Patch the model to fail so the fallback (SAFE) path is taken.
        # Without this patch the outcome depended on whether a local
        # Ollama server happened to be running — a flaky, non-hermetic test.
        mock_ollama.return_value = None
        action = handle_chat_image(b"safe_image")
        assert not action["show_crisis_overlay"]
        assert action["response_text"] is None

    @patch("image_screening._analyze_with_ollama")
    def test_critical_image_shows_overlay(self, mock_ollama):
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.95,
            categories=["wounds"], reasoning="Self-harm detected",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"concerning_image")
        assert action["show_crisis_overlay"]
        assert "988" in action["response_text"]
        assert action["log_event"]

    @patch("image_screening._analyze_with_ollama")
    def test_concerning_image_followup(self, mock_ollama):
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Empty room",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"maybe_concerning")
        assert not action["show_crisis_overlay"]
        assert action["log_event"]
        assert "check in" in action["response_text"]

View File

@@ -1,102 +0,0 @@
"""
Tests for #38 — Safety plan accessible from chat (not just overlay).
Verifies:
1. Safety plan button exists in the input area
2. Button has proper ARIA attributes
3. Button is keyboard focusable
4. Button does not require crisis detection to be visible
"""
import re
import unittest
from pathlib import Path
INDEX_HTML = Path(__file__).parent.parent / "index.html"
class TestSafetyPlanInChat(unittest.TestCase):
    """Static-analysis tests: regex-scan index.html for the #38 safety-plan
    button, its accessibility attributes, styling, and wiring. No browser
    is driven; everything is asserted against the raw HTML text."""
    @classmethod
    def setUpClass(cls):
        # Read index.html once; every test inspects this same string.
        cls.html = INDEX_HTML.read_text()
    def test_chat_safety_plan_button_exists(self):
        """Button #chat-safety-plan-btn exists in the DOM."""
        self.assertIn('id="chat-safety-plan-btn"', self.html)
    def test_button_has_aria_label(self):
        """Button has aria-label for screen readers."""
        # NOTE(review): assumes aria-label appears after id= in the tag —
        # the regex will not match if the attribute order is reversed.
        match = re.search(
            r'<button[^>]*id="chat-safety-plan-btn"[^>]*aria-label="([^"]*)"',
            self.html
        )
        self.assertIsNotNone(match, "chat-safety-plan-btn missing aria-label")
        self.assertIn("safety", match.group(1).lower())
    def test_button_has_title(self):
        """Button has title attribute for tooltip."""
        self.assertRegex(
            self.html,
            r'<button[^>]*id="chat-safety-plan-btn"[^>]*title="[^"]*"[^>]*>'
        )
    def test_button_is_in_input_area(self):
        """Button is inside #input-area, not in crisis overlay."""
        # Non-greedy match up to two consecutive closing divs approximates
        # the end of the input-area container.
        input_area = re.search(
            r'<div id="input-area">(.*?)</div>\s*</div>',
            self.html, re.DOTALL
        )
        self.assertIsNotNone(input_area)
        self.assertIn('chat-safety-plan-btn', input_area.group(1))
    def test_button_not_in_crisis_overlay(self):
        """Button is NOT inside #crisis-overlay (always visible, no detection)."""
        overlay = re.search(
            r'<div id="crisis-overlay".*?</div>\s*</div>',
            self.html, re.DOTALL
        )
        # Only asserted when the overlay markup is present at all.
        if overlay:
            self.assertNotIn('chat-safety-plan-btn', overlay.group(0))
    def test_button_has_shield_icon(self):
        """Button includes a shield SVG icon."""
        btn_match = re.search(
            r'<button[^>]*id="chat-safety-plan-btn"[^>]*>(.*?)</button>',
            self.html, re.DOTALL
        )
        self.assertIsNotNone(btn_match)
        self.assertIn('svg', btn_match.group(1).lower())
        # Shield path
        self.assertIn('M12 22s8-4 8-10V5l-8-3-8 3v7c0 6 8 10 8 10z', btn_match.group(1))
    def test_css_exists_for_button(self):
        """CSS rules exist for #chat-safety-plan-btn."""
        self.assertIn('#chat-safety-plan-btn', self.html)
        # Check for hover/focus styles
        self.assertIn('#chat-safety-plan-btn:hover', self.html)
        self.assertIn('#chat-safety-plan-btn:focus', self.html)
    def test_javascript_listener_exists(self):
        """JavaScript event listener exists for the button."""
        self.assertIn('chatSafetyPlanBtn', self.html)
        self.assertIn("chatSafetyPlanBtn.addEventListener('click'", self.html)
    def test_javascript_calls_load_safety_plan(self):
        """Click handler calls loadSafetyPlan() and shows modal."""
        # Non-greedy brace match only reaches the first '}' in the handler;
        # works because the asserted calls appear before any nested block.
        listener = re.search(
            r'chatSafetyPlanBtn\.addEventListener.*?\{(.*?)\}',
            self.html, re.DOTALL
        )
        self.assertIsNotNone(listener)
        body = listener.group(1)
        self.assertIn('loadSafetyPlan()', body)
        self.assertIn("safetyPlanModal.classList.add('active')", body)
    def test_focus_trap_uses_button_as_return_target(self):
        """Focus trap returns focus to chatSafetyPlanBtn when modal closes."""
        self.assertIn('_activateSafetyPlanFocusTrap(chatSafetyPlanBtn)', self.html)
# Allow running this file directly (python test_file.py) as well as via a runner.
if __name__ == "__main__":
    unittest.main()