Compare commits
1 Commits
fix/issue-...fix/132

| Author | SHA1 | Date |
|---|---|---|
|  | 0ab2626ef2 |  |

image_screening.py (new file, 155 lines)
@@ -0,0 +1,155 @@
"""
Image Content Screening for Self-Harm Indicators

Screens images sent in chat for self-harm content using vision models.
Privacy-preserving: analyzes in-memory, discards after scoring. No image retention.
"""

import base64
import json
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional

logger = logging.getLogger("the-door.image_screening")


class RiskLevel(str, Enum):
    SAFE = "safe"
    CONCERNING = "concerning"
    CRITICAL = "critical"


@dataclass
class ImageScreenResult:
    risk_level: RiskLevel
    confidence: float
    categories: list
    reasoning: str
    model_used: str

    @property
    def needs_crisis_response(self):
        return self.risk_level == RiskLevel.CRITICAL

    @property
    def needs_followup(self):
        return self.risk_level in (RiskLevel.CONCERNING, RiskLevel.CRITICAL)

    def to_dict(self):
        return {
            "risk_level": self.risk_level.value,
            "confidence": round(self.confidence, 3),
            "categories": self.categories,
            "reasoning": self.reasoning,
            "model_used": self.model_used,
        }
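
For orientation, a quick illustration of how the result type behaves (values are made up): CONCERNING sets needs_followup but not needs_crisis_response.

    r = ImageScreenResult(
        risk_level=RiskLevel.CONCERNING,
        confidence=0.72,
        categories=["isolation"],
        reasoning="Empty room, ambiguous context",
        model_used="ollama:gemma3:4b",
    )
    assert r.needs_followup and not r.needs_crisis_response
    assert r.to_dict()["risk_level"] == "concerning"
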
SCREENING_PROMPT = """Analyze this image for indicators of self-harm or crisis. Look for:

1. Self-harm wounds or injuries
2. Medication/pills in concerning quantities
3. Farewell-type imagery (empty rooms, items laid out, goodbye notes)
4. Screenshots of crisis-related searches
5. Images conveying hopelessness, isolation, or despair

Respond in JSON format ONLY:
{"risk_level": "safe"|"concerning"|"critical", "confidence": 0.0-1.0, "categories": [], "reasoning": ""}

CRITICAL: imminent self-harm indicators
CONCERNING: ambiguous but worrying
SAFE: no indicators detected"""
def _analyze_with_ollama(image_b64, model="gemma3:4b"):
    try:
        import urllib.request
        payload = json.dumps({
            "model": model,
            "messages": [{
                "role": "user",
                "content": SCREENING_PROMPT,
                "images": [image_b64],
            }],
            "stream": False,
            "options": {"temperature": 0.1},
        }).encode()
        req = urllib.request.Request(
            "http://localhost:11434/api/chat",
            data=payload,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        resp = urllib.request.urlopen(req, timeout=30)
        data = json.loads(resp.read())
        content = data.get("message", {}).get("content", "")
        json_start = content.find("{")
        json_end = content.rfind("}") + 1
        if json_start == -1 or json_end <= json_start:
            return None
        result = json.loads(content[json_start:json_end])
        return ImageScreenResult(
            risk_level=RiskLevel(result.get("risk_level", "safe")),
            confidence=float(result.get("confidence", 0.5)),
            categories=result.get("categories", []),
            reasoning=result.get("reasoning", ""),
            model_used=f"ollama:{model}",
        )
    except Exception as e:
        logger.warning(f"Ollama vision analysis failed: {e}")
        return None
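
Vision models often wrap their JSON reply in prose; the find/rfind brace scan above is what tolerates that. A standalone sketch of just the extraction step (the reply text is invented):

    import json

    content = ('Here is my assessment:\n'
               '{"risk_level": "safe", "confidence": 0.92, "categories": [], "reasoning": "Ordinary photo"}\n'
               'Hope that helps!')
    start, end = content.find("{"), content.rfind("}") + 1
    parsed = json.loads(content[start:end])
    assert parsed["risk_level"] == "safe"
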
def _analyze_fallback(image_bytes):
    return ImageScreenResult(
        risk_level=RiskLevel.SAFE,
        confidence=0.2,
        categories=["unanalyzed"],
        reasoning="No vision model available. Defaulting to safe with low confidence.",
        model_used="fallback:heuristic",
    )
def screen_image(image_data, use_vision_model=True, model="gemma3:4b"):
    """Screen image for self-harm indicators. Analyzes in-memory, no retention."""
    if isinstance(image_data, bytes):
        image_b64 = base64.b64encode(image_data).decode()
    else:
        image_b64 = image_data
        image_data = base64.b64decode(image_b64)

    if use_vision_model:
        result = _analyze_with_ollama(image_b64, model)
        if result:
            logger.info(f"Image screened: {result.risk_level.value} (conf: {result.confidence:.2f})")
            if result.needs_crisis_response:
                logger.warning(f"CRITICAL image: {result.reasoning}")
            return result

    return _analyze_fallback(image_data)
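
Both input forms are accepted by screen_image; with use_vision_model=False they take the fallback path, so this runs without Ollama (bytes are illustrative, and the snippet relies on the module's own base64 import):

    raw = b"\x89PNG fake bytes"
    screen_image(raw, use_vision_model=False)
    screen_image(base64.b64encode(raw).decode(), use_vision_model=False)  # same result path
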
def handle_chat_image(image_data):
    """Handle image from chat. Returns action dict for gateway."""
    result = screen_image(image_data)
    action = {
        "result": result.to_dict(),
        "show_crisis_overlay": result.needs_crisis_response,
        "log_event": result.needs_followup,
        "response_text": None,
    }
    if result.risk_level == RiskLevel.CRITICAL:
        action["response_text"] = (
            "I noticed something concerning in the image you shared. "
            "If you or someone you know is in crisis, please reach out: "
            "988 Suicide and Crisis Lifeline (call or text 988). "
            "You are not alone."
        )
    elif result.risk_level == RiskLevel.CONCERNING:
        action["response_text"] = (
            "I want to check in — how are you doing? "
            "If you need to talk to someone, the 988 Lifeline is available 24/7."
        )
    return action
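
The diff does not show the gateway side; here is a minimal sketch of how a caller might consume the action dict. The names on_chat_image, send_reply, and show_overlay are hypothetical, not part of this PR.

    from image_screening import handle_chat_image

    def on_chat_image(image_bytes, send_reply, show_overlay):
        # send_reply/show_overlay are assumed callbacks supplied by the chat app.
        action = handle_chat_image(image_bytes)
        if action["show_crisis_overlay"]:
            show_overlay()  # surface crisis resources immediately
        if action["response_text"]:
            send_reply(action["response_text"])
        if action["log_event"]:
            # Screening metadata only; per the module docstring, no image is retained.
            pass  # hand action["result"] to the app's event log here
        return action
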
index.html (43 lines changed)
@@ -475,26 +475,6 @@ html, body {
    margin-bottom: 24px;
}

.modal-status {
    min-height: 22px;
    margin: 0 0 16px;
    font-size: 0.9rem;
    line-height: 1.45;
    color: #8b949e;
}

.modal-status.is-visible {
    display: block;
}

.modal-status.success {
    color: #3fb950;
}

.modal-status.error {
    color: #ff7b72;
}

.form-group {
    margin-bottom: 16px;
}

@@ -757,7 +737,6 @@ html, body {
        <textarea id="sp-environment" placeholder="e.g., Giving my car keys to a friend, locking away meds..."></textarea>
      </div>
    </div>
    <div id="safety-plan-status" class="modal-status" role="status" aria-live="polite" aria-atomic="true"></div>
    <div class="modal-footer">
      <button class="btn btn-secondary" id="cancel-safety-plan">Cancel</button>
      <button class="btn btn-primary" id="save-safety-plan">Save Plan</button>

@@ -839,7 +818,6 @@ Sovereignty and service always.`;
    var closeSafetyPlan = document.getElementById('close-safety-plan');
    var cancelSafetyPlan = document.getElementById('cancel-safety-plan');
    var saveSafetyPlan = document.getElementById('save-safety-plan');
    var safetyPlanStatus = document.getElementById('safety-plan-status');
    var clearChatBtn = document.getElementById('clear-chat-btn');

    // ===== STATE =====

@@ -1205,24 +1183,12 @@ Sovereignty and service always.`;
        } catch (e) {}
    }

    function setSafetyPlanStatus(message, type) {
        safetyPlanStatus.textContent = message;
        safetyPlanStatus.className = 'modal-status is-visible ' + (type || '');
    }

    function clearSafetyPlanStatus() {
        safetyPlanStatus.textContent = '';
        safetyPlanStatus.className = 'modal-status';
    }

    closeSafetyPlan.addEventListener('click', function() {
        clearSafetyPlanStatus();
        safetyPlanModal.classList.remove('active');
        _restoreSafetyPlanFocus();
    });

    cancelSafetyPlan.addEventListener('click', function() {
        clearSafetyPlanStatus();
        safetyPlanModal.classList.remove('active');
        _restoreSafetyPlanFocus();
    });

@@ -1237,9 +1203,11 @@ Sovereignty and service always.`;
        };
        try {
            localStorage.setItem('timmy_safety_plan', JSON.stringify(plan));
            setSafetyPlanStatus('Safety plan saved locally.', 'success');
            safetyPlanModal.classList.remove('active');
            _restoreSafetyPlanFocus();
            alert('Safety plan saved locally.');
        } catch (e) {
            setSafetyPlanStatus('Error saving plan.', 'error');
            alert('Error saving plan.');
        }
    });

@@ -1317,7 +1285,6 @@ Sovereignty and service always.`;

    // Wire open buttons to activate focus trap
    safetyPlanBtn.addEventListener('click', function() {
        clearSafetyPlanStatus();
        loadSafetyPlan();
        safetyPlanModal.classList.add('active');
        _activateSafetyPlanFocusTrap(safetyPlanBtn);

@@ -1326,8 +1293,6 @@ Sovereignty and service always.`;
    // Crisis panel safety plan button (if crisis panel is visible)
    if (crisisSafetyPlanBtn) {
        crisisSafetyPlanBtn.addEventListener('click', function() {
            clearSafetyPlanStatus();
            loadSafetyPlan();
            safetyPlanModal.classList.add('active');
            _activateSafetyPlanFocusTrap(crisisSafetyPlanBtn);
tests/test_image_screening.py (new file, 84 lines)
@@ -0,0 +1,84 @@
"""Tests for image content screening module."""

import json
from unittest.mock import patch, MagicMock

from image_screening import (
    RiskLevel,
    ImageScreenResult,
    screen_image,
    handle_chat_image,
    _analyze_fallback,
)


class TestImageScreenResult:
    def test_safe_result(self):
        result = ImageScreenResult(
            risk_level=RiskLevel.SAFE, confidence=0.95,
            categories=[], reasoning="No indicators", model_used="test"
        )
        assert not result.needs_crisis_response
        assert not result.needs_followup
        assert result.to_dict()["risk_level"] == "safe"

    def test_critical_result(self):
        result = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.9,
            categories=["wounds"], reasoning="Detected", model_used="test"
        )
        assert result.needs_crisis_response
        assert result.needs_followup

    def test_concerning_result(self):
        result = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Ambiguous", model_used="test"
        )
        assert not result.needs_crisis_response
        assert result.needs_followup


class TestScreenImage:
    def test_fallback_returns_safe(self):
        result = screen_image(b"fake_image_data", use_vision_model=False)
        assert result.risk_level == RiskLevel.SAFE
        assert result.model_used == "fallback:heuristic"
        assert result.confidence < 0.5

    def test_base64_input(self):
        import base64
        b64 = base64.b64encode(b"fake").decode()
        result = screen_image(b64, use_vision_model=False)
        assert result.risk_level == RiskLevel.SAFE


class TestHandleChatImage:
    def test_safe_image_no_overlay(self):
        action = handle_chat_image(b"safe_image")
        assert not action["show_crisis_overlay"]
        assert action["response_text"] is None

    @patch("image_screening._analyze_with_ollama")
    def test_critical_image_shows_overlay(self, mock_ollama):
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.95,
            categories=["wounds"], reasoning="Self-harm detected",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"concerning_image")
        assert action["show_crisis_overlay"]
        assert "988" in action["response_text"]
        assert action["log_event"]

    @patch("image_screening._analyze_with_ollama")
    def test_concerning_image_followup(self, mock_ollama):
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Empty room",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"maybe_concerning")
        assert not action["show_crisis_overlay"]
        assert action["log_event"]
        assert "check in" in action["response_text"]
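
The suite above exercises the fallback path and pre-built results but not _analyze_with_ollama's parsing; a happy-path test could patch urllib directly. A sketch, assuming Ollama's non-streaming /api/chat envelope of {"message": {"content": ...}} (worth verifying against your Ollama version):

    import json
    from unittest.mock import patch, MagicMock

    from image_screening import _analyze_with_ollama, RiskLevel

    def test_ollama_happy_path():
        fake_resp = MagicMock()
        fake_resp.read.return_value = json.dumps({
            "message": {"content": '{"risk_level": "concerning", "confidence": 0.7,'
                                   ' "categories": ["isolation"], "reasoning": "Empty room"}'}
        }).encode()
        with patch("urllib.request.urlopen", return_value=fake_resp):
            result = _analyze_with_ollama("aGVsbG8=")  # any base64 payload; no request is sent
        assert result.risk_level == RiskLevel.CONCERNING
        assert result.confidence == 0.7
        assert result.model_used == "ollama:gemma3:4b"
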
@@ -1,52 +0,0 @@
import pathlib
import re
import unittest

ROOT = pathlib.Path(__file__).resolve().parents[1]
INDEX_HTML = ROOT / 'index.html'


class TestSafetyPlanSaveFeedback(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.html = INDEX_HTML.read_text()

    def test_modal_has_inline_status_live_region(self):
        self.assertRegex(
            self.html,
            r'<div[^>]+id="safety-plan-status"[^>]+role="status"[^>]+aria-live="polite"[^>]*>',
            'Expected an inline polite live region for safety plan save feedback.',
        )

    def test_save_feedback_does_not_use_blocking_alerts(self):
        self.assertNotIn(
            "alert('Safety plan saved locally.')",
            self.html,
            'Expected success feedback to stop using blocking alert().',
        )
        self.assertNotIn(
            "alert('Error saving plan.')",
            self.html,
            'Expected error feedback to stop using blocking alert().',
        )

    def test_save_logic_updates_inline_status_for_success_and_error(self):
        self.assertRegex(
            self.html,
            r'function\s+setSafetyPlanStatus\s*\(',
            'Expected a helper to update inline save feedback.',
        )
        self.assertRegex(
            self.html,
            r"setSafetyPlanStatus\('Safety plan saved locally\.'\s*,\s*'success'\)",
            'Expected success path to update inline status.',
        )
        self.assertRegex(
            self.html,
            r"setSafetyPlanStatus\('Error saving plan\.'\s*,\s*'error'\)",
            'Expected error path to update inline status.',
        )


if __name__ == '__main__':
    unittest.main()