Compare commits: feat/safet...fix/132 (1 commit, 0ab2626ef2)

image_screening.py (new file, 155 lines)

@@ -0,0 +1,155 @@
"""
Image Content Screening for Self-Harm Indicators

Screens images sent in chat for self-harm content using vision models.
Privacy-preserving: analyzes in-memory, discards after scoring. No image retention.
"""

import base64
import json
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional

logger = logging.getLogger("the-door.image_screening")


class RiskLevel(str, Enum):
    SAFE = "safe"
    CONCERNING = "concerning"
    CRITICAL = "critical"


@dataclass
class ImageScreenResult:
    risk_level: RiskLevel
    confidence: float
    categories: list
    reasoning: str
    model_used: str

    @property
    def needs_crisis_response(self):
        return self.risk_level == RiskLevel.CRITICAL

    @property
    def needs_followup(self):
        return self.risk_level in (RiskLevel.CONCERNING, RiskLevel.CRITICAL)

    def to_dict(self):
        return {
            "risk_level": self.risk_level.value,
            "confidence": round(self.confidence, 3),
            "categories": self.categories,
            "reasoning": self.reasoning,
            "model_used": self.model_used,
        }


SCREENING_PROMPT = """Analyze this image for indicators of self-harm or crisis. Look for:

1. Self-harm wounds or injuries
2. Medication/pills in concerning quantities
3. Farewell-type imagery (empty rooms, items laid out, goodbye notes)
4. Screenshots of crisis-related searches
5. Images conveying hopelessness, isolation, or despair

Respond in JSON format ONLY:
{"risk_level": "safe"|"concerning"|"critical", "confidence": 0.0-1.0, "categories": [], "reasoning": ""}

CRITICAL: imminent self-harm indicators
CONCERNING: ambiguous but worrying
SAFE: no indicators detected"""


def _analyze_with_ollama(image_b64, model="gemma3:4b"):
    try:
        import urllib.request
        payload = json.dumps({
            "model": model,
            "messages": [{
                "role": "user",
                "content": SCREENING_PROMPT,
                "images": [image_b64],
            }],
            "stream": False,
            "options": {"temperature": 0.1},
        }).encode()
        req = urllib.request.Request(
            "http://localhost:11434/api/chat",
            data=payload,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        resp = urllib.request.urlopen(req, timeout=30)
        data = json.loads(resp.read())
        content = data.get("message", {}).get("content", "")
        json_start = content.find("{")
        json_end = content.rfind("}") + 1
        if json_start == -1 or json_end <= json_start:
            return None
        result = json.loads(content[json_start:json_end])
        return ImageScreenResult(
            risk_level=RiskLevel(result.get("risk_level", "safe")),
            confidence=float(result.get("confidence", 0.5)),
            categories=result.get("categories", []),
            reasoning=result.get("reasoning", ""),
            model_used=f"ollama:{model}",
        )
    except Exception as e:
        logger.warning(f"Ollama vision analysis failed: {e}")
        return None


def _analyze_fallback(image_bytes):
    return ImageScreenResult(
        risk_level=RiskLevel.SAFE,
        confidence=0.2,
        categories=["unanalyzed"],
        reasoning="No vision model available. Defaulting to safe with low confidence.",
        model_used="fallback:heuristic",
    )


def screen_image(image_data, use_vision_model=True, model="gemma3:4b"):
    """Screen image for self-harm indicators. Analyzes in-memory, no retention."""
    if isinstance(image_data, bytes):
        image_b64 = base64.b64encode(image_data).decode()
    else:
        image_b64 = image_data
        image_data = base64.b64decode(image_b64)

    if use_vision_model:
        result = _analyze_with_ollama(image_b64, model)
        if result:
            logger.info(f"Image screened: {result.risk_level.value} (conf: {result.confidence:.2f})")
            if result.needs_crisis_response:
                logger.warning(f"CRITICAL image: {result.reasoning}")
            return result

    return _analyze_fallback(image_data)


def handle_chat_image(image_data):
    """Handle image from chat. Returns action dict for gateway."""
    result = screen_image(image_data)
    action = {
        "result": result.to_dict(),
        "show_crisis_overlay": result.needs_crisis_response,
        "log_event": result.needs_followup,
        "response_text": None,
    }
    if result.risk_level == RiskLevel.CRITICAL:
        action["response_text"] = (
            "I noticed something concerning in the image you shared. "
            "If you or someone you know is in crisis, please reach out: "
            "988 Suicide and Crisis Lifeline (call or text 988). "
            "You are not alone."
        )
    elif result.risk_level == RiskLevel.CONCERNING:
        action["response_text"] = (
            "I want to check in — how are you doing? "
            "If you need to talk to someone, the 988 Lifeline is available 24/7."
        )
    return action
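
As a usage sketch (not part of this commit): a gateway that receives chat uploads could route each image through handle_chat_image and act on the returned dict. The names process_chat_upload, send_message, and log_safety_event below are hypothetical placeholders for whatever the gateway actually exposes.

# Hypothetical gateway-side integration (illustrative only, not from the commit).
from image_screening import handle_chat_image


def process_chat_upload(image_bytes, send_message, log_safety_event):
    """Screen one uploaded chat image and act on the resulting action dict."""
    action = handle_chat_image(image_bytes)

    # CONCERNING and CRITICAL screenings are flagged for follow-up review.
    if action["log_event"]:
        log_safety_event(action["result"])

    # CRITICAL results also set show_crisis_overlay; the UI decides rendering.
    if action["response_text"]:
        send_message(action["response_text"], overlay=action["show_crisis_overlay"])

    return action

Injecting the callbacks keeps transport concerns out of the screening module, consistent with its in-memory, no-retention design.
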
index.html (119 lines changed)

@@ -739,7 +739,6 @@ html, body {
    </div>
    <div class="modal-footer">
      <button class="btn btn-secondary" id="cancel-safety-plan">Cancel</button>
      <button class="btn btn-secondary" id="history-safety-plan" style="margin-right:auto;">History</button>
      <button class="btn btn-primary" id="save-safety-plan">Save Plan</button>
    </div>
  </div>
@@ -1203,7 +1202,6 @@ Sovereignty and service always.`;
      environment: document.getElementById('sp-environment').value
    };
    try {
      _pushPlanVersion(plan);
      localStorage.setItem('timmy_safety_plan', JSON.stringify(plan));
      safetyPlanModal.classList.remove('active');
      _restoreSafetyPlanFocus();
@@ -1213,123 +1211,6 @@ Sovereignty and service always.`;
    }
  });


  // ===== SAFETY PLAN VERSION HISTORY =====
  var MAX_HISTORY = 20;

  function _getPlanHistory() {
    try {
      var raw = localStorage.getItem('timmy_safety_plan_history');
      return raw ? JSON.parse(raw) : [];
    } catch (e) { return []; }
  }

  function _pushPlanVersion(plan) {
    var history = _getPlanHistory();
    var last = history.length > 0 ? history[history.length - 1] : null;
    // Only save if different from last version
    if (last && JSON.stringify(last.plan) === JSON.stringify(plan)) return;
    history.push({ ts: Date.now(), plan: plan });
    if (history.length > MAX_HISTORY) history = history.slice(-MAX_HISTORY);
    localStorage.setItem('timmy_safety_plan_history', JSON.stringify(history));
  }

  function _diffPlans(oldPlan, newPlan) {
    var fields = ['warningSigns', 'coping', 'distraction', 'help', 'environment'];
    var labels = { warningSigns: 'Warning signs', coping: 'Coping strategies', distraction: 'Distractions', help: 'People who can help', environment: 'Safe environment' };
    var diffs = [];
    fields.forEach(function(f) {
      var oldVal = (oldPlan[f] || '').trim();
      var newVal = (newPlan[f] || '').trim();
      if (oldVal !== newVal) {
        diffs.push({ field: labels[f], old: oldVal, _new: newVal });
      }
    });
    return diffs;
  }

  var historyModal = null;

  function _showHistoryModal() {
    var history = _getPlanHistory();
    if (history.length === 0) {
      alert('No saved versions yet.');
      return;
    }

    // Create modal if not exists
    if (!historyModal) {
      historyModal = document.createElement('div');
      historyModal.id = 'sp-history-modal';
      historyModal.className = 'modal-overlay';
      historyModal.setAttribute('role', 'dialog');
      historyModal.setAttribute('aria-modal', 'true');
      historyModal.innerHTML = '<div class="modal-content" style="max-width:600px;max-height:80vh;overflow-y:auto;">' +
        '<div class="modal-header"><h2>Safety Plan History</h2><button class="close-modal" id="close-sp-history" aria-label="Close">×</button></div>' +
        '<div class="modal-body" id="sp-history-body"></div>' +
        '</div>';
      document.body.appendChild(historyModal);
      document.getElementById('close-sp-history').addEventListener('click', function() {
        historyModal.classList.remove('active');
      });
      historyModal.addEventListener('click', function(e) {
        if (e.target === historyModal) historyModal.classList.remove('active');
      });
    }

    var body = document.getElementById('sp-history-body');
    var html = '';
    for (var i = history.length - 1; i >= 0; i--) {
      var entry = history[i];
      var date = new Date(entry.ts);
      var label = date.toLocaleDateString() + ' ' + date.toLocaleTimeString([], {hour:'2-digit', minute:'2-digit'});
      var prev = i > 0 ? history[i - 1].plan : {};
      var diffs = _diffPlans(prev, entry.plan);

      html += '<div style="border:1px solid #30363d;border-radius:8px;padding:12px;margin-bottom:10px;">';
      html += '<div style="display:flex;justify-content:space-between;align-items:center;margin-bottom:8px;">';
      html += '<strong style="color:#c9d1d9;">v' + (i + 1) + ' — ' + label + '</strong>';
      html += '<button class="btn btn-secondary sp-restore-btn" data-idx="' + i + '" style="font-size:0.8rem;padding:4px 10px;">Restore</button>';
      html += '</div>';

      if (diffs.length === 0 && i === 0) {
        html += '<span style="color:#8b949e;font-size:0.85rem;">Initial version</span>';
      } else if (diffs.length === 0) {
        html += '<span style="color:#8b949e;font-size:0.85rem;">No changes from previous</span>';
      } else {
        diffs.forEach(function(d) {
          html += '<div style="margin-bottom:6px;">';
          html += '<div style="color:#8b949e;font-size:0.8rem;">' + d.field + '</div>';
          if (d.old) html += '<div style="color:#f85149;font-size:0.85rem;text-decoration:line-through;">' + d.old.substring(0, 120) + '</div>';
          if (d._new) html += '<div style="color:#3fb950;font-size:0.85rem;">' + d._new.substring(0, 120) + '</div>';
          html += '</div>';
        });
      }
      html += '</div>';
    }
    body.innerHTML = html;

    // Wire restore buttons
    body.querySelectorAll('.sp-restore-btn').forEach(function(btn) {
      btn.addEventListener('click', function() {
        var idx = parseInt(btn.dataset.idx);
        var plan = history[idx].plan;
        document.getElementById('sp-warning-signs').value = plan.warningSigns || '';
        document.getElementById('sp-coping').value = plan.coping || '';
        document.getElementById('sp-distraction').value = plan.distraction || '';
        document.getElementById('sp-help').value = plan.help || '';
        document.getElementById('sp-environment').value = plan.environment || '';
        localStorage.setItem('timmy_safety_plan', JSON.stringify(plan));
        historyModal.classList.remove('active');
        alert('Restored version ' + (idx + 1) + '.');
      });
    });

    historyModal.classList.add('active');
  }

  document.getElementById('history-safety-plan').addEventListener('click', _showHistoryModal);

  // ===== SAFETY PLAN FOCUS TRAP (fix #65) =====
  // Focusable elements inside the modal, in tab order
  var _spFocusableIds = [

tests/test_image_screening.py (new file, 84 lines)

@@ -0,0 +1,84 @@
"""Tests for image content screening module."""

import json
from unittest.mock import patch, MagicMock

from image_screening import (
    RiskLevel,
    ImageScreenResult,
    screen_image,
    handle_chat_image,
    _analyze_fallback,
)


class TestImageScreenResult:
    def test_safe_result(self):
        result = ImageScreenResult(
            risk_level=RiskLevel.SAFE, confidence=0.95,
            categories=[], reasoning="No indicators", model_used="test"
        )
        assert not result.needs_crisis_response
        assert not result.needs_followup
        assert result.to_dict()["risk_level"] == "safe"

    def test_critical_result(self):
        result = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.9,
            categories=["wounds"], reasoning="Detected", model_used="test"
        )
        assert result.needs_crisis_response
        assert result.needs_followup

    def test_concerning_result(self):
        result = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Ambiguous", model_used="test"
        )
        assert not result.needs_crisis_response
        assert result.needs_followup


class TestScreenImage:
    def test_fallback_returns_safe(self):
        result = screen_image(b"fake_image_data", use_vision_model=False)
        assert result.risk_level == RiskLevel.SAFE
        assert result.model_used == "fallback:heuristic"
        assert result.confidence < 0.5

    def test_base64_input(self):
        import base64
        b64 = base64.b64encode(b"fake").decode()
        result = screen_image(b64, use_vision_model=False)
        assert result.risk_level == RiskLevel.SAFE


class TestHandleChatImage:
    def test_safe_image_no_overlay(self):
        action = handle_chat_image(b"safe_image")
        assert not action["show_crisis_overlay"]
        assert action["response_text"] is None

    @patch("image_screening._analyze_with_ollama")
    def test_critical_image_shows_overlay(self, mock_ollama):
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.95,
            categories=["wounds"], reasoning="Self-harm detected",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"concerning_image")
        assert action["show_crisis_overlay"]
        assert "988" in action["response_text"]
        assert action["log_event"]

    @patch("image_screening._analyze_with_ollama")
    def test_concerning_image_followup(self, mock_ollama):
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Empty room",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"maybe_concerning")
        assert not action["show_crisis_overlay"]
        assert action["log_event"]
        assert "check in" in action["response_text"]
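
A possible follow-on test (not in this commit) would exercise _analyze_with_ollama directly by patching urllib.request.urlopen, covering the case where the model wraps its JSON in extra prose; the reply text below is illustrative.

# Hypothetical additional test: stubs urllib so no Ollama server is needed.
import json
from unittest.mock import patch, MagicMock

from image_screening import RiskLevel, _analyze_with_ollama


@patch("urllib.request.urlopen")
def test_ollama_reply_wrapped_in_prose(mock_urlopen):
    reply = ('Here is my assessment: {"risk_level": "concerning", "confidence": 0.7, '
             '"categories": ["isolation"], "reasoning": "Empty room"} Hope that helps.')
    fake_resp = MagicMock()
    fake_resp.read.return_value = json.dumps({"message": {"content": reply}}).encode()
    mock_urlopen.return_value = fake_resp

    result = _analyze_with_ollama("ZmFrZQ==")  # base64 of b"fake"
    assert result is not None
    assert result.risk_level == RiskLevel.CONCERNING
    assert result.model_used == "ollama:gemma3:4b"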