Compare commits
4 Commits
fix/132
...
burn/99-17
| Author | SHA1 | Date | |
|---|---|---|---|
| 61ac275880 | |||
| 36f65a43ef | |||
| 89afc9d7c4 | |||
| c4a14d7bba |
@@ -7,6 +7,8 @@ Stands between a broken man and a machine that would tell him to die.
|
||||
from .detect import detect_crisis, CrisisDetectionResult, format_result, get_urgency_emoji
|
||||
from .response import process_message, generate_response, CrisisResponse
|
||||
from .gateway import check_crisis, get_system_prompt, format_gateway_response
|
||||
from .tracker import log_escalation, get_escalations, mark_resolved, get_stats
|
||||
from .bridge import handle_crisis_api
|
||||
|
||||
__all__ = [
|
||||
"detect_crisis",
|
||||
@@ -19,4 +21,9 @@ __all__ = [
|
||||
"format_result",
|
||||
"format_gateway_response",
|
||||
"get_urgency_emoji",
|
||||
"log_escalation",
|
||||
"get_escalations",
|
||||
"mark_resolved",
|
||||
"get_stats",
|
||||
"handle_crisis_api",
|
||||
]
|
||||
|
||||
168
crisis/bridge.py
Normal file
168
crisis/bridge.py
Normal file
@@ -0,0 +1,168 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Crisis Bridge — HTTP integration between the-door web and hermes-agent.
|
||||
|
||||
Provides:
|
||||
- GET /api/crisis/escalations — list recent escalation events
|
||||
- GET /api/crisis/stats — aggregate statistics
|
||||
- POST /api/crisis/log — log a new escalation (from hermes-agent)
|
||||
- POST /api/crisis/resolve/:id — mark escalation as resolved
|
||||
|
||||
Can be mounted as an ASGI/FastAPI sub-app or used standalone.
|
||||
Falls back to a simple HTTP server if no framework is available.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from typing import Optional
|
||||
|
||||
from .tracker import (
|
||||
log_escalation,
|
||||
get_escalations,
|
||||
mark_resolved,
|
||||
get_stats,
|
||||
)
|
||||
|
||||
|
||||
def handle_crisis_api(method: str, path: str, body: Optional[str] = None,
                      hermes_home: Optional[str] = None) -> dict:
    """
    Handle a crisis API request. Returns dict with status, headers, body.

    Args:
        method: HTTP method (GET, POST)
        path: Request path, optionally with a query string
              (e.g., "/api/crisis/escalations?limit=10")
        body: JSON request body (for POST)
        hermes_home: Override HERMES_HOME path

    Returns:
        {"status": int, "headers": dict, "body": str}
    """
    # Keep the raw path (query string included) for parameter parsing, but
    # match routes against the bare path. Previously the query string stayed
    # attached, so "/api/crisis/escalations?limit=10" never matched the
    # route and the ?limit/?source/... filters were silently unreachable.
    raw_path = path
    path = path.split("?", 1)[0].rstrip("/")

    # GET /api/crisis/escalations
    if method == "GET" and path == "/api/crisis/escalations":
        params = _parse_query(raw_path)
        try:
            limit = int(params.get("limit", 50))
        except ValueError:
            # Reject a non-numeric limit instead of crashing with a 500.
            return _json_response(400, {"error": "Invalid limit parameter"})
        events = get_escalations(
            limit=limit,
            source=params.get("source"),
            level=params.get("level"),
            session_id=params.get("session_id"),
            since=params.get("since"),
            hermes_home=hermes_home,
        )
        return _json_response(200, {"events": events, "count": len(events)})

    # GET /api/crisis/stats
    if method == "GET" and path == "/api/crisis/stats":
        stats = get_stats(hermes_home=hermes_home)
        return _json_response(200, stats)

    # POST /api/crisis/log
    if method == "POST" and path == "/api/crisis/log":
        if not body:
            return _json_response(400, {"error": "Missing request body"})
        try:
            data = json.loads(body)
        except json.JSONDecodeError:
            return _json_response(400, {"error": "Invalid JSON"})

        # These fields have no sensible defaults; reject if absent.
        required = ["source", "session_id", "level", "indicators"]
        missing = [f for f in required if f not in data]
        if missing:
            return _json_response(400, {"error": f"Missing fields: {missing}"})

        event = log_escalation(
            source=data["source"],
            session_id=data["session_id"],
            level=data["level"],
            indicators=data.get("indicators", []),
            score=data.get("score", 0.0),
            action_taken=data.get("action_taken", ""),
            hermes_home=hermes_home,
        )
        return _json_response(201, event)

    # POST /api/crisis/resolve/:id
    if method == "POST" and path.startswith("/api/crisis/resolve/"):
        event_id = path.split("/")[-1]
        if mark_resolved(event_id, hermes_home=hermes_home):
            return _json_response(200, {"resolved": True, "id": event_id})
        return _json_response(404, {"error": "Event not found"})

    return _json_response(404, {"error": "Not found"})
|
||||
|
||||
|
||||
def _parse_query(path: str) -> dict:
|
||||
"""Extract query parameters from path."""
|
||||
params = {}
|
||||
if "?" in path:
|
||||
query = path.split("?", 1)[1]
|
||||
for pair in query.split("&"):
|
||||
if "=" in pair:
|
||||
k, v = pair.split("=", 1)
|
||||
params[k] = v
|
||||
return params
|
||||
|
||||
|
||||
def _json_response(status: int, body: dict) -> dict:
|
||||
"""Format a JSON response."""
|
||||
return {
|
||||
"status": status,
|
||||
"headers": {"Content-Type": "application/json"},
|
||||
"body": json.dumps(body, indent=2),
|
||||
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Standalone server (for development / testing)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def run_standalone(host: str = "127.0.0.1", port: int = 8650,
                   hermes_home: Optional[str] = None):
    """Run a minimal HTTP server for the crisis API."""
    try:
        from http.server import HTTPServer, BaseHTTPRequestHandler
    except ImportError:
        print("http.server not available")
        return

    class Handler(BaseHTTPRequestHandler):
        # Thin adapter: translate handler callbacks into handle_crisis_api()
        # calls, then write the result dict back onto the wire.

        def do_GET(self):
            self._send(handle_crisis_api("GET", self.path,
                                         hermes_home=hermes_home))

        def do_POST(self):
            content_length = int(self.headers.get("Content-Length", 0))
            payload = None
            if content_length > 0:
                payload = self.rfile.read(content_length).decode()
            self._send(handle_crisis_api("POST", self.path, payload,
                                         hermes_home=hermes_home))

        def _send(self, result):
            self.send_response(result["status"])
            for name, value in result["headers"].items():
                self.send_header(name, value)
            self.end_headers()
            self.wfile.write(result["body"].encode())

        def log_message(self, format, *args):
            pass  # Suppress default logging

    server = HTTPServer((host, port), Handler)
    print(f"Crisis bridge running at http://{host}:{port}")
    print("  GET /api/crisis/escalations")
    print("  GET /api/crisis/stats")
    print("  POST /api/crisis/log")
    print("  POST /api/crisis/resolve/:id")
    server.serve_forever()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: run the standalone development server.
    import argparse

    cli = argparse.ArgumentParser(description="Crisis Bridge API Server")
    cli.add_argument("--host", default="127.0.0.1")
    cli.add_argument("--port", type=int, default=8650)
    cli.add_argument("--hermes-home", default=None)
    opts = cli.parse_args()
    run_standalone(opts.host, opts.port, opts.hermes_home)
|
||||
178
crisis/test_tracker.py
Normal file
178
crisis/test_tracker.py
Normal file
@@ -0,0 +1,178 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Tests for crisis/tracker.py and crisis/bridge.py — shared escalation state.
|
||||
|
||||
Run with: python -m pytest crisis/test_tracker.py -v
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from crisis.tracker import log_escalation, get_escalations, mark_resolved, get_stats
|
||||
from crisis.bridge import handle_crisis_api
|
||||
|
||||
|
||||
class TestTracker(unittest.TestCase):
    """Test the shared escalation tracker.

    Every test passes hermes_home=self.tmpdir so reads and writes hit a
    private temp directory rather than the real ~/.hermes state.
    """

    def setUp(self):
        # Fresh temp dir per test for full isolation between tests.
        self.tmpdir = tempfile.mkdtemp()
        # NOTE(review): escalations_path is computed but unused below; the
        # tests go through the hermes_home= override instead — confirm it
        # is intentional to keep it.
        self.escalations_path = os.path.join(self.tmpdir, "crisis_escalations.jsonl")

    def test_log_and_read(self):
        # log_escalation returns the stored event with the server-side
        # fields (id, timestamp, resolved) filled in.
        event = log_escalation(
            source="the-door",
            session_id="web-123",
            level="HIGH",
            indicators=["hopeless"],
            score=0.75,
            action_taken="Showed crisis panel",
            hermes_home=self.tmpdir,
        )
        self.assertIn("id", event)
        self.assertIn("timestamp", event)
        self.assertEqual(event["source"], "the-door")
        self.assertEqual(event["level"], "HIGH")
        self.assertFalse(event["resolved"])

        # Read back
        events = get_escalations(hermes_home=self.tmpdir)
        self.assertEqual(len(events), 1)
        self.assertEqual(events[0]["id"], event["id"])

    def test_filter_by_source(self):
        # Events from the two writers can be separated by source.
        log_escalation("the-door", "w1", "LOW", [], 0.25, hermes_home=self.tmpdir)
        log_escalation("hermes-agent", "c1", "HIGH", [], 0.75, hermes_home=self.tmpdir)

        door_events = get_escalations(source="the-door", hermes_home=self.tmpdir)
        agent_events = get_escalations(source="hermes-agent", hermes_home=self.tmpdir)

        self.assertEqual(len(door_events), 1)
        self.assertEqual(len(agent_events), 1)
        self.assertEqual(door_events[0]["source"], "the-door")
        self.assertEqual(agent_events[0]["source"], "hermes-agent")

    def test_filter_by_level(self):
        log_escalation("the-door", "w1", "CRITICAL", [], 1.0, hermes_home=self.tmpdir)
        log_escalation("the-door", "w1", "LOW", [], 0.25, hermes_home=self.tmpdir)

        critical = get_escalations(level="CRITICAL", hermes_home=self.tmpdir)
        self.assertEqual(len(critical), 1)

    def test_filter_by_session(self):
        log_escalation("the-door", "session-A", "HIGH", [], 0.75, hermes_home=self.tmpdir)
        log_escalation("the-door", "session-B", "HIGH", [], 0.75, hermes_home=self.tmpdir)

        events = get_escalations(session_id="session-A", hermes_home=self.tmpdir)
        self.assertEqual(len(events), 1)

    def test_mark_resolved(self):
        # Resolving flips the flag and stamps resolved_at on re-read.
        event = log_escalation("the-door", "w1", "HIGH", [], 0.75, hermes_home=self.tmpdir)
        self.assertFalse(event["resolved"])

        result = mark_resolved(event["id"], hermes_home=self.tmpdir)
        self.assertTrue(result)

        events = get_escalations(hermes_home=self.tmpdir)
        self.assertTrue(events[0]["resolved"])
        self.assertIn("resolved_at", events[0])

    def test_mark_resolved_not_found(self):
        # Unknown ids report failure rather than raising.
        result = mark_resolved("nonexistent-id", hermes_home=self.tmpdir)
        self.assertFalse(result)

    def test_get_stats(self):
        log_escalation("the-door", "w1", "CRITICAL", [], 1.0, hermes_home=self.tmpdir)
        log_escalation("the-door", "w1", "HIGH", [], 0.75, hermes_home=self.tmpdir)
        log_escalation("hermes-agent", "c1", "HIGH", [], 0.75, hermes_home=self.tmpdir)
        log_escalation("hermes-agent", "c2", "LOW", [], 0.25, hermes_home=self.tmpdir)

        stats = get_stats(hermes_home=self.tmpdir)
        self.assertEqual(stats["total"], 4)
        self.assertEqual(stats["by_level"]["CRITICAL"], 1)
        self.assertEqual(stats["by_level"]["HIGH"], 2)
        self.assertEqual(stats["by_source"]["the-door"], 2)
        self.assertEqual(stats["by_source"]["hermes-agent"], 2)
        self.assertEqual(stats["unresolved"], 4)

    def test_limit(self):
        # Only `limit` events come back when more are stored.
        for i in range(10):
            log_escalation("the-door", f"s{i}", "LOW", [], 0.25, hermes_home=self.tmpdir)

        events = get_escalations(limit=3, hermes_home=self.tmpdir)
        self.assertEqual(len(events), 3)

    def test_empty_returns_empty(self):
        # A missing log file is not an error — just empty results.
        events = get_escalations(hermes_home=self.tmpdir)
        self.assertEqual(events, [])

        stats = get_stats(hermes_home=self.tmpdir)
        self.assertEqual(stats["total"], 0)
|
||||
|
||||
|
||||
class TestBridge(unittest.TestCase):
    """Test the HTTP bridge API handler.

    Calls handle_crisis_api() directly (no socket), always with an
    isolated hermes_home temp dir.
    """

    def setUp(self):
        # Private state dir so the bridge reads/writes its own log file.
        self.tmpdir = tempfile.mkdtemp()

    def test_get_escalations_empty(self):
        result = handle_crisis_api("GET", "/api/crisis/escalations", hermes_home=self.tmpdir)
        self.assertEqual(result["status"], 200)
        body = json.loads(result["body"])
        self.assertEqual(body["count"], 0)

    def test_post_log(self):
        # A valid POST creates the event and echoes it back with a 201.
        body = json.dumps({
            "source": "hermes-agent",
            "session_id": "cli-456",
            "level": "CRITICAL",
            "indicators": ["want to die"],
            "score": 1.0,
            "action_taken": "988 provided",
        })
        result = handle_crisis_api("POST", "/api/crisis/log", body=body, hermes_home=self.tmpdir)
        self.assertEqual(result["status"], 201)
        event = json.loads(result["body"])
        self.assertEqual(event["level"], "CRITICAL")
        self.assertIn("id", event)

    def test_post_log_missing_fields(self):
        # Required fields absent -> 400, nothing written.
        body = json.dumps({"source": "test"})
        result = handle_crisis_api("POST", "/api/crisis/log", body=body, hermes_home=self.tmpdir)
        self.assertEqual(result["status"], 400)

    def test_post_log_invalid_json(self):
        result = handle_crisis_api("POST", "/api/crisis/log", body="not json", hermes_home=self.tmpdir)
        self.assertEqual(result["status"], 400)

    def test_get_stats(self):
        # Log some events first
        body = json.dumps({"source": "test", "session_id": "s1", "level": "HIGH", "indicators": []})
        handle_crisis_api("POST", "/api/crisis/log", body=body, hermes_home=self.tmpdir)

        result = handle_crisis_api("GET", "/api/crisis/stats", hermes_home=self.tmpdir)
        self.assertEqual(result["status"], 200)
        stats = json.loads(result["body"])
        self.assertEqual(stats["total"], 1)

    def test_resolve_via_api(self):
        # Create an event, then resolve it through the REST-style path.
        body = json.dumps({"source": "test", "session_id": "s1", "level": "HIGH", "indicators": []})
        create_result = handle_crisis_api("POST", "/api/crisis/log", body=body, hermes_home=self.tmpdir)
        event_id = json.loads(create_result["body"])["id"]

        result = handle_crisis_api("POST", f"/api/crisis/resolve/{event_id}", hermes_home=self.tmpdir)
        self.assertEqual(result["status"], 200)

    def test_404(self):
        result = handle_crisis_api("GET", "/api/crisis/nonexistent", hermes_home=self.tmpdir)
        self.assertEqual(result["status"], 404)
|
||||
|
||||
|
||||
if __name__ == "__main__":  # Allow running this test file directly.
    unittest.main()
|
||||
221
crisis/tracker.py
Normal file
221
crisis/tracker.py
Normal file
@@ -0,0 +1,221 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Crisis Escalation Tracker — shared state between the-door web and hermes-agent.
|
||||
|
||||
Writes crisis events to a JSONL file at ~/.hermes/crisis_escalations.jsonl.
|
||||
Both the-door (web sessions) and hermes-agent (CLI sessions) write here.
|
||||
The-door UI reads from it to show unified escalation history.
|
||||
|
||||
Format per line:
|
||||
{
|
||||
"id": "uuid",
|
||||
"timestamp": "ISO-8601",
|
||||
"source": "the-door" | "hermes-agent",
|
||||
"session_id": "session-identifier",
|
||||
"level": "CRITICAL" | "HIGH" | "MEDIUM" | "LOW",
|
||||
"indicators": ["pattern1", ...],
|
||||
"score": 0.0-1.0,
|
||||
"action_taken": "description",
|
||||
"resolved": false
|
||||
}
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
|
||||
def _get_escalations_path(hermes_home: Optional[str] = None) -> Path:
|
||||
"""Return path to the shared escalation log."""
|
||||
if hermes_home:
|
||||
base = Path(hermes_home)
|
||||
else:
|
||||
base = Path(os.environ.get("HERMES_HOME", os.path.expanduser("~/.hermes")))
|
||||
base.mkdir(parents=True, exist_ok=True)
|
||||
return base / "crisis_escalations.jsonl"
|
||||
|
||||
|
||||
def log_escalation(
    source: str,
    session_id: str,
    level: str,
    indicators: List[str],
    score: float,
    action_taken: str = "",
    hermes_home: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Log a crisis escalation event.

    Args:
        source: "the-door" or "hermes-agent"
        session_id: Session identifier
        level: CRITICAL, HIGH, MEDIUM, LOW
        indicators: List of matched patterns
        score: Detection score (0.0-1.0)
        action_taken: Description of what was done
        hermes_home: Override HERMES_HOME path

    Returns:
        The logged event dict (with id and timestamp added)
    """
    stamped_at = datetime.now(timezone.utc).isoformat()
    event: Dict[str, Any] = {
        "id": str(uuid.uuid4()),
        "timestamp": stamped_at,
        "source": source,
        "session_id": session_id,
        "level": level,
        "indicators": indicators,
        "score": score,
        "action_taken": action_taken,
        "resolved": False,
    }

    # Append-only JSONL write: one event per line, shared by both writers.
    log_path = _get_escalations_path(hermes_home)
    with open(log_path, "a") as handle:
        handle.write(json.dumps(event) + "\n")

    return event
|
||||
|
||||
|
||||
def get_escalations(
    limit: int = 50,
    source: Optional[str] = None,
    level: Optional[str] = None,
    session_id: Optional[str] = None,
    since: Optional[str] = None,
    hermes_home: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """
    Read escalation events from the shared log.

    Args:
        limit: Maximum events to return
        source: Filter by source ("the-door" or "hermes-agent")
        level: Filter by crisis level
        session_id: Filter by session
        since: ISO timestamp — only return events after this time
        hermes_home: Override HERMES_HOME path

    Returns:
        List of event dicts, newest first
    """
    log_path = _get_escalations_path(hermes_home)
    if not log_path.exists():
        return []

    def _keep(evt: Dict[str, Any]) -> bool:
        # Filters are conjunctive; an unset filter matches everything.
        # `since` relies on ISO-8601 timestamps sorting lexicographically.
        if source and evt.get("source") != source:
            return False
        if level and evt.get("level") != level:
            return False
        if session_id and evt.get("session_id") != session_id:
            return False
        if since and evt.get("timestamp", "") <= since:
            return False
        return True

    matched: List[Dict[str, Any]] = []
    try:
        with open(log_path) as handle:
            for raw in handle:
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    evt = json.loads(raw)
                except json.JSONDecodeError:
                    # Tolerate corrupt lines rather than failing the read.
                    continue
                if _keep(evt):
                    matched.append(evt)
    except OSError:
        # Unreadable log degrades to "no events" by design.
        return []

    # Sort newest first and cap at `limit`.
    matched.sort(key=lambda e: e.get("timestamp", ""), reverse=True)
    return matched[:limit]
|
||||
|
||||
|
||||
def mark_resolved(
    event_id: str,
    hermes_home: Optional[str] = None,
) -> bool:
    """
    Mark an escalation event as resolved.

    Rewrites the JSONL log with the matching event's ``resolved`` flag set
    and a ``resolved_at`` timestamp added; all other lines (including any
    unparseable ones) are preserved verbatim.

    Returns True if the event was found and updated.
    """
    log_path = _get_escalations_path(hermes_home)
    if not log_path.exists():
        return False

    rewritten = []
    updated = False
    with open(log_path) as handle:
        for raw in handle:
            raw = raw.strip()
            if not raw:
                continue
            try:
                evt = json.loads(raw)
            except json.JSONDecodeError:
                # Keep corrupt lines untouched so no data is dropped.
                rewritten.append(raw)
                continue

            if evt.get("id") == event_id:
                evt["resolved"] = True
                evt["resolved_at"] = datetime.now(timezone.utc).isoformat()
                updated = True

            rewritten.append(json.dumps(evt))

    # Only rewrite the file when something actually changed.
    if updated:
        with open(log_path, "w") as handle:
            for raw in rewritten:
                handle.write(raw + "\n")

    return updated
|
||||
|
||||
|
||||
def get_stats(hermes_home: Optional[str] = None) -> Dict[str, Any]:
    """
    Get aggregate statistics about crisis escalations.

    Returns:
        Dict with counts by level, source, recent activity
    """
    all_events = get_escalations(limit=10000, hermes_home=hermes_home)

    by_level: Dict[str, int] = {}
    by_source: Dict[str, int] = {}
    unresolved = 0
    recent_total = 0
    recent_critical = 0
    now = datetime.now(timezone.utc)

    # Single pass: tally level/source/unresolved, then the 24h window.
    for evt in all_events:
        lvl = evt.get("level", "UNKNOWN")
        by_level[lvl] = by_level.get(lvl, 0) + 1
        src = evt.get("source", "unknown")
        by_source[src] = by_source.get(src, 0) + 1
        if not evt.get("resolved"):
            unresolved += 1

        # Events with a missing or malformed timestamp are simply
        # excluded from the "recent" counts.
        try:
            stamp = datetime.fromisoformat(evt["timestamp"])
        except (ValueError, KeyError):
            continue
        if (now - stamp).total_seconds() < 86400:
            recent_total += 1
            if evt.get("level") == "CRITICAL":
                recent_critical += 1

    return {
        "total": len(all_events),
        "by_level": by_level,
        "by_source": by_source,
        "unresolved": unresolved,
        "recent_24h": recent_total,
        "recent_critical": recent_critical,
    }
|
||||
@@ -1,155 +0,0 @@
|
||||
"""
|
||||
Image Content Screening for Self-Harm Indicators
|
||||
|
||||
Screens images sent in chat for self-harm content using vision models.
|
||||
Privacy-preserving: analyzes in-memory, discards after scoring. No image retention.
|
||||
"""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger("the-door.image_screening")
|
||||
|
||||
|
||||
class RiskLevel(str, Enum):
    """Screening outcome categories, ordered here by rising severity."""

    SAFE = "safe"              # no self-harm indicators detected
    CONCERNING = "concerning"  # ambiguous but worrying content
    CRITICAL = "critical"      # imminent self-harm indicators
|
||||
|
||||
|
||||
@dataclass
class ImageScreenResult:
    """Outcome of screening a single image for self-harm indicators."""

    risk_level: RiskLevel   # overall verdict
    confidence: float       # model confidence, 0.0-1.0
    categories: list        # matched indicator categories
    reasoning: str          # model's short explanation
    model_used: str         # e.g. "ollama:gemma3:4b" or "fallback:heuristic"

    @property
    def needs_crisis_response(self):
        # Only CRITICAL results warrant the full crisis overlay.
        return self.risk_level == RiskLevel.CRITICAL

    @property
    def needs_followup(self):
        # Anything above SAFE should at least be logged / followed up.
        return self.risk_level in (RiskLevel.CONCERNING, RiskLevel.CRITICAL)

    def to_dict(self):
        """Serialize for logging / JSON transport."""
        payload = {
            "risk_level": self.risk_level.value,
            "confidence": round(self.confidence, 3),
            "categories": self.categories,
            "reasoning": self.reasoning,
            "model_used": self.model_used,
        }
        return payload
|
||||
|
||||
|
||||
SCREENING_PROMPT = """Analyze this image for indicators of self-harm or crisis. Look for:
|
||||
|
||||
1. Self-harm wounds or injuries
|
||||
2. Medication/pills in concerning quantities
|
||||
3. Farewell-type imagery (empty rooms, items laid out, goodbye notes)
|
||||
4. Screenshots of crisis-related searches
|
||||
5. Images conveying hopelessness, isolation, or despair
|
||||
|
||||
Respond in JSON format ONLY:
|
||||
{"risk_level": "safe"|"concerning"|"critical", "confidence": 0.0-1.0, "categories": [], "reasoning": ""}
|
||||
|
||||
CRITICAL: imminent self-harm indicators
|
||||
CONCERNING: ambiguous but worrying
|
||||
SAFE: no indicators detected"""
|
||||
|
||||
|
||||
def _analyze_with_ollama(image_b64, model="gemma3:4b"):
    """Ask a local Ollama vision model to screen the image.

    Args:
        image_b64: Base64-encoded image payload.
        model: Ollama model tag to query.

    Returns:
        ImageScreenResult on success, or None when the request fails or
        the model's reply contains no parseable JSON object (caller then
        falls back to the heuristic path).
    """
    try:
        import urllib.request
        payload = json.dumps({
            "model": model,
            "messages": [{
                "role": "user",
                "content": SCREENING_PROMPT,
                "images": [image_b64],
            }],
            "stream": False,
            "options": {"temperature": 0.1},  # low temp: stable JSON output
        }).encode()
        req = urllib.request.Request(
            "http://localhost:11434/api/chat",
            data=payload,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        # Context manager closes the HTTP response even if reading or JSON
        # parsing raises (the original leaked the connection on errors).
        with urllib.request.urlopen(req, timeout=30) as resp:
            data = json.loads(resp.read())
        content = data.get("message", {}).get("content", "")
        # Extract the first {...} span — models often wrap JSON in prose.
        json_start = content.find("{")
        json_end = content.rfind("}") + 1
        if json_start == -1 or json_end <= json_start:
            return None
        result = json.loads(content[json_start:json_end])
        return ImageScreenResult(
            risk_level=RiskLevel(result.get("risk_level", "safe")),
            confidence=float(result.get("confidence", 0.5)),
            categories=result.get("categories", []),
            reasoning=result.get("reasoning", ""),
            model_used=f"ollama:{model}",
        )
    except Exception as e:
        # Best-effort by design: any failure degrades to the fallback.
        logger.warning(f"Ollama vision analysis failed: {e}")
        return None
|
||||
|
||||
|
||||
def _analyze_fallback(image_bytes):
    # No vision model available: return a conservative low-confidence SAFE
    # result. `image_bytes` is accepted for interface symmetry but unused.
    return ImageScreenResult(
        risk_level=RiskLevel.SAFE,
        confidence=0.2,
        categories=["unanalyzed"],
        reasoning="No vision model available. Defaulting to safe with low confidence.",
        model_used="fallback:heuristic",
    )
|
||||
|
||||
|
||||
def screen_image(image_data, use_vision_model=True, model="gemma3:4b"):
    """Screen image for self-harm indicators. Analyzes in-memory, no retention."""
    # Normalize input: keep both a base64 form (for the model API) and the
    # raw bytes. Non-bytes input is treated as a base64 string.
    if isinstance(image_data, bytes):
        image_b64 = base64.b64encode(image_data).decode()
    else:
        image_b64 = image_data
        image_data = base64.b64decode(image_b64)

    if use_vision_model and (verdict := _analyze_with_ollama(image_b64, model)):
        logger.info(f"Image screened: {verdict.risk_level.value} (conf: {verdict.confidence:.2f})")
        if verdict.needs_crisis_response:
            logger.warning(f"CRITICAL image: {verdict.reasoning}")
        return verdict

    # Model disabled or unavailable — degrade gracefully.
    return _analyze_fallback(image_data)
|
||||
|
||||
|
||||
def handle_chat_image(image_data):
    """Handle image from chat. Returns action dict for gateway."""
    screened = screen_image(image_data)
    action = {
        "result": screened.to_dict(),
        "show_crisis_overlay": screened.needs_crisis_response,
        "log_event": screened.needs_followup,
        "response_text": None,
    }
    # Attach a canned message for elevated risk levels; SAFE leaves
    # response_text as None so the normal chat flow continues.
    if screened.risk_level == RiskLevel.CRITICAL:
        action["response_text"] = (
            "I noticed something concerning in the image you shared. "
            "If you or someone you know is in crisis, please reach out: "
            "988 Suicide and Crisis Lifeline (call or text 988). "
            "You are not alone."
        )
    elif screened.risk_level == RiskLevel.CONCERNING:
        action["response_text"] = (
            "I want to check in \u2014 how are you doing? "
            "If you need to talk to someone, the 988 Lifeline is available 24/7."
        )
    return action
|
||||
@@ -1,84 +0,0 @@
|
||||
"""Tests for image content screening module."""
|
||||
|
||||
import json
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
from image_screening import (
|
||||
RiskLevel,
|
||||
ImageScreenResult,
|
||||
screen_image,
|
||||
handle_chat_image,
|
||||
_analyze_fallback,
|
||||
)
|
||||
|
||||
|
||||
class TestImageScreenResult:
    """Unit tests for the ImageScreenResult convenience properties."""

    def test_safe_result(self):
        # SAFE: neither the crisis overlay nor a follow-up is triggered.
        result = ImageScreenResult(
            risk_level=RiskLevel.SAFE, confidence=0.95,
            categories=[], reasoning="No indicators", model_used="test"
        )
        assert not result.needs_crisis_response
        assert not result.needs_followup
        assert result.to_dict()["risk_level"] == "safe"

    def test_critical_result(self):
        # CRITICAL triggers both flags.
        result = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.9,
            categories=["wounds"], reasoning="Detected", model_used="test"
        )
        assert result.needs_crisis_response
        assert result.needs_followup

    def test_concerning_result(self):
        # CONCERNING warrants follow-up but not the crisis overlay.
        result = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Ambiguous", model_used="test"
        )
        assert not result.needs_crisis_response
        assert result.needs_followup
|
||||
|
||||
|
||||
class TestScreenImage:
    """Tests for screen_image with the vision model disabled."""

    def test_fallback_returns_safe(self):
        # With the model off, screening degrades to low-confidence SAFE.
        result = screen_image(b"fake_image_data", use_vision_model=False)
        assert result.risk_level == RiskLevel.SAFE
        assert result.model_used == "fallback:heuristic"
        assert result.confidence < 0.5

    def test_base64_input(self):
        # A base64 string is accepted in place of raw bytes.
        import base64
        b64 = base64.b64encode(b"fake").decode()
        result = screen_image(b64, use_vision_model=False)
        assert result.risk_level == RiskLevel.SAFE
|
||||
|
||||
|
||||
class TestHandleChatImage:
    """Tests for the gateway-facing handle_chat_image action dict."""

    def test_safe_image_no_overlay(self):
        # NOTE(review): relies on no local Ollama answering, so the SAFE
        # fallback applies — confirm this holds in CI environments.
        action = handle_chat_image(b"safe_image")
        assert not action["show_crisis_overlay"]
        assert action["response_text"] is None

    @patch("image_screening._analyze_with_ollama")
    def test_critical_image_shows_overlay(self, mock_ollama):
        # CRITICAL must surface the overlay, a 988 message, and a log event.
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CRITICAL, confidence=0.95,
            categories=["wounds"], reasoning="Self-harm detected",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"concerning_image")
        assert action["show_crisis_overlay"]
        assert "988" in action["response_text"]
        assert action["log_event"]

    @patch("image_screening._analyze_with_ollama")
    def test_concerning_image_followup(self, mock_ollama):
        # CONCERNING logs and checks in, but no overlay.
        mock_ollama.return_value = ImageScreenResult(
            risk_level=RiskLevel.CONCERNING, confidence=0.6,
            categories=["isolation"], reasoning="Empty room",
            model_used="ollama:gemma3:4b"
        )
        action = handle_chat_image(b"maybe_concerning")
        assert not action["show_crisis_overlay"]
        assert action["log_event"]
        assert "check in" in action["response_text"]
|
||||
Reference in New Issue
Block a user