Compare commits
1 Commits
feat/136-c
...
fix/673
| Author | SHA1 | Date | |
|---|---|---|---|
| 36ce6faec7 |
75
GENOME.md
Normal file
75
GENOME.md
Normal file
@@ -0,0 +1,75 @@
|
||||
# GENOME.md — the-door
|
||||
|
||||
**Generated:** 2026-04-14
|
||||
**Repo:** Timmy_Foundation/the-door
|
||||
**Description:** Crisis Front Door — a single URL where a man at 3am can talk to Timmy. No login, no signup. 988 always visible.
|
||||
|
||||
---
|
||||
|
||||
## Project Overview
|
||||
|
||||
The-door is a crisis intervention web application — the most sacred surface in the Timmy Foundation. When a man at 3am reaches the end of his road, this is where he lands. No login, no signup, no barriers. 988 Suicide and Crisis Lifeline always visible. The "When a Man Is Dying" protocol active on every page.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
the-door/
|
||||
├── index.html # Main crisis page (PWA-capable)
|
||||
├── crisis-offline.html # Offline fallback (service worker cached)
|
||||
├── about.html # About page
|
||||
├── testimony.html # Testimony/stories page
|
||||
├── sw.js # Service worker (offline-first)
|
||||
├── manifest.json # PWA manifest
|
||||
├── crisis/ # Core crisis detection + response
|
||||
│ ├── detect.py # Keyword/pattern detection (4 tiers)
|
||||
│ ├── gateway.py # API endpoints, prompt injection
|
||||
│ ├── response.py # Response generation, 988 routing
|
||||
│ ├── compassion_router.py # Profile-based response routing
|
||||
│ ├── profiles.py # Compassion profiles
|
||||
│ └── PROTOCOL.md # The protocol (SOUL.md reference)
|
||||
├── crisis_detector.py # Legacy shim → crisis/detect.py
|
||||
├── crisis_responder.py # Legacy responder
|
||||
├── dying_detection/ # Deprecated module
|
||||
├── evolution/ # Crisis synthesizer (creative)
|
||||
├── tests/ # Safety-critical tests
|
||||
│ ├── test_crisis_overlay_focus_trap.py
|
||||
│ ├── test_dying_detection_deprecation.py
|
||||
│ └── test_false_positive_fixes.py
|
||||
└── deploy/ # Deployment docs
|
||||
```
|
||||
|
||||
## Key Abstractions
|
||||
|
||||
| Module | Purpose |
|
||||
|---|---|
|
||||
| `crisis/detect.py` | 4-tier detection: LOW/MEDIUM/HIGH/CRITICAL via regex patterns |
|
||||
| `crisis/gateway.py` | HTTP API, Sovereign Heart prompt injection |
|
||||
| `crisis/response.py` | Response generation, 988 integration, escalation |
|
||||
| `crisis/compassion_router.py` | Profile-based routing (different crisis types) |
|
||||
| `sw.js` | Service worker for offline-first PWA |
|
||||
|
||||
## Safety Constraints
|
||||
|
||||
- **The-door never auto-closes PRs** (in fleet-ops exempt list)
|
||||
- **988 always visible** on every page, even offline
|
||||
- **When a Man Is Dying protocol** active on every interaction
|
||||
- **No login/signup** — zero barriers to crisis support
|
||||
- **Offline-first** — service worker caches critical pages
|
||||
|
||||
## Test Coverage
|
||||
|
||||
| Test | Coverage |
|
||||
|---|---|
|
||||
| Crisis overlay focus trap | ✅ |
|
||||
| Dying detection deprecation | ✅ |
|
||||
| False positive fixes | ✅ |
|
||||
| Crisis detection tiers | ❌ (in crisis/tests.py) |
|
||||
| Response generation | ❌ |
|
||||
| Offline service worker | ❌ |
|
||||
|
||||
## Security
|
||||
|
||||
- No user data stored (crisis intervention is stateless by design)
|
||||
- No cookies, no tracking, no analytics
|
||||
- Service worker only caches static assets
|
||||
- Crisis detection runs client-side where possible
|
||||
9
Makefile
9
Makefile
@@ -46,12 +46,3 @@ ssl:
|
||||
|
||||
service:
|
||||
ssh root@$(VPS) "cd /opt/the-door && bash deploy/deploy.sh --service"
|
||||
|
||||
# Crisis metrics
|
||||
.PHONY: metrics metrics-json
|
||||
|
||||
metrics: ## Show crisis metrics summary (last 7 days)
|
||||
python3 -m crisis.metrics --summary
|
||||
|
||||
metrics-json: ## Export crisis metrics as JSON
|
||||
python3 -m crisis.metrics --json
|
||||
|
||||
@@ -1,199 +0,0 @@
|
||||
"""Crisis metrics — aggregate detection data for operators.
|
||||
|
||||
Tracks crisis detection events and provides summary reports.
|
||||
|
||||
Usage:
|
||||
python3 -m crisis.metrics --summary # weekly report
|
||||
python3 -m crisis.metrics --json # raw JSON export
|
||||
python3 -m crisis.metrics --last 7d # last 7 days
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from collections import Counter
|
||||
from dataclasses import dataclass, asdict
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
# Data directory for metrics storage
|
||||
_DATA_DIR = Path(os.getenv("CRISIS_DATA_DIR", str(Path.home() / ".the-door")))
|
||||
_METRICS_FILE = _DATA_DIR / "crisis-metrics.jsonl"
|
||||
|
||||
|
||||
@dataclass
class CrisisEvent:
    """A single crisis detection event.

    One event is serialized per line (JSON) in the metrics JSONL file
    by ``log_event`` and re-hydrated by ``load_events``.
    """
    # Unix epoch seconds when the detection fired.
    timestamp: float
    level: str  # NONE, LOW, MODERATE, HIGH, CRITICAL
    # Names of the detection indicators/patterns that matched.
    indicators: List[str]
    # Opaque session identifier; empty string when not available.
    session_id: str = ""
    source: str = ""  # "chat", "gateway", "cli"
|
||||
|
||||
|
||||
@dataclass
class MetricsSummary:
    """Aggregated metrics summary.

    Produced by ``compute_summary`` and rendered by ``format_summary``
    (or dumped as JSON via ``asdict`` in the CLI's ``--json`` mode).
    """
    # Length of the reporting window in days.
    period_days: int
    # Total number of events in the window.
    total_events: int
    # Event counts keyed by severity level (e.g. {"HIGH": 3}).
    by_level: Dict[str, int]
    # (indicator, count) pairs, most frequent first (up to 10).
    top_indicators: List[tuple]
    # Number of distinct non-empty session_ids seen.
    sessions_affected: int
    # total_events / period_days, rounded to one decimal place.
    avg_daily: float
    # "YYYY-MM-DD" of the busiest day, or "N/A" when there are no events.
    peak_day: str
    # Event count on peak_day (0 when there are no events).
    peak_count: int
    # Local timestamp string ("YYYY-MM-DD HH:MM:SS") of report creation.
    generated_at: str
|
||||
|
||||
|
||||
def log_event(event: CrisisEvent) -> None:
    """Append *event* to the metrics file as one JSON line.

    Creates the data directory on first use; the file itself is
    created by the append-mode open when missing.
    """
    _DATA_DIR.mkdir(parents=True, exist_ok=True)
    record = json.dumps(asdict(event))
    with _METRICS_FILE.open("a") as sink:
        sink.write(record + "\n")
|
||||
|
||||
|
||||
def load_events(days: int = 7) -> List[CrisisEvent]:
    """Load crisis events recorded within the last *days* days.

    Reads the JSONL metrics file line by line and keeps events whose
    ``timestamp`` falls inside the window. Returns an empty list when
    the file does not exist.

    Robustness fix: the original wrapped the entire read loop in one
    ``try`` that caught ``(JSONDecodeError, KeyError)``, so a single
    corrupt line silently discarded every remaining event — and schema
    drift actually raises ``TypeError`` from ``CrisisEvent(**data)``,
    which was not caught at all. Bad lines are now skipped
    individually with the correct exception types.
    """
    if not _METRICS_FILE.exists():
        return []

    cutoff = time.time() - days * 86400
    events: List[CrisisEvent] = []

    with open(_METRICS_FILE) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            try:
                data = json.loads(line)
            except json.JSONDecodeError:
                continue  # corrupt line: skip it, keep scanning the rest
            if data.get("timestamp", 0) >= cutoff:
                try:
                    events.append(CrisisEvent(**data))
                except TypeError:
                    continue  # schema drift: unknown or missing fields

    return events
|
||||
|
||||
|
||||
def compute_summary(days: int = 7) -> MetricsSummary:
    """Compute the aggregated MetricsSummary for the last *days* days.

    Loads events via ``load_events`` and derives: counts by severity
    level, the ten most frequent indicators, the number of distinct
    sessions, the daily average, and the single busiest day.

    Cleanups vs. the original: removed the unused ``now = time.time()``
    local, and dropped the redundant function-local
    ``from collections import defaultdict`` — ``Counter`` (already
    imported at module top) covers the daily tally as well.
    """
    events = load_events(days)

    # Severity distribution.
    by_level = Counter(e.level for e in events)

    # Top indicators across all events.
    indicator_counts: Counter = Counter()
    for e in events:
        indicator_counts.update(e.indicators)
    top_indicators = indicator_counts.most_common(10)

    # Distinct non-empty session ids.
    sessions = {e.session_id for e in events if e.session_id}

    # Busiest calendar day (local time), "N/A" when there are no events.
    daily = Counter(
        time.strftime("%Y-%m-%d", time.localtime(e.timestamp)) for e in events
    )
    peak_day = max(daily, key=daily.get) if daily else "N/A"
    peak_count = daily.get(peak_day, 0)

    return MetricsSummary(
        period_days=days,
        total_events=len(events),
        by_level=dict(by_level),
        top_indicators=top_indicators,
        sessions_affected=len(sessions),
        avg_daily=round(len(events) / max(days, 1), 1),
        peak_day=peak_day,
        peak_count=peak_count,
        generated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
    )
|
||||
|
||||
|
||||
def format_summary(summary: MetricsSummary) -> str:
    """Render *summary* as a human-readable plain-text report.

    Sections: header, headline figures, a severity histogram (only
    levels with at least one event), the top five indicators, and a
    placeholder message when the period had no events at all.
    """
    report: List[str] = []
    report.append("Crisis Metrics Summary")
    report.append("=" * 40)
    report.append(f"Period: Last {summary.period_days} days")
    report.append(f"Generated: {summary.generated_at}")
    report.append("")
    report.append(f"Total events: {summary.total_events}")
    report.append(f"Daily avg: {summary.avg_daily}")
    report.append(f"Sessions: {summary.sessions_affected}")
    report.append(f"Peak day: {summary.peak_day} ({summary.peak_count} events)")
    report.append("")

    if summary.by_level:
        report.append("By severity:")
        # Fixed severity order, most serious first; bar capped at 30 cells.
        for severity in ("CRITICAL", "HIGH", "MODERATE", "LOW", "NONE"):
            n = summary.by_level.get(severity, 0)
            if n > 0:
                report.append(f"  {severity:10s} {n:4d} {'█' * min(n, 30)}")
        report.append("")

    if summary.top_indicators:
        report.append("Top indicators:")
        for name, n in summary.top_indicators[:5]:
            report.append(f"  {name}: {n}")
        report.append("")

    if summary.total_events == 0:
        report.append("No crisis events in this period.")

    return "\n".join(report)
|
||||
|
||||
|
||||
def main():
    """CLI entry point for crisis metrics.

    Modes: ``--log LEVEL INDICATOR`` records a test event and exits;
    otherwise a summary for the ``--last`` window is printed, either
    human-readable (default / ``--summary``) or as JSON (``--json``).
    Returns a process exit code (always 0).
    """
    import argparse

    parser = argparse.ArgumentParser(description="Crisis metrics summary")
    parser.add_argument("--summary", action="store_true", help="Print summary report")
    parser.add_argument("--json", action="store_true", dest="as_json", help="Output JSON")
    parser.add_argument("--last", default="7d", help="Time period (e.g., 7d, 30d)")
    parser.add_argument("--log", nargs=2, metavar=("LEVEL", "INDICATOR"), help="Log a test event")
    args = parser.parse_args()

    # "--last 30d" -> 30; anything unparsable falls back to one week.
    try:
        days = int(args.last.rstrip("d"))
    except ValueError:
        days = 7

    # Log mode: record one synthetic event and stop.
    if args.log:
        level, indicator = args.log
        log_event(
            CrisisEvent(
                timestamp=time.time(),
                level=level.upper(),
                indicators=[indicator],
                session_id="cli-test",
                source="cli",
            )
        )
        print(f"Logged: {level.upper()} / {indicator}")
        return 0

    # Report mode.
    summary = compute_summary(days)
    if args.as_json:
        print(json.dumps(asdict(summary), indent=2))
    else:
        print(format_summary(summary))

    return 0


if __name__ == "__main__":
    sys.exit(main())
|
||||
Reference in New Issue
Block a user