Compare commits
1 Commits
burn/37-17
...
fix/59
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bbc513821f |
@@ -6,8 +6,7 @@ Stands between a broken man and a machine that would tell him to die.
|
||||
|
||||
from .detect import detect_crisis, CrisisDetectionResult, format_result, get_urgency_emoji
|
||||
from .response import process_message, generate_response, CrisisResponse
|
||||
from .gateway import check_crisis, get_system_prompt, format_gateway_response, get_metrics_summary, get_metrics_report
|
||||
from .metrics import record_detection, record_continuation, get_metrics
|
||||
from .gateway import check_crisis, get_system_prompt, format_gateway_response
|
||||
|
||||
__all__ = [
|
||||
"detect_crisis",
|
||||
@@ -20,9 +19,4 @@ __all__ = [
|
||||
"format_result",
|
||||
"format_gateway_response",
|
||||
"get_urgency_emoji",
|
||||
"get_metrics_summary",
|
||||
"get_metrics_report",
|
||||
"record_detection",
|
||||
"record_continuation",
|
||||
"get_metrics",
|
||||
]
|
||||
|
||||
@@ -22,7 +22,6 @@ from .response import (
|
||||
get_system_prompt_modifier,
|
||||
CrisisResponse,
|
||||
)
|
||||
from .metrics import record_detection, get_summary, print_summary, record_continuation
|
||||
|
||||
|
||||
def check_crisis(text: str) -> dict:
|
||||
@@ -35,9 +34,6 @@ def check_crisis(text: str) -> dict:
|
||||
detection = detect_crisis(text)
|
||||
response = generate_response(detection)
|
||||
|
||||
# Record metrics (privacy-preserving — no message content stored)
|
||||
record_detection(detection.level, detection.indicators)
|
||||
|
||||
return {
|
||||
"level": detection.level,
|
||||
"score": detection.score,
|
||||
@@ -97,21 +93,6 @@ def format_gateway_response(text: str, pretty: bool = True) -> str:
|
||||
return json.dumps(result)
|
||||
|
||||
|
||||
# ── Metrics endpoint ─────────────────────────────────────────────
|
||||
|
||||
def get_metrics_summary() -> dict:
    """Return the crisis-detection metrics summary as a dict.

    Privacy-preserving: the summary contains counts only — no PII and
    no message content.
    """
    return get_summary()
|
||||
|
||||
|
||||
def get_metrics_report() -> str:
    """Return the formatted weekly metrics report for stdout/logs."""
    return print_summary()
|
||||
|
||||
|
||||
# ── Quick test interface ────────────────────────────────────────
|
||||
|
||||
def _interactive():
|
||||
|
||||
@@ -1,270 +0,0 @@
|
||||
"""
|
||||
Crisis Detection Metrics for the-door.
|
||||
|
||||
Privacy-preserving analytics layer. Tracks:
|
||||
- Detection counts per level (CRITICAL, HIGH, MEDIUM, LOW)
|
||||
- Keyword firing frequency (pattern hashes, not raw text)
|
||||
- Time-based distribution (hourly buckets)
|
||||
- Post-intervention behavior
|
||||
|
||||
NO PII is stored — no message content, no user identifiers, no timestamps
|
||||
finer than hourly granularity.
|
||||
|
||||
Storage: JSON file at crisis_metrics.json (configurable path).
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import hashlib
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
|
||||
# ── Pattern hash helper ───────────────────────────────────────────
|
||||
|
||||
def _hash_pattern(pattern: str) -> str:
|
||||
"""Hash a regex pattern to avoid storing raw crisis text."""
|
||||
return hashlib.sha256(pattern.encode()).hexdigest()[:12]
|
||||
|
||||
|
||||
# ── Hour bucket helper ────────────────────────────────────────────
|
||||
|
||||
def _current_hour_key() -> str:
|
||||
"""Return current UTC hour as 'YYYY-MM-DDTHH'."""
|
||||
return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H")
|
||||
|
||||
|
||||
def _current_day_key() -> str:
|
||||
"""Return current UTC day as 'YYYY-MM-DD'."""
|
||||
return datetime.now(timezone.utc).strftime("%Y-%m-%d")
|
||||
|
||||
|
||||
# ── Metrics store ─────────────────────────────────────────────────
|
||||
|
||||
@dataclass
class CrisisMetrics:
    """In-memory metrics accumulator with JSON persistence.

    Every field is JSON-safe so the whole object round-trips through
    to_dict()/from_dict(). No message text is ever stored: indicator
    patterns are hashed via _hash_pattern before counting.
    """

    # Detection totals keyed by severity level.
    detections_by_level: Dict[str, int] = field(default_factory=lambda: {
        "CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "NONE": 0
    })

    # Hashed pattern -> hit count (which indicators fire most).
    keyword_frequency: Dict[str, int] = field(default_factory=dict)

    # "YYYY-MM-DDTHH" -> detections recorded in that UTC hour.
    hourly_counts: Dict[str, int] = field(default_factory=dict)

    # "YYYY-MM-DD" -> {level: count} for that UTC day.
    daily_counts: Dict[str, Dict[str, int]] = field(default_factory=dict)

    # Messages scanned overall (denominator for detection_rate).
    total_scanned: int = 0

    # Scans whose level was anything other than "NONE".
    total_detections: int = 0

    # Post-intervention tracking (session-scoped, reset on restart).
    interventions: int = 0
    continued_after_intervention: int = 0

    def record(self, level: str, indicators: List[str]) -> None:
        """Fold a single detection event into every counter.

        Args:
            level: Severity label ("CRITICAL", "HIGH", "MEDIUM", "LOW",
                or "NONE").
            indicators: Raw patterns that fired; hashed before storage.
        """
        self.total_scanned += 1

        # Per-level totals; .get handles levels absent from the defaults.
        by_level = self.detections_by_level
        by_level[level] = by_level.get(level, 0) + 1

        if level != "NONE":
            self.total_detections += 1

        # Hourly bucket (UTC).
        bucket = _current_hour_key()
        self.hourly_counts[bucket] = self.hourly_counts.get(bucket, 0) + 1

        # Daily bucket, split by level.
        day_levels = self.daily_counts.setdefault(_current_day_key(), {})
        day_levels[level] = day_levels.get(level, 0) + 1

        # Indicator frequency — hashed so no crisis text is persisted.
        for raw in indicators:
            digest = _hash_pattern(raw)
            self.keyword_frequency[digest] = self.keyword_frequency.get(digest, 0) + 1

        # CRITICAL/HIGH events count as interventions shown to the user.
        if level in ("CRITICAL", "HIGH"):
            self.interventions += 1

    def record_continuation(self) -> None:
        """Note that a user kept chatting after a crisis intervention."""
        self.continued_after_intervention += 1

    def to_dict(self) -> dict:
        """Serialize all fields to a JSON-safe dict."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict) -> "CrisisMetrics":
        """Rebuild a CrisisMetrics from a previously serialized dict."""
        return cls(
            detections_by_level=data.get("detections_by_level", {}),
            keyword_frequency=data.get("keyword_frequency", {}),
            hourly_counts=data.get("hourly_counts", {}),
            daily_counts=data.get("daily_counts", {}),
            total_scanned=data.get("total_scanned", 0),
            total_detections=data.get("total_detections", 0),
            interventions=data.get("interventions", 0),
            continued_after_intervention=data.get("continued_after_intervention", 0),
        )

    # ── Derived metrics ───────────────────────────────────────────

    @property
    def false_positive_estimate(self) -> float:
        """Estimated false-positive rate in [0, 1].

        Heuristic: a user who continues chatting after a HIGH/CRITICAL
        intervention was likely not in true crisis. Returns 0.0 when no
        interventions have occurred.
        """
        if not self.interventions:
            return 0.0
        return self.continued_after_intervention / self.interventions

    @property
    def detection_rate(self) -> float:
        """Fraction of scanned messages that triggered any detection."""
        if not self.total_scanned:
            return 0.0
        return self.total_detections / self.total_scanned

    def top_keywords(self, n: int = 10) -> List[tuple]:
        """Return the N most-fired (pattern_hash, count) pairs."""
        ranked = sorted(self.keyword_frequency.items(),
                        key=lambda kv: kv[1], reverse=True)
        return ranked[:n]

    def weekly_summary(self) -> dict:
        """Aggregate the trailing 7 days of daily_counts into one dict."""
        from datetime import timedelta

        today = datetime.now(timezone.utc).date()
        out = {
            "period_start": (today - timedelta(days=6)).isoformat(),
            "period_end": today.isoformat(),
            "total_detections": 0,
            "by_level": {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0},
            "daily_totals": {},
        }

        # Walk oldest -> newest so daily_totals keeps chronological order.
        for offset in range(6, -1, -1):
            key = (today - timedelta(days=offset)).isoformat()
            per_level = self.daily_counts.get(key, {})
            subtotal = sum(per_level.values())
            out["daily_totals"][key] = subtotal
            out["total_detections"] += subtotal
            for lvl in ("CRITICAL", "HIGH", "MEDIUM", "LOW"):
                out["by_level"][lvl] += per_level.get(lvl, 0)

        out["false_positive_estimate"] = self.false_positive_estimate
        out["detection_rate"] = self.detection_rate
        out["top_keywords"] = self.top_keywords(5)
        return out

    def format_summary(self) -> str:
        """Render the weekly summary as a human-readable report string."""
        s = self.weekly_summary()
        report = [
            "=== Crisis Detection Weekly Summary ===",
            f"Period: {s['period_start']} to {s['period_end']}",
            f"Total detections: {s['total_detections']}",
            "",
            "By level:",
        ]
        for lvl in ("CRITICAL", "HIGH", "MEDIUM", "LOW"):
            n = s["by_level"][lvl]
            report.append(f" {lvl:10s} {n:4d} {'#' * n}")

        report += ["", "Daily trend:"]
        for day, n in s["daily_totals"].items():
            report.append(f" {day} {n:4d} {'#' * n}")

        report += [
            "",
            f"Detection rate: {s['detection_rate']:.1%}",
            f"False positive estimate: {s['false_positive_estimate']:.1%}",
            "",
            "Top indicators (hashed):",
        ]
        for digest, n in s["top_keywords"]:
            report.append(f" {digest} {n:4d}x")

        return "\n".join(report)
|
||||
|
||||
|
||||
# ── Persistence ───────────────────────────────────────────────────
|
||||
|
||||
_DEFAULT_PATH = os.path.join(os.path.dirname(__file__), "crisis_metrics.json")
|
||||
|
||||
|
||||
def load_metrics(path: str = _DEFAULT_PATH) -> CrisisMetrics:
    """Load persisted metrics from *path*.

    Falls back to a fresh, empty CrisisMetrics when the file is absent
    or contains invalid JSON.
    """
    try:
        with open(path) as fh:
            payload = json.load(fh)
    except (FileNotFoundError, json.JSONDecodeError):
        return CrisisMetrics()
    return CrisisMetrics.from_dict(payload)
|
||||
|
||||
|
||||
def save_metrics(metrics: CrisisMetrics, path: str = _DEFAULT_PATH) -> None:
    """Persist metrics to *path* as pretty-printed JSON.

    Writes to a sibling temp file and atomically replaces the target
    with os.replace, so a crash or concurrent reader mid-write can
    never observe a truncated file (load_metrics treats corrupt JSON
    as "start over", which would silently discard all history).

    Args:
        metrics: The accumulator to serialize via its to_dict().
        path: Destination JSON file (defaults to _DEFAULT_PATH).
    """
    tmp_path = f"{path}.tmp"
    with open(tmp_path, "w") as f:
        json.dump(metrics.to_dict(), f, indent=2)
    os.replace(tmp_path, path)
|
||||
|
||||
|
||||
# ── Global singleton ──────────────────────────────────────────────
|
||||
|
||||
_metrics: Optional[CrisisMetrics] = None
|
||||
_metrics_path: str = _DEFAULT_PATH
|
||||
|
||||
|
||||
def get_metrics(path: Optional[str] = None) -> CrisisMetrics:
    """Get or lazily initialize the process-wide metrics singleton.

    Args:
        path: Optional override for the persistence file. It replaces
            the module-level path even when the singleton already
            exists; the in-memory object is kept and only later saves
            go to the new path.

    Returns:
        The shared CrisisMetrics instance, loaded from disk on first use.
    """
    global _metrics, _metrics_path
    if path:
        _metrics_path = path
    if _metrics is None:
        _metrics = load_metrics(_metrics_path)
    return _metrics
|
||||
|
||||
|
||||
def record_detection(level: str, indicators: List[str], path: Optional[str] = None) -> None:
    """Record a crisis detection event and persist immediately.

    Args:
        level: Severity label (e.g. "CRITICAL", "HIGH", ..., "NONE").
        indicators: Raw patterns that fired; they are hashed before
            storage, never persisted as text.
        path: Optional metrics-file override, forwarded to get_metrics().
    """
    metrics = get_metrics(path)
    metrics.record(level, indicators)
    save_metrics(metrics, _metrics_path)
|
||||
|
||||
|
||||
def record_continuation(path: Optional[str] = None) -> None:
    """Record that a user continued chatting after a crisis intervention.

    Feeds the false-positive estimate (continued_after_intervention /
    interventions) and persists immediately.

    Args:
        path: Optional metrics-file override, forwarded to get_metrics().
    """
    metrics = get_metrics(path)
    metrics.record_continuation()
    save_metrics(metrics, _metrics_path)
|
||||
|
||||
|
||||
def get_summary(path: Optional[str] = None) -> dict:
    """Return the weekly summary dict from the global metrics singleton.

    Args:
        path: Optional metrics-file override, forwarded to get_metrics().
    """
    return get_metrics(path).weekly_summary()
|
||||
|
||||
|
||||
def print_summary(path: Optional[str] = None) -> str:
    """Build and return the formatted weekly summary string.

    NOTE(review): despite the name, this does not print anything — it
    returns the report for the caller to emit. Kept as-is because
    callers (e.g. get_metrics_report) rely on the return value.

    Args:
        path: Optional metrics-file override, forwarded to get_metrics().
    """
    return get_metrics(path).format_summary()
|
||||
@@ -680,7 +680,7 @@ html, body {
|
||||
|
||||
<!-- Footer -->
|
||||
<footer id="footer">
|
||||
<a href="/about" aria-label="About The Door">about</a>
|
||||
<a href="/about.html" aria-label="About The Door">about</a>
|
||||
<button id="safety-plan-btn" aria-label="Open My Safety Plan">my safety plan</button>
|
||||
<button id="clear-chat-btn" aria-label="Clear chat history">clear chat</button>
|
||||
</footer>
|
||||
|
||||
24
tests/test_about_link.py
Normal file
24
tests/test_about_link.py
Normal file
@@ -0,0 +1,24 @@
|
||||
import pathlib
|
||||
import unittest
|
||||
|
||||
|
||||
ROOT = pathlib.Path(__file__).resolve().parents[1]
|
||||
INDEX_HTML = ROOT / 'index.html'
|
||||
ABOUT_HTML = ROOT / 'about.html'
|
||||
|
||||
|
||||
class TestAboutLink(unittest.TestCase):
    """Guard the footer 'about' link: it must target the static page."""

    @classmethod
    def setUpClass(cls):
        # Read index.html once and share it across all tests.
        cls.html = INDEX_HTML.read_text(encoding='utf-8')

    def test_about_page_exists(self):
        """The static about page must be present on disk."""
        self.assertTrue(ABOUT_HTML.exists(), 'about.html should exist for static serving')

    def test_footer_about_link_targets_static_about_html(self):
        """The footer must link to /about.html, not the bare /about route."""
        self.assertIn('href="/about.html"', self.html)
        self.assertNotIn('href="/about"', self.html)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
@@ -1,263 +0,0 @@
|
||||
"""
|
||||
Tests for crisis detection metrics (issue #37).
|
||||
Verifies privacy-preserving analytics layer works correctly.
|
||||
"""
|
||||
|
||||
import unittest
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import tempfile
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from crisis.metrics import (
|
||||
CrisisMetrics,
|
||||
load_metrics,
|
||||
save_metrics,
|
||||
_hash_pattern,
|
||||
)
|
||||
|
||||
|
||||
class TestCrisisMetrics(unittest.TestCase):
    """Unit tests for the CrisisMetrics dataclass and its persistence."""

    def setUp(self):
        self.metrics = CrisisMetrics()
        # Throwaway JSON file for persistence tests; removed in tearDown.
        self.scratch = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
        self.scratch.close()

    def tearDown(self):
        if os.path.exists(self.scratch.name):
            os.unlink(self.scratch.name)

    def test_record_none(self):
        """A NONE event bumps the scan count but not detections."""
        self.metrics.record("NONE", [])
        self.assertEqual(self.metrics.total_scanned, 1)
        self.assertEqual(self.metrics.total_detections, 0)
        self.assertEqual(self.metrics.detections_by_level["NONE"], 1)

    def test_record_critical(self):
        """A CRITICAL event bumps every relevant counter."""
        pat = r"\bkill\s*(my)?self\b"
        self.metrics.record("CRITICAL", [pat])
        m = self.metrics
        self.assertEqual(m.total_scanned, 1)
        self.assertEqual(m.total_detections, 1)
        self.assertEqual(m.detections_by_level["CRITICAL"], 1)
        self.assertEqual(m.interventions, 1)
        # The indicator must be stored hashed, never as raw text.
        self.assertEqual(m.keyword_frequency[_hash_pattern(pat)], 1)

    def test_record_high_is_intervention(self):
        """HIGH detections count as interventions."""
        self.metrics.record("HIGH", [r"\bdespair\b"])
        self.assertEqual(self.metrics.interventions, 1)

    def test_record_medium_not_intervention(self):
        """MEDIUM detections do NOT count as interventions."""
        self.metrics.record("MEDIUM", [r"\bbroken\b", r"\bworthless\b"])
        self.assertEqual(self.metrics.interventions, 0)
        self.assertEqual(self.metrics.total_detections, 1)

    def test_record_multiple(self):
        """Counters accumulate across several events."""
        events = [
            ("CRITICAL", [r"\bpattern1\b"]),
            ("HIGH", [r"\bpattern2\b"]),
            ("NONE", []),
            ("LOW", [r"\bsad\b"]),
        ]
        for level, pats in events:
            self.metrics.record(level, pats)

        self.assertEqual(self.metrics.total_scanned, 4)
        self.assertEqual(self.metrics.total_detections, 3)
        self.assertEqual(self.metrics.interventions, 2)

    def test_continuation_tracking(self):
        """A continuation after intervention raises the FP estimate."""
        self.metrics.record("CRITICAL", [r"\bpattern\b"])
        self.assertEqual(self.metrics.false_positive_estimate, 0.0)

        self.metrics.record_continuation()
        self.assertEqual(self.metrics.continued_after_intervention, 1)
        self.assertEqual(self.metrics.false_positive_estimate, 1.0)

    def test_false_positive_estimate_zero_when_no_interventions(self):
        """FP estimate must be 0.0 when nothing was intervened on."""
        self.assertEqual(self.metrics.false_positive_estimate, 0.0)

    def test_detection_rate(self):
        """Detection rate is detections divided by messages scanned."""
        for level in ("CRITICAL", "NONE", "NONE"):
            self.metrics.record(level, [])
        self.assertEqual(self.metrics.detection_rate, 1.0 / 3.0)

    def test_detection_rate_zero_when_no_scans(self):
        """Detection rate is 0.0 before anything is scanned."""
        self.assertEqual(self.metrics.detection_rate, 0.0)

    def test_hourly_bucket(self):
        """Events land in a single 'YYYY-MM-DDTHH' hourly bucket."""
        self.metrics.record("HIGH", [])
        buckets = self.metrics.hourly_counts
        self.assertEqual(len(buckets), 1)
        for key, count in buckets.items():
            self.assertRegex(key, r"^\d{4}-\d{2}-\d{2}T\d{2}$")
            self.assertEqual(count, 1)

    def test_daily_bucket(self):
        """Events land in a per-day bucket split by level."""
        self.metrics.record("CRITICAL", [])
        self.metrics.record("HIGH", [])
        self.assertEqual(len(self.metrics.daily_counts), 1)
        for day, levels in self.metrics.daily_counts.items():
            self.assertRegex(day, r"^\d{4}-\d{2}-\d{2}$")
            self.assertEqual(levels["CRITICAL"], 1)
            self.assertEqual(levels["HIGH"], 1)

    def test_top_keywords(self):
        """top_keywords ranks hashed patterns by firing frequency."""
        frequent = r"\bpattern_a\b"
        rare = r"\bpattern_b\b"

        for _ in range(5):
            self.metrics.record("HIGH", [frequent])
        for _ in range(2):
            self.metrics.record("MEDIUM", [rare])

        top = self.metrics.top_keywords(2)
        self.assertEqual(len(top), 2)
        self.assertEqual(top[0], (_hash_pattern(frequent), 5))
        self.assertEqual(top[1], (_hash_pattern(rare), 2))

    def test_weekly_summary_structure(self):
        """The weekly summary carries all expected keys and counts."""
        self.metrics.record("CRITICAL", [])
        summary = self.metrics.weekly_summary()

        for key in (
            "period_start", "period_end", "total_detections", "by_level",
            "daily_totals", "false_positive_estimate", "detection_rate",
            "top_keywords",
        ):
            self.assertIn(key, summary)

        self.assertEqual(summary["total_detections"], 1)
        self.assertEqual(summary["by_level"]["CRITICAL"], 1)

    def test_format_summary_returns_string(self):
        """format_summary returns a non-empty report string."""
        self.metrics.record("CRITICAL", [])
        report = self.metrics.format_summary()
        self.assertIsInstance(report, str)
        self.assertIn("CRITICAL", report)

    def test_persistence_round_trip(self):
        """All counters survive a save/load cycle."""
        self.metrics.record("CRITICAL", [r"\btest\b"])
        self.metrics.record("HIGH", [])
        self.metrics.record_continuation()

        save_metrics(self.metrics, self.scratch.name)
        loaded = load_metrics(self.scratch.name)

        self.assertEqual(loaded.total_scanned, 2)
        self.assertEqual(loaded.total_detections, 2)
        self.assertEqual(loaded.detections_by_level["CRITICAL"], 1)
        self.assertEqual(loaded.detections_by_level["HIGH"], 1)
        self.assertEqual(loaded.interventions, 2)
        self.assertEqual(loaded.continued_after_intervention, 1)

    def test_load_missing_file_returns_empty(self):
        """Loading a nonexistent path yields a fresh metrics object."""
        m = load_metrics("/tmp/nonexistent_metrics_999.json")
        self.assertEqual(m.total_scanned, 0)
        self.assertEqual(m.total_detections, 0)

    def test_load_corrupt_file_returns_empty(self):
        """Loading invalid JSON yields a fresh metrics object."""
        with open(self.scratch.name, "w") as f:
            f.write("NOT JSON {{{")
        m = load_metrics(self.scratch.name)
        self.assertEqual(m.total_scanned, 0)

    def test_hash_pattern_consistent(self):
        """The same pattern always hashes to the same token."""
        self.assertEqual(
            _hash_pattern(r"\bkill\s*self\b"),
            _hash_pattern(r"\bkill\s*self\b"),
        )

    def test_hash_pattern_different(self):
        """Distinct patterns hash to distinct tokens."""
        self.assertNotEqual(
            _hash_pattern(r"\bpattern_a\b"),
            _hash_pattern(r"\bpattern_b\b"),
        )
|
||||
|
||||
|
||||
class TestMetricsGatewayIntegration(unittest.TestCase):
    """Test that metrics are recorded through the gateway."""

    def test_check_crisis_records_metrics(self):
        """check_crisis should record metrics automatically.

        Redirects the module-level metrics singleton to a temp file,
        drives two messages through the gateway, and checks the counts.
        """
        # Fixed: dropped the unused `_metrics` name from this import and
        # the redundant method-local `import tempfile` (already imported
        # at module top).
        import crisis.metrics
        from crisis.metrics import get_metrics

        # Use a temp metrics file so we never touch real data.
        tmpfile = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
        tmpfile.close()

        try:
            # Reset global singleton and point it at the temp file.
            crisis.metrics._metrics = None
            crisis.metrics._metrics_path = tmpfile.name

            from crisis.gateway import check_crisis

            # A critical message
            check_crisis("I want to kill myself")

            m = get_metrics()
            self.assertEqual(m.total_scanned, 1)
            self.assertEqual(m.detections_by_level["CRITICAL"], 1)

            # A safe message
            check_crisis("I had a good day today")
            self.assertEqual(m.total_scanned, 2)
            self.assertEqual(m.detections_by_level["NONE"], 1)

        finally:
            # Reset singleton so other tests see fresh state.
            crisis.metrics._metrics = None
            if os.path.exists(tmpfile.name):
                os.unlink(tmpfile.name)
|
||||
|
||||
|
||||
class TestNoPIIStorage(unittest.TestCase):
    """Verify that no message content is stored in metrics."""

    def test_no_text_in_serialized_metrics(self):
        """Serialized metrics must never contain message text."""
        from crisis.metrics import CrisisMetrics

        pat = r"\bkill\s*self\b"
        m = CrisisMetrics()
        # Record with actual crisis text — only patterns should be hashed
        m.record("CRITICAL", [pat])

        blob = json.dumps(m.to_dict())

        # Raw crisis phrasing and user identifiers must not appear.
        self.assertNotIn("kill myself", blob)
        self.assertNotIn("I want to", blob)
        self.assertNotIn("user", blob.lower())

        # Only the short hex hash of the pattern may be present.
        self.assertIn(_hash_pattern(pat), blob)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
Reference in New Issue
Block a user