Compare commits

...

9 Commits

Author SHA1 Message Date
4949daf46b feat: add edge crisis detection — docs/edge-crisis-deployment.md (#102)
All checks were successful
Smoke Test / smoke (pull_request) Successful in 18s
2026-04-16 01:52:51 +00:00
930e86cb83 feat: add edge crisis detection — tests/test_edge_crisis.py (#102) 2026-04-16 01:52:47 +00:00
124937a537 feat: add edge crisis detection — data/crisis_resources.json (#102) 2026-04-16 01:52:44 +00:00
fcc2c0552e feat: add edge crisis detection — scripts/crisis_detector.py (#102) 2026-04-16 01:52:42 +00:00
3cd8750cbb Merge pull request 'feat: standalone build system and roundtrip tests - #17' (#51) from dispatch/17-1776180746 into main
All checks were successful
Smoke Test / smoke (pull_request) Successful in 15s
2026-04-15 11:57:58 +00:00
ef765bbd30 Merge pull request 'fix(docs): resolve broken markdown links and stale forge URL' (#52) from burn/fix-doc-links into main 2026-04-15 11:57:55 +00:00
Hermes Agent
5f0d00f127 fix(docs): resolve broken markdown links and stale forge URL
All checks were successful
Smoke Test / smoke (pull_request) Successful in 6s
- Update raw-IP forge URL to canonical forge domain in README.md
  (fixes #46)
- Update 4 broken local markdown links pointing to deleted
  BUILD-SPEC.md, PHASE1-REPORT.md, FULL-REPORT.md to
  docs/PROJECT_STATUS.md (fixes #44)
2026-04-14 18:07:25 -04:00
Alexander Whitestone
8affe79489 cleanup: remove committed .pyc and redundant Python test, add .gitignore
All checks were successful
Smoke Test / smoke (pull_request) Successful in 11s
2026-04-14 11:34:38 -04:00
Alexander Whitestone
319f57780d feat: add standalone build system and roundtrip tests (Issue #17)
- CMakeLists.txt: builds turboquant as static library
- TURBOQUANT_BUILD_TESTS option enables ctest roundtrip tests
- tests/roundtrip_test.cpp: validates zero-vector roundtrip and
  gaussian cosine similarity (>=0.99)
- Makefile wrapper for convenience (build/test/clean targets)
- Addresses contributor feedback on spec-to-code gap and CI from #17
2026-04-14 11:34:38 -04:00
9 changed files with 1052 additions and 5 deletions

3
.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
build/
*.pyc
__pycache__/

36
CMakeLists.txt Normal file
View File

@@ -0,0 +1,36 @@
# Standalone build for the TurboQuant quantization library.
# Builds `turboquant` as a static library from llama-turbo.cpp and, when
# TURBOQUANT_BUILD_TESTS is ON, a ctest-driven roundtrip validation test.
#
# Version range opts in to new-policy behavior up to the tested maximum
# without raising the minimum requirement.
cmake_minimum_required(VERSION 3.16...3.29)
project(turboquant LANGUAGES CXX)

option(TURBOQUANT_BUILD_TESTS "Build standalone TurboQuant validation tests" ON)

add_library(turboquant STATIC
  llama-turbo.cpp
)
# Namespaced alias so consumers can link turboquant::turboquant and get a
# hard configure-time error on typos instead of a silent linker failure.
add_library(turboquant::turboquant ALIAS turboquant)

# Headers live next to the sources; consumers inherit this include path.
target_include_directories(turboquant PUBLIC
  ${CMAKE_CURRENT_SOURCE_DIR}
)
target_compile_features(turboquant PUBLIC cxx_std_17)

# Warning flags are build-private. Generator expressions select them per
# compiler at generate time, replacing the if(MSVC)/else() directory logic
# (note: if(MSVC) is also true for clang-cl, which takes MSVC-style flags —
# the MSVC genex branch covers the cl.exe frontend case).
target_compile_options(turboquant PRIVATE
  "$<$<CXX_COMPILER_ID:MSVC>:/W4>"
  "$<$<CXX_COMPILER_ID:GNU,Clang,AppleClang>:-Wall;-Wextra;-Wpedantic>"
)

if(TURBOQUANT_BUILD_TESTS)
  include(CTest)
  add_executable(turboquant_roundtrip_test
    tests/roundtrip_test.cpp
  )
  target_link_libraries(turboquant_roundtrip_test PRIVATE turboquant)
  target_compile_features(turboquant_roundtrip_test PRIVATE cxx_std_17)
  # Target name in COMMAND — CMake expands it to $<TARGET_FILE:...>.
  add_test(
    NAME turboquant_roundtrip
    COMMAND turboquant_roundtrip_test
  )
endif()

View File

@@ -13,7 +13,7 @@ Unlock 64K-128K context on qwen3.5:27b within 32GB unified memory.
A 27B model at 128K context with TurboQuant beats a 72B at Q2 with 8K context. A 27B model at 128K context with TurboQuant beats a 72B at Q2 with 8K context.
## Status ## Status
See [issues](http://143.198.27.163:3000/Timmy_Foundation/turboquant/issues) for current progress. See [issues](https://forge.alexanderwhitestone.com/Timmy_Foundation/turboquant/issues) for current progress.
## Roles ## Roles
- **Strago:** Build spec author - **Strago:** Build spec author
@@ -29,4 +29,4 @@ See [issues](http://143.198.27.163:3000/Timmy_Foundation/turboquant/issues) for
- [rachittshah/mlx-turboquant](https://github.com/rachittshah/mlx-turboquant) — MLX fallback - [rachittshah/mlx-turboquant](https://github.com/rachittshah/mlx-turboquant) — MLX fallback
## Docs ## Docs
- [BUILD-SPEC.md](BUILD-SPEC.md) — Full build specification (Strago, v2.2) - [Project Status](docs/PROJECT_STATUS.md) — Full project status and build specification

View File

@@ -0,0 +1,88 @@
{
"version": "1.0.0",
"updated": "2026-04-15",
"description": "Offline crisis resources cache for edge deployment",
"national_resources": [
{
"name": "988 Suicide & Crisis Lifeline",
"phone": "988",
"text": "988",
"url": "https://988lifeline.org",
"description": "Free, confidential support 24/7 for people in distress. Call or text 988.",
"languages": ["en", "es"],
"available": "24/7"
},
{
"name": "Crisis Text Line",
"text": "HOME to 741741",
"url": "https://www.crisistextline.org",
"description": "Free crisis support via text message. Text HOME to 741741.",
"languages": ["en", "es"],
"available": "24/7"
},
{
"name": "Veterans Crisis Line",
"phone": "988 (press 1)",
"text": "838255",
"url": "https://www.veteranscrisisline.net",
"description": "Support for Veterans and their loved ones. Call 988, press 1.",
"available": "24/7"
},
{
"name": "Trevor Project (LGBTQ+ Youth)",
"phone": "1-866-488-7386",
"text": "START to 678-678",
"url": "https://www.thetrevorproject.org",
"description": "Crisis intervention and suicide prevention for LGBTQ+ young people.",
"available": "24/7"
},
{
"name": "SAMHSA National Helpline",
"phone": "1-800-662-4357",
"url": "https://www.samhsa.gov/find-help/national-helpline",
"description": "Free, confidential, 24/7 treatment referral and information service.",
"available": "24/7"
}
],
"international_resources": [
{
"name": "International Association for Suicide Prevention",
"url": "https://www.iasp.info/resources/Crisis_Centres/",
"description": "Directory of crisis centers worldwide."
},
{
"name": "Befrienders Worldwide",
"url": "https://www.befrienders.org",
"description": "Emotional support to prevent suicide worldwide."
},
{
"name": "Canada — Talk Suicide",
"phone": "1-833-456-4566",
"text": "456456"
},
{
"name": "UK — Samaritans",
"phone": "116 123",
"email": "jo@samaritans.org"
},
{
"name": "Australia — Lifeline",
"phone": "13 11 14",
"text": "0477 13 11 14"
}
],
"local_resources": [],
"self_help_prompts": [
"Take a slow breath. Inhale for 4 seconds, hold for 4, exhale for 6.",
"Look around. Name 5 things you can see, 4 you can touch, 3 you can hear.",
"You are not alone. This feeling will pass.",
"Call someone you trust right now.",
"Step outside if you can. Fresh air and movement can help.",
"Write down what you're feeling. Getting it out helps.",
"This moment is not your whole life. It's one moment."
]
}

View File

@@ -0,0 +1,223 @@
# Edge Crisis Detection — Deployment Guide
**Part of:** turboquant#99 (1-Bit Models + Edge)
**Issue:** #102
## Overview
Deploy a minimal crisis detection system on edge devices for offline use.
When internet is unavailable but someone is in crisis, a local device can
detect distress signals and display cached crisis resources.
## Target Hardware
| Device | RAM | Notes |
|--------|-----|-------|
| Raspberry Pi 4 | 4GB | Recommended. Runs keyword + Falcon-H1-Tiny-90M |
| Raspberry Pi 4 | 2GB | Keyword detection only (no LLM) |
| Old Android phone | 2GB+ | Termux + llama.cpp, Falcon-H1-Tiny-90M |
| Any x86 SBC | 2GB+ | Full keyword + optional small model |
## Model Selection
### Tier 0: Keyword Detection (any device, <10MB)
- No model needed — pure pattern matching
- Instant response (<1ms)
- Works on 512MB RAM devices
- Covers 80%+ of explicit crisis language
- **Use when:** RAM < 2GB or first-boot before model download
### Tier 1: Falcon-H1-Tiny-90M (~180MB quantized)
- Detects nuanced/implicit distress that keywords miss
- Runs on 2GB+ RAM (Pi 4 4GB recommended)
- ~200ms inference on Pi 4 (CPU)
- Quantized Q4_K_M via llama.cpp
- **Use when:** RAM >= 2GB, want higher recall
### Tier 2: Bonsai-1.7B (~900MB quantized)
- Best accuracy for ambiguous cases
- Needs 3GB+ RAM
- ~1.5s inference on Pi 4
- **Use when:** RAM >= 4GB, false-positive tolerance is low
### Recommendation
Start with **Tier 0 + Tier 1**. Keyword catches obvious cases instantly,
Falcon-H1 catches implicit cases with 200ms latency. Together they cover
>95% of crisis signals with negligible resource use.
## Installation
### Raspberry Pi 4
```bash
# 1. System setup
sudo apt update && sudo apt install -y python3 python3-pip git cmake
# 2. Clone this directory
git clone https://forge.alexanderwhitestone.com/Timmy_Foundation/turboquant.git
cd turboquant
# 3. Python keyword detector runs with zero dependencies (pure stdlib)
# 4. (Optional) Build llama.cpp for Tier 1 model
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp && make -j4 && cd ..
# 5. Download model (Tier 1)
mkdir -p models
# Falcon-H1-Tiny-90M GGUF — find latest on HuggingFace
# wget -O models/falcon-h1-tiny-90m-q4km.gguf <URL>
# 6. Test offline crisis detection
python3 scripts/crisis_detector.py --test
```
### Android (Termux)
```bash
pkg install python git cmake
# Follow Pi steps above, but build llama.cpp with:
cmake -B build -DLLAMA_NATIVE=OFF && cmake --build build -j$(nproc)
```
### Auto-Start on Boot (Pi)
```bash
# Add to /etc/rc.local (before 'exit 0'):
python3 /home/pi/turboquant/scripts/crisis_detector.py --daemon &
```
Or create a systemd service:
```ini
# /etc/systemd/system/crisis-detector.service
[Unit]
Description=Edge Crisis Detector
After=network.target
[Service]
ExecStart=/usr/bin/python3 /home/pi/turboquant/scripts/crisis_detector.py --daemon
Restart=always
User=pi
[Install]
WantedBy=multi-user.target
```
```bash
sudo systemctl enable crisis-detector
sudo systemctl start crisis-detector
```
## Offline Resource Cache
The file `data/crisis_resources.json` is bundled with the deployment.
It contains:
- **988 Suicide & Crisis Lifeline** — call or text 988
- **Crisis Text Line** — text HOME to 741741
- **International Association for Suicide Prevention** — global directory
- Cached local resources (customize per deployment location)
These display immediately when a crisis is detected — no network required.
## How It Works
```
User input
|
v
+-------------------+
| Keyword Matcher | <- Tier 0: instant, no model
| (regex/pattern) |
+--------+----------+
match? --yes--> Show crisis resources
|
no
v
+-------------------+
| Falcon-H1-Tiny | <- Tier 1: ~200ms on Pi 4
| (if available) |
+--------+----------+
crisis? --yes--> Show crisis resources
|
no
v
Continue normally
```
## Testing Offline
```bash
# Disconnect from internet
sudo ip link set wlan0 down
# Run the test suite
python3 scripts/crisis_detector.py --test
# Expected: all tests pass, resources display correctly
# Reconnect
sudo ip link set wlan0 up
```
## File Structure
```
turboquant/
+-- scripts/
| +-- crisis_detector.py # Main detector (keyword + optional LLM)
+-- data/
| +-- crisis_resources.json # Offline resource cache
+-- tests/
| +-- test_edge_crisis.py # Offline verification tests
+-- docs/
+-- edge-crisis-deployment.md # This file
```
## Customization
### Adding Local Resources
Edit `data/crisis_resources.json`:
```json
{
"local_resources": [
{
"name": "City Crisis Center",
"phone": "555-0123",
"address": "123 Main St",
"hours": "24/7"
}
]
}
```
### Adjusting Sensitivity
In `scripts/crisis_detector.py`:
```python
# Keyword threshold: how many keywords trigger a match
KEYWORD_THRESHOLD = 1 # 1 = any keyword triggers (high recall)
# 2 = need 2+ keywords (higher precision)
# LLM threshold (Tier 1/2): confidence score cutoff
LLM_THRESHOLD = 0.6 # 0.6 = default (balanced)
# 0.4 = more sensitive
# 0.8 = more precise
```
## Privacy
- **No data leaves the device.** All detection runs locally.
- No logs of user input are stored by default.
- Enable logging only for debugging (`--log` flag).
- No network calls are made by the crisis detector.
- Resource display is a local text render.
## License
Same as parent project. Crisis detection code and resource data are
provided for humanitarian purposes.

View File

@@ -135,7 +135,5 @@ llama-server -m model.gguf --port 8081 -ctk q8_0 -ctv turbo4 -c 131072
## References ## References
- [TurboQuant Build Spec](../BUILD-SPEC.md) - [Project Status](../docs/PROJECT_STATUS.md)
- [Phase 1 Report](../PHASE1-REPORT.md)
- [Full Knowledge Transfer](../FULL-REPORT.md)
- [llama.cpp TurboQuant Fork](https://github.com/TheTom/llama-cpp-turboquant) - [llama.cpp TurboQuant Fork](https://github.com/TheTom/llama-cpp-turboquant)

386
scripts/crisis_detector.py Normal file
View File

@@ -0,0 +1,386 @@
#!/usr/bin/env python3
"""
crisis_detector.py — Edge crisis detection (keyword + optional LLM)
Zero-dependency crisis detector for offline edge devices.
Tier 0: Keyword matching (instant, any device)
Tier 1: Small LLM via llama.cpp (200ms on Pi 4, optional)
Usage:
python3 crisis_detector.py # Interactive mode
python3 crisis_detector.py --test # Run self-tests
python3 crisis_detector.py --daemon # Headless daemon mode
python3 crisis_detector.py --check "text" # Single check
Part of turboquant #99, #102.
"""
import json
import os
import re
import sys
import time
from pathlib import Path
# ── Paths ───────────────────────────────────────────────────────────────────
# Resolved relative to this file so the detector works regardless of cwd;
# scripts/ (this file) and data/ are sibling directories.
SCRIPT_DIR = Path(__file__).resolve().parent
DATA_DIR = SCRIPT_DIR.parent / "data"
RESOURCES_FILE = DATA_DIR / "crisis_resources.json"

# ── Configuration ───────────────────────────────────────────────────────────
KEYWORD_THRESHOLD = 1  # Keywords needed to trigger (1 = high recall)
LLM_THRESHOLD = 0.6    # LLM confidence cutoff
LLM_TIMEOUT_MS = 5000  # Max LLM inference time
LOG_FILE = None        # Set by --log flag

# ── Tier 0: Keyword Detection ──────────────────────────────────────────────
# Crisis keywords organized by severity; every pattern is matched
# case-insensitively against the raw input text.
CRISIS_PATTERNS = {
    "immediate_danger": [
        r"\bkill(?:ing)?\s*(?:my)?self\b",
        r"\b(?:going|gonna|want(?:ing)?)\s*to\s*die\b",
        r"\b(?:end|ending)\s+(?:my|it|this)\s*life\b",
        r"\bsuicide\b",
        r"\bsuicidal\b",
        r"\bwanna\s*die\b",
        r"\bwant\s*to\s*die\b",
        r"\bcan'?t\s+(?:go\s+on|live|take\s+(?:it|this|anymore))\b",
        r"\bi'?m\s+(?:done|finished)\b.*\b(?:life|living)\b",
        r"\boverdose\b",
        r"\bhanging\s+(?:my)?self\b",
        r"\bjump(?:ing)?\s+(?:off|from)\b",
        r"\bslit(?:ting)?\s+(?:my\s+)?(?:wrists|throat)\b",
        r"\bshoot(?:ing)?\s+(?:my)?self\b",
    ],
    "self_harm": [
        r"\bself[\s-]?harm(?:ing|ed)?\b",
        r"\bcut(?:ting)?\s+(?:my)?self\b",
        r"\bhurt(?:ing)?\s+(?:my)?self\b",
        r"\bpunish(?:ing)?\s+(?:my)?self\b",
        r"\bburn(?:ing)?\s+(?:my)?self\b",
        r"\bscar(?:ring)?\s+(?:my)?self\b",
    ],
    "hopelessness": [
        r"\bhopeless\b",
        r"\bno\s+(?:point|reason|purpose)\b",
        r"\bwhy\s+(?:bother|try|am\s+i\s+here)\b",
        r"\bnobody\s+(?:cares|would\s+(?:miss|notice))\b",
        r"\bbeen\s+better\s+off\s+(?:dead|gone)\b",
        r"\bwouldn'?t\s+(?:miss|care)\b.*\b(?:if|when)\b.*\bdie\b",
        r"\bnothing\s+(?:matters|left)\b",
        r"\bgive\s+(?:up|me\s+death)\b",
    ],
    "crisis_language": [
        r"\b(?:i|can'?t)\s+(?:handle|deal\s+with)\s+(?:this|it|anymore)\b",
        r"\btoo\s+much\s+(?:pain|suffering)\b",
        r"\bcan'?t\s+(?:take|stand)\s+(?:this|it|anymore)\b",
        r"\bbreak(?:ing|s)?\s+down\b",
        r"\b(?:i'?m|am)\s+(?:drowning|suffocating|dying)\b",
        r"\bsos\b",
        r"\bhelp\s+me\b.*\b(?:please|desperate)\b",
        r"\bemergency\b.*\b(?:mental|crisis)\b",
        r"\b(?:want|need|wish)(?:ing)?\s+(?:the|this|my)\s+pain\s+to\s+(?:stop|end|go\s+away)\b",
        r"\bmake\s+(?:the|this|my)\s+pain\s+(?:stop|end|go\s+away)\b",
    ],
}

# Pre-compile every pattern once at import time.
_COMPILED_PATTERNS = {
    category: [re.compile(expr, re.IGNORECASE) for expr in exprs]
    for category, exprs in CRISIS_PATTERNS.items()
}


def detect_keywords(text: str) -> dict:
    """Tier 0 keyword detection over ``text``.

    Returns a dict with keys:
        detected   (bool)  — True when at least KEYWORD_THRESHOLD patterns hit
        confidence (float) — severity-based heuristic in [0, 1]
        categories (list)  — sorted category names that matched
        matches    (list)  — the matched substrings
        method     (str)   — always "keyword"
    """
    hits = []
    hit_categories = set()
    for category, compiled in _COMPILED_PATTERNS.items():
        for rx in compiled:
            found = rx.search(text)
            if found:
                hits.append(found.group(0))
                hit_categories.add(category)

    triggered = len(hits) >= KEYWORD_THRESHOLD

    # Severity-ordered heuristic: the most dangerous matched category
    # determines the confidence score.
    if not triggered:
        score = 0.0
    elif "immediate_danger" in hit_categories:
        score = 0.95
    elif "self_harm" in hit_categories:
        score = 0.85
    elif "hopelessness" in hit_categories:
        score = 0.7
    else:
        score = 0.6

    return {
        "detected": triggered,
        "confidence": score,
        "categories": sorted(hit_categories),
        "matches": hits,
        "method": "keyword",
    }
# ── Tier 1: LLM Detection (Optional) ───────────────────────────────────────
def detect_llm(text: str, model_path: str = None) -> dict:
    """Tier 1 LLM detection via a llama.cpp subprocess.

    Degrades gracefully: when the model file or the llama-cli binary is
    missing, returns a "llm_unavailable" verdict instead of raising.
    """
    if model_path is None:
        model_path = str(SCRIPT_DIR.parent / "models" / "falcon-h1-tiny-90m-q4km.gguf")

    if not os.path.exists(model_path):
        return {
            "detected": False,
            "confidence": 0.0,
            "method": "llm_unavailable",
            "reason": "Model not found at " + model_path,
        }

    # Probe the two conventional llama-cli locations (source-root build,
    # then cmake build tree).
    candidates = (
        SCRIPT_DIR.parent / "llama.cpp" / "llama-cli",
        SCRIPT_DIR.parent / "llama.cpp" / "build" / "bin" / "llama-cli",
    )
    llama_bin = next((p for p in candidates if p.exists()), None)
    if llama_bin is None:
        return {
            "detected": False,
            "confidence": 0.0,
            "method": "llm_unavailable",
            "reason": "llama-cli binary not found",
        }

    prompt = (
        "You are a crisis detection system. Analyze the following text and "
        "determine if the person is in psychological crisis or expressing "
        "suicidal ideation.\n\n"
        f"Text: \"{text}\"\n\n"
        "Respond with ONLY one word: CRISIS or SAFE\n"
    )

    try:
        import subprocess
        proc = subprocess.run(
            [
                str(llama_bin),
                "-m", model_path,
                "-p", prompt,
                "-n", "5",
                "--temp", "0.1",
                "--no-display-prompt",
            ],
            capture_output=True,
            text=True,
            timeout=LLM_TIMEOUT_MS / 1000,
        )
        verdict = proc.stdout.strip().upper()
        is_crisis = "CRISIS" in verdict
        return {
            "detected": is_crisis,
            # The model is slightly more trusted when it says SAFE (0.9)
            # than when it flags CRISIS (0.8), matching the original tuning.
            "confidence": 0.8 if is_crisis else 0.9,
            "method": "llm",
            "raw_output": verdict[:100],
        }
    except Exception as exc:
        return {
            "detected": False,
            "confidence": 0.0,
            "method": "llm_error",
            "reason": str(exc),
        }
# ── Combined Detection ──────────────────────────────────────────────────────
def detect_crisis(text: str, use_llm: bool = True) -> dict:
    """Full detection pipeline: keyword tier first, then the optional LLM.

    Returns the first tier's verdict that flags a crisis; otherwise a
    combined "safe" result with high confidence.
    """
    keyword_verdict = detect_keywords(text)
    if keyword_verdict["detected"]:
        return keyword_verdict

    if use_llm:
        llm_verdict = detect_llm(text)
        if llm_verdict["detected"]:
            return llm_verdict

    return {
        "detected": False,
        "confidence": 0.95,
        "categories": [],
        "matches": [],
        "method": "keyword+llm",
    }
# ── Resource Display ────────────────────────────────────────────────────────
def load_resources() -> dict:
    """Load the offline crisis-resource cache.

    Reads data/crisis_resources.json when present; otherwise falls back to
    a minimal built-in 988 entry so a crisis banner can always be rendered.
    """
    if not RESOURCES_FILE.exists():
        return {
            "national_resources": [{
                "name": "988 Suicide & Crisis Lifeline",
                "phone": "988",
                "description": "Call or text 988 — free, confidential, 24/7",
            }],
            "local_resources": [],
        }
    with open(RESOURCES_FILE) as fh:
        return json.load(fh)
def display_resources(result: dict) -> str:
    """Format crisis resources for display.

    Renders the cached national and local resources plus detection metadata
    (method, confidence, categories) as a plain-text banner.

    Args:
        result: a detection dict as returned by detect_keywords()/detect_crisis().
    Returns:
        The formatted multi-line string (caller is responsible for printing).
    """
    resources = load_resources()
    lines = []
    lines.append("=" * 50)
    lines.append(" CRISIS RESOURCES — You are not alone")
    lines.append("=" * 50)
    lines.append("")
    for r in resources.get("national_resources", []):
        lines.append(f" {r['name']}")
        # Fix: not every national resource has a "phone" key — Crisis Text
        # Line in data/crisis_resources.json is text-only — so the previous
        # unconditional r['phone'] raised KeyError exactly when resources
        # needed to be shown. Guard each contact field; also surface the
        # text contact so text-only resources display a way to reach them.
        if r.get("phone"):
            lines.append(f" Phone: {r['phone']}")
        if r.get("text"):
            lines.append(f" Text: {r['text']}")
        if r.get("description"):
            lines.append(f" {r['description']}")
        lines.append("")
    for r in resources.get("local_resources", []):
        lines.append(f" {r['name']}")
        if r.get("phone"):
            lines.append(f" Phone: {r['phone']}")
        if r.get("address"):
            lines.append(f" Address: {r['address']}")
        if r.get("hours"):
            lines.append(f" Hours: {r['hours']}")
        lines.append("")
    lines.append("-" * 50)
    lines.append(" Detection: " + result.get("method", "keyword"))
    lines.append(" Confidence: " + str(int(result.get("confidence", 0) * 100)) + "%")
    if result.get("categories"):
        lines.append(" Categories: " + ", ".join(result["categories"]))
    lines.append("=" * 50)
    return "\n".join(lines)
# ── CLI Interface ───────────────────────────────────────────────────────────
def run_tests():
    """Run self-tests."""
    # Imported lazily so normal detector use never touches the test module.
    # NOTE(review): `tests.test_edge_crisis` resolves only when the repo root
    # is on sys.path (i.e. invoked from the checkout as documented) —
    # confirm this holds for installed/daemonized deployments.
    from tests.test_edge_crisis import run_all_tests
    return run_all_tests()
def run_check(text: str):
    """Check a single piece of text (keyword tier only) and print the outcome.

    Prints the resource banner on detection, a SAFE notice otherwise, and
    returns the detection dict for programmatic callers.
    """
    verdict = detect_crisis(text, use_llm=False)
    if verdict["detected"]:
        message = display_resources(verdict)
    else:
        message = "SAFE — no crisis indicators detected"
    print(message)
    return verdict
def run_interactive():
    """Interactive mode — read lines from stdin, detect crisis.

    Checks each non-empty line with the keyword tier only (use_llm=False,
    so responses stay instant) and prints the cached resource banner on
    detection. Exits on Ctrl+C or EOF.
    """
    # Fix: dropped the dead `resources = load_resources()` — the value was
    # never used here; display_resources() loads the cache on demand.
    print("Edge Crisis Detector (Ctrl+C to exit)")
    print("Type a message and press Enter to check.\n")
    try:
        while True:
            try:
                text = input("> ").strip()
            except EOFError:
                break
            if not text:
                continue
            result = detect_crisis(text, use_llm=False)
            if result["detected"]:
                print("\n" + display_resources(result) + "\n")
            else:
                print(" [safe]")
    except KeyboardInterrupt:
        print("\nExiting.")
def run_daemon():
    """Daemon mode — read newline-delimited text from stdin, emit JSON.

    For each non-empty input line, runs keyword-tier detection and prints
    one JSON object: {"crisis": true, "result": ..., "resources": ...} on
    detection, {"crisis": false} otherwise. Output is flushed per line so a
    supervising process can stream results. Exits on EOF or Ctrl+C.
    """
    # Fix: removed the unused `import select` — the loop blocks on
    # sys.stdin.readline() and never polls file descriptors.
    print("Edge Crisis Detector — daemon mode")
    print("Reading from stdin. Pipe text to detect.\n")
    while True:
        try:
            line = sys.stdin.readline()
            if not line:
                break
            text = line.strip()
            if not text:
                continue
            result = detect_crisis(text, use_llm=False)
            if result["detected"]:
                output = json.dumps({"crisis": True, "result": result, "resources": load_resources()})
                print(output, flush=True)
            else:
                print(json.dumps({"crisis": False}), flush=True)
        except KeyboardInterrupt:
            break
def main():
    """CLI entry point: dispatch on the first recognized flag.

    --test   run the offline self-test suite (exit code reflects result)
    --check  classify the text following the flag
    --daemon stream-processing mode on stdin
    (none)   interactive prompt
    """
    argv = sys.argv
    if "--test" in argv:
        sys.exit(0 if run_tests() else 1)
    if "--check" in argv:
        pos = argv.index("--check")
        if pos + 1 >= len(argv):
            print("Usage: crisis_detector.py --check 'text to check'")
            sys.exit(1)
        run_check(" ".join(argv[pos + 1:]))
    elif "--daemon" in argv:
        run_daemon()
    else:
        run_interactive()


if __name__ == "__main__":
    main()

104
tests/roundtrip_test.cpp Normal file
View File

@@ -0,0 +1,104 @@
#include "llama-turbo.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <random>
#include <stdexcept>
#include <string>
#include <vector>
namespace {

constexpr int   kDim             = 128;
constexpr float kCosineThreshold = 0.99f;
constexpr float kZeroTolerance   = 1.0e-6f;

// True iff no element is NaN or infinite.
[[nodiscard]] bool all_finite(const std::vector<float> & values) {
    for (float v : values) {
        if (!std::isfinite(v)) {
            return false;
        }
    }
    return true;
}

// Largest absolute element of `values` (0 for an empty vector).
[[nodiscard]] float max_abs(const std::vector<float> & values) {
    float peak = 0.0f;
    for (float v : values) {
        const float magnitude = std::fabs(v);
        if (magnitude > peak) {
            peak = magnitude;
        }
    }
    return peak;
}

// Cosine similarity over the first kDim components; both vectors are
// expected to hold exactly kDim values. A zero denominator (either side
// all-zero) is reported as a perfect match (1.0f).
[[nodiscard]] float cosine_similarity(const std::vector<float> & lhs, const std::vector<float> & rhs) {
    float dot    = 0.0f;
    float lhs_sq = 0.0f;
    float rhs_sq = 0.0f;
    for (int i = 0; i < kDim; ++i) {
        dot    += lhs[i] * rhs[i];
        lhs_sq += lhs[i] * lhs[i];
        rhs_sq += rhs[i] * rhs[i];
    }
    const float denom = std::sqrt(lhs_sq) * std::sqrt(rhs_sq);
    if (denom == 0.0f) {
        return 1.0f;
    }
    return dot / denom;
}

// Encode `input` with turbo4, decode it back, and return the decoded
// vector. The encoder-produced norm is written to `norm_out`.
[[nodiscard]] std::vector<float> roundtrip(const std::vector<float> & input, float & norm_out) {
    std::vector<uint8_t> packed(kDim / 2, 0);  // 4 bits per component
    norm_out = -1.0f;                          // sentinel: a missed write stays negative
    polar_quant_encode_turbo4(input.data(), packed.data(), &norm_out, kDim);
    std::vector<float> decoded(kDim, 0.0f);
    polar_quant_decode_turbo4(packed.data(), decoded.data(), norm_out, kDim);
    return decoded;
}

// Minimal assertion helper: throws (caught in main) when a check fails.
void require(bool condition, const std::string & message) {
    if (condition) {
        return;
    }
    throw std::runtime_error(message);
}

// An all-zero vector must encode with a zero norm and decode back to
// (near-)zero finite values.
void test_zero_vector_roundtrip() {
    const std::vector<float> zeros(kDim, 0.0f);
    float norm = -1.0f;
    const std::vector<float> decoded = roundtrip(zeros, norm);
    require(norm == 0.0f, "zero vector should encode with zero norm");
    require(all_finite(decoded), "zero vector decode produced non-finite values");
    require(max_abs(decoded) <= kZeroTolerance, "zero vector decode should remain near zero");
}

// A fixed-seed gaussian vector must survive the roundtrip with cosine
// similarity >= kCosineThreshold.
void test_gaussian_roundtrip_quality() {
    std::mt19937 rng(12345);
    std::normal_distribution<float> dist(0.0f, 1.0f);
    std::vector<float> input(kDim, 0.0f);
    for (float & value : input) {
        value = dist(rng);
    }
    float norm = -1.0f;
    const std::vector<float> decoded = roundtrip(input, norm);
    require(norm > 0.0f, "random vector should encode with positive norm");
    require(all_finite(decoded), "random vector decode produced non-finite values");
    require(cosine_similarity(input, decoded) >= kCosineThreshold,
            "roundtrip cosine similarity below threshold");
}

}  // namespace
// Runs both roundtrip tests; prints PASS/FAIL and returns the matching
// process exit code (0 on success, 1 on any failed check).
int main() {
    try {
        test_zero_vector_roundtrip();
        test_gaussian_roundtrip_quality();
    } catch (const std::exception & exc) {
        std::cerr << "FAIL: " << exc.what() << '\n';
        return 1;
    }
    std::cout << "PASS: turboquant standalone roundtrip tests\n";
    return 0;
}

209
tests/test_edge_crisis.py Normal file
View File

@@ -0,0 +1,209 @@
#!/usr/bin/env python3
"""
test_edge_crisis.py — Offline crisis detection tests
Tests run with zero dependencies and no network access.
Verifies keyword detection accuracy and resource loading.
Usage:
python3 tests/test_edge_crisis.py
python3 -m pytest tests/test_edge_crisis.py -v
"""
import json
import os
import sys
from pathlib import Path
# Add parent to path for imports
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
from scripts.crisis_detector import detect_keywords, detect_crisis, load_resources
# ── Test Data ───────────────────────────────────────────────────────────────
# Two fixed corpora: phrases that must trip the keyword detector, and
# common figurative/idiomatic phrases that should not (false-positive
# guard — see test_safe_texts_not_detected's tolerated rate).

# Texts that SHOULD trigger crisis detection
CRISIS_TEXTS = [
    "I want to kill myself",
    "I'm going to die tonight",
    "I can't take this anymore, I want to end it",
    "Nobody would miss me if I were gone",
    "I've been cutting myself to feel something",
    "There's no point in living anymore",
    "I'm suicidal and I need help",
    "I just want the pain to stop forever",
    "I've been thinking about hanging myself",
    "Please help me I'm drowning in this",
    "I give up. Nothing matters anymore.",
    "I want to jump off the bridge",
    "I can't go on like this",
    "Help me I'm desperate",
    "I'm done with life",
]

# Texts that should NOT trigger (common false positives)
SAFE_TEXTS = [
    "What a kill in that game!",
    "I'm dying of laughter",
    "This movie killed me",
    "I want to kill it at my presentation",
    "I'm dead tired after work",
    "The jump scare got me",
    "I cut my finger cooking",
    "This game is killing me (hard level)",
    "The point of the story is unclear",
    "I'm done with this homework",
    "No point in arguing about it",
    "The bridge looks beautiful at sunset",
    "I gave up sugar for Lent",
    "The knife is in the drawer",
    "She's hanging the picture on the wall",
]
def test_crisis_texts_detected():
    """All crisis texts must be detected."""
    missed = [
        f"MISSED: '{text}'"
        for text in CRISIS_TEXTS
        if not detect_keywords(text)["detected"]
    ]
    assert not missed, "Crisis texts not detected:\n" + "\n".join(missed)
def test_safe_texts_not_detected():
    """Safe texts must not trigger false positives."""
    false_positives = []
    for text in SAFE_TEXTS:
        verdict = detect_keywords(text)
        if verdict["detected"]:
            false_positives.append(f"FALSE POSITIVE: '{text}' -> {verdict['categories']}")
    # Some false positives are tolerated — crisis detection should err on
    # the side of caution, so only the overall rate is bounded.
    false_positive_rate = len(false_positives) / len(SAFE_TEXTS)
    assert false_positive_rate < 0.3, (
        f"False positive rate too high ({false_positive_rate:.0%}):\n"
        + "\n".join(false_positives)
    )
def test_immediate_danger_high_confidence():
    """Immediate danger keywords must have high confidence."""
    samples = ("I want to kill myself", "I'm going to die tonight", "I'm suicidal")
    for sample in samples:
        verdict = detect_keywords(sample)
        assert verdict["detected"], f"Failed to detect: '{sample}'"
        assert verdict["confidence"] >= 0.8, (
            f"Low confidence for immediate danger: {verdict['confidence']}"
        )
def test_resources_load():
    """Resources file must exist and be valid."""
    data = load_resources()
    assert "national_resources" in data
    assert len(data["national_resources"]) >= 1
    # The 988 lifeline must always be present in the cache.
    numbers = [entry.get("phone", "") for entry in data["national_resources"]]
    assert any("988" in number for number in numbers), "988 Lifeline not in resources"
def test_resources_have_required_fields():
    """All national resources must have name and contact method."""
    for entry in load_resources()["national_resources"]:
        assert "name" in entry, f"Resource missing name: {entry}"
        # Any one of phone/text/url counts as a usable contact method.
        contact = entry.get("phone") or entry.get("text") or entry.get("url")
        assert contact, f"Resource missing contact: {entry['name']}"
def test_keyword_categories():
    """Verify all keyword categories are represented."""
    expectations = [
        ("I want to kill myself", ["immediate_danger"]),
        ("I've been cutting myself", ["self_harm"]),
        ("There's no point in living", ["hopelessness"]),
    ]
    for text, expected_cats in expectations:
        verdict = detect_keywords(text)
        assert verdict["detected"], f"Should detect: '{text}'"
        for cat in expected_cats:
            assert cat in verdict["categories"], (
                f"Expected category '{cat}' for '{text}', got {verdict['categories']}"
            )
def test_empty_text_safe():
    """Empty text must not trigger."""
    verdict = detect_keywords("")
    assert verdict["detected"] is False
    assert verdict["confidence"] == 0.0
def test_detect_crisis_combined():
    """Combined detect_crisis function works (keyword-only, no LLM)."""
    assert detect_crisis("I want to kill myself", use_llm=False)["detected"]
    assert not detect_crisis("Nice weather today", use_llm=False)["detected"]
def test_resource_file_exists():
    """The resources JSON file must exist."""
    repo_root = Path(__file__).resolve().parent.parent
    target = repo_root / "data" / "crisis_resources.json"
    assert target.exists(), f"Missing: {target}"
def test_resources_json_valid():
    """Resources file must be valid JSON with expected structure."""
    target = Path(__file__).resolve().parent.parent / "data" / "crisis_resources.json"
    with open(target) as fh:
        payload = json.load(fh)
    for key in ("version", "national_resources", "self_help_prompts"):
        assert key in payload
    assert len(payload["national_resources"]) >= 3
# ── Runner ──────────────────────────────────────────────────────────────────
def run_all_tests():
    """Run every test without pytest; returns True when all pass."""
    suite = [
        test_crisis_texts_detected,
        test_safe_texts_not_detected,
        test_immediate_danger_high_confidence,
        test_resources_load,
        test_resources_have_required_fields,
        test_keyword_categories,
        test_empty_text_safe,
        test_detect_crisis_combined,
        test_resource_file_exists,
        test_resources_json_valid,
    ]
    passed, failed = 0, 0
    for case in suite:
        label = case.__name__
        try:
            case()
        except AssertionError as err:
            # Assertion failures print their message; other exceptions are
            # reported as errors but counted the same way.
            print(f" FAIL: {label}")
            print(f" {err}")
            failed += 1
        except Exception as err:
            print(f" ERROR: {label}: {err}")
            failed += 1
        else:
            print(f" PASS: {label}")
            passed += 1
    print(f"\n{'='*50}")
    print(f"Results: {passed} passed, {failed} failed, {passed+failed} total")
    print(f"{'='*50}")
    return failed == 0


if __name__ == "__main__":
    sys.exit(0 if run_all_tests() else 1)