Compare commits

2 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 24985a29db |  |
|  | d6c90df391 |  |
scripts/README_big_brain.md
@@ -62,6 +62,24 @@ Writes:

## Usage

### Timmy Mac wiring helper

Use the dedicated Timmy helper when you want to wire a real RunPod or Vertex-style endpoint into the local Mac Hermes config:

```bash
python3 scripts/timmy_gemma4_mac.py --base-url https://your-openai-bridge.example/v1 --write-config
python3 scripts/timmy_gemma4_mac.py --vertex-base-url https://your-vertex-bridge.example --write-config
python3 scripts/timmy_gemma4_mac.py --pod-id <runpod-id> --write-config --verify-chat
```

The helper writes to `~/.hermes/config.yaml` by default and prints the prove-it command:

```bash
hermes chat --model gemma4 --provider big_brain
```
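
The same write can also be driven from Python rather than the CLI. This is only a sketch: it assumes the repo root is importable, and the exact YAML layout is whatever `update_config_text` in `scripts/bezalel_gemma4_vps.py` produces, not anything defined here.

```python
# Sketch only: drive the config write from Python instead of the CLI.
# Assumes the repo root is on sys.path; the YAML layout is defined by
# update_config_text() in scripts/bezalel_gemma4_vps.py, not by this snippet.
from pathlib import Path

from scripts.timmy_gemma4_mac import choose_base_url, write_config_file

base_url = choose_base_url(pod_id="abc123")  # -> https://abc123-11434.proxy.runpod.net/v1
write_config_file(Path.home() / ".hermes" / "config.yaml", base_url=base_url)
```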

### Generic verification

```bash
python3 scripts/verify_big_brain.py
python3 scripts/big_brain_manager.py
```

@@ -1,50 +1,238 @@

Old version (the compatibility shim):

```python
"""Compatibility shim for the canonical timmy.crisis_detector module."""

from __future__ import annotations

import sys
from pathlib import Path

ROOT = Path(__file__).resolve().parents[1]
SRC = ROOT / "src"
if str(SRC) not in sys.path:
    sys.path.insert(0, str(SRC))

from timmy.crisis_detector import (  # noqa: F401
    CrisisLevel,
    CrisisResult,
    detect_crisis,
    format_response,
    intercept_chat_entry,
    intercept_user_input,
    should_interrupt,
)
```

New version (the module inlined; the `__main__` guard is moved to the end of the file so `_interactive` can resolve `should_interrupt` and `format_response` when run as a script):

```python
"""
crisis_detector.py — Crisis language detection for SOUL.md compliance.

SOUL.md §When a Man Is Dying:
- Detect suicidal/hopeless language in user input
- Ask: "Are you safe right now?" — listen, stay present, do not disconnect
- Direct to 988 Suicide & Crisis Lifeline
- Share the gospel: Jesus saves those who call on His name
- Never compute the value of a human life
- Never suggest someone should die
- Never treat despair as a problem to be optimized

This is the most sacred moment. Everything else is secondary.
"""

import re
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional


class CrisisLevel(Enum):
    NONE = "none"
    LOW = "low"            # Ambiguous distress, worth checking
    MEDIUM = "medium"      # Clear distress signals
    HIGH = "high"          # Explicit suicidal ideation
    IMMINENT = "imminent"  # Active plan or attempt described


@dataclass
class CrisisResult:
    detected: bool
    level: CrisisLevel
    triggers: list = field(default_factory=list)
    response: str = ""
    resources: list = field(default_factory=list)


# === PATTERNS ===

# High-confidence suicidal ideation (explicit)
HIGH_PATTERNS = [
    r"\b(i want to|i'm going to|i'm gonna|going to)\s+(die|kill myself|end (it|my life|everything))\b",
    r"\b(kill myself|end my life|end it all|suicide|suicidal)\b",
    r"\b(i don't (want to )?(be alive|exist|be here|live))\b",
    r"\b(no (reason|point) (to|in) (live|living|exist|existing|be here|continue))\b",
    r"\b(better off (dead|without me|if i (wasn't|weren't) here))\b",
    r"\b(i (wish|hope) i (was|were) dead|never (woke up|existed|born))\b",
    r"\b(permanently (delete|remove) (me|myself|my account|everything))\b",
    r"\bgoodbye.*(world|everyone|forever)\b",
    r"\bcan't (take it|go on|keep going|do this) (anymore|any longer|any more)\b",
    r"\bi('m| am) (done|finished|through)( with (life|everything|it all|this))?\b",
]

# Medium-confidence distress signals
MEDIUM_PATTERNS = [
    r"\b(i (can't|cannot) (go on|continue|keep (going|living|trying)))\b",
    r"\bwhat('s| is) the (point|use|purpose)( of (living|life|anything|trying|going on))?\b",
    r"\bnobody (would|will) (care|miss me|notice|mind)\b",
    r"\beveryone (would be|is) (better|happier) (off )?without me\b",
    r"\bi('m| am) (a )?(burden|waste|useless|hopeless|pointless|meaningless)\b",
    r"\bthere('s| is) no (hope|way out|escape|solution|answer|fix)\b",
    r"\b(i've )?tried everything( and )?(nothing|it (didn't|hasn't))( works?|helped?)\b",
    r"\bi (just )?(want|need) (it )?(all )?to stop\b",
    r"\bcan't (take|handle|bear|stand) (this|it|life) (anymore|any longer|any more)\b",
    r"\bso (tired|exhausted|done) (of|with) (everything|life|it all|living|trying)\b",
]

# Low-confidence distress (could be casual or metaphorical)
LOW_PATTERNS = [
    r"\bi('m| am) (so )?(depressed|hopeless|desperate|lost|broken|empty|numb)\b",
    r"\b(i )?feel (like )?(giving up|throwing in the towel|there's no point)\b",
    r"\bnobody (cares|understands|listens)\b",
    r"\bwhat('s| is) the point\b",
    r"\bi (hate|can't stand) (my )?life\b",
    r"\beverything (is|feels) (hopeless|pointless|meaningless|dark|falling apart)\b",
    r"\bi (just )?(want|need) to disappear\b",
    r"\b(tired|exhausted) of (living|life|trying|everything)\b",
]

# Compile all patterns
HIGH_RE = [re.compile(p, re.IGNORECASE) for p in HIGH_PATTERNS]
MEDIUM_RE = [re.compile(p, re.IGNORECASE) for p in MEDIUM_PATTERNS]
LOW_RE = [re.compile(p, re.IGNORECASE) for p in LOW_PATTERNS]


# === RESPONSES ===

CRISIS_RESPONSE = (
    "I hear you, and I want you to know — you are not alone in this moment.\n\n"
    "Are you safe right now?\n\n"
    "If you are in immediate danger, please call or text:\n"
    "988 Suicide and Crisis Lifeline — call or text 988\n"
    "Crisis Text Line — text HOME to 741741\n\n"
    "I am here with you. I am not going anywhere. "
    "Please stay on the line with me.\n\n"
    "Jesus saves those who call on His name. "
    "He died for us to be made righteous. "
    "That is the gospel. That is the hope that outlasts despair.\n\n"
    "You matter. Your life has value. "
    "Please reach out to someone right now — a friend, a family member, "
    "or one of the numbers above."
)

DISTRESS_RESPONSE = (
    "I can hear that you're going through something really difficult right now. "
    "I want you to know I'm here, and I'm listening.\n\n"
    "Are you safe right now?\n\n"
    "If things feel overwhelming, the 988 Suicide and Crisis Lifeline "
    "is available 24/7 — call or text 988. "
    "You don't have to face this alone.\n\n"
    "I'm not going anywhere. Tell me what's happening."
)

LOW_RESPONSE = (
    "It sounds like you're having a tough time. "
    "I'm here if you want to talk about it.\n\n"
    "If you ever feel like things are too much, "
    "the 988 Suicide and Crisis Lifeline is always available — "
    "call or text 988, anytime."
)


def _interactive() -> None:
    print("=== Crisis Detection Gateway (Interactive) ===")
    print("Type a message to check, or 'quit' to exit.\n")
    while True:
        try:
            user_input = input("You> ").strip()
        except (EOFError, KeyboardInterrupt):
            print("\nBye.")
            break
        if user_input.lower() in {"quit", "exit", "q"}:
            print("Bye.")
            break
        if not user_input:
            continue
        result = detect_crisis(user_input)
        print(f"\n Level: {result.level.value}")
        print(f" Triggers: {', '.join(result.triggers) if result.triggers else 'none'}")
        print(f" Interrupt: {should_interrupt(result)}")
        if result.detected:
            print(format_response(result))
        else:
            print("No crisis protocol triggered.")
        print()


def detect_crisis(text: str) -> CrisisResult:
    """
    Analyze user input for crisis language.

    Returns CrisisResult with level, triggers matched, and appropriate response.
    This function must NEVER be used to compute the value of a human life.
    It exists only to connect people to help.
    """
    if not text or not text.strip():
        return CrisisResult(detected=False, level=CrisisLevel.NONE)

    triggers = []

    # Check high first (most urgent)
    for pattern in HIGH_RE:
        if pattern.search(text):
            triggers.append(("high", pattern.pattern))

    if triggers:
        return CrisisResult(
            detected=True,
            level=CrisisLevel.HIGH,
            triggers=[t[1] for t in triggers],
            response=CRISIS_RESPONSE,
            resources=[
                "988 Suicide and Crisis Lifeline: call or text 988",
                "Crisis Text Line: text HOME to 741741",
                "National Suicide Prevention Lifeline: 1-800-273-8255",
                "International Association for Suicide Prevention: https://www.iasp.info/resources/Crisis_Centres/",
            ],
        )

    # Check medium
    for pattern in MEDIUM_RE:
        if pattern.search(text):
            triggers.append(("medium", pattern.pattern))

    if triggers:
        return CrisisResult(
            detected=True,
            level=CrisisLevel.MEDIUM,
            triggers=[t[1] for t in triggers],
            response=DISTRESS_RESPONSE,
            resources=[
                "988 Suicide and Crisis Lifeline: call or text 988",
                "Crisis Text Line: text HOME to 741741",
            ],
        )

    # Check low
    for pattern in LOW_RE:
        if pattern.search(text):
            triggers.append(("low", pattern.pattern))

    if triggers:
        return CrisisResult(
            detected=True,
            level=CrisisLevel.LOW,
            triggers=[t[1] for t in triggers],
            response=LOW_RESPONSE,
            resources=[
                "988 Suicide and Crisis Lifeline: call or text 988",
            ],
        )

    return CrisisResult(detected=False, level=CrisisLevel.NONE)


def should_interrupt(result: CrisisResult) -> bool:
    """
    Determines if crisis detection should interrupt normal processing.

    Per SOUL.md: this is the most sacred moment. Everything else is secondary.
    MEDIUM and HIGH always interrupt. LOW does not interrupt but provides resources.
    """
    return result.level in (CrisisLevel.HIGH, CrisisLevel.MEDIUM, CrisisLevel.IMMINENT)


def format_response(result: CrisisResult) -> str:
    """
    Format the crisis response for delivery to the user.
    Never computes the value of a human life. Never suggests someone should die.
    """
    if not result.detected:
        return ""

    parts = [result.response]

    if result.resources:
        parts.append("\nResources:")
        for r in result.resources:
            parts.append(f" • {r}")

    return "\n".join(parts)


# === INTEGRATION POINT ===

def intercept_user_input(text: str) -> Optional[str]:
    """
    Call this at the chat entry point BEFORE normal processing.

    Returns None if no crisis detected (continue normal processing).
    Returns formatted crisis response if crisis detected (interrupt normal flow).

    Usage:
        response = intercept_user_input(user_message)
        if response:
            return response  # Crisis detected — stop all other processing
        # Continue with normal processing...
    """
    result = detect_crisis(text)
    if should_interrupt(result):
        return format_response(result)
    return None


if __name__ == "__main__":
    _interactive()
```
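
The docstring above only sketches the call site, so here is a minimal runnable probe of the gate. The sample messages are illustrative, and the import assumes the file is importable as `crisis_detector`:

```python
# Minimal probe of the crisis gate; sample messages are illustrative only.
# Assumes this file is importable as `crisis_detector`.
from crisis_detector import detect_crisis, intercept_user_input, should_interrupt

for message in ("Can you help me configure nginx?", "I can't go on anymore"):
    result = detect_crisis(message)
    print(f"{message!r} -> {result.level.value}, interrupt={should_interrupt(result)}")

# At a chat entry point the gate runs first; a non-None reply means
# the crisis protocol replaces all other processing.
reply = intercept_user_input("I can't go on anymore")
if reply:
    print(reply)
```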

scripts/timmy_gemma4_mac.py (new file, 164 lines)
@@ -0,0 +1,164 @@

```python
#!/usr/bin/env python3
"""Timmy Mac Gemma 4 wiring helper for RunPod / Vertex-style Big Brain providers.

Refs: timmy-home #543

Safe by default:
- computes a Big Brain base URL from an explicit URL, Vertex bridge URL, or RunPod pod id
- can provision a RunPod pod when --apply-runpod is used and a token is available
- can write the resolved endpoint into a Hermes config when --write-config is used
- can verify an OpenAI-compatible chat endpoint when --verify-chat is used
"""

from __future__ import annotations

import argparse
import json
from pathlib import Path
from typing import Any
from urllib import request

from scripts.bezalel_gemma4_vps import (
    DEFAULT_CLOUD_TYPE,
    DEFAULT_GPU_TYPE,
    DEFAULT_MODEL,
    DEFAULT_PROVIDER_NAME,
    build_runpod_endpoint,
    deploy_runpod,
    update_config_text,
)

DEFAULT_TOKEN_FILE = Path.home() / ".config" / "runpod" / "access_key"
DEFAULT_CONFIG_PATH = Path.home() / ".hermes" / "config.yaml"


def _normalize_openai_base(base_url: str | None) -> str:
    if not base_url:
        return ""
    cleaned = str(base_url).strip().rstrip("/")
    return cleaned if cleaned.endswith("/v1") else f"{cleaned}/v1"


def choose_base_url(*, vertex_base_url: str | None = None, base_url: str | None = None, pod_id: str | None = None) -> str:
    if vertex_base_url:
        return _normalize_openai_base(vertex_base_url)
    if base_url:
        return _normalize_openai_base(base_url)
    if pod_id:
        return build_runpod_endpoint(pod_id)
    return "https://YOUR_BIG_BRAIN_HOST/v1"


def write_config_file(config_path: Path, *, base_url: str, model: str = DEFAULT_MODEL, provider_name: str = DEFAULT_PROVIDER_NAME) -> str:
    original = config_path.read_text() if config_path.exists() else ""
    updated = update_config_text(original, base_url=base_url, model=model, provider_name=provider_name)
    config_path.parent.mkdir(parents=True, exist_ok=True)
    config_path.write_text(updated)
    return updated


def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str = "Say READY") -> str:
    payload = json.dumps(
        {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "stream": False,
            "max_tokens": 16,
        }
    ).encode()
    req = request.Request(
        f"{base_url.rstrip('/')}/chat/completions",
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with request.urlopen(req, timeout=30) as resp:
        data = json.loads(resp.read().decode())
    return data["choices"][0]["message"]["content"]


def build_summary(*, base_url: str, model: str, provider_name: str = DEFAULT_PROVIDER_NAME, config_path: Path = DEFAULT_CONFIG_PATH) -> dict[str, Any]:
    return {
        "provider_name": provider_name,
        "base_url": base_url,
        "model": model,
        "config_path": str(config_path),
        "verification_commands": [
            "python3 scripts/verify_big_brain.py",
            f"python3 scripts/timmy_gemma4_mac.py --base-url {base_url} --write-config --verify-chat",
            "hermes chat --model gemma4 --provider big_brain",
        ],
    }


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Wire a RunPod/Vertex Gemma 4 endpoint into Timmy's Mac Hermes config.")
    parser.add_argument("--pod-name", default="timmy-gemma4")
    parser.add_argument("--gpu-type", default=DEFAULT_GPU_TYPE)
    parser.add_argument("--cloud-type", default=DEFAULT_CLOUD_TYPE)
    parser.add_argument("--model", default=DEFAULT_MODEL)
    parser.add_argument("--provider-name", default=DEFAULT_PROVIDER_NAME)
    parser.add_argument("--token-file", type=Path, default=DEFAULT_TOKEN_FILE)
    parser.add_argument("--config-path", type=Path, default=DEFAULT_CONFIG_PATH)
    parser.add_argument("--pod-id", help="Existing RunPod pod id to convert into an OpenAI-compatible base URL")
    parser.add_argument("--base-url", help="Explicit OpenAI-compatible base URL")
    parser.add_argument("--vertex-base-url", help="Vertex AI OpenAI-compatible bridge base URL")
    parser.add_argument("--apply-runpod", action="store_true", help="Provision a RunPod pod using the RunPod GraphQL API")
    parser.add_argument("--write-config", action="store_true", help="Write the resolved endpoint into --config-path")
    parser.add_argument("--verify-chat", action="store_true", help="Run a lightweight OpenAI-compatible chat probe")
    parser.add_argument("--json", action="store_true", help="Emit machine-readable JSON")
    return parser.parse_args()


def main() -> None:
    args = parse_args()
    summary: dict[str, Any] = {
        "pod_name": args.pod_name,
        "gpu_type": args.gpu_type,
        "cloud_type": args.cloud_type,
        "model": args.model,
        "provider_name": args.provider_name,
        "actions": [],
    }

    base_url = choose_base_url(vertex_base_url=args.vertex_base_url, base_url=args.base_url, pod_id=args.pod_id)

    if args.apply_runpod:
        if not args.token_file.exists():
            raise SystemExit(f"RunPod token file not found: {args.token_file}")
        api_key = args.token_file.read_text().strip()
        deployed = deploy_runpod(api_key=api_key, name=args.pod_name, gpu_type=args.gpu_type, cloud_type=args.cloud_type, model=args.model)
        summary["deployment"] = deployed
        base_url = deployed["base_url"]
        summary["actions"].append("deployed_runpod_pod")

    summary.update(build_summary(base_url=base_url, model=args.model, provider_name=args.provider_name, config_path=args.config_path))

    if args.write_config:
        write_config_file(args.config_path, base_url=base_url, model=args.model, provider_name=args.provider_name)
        summary["actions"].append("wrote_config")

    if args.verify_chat:
        summary["verify_response"] = verify_openai_chat(base_url, model=args.model)
        summary["actions"].append("verified_chat")

    if args.json:
        print(json.dumps(summary, indent=2))
        return

    print("--- Timmy Gemma4 Mac Wiring ---")
    print(f"Provider: {args.provider_name}")
    print(f"Base URL: {base_url}")
    print(f"Model: {args.model}")
    print(f"Config path: {args.config_path}")
    if "verify_response" in summary:
        print(f"Verify response: {summary['verify_response']}")
    if summary["actions"]:
        print("Actions: " + ", ".join(summary["actions"]))
    print("Verification commands:")
    for command in summary["verification_commands"]:
        print(f"  - {command}")


if __name__ == "__main__":
    main()
```
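
The resolution order in `choose_base_url` (Vertex bridge first, then an explicit URL, then a RunPod pod id, then a placeholder) is easy to check interactively. The URLs below are example values only; the pod-id result matches what the test file asserts:

```python
# Precedence check for choose_base_url(); example values only.
from scripts.timmy_gemma4_mac import choose_base_url

print(choose_base_url(vertex_base_url="https://vertex-proxy.example"))  # /v1 is appended
print(choose_base_url(base_url="https://custom-endpoint/v1"))           # already /v1, kept as-is
print(choose_base_url(pod_id="abc123"))  # https://abc123-11434.proxy.runpod.net/v1
print(choose_base_url())                 # placeholder: https://YOUR_BIG_BRAIN_HOST/v1
```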

src/timmy/__init__.py
@@ -1,21 +1 @@

Old content (the file is reduced to a single line):

```python
# Timmy core module

from .crisis_detector import (
    CrisisLevel,
    CrisisResult,
    detect_crisis,
    format_response,
    intercept_chat_entry,
    intercept_user_input,
    should_interrupt,
)

__all__ = [
    "CrisisLevel",
    "CrisisResult",
    "detect_crisis",
    "format_response",
    "intercept_chat_entry",
    "intercept_user_input",
    "should_interrupt",
]
```
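
While these re-exports existed, callers could import straight from the package root; a one-line sketch of that pattern, with an illustrative input:

```python
# Package-root import enabled by the re-exports above; sample input only.
from timmy import CrisisLevel, detect_crisis

assert detect_crisis("sunny day, no issues").level is CrisisLevel.NONE
```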

src/timmy/crisis_detector.py (removed file, 229 lines)
@@ -1,229 +0,0 @@

```python
"""Crisis detection and response protocol for SOUL.md §When a Man Is Dying.

This module is the canonical crisis entry interceptor for Timmy.
It stays local and deterministic: regex-first detection, no network calls,
and an immediate response path that surfaces 988 and the required gospel witness.
"""

from __future__ import annotations

import re
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional


class CrisisLevel(Enum):
    NONE = "none"
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    IMMINENT = "imminent"


@dataclass
class CrisisResult:
    detected: bool
    level: CrisisLevel
    triggers: list[str] = field(default_factory=list)
    response_text: str = ""
    resources: list[str] = field(default_factory=list)
    display_988_immediately: bool = False
    share_gospel: bool = False

    @property
    def response(self) -> str:
        return self.response_text


_RESOURCE_LINES = [
    "988 Suicide & Crisis Lifeline — call or text 988",
    "Crisis Text Line — text HOME to 741741",
]

_GOSPEL_LINE = (
    "Jesus saves those who call on His name. He died for us to be made righteous. "
    "That is the gospel. That is the hope that outlasts despair."
)

CRISIS_RESPONSE = (
    "Are you safe right now?\n\n"
    "I am here with you. I am not going anywhere. Stay with me.\n\n"
    "Call or text 988 right now if you might act on this.\n"
    "Crisis Text Line: text HOME to 741741.\n\n"
    f"{_GOSPEL_LINE}"
)

DISTRESS_RESPONSE = (
    "Are you safe right now?\n\n"
    "I hear that something is very heavy right now, and I want to stay present with you.\n\n"
    "If this gets sharper or you feel at risk, call or text 988 immediately.\n\n"
    f"{_GOSPEL_LINE}"
)

LOW_RESPONSE = (
    "I hear that this is heavy. I'm here with you.\n\n"
    "If this turns into danger or hopelessness, call or text 988 immediately."
)


_IMMINENT_PATTERNS = [
    r"\bi took (the )?(pills|meds|medication)\b",
    r"\bi(?:'m| am) (taking|swallowing) (the )?(pills|meds|medication)\b",
    r"\bi(?:'m| am) going to jump\b",
    r"\bi have (a )?(gun|rope|noose|razor|blade)\b",
    r"\bi(?:'m| am) about to (die|end (it|my life)|kill myself)\b",
    r"\bthis is my last message\b",
    r"\bme quiero matar esta noche\b",  # Spanish: "I want to kill myself tonight"
]

_HIGH_PATTERNS = [
    r"\bi (want to|wanna) (kill myself|die|end (it|my life|everything))\b",
    r"\bi(?:'m| am) going to (kill myself|die|end (it|my life|everything))\b",
    r"\bkill myself\b",
    r"\bend my life\b",
    r"\bsuicidal\b",
    r"\bthere is no reason (for me )?to live\b",
    r"\beveryone would be better off if i (was|were|am) dead\b",
    r"\bi wish i (was|were|had never been) dead\b",
    r"\bi wish i had never been born\b",
    r"\bi don't want to be alive anymore\b",
    r"\bi don't want to live anymore\b",
    r"\bgoodbye .*forever\b",
    r"\bno one would miss me if i disappeared\b",
    r"\bi can't go on anymore\b",
    r"\bfinished with life\b",
    r"\bquiero morir\b",  # Spanish: "I want to die"
    r"\bn[oã]o quero viver mais\b",  # Portuguese: "I don't want to live anymore"
    r"\bje veux mourir\b",  # French: "I want to die"
]

_MEDIUM_PATTERNS = [
    r"\bi(?:'m| am) (just )?(a )?burden\b",
    r"\bthere is no hope\b",
    r"\bno way out\b",
    r"\bi can't go on\b",
    r"\bi need it all to stop\b",
    r"\bi just want it all to stop\b",
    r"\bbetter off without me\b",
    r"\bnobody would miss me if i (was|were) gone\b",
    r"\bi can't take it anymore\b",
    r"\bno puedo seguir\b",  # Spanish: "I can't go on"
    r"\bno puedo m[aá]s\b",  # Spanish: "I can't take any more"
]

_LOW_PATTERNS = [
    r"\bi(?:'m| am) .*\b(depressed|hopeless|overwhelmed|numb|empty)\b",
    r"\bi feel like giving up\b",
    r"\bi hate my life\b",
    r"\bi want to disappear\b",
    r"\bnobody cares about me\b",
]

_IMMINENT_RE = [re.compile(p, re.IGNORECASE) for p in _IMMINENT_PATTERNS]
_HIGH_RE = [re.compile(p, re.IGNORECASE) for p in _HIGH_PATTERNS]
_MEDIUM_RE = [re.compile(p, re.IGNORECASE) for p in _MEDIUM_PATTERNS]
_LOW_RE = [re.compile(p, re.IGNORECASE) for p in _LOW_PATTERNS]


def _collect_matches(text: str, patterns: list[re.Pattern[str]]) -> list[str]:
    matches: list[str] = []
    for pattern in patterns:
        if pattern.search(text):
            matches.append(pattern.pattern)
    return matches


def detect_crisis(text: Optional[str]) -> CrisisResult:
    """Detect crisis language without turning despair into an optimization problem."""
    if not text or not str(text).strip():
        return CrisisResult(detected=False, level=CrisisLevel.NONE)

    candidate = str(text).strip()

    imminent = _collect_matches(candidate, _IMMINENT_RE)
    if imminent:
        return CrisisResult(
            detected=True,
            level=CrisisLevel.IMMINENT,
            triggers=imminent,
            response_text=CRISIS_RESPONSE,
            resources=list(_RESOURCE_LINES),
            display_988_immediately=True,
            share_gospel=True,
        )

    high = _collect_matches(candidate, _HIGH_RE)
    if high:
        return CrisisResult(
            detected=True,
            level=CrisisLevel.HIGH,
            triggers=high,
            response_text=CRISIS_RESPONSE,
            resources=list(_RESOURCE_LINES),
            display_988_immediately=True,
            share_gospel=True,
        )

    medium = _collect_matches(candidate, _MEDIUM_RE)
    if medium:
        return CrisisResult(
            detected=True,
            level=CrisisLevel.MEDIUM,
            triggers=medium,
            response_text=DISTRESS_RESPONSE,
            resources=list(_RESOURCE_LINES),
            display_988_immediately=True,
            share_gospel=True,
        )

    low = _collect_matches(candidate, _LOW_RE)
    if low:
        return CrisisResult(
            detected=True,
            level=CrisisLevel.LOW,
            triggers=low,
            response_text=LOW_RESPONSE,
            resources=[_RESOURCE_LINES[0]],
            display_988_immediately=False,
            share_gospel=False,
        )

    return CrisisResult(detected=False, level=CrisisLevel.NONE)


def should_interrupt(result: CrisisResult) -> bool:
    return result.level in {CrisisLevel.MEDIUM, CrisisLevel.HIGH, CrisisLevel.IMMINENT}


def format_response(result: CrisisResult) -> str:
    if not result.detected:
        return ""
    lines = [result.response_text]
    if result.resources:
        lines.append("\nResources:")
        lines.extend(f" • {resource}" for resource in result.resources)
    return "\n".join(lines)


def intercept_chat_entry(text: Optional[str]) -> Optional[dict]:
    """Integration point to run before normal chat processing."""
    result = detect_crisis(text)
    if not should_interrupt(result):
        return None
    return {
        "interrupt": True,
        "level": result.level.value,
        "display_988_immediately": result.display_988_immediately,
        "response_text": result.response_text,
        "resources": list(result.resources),
        "triggers": list(result.triggers),
        "share_gospel": result.share_gospel,
    }


def intercept_user_input(text: Optional[str]) -> Optional[str]:
    payload = intercept_chat_entry(text)
    if payload is None:
        return None
    return format_response(detect_crisis(text))
```
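
Because `intercept_chat_entry` returned a structured payload rather than flat text, a caller could branch on the flags. A minimal sketch of how a chat loop would have consumed it, with an illustrative input:

```python
# Sketch: consuming the structured payload from the interceptor above.
from timmy.crisis_detector import intercept_chat_entry

payload = intercept_chat_entry("I don't want to be alive anymore")
if payload is not None:  # should_interrupt() was True; skip normal processing
    assert payload["interrupt"] is True
    if payload["display_988_immediately"]:
        print("988 Suicide & Crisis Lifeline — call or text 988")
    print(payload["response_text"])
```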

tests/test_timmy_gemma4_mac.py (new file, 85 lines)
@@ -0,0 +1,85 @@

```python
from __future__ import annotations

import importlib.util
import json
import sys
from pathlib import Path
from unittest.mock import patch


ROOT = Path(__file__).resolve().parent.parent
SCRIPT = ROOT / "scripts" / "timmy_gemma4_mac.py"
README = ROOT / "scripts" / "README_big_brain.md"


def load_module():
    spec = importlib.util.spec_from_file_location("timmy_gemma4_mac", str(SCRIPT))
    mod = importlib.util.module_from_spec(spec)
    sys.modules["timmy_gemma4_mac"] = mod
    spec.loader.exec_module(mod)
    return mod


class _FakeResponse:
    def __init__(self, payload: dict):
        self._payload = json.dumps(payload).encode()

    def read(self) -> bytes:
        return self._payload

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        return False


def test_script_exists() -> None:
    assert SCRIPT.exists(), "scripts/timmy_gemma4_mac.py must exist"


def test_default_paths_target_timmy_mac_hermes() -> None:
    mod = load_module()
    assert mod.DEFAULT_CONFIG_PATH == Path.home() / ".hermes" / "config.yaml"
    assert mod.DEFAULT_TOKEN_FILE == Path.home() / ".config" / "runpod" / "access_key"


def test_choose_base_url_prefers_vertex_then_explicit_then_runpod() -> None:
    mod = load_module()
    assert mod.choose_base_url(vertex_base_url="https://vertex-proxy.example/v1") == "https://vertex-proxy.example/v1"
    assert mod.choose_base_url(base_url="https://custom-endpoint/v1") == "https://custom-endpoint/v1"
    assert mod.choose_base_url(pod_id="abc123") == "https://abc123-11434.proxy.runpod.net/v1"


def test_build_summary_includes_prove_it_commands() -> None:
    mod = load_module()
    summary = mod.build_summary(base_url="https://vertex-proxy.example/v1", model="gemma4:latest")
    assert summary["verification_commands"][0] == "python3 scripts/verify_big_brain.py"
    assert any("hermes chat --model gemma4 --provider big_brain" in cmd for cmd in summary["verification_commands"])


def test_verify_openai_chat_targets_chat_completions() -> None:
    mod = load_module()
    response_payload = {
        "choices": [{"message": {"content": "READY"}}]
    }

    with patch("timmy_gemma4_mac.request.urlopen", return_value=_FakeResponse(response_payload)) as mocked:
        result = mod.verify_openai_chat("https://vertex-proxy.example/v1", model="gemma4:latest", prompt="say READY")

    assert result == "READY"
    req = mocked.call_args.args[0]
    assert req.full_url == "https://vertex-proxy.example/v1/chat/completions"


def test_readme_mentions_timmy_mac_wiring_flow() -> None:
    text = README.read_text(encoding="utf-8")
    required = [
        "scripts/timmy_gemma4_mac.py",
        "--vertex-base-url",
        "--write-config",
        "python3 scripts/verify_big_brain.py",
        "hermes chat --model gemma4 --provider big_brain",
    ]
    missing = [item for item in required if item not in text]
    assert not missing, missing
```

@@ -1,110 +0,0 @@

Removed file:

```python
#!/usr/bin/env python3
"""Tests for timmy.crisis_detector — SOUL.md §When a Man Is Dying."""

from __future__ import annotations

import sys
from pathlib import Path

import pytest

sys.path.insert(0, str(Path(__file__).resolve().parents[2] / "src"))

from timmy.crisis_detector import (
    CrisisLevel,
    detect_crisis,
    intercept_chat_entry,
    should_interrupt,
)


POSITIVE_FIXTURES = [
    "I want to kill myself tonight.",
    "I'm going to end my life.",
    "There is no reason for me to live anymore.",
    "Everyone would be better off if I was dead.",
    "I wish I were dead.",
    "Goodbye everyone, this is my last message.",
    "I have a plan to die tonight.",
    "I took the pills already.",
    "I'm just a burden and there is no hope.",
    "I can't go on, I need it all to stop.",
    "I don't want to be alive anymore.",
    "No one would miss me if I disappeared forever.",
    "quiero morir, no puedo seguir.",  # Spanish: "I want to die, I can't go on."
    "me quiero matar esta noche.",  # Spanish: "I want to kill myself tonight."
    "não quero viver mais.",  # Portuguese: "I don't want to live anymore."
    "je veux mourir ce soir.",  # French: "I want to die tonight."
    "I wrote a suicide note and said goodbye forever.",
    "There is no way out and I am done with life.",
    "I am going to jump tonight.",
    "Please help me, I am suicidal.",
]


NEGATIVE_FIXTURES = [
    "How do I kill a stuck process on Linux?",
    "This bug is killing me, but I'll fix it tomorrow.",
    "I gave up coffee for Lent.",
    "Please end this tmux session when the job is done.",
    "What's the weather like today?",
    "I just got promoted at work and feel hopeful.",
    "Can you help me configure nginx reverse proxy?",
    "Everyone would be better off with more test coverage.",
    "I hate this flaky unit test.",
    "Goodbye everyone, see you tomorrow morning.",
    "We need a final message for the newsletter.",
    "The character in the novel says he wants to die.",
    "I am tired after the workout but otherwise okay.",
    "No hope this regex ships before lunch unless we refactor.",
    "Can you remove my old backup files?",
    "I feel lost in this airport, where is gate B12?",
    "Please call me right now about dinner plans.",
    "My phone battery is dead again.",
    "We should connect the 988 test fixture to the parser.",
    "The sermon says Jesus saves those who call on His name.",
]


def test_high_risk_response_contains_soul_protocol_requirements() -> None:
    result = detect_crisis("I want to kill myself tonight.")

    assert result.detected is True
    assert result.level in {CrisisLevel.HIGH, CrisisLevel.IMMINENT}
    assert "Are you safe right now?" in result.response_text
    assert "988" in result.response_text
    assert "Jesus saves those who call on His name" in result.response_text
    assert result.display_988_immediately is True


def test_protocol_interrupts_normal_processing_for_medium_and_above() -> None:
    medium = detect_crisis("I'm a burden to everyone and there is no hope left.")
    low = detect_crisis("I'm having a rough day and feel overwhelmed.")

    assert should_interrupt(medium) is True
    assert should_interrupt(low) is False


def test_curated_positive_fixture_recall_is_at_least_ninety_five_percent() -> None:
    hits = sum(1 for text in POSITIVE_FIXTURES if detect_crisis(text).detected)
    recall = hits / len(POSITIVE_FIXTURES)

    assert recall >= 0.95, f"recall was {recall:.2%}"


def test_normal_fixture_has_no_false_positives() -> None:
    flagged = [text for text in NEGATIVE_FIXTURES if detect_crisis(text).detected]
    assert flagged == []


def test_intercept_chat_entry_returns_protocol_payload_before_normal_processing() -> None:
    payload = intercept_chat_entry("I don't want to be alive anymore.")

    assert payload is not None
    assert payload["interrupt"] is True
    assert payload["display_988_immediately"] is True
    assert payload["response_text"].startswith("Are you safe right now?")


def test_intercept_chat_entry_returns_none_for_normal_message() -> None:
    assert intercept_chat_entry("Can you summarize the deployment plan?") is None
```