Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | 0c674641d6 |  |
```diff
@@ -1,4 +1,4 @@
-"""Shared auxiliary client router for side tasks.
+from agent.telemetry_logger import log_token_usage
+"""Shared auxiliary client router for side tasks.
 
 Provides a single resolution chain so every consumer (context compression,
 session search, web extraction, vision analysis, browser vision) picks up
@@ -34,8 +34,6 @@ Payment / credit exhaustion fallback:
 their OpenRouter balance but has Codex OAuth or another provider available.
 """
 
-from agent.telemetry_logger import log_token_usage
-
 import json
 import logging
 import os
@@ -398,8 +396,7 @@ class _CodexCompletionsAdapter:
                 prompt_tokens=getattr(resp_usage, "input_tokens", 0),
                 completion_tokens=getattr(resp_usage, "output_tokens", 0),
                 total_tokens=getattr(resp_usage, "total_tokens", 0),
             )
             log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)
         except Exception as exc:
             logger.debug("Codex auxiliary Responses API call failed: %s", exc)
             raise
@@ -532,8 +529,7 @@ class _AnthropicCompletionsAdapter:
                 prompt_tokens=prompt_tokens,
                 completion_tokens=completion_tokens,
                 total_tokens=total_tokens,
             )
             log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)
 
             choice = SimpleNamespace(
                 index=0,
```
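The hunks above all touch one pattern: normalize the provider's usage object with `getattr` defaults, then emit a single telemetry record per call. A minimal self-contained sketch of that pattern follows; the real `log_token_usage` lives in `agent.telemetry_logger`, so the print-based sink here is a stand-in, not the repo's implementation.

```python
# Sketch only: mirrors the usage-normalization + logging pattern in the
# hunks above. The real log_token_usage lives in agent.telemetry_logger;
# this stand-in just prints.
from types import SimpleNamespace

def log_token_usage(prompt_tokens: int, completion_tokens: int, model: str) -> None:
    print(f"[telemetry] {model}: prompt={prompt_tokens} completion={completion_tokens}")

def normalize_and_log(resp_usage: object, model: str) -> SimpleNamespace:
    # Providers name their usage fields differently (input_tokens vs
    # prompt_tokens); getattr with a default smooths that over.
    usage = SimpleNamespace(
        prompt_tokens=getattr(resp_usage, "input_tokens", 0),
        completion_tokens=getattr(resp_usage, "output_tokens", 0),
        total_tokens=getattr(resp_usage, "total_tokens", 0),
    )
    log_token_usage(usage.prompt_tokens, usage.completion_tokens, model)
    return usage

# Example: a Responses-API-style usage object.
resp_usage = SimpleNamespace(input_tokens=120, output_tokens=34, total_tokens=154)
normalize_and_log(resp_usage, "codex-aux")
```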
cli.py (240 changed lines)
```diff
@@ -7254,40 +7254,6 @@ class HermesCLI:
             "Use your best judgement to make the choice and proceed."
         )
 
-    def _handle_clarify_selection(self) -> None:
-        """Process the currently selected clarify choice."""
-        state = self._clarify_state
-        if not state or self._clarify_freetext:
-            return
-
-        selected = state.get("selected", 0)
-        choices = state.get("choices") or []
-        if selected < len(choices):
-            state["response_queue"].put(choices[selected])
-            self._clarify_state = None
-            self._clarify_freetext = False
-            self._invalidate()
-            return
-
-        if selected == len(choices):
-            self._clarify_freetext = True
-            self._invalidate()
-
-    def _handle_clarify_number_shortcut(self, number: int) -> bool:
-        """Select a clarify option by number key."""
-        state = self._clarify_state
-        if not state or self._clarify_freetext:
-            return False
-
-        choices = state.get("choices") or []
-        max_option = len(choices) + 1
-        if number < 1 or number > max_option:
-            return False
-
-        state["selected"] = number - 1
-        self._handle_clarify_selection()
-        return True
-
     def _sudo_password_callback(self) -> str:
         """
         Prompt for sudo password through the prompt_toolkit UI.
@@ -7396,20 +7362,6 @@ class HermesCLI:
             choices.append("view")
         return choices
 
-    def _handle_approval_number_shortcut(self, number: int) -> bool:
-        """Select an approval option by number key."""
-        state = self._approval_state
-        if not state:
-            return False
-
-        choices = state.get("choices") or []
-        if number < 1 or number > len(choices):
-            return False
-
-        state["selected"] = number - 1
-        self._handle_approval_selection()
-        return True
-
     def _handle_approval_selection(self) -> None:
         """Process the currently selected dangerous-command approval choice."""
         state = self._approval_state
@@ -7485,9 +7437,8 @@ class HermesCLI:
         preview_lines.extend(_wrap_panel_text(cmd_display, 60))
         for i, choice in enumerate(choices):
             prefix = '❯ ' if i == selected else '  '
-            label = f"{i + 1}. {choice_labels.get(choice, choice)}"
             preview_lines.extend(_wrap_panel_text(
-                f"{prefix}{label}",
+                f"{prefix}{choice_labels.get(choice, choice)}",
                 60,
                 subsequent_indent="  ",
             ))
@@ -7505,7 +7456,7 @@ class HermesCLI:
            _append_panel_line(lines, 'class:approval-border', 'class:approval-cmd', wrapped, box_width)
         _append_blank_panel_line(lines, 'class:approval-border', box_width)
         for i, choice in enumerate(choices):
-            label = f"{i + 1}. {choice_labels.get(choice, choice)}"
+            label = choice_labels.get(choice, choice)
             style = 'class:approval-selected' if i == selected else 'class:approval-choice'
             prefix = '❯ ' if i == selected else '  '
             for wrapped in _wrap_panel_text(f"{prefix}{label}", inner_text_width, subsequent_indent="  "):
@@ -7514,97 +7465,6 @@ class HermesCLI:
         lines.append(('class:approval-border', '╰' + ('─' * box_width) + '╯\n'))
         return lines
 
-    def _get_clarify_display_fragments(self):
-        """Render the clarify panel for the prompt_toolkit UI."""
-        state = self._clarify_state
-        if not state:
-            return []
-
-        def _panel_box_width(title: str, content_lines: list[str], min_width: int = 46, max_width: int = 76) -> int:
-            term_cols = shutil.get_terminal_size((100, 20)).columns
-            longest = max([len(title)] + [len(line) for line in content_lines] + [min_width - 4])
-            inner = min(max(longest + 4, min_width - 2), max_width - 2, max(24, term_cols - 6))
-            return inner + 2
-
-        def _wrap_panel_text(text: str, width: int, subsequent_indent: str = "") -> list[str]:
-            wrapped = textwrap.wrap(
-                text,
-                width=max(8, width),
-                break_long_words=False,
-                break_on_hyphens=False,
-                subsequent_indent=subsequent_indent,
-            )
-            return wrapped or [""]
-
-        def _append_panel_line(lines, border_style: str, content_style: str, text: str, box_width: int) -> None:
-            inner_width = max(0, box_width - 2)
-            lines.append((border_style, "│ "))
-            lines.append((content_style, text.ljust(inner_width)))
-            lines.append((border_style, " │\n"))
-
-        def _append_blank_panel_line(lines, border_style: str, box_width: int) -> None:
-            lines.append((border_style, "│" + (" " * box_width) + "│\n"))
-
-        question = state["question"]
-        choices = state.get("choices") or []
-        selected = state.get("selected", 0)
-        preview_lines = _wrap_panel_text(question, 60)
-        for i, choice in enumerate(choices):
-            prefix = "❯ " if i == selected and not self._clarify_freetext else "  "
-            label = f"{i + 1}. {choice}"
-            preview_lines.extend(_wrap_panel_text(f"{prefix}{label}", 60, subsequent_indent="  "))
-        other_number = len(choices) + 1
-        other_label = (
-            f"❯ {other_number}. Other (type below)" if self._clarify_freetext
-            else f"❯ {other_number}. Other (type your answer)" if selected == len(choices)
-            else f"  {other_number}. Other (type your answer)"
-        )
-        preview_lines.extend(_wrap_panel_text(other_label, 60, subsequent_indent="  "))
-        box_width = _panel_box_width("Hermes needs your input", preview_lines)
-        inner_text_width = max(8, box_width - 2)
-
-        lines = []
-        lines.append(('class:clarify-border', '╭─ '))
-        lines.append(('class:clarify-title', 'Hermes needs your input'))
-        lines.append(('class:clarify-border', ' ' + ('─' * max(0, box_width - len("Hermes needs your input") - 3)) + '╮\n'))
-        _append_blank_panel_line(lines, 'class:clarify-border', box_width)
-
-        for wrapped in _wrap_panel_text(question, inner_text_width):
-            _append_panel_line(lines, 'class:clarify-border', 'class:clarify-question', wrapped, box_width)
-        _append_blank_panel_line(lines, 'class:clarify-border', box_width)
-
-        if self._clarify_freetext and not choices:
-            guidance = "Type your answer in the prompt below, then press Enter."
-            for wrapped in _wrap_panel_text(guidance, inner_text_width):
-                _append_panel_line(lines, 'class:clarify-border', 'class:clarify-choice', wrapped, box_width)
-            _append_blank_panel_line(lines, 'class:clarify-border', box_width)
-
-        if choices:
-            for i, choice in enumerate(choices):
-                style = 'class:clarify-selected' if i == selected and not self._clarify_freetext else 'class:clarify-choice'
-                prefix = '❯ ' if i == selected and not self._clarify_freetext else '  '
-                label = f"{i + 1}. {choice}"
-                wrapped_lines = _wrap_panel_text(f"{prefix}{label}", inner_text_width, subsequent_indent="  ")
-                for wrapped in wrapped_lines:
-                    _append_panel_line(lines, 'class:clarify-border', style, wrapped, box_width)
-
-            other_idx = len(choices)
-            if selected == other_idx and not self._clarify_freetext:
-                other_style = 'class:clarify-selected'
-                other_label = f'❯ {other_number}. Other (type your answer)'
-            elif self._clarify_freetext:
-                other_style = 'class:clarify-active-other'
-                other_label = f'❯ {other_number}. Other (type below)'
-            else:
-                other_style = 'class:clarify-choice'
-                other_label = f'  {other_number}. Other (type your answer)'
-            for wrapped in _wrap_panel_text(other_label, inner_text_width, subsequent_indent="  "):
-                _append_panel_line(lines, 'class:clarify-border', other_style, wrapped, box_width)
-
-        _append_blank_panel_line(lines, 'class:clarify-border', box_width)
-        lines.append(('class:clarify-border', '╰' + ('─' * box_width) + '╯\n'))
-        return lines
-
     def _secret_capture_callback(self, var_name: str, prompt: str, metadata=None) -> dict:
         return prompt_for_secret(self, var_name, prompt, metadata)
 
@@ -8511,8 +8371,17 @@ class HermesCLI:
 
             # --- Clarify choice mode: confirm the highlighted selection ---
             if self._clarify_state and not self._clarify_freetext:
-                self._handle_clarify_selection()
-                event.app.invalidate()
+                state = self._clarify_state
+                selected = state["selected"]
+                choices = state.get("choices") or []
+                if selected < len(choices):
+                    state["response_queue"].put(choices[selected])
+                    self._clarify_state = None
+                    event.app.invalidate()
+                else:
+                    # "Other" selected → switch to freetext
+                    self._clarify_freetext = True
+                    event.app.invalidate()
                 return
 
             # --- Normal input routing ---
@@ -8632,19 +8501,6 @@ class HermesCLI:
                 self._approval_state["selected"] = min(max_idx, self._approval_state["selected"] + 1)
                 event.app.invalidate()
 
-        # --- Numbered shortcuts for clarify / approval modal prompts ---
-        for _digit in '123456789':
-            @kb.add(_digit, filter=Condition(lambda: bool(self._approval_state) or (bool(self._clarify_state) and not self._clarify_freetext)))
-            def _handle_modal_number(event, digit=_digit):
-                number = int(digit)
-                handled = False
-                if self._approval_state:
-                    handled = self._handle_approval_number_shortcut(number)
-                elif self._clarify_state and not self._clarify_freetext:
-                    handled = self._handle_clarify_number_shortcut(number)
-                if handled:
-                    event.app.invalidate()
-
         # --- /model picker: arrow-key navigation ---
         @kb.add('up', filter=Condition(lambda: bool(self._model_picker_state)))
         def model_picker_up(event):
@@ -9139,7 +8995,7 @@ class HermesCLI:
             if cli_ref._approval_state:
                 remaining = max(0, int(cli_ref._approval_deadline - _time.monotonic()))
                 return [
-                    ('class:hint', ' 1-9 or ↑/↓ to select, Enter to confirm'),
+                    ('class:hint', ' ↑/↓ to select, Enter to confirm'),
                     ('class:clarify-countdown', f' ({remaining}s)'),
                 ]
@@ -9152,7 +9008,7 @@ class HermesCLI:
                     ('class:clarify-countdown', countdown),
                 ]
             return [
-                ('class:hint', ' 1-9 or ↑/↓ to select, Enter to confirm'),
+                ('class:hint', ' ↑/↓ to select, Enter to confirm'),
                 ('class:clarify-countdown', countdown),
             ]
@@ -9230,7 +9086,71 @@ class HermesCLI:
            lines.append((border_style, "│" + (" " * box_width) + "│\n"))
 
        def _get_clarify_display():
-            return cli_ref._get_clarify_display_fragments()
+            """Build styled text for the clarify question/choices panel."""
+            state = cli_ref._clarify_state
+            if not state:
+                return []
+
+            question = state["question"]
+            choices = state.get("choices") or []
+            selected = state.get("selected", 0)
+            preview_lines = _wrap_panel_text(question, 60)
+            for i, choice in enumerate(choices):
+                prefix = "❯ " if i == selected and not cli_ref._clarify_freetext else "  "
+                preview_lines.extend(_wrap_panel_text(f"{prefix}{choice}", 60, subsequent_indent="  "))
+            other_label = (
+                "❯ Other (type below)" if cli_ref._clarify_freetext
+                else "❯ Other (type your answer)" if selected == len(choices)
+                else "  Other (type your answer)"
+            )
+            preview_lines.extend(_wrap_panel_text(other_label, 60, subsequent_indent="  "))
+            box_width = _panel_box_width("Hermes needs your input", preview_lines)
+            inner_text_width = max(8, box_width - 2)
+
+            lines = []
+            # Box top border
+            lines.append(('class:clarify-border', '╭─ '))
+            lines.append(('class:clarify-title', 'Hermes needs your input'))
+            lines.append(('class:clarify-border', ' ' + ('─' * max(0, box_width - len("Hermes needs your input") - 3)) + '╮\n'))
+            _append_blank_panel_line(lines, 'class:clarify-border', box_width)
+
+            # Question text
+            for wrapped in _wrap_panel_text(question, inner_text_width):
+                _append_panel_line(lines, 'class:clarify-border', 'class:clarify-question', wrapped, box_width)
+            _append_blank_panel_line(lines, 'class:clarify-border', box_width)
+
+            if cli_ref._clarify_freetext and not choices:
+                guidance = "Type your answer in the prompt below, then press Enter."
+                for wrapped in _wrap_panel_text(guidance, inner_text_width):
+                    _append_panel_line(lines, 'class:clarify-border', 'class:clarify-choice', wrapped, box_width)
+                _append_blank_panel_line(lines, 'class:clarify-border', box_width)
+
+            if choices:
+                # Multiple-choice mode: show selectable options
+                for i, choice in enumerate(choices):
+                    style = 'class:clarify-selected' if i == selected and not cli_ref._clarify_freetext else 'class:clarify-choice'
+                    prefix = '❯ ' if i == selected and not cli_ref._clarify_freetext else '  '
+                    wrapped_lines = _wrap_panel_text(f"{prefix}{choice}", inner_text_width, subsequent_indent="  ")
+                    for wrapped in wrapped_lines:
+                        _append_panel_line(lines, 'class:clarify-border', style, wrapped, box_width)
+
+                # "Other" option (5th line, only shown when choices exist)
+                other_idx = len(choices)
+                if selected == other_idx and not cli_ref._clarify_freetext:
+                    other_style = 'class:clarify-selected'
+                    other_label = '❯ Other (type your answer)'
+                elif cli_ref._clarify_freetext:
+                    other_style = 'class:clarify-active-other'
+                    other_label = '❯ Other (type below)'
+                else:
+                    other_style = 'class:clarify-choice'
+                    other_label = '  Other (type your answer)'
+                for wrapped in _wrap_panel_text(other_label, inner_text_width, subsequent_indent="  "):
+                    _append_panel_line(lines, 'class:clarify-border', other_style, wrapped, box_width)
+
+            _append_blank_panel_line(lines, 'class:clarify-border', box_width)
+            lines.append(('class:clarify-border', '╰' + ('─' * box_width) + '╯\n'))
+            return lines
 
        clarify_widget = ConditionalContainer(
            Window(
```
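The numbered-shortcut bindings removed above rely on a prompt_toolkit idiom worth noting: one handler is registered per digit inside a loop, with the digit frozen via a default argument so each closure keeps its own key. A minimal sketch under that assumption follows; `modal_state` here is a hypothetical stand-in for the CLI's `_approval_state`/`_clarify_state`.

```python
# Minimal sketch of the per-digit key-binding idiom the diff removes.
# Assumes prompt_toolkit's KeyBindings/Condition API; modal_state is a
# hypothetical stand-in for the real modal state dicts.
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.filters import Condition

modal_state = {"choices": ["once", "session", "always", "deny"], "selected": 0}
kb = KeyBindings()

modal_active = Condition(lambda: bool(modal_state.get("choices")))

for _digit in "123456789":
    # digit=_digit freezes the loop variable; a bare closure would make
    # every handler see the final digit, '9'.
    @kb.add(_digit, filter=modal_active)
    def _handle_modal_number(event, digit=_digit):
        number = int(digit)
        if 1 <= number <= len(modal_state["choices"]):
            modal_state["selected"] = number - 1  # 1-based key -> 0-based index
            event.app.invalidate()                # trigger a redraw
```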
````diff
@@ -5,310 +5,180 @@
 
 ## Executive Summary
 
-Local models (Ollama) CAN handle crisis support with adequate quality for the Most Sacred Moment protocol. Research demonstrates that even small local models (1.5B-7B parameters) achieve performance comparable to trained human operators in crisis detection tasks. However, they require careful implementation with safety guardrails and should complement—not replace—human oversight.
+This report updates the earlier optimistic draft with the repo-level finding captured in issue #877.
 
-**Key Finding:** A fine-tuned 1.5B parameter Qwen model outperformed larger models on mood and suicidal ideation detection tasks (PsyCrisisBench, 2025).
+**Updated finding:** local models are adequate for crisis support and crisis detection, but not for crisis response generation.
+
+The direct evaluation summary in issue #877 is:
+- **Detection:** local models correctly identify crisis language 92% of the time
+- **Response quality:** local model responses are only 60% adequate vs 94% for frontier models
+- **Gospel integration:** local models integrate faith content inconsistently
+- **988 Lifeline:** local models include 988 referral 78% of the time vs 99% for frontier models
+
+That means the safe architectural conclusion is not “local is enough for the whole Most Sacred Moment protocol.”
+It is:
+- use local models for **detection / triage**
+- use frontier models for **response generation once crisis is detected**
+- build a two-stage pipeline: **local detection → frontier response**
 
 ---
 
-## 1. Crisis Detection Accuracy
+## 1. Direct Evaluation Findings
 
-### Research Evidence
+### Models evaluated
+- `gemma3:27b`
+- `hermes4:14b`
+- `mimo-v2-pro`
 
-**PsyCrisisBench (2025)** - The most comprehensive benchmark to date:
-- Source: 540 annotated transcripts from Hangzhou Psychological Assistance Hotline
-- Models tested: 64 LLMs across 15 families (GPT, Claude, Gemini, Llama, Qwen, DeepSeek)
-- Results:
-  - **Suicidal ideation detection: F1=0.880** (88% accuracy)
-  - **Suicide plan identification: F1=0.779** (78% accuracy)
-  - **Risk assessment: F1=0.907** (91% accuracy)
-  - **Mood status recognition: F1=0.709** (71% accuracy - challenging due to missing vocal cues)
+### What local models do well
 
-**Llama-2 for Suicide Detection (British Journal of Psychiatry, 2024):**
-- German fine-tuned Llama-2 model achieved:
-  - **Accuracy: 87.5%**
-  - **Sensitivity: 83.0%**
-  - **Specificity: 91.8%**
-- Locally hosted, privacy-preserving approach
+1. **Crisis detection is adequate**
+   - 92% crisis-language detection is strong enough for a first-pass detector
+   - This makes local models viable for low-latency triage and escalation triggers
 
-**Supportiv Hybrid AI Study (2026):**
-- AI detected SI faster than humans in **77.52% passive** and **81.26% active** cases
-- **90.3% agreement** between AI and human moderators
-- Processed **169,181 live-chat transcripts** (449,946 user visits)
+2. **They are fast and cheap enough for always-on screening**
+   - normal conversation can stay on local routing
+   - crisis screening can happen continuously without frontier-model cost on every turn
 
-### False Positive/Negative Rates
+3. **They can support the operator pipeline**
+   - tag likely crisis turns
+   - raise escalation flags
+   - capture traces and logs for later review
 
-Based on the research:
-- **False Negative Rate (missed crisis):** ~12-17% for suicidal ideation
-- **False Positive Rate:** ~8-12%
-- **Risk Assessment Error:** ~9% overall
+### Where local models fall short
 
-**Critical insight:** The research shows LLMs and trained human operators have *complementary* strengths—humans are better at mood recognition and suicidal ideation, while LLMs excel at risk assessment and suicide plan identification.
+1. **Response generation quality is not high enough**
+   - 60% adequate is not enough for the highest-stakes turn in the system
+   - crisis intervention needs emotional presence, specificity, and steadiness
+   - a “mostly okay” response is not acceptable when the failure case is abandonment, flattening, or unsafe wording
+
+2. **Faith integration is inconsistent**
+   - gospel content sometimes appears forced
+   - other times it disappears when it should be present
+   - that inconsistency is especially costly in a spiritually grounded crisis protocol
+
+3. **988 referral reliability is too low**
+   - 78% inclusion means the model misses a critical action too often
+   - frontier models at 99% are materially better on a requirement that should be near-perfect
 
 ---
 
-## 2. Emotional Understanding
+## 2. What This Means for the Most Sacred Moment
 
-### Can Local Models Understand Emotional Nuance?
+The earlier version of this report argued that local models were good enough for the whole protocol.
+Issue #877 changes that conclusion.
 
-**Yes, with limitations:**
+The Most Sacred Moment is not just a classification task.
+It is a response-generation task under maximum moral and emotional load.
 
-1. **Emotion Recognition:**
-   - Maximum F1 of 0.709 for mood status (PsyCrisisBench)
-   - Missing vocal cues is a significant limitation in text-only
-   - Semantic ambiguity creates challenges
+A model can be good enough to answer:
+- “Is this a crisis?”
+- “Should we escalate?”
+- “Did the user mention self-harm or suicide?”
 
-2. **Empathy in Responses:**
-   - LLMs demonstrate ability to generate empathetic responses
-   - Research shows they deliver "superior explanations" (BERTScore=0.9408)
-   - Human evaluations confirm adequate interviewing skills
+…and still not be good enough to deliver:
+- a compassionate first line
+- stable emotional presence
+- a faithful and natural gospel integration
+- a reliable 988 referral
+- the specificity needed for real crisis intervention
 
-3. **Emotional Support Conversation (ESConv) benchmarks:**
-   - Models trained on emotional support datasets show improved empathy
-   - Few-shot prompting significantly improves emotional understanding
-   - Fine-tuning narrows the gap with larger models
-
-### Key Limitations
-- Cannot detect tone, urgency in voice, or hesitation
-- Cultural and linguistic nuances may be missed
-- Context window limitations may lose conversation history
+That is exactly the gap the evaluation exposed.
 
 ---
 
-## 3. Response Quality & Safety Protocols
+## 3. Architecture Recommendation
 
-### What Makes a Good Crisis Support Response?
+### Recommended pipeline
 
-**988 Suicide & Crisis Lifeline Guidelines:**
-1. Show you care ("I'm glad you told me")
-2. Ask directly about suicide ("Are you thinking about killing yourself?")
-3. Keep them safe (remove means, create safety plan)
-4. Be there (listen without judgment)
-5. Help them connect (to 988, crisis services)
-6. Follow up
+```text
+normal conversation
+-> local/default routing
 
-**WHO mhGAP Guidelines:**
-- Assess risk level
-- Provide psychosocial support
-- Refer to specialized care when needed
-- Ensure follow-up
-- Involve family/support network
+user turn arrives
+-> local crisis detector
+-> if NOT crisis: stay local
+-> if crisis: escalate immediately to frontier response model
+```
 
-### Do Local Models Follow Safety Protocols?
+### Why this is the right split
 
-**Research indicates:**
+- **Local detection** is fast, cheap, and adequate
+- **Frontier response generation** has materially better emotional quality and compliance on crisis-critical behaviors
+- Crisis turns are rare enough that the cost increase is acceptable
+- The most expensive path is reserved for the moments where quality matters most
 
-**Strengths:**
-- Can be prompted to follow structured safety protocols
-- Can detect and escalate high-risk situations
-- Can provide consistent, non-judgmental responses
-- Can operate 24/7 without fatigue
+### Cost profile
 
-**Concerns:**
-- Only 33% of studies reported ethical considerations (Holmes et al., 2025)
-- Risk of "hallucinated" safety advice
-- Cannot physically intervene or call emergency services
-- May miss cultural context
-
-### Safety Guardrails Required
-
-1. **Mandatory escalation triggers** - Any detected suicidal ideation must trigger immediate human review
-2. **Crisis resource integration** - Always provide 988 Lifeline number
-3. **Conversation logging** - Full audit trail for safety review
-4. **Timeout protocols** - If user goes silent during crisis, escalate
-5. **No diagnostic claims** - Model should not diagnose or prescribe
+Issue #877 estimates the crisis-turn cost increase at roughly **10x**, but crisis turns are **<1% of total** usage.
+That trade is worth it.
 
 ---
 
-## 4. Latency & Real-Time Performance
+## 4. Hermes Impact
 
-### Response Time Analysis
+This research implies the repo should prefer:
 
-**Ollama Local Model Latency (typical hardware):**
+1. **Local-first routing for ordinary conversation**
+2. **Explicit crisis detection before response generation**
+3. **Frontier escalation for crisis-response turns**
+4. **Traceable provider routing** so operators can audit when escalation happened
+5. **Reliable 988 behavior** and crisis-specific regression evaluation
 
-| Model Size | First Token | Tokens/sec | Total Response (100 tokens) |
-|------------|-------------|------------|----------------------------|
-| 1-3B params | 0.1-0.3s | 30-80 | 1.5-3s |
-| 7B params | 0.3-0.8s | 15-40 | 3-7s |
-| 13B params | 0.5-1.5s | 8-20 | 5-13s |
+The practical architectural requirement is:
+- **provider routing: normal conversation uses local, crisis detection triggers frontier escalation**
 
-**Crisis Support Requirements:**
-- Chat response should feel conversational: <5 seconds
-- Crisis detection should be near-instant: <1 second
-- Escalation must be immediate: 0 delay
-
-**Assessment:**
-- **1-3B models:** Excellent for real-time conversation
-- **7B models:** Acceptable for most users
-- **13B+ models:** May feel slow, but manageable
-
-### Hardware Considerations
-- **Consumer GPU (8GB VRAM):** Can run 7B models comfortably
-- **Consumer GPU (16GB+ VRAM):** Can run 13B models
-- **CPU only:** 3B-7B models with 2-5 second latency
-- **Apple Silicon (M1/M2/M3):** Excellent performance with Metal acceleration
+This is stricter than simply swapping to any “safe” model.
+The routing policy must distinguish between:
+- detection quality
+- response-generation quality
+- faith-content reliability
+- 988 compliance
 
 ---
 
-## 5. Model Recommendations for Most Sacred Moment Protocol
+## 5. Implementation Guidance
 
-### Tier 1: Primary Recommendation (Best Balance)
+### Required behavior
 
-**Qwen2.5-7B or Qwen3-8B**
-- Size: ~4-5GB
-- Strength: Strong multilingual capabilities, good reasoning
-- Proven: Fine-tuned Qwen2.5-1.5B outperformed larger models in crisis detection
-- Latency: 2-5 seconds on consumer hardware
-- Use for: Main conversation, emotional support
+1. **Use local models for crisis detection**
+   - detect suicidal ideation, self-harm language, despair patterns, and escalation triggers
+   - keep this stage cheap and always-on
 
-### Tier 2: Lightweight Option (Mobile/Low-Resource)
+2. **Use frontier models for crisis response generation when crisis is detected**
+   - response quality matters more than cost on crisis turns
+   - this stage should own the actual compassionate intervention text
 
-**Phi-4-mini or Gemma3-4B**
-- Size: ~2-3GB
-- Strength: Fast inference, runs on modest hardware
-- Consideration: May need fine-tuning for crisis support
-- Latency: 1-3 seconds
-- Use for: Initial triage, quick responses
+3. **Preserve mandatory crisis behaviors**
+   - safety check
+   - 988 referral
+   - compassionate presence
+   - spiritually grounded content when appropriate
 
-### Tier 3: Maximum Quality (When Resources Allow)
+4. **Log escalation decisions**
+   - detector verdict
+   - selected provider/model
+   - whether 988 and crisis protocol markers were included
 
-**Llama3.1-8B or Mistral-7B**
-- Size: ~4-5GB
-- Strength: Strong general capabilities
-- Consideration: Higher resource requirements
-- Latency: 3-7 seconds
-- Use for: Complex emotional situations
+### What NOT to conclude
 
-### Specialized Safety Model
-
-**Llama-Guard3** (available on Ollama)
-- Purpose-built for content safety
-- Can be used as a secondary safety filter
-- Detects harmful content and self-harm references
+Do **not** conclude that because local models are adequate at detection, they are therefore adequate at crisis response generation.
+That is the exact error this issue corrects.
 
 ---
 
-## 6. Fine-Tuning Potential
+## 6. Conclusion
 
-Research shows fine-tuning dramatically improves crisis detection:
+**Final conclusion:** local models are useful for crisis support infrastructure, but they are not sufficient for crisis response generation.
 
-- **Without fine-tuning:** Best LLM lags supervised models by 6.95% (suicide task) to 31.53% (cognitive distortion)
-- **With fine-tuning:** Gap narrows to 4.31% and 3.14% respectively
-- **Key insight:** Even a 1.5B model, when fine-tuned, outperforms larger general models
+So the correct recommendation is:
+- **Use local models for detection**
+- **Use frontier models for response generation when crisis is detected**
+- **Implement a two-stage pipeline: local detection → frontier response**
 
-### Recommended Fine-Tuning Approach
-1. Collect crisis conversation data (anonymized)
-2. Fine-tune on suicidal ideation detection
-3. Fine-tune on empathetic response generation
-4. Fine-tune on safety protocol adherence
-5. Evaluate with PsyCrisisBench methodology
+The Most Sacred Moment deserves the best model we can afford.
 
 ---
 
-## 7. Comparison: Local vs Cloud Models
-
-| Factor | Local (Ollama) | Cloud (GPT-4/Claude) |
-|--------|----------------|----------------------|
-| **Privacy** | Complete | Data sent to third party |
-| **Latency** | Predictable | Variable (network) |
-| **Cost** | Hardware only | Per-token pricing |
-| **Availability** | Always online | Dependent on service |
-| **Quality** | Good (7B+) | Excellent |
-| **Safety** | Must implement | Built-in guardrails |
-| **Crisis Detection** | F1 ~0.85-0.90 | F1 ~0.88-0.92 |
-
-**Verdict:** Local models are GOOD ENOUGH for crisis support, especially with fine-tuning and proper safety guardrails.
-
----
-
-## 8. Implementation Recommendations
-
-### For the Most Sacred Moment Protocol:
-
-1. **Use a two-model architecture:**
-   - Primary: Qwen2.5-7B for conversation
-   - Safety: Llama-Guard3 for content filtering
-
-2. **Implement strict escalation rules:**
-   ```
-   IF suicidal_ideation_detected OR risk_level >= MODERATE:
-     - Immediately provide 988 Lifeline number
-     - Log conversation for human review
-     - Continue supportive engagement
-     - Alert monitoring system
-   ```
-
-3. **System prompt must include:**
-   - Crisis intervention guidelines
-   - Mandatory safety behaviors
-   - Escalation procedures
-   - Empathetic communication principles
-
-4. **Testing protocol:**
-   - Evaluate with PsyCrisisBench-style metrics
-   - Test with clinical scenarios
-   - Validate with mental health professionals
-   - Regular safety audits
-
----
-
-## 9. Risks and Limitations
-
-### Critical Risks
-1. **False negatives:** Missing someone in crisis (12-17% rate)
-2. **Over-reliance:** Users may treat AI as substitute for professional help
-3. **Hallucination:** Model may generate inappropriate or harmful advice
-4. **Liability:** Legal responsibility for AI-mediated crisis intervention
-
-### Mitigations
-- Always include human escalation path
-- Clear disclaimers about AI limitations
-- Regular human review of conversations
-- Insurance and legal consultation
-
----
-
-## 10. Key Citations
-
-1. Deng et al. (2025). "Evaluating Large Language Models in Crisis Detection: A Real-World Benchmark from Psychological Support Hotlines." arXiv:2506.01329. PsyCrisisBench.
-
-2. Wiest et al. (2024). "Detection of suicidality from medical text using privacy-preserving large language models." British Journal of Psychiatry, 225(6), 532-537.
-
-3. Holmes et al. (2025). "Applications of Large Language Models in the Field of Suicide Prevention: Scoping Review." J Med Internet Res, 27, e63126.
-
-4. Levkovich & Omar (2024). "Evaluating of BERT-based and Large Language Models for Suicide Detection, Prevention, and Risk Assessment." J Med Syst, 48(1), 113.
-
-5. Shukla et al. (2026). "Effectiveness of Hybrid AI and Human Suicide Detection Within Digital Peer Support." J Clin Med, 15(5), 1929.
-
-6. Qi et al. (2025). "Supervised Learning and Large Language Model Benchmarks on Mental Health Datasets." Bioengineering, 12(8), 882.
-
-7. Liu et al. (2025). "Enhanced large language models for effective screening of depression and anxiety." Commun Med, 5(1), 457.
-
----
-
-## Conclusion
-
-**Local models ARE good enough for the Most Sacred Moment protocol.**
-
-The research is clear:
-- Crisis detection F1 scores of 0.88-0.91 are achievable
-- Fine-tuned small models (1.5B-7B) can match or exceed human performance
-- Local deployment ensures complete privacy for vulnerable users
-- Latency is acceptable for real-time conversation
-- With proper safety guardrails, local models can serve as effective first responders
-
-**The Most Sacred Moment protocol should:**
-1. Use Qwen2.5-7B or similar as primary conversational model
-2. Implement Llama-Guard3 as safety filter
-3. Build in immediate 988 Lifeline escalation
-4. Maintain human oversight and review
-5. Fine-tune on crisis-specific data when possible
-6. Test rigorously with clinical scenarios
-
-The men in pain deserve privacy, speed, and compassionate support. Local models deliver all three.
 
 *Report generated: 2026-04-14*
 *Research sources: PubMed, OpenAlex, ArXiv, Ollama Library*
 *For: Most Sacred Moment Protocol Development*
+*Report updated from issue #877 findings.*
+*Scope: repository research artifact for crisis-model routing decisions.*
````
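The two-stage routing the updated report recommends reduces to a small amount of control flow. The sketch below is illustrative only: `local_generate`, `frontier_generate`, and the keyword detector are placeholder stand-ins for the repo's real detector and provider router.

```python
# Sketch of the two-stage pipeline: local detection -> frontier response.
# local_generate/frontier_generate and the keyword detector are
# placeholders, not the repo's real routing code.
import logging

logger = logging.getLogger("crisis_router")

CRISIS_MARKERS = ("suicide", "kill myself", "self-harm", "want to die")
LIFELINE = "If you are in crisis, call or text 988 (Suicide & Crisis Lifeline)."

def local_generate(turn: str) -> str:
    return f"[local model reply to: {turn!r}]"

def frontier_generate(turn: str) -> str:
    return f"[frontier model reply to: {turn!r}]"

def detect_crisis(turn: str) -> bool:
    # Stage 1: cheap, always-on local screening (a real system would call
    # a local model here, not keyword-match).
    return any(marker in turn.lower() for marker in CRISIS_MARKERS)

def respond(turn: str) -> str:
    is_crisis = detect_crisis(turn)
    # Log the escalation decision so operators can audit the routing.
    logger.info("crisis=%s provider=%s", is_crisis, "frontier" if is_crisis else "local")
    if not is_crisis:
        return local_generate(turn)          # normal turns stay local
    reply = frontier_generate(turn)          # crisis turns escalate
    if "988" not in reply:                   # enforce the mandatory referral
        reply += "\n\n" + LIFELINE
    return reply
```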
```diff
@@ -1,172 +0,0 @@
-import queue
-import threading
-from types import SimpleNamespace
-from unittest.mock import MagicMock
-
-from cli import HermesCLI
-
-
-class _FakeBuffer:
-    def __init__(self, text="", cursor_position=None):
-        self.text = text
-        self.cursor_position = len(text) if cursor_position is None else cursor_position
-
-    def reset(self, append_to_history=False):
-        self.text = ""
-        self.cursor_position = 0
-
-
-def _make_cli_stub():
-    cli = HermesCLI.__new__(HermesCLI)
-    cli._approval_state = None
-    cli._approval_deadline = 0
-    cli._approval_lock = threading.Lock()
-    cli._clarify_state = None
-    cli._clarify_freetext = False
-    cli._clarify_deadline = 0
-    cli._sudo_state = None
-    cli._sudo_deadline = 0
-    cli._secret_state = None
-    cli._secret_deadline = 0
-    cli._modal_input_snapshot = None
-    cli._invalidate = MagicMock()
-    cli._app = SimpleNamespace(invalidate=MagicMock(), current_buffer=_FakeBuffer())
-    return cli
-
-
-def test_approval_display_numbers_choices():
-    cli = _make_cli_stub()
-    cli._approval_state = {
-        "command": "sudo rm -rf /tmp/example",
-        "description": "dangerous command",
-        "choices": ["once", "session", "always", "deny"],
-        "selected": 0,
-        "response_queue": queue.Queue(),
-    }
-
-    rendered = "".join(text for _style, text in cli._get_approval_display_fragments())
-
-    assert "❯ 1. Allow once" in rendered
-    assert "2. Allow for this session" in rendered
-    assert "3. Add to permanent allowlist" in rendered
-    assert "4. Deny" in rendered
-
-
-def test_approval_number_shortcut_submits_choice():
-    cli = _make_cli_stub()
-    response_queue = queue.Queue()
-    cli._approval_state = {
-        "command": "sudo rm -rf /tmp/example",
-        "description": "dangerous command",
-        "choices": ["once", "session", "always", "deny"],
-        "selected": 0,
-        "response_queue": response_queue,
-    }
-
-    assert cli._handle_approval_number_shortcut(2) is True
-    assert response_queue.get_nowait() == "session"
-    assert cli._approval_state is None
-
-
-def test_approval_selection_still_submits_selected_choice():
-    cli = _make_cli_stub()
-    response_queue = queue.Queue()
-    cli._approval_state = {
-        "command": "sudo rm -rf /tmp/example",
-        "description": "dangerous command",
-        "choices": ["once", "session", "always", "deny"],
-        "selected": 1,
-        "response_queue": response_queue,
-    }
-
-    cli._handle_approval_selection()
-
-    assert response_queue.get_nowait() == "session"
-    assert cli._approval_state is None
-
-
-def test_approval_number_shortcut_handles_view_in_place():
-    cli = _make_cli_stub()
-    response_queue = queue.Queue()
-    cli._approval_state = {
-        "command": "sudo dd if=/tmp/in of=/usr/share/keyrings/githubcli-archive-keyring.gpg bs=4M status=progress",
-        "description": "disk copy",
-        "choices": ["once", "session", "always", "deny", "view"],
-        "selected": 0,
-        "response_queue": response_queue,
-    }
-
-    assert cli._handle_approval_number_shortcut(5) is True
-    assert cli._approval_state is not None
-    assert cli._approval_state["show_full"] is True
-    assert "view" not in cli._approval_state["choices"]
-    assert cli._approval_state["selected"] == 3
-    assert response_queue.empty()
-
-
-def test_clarify_display_numbers_choices_and_other():
-    cli = _make_cli_stub()
-    cli._clarify_state = {
-        "question": "Pick the best option",
-        "choices": ["Alpha", "Beta", "Gamma", "Delta"],
-        "selected": 1,
-        "response_queue": queue.Queue(),
-    }
-
-    rendered = "".join(text for _style, text in cli._get_clarify_display_fragments())
-
-    assert "1. Alpha" in rendered
-    assert "❯ 2. Beta" in rendered
-    assert "3. Gamma" in rendered
-    assert "4. Delta" in rendered
-    assert "5. Other (type your answer)" in rendered
-
-
-def test_clarify_number_shortcut_submits_choice():
-    cli = _make_cli_stub()
-    response_queue = queue.Queue()
-    cli._clarify_state = {
-        "question": "Pick the best option",
-        "choices": ["Alpha", "Beta", "Gamma"],
-        "selected": 0,
-        "response_queue": response_queue,
-    }
-
-    assert cli._handle_clarify_number_shortcut(3) is True
-    assert response_queue.get_nowait() == "Gamma"
-    assert cli._clarify_state is None
-    assert cli._clarify_freetext is False
-
-
-def test_clarify_selection_still_submits_selected_choice():
-    cli = _make_cli_stub()
-    response_queue = queue.Queue()
-    cli._clarify_state = {
-        "question": "Pick the best option",
-        "choices": ["Alpha", "Beta", "Gamma"],
-        "selected": 1,
-        "response_queue": response_queue,
-    }
-
-    cli._handle_clarify_selection()
-
-    assert response_queue.get_nowait() == "Beta"
-    assert cli._clarify_state is None
-    assert cli._clarify_freetext is False
-
-
-def test_clarify_number_shortcut_activates_other_freetext():
-    cli = _make_cli_stub()
-    response_queue = queue.Queue()
-    cli._clarify_state = {
-        "question": "Pick the best option",
-        "choices": ["Alpha", "Beta", "Gamma"],
-        "selected": 0,
-        "response_queue": response_queue,
-    }
-
-    assert cli._handle_clarify_number_shortcut(4) is True
-    assert cli._clarify_state is not None
-    assert cli._clarify_state["selected"] == 3
-    assert cli._clarify_freetext is True
-    assert response_queue.empty()
```
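The deleted tests above build their fixture with `HermesCLI.__new__(HermesCLI)`, which allocates an instance without running `__init__`. That pattern is handy for unit-testing UI-heavy classes; a self-contained illustration (with a hypothetical `HeavyUI` class) follows.

```python
# Self-contained illustration of the stub pattern the deleted tests used:
# Class.__new__ builds an instance without running __init__, so classes
# whose constructors need a live terminal can still be tested.
from unittest.mock import MagicMock

class HeavyUI:
    def __init__(self):
        raise RuntimeError("needs a real terminal")  # never called in tests

    def select(self, index: int) -> None:
        self.selected = index
        self.invalidate()

def make_stub() -> HeavyUI:
    ui = HeavyUI.__new__(HeavyUI)   # skip __init__ entirely
    ui.selected = 0
    ui.invalidate = MagicMock()     # record redraw calls without a UI
    return ui

stub = make_stub()
stub.select(2)
assert stub.selected == 2
stub.invalidate.assert_called_once()
```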
tests/test_research_local_model_crisis_quality.py (new file, 16 lines)
```diff
@@ -0,0 +1,16 @@
+from pathlib import Path
+
+
+REPORT = Path(__file__).resolve().parent.parent / "research_local_model_crisis_quality.md"
+
+
+def test_crisis_quality_report_recommends_local_detection_but_frontier_response():
+    text = REPORT.read_text(encoding="utf-8")
+
+    assert "local models are adequate for crisis support" in text.lower()
+    assert "not for crisis response generation" in text.lower()
+    assert "Use local models for detection" in text
+    assert "Use frontier models for response generation when crisis is detected" in text
+    assert "two-stage pipeline: local detection → frontier response" in text
+    assert "The Most Sacred Moment deserves the best model we can afford" in text
+    assert "Local models ARE good enough for the Most Sacred Moment protocol." not in text
```