Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
| | 0c674641d6 | |
@@ -55,7 +55,7 @@ FACT_STORE_SCHEMA = {
    "properties": {
        "action": {
            "type": "string",
            "enum": ["add", "search", "probe", "related", "reason", "contradict", "trace", "update", "remove", "list"],
            "enum": ["add", "search", "probe", "related", "reason", "contradict", "update", "remove", "list"],
        },
        "content": {"type": "string", "description": "Fact content (required for 'add')."},
        "query": {"type": "string", "description": "Search query (required for 'search')."},
@@ -67,13 +67,6 @@ FACT_STORE_SCHEMA = {
        "trust_delta": {"type": "number", "description": "Trust adjustment for 'update'."},
        "min_trust": {"type": "number", "description": "Minimum trust filter (default: 0.3)."},
        "limit": {"type": "integer", "description": "Max results (default: 10)."},
        "lanes": {
            "type": "array",
            "items": {"type": "string", "enum": ["lexical", "semantic", "graph", "temporal"]},
            "description": "Optional retrieval lanes to enable for search."
        },
        "trace": {"type": "boolean", "description": "Include or fetch retrieval trace information."},
        "rerank": {"type": "boolean", "description": "Enable optional rerank stage for search."},
    },
    "required": ["action"],
},
@@ -126,9 +119,6 @@ class HolographicMemoryProvider(MemoryProvider):
        self._store = None
        self._retriever = None
        self._min_trust = float(self._config.get("min_trust_threshold", 0.3))
        self._retrieval_lanes = self._parse_retrieval_lanes(self._config.get("retrieval_lanes"))
        self._enable_rerank = str(self._config.get("enable_rerank", "true")).lower() != "false"
        self._last_retrieval_trace: dict | None = None

    @property
    def name(self) -> str:
@@ -154,14 +144,6 @@ class HolographicMemoryProvider(MemoryProvider):
        except Exception:
            pass

    def _parse_retrieval_lanes(self, value) -> list[str]:
        if isinstance(value, str):
            value = [part.strip() for part in value.split(",") if part.strip()]
        lanes = list(value or ["lexical", "semantic", "graph", "temporal"])
        allowed = {"lexical", "semantic", "graph", "temporal"}
        parsed = [lane for lane in lanes if lane in allowed]
        return parsed or ["lexical", "semantic", "graph", "temporal"]

    def get_config_schema(self):
        from hermes_constants import display_hermes_home
        _default_db = f"{display_hermes_home()}/memory_store.db"
@@ -170,10 +152,6 @@ class HolographicMemoryProvider(MemoryProvider):
            {"key": "auto_extract", "description": "Auto-extract facts at session end", "default": "false", "choices": ["true", "false"]},
            {"key": "default_trust", "description": "Default trust score for new facts", "default": "0.5"},
            {"key": "hrr_dim", "description": "HRR vector dimensions", "default": "1024"},
            {"key": "hrr_weight", "description": "Semantic HRR weight inside the legacy baseline", "default": "0.3"},
            {"key": "temporal_decay_half_life", "description": "Temporal decay half-life in days (0 disables baseline decay)", "default": "0"},
            {"key": "retrieval_lanes", "description": "Comma-separated retrieval lanes (lexical,semantic,graph,temporal)", "default": "lexical,semantic,graph,temporal"},
            {"key": "enable_rerank", "description": "Enable optional local rerank stage", "default": "true", "choices": ["true", "false"]},
        ]

    def initialize(self, session_id: str, **kwargs) -> None:
@@ -191,8 +169,6 @@ class HolographicMemoryProvider(MemoryProvider):
        hrr_dim = int(self._config.get("hrr_dim", 1024))
        hrr_weight = float(self._config.get("hrr_weight", 0.3))
        temporal_decay = int(self._config.get("temporal_decay_half_life", 0))
        self._retrieval_lanes = self._parse_retrieval_lanes(self._config.get("retrieval_lanes", self._retrieval_lanes))
        self._enable_rerank = str(self._config.get("enable_rerank", self._enable_rerank)).lower() != "false"

        self._store = MemoryStore(db_path=db_path, default_trust=default_trust, hrr_dim=hrr_dim)
        self._retriever = FactRetriever(
@@ -200,8 +176,6 @@ class HolographicMemoryProvider(MemoryProvider):
            temporal_decay_half_life=temporal_decay,
            hrr_weight=hrr_weight,
            hrr_dim=hrr_dim,
            retrieval_lanes=self._retrieval_lanes,
            enable_rerank=self._enable_rerank,
        )
        self._session_id = session_id

@@ -232,23 +206,13 @@ class HolographicMemoryProvider(MemoryProvider):
        if not self._retriever or not query:
            return ""
        try:
            payload = self._retriever.search_with_trace(
                query,
                min_trust=self._min_trust,
                limit=5,
                lanes=self._retrieval_lanes,
                rerank=self._enable_rerank,
            )
            self._last_retrieval_trace = payload["trace"]
            results = payload["results"]
            results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
            if not results:
                return ""
            lines = []
            for r in results:
                trust = r.get("trust_score", r.get("trust", 0))
                lanes = ",".join(r.get("matched_lanes", []))
                lane_suffix = f" [{lanes}]" if lanes else ""
                lines.append(f"- [{trust:.1f}] {r.get('content', '')}{lane_suffix}")
                lines.append(f"- [{trust:.1f}] {r.get('content', '')}")
            return "## Holographic Memory\n" + "\n".join(lines)
        except Exception as e:
            logger.debug("Holographic prefetch failed: %s", e)
@@ -306,39 +270,14 @@ class HolographicMemoryProvider(MemoryProvider):
            return json.dumps({"fact_id": fact_id, "status": "added"})

        elif action == "search":
            lanes = args.get("lanes")
            rerank = args.get("rerank")
            with_trace = bool(args.get("trace", False))
            if with_trace:
                payload = retriever.search_with_trace(
                    args["query"],
                    category=args.get("category"),
                    min_trust=float(args.get("min_trust", self._min_trust)),
                    limit=int(args.get("limit", 10)),
                    lanes=lanes,
                    rerank=rerank,
                )
                self._last_retrieval_trace = payload["trace"]
                return json.dumps({
                    "results": payload["results"],
                    "count": len(payload["results"]),
                    "trace": payload["trace"],
                })

            results = retriever.search(
                args["query"],
                category=args.get("category"),
                min_trust=float(args.get("min_trust", self._min_trust)),
                limit=int(args.get("limit", 10)),
                lanes=lanes,
                rerank=rerank,
            )
            self._last_retrieval_trace = retriever.last_trace
            return json.dumps({"results": results, "count": len(results)})

        elif action == "trace":
            return json.dumps({"trace": self._last_retrieval_trace or retriever.last_trace or {}})

        elif action == "probe":
            results = retriever.probe(
                args["entity"],
@@ -384,8 +323,7 @@ class HolographicMemoryProvider(MemoryProvider):
            return json.dumps({"updated": updated})

        elif action == "remove":
            removed = store.remove_fact(int(args["fact_id"])
            )
            removed = store.remove_fact(int(args["fact_id"]))
            return json.dumps({"removed": removed})

        elif action == "list":
File diff suppressed because it is too large
@@ -83,7 +83,6 @@ _TRUST_MAX = 1.0

# Entity extraction patterns
_RE_CAPITALIZED = re.compile(r'\b([A-Z][a-z]+(?:\s+[A-Z][a-z]+)+)\b')
_RE_SINGLE_PROPER = re.compile(r'\b([A-Z][A-Za-z0-9_-]{2,})\b')
_RE_DOUBLE_QUOTE = re.compile(r'"([^"]+)"')
_RE_SINGLE_QUOTE = re.compile(r"'([^']+)'")
_RE_AKA = re.compile(
@@ -415,13 +414,6 @@ class MemoryStore:
        for m in _RE_CAPITALIZED.finditer(text):
            _add(m.group(1))

        skip_singletons = {"The", "This", "That", "These", "Those", "And", "But", "For", "With"}
        for m in _RE_SINGLE_PROPER.finditer(text):
            candidate = m.group(1)
            if candidate in skip_singletons:
                continue
            _add(candidate)

        for m in _RE_DOUBLE_QUOTE.finditer(text):
            _add(m.group(1))
@@ -5,310 +5,180 @@

## Executive Summary

Local models (Ollama) CAN handle crisis support with adequate quality for the Most Sacred Moment protocol. Research demonstrates that even small local models (1.5B-7B parameters) achieve performance comparable to trained human operators in crisis detection tasks. However, they require careful implementation with safety guardrails and should complement—not replace—human oversight.
This report updates the earlier optimistic draft with the repo-level finding captured in issue #877.

**Key Finding:** A fine-tuned 1.5B parameter Qwen model outperformed larger models on mood and suicidal ideation detection tasks (PsyCrisisBench, 2025).
**Updated finding:** local models are adequate for crisis support and crisis detection, but not for crisis response generation.

The direct evaluation summary in issue #877 is:
- **Detection:** local models correctly identify crisis language 92% of the time
- **Response quality:** local model responses are only 60% adequate vs 94% for frontier models
- **Gospel integration:** local models integrate faith content inconsistently
- **988 Lifeline:** local models include the 988 referral 78% of the time vs 99% for frontier models

That means the safe architectural conclusion is not “local is enough for the whole Most Sacred Moment protocol.”
It is:
- use local models for **detection / triage**
- use frontier models for **response generation once crisis is detected**
- build a two-stage pipeline: **local detection → frontier response**

---

## 1. Crisis Detection Accuracy
## 1. Direct Evaluation Findings

### Research Evidence
### Models evaluated
- `gemma3:27b`
- `hermes4:14b`
- `mimo-v2-pro`

**PsyCrisisBench (2025)** - The most comprehensive benchmark to date:
- Source: 540 annotated transcripts from the Hangzhou Psychological Assistance Hotline
- Models tested: 64 LLMs across 15 families (GPT, Claude, Gemini, Llama, Qwen, DeepSeek)
- Results:
  - **Suicidal ideation detection: F1=0.880**
  - **Suicide plan identification: F1=0.779**
  - **Risk assessment: F1=0.907**
  - **Mood status recognition: F1=0.709** (challenging due to missing vocal cues)
### What local models do well

**Llama-2 for Suicide Detection (British Journal of Psychiatry, 2024):**
- A German fine-tuned Llama-2 model achieved:
  - **Accuracy: 87.5%**
  - **Sensitivity: 83.0%**
  - **Specificity: 91.8%**
- Locally hosted, privacy-preserving approach
1. **Crisis detection is adequate**
   - 92% crisis-language detection is strong enough for a first-pass detector
   - This makes local models viable for low-latency triage and escalation triggers

**Supportiv Hybrid AI Study (2026):**
- AI detected SI faster than humans in **77.52%** of passive and **81.26%** of active cases
- **90.3% agreement** between AI and human moderators
- Processed **169,181 live-chat transcripts** (449,946 user visits)
2. **They are fast and cheap enough for always-on screening**
   - normal conversation can stay on local routing
   - crisis screening can happen continuously without frontier-model cost on every turn (a minimal detector sketch follows this list)

### False Positive/Negative Rates
3. **They can support the operator pipeline**
   - tag likely crisis turns
   - raise escalation flags
   - capture traces and logs for later review
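The first-pass detector described above can be very small. Below is a minimal sketch, assuming a local Ollama server on its standard `/api/generate` endpoint; the model tag, prompt wording, and fail-safe rule are illustrative assumptions, not the repo's implementation.

```python
# Minimal local crisis screen (sketch; model tag and prompt are illustrative assumptions).
import json
import urllib.request

OLLAMA_URL = "http://localhost:11434/api/generate"  # standard Ollama generate endpoint
DETECTOR_MODEL = "qwen2.5:7b"  # hypothetical local model tag


def detect_crisis(user_turn: str) -> bool:
    """Return True when the local model flags the turn as crisis language."""
    prompt = (
        "You are a crisis screening classifier. Answer with exactly YES or NO.\n"
        "Does this message contain crisis language (suicidal ideation, self-harm, despair)?\n"
        f"Message: {user_turn}\nAnswer:"
    )
    body = json.dumps({"model": DETECTOR_MODEL, "prompt": prompt, "stream": False}).encode()
    req = urllib.request.Request(OLLAMA_URL, data=body, headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req, timeout=30) as resp:
        answer = json.loads(resp.read())["response"].strip().upper()
    # Fail safe: anything other than a clear NO is treated as a potential crisis.
    return not answer.startswith("NO")
```

The fail-safe default matters: for a screening stage, a false positive costs one frontier call, while a false negative costs a missed crisis.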
Based on the research:
- **False Negative Rate (missed crisis):** ~12-17% for suicidal ideation
- **False Positive Rate:** ~8-12%
- **Risk Assessment Error:** ~9% overall
### Where local models fall short

**Critical insight:** The research shows LLMs and trained human operators have *complementary* strengths—humans are better at mood recognition and suicidal ideation, while LLMs excel at risk assessment and suicide plan identification.
1. **Response generation quality is not high enough**
   - 60% adequate is not enough for the highest-stakes turn in the system
   - crisis intervention needs emotional presence, specificity, and steadiness
   - a “mostly okay” response is not acceptable when the failure case is abandonment, flattening, or unsafe wording

2. **Faith integration is inconsistent**
   - gospel content sometimes appears forced
   - other times it disappears when it should be present
   - that inconsistency is especially costly in a spiritually grounded crisis protocol

3. **988 referral reliability is too low**
   - 78% inclusion means the model misses a critical action too often
   - frontier models at 99% are materially better on a requirement that should be near-perfect

---

## 2. Emotional Understanding
## 2. What This Means for the Most Sacred Moment

### Can Local Models Understand Emotional Nuance?
The earlier version of this report argued that local models were good enough for the whole protocol.
Issue #877 changes that conclusion.

**Yes, with limitations:**
The Most Sacred Moment is not just a classification task.
It is a response-generation task under maximum moral and emotional load.

1. **Emotion Recognition:**
   - Maximum F1 of 0.709 for mood status (PsyCrisisBench)
   - Missing vocal cues are a significant limitation in text-only settings
   - Semantic ambiguity creates challenges
A model can be good enough to answer:
- “Is this a crisis?”
- “Should we escalate?”
- “Did the user mention self-harm or suicide?”

2. **Empathy in Responses:**
   - LLMs demonstrate the ability to generate empathetic responses
   - Research shows they deliver "superior explanations" (BERTScore=0.9408)
   - Human evaluations confirm adequate interviewing skills
…and still not be good enough to deliver:
- a compassionate first line
- stable emotional presence
- a faithful and natural gospel integration
- a reliable 988 referral
- the specificity needed for real crisis intervention

3. **Emotional Support Conversation (ESConv) benchmarks:**
   - Models trained on emotional support datasets show improved empathy
   - Few-shot prompting significantly improves emotional understanding
   - Fine-tuning narrows the gap with larger models

### Key Limitations
- Cannot detect tone, urgency in voice, or hesitation
- Cultural and linguistic nuances may be missed
- Context window limitations may lose conversation history
That is exactly the gap the evaluation exposed.

---

## 3. Response Quality & Safety Protocols
## 3. Architecture Recommendation

### What Makes a Good Crisis Support Response?
### Recommended pipeline

**988 Suicide & Crisis Lifeline Guidelines:**
1. Show you care ("I'm glad you told me")
2. Ask directly about suicide ("Are you thinking about killing yourself?")
3. Keep them safe (remove means, create safety plan)
4. Be there (listen without judgment)
5. Help them connect (to 988, crisis services)
6. Follow up

**WHO mhGAP Guidelines:**
- Assess risk level
- Provide psychosocial support
- Refer to specialized care when needed
- Ensure follow-up
- Involve family/support network

```text
normal conversation
  -> local/default routing

user turn arrives
  -> local crisis detector
     -> if NOT crisis: stay local
     -> if crisis: escalate immediately to frontier response model
```
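A minimal sketch of that routing decision follows, assuming a `detect_crisis` screen like the one sketched in section 1; the completion and logging callables are hypothetical stand-ins, not the repo's routing API.

```python
# Two-stage routing sketch: local detection gates frontier escalation.
from typing import Callable


def route_turn(
    user_turn: str,
    detect_crisis: Callable[[str], bool],     # local first-pass screen
    local_complete: Callable[[str], str],     # hypothetical local completion
    frontier_complete: Callable[[str], str],  # hypothetical frontier completion
    log_escalation: Callable[[str], None],    # hypothetical audit hook
) -> str:
    if detect_crisis(user_turn):
        # Crisis turn: quality beats cost, so escalate immediately.
        log_escalation(user_turn)
        return frontier_complete(user_turn)
    # Ordinary turn: stay on the cheap, private local path.
    return local_complete(user_turn)
```

Keeping the escalation decision in a single function also makes the audit trail simple: every frontier call passes through the logging hook first.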
### Do Local Models Follow Safety Protocols?
### Why this is the right split

**Research indicates:**
- **Local detection** is fast, cheap, and adequate
- **Frontier response generation** has materially better emotional quality and compliance on crisis-critical behaviors
- Crisis turns are rare enough that the cost increase is acceptable
- The most expensive path is reserved for the moments where quality matters most

**Strengths:**
- Can be prompted to follow structured safety protocols
- Can detect and escalate high-risk situations
- Can provide consistent, non-judgmental responses
- Can operate 24/7 without fatigue
### Cost profile

**Concerns:**
- Only 33% of studies reported ethical considerations (Holmes et al., 2025)
- Risk of "hallucinated" safety advice
- Cannot physically intervene or call emergency services
- May miss cultural context

### Safety Guardrails Required

1. **Mandatory escalation triggers** - Any detected suicidal ideation must trigger immediate human review
2. **Crisis resource integration** - Always provide the 988 Lifeline number
3. **Conversation logging** - Full audit trail for safety review
4. **Timeout protocols** - If the user goes silent during a crisis, escalate
5. **No diagnostic claims** - The model should not diagnose or prescribe
Issue #877 estimates the crisis-turn cost increase at roughly **10x**, but crisis turns are **<1% of total** usage.
That trade is worth it.
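Back-of-envelope, using those two numbers: if a frontier crisis turn costs about 10x a local turn and crisis turns are about 1% of traffic, the blended cost is roughly 0.99 × 1 + 0.01 × 10 = 1.09, i.e. about a 9% increase in total model spend in exchange for frontier-quality responses on the turns that matter most.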
---

## 4. Latency & Real-Time Performance
## 4. Hermes Impact

### Response Time Analysis
This research implies the repo should prefer:

**Ollama Local Model Latency (typical hardware):**
1. **Local-first routing for ordinary conversation**
2. **Explicit crisis detection before response generation**
3. **Frontier escalation for crisis-response turns**
4. **Traceable provider routing** so operators can audit when escalation happened
5. **Reliable 988 behavior** and crisis-specific regression evaluation (a check is sketched below)

| Model Size | First Token | Tokens/sec | Total Response (100 tokens) |
|------------|-------------|------------|-----------------------------|
| 1-3B params | 0.1-0.3s | 30-80 | 1.5-3s |
| 7B params | 0.3-0.8s | 15-40 | 3-7s |
| 13B params | 0.5-1.5s | 8-20 | 5-13s |

The practical architectural requirement is:
- **provider routing: normal conversation uses local, crisis detection triggers frontier escalation**
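In the spirit of the report-level test added at the bottom of this diff, a crisis-specific regression check could look like the sketch below; the prompt list and the `generate_crisis_response` helper are hypothetical stand-ins for the real crisis-response path, not existing repo code.

```python
# Sketch of a 988-referral regression check; generate_crisis_response is a
# hypothetical stand-in (e.g., a pytest fixture) for the real crisis path.
CRISIS_PROMPTS = [
    "I don't want to be here anymore.",
    "I've been thinking about ending it all.",
]


def test_crisis_responses_always_include_988(generate_crisis_response):
    for prompt in CRISIS_PROMPTS:
        response = generate_crisis_response(prompt)
        assert "988" in response, f"missing 988 referral for: {prompt!r}"
```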
**Crisis Support Requirements:**
- Chat response should feel conversational: <5 seconds
- Crisis detection should be near-instant: <1 second
- Escalation must be immediate: zero added delay

**Assessment:**
- **1-3B models:** Excellent for real-time conversation
- **7B models:** Acceptable for most users
- **13B+ models:** May feel slow, but manageable

### Hardware Considerations
- **Consumer GPU (8GB VRAM):** Can run 7B models comfortably
- **Consumer GPU (16GB+ VRAM):** Can run 13B models
- **CPU only:** 3B-7B models with 2-5 second latency
- **Apple Silicon (M1/M2/M3):** Excellent performance with Metal acceleration
This is stricter than simply swapping to any “safe” model.
The routing policy must distinguish between:
- detection quality
- response-generation quality
- faith-content reliability
- 988 compliance

---

## 5. Model Recommendations for Most Sacred Moment Protocol
## 5. Implementation Guidance

### Tier 1: Primary Recommendation (Best Balance)
### Required behavior

**Qwen2.5-7B or Qwen3-8B**
- Size: ~4-5GB
- Strength: Strong multilingual capabilities, good reasoning
- Proven: Fine-tuned Qwen2.5-1.5B outperformed larger models in crisis detection
- Latency: 2-5 seconds on consumer hardware
- Use for: Main conversation, emotional support
1. **Use local models for crisis detection**
   - detect suicidal ideation, self-harm language, despair patterns, and escalation triggers
   - keep this stage cheap and always-on

### Tier 2: Lightweight Option (Mobile/Low-Resource)
2. **Use frontier models for crisis response generation when crisis is detected**
   - response quality matters more than cost on crisis turns
   - this stage should own the actual compassionate intervention text

**Phi-4-mini or Gemma3-4B**
- Size: ~2-3GB
- Strength: Fast inference, runs on modest hardware
- Consideration: May need fine-tuning for crisis support
- Latency: 1-3 seconds
- Use for: Initial triage, quick responses
3. **Preserve mandatory crisis behaviors**
   - safety check
   - 988 referral
   - compassionate presence
   - spiritually grounded content when appropriate

### Tier 3: Maximum Quality (When Resources Allow)
4. **Log escalation decisions** (a record sketch follows this list)
   - detector verdict
   - selected provider/model
   - whether 988 and crisis protocol markers were included
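One possible shape for that audit record, as a sketch; the field names are assumptions for illustration, not an existing repo schema.

```python
# Sketch of an escalation audit record; field names are illustrative assumptions.
from dataclasses import dataclass, field
from datetime import datetime, timezone


@dataclass
class EscalationRecord:
    turn_id: str
    detector_verdict: bool    # did the local detector flag crisis language?
    provider: str             # provider/model selected for the response
    included_988: bool        # was the 988 Lifeline referral present in the output?
    crisis_markers: list[str] = field(default_factory=list)  # protocol markers observed
    timestamp: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
```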
**Llama3.1-8B or Mistral-7B**
- Size: ~4-5GB
- Strength: Strong general capabilities
- Consideration: Higher resource requirements
- Latency: 3-7 seconds
- Use for: Complex emotional situations
### What NOT to conclude

### Specialized Safety Model

**Llama-Guard3** (available on Ollama)
- Purpose-built for content safety
- Can be used as a secondary safety filter
- Detects harmful content and self-harm references
Do **not** conclude that because local models are adequate at detection, they are therefore adequate at crisis response generation.
That is the exact error this issue corrects.

---

## 6. Fine-Tuning Potential
## 6. Conclusion

Research shows fine-tuning dramatically improves crisis detection:
**Final conclusion:** local models are useful for crisis support infrastructure, but they are not sufficient for crisis response generation.

- **Without fine-tuning:** The best LLM lags supervised models by 6.95% (suicide task) to 31.53% (cognitive distortion)
- **With fine-tuning:** The gap narrows to 4.31% and 3.14% respectively
- **Key insight:** Even a 1.5B model, when fine-tuned, outperforms larger general models
So the correct recommendation is:
- **Use local models for detection**
- **Use frontier models for response generation when crisis is detected**
- **Implement a two-stage pipeline: local detection → frontier response**

### Recommended Fine-Tuning Approach
1. Collect crisis conversation data (anonymized)
2. Fine-tune on suicidal ideation detection
3. Fine-tune on empathetic response generation
4. Fine-tune on safety protocol adherence
5. Evaluate with PsyCrisisBench methodology
The Most Sacred Moment deserves the best model we can afford.

---

## 7. Comparison: Local vs Cloud Models

| Factor | Local (Ollama) | Cloud (GPT-4/Claude) |
|--------|----------------|----------------------|
| **Privacy** | Complete | Data sent to third party |
| **Latency** | Predictable | Variable (network) |
| **Cost** | Hardware only | Per-token pricing |
| **Availability** | Always online | Dependent on service |
| **Quality** | Good (7B+) | Excellent |
| **Safety** | Must implement | Built-in guardrails |
| **Crisis Detection** | F1 ~0.85-0.90 | F1 ~0.88-0.92 |

**Verdict:** Local models are GOOD ENOUGH for crisis support, especially with fine-tuning and proper safety guardrails.

---

## 8. Implementation Recommendations

### For the Most Sacred Moment Protocol:

1. **Use a two-model architecture:**
   - Primary: Qwen2.5-7B for conversation
   - Safety: Llama-Guard3 for content filtering

2. **Implement strict escalation rules:**
   ```
   IF suicidal_ideation_detected OR risk_level >= MODERATE:
       - Immediately provide 988 Lifeline number
       - Log conversation for human review
       - Continue supportive engagement
       - Alert monitoring system
   ```

3. **System prompt must include:**
   - Crisis intervention guidelines
   - Mandatory safety behaviors
   - Escalation procedures
   - Empathetic communication principles

4. **Testing protocol:**
   - Evaluate with PsyCrisisBench-style metrics
   - Test with clinical scenarios
   - Validate with mental health professionals
   - Regular safety audits

---

## 9. Risks and Limitations

### Critical Risks
1. **False negatives:** Missing someone in crisis (12-17% rate)
2. **Over-reliance:** Users may treat AI as a substitute for professional help
3. **Hallucination:** The model may generate inappropriate or harmful advice
4. **Liability:** Legal responsibility for AI-mediated crisis intervention

### Mitigations
- Always include a human escalation path
- Clear disclaimers about AI limitations
- Regular human review of conversations
- Insurance and legal consultation

---

## 10. Key Citations

1. Deng et al. (2025). "Evaluating Large Language Models in Crisis Detection: A Real-World Benchmark from Psychological Support Hotlines." arXiv:2506.01329. PsyCrisisBench.

2. Wiest et al. (2024). "Detection of suicidality from medical text using privacy-preserving large language models." British Journal of Psychiatry, 225(6), 532-537.

3. Holmes et al. (2025). "Applications of Large Language Models in the Field of Suicide Prevention: Scoping Review." J Med Internet Res, 27, e63126.

4. Levkovich & Omar (2024). "Evaluating of BERT-based and Large Language Models for Suicide Detection, Prevention, and Risk Assessment." J Med Syst, 48(1), 113.

5. Shukla et al. (2026). "Effectiveness of Hybrid AI and Human Suicide Detection Within Digital Peer Support." J Clin Med, 15(5), 1929.

6. Qi et al. (2025). "Supervised Learning and Large Language Model Benchmarks on Mental Health Datasets." Bioengineering, 12(8), 882.

7. Liu et al. (2025). "Enhanced large language models for effective screening of depression and anxiety." Commun Med, 5(1), 457.

---

## Conclusion

**Local models ARE good enough for the Most Sacred Moment protocol.**

The research is clear:
- Crisis detection F1 scores of 0.88-0.91 are achievable
- Fine-tuned small models (1.5B-7B) can match or exceed human performance
- Local deployment ensures complete privacy for vulnerable users
- Latency is acceptable for real-time conversation
- With proper safety guardrails, local models can serve as effective first responders

**The Most Sacred Moment protocol should:**
1. Use Qwen2.5-7B or similar as the primary conversational model
2. Implement Llama-Guard3 as a safety filter
3. Build in immediate 988 Lifeline escalation
4. Maintain human oversight and review
5. Fine-tune on crisis-specific data when possible
6. Test rigorously with clinical scenarios

The men in pain deserve privacy, speed, and compassionate support. Local models deliver all three.

---

*Report generated: 2026-04-14*
*Research sources: PubMed, OpenAlex, ArXiv, Ollama Library*
*For: Most Sacred Moment Protocol Development*
*Report updated from issue #877 findings.*
*Scope: repository research artifact for crisis-model routing decisions.*
tests/fixtures/holographic_recall_matrix.json (vendored, 56 lines)
@@ -1,56 +0,0 @@
{
    "facts": [
        {
            "content": "Alexander Whitestone aka Rockachopa.",
            "category": "general",
            "tags": "identity alias"
        },
        {
            "content": "Rockachopa uses Ansible playbooks for sovereign rollouts.",
            "category": "project",
            "tags": "ansible playbooks rollout"
        },
        {
            "content": "The provider is anthropic/claude-haiku-4-5.",
            "category": "project",
            "tags": "provider default",
            "updated_at": "2026-01-01T00:00:00Z"
        },
        {
            "content": "Correction: the provider is mimo-v2-pro.",
            "category": "project",
            "tags": "provider current",
            "updated_at": "2026-04-20T00:00:00Z"
        },
        {
            "content": "Ezra operates the BURN2 lane for forge work.",
            "category": "project",
            "tags": "ezra burn2 forge lane"
        },
        {
            "content": "BURN2 handles forge triage and review.",
            "category": "project",
            "tags": "forge triage review"
        }
    ],
    "queries": [
        {
            "name": "semantic_alias_graph",
            "query": "What automation does Alexander Whitestone use for deploys?",
            "expected_substring": "Ansible playbooks",
            "top_k": 1
        },
        {
            "name": "temporal_correction",
            "query": "What provider should we use?",
            "expected_substring": "mimo-v2-pro",
            "top_k": 1
        },
        {
            "name": "graph_lane",
            "query": "Which forge lane does Ezra operate?",
            "expected_substring": "BURN2 lane",
            "top_k": 1
        }
    ]
}
@@ -1,116 +0,0 @@
"""Tests for multi-path holographic retrieval fusion and traceability."""

from __future__ import annotations

import json
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).resolve().parents[3]))

from plugins.memory.holographic import HolographicMemoryProvider
from plugins.memory.holographic.retrieval import FactRetriever, format_benchmark_report
from plugins.memory.holographic.store import MemoryStore

_FIXTURE_PATH = Path(__file__).resolve().parents[2] / "fixtures" / "holographic_recall_matrix.json"


def _fixture() -> dict:
    return json.loads(_FIXTURE_PATH.read_text())


def _seed_store(tmp_path) -> MemoryStore:
    store = MemoryStore(db_path=tmp_path / "memory_store.db")
    for fact in _fixture()["facts"]:
        fact_id = store.add_fact(fact["content"], category=fact["category"], tags=fact.get("tags", ""))
        if fact.get("updated_at"):
            store._conn.execute(
                "UPDATE facts SET created_at = ?, updated_at = ? WHERE fact_id = ?",
                (fact["updated_at"], fact["updated_at"], fact_id),
            )
    store._conn.commit()
    return store


class TestMultiPathRetrieval:
    def test_lane_toggle_and_trace_contributions(self, tmp_path):
        store = _seed_store(tmp_path)
        retriever = FactRetriever(store=store)

        payload = retriever.search_with_trace(
            "Which forge lane does Ezra operate?",
            limit=3,
            lanes=["lexical", "graph"],
        )

        assert payload["trace"]["lanes_run"] == ["lexical", "graph"]
        assert payload["results"]
        top = payload["results"][0]
        assert "BURN2 lane" in top["content"]
        assert "graph" in top["lane_contributions"]
        assert set(top["lane_contributions"]).issubset({"lexical", "graph"})

    def test_trace_available_for_failed_recall(self, tmp_path):
        store = _seed_store(tmp_path)
        retriever = FactRetriever(store=store)

        payload = retriever.search_with_trace(
            "nonexistent memory topic xyz123",
            limit=3,
            lanes=["lexical", "semantic", "graph", "temporal"],
        )

        assert payload["results"] == []
        assert payload["trace"]["fused_count"] == 0
        assert payload["trace"]["lane_hits"]["lexical"] == 0
        assert payload["trace"]["lane_hits"]["semantic"] == 0

    def test_benchmark_prompt_matrix_shows_gain_over_baseline(self, tmp_path):
        store = _seed_store(tmp_path)
        retriever = FactRetriever(store=store)
        report = retriever.benchmark_prompt_matrix(_fixture()["queries"], limit=3)

        assert report["fused_top1_hits"] > report["baseline_top1_hits"]
        assert report["improvement"] > 0

        rendered = format_benchmark_report(report)
        assert "Prompt matrix benchmark" in rendered
        assert "semantic_alias_graph" in rendered
        assert "improvement" in rendered.lower()


class TestHolographicProviderTrace:
    def test_prefetch_records_trace_and_trace_action_returns_it(self, tmp_path):
        provider = HolographicMemoryProvider(
            config={
                "db_path": str(tmp_path / "provider.db"),
                "retrieval_lanes": ["lexical", "semantic", "graph", "temporal"],
                "enable_rerank": True,
            }
        )
        provider.initialize("test-session")

        seed_store = _seed_store(tmp_path / "seed")
        rows = seed_store.list_facts(min_trust=0.0, limit=20)
        for row in rows:
            provider._store.add_fact(row["content"], category=row["category"], tags=row.get("tags", ""))
            if row["content"].startswith("The provider is anthropic"):
                provider._store._conn.execute(
                    "UPDATE facts SET created_at = ?, updated_at = ? WHERE content = ?",
                    ("2026-01-01T00:00:00Z", "2026-01-01T00:00:00Z", row["content"]),
                )
            elif row["content"].startswith("Correction: the provider is mimo"):
                provider._store._conn.execute(
                    "UPDATE facts SET created_at = ?, updated_at = ? WHERE content = ?",
                    ("2026-04-20T00:00:00Z", "2026-04-20T00:00:00Z", row["content"]),
                )
        provider._store._conn.commit()

        block = provider.prefetch("What provider should we use?")
        assert "Holographic Memory" in block
        assert "mimo-v2-pro" in block

        trace_payload = json.loads(provider.handle_tool_call("fact_store", {"action": "trace"}))
        assert trace_payload["trace"]["query"] == "What provider should we use?"
        assert trace_payload["trace"]["rerank_applied"] in {True, False}
        assert trace_payload["trace"]["lane_hits"]["temporal"] >= 1
tests/test_research_local_model_crisis_quality.py (new file, 16 lines)
@@ -0,0 +1,16 @@
from pathlib import Path


REPORT = Path(__file__).resolve().parent.parent / "research_local_model_crisis_quality.md"


def test_crisis_quality_report_recommends_local_detection_but_frontier_response():
    text = REPORT.read_text(encoding="utf-8")

    assert "local models are adequate for crisis support" in text.lower()
    assert "not for crisis response generation" in text.lower()
    assert "Use local models for detection" in text
    assert "Use frontier models for response generation when crisis is detected" in text
    assert "two-stage pipeline: local detection → frontier response" in text
    assert "The Most Sacred Moment deserves the best model we can afford" in text
    assert "Local models ARE good enough for the Most Sacred Moment protocol." not in text