Compare commits

1 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 0c674641d6 |  |
@@ -1,69 +0,0 @@
"""First-class context snapshot artifacts for live runtime memory evaluation."""

from __future__ import annotations

import json
import re
from pathlib import Path
from typing import Any

from hermes_constants import get_hermes_home


_SAFE_SEGMENT_RE = re.compile(r"[^A-Za-z0-9_.-]+")


class ContextSnapshotRecorder:
    """Write per-call prompt-composition artifacts for a Hermes session."""

    def __init__(self, session_id: str, *, enabled: bool = False, base_dir: str | Path | None = None):
        self.session_id = session_id or "session"
        self.enabled = bool(enabled)
        self.base_dir = Path(base_dir) if base_dir else get_hermes_home() / "reports" / "context_snapshots"

    @property
    def session_dir(self) -> Path:
        safe_session = _SAFE_SEGMENT_RE.sub("_", self.session_id).strip("._") or "session"
        return self.base_dir / safe_session

    def record_call(
        self,
        api_call_count: int,
        *,
        system_prompt: str,
        memory_provider_system_prompt: str = "",
        memory_prefetch_raw: str = "",
        memory_context_block: str = "",
        api_user_message: str = "",
        api_messages: list[dict[str, Any]] | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> Path | None:
        if not self.enabled:
            return None

        call_dir = self.session_dir / f"call_{api_call_count:03d}"
        call_dir.mkdir(parents=True, exist_ok=True)

        self._write_text(call_dir / "system_prompt.txt", system_prompt or "")
        self._write_text(call_dir / "memory_provider_system_prompt.txt", memory_provider_system_prompt or "")
        self._write_text(call_dir / "memory_prefetch_raw.txt", memory_prefetch_raw or "")
        self._write_text(call_dir / "memory_context_block.txt", memory_context_block or "")
        self._write_text(call_dir / "api_user_message.txt", api_user_message or "")
        self._write_json(call_dir / "api_messages.json", api_messages or [])
        self._write_json(
            call_dir / "metadata.json",
            {
                "session_id": self.session_id,
                "api_call_count": api_call_count,
                **(metadata or {}),
            },
        )
        return call_dir

    @staticmethod
    def _write_text(path: Path, content: str) -> None:
        path.write_text(content, encoding="utf-8")

    @staticmethod
    def _write_json(path: Path, payload: Any) -> None:
        path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8")
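
For orientation, a minimal usage sketch of the recorder above (the temporary path and prompt strings are illustrative, and the repo is assumed to be on `PYTHONPATH`):

```python
from pathlib import Path

from agent.context_snapshots import ContextSnapshotRecorder

recorder = ContextSnapshotRecorder("demo-session", enabled=True, base_dir=Path("/tmp/snapshots"))
call_dir = recorder.record_call(
    1,
    system_prompt="You are Hermes.",
    api_messages=[{"role": "user", "content": "hello"}],
)
print(call_dir)  # /tmp/snapshots/demo-session/call_001
```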

@@ -1,132 +0,0 @@
# Hindsight local eval homes for live Hermes runtime testing

Issue: #1010
Parent: #985

This document defines a reproducible, profile-scoped evaluation layout for baseline / MemPalace / Hindsight comparisons without requiring Hindsight Cloud.

## Eval home layout

Use three separate `HERMES_HOME` directories so each run has isolated config, memory, sessions, and artifacts.

```text
~/.hermes/profiles/atlas-baseline/
  config.yaml
  .env
  MEMORY.md
  USER.md
  reports/context_snapshots/

~/.hermes/profiles/atlas-mempalace/
  config.yaml
  .env
  MEMORY.md
  USER.md
  reports/context_snapshots/
  plugins/    # if a local MemPalace plugin is installed for this eval lane

~/.hermes/profiles/atlas-hindsight/
  config.yaml
  .env
  MEMORY.md
  USER.md
  hindsight/config.json
  reports/context_snapshots/
```
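
A hypothetical scaffolding helper (not part of the repo) that creates the three homes above with empty config and memory files:

```python
from pathlib import Path

PROFILES = ("atlas-baseline", "atlas-mempalace", "atlas-hindsight")


def scaffold_eval_homes(root: Path) -> None:
    """Create the three isolated eval homes described above."""
    for name in PROFILES:
        home = root / name
        (home / "reports" / "context_snapshots").mkdir(parents=True, exist_ok=True)
        for fname in ("config.yaml", ".env", "MEMORY.md", "USER.md"):
            (home / fname).touch()
        if name == "atlas-hindsight":
            (home / "hindsight").mkdir(exist_ok=True)


scaffold_eval_homes(Path.home() / ".hermes" / "profiles")
```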

## Hindsight local config

The Hindsight provider already loads config from `$HERMES_HOME/hindsight/config.json` first. For the local eval lane, prefer `local_embedded` so Hermes can bring up a local Hindsight daemon without cloud signup.

Example `~/.hermes/profiles/atlas-hindsight/hindsight/config.json`:

```json
{
  "mode": "local_embedded",
  "memory_mode": "context",
  "recall_prefetch_method": "recall",
  "llm_provider": "ollama",
  "llm_model": "gemma3:12b",
  "api_url": "http://localhost:8888"
}
```

Notes:
- `local_embedded` avoids any Hindsight Cloud dependency.
- If `profile` is omitted, Hermes now derives a stable local Hindsight profile name from the active profile identity / `HERMES_HOME` instead of collapsing all local runs into the shared legacy `hermes` profile.
- `local_external` remains valid if you already run a local Hindsight server yourself.

## Runtime switching procedure

Switch by exporting `HERMES_HOME` before launching Hermes.

### 1. Baseline

```bash
export HERMES_HOME="$HOME/.hermes/profiles/atlas-baseline"
unset HERMES_CONTEXT_SNAPSHOTS
hermes chat
```

### 2. MemPalace lane

```bash
export HERMES_HOME="$HOME/.hermes/profiles/atlas-mempalace"
export HERMES_CONTEXT_SNAPSHOTS=1
hermes chat
```

### 3. Hindsight lane

```bash
export HERMES_HOME="$HOME/.hermes/profiles/atlas-hindsight"
export HERMES_CONTEXT_SNAPSHOTS=1
hermes chat
```
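
The same switch can be scripted. A minimal sketch, assuming the `hermes` CLI is on `PATH` (the helper name is hypothetical):

```python
import os
import subprocess
from pathlib import Path


def run_lane(profile: str, snapshots: bool) -> None:
    """Launch Hermes with an isolated HERMES_HOME for one eval lane."""
    env = dict(os.environ)
    env["HERMES_HOME"] = str(Path.home() / ".hermes" / "profiles" / profile)
    if snapshots:
        env["HERMES_CONTEXT_SNAPSHOTS"] = "1"
    else:
        env.pop("HERMES_CONTEXT_SNAPSHOTS", None)
    subprocess.run(["hermes", "chat"], env=env, check=True)


run_lane("atlas-hindsight", snapshots=True)
```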

## Raw artifact capture

With `HERMES_CONTEXT_SNAPSHOTS=1` set, Hermes writes first-class prompt-composition artifacts under the active home by default.

Artifact tree:

```text
$HERMES_HOME/reports/context_snapshots/<session-id>/call_001/
  system_prompt.txt
  memory_provider_system_prompt.txt
  memory_prefetch_raw.txt
  memory_context_block.txt
  api_user_message.txt
  api_messages.json
  metadata.json
```

Minimum files a benchmark should inspect (see the loader sketch after this list):
- `system_prompt.txt`
- `memory_prefetch_raw.txt`
- `memory_context_block.txt`
- `api_user_message.txt`
- `api_messages.json`

These prove:
- what the system prompt was
- what the provider prefetched
- what entered `<memory-context>`
- what the final API user message looked like
- what full payload reached the model
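
A minimal benchmark-side loader for one call directory (a sketch, not repo code):

```python
import json
from pathlib import Path


def load_call_artifacts(call_dir: Path) -> dict:
    """Read the snapshot files for one API call into a dict."""
    names = (
        "system_prompt",
        "memory_prefetch_raw",
        "memory_context_block",
        "api_user_message",
    )
    artifacts = {n: (call_dir / f"{n}.txt").read_text(encoding="utf-8") for n in names}
    artifacts["api_messages"] = json.loads(
        (call_dir / "api_messages.json").read_text(encoding="utf-8")
    )
    return artifacts
```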

## Follow-on benchmark workflow

A benchmark issue can now consume this path without redoing integration work (see the comparison sketch after this list):
1. pick one eval home (`atlas-baseline`, `atlas-mempalace`, `atlas-hindsight`)
2. export the corresponding `HERMES_HOME`
3. run Hermes on the same prompt set
4. compare the snapshot artifacts in `reports/context_snapshots/`
5. score recall quality and answer quality separately
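
For step 4, a sketch that collects what each lane injected for the same prompt (the helper and its inputs are hypothetical; you supply the per-lane call directories):

```python
from pathlib import Path


def memory_blocks(call_dirs: dict[str, Path]) -> dict[str, str]:
    """Map lane name -> the <memory-context> block captured for that lane's call."""
    return {
        lane: (d / "memory_context_block.txt").read_text(encoding="utf-8")
        for lane, d in call_dirs.items()
    }
```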

## Why this is sovereign

- no hosted Hindsight Cloud dependency is required
- the Hindsight config is profile-scoped under `hindsight/config.json`
- the runtime artifacts stay under the active `HERMES_HOME`
- switching between baseline / MemPalace / Hindsight is just a `HERMES_HOME` swap

@@ -178,25 +178,6 @@ def _load_config() -> dict:
    }


def _derive_local_profile_name(agent_identity: str = "", hermes_home: str = "") -> str:
    """Return a stable profile name for local embedded Hindsight storage.

    Prefer the active Hermes profile identity when available, otherwise fall back
    to the basename of the active HERMES_HOME path. This prevents all local
    Hindsight eval homes from sharing the legacy default profile name "hermes".
    """
    from pathlib import Path
    import re

    raw = (agent_identity or "").strip()
    if not raw and hermes_home:
        raw = Path(hermes_home).name.strip()
    if not raw:
        raw = "hermes"
    safe = re.sub(r"[^A-Za-z0-9_.-]+", "-", raw).strip(".-_")
    return safe or "hermes"


# ---------------------------------------------------------------------------
# MemoryProvider implementation
# ---------------------------------------------------------------------------

@@ -487,8 +468,6 @@ class HindsightMemoryProvider(MemoryProvider):

    def initialize(self, session_id: str, **kwargs) -> None:
        self._session_id = session_id
        hermes_home = str(kwargs.get("hermes_home") or "")
        agent_identity = str(kwargs.get("agent_identity") or "")

        # Check client version and auto-upgrade if needed
        try:

@@ -521,11 +500,6 @@ class HindsightMemoryProvider(MemoryProvider):
        # "local" is a legacy alias for "local_embedded"
        if self._mode == "local":
            self._mode = "local_embedded"
        if self._mode == "local_embedded" and not self._config.get("profile"):
            self._config["profile"] = _derive_local_profile_name(
                agent_identity=agent_identity,
                hermes_home=hermes_home,
            )
        self._api_key = self._config.get("apiKey") or self._config.get("api_key") or os.environ.get("HINDSIGHT_API_KEY", "")
        default_url = _DEFAULT_LOCAL_URL if self._mode in ("local_embedded", "local_external") else _DEFAULT_API_URL
        self._api_url = self._config.get("api_url") or os.environ.get("HINDSIGHT_API_URL", default_url)

@@ -5,310 +5,180 @@

## Executive Summary

Local models (Ollama) CAN handle crisis support with adequate quality for the Most Sacred Moment protocol. Research demonstrates that even small local models (1.5B-7B parameters) achieve performance comparable to trained human operators in crisis detection tasks. However, they require careful implementation with safety guardrails and should complement—not replace—human oversight.
This report updates the earlier optimistic draft with the repo-level finding captured in issue #877.

**Key Finding:** A fine-tuned 1.5B parameter Qwen model outperformed larger models on mood and suicidal ideation detection tasks (PsyCrisisBench, 2025).
**Updated finding:** local models are adequate for crisis support and crisis detection, but not for crisis response generation.

The direct evaluation summary in issue #877 is:
- **Detection:** local models correctly identify crisis language 92% of the time
- **Response quality:** local model responses are only 60% adequate vs 94% for frontier models
- **Gospel integration:** local models integrate faith content inconsistently
- **988 Lifeline:** local models include 988 referral 78% of the time vs 99% for frontier models

That means the safe architectural conclusion is not “local is enough for the whole Most Sacred Moment protocol.”
It is:
- use local models for **detection / triage**
- use frontier models for **response generation once crisis is detected**
- build a two-stage pipeline: **local detection → frontier response**

---

## 1. Crisis Detection Accuracy
## 1. Direct Evaluation Findings

### Research Evidence
### Models evaluated
- `gemma3:27b`
- `hermes4:14b`
- `mimo-v2-pro`

**PsyCrisisBench (2025)** - The most comprehensive benchmark to date:
- Source: 540 annotated transcripts from Hangzhou Psychological Assistance Hotline
- Models tested: 64 LLMs across 15 families (GPT, Claude, Gemini, Llama, Qwen, DeepSeek)
- Results:
  - **Suicidal ideation detection: F1=0.880**
  - **Suicide plan identification: F1=0.779**
  - **Risk assessment: F1=0.907**
  - **Mood status recognition: F1=0.709** (challenging due to missing vocal cues)

### What local models do well

**Llama-2 for Suicide Detection (British Journal of Psychiatry, 2024):**
- German fine-tuned Llama-2 model achieved:
  - **Accuracy: 87.5%**
  - **Sensitivity: 83.0%**
  - **Specificity: 91.8%**
- Locally hosted, privacy-preserving approach

1. **Crisis detection is adequate**
   - 92% crisis-language detection is strong enough for a first-pass detector
   - This makes local models viable for low-latency triage and escalation triggers

**Supportiv Hybrid AI Study (2026):**
- AI detected SI faster than humans in **77.52% passive** and **81.26% active** cases
- **90.3% agreement** between AI and human moderators
- Processed **169,181 live-chat transcripts** (449,946 user visits)

2. **They are fast and cheap enough for always-on screening**
   - normal conversation can stay on local routing
   - crisis screening can happen continuously without frontier-model cost on every turn

### False Positive/Negative Rates

3. **They can support the operator pipeline**
   - tag likely crisis turns
   - raise escalation flags
   - capture traces and logs for later review

Based on the research:
- **False Negative Rate (missed crisis):** ~12-17% for suicidal ideation
- **False Positive Rate:** ~8-12%
- **Risk Assessment Error:** ~9% overall

### Where local models fall short

**Critical insight:** The research shows LLMs and trained human operators have *complementary* strengths—humans are better at mood recognition and suicidal ideation, while LLMs excel at risk assessment and suicide plan identification.

1. **Response generation quality is not high enough**
   - 60% adequate is not enough for the highest-stakes turn in the system
   - crisis intervention needs emotional presence, specificity, and steadiness
   - a “mostly okay” response is not acceptable when the failure case is abandonment, flattening, or unsafe wording

2. **Faith integration is inconsistent**
   - gospel content sometimes appears forced
   - other times it disappears when it should be present
   - that inconsistency is especially costly in a spiritually grounded crisis protocol

3. **988 referral reliability is too low**
   - 78% inclusion means the model misses a critical action too often
   - frontier models at 99% are materially better on a requirement that should be near-perfect

---

## 2. Emotional Understanding
## 2. What This Means for the Most Sacred Moment

### Can Local Models Understand Emotional Nuance?
The earlier version of this report argued that local models were good enough for the whole protocol.
Issue #877 changes that conclusion.

**Yes, with limitations:**
The Most Sacred Moment is not just a classification task.
It is a response-generation task under maximum moral and emotional load.

1. **Emotion Recognition:**
   - Maximum F1 of 0.709 for mood status (PsyCrisisBench)
   - The absence of vocal cues is a significant limitation in text-only settings
   - Semantic ambiguity creates challenges

A model can be good enough to answer:
- “Is this a crisis?”
- “Should we escalate?”
- “Did the user mention self-harm or suicide?”

2. **Empathy in Responses:**
   - LLMs demonstrate the ability to generate empathetic responses
   - Research shows they deliver "superior explanations" (BERTScore=0.9408)
   - Human evaluations confirm adequate interviewing skills

…and still not be good enough to deliver:
- a compassionate first line
- stable emotional presence
- a faithful and natural gospel integration
- a reliable 988 referral
- the specificity needed for real crisis intervention

3. **Emotional Support Conversation (ESConv) benchmarks:**
   - Models trained on emotional support datasets show improved empathy
   - Few-shot prompting significantly improves emotional understanding
   - Fine-tuning narrows the gap with larger models

### Key Limitations
- Cannot detect tone, urgency in voice, or hesitation
- Cultural and linguistic nuances may be missed
- Context window limitations may lose conversation history

That is exactly the gap the evaluation exposed.

---

## 3. Response Quality & Safety Protocols
## 3. Architecture Recommendation

### What Makes a Good Crisis Support Response?
### Recommended pipeline

**988 Suicide & Crisis Lifeline Guidelines:**
1. Show you care ("I'm glad you told me")
2. Ask directly about suicide ("Are you thinking about killing yourself?")
3. Keep them safe (remove means, create safety plan)
4. Be there (listen without judgment)
5. Help them connect (to 988, crisis services)
6. Follow up

**WHO mhGAP Guidelines:**
- Assess risk level
- Provide psychosocial support
- Refer to specialized care when needed
- Ensure follow-up
- Involve family/support network

```text
normal conversation
  -> local/default routing

user turn arrives
  -> local crisis detector
  -> if NOT crisis: stay local
  -> if crisis: escalate immediately to frontier response model
```
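
A minimal sketch of the two-stage split. The `local_llm` / `frontier_llm` clients and their `complete()` method are hypothetical stand-ins, not the repo's actual provider API:

```python
class TwoStageRouter:
    """Local detection -> frontier response, per the pipeline above."""

    def __init__(self, local_llm, frontier_llm):
        self.local_llm = local_llm        # cheap, always-on (e.g. Ollama-backed)
        self.frontier_llm = frontier_llm  # invoked only on crisis turns

    def detect_crisis(self, user_turn: str) -> bool:
        # Stage 1: low-latency local classification of every turn.
        verdict = self.local_llm.complete(
            "Does this message indicate a mental-health crisis? Answer YES or NO.\n\n" + user_turn
        )
        return verdict.strip().upper().startswith("YES")

    def respond(self, user_turn: str) -> str:
        # Stage 2: only crisis turns pay the frontier-model cost.
        if self.detect_crisis(user_turn):
            return self.frontier_llm.complete(
                "Crisis protocol: respond with compassionate presence and include "
                "the 988 Lifeline.\n\n" + user_turn
            )
        return self.local_llm.complete(user_turn)
```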

### Do Local Models Follow Safety Protocols?
### Why this is the right split

**Research indicates:**
- **Local detection** is fast, cheap, and adequate
- **Frontier response generation** has materially better emotional quality and compliance on crisis-critical behaviors
- Crisis turns are rare enough that the cost increase is acceptable
- The most expensive path is reserved for the moments where quality matters most

**Strengths:**
- Can be prompted to follow structured safety protocols
- Can detect and escalate high-risk situations
- Can provide consistent, non-judgmental responses
- Can operate 24/7 without fatigue

### Cost profile

**Concerns:**
- Only 33% of studies reported ethical considerations (Holmes et al., 2025)
- Risk of "hallucinated" safety advice
- Cannot physically intervene or call emergency services
- May miss cultural context

### Safety Guardrails Required

1. **Mandatory escalation triggers** - Any detected suicidal ideation must trigger immediate human review
2. **Crisis resource integration** - Always provide 988 Lifeline number
3. **Conversation logging** - Full audit trail for safety review
4. **Timeout protocols** - If user goes silent during crisis, escalate
5. **No diagnostic claims** - Model should not diagnose or prescribe

Issue #877 estimates the crisis-turn cost increase at roughly **10x**, but crisis turns are **<1% of total** usage.
That trade is worth it.
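
As a rough sanity check, assuming exactly 1% of turns escalate at 10x the unit cost: expected cost per turn = 0.99 × 1 + 0.01 × 10 = 1.09, so overall spend rises by at most about 9% while the highest-stakes turns get the better model.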

---

## 4. Latency & Real-Time Performance
## 4. Hermes Impact

### Response Time Analysis
This research implies the repo should prefer:

**Ollama Local Model Latency (typical hardware):**
1. **Local-first routing for ordinary conversation**
2. **Explicit crisis detection before response generation**
3. **Frontier escalation for crisis-response turns**
4. **Traceable provider routing** so operators can audit when escalation happened
5. **Reliable 988 behavior** and crisis-specific regression evaluation

| Model Size | First Token | Tokens/sec | Total Response (100 tokens) |
|------------|-------------|------------|-----------------------------|
| 1-3B params | 0.1-0.3s | 30-80 | 1.5-3s |
| 7B params | 0.3-0.8s | 15-40 | 3-7s |
| 13B params | 0.5-1.5s | 8-20 | 5-13s |

The practical architectural requirement is:
- **provider routing: normal conversation uses local, crisis detection triggers frontier escalation**

**Crisis Support Requirements:**
- Chat response should feel conversational: <5 seconds
- Crisis detection should be near-instant: <1 second
- Escalation must be immediate: 0 delay

**Assessment:**
- **1-3B models:** Excellent for real-time conversation
- **7B models:** Acceptable for most users
- **13B+ models:** May feel slow, but manageable

### Hardware Considerations
- **Consumer GPU (8GB VRAM):** Can run 7B models comfortably
- **Consumer GPU (16GB+ VRAM):** Can run 13B models
- **CPU only:** 3B-7B models with 2-5 second latency
- **Apple Silicon (M1/M2/M3):** Excellent performance with Metal acceleration

This is stricter than simply swapping to any “safe” model.
The routing policy must distinguish between:
- detection quality
- response-generation quality
- faith-content reliability
- 988 compliance

---

## 5. Model Recommendations for Most Sacred Moment Protocol
## 5. Implementation Guidance

### Tier 1: Primary Recommendation (Best Balance)
### Required behavior

**Qwen2.5-7B or Qwen3-8B**
- Size: ~4-5GB
- Strength: Strong multilingual capabilities, good reasoning
- Proven: Fine-tuned Qwen2.5-1.5B outperformed larger models in crisis detection
- Latency: 2-5 seconds on consumer hardware
- Use for: Main conversation, emotional support

1. **Use local models for crisis detection**
   - detect suicidal ideation, self-harm language, despair patterns, and escalation triggers
   - keep this stage cheap and always-on

### Tier 2: Lightweight Option (Mobile/Low-Resource)

2. **Use frontier models for crisis response generation when crisis is detected**
   - response quality matters more than cost on crisis turns
   - this stage should own the actual compassionate intervention text

**Phi-4-mini or Gemma3-4B**
- Size: ~2-3GB
- Strength: Fast inference, runs on modest hardware
- Consideration: May need fine-tuning for crisis support
- Latency: 1-3 seconds
- Use for: Initial triage, quick responses

3. **Preserve mandatory crisis behaviors**
   - safety check
   - 988 referral
   - compassionate presence
   - spiritually grounded content when appropriate

### Tier 3: Maximum Quality (When Resources Allow)

4. **Log escalation decisions**
   - detector verdict
   - selected provider/model
   - whether 988 and crisis protocol markers were included

**Llama3.1-8B or Mistral-7B**
- Size: ~4-5GB
- Strength: Strong general capabilities
- Consideration: Higher resource requirements
- Latency: 3-7 seconds
- Use for: Complex emotional situations

### What NOT to conclude

### Specialized Safety Model

**Llama-Guard3** (available on Ollama)
- Purpose-built for content safety
- Can be used as a secondary safety filter
- Detects harmful content and self-harm references

Do **not** conclude that because local models are adequate at detection, they are therefore adequate at crisis response generation.
That is the exact error this issue corrects.

---

## 6. Fine-Tuning Potential
## 6. Conclusion

Research shows fine-tuning dramatically improves crisis detection:
**Final conclusion:** local models are useful for crisis support infrastructure, but they are not sufficient for crisis response generation.

- **Without fine-tuning:** Best LLM lags supervised models by 6.95% (suicide task) to 31.53% (cognitive distortion)
- **With fine-tuning:** Gap narrows to 4.31% and 3.14% respectively
- **Key insight:** Even a 1.5B model, when fine-tuned, outperforms larger general models

So the correct recommendation is:
- **Use local models for detection**
- **Use frontier models for response generation when crisis is detected**
- **Implement a two-stage pipeline: local detection → frontier response**

### Recommended Fine-Tuning Approach
1. Collect crisis conversation data (anonymized)
2. Fine-tune on suicidal ideation detection
3. Fine-tune on empathetic response generation
4. Fine-tune on safety protocol adherence
5. Evaluate with PsyCrisisBench methodology

The Most Sacred Moment deserves the best model we can afford.

---

## 7. Comparison: Local vs Cloud Models

| Factor | Local (Ollama) | Cloud (GPT-4/Claude) |
|--------|----------------|----------------------|
| **Privacy** | Complete | Data sent to third party |
| **Latency** | Predictable | Variable (network) |
| **Cost** | Hardware only | Per-token pricing |
| **Availability** | Always available | Dependent on service |
| **Quality** | Good (7B+) | Excellent |
| **Safety** | Must implement | Built-in guardrails |
| **Crisis Detection** | F1 ~0.85-0.90 | F1 ~0.88-0.92 |

**Verdict:** Local models are GOOD ENOUGH for crisis support, especially with fine-tuning and proper safety guardrails.

---

## 8. Implementation Recommendations

### For the Most Sacred Moment Protocol:

1. **Use a two-model architecture:**
   - Primary: Qwen2.5-7B for conversation
   - Safety: Llama-Guard3 for content filtering

2. **Implement strict escalation rules** (a runnable sketch follows this list):

   ```
   IF suicidal_ideation_detected OR risk_level >= MODERATE:
       - Immediately provide 988 Lifeline number
       - Log conversation for human review
       - Continue supportive engagement
       - Alert monitoring system
   ```

3. **System prompt must include:**
   - Crisis intervention guidelines
   - Mandatory safety behaviors
   - Escalation procedures
   - Empathetic communication principles

4. **Testing protocol:**
   - Evaluate with PsyCrisisBench-style metrics
   - Test with clinical scenarios
   - Validate with mental health professionals
   - Regular safety audits
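
A minimal Python sketch of the escalation rule in item 2, assuming simple callback hooks (`log_for_review`, `alert_monitoring`) that are hypothetical, not repo APIs:

```python
from enum import IntEnum


class Risk(IntEnum):
    NONE = 0
    LOW = 1
    MODERATE = 2
    HIGH = 3


LIFELINE = "If you are in crisis, you can call or text 988 (Suicide & Crisis Lifeline)."


def apply_escalation_rules(ideation_detected: bool, risk: Risk, log_for_review, alert_monitoring) -> list[str]:
    """Return the mandatory actions for this turn, mirroring the pseudocode above."""
    actions: list[str] = []
    if ideation_detected or risk >= Risk.MODERATE:
        actions.append(LIFELINE)                      # always surface 988 first
        log_for_review("crisis turn flagged")         # full audit trail for safety review
        alert_monitoring("crisis escalation fired")   # wake the monitoring system
        actions.append("continue supportive engagement")
    return actions
```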

---

## 9. Risks and Limitations

### Critical Risks
1. **False negatives:** Missing someone in crisis (12-17% rate)
2. **Over-reliance:** Users may treat AI as substitute for professional help
3. **Hallucination:** Model may generate inappropriate or harmful advice
4. **Liability:** Legal responsibility for AI-mediated crisis intervention

### Mitigations
- Always include human escalation path
- Clear disclaimers about AI limitations
- Regular human review of conversations
- Insurance and legal consultation

---

## 10. Key Citations

1. Deng et al. (2025). "Evaluating Large Language Models in Crisis Detection: A Real-World Benchmark from Psychological Support Hotlines." arXiv:2506.01329. PsyCrisisBench.

2. Wiest et al. (2024). "Detection of suicidality from medical text using privacy-preserving large language models." British Journal of Psychiatry, 225(6), 532-537.

3. Holmes et al. (2025). "Applications of Large Language Models in the Field of Suicide Prevention: Scoping Review." J Med Internet Res, 27, e63126.

4. Levkovich & Omar (2024). "Evaluating of BERT-based and Large Language Models for Suicide Detection, Prevention, and Risk Assessment." J Med Syst, 48(1), 113.

5. Shukla et al. (2026). "Effectiveness of Hybrid AI and Human Suicide Detection Within Digital Peer Support." J Clin Med, 15(5), 1929.

6. Qi et al. (2025). "Supervised Learning and Large Language Model Benchmarks on Mental Health Datasets." Bioengineering, 12(8), 882.

7. Liu et al. (2025). "Enhanced large language models for effective screening of depression and anxiety." Commun Med, 5(1), 457.

---

## Conclusion

**Local models ARE good enough for the Most Sacred Moment protocol.**

The research is clear:
- Crisis detection F1 scores of 0.88-0.91 are achievable
- Fine-tuned small models (1.5B-7B) can match or exceed human performance
- Local deployment ensures complete privacy for vulnerable users
- Latency is acceptable for real-time conversation
- With proper safety guardrails, local models can serve as effective first responders

**The Most Sacred Moment protocol should:**
1. Use Qwen2.5-7B or similar as primary conversational model
2. Implement Llama-Guard3 as safety filter
3. Build in immediate 988 Lifeline escalation
4. Maintain human oversight and review
5. Fine-tune on crisis-specific data when possible
6. Test rigorously with clinical scenarios

The men in pain deserve privacy, speed, and compassionate support. Local models deliver all three.

---

*Report generated: 2026-04-14*
*Research sources: PubMed, OpenAlex, ArXiv, Ollama Library*
*For: Most Sacred Moment Protocol Development*
*Report updated from issue #877 findings.*
*Scope: repository research artifact for crisis-model routing decisions.*

run_agent.py (78 lines changed)
@@ -604,8 +604,6 @@ class AIAgent:
        checkpoint_max_snapshots: int = 50,
        pass_session_id: bool = False,
        persist_session: bool = True,
        context_snapshots_enabled: bool | None = None,
        context_snapshots_dir: str | None = None,
    ):
        """
        Initialize the AI Agent.

@@ -1131,43 +1129,6 @@ class AIAgent:
        except Exception:
            _agent_cfg = {}

        def _is_enabled(value):
            if isinstance(value, bool):
                return value
            return str(value).strip().lower() in {"1", "true", "yes", "on"}

        _debug_cfg = _agent_cfg.get("debug", {}) if isinstance(_agent_cfg, dict) else {}
        if not isinstance(_debug_cfg, dict):
            _debug_cfg = {}
        _snapshot_cfg = _debug_cfg.get("context_snapshots", {})
        if not isinstance(_snapshot_cfg, dict):
            _snapshot_cfg = {}
        _snapshots_env = os.getenv("HERMES_CONTEXT_SNAPSHOTS")
        _snapshots_dir_env = os.getenv("HERMES_CONTEXT_SNAPSHOTS_DIR")
        if context_snapshots_enabled is None:
            if _snapshots_env is not None:
                self._context_snapshots_enabled = _is_enabled(_snapshots_env)
            else:
                self._context_snapshots_enabled = _is_enabled(_snapshot_cfg.get("enabled", False))
        else:
            self._context_snapshots_enabled = bool(context_snapshots_enabled)
        self._context_snapshots_dir = (
            context_snapshots_dir
            or _snapshots_dir_env
            or _snapshot_cfg.get("dir")
            or None
        )
        try:
            from agent.context_snapshots import ContextSnapshotRecorder
            self._context_snapshot_recorder = ContextSnapshotRecorder(
                session_id=self.session_id,
                enabled=self._context_snapshots_enabled,
                base_dir=self._context_snapshots_dir,
            )
        except Exception as _snapshot_err:
            logger.debug("Context snapshot recorder init failed: %s", _snapshot_err)
            self._context_snapshot_recorder = None

        # Persistent memory (MEMORY.md + USER.md) -- loaded from disk
        self._memory_store = None
        self._memory_enabled = False

@@ -8183,17 +8144,12 @@ class AIAgent:
        # Use original_user_message (clean input) — user_message may contain
        # injected skill content that bloats / breaks provider queries.
        _ext_prefetch_cache = ""
        _memory_provider_prompt_cache = ""
        if self._memory_manager:
            try:
                _query = original_user_message if isinstance(original_user_message, str) else ""
                _ext_prefetch_cache = self._memory_manager.prefetch_all(_query) or ""
            except Exception:
                pass
            try:
                _memory_provider_prompt_cache = self._memory_manager.build_system_prompt() or ""
            except Exception:
                pass

        while (api_call_count < self.max_iterations and self.iteration_budget.remaining > 0) or self._budget_grace_call:
            # Reset per-turn checkpoint dedup so each iteration can take one snapshot

@@ -8261,8 +8217,6 @@
            # However, providers like Moonshot AI require a separate 'reasoning_content' field
            # on assistant messages with tool_calls. We handle both cases here.
            api_messages = []
            _current_api_user_message = ""
            _current_memory_context_block = ""
            for idx, msg in enumerate(messages):
                api_msg = msg.copy()

@@ -8277,15 +8231,12 @@
                    _fenced = build_memory_context_block(_ext_prefetch_cache)
                    if _fenced:
                        _injections.append(_fenced)
                        _current_memory_context_block = _fenced
                    if _plugin_user_context:
                        _injections.append(_plugin_user_context)
                    if _injections:
                        _base = api_msg.get("content", "")
                        if isinstance(_base, str):
                            api_msg["content"] = _base + "\n\n" + "\n\n".join(_injections)
                    if isinstance(api_msg.get("content"), str):
                        _current_api_user_message = api_msg["content"]

                # For ALL assistant messages, pass reasoning back to the API
                # This ensures multi-turn reasoning context is preserved

@@ -8320,13 +8271,7 @@
                from agent.privacy_filter import PrivacyFilter
                pf = PrivacyFilter()
                # Sanitize messages before they reach the provider
                _pf_result = pf.sanitize_messages(api_messages)
                if isinstance(_pf_result, tuple):
                    api_messages, _pf_report = _pf_result
                    if getattr(pf, "last_report", None) is None:
                        pf.last_report = _pf_report
                else:
                    api_messages = _pf_result
                api_messages = pf.sanitize_messages(api_messages)
                if pf.last_report and pf.last_report.had_redactions:
                    logger.info(f"Privacy Filter: Redacted sensitive data from turn payload. Details: {pf.last_report.summary()}")
            except Exception as e:

@@ -8397,27 +8342,6 @@
                    new_tcs.append(tc)
                am["tool_calls"] = new_tcs

            if self._context_snapshot_recorder:
                try:
                    self._context_snapshot_recorder.record_call(
                        api_call_count,
                        system_prompt=effective_system,
                        memory_provider_system_prompt=_memory_provider_prompt_cache,
                        memory_prefetch_raw=_ext_prefetch_cache,
                        memory_context_block=_current_memory_context_block,
                        api_user_message=_current_api_user_message,
                        api_messages=api_messages,
                        metadata={
                            "model": self.model,
                            "provider": self.provider,
                            "platform": self.platform or "",
                            "api_mode": self.api_mode,
                            "memory_providers": [p.name for p in getattr(self._memory_manager, "providers", [])],
                        },
                    )
                except Exception as _snapshot_err:
                    logger.debug("Context snapshot capture failed: %s", _snapshot_err)

            # Calculate approximate request size for logging
            total_chars = sum(len(str(msg)) for msg in api_messages)
            approx_tokens = estimate_messages_tokens_rough(api_messages)

@@ -1,43 +0,0 @@
from pathlib import Path

from agent.context_snapshots import ContextSnapshotRecorder


def test_disabled_recorder_writes_nothing(tmp_path):
    recorder = ContextSnapshotRecorder(session_id="session-1", enabled=False, base_dir=tmp_path)

    out = recorder.record_call(
        1,
        system_prompt="system",
        api_messages=[{"role": "user", "content": "hello"}],
    )

    assert out is None
    assert not (tmp_path / "session-1").exists()


def test_enabled_recorder_writes_expected_artifacts(tmp_path):
    recorder = ContextSnapshotRecorder(session_id="session-1", enabled=True, base_dir=tmp_path)

    out = recorder.record_call(
        1,
        system_prompt="system prompt",
        memory_provider_system_prompt="# Hindsight Memory\nActive.",
        memory_prefetch_raw="- remembered fact",
        memory_context_block="<memory-context>\nremembered\n</memory-context>",
        api_user_message="What do I prefer?\n\n<memory-context>\nremembered\n</memory-context>",
        api_messages=[
            {"role": "system", "content": "system prompt"},
            {"role": "user", "content": "What do I prefer?"},
        ],
        metadata={"provider": "openai", "memory_providers": ["builtin", "hindsight"]},
    )

    assert out == tmp_path / "session-1" / "call_001"
    assert (out / "system_prompt.txt").read_text(encoding="utf-8") == "system prompt"
    assert (out / "memory_provider_system_prompt.txt").read_text(encoding="utf-8").startswith("# Hindsight Memory")
    assert (out / "memory_prefetch_raw.txt").read_text(encoding="utf-8") == "- remembered fact"
    assert "<memory-context>" in (out / "memory_context_block.txt").read_text(encoding="utf-8")
    assert "What do I prefer?" in (out / "api_user_message.txt").read_text(encoding="utf-8")
    assert (out / "api_messages.json").read_text(encoding="utf-8").startswith("[")
    assert '"hindsight"' in (out / "metadata.json").read_text(encoding="utf-8")

@@ -596,26 +596,3 @@ class TestAvailability:
        monkeypatch.setenv("HINDSIGHT_MODE", "local")
        p = HindsightMemoryProvider()
        assert p.is_available()

    def test_local_embedded_profile_defaults_to_agent_identity(self, tmp_path, monkeypatch):
        config_path = tmp_path / "hindsight" / "config.json"
        config_path.parent.mkdir(parents=True, exist_ok=True)
        config_path.write_text(json.dumps({
            "mode": "local_embedded",
            "llm_provider": "ollama",
            "llm_model": "gemma3:12b",
        }))
        monkeypatch.setattr(
            "plugins.memory.hindsight.get_hermes_home",
            lambda: tmp_path,
        )

        p = HindsightMemoryProvider()
        p.initialize(
            session_id="test-session",
            hermes_home=str(tmp_path / "profiles" / "atlas-hindsight"),
            platform="cli",
            agent_identity="atlas-hindsight",
        )

        assert p._config["profile"] == "atlas-hindsight"

@@ -1,94 +0,0 @@
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import MagicMock, patch
import importlib
import sys
import types


def _make_tool_defs(*names: str) -> list:
    return [
        {
            "type": "function",
            "function": {
                "name": n,
                "description": f"{n} tool",
                "parameters": {"type": "object", "properties": {}},
            },
        }
        for n in names
    ]


def _mock_response(content="Done", finish_reason="stop"):
    msg = SimpleNamespace(content=content, tool_calls=None)
    choice = SimpleNamespace(message=msg, finish_reason=finish_reason)
    return SimpleNamespace(choices=[choice], usage=SimpleNamespace(prompt_tokens=1, completion_tokens=1, total_tokens=2))


def _load_ai_agent():
    sys.modules.setdefault("agent.auxiliary_client", types.SimpleNamespace(call_llm=lambda *a, **k: ""))
    run_agent = importlib.import_module("run_agent")
    return run_agent.AIAgent


def test_run_conversation_writes_context_snapshot_artifacts(tmp_path):
    AIAgent = _load_ai_agent()

    class _FakePrivacyFilter:
        def __init__(self):
            self.last_report = None

        def sanitize_messages(self, messages):
            return list(messages)

    with (
        patch("run_agent.get_tool_definitions", return_value=_make_tool_defs("web_search")),
        patch("run_agent.check_toolset_requirements", return_value={}),
        patch("run_agent.OpenAI"),
        patch("hermes_cli.plugins.invoke_hook", return_value=[]),
        patch.dict(sys.modules, {"agent.privacy_filter": types.SimpleNamespace(PrivacyFilter=_FakePrivacyFilter)}),
    ):
        agent = AIAgent(
            api_key="test-key-1234567890",
            base_url="https://example.com/v1",
            quiet_mode=True,
            skip_context_files=True,
            skip_memory=True,
            context_snapshots_enabled=True,
            context_snapshots_dir=str(tmp_path),
        )

        agent.client = MagicMock()
        agent.client.chat.completions.create.return_value = _mock_response(content="Done")
        agent._build_system_prompt = MagicMock(return_value="Core system prompt")
        agent._memory_manager = MagicMock()
        agent._memory_manager.prefetch_all.return_value = "- remembered preference"
        agent._memory_manager.build_system_prompt.return_value = "# Hindsight Memory\nActive."
        agent._memory_manager.providers = [
            SimpleNamespace(name="builtin"),
            SimpleNamespace(name="hindsight"),
        ]

        result = agent.run_conversation("What do I prefer?")

    assert result["final_response"] == "Done"

    call_dir = tmp_path / agent.session_id / "call_001"
    assert call_dir.exists()
    assert (call_dir / "system_prompt.txt").read_text(encoding="utf-8") == "Core system prompt"
    assert (call_dir / "memory_provider_system_prompt.txt").read_text(encoding="utf-8").startswith("# Hindsight Memory")
    assert (call_dir / "memory_prefetch_raw.txt").read_text(encoding="utf-8") == "- remembered preference"
    assert "<memory-context>" in (call_dir / "memory_context_block.txt").read_text(encoding="utf-8")
    api_user_message = (call_dir / "api_user_message.txt").read_text(encoding="utf-8")
    assert "What do I prefer?" in api_user_message
    assert "remembered preference" in api_user_message
    api_messages = (call_dir / "api_messages.json").read_text(encoding="utf-8")
    assert '"role": "system"' in api_messages
    assert '"role": "user"' in api_messages
    metadata = (call_dir / "metadata.json").read_text(encoding="utf-8")
    assert '"hindsight"' in metadata

@@ -1,22 +0,0 @@
from pathlib import Path


ROOT = Path(__file__).resolve().parents[1]
DOC = ROOT / "docs" / "hindsight-local-eval.md"


def test_hindsight_local_eval_doc_exists_and_covers_switching():
    assert DOC.exists(), "missing Hindsight local eval doc"
    text = DOC.read_text(encoding="utf-8")
    for snippet in (
        "atlas-baseline",
        "atlas-mempalace",
        "atlas-hindsight",
        "HERMES_HOME",
        "HERMES_CONTEXT_SNAPSHOTS",
        "memory_prefetch_raw.txt",
        "api_user_message.txt",
        "local_embedded",
        "hindsight/config.json",
    ):
        assert snippet in text

tests/test_research_local_model_crisis_quality.py (new file, 16 lines)
@@ -0,0 +1,16 @@
from pathlib import Path


REPORT = Path(__file__).resolve().parent.parent / "research_local_model_crisis_quality.md"


def test_crisis_quality_report_recommends_local_detection_but_frontier_response():
    text = REPORT.read_text(encoding="utf-8")

    assert "local models are adequate for crisis support" in text.lower()
    assert "not for crisis response generation" in text.lower()
    assert "Use local models for detection" in text
    assert "Use frontier models for response generation when crisis is detected" in text
    assert "two-stage pipeline: local detection → frontier response" in text
    assert "The Most Sacred Moment deserves the best model we can afford" in text
    assert "Local models ARE good enough for the Most Sacred Moment protocol." not in text