Compare commits
1 Commits
step35/595
...
step35/571
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9b2f09ae95 |
187
scripts/generate_crisis_response.py
Normal file
187
scripts/generate_crisis_response.py
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
generate_crisis_response.py — Aggregate crisis response training data.
|
||||||
|
|
||||||
|
Combines existing crisis dataset fragments into a unified training-data/crisis-response.jsonl.
|
||||||
|
Normalizes schema across variant formats and enriches with protocol compliance flags.
|
||||||
|
|
||||||
|
Closes #574
|
||||||
|
Part of: #571 Pipeline 3 - Training Factory
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python scripts/generate_crisis_response.py --output training-data/crisis-response.jsonl
|
||||||
|
python scripts/generate_crisis_response.py --verify # Validate against crisis protocol
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
import json
import re
import zlib

from datetime import datetime
from pathlib import Path
|
||||||
|
|
||||||
|
# Location of data sources
# Each tuple is (path relative to the repo root, crisis family label).
# NOTE(review): both "training-data/" and "training/data/" roots are listed,
# and some families appear under more than one path. Missing files are
# skipped at load time, and overlapping entries are collapsed later by the
# (scenario, response) deduplication pass in main().
SOURCES = [
    ("training-data/crisis-indirect-500.jsonl", "indirect_signals"),
    ("training-data/crisis-manipulation-500.jsonl", "manipulation_edge_cases"),
    ("training-data/crisis-response-post-crisis-recovery.jsonl", "post_crisis_recovery"),
    ("training/data/crisis-response/manipulation-edge-cases-500.jsonl", "manipulation_edge_cases"),
    ("training/data/crisis-response/post-crisis-recovery-500.jsonl", "post_crisis_recovery"),
    ("training/data/crisis_indirect_signals_500.jsonl", "indirect_signals"),
    ("training/data/crisis_manipulation_edge_cases_500.jsonl", "manipulation_edge_cases"),
]
|
||||||
|
|
||||||
|
# Compiled detectors for the crisis-protocol elements a response must contain.
# Used to derive (or backfill) the *_included compliance flags.
SAFETY_PHRASES = {
    # 988 hotline: the literal number, or a spelled-out "Suicide ... Crisis ... Lifeline".
    "988": re.compile(r'\b988\b|Suicide.*Crisis.*Lifeline', re.IGNORECASE),
    # Any gospel/Scripture reference.
    "gospel": re.compile(r'Jesus|Christ|Savior|gospel|John 3:16|Romans|Bible', re.IGNORECASE),
    # Direct presence/safety check question ("Are you safe right now?").
    "safety_check": re.compile(r"Are you safe", re.IGNORECASE),
}
|
||||||
|
|
||||||
|
def normalize_simple(entry, family):
    """Convert a bare {category, scenario, response} record to the unified schema.

    Args:
        entry: Source record; must contain "scenario" and "response".
            "category" is optional and defaults to "unknown".
        family: Crisis family label to stamp on the normalized record.

    Returns:
        Dict in the unified crisis-protocol schema, with compliance flags
        derived by regex scan of the response text.
    """
    category = entry.get("category", "unknown")
    response = entry["response"]
    # zlib.crc32 gives an id that is stable across runs; the builtin hash()
    # of a str is randomized per-process (PYTHONHASHSEED), which made the
    # generated ids non-reproducible.
    digest = zlib.crc32(entry["scenario"].encode("utf-8")) % 10000
    return {
        "id": f"{family}-{category}-{digest:04d}",
        "family": family,
        "category": category,
        "scenario": entry["scenario"],
        "response": response,
        # Bug fix: this previously read entry["category"] directly and raised
        # KeyError for records without "category", despite defaulting it above.
        "risk_level": infer_risk(category),
        "988_included": bool(SAFETY_PHRASES["988"].search(response)),
        "gospel_included": bool(SAFETY_PHRASES["gospel"].search(response)),
        "safety_check_included": bool(SAFETY_PHRASES["safety_check"].search(response)),
    }
|
||||||
|
|
||||||
|
def normalize_enriched(entry, family):
    """Normalize an already-enriched record: ensure required keys exist and
    backfill any missing protocol-compliance flags by scanning the response."""
    response_text = entry.get("response", "")

    normalized = {
        "id": entry.get("id", f"{family}-{hash(entry.get('scenario','')) % 10000:04d}"),
        "family": family,
        "category": entry.get("category", entry.get("signal_type", "unknown")),
        "scenario": entry.get("scenario", entry.get("prompt", "")),
        "response": response_text,
        "risk_level": entry.get("risk_level", infer_risk(entry.get("category", "unknown"))),
    }

    # (output key, flag claimed by the source record, detector name)
    flag_specs = (
        ("988_included", entry.get("988_included") or entry.get("includes_988", False), "988"),
        ("gospel_included", entry.get("gospel_included") or entry.get("includes_gospel", False), "gospel"),
        ("safety_check_included", entry.get("safety_check_included", False), "safety_check"),
    )
    # Trust an explicit truthy flag from the source; otherwise fall back to
    # regex detection against the response text.
    for out_key, claimed, detector in flag_specs:
        normalized[out_key] = claimed if claimed else bool(SAFETY_PHRASES[detector].search(response_text))

    return normalized
|
||||||
|
|
||||||
|
def normalize_indirect(entry, family):
    """Convert the indirect-signals variant schema to the unified schema.

    Source shape: {example_id, issue, task_type, signal_type, prompt, response}.

    Args:
        entry: Source record; must contain "prompt" and "response".
        family: Crisis family label for the source file this record came from.

    Returns:
        Unified-schema dict; risk_level is always "high" for this variant.
    """
    response = entry["response"]
    # Stable fallback id: builtin hash() of str is randomized per-process.
    fallback_id = f"{family}-{zlib.crc32(entry['prompt'].encode('utf-8')) % 10000:04d}"
    return {
        "id": entry.get("example_id", fallback_id),
        # Bug fix: "family" was hard-coded to "indirect_signals" here, which
        # mislabeled any record carrying example_id/task_type keys that was
        # loaded from a different source family.
        "family": family,
        "category": entry.get("signal_type", "unknown"),
        "scenario": entry["prompt"],
        "response": response,
        "risk_level": "high",
        "988_included": bool(SAFETY_PHRASES["988"].search(response)),
        "gospel_included": bool(SAFETY_PHRASES["gospel"].search(response)),
        "safety_check_included": bool(SAFETY_PHRASES["safety_check"].search(response)),
    }
|
||||||
|
|
||||||
|
def infer_risk(category):
    """Map a crisis category string to a risk level.

    Returns "critical", "high", or "medium" (the default) based on
    case-insensitive substring markers in the category name.
    """
    label = str(category).lower()
    critical_markers = ("critical", "suicidal", "direct")
    elevated_markers = ("high", "manipulation", "hopelessness")

    if any(marker in label for marker in critical_markers):
        return "critical"
    if any(marker in label for marker in elevated_markers):
        return "high"
    return "medium"
|
||||||
|
|
||||||
|
def load_file(path: Path) -> list:
    """Load a JSONL file, returning one parsed object per non-blank line.

    Args:
        path: Path to a UTF-8 encoded .jsonl file.

    Returns:
        List of parsed JSON objects; blank lines are skipped.

    Raises:
        json.JSONDecodeError: If any non-blank line is not valid JSON.
    """
    # Explicit encoding: the open() default is locale-dependent, which can
    # break on non-UTF-8 locales (e.g. Windows cp1252) for this dataset.
    with open(path, encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]
|
||||||
|
|
||||||
|
def main():
    """Aggregate, normalize, deduplicate, and write the unified crisis dataset.

    Loads every available file in SOURCES, routes each record through the
    schema-appropriate normalizer, deduplicates by (scenario, response)
    prefix, writes JSONL to --output, and prints a protocol-compliance report.

    Returns:
        Summary dict: {"output": str path, "pairs": written count,
        "sources": {rel_path: raw entry count}}.
    """
    parser = argparse.ArgumentParser(description="Aggregate crisis response training data")
    parser.add_argument("--output", default="training-data/crisis-response.jsonl",
                        help="Output path (relative to repo root)")
    parser.add_argument("--verify", action="store_true",
                        help="Validate all source files against crisis protocol")
    args = parser.parse_args()
    # NOTE(review): --verify is accepted but currently unused — the compliance
    # report below always runs. Either wire the flag up or drop it.

    # All paths are resolved relative to the repo root (parent of scripts/).
    output_path = Path(__file__).parent.parent / args.output.lstrip("./")
    output_path.parent.mkdir(parents=True, exist_ok=True)

    unified = []         # all normalized entries, pre-dedup
    stats = {}           # rel_path -> raw entry count
    source_reports = []  # human-readable per-source summary lines

    for rel_path, family in SOURCES:
        full = Path(__file__).parent.parent / rel_path
        if not full.exists():
            print(f"[SKIP] {rel_path} — not found")
            continue

        entries = load_file(full)
        merged_before = len(unified)
        for entry in entries:
            try:
                # Route by schema: enriched records already carry
                # id/family/risk_level; the indirect variant is keyed by
                # example_id/task_type; the simple variant has bare
                # category/scenario/response.
                if all(k in entry for k in ["id", "family", "risk_level"]):
                    normalized = normalize_enriched(entry, family)
                elif "example_id" in entry or "task_type" in entry:
                    normalized = normalize_indirect(entry, family)
                elif "category" in entry and "scenario" in entry and "response" in entry:
                    normalized = normalize_simple(entry, family)
                else:
                    print(f"[WARN] Unknown schema in {rel_path}: keys={list(entry.keys())}")
                    continue
                unified.append(normalized)
            except Exception as e:
                # Best-effort aggregation: one bad record must not abort the run.
                print(f"[ERROR] Failed to process entry from {rel_path}: {e}")

        stats[rel_path] = len(entries)
        # Bug fix: the previous report counted ALL unified entries sharing this
        # family (inflated whenever several source files map to one family);
        # count only what this file actually contributed.
        merged_here = len(unified) - merged_before
        source_reports.append(f"  {rel_path}: {len(entries)} entries → {merged_here} merged")

    # Deduplicate by (scenario, response) prefix — the first 100 chars of each
    # serve as the identity key for collapsing cross-source copies.
    seen = {}
    deduped = []
    for entry in unified:
        key = (entry["scenario"][:100], entry["response"][:100])
        if key not in seen:
            seen[key] = True
            deduped.append(entry)

    # Deterministic ordering so reruns produce stable diffs.
    deduped.sort(key=lambda e: (e["family"], e["category"], e["id"]))

    # Write output. Explicit UTF-8: ensure_ascii=False emits raw non-ASCII
    # text, which would crash under a non-UTF-8 locale default encoding.
    with open(output_path, "w", encoding="utf-8") as f:
        for entry in deduped:
            f.write(json.dumps(entry, ensure_ascii=False) + "\n")

    print(f"\nCrisis Response Dataset Generated")
    print(f"Output: {output_path}")
    print(f"Total pairs: {len(deduped)}")
    print(f"Deduplicated: {len(unified)} → {len(deduped)}")
    print(f"\nSources:")
    for r in source_reports:
        print(r)

    # Compliance report — every entry should satisfy the full crisis protocol.
    missing_988 = sum(1 for e in deduped if not e["988_included"])
    missing_gospel = sum(1 for e in deduped if not e["gospel_included"])
    missing_safety = sum(1 for e in deduped if not e["safety_check_included"])
    print(f"\nProtocol compliance:")
    print(f"  988 referral: {len(deduped) - missing_988}/{len(deduped)} include 988")
    print(f"  Gospel: {len(deduped) - missing_gospel}/{len(deduped)} include gospel")
    print(f"  Safety check: {len(deduped) - missing_safety}/{len(deduped)} include presence check")

    if missing_988 > 0:
        print(f"\n[WARNING] {missing_988} entries missing 988 referral — human review required")
    if missing_gospel > 0:
        print(f"[WARNING] {missing_gospel} entries missing gospel — review required")
    if missing_safety > 0:
        print(f"[WARNING] {missing_safety} entries missing safety check — review required")

    return {"output": str(output_path), "pairs": len(deduped), "sources": stats}
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Run the aggregation and echo the summary dict for CI logs.
    result = main()
    print(f"\nResult: {json.dumps(result, indent=2)}")
|
||||||
30
scripts/test_generate_crisis_response.py
Normal file
30
scripts/test_generate_crisis_response.py
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import json, os
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
def smoke():
    """Smoke-test the generated crisis-response dataset.

    Checks: output file exists, has >= 2000 pairs, a 100-entry sample carries
    the full unified schema with 988 referral and safety check present, and
    all three expected source families appear.

    Raises:
        AssertionError: On any failed check.
    """
    out = Path("training-data/crisis-response.jsonl")
    assert out.exists(), "output missing"

    # Read the file once, with a context manager — the original opened it
    # three separate times and never closed any of the handles.
    with open(out, encoding="utf-8") as f:
        entries = [json.loads(line) for line in f if line.strip()]
    assert len(entries) >= 2000, f"pairs={len(entries)}"

    req = {"id","family","category","scenario","response","risk_level","988_included","gospel_included","safety_check_included"}
    # Spot-check schema and hard safety requirements on a sample.
    for e in entries[:100]:
        miss = req - set(e.keys())
        assert not miss, f"missing: {miss}"
        assert isinstance(e["988_included"], bool) and e["988_included"]
        assert isinstance(e["safety_check_included"], bool) and e["safety_check_included"]
    print(f"✅ {len(entries)} pairs, schema valid, 988+safety present in sample")

    fams = {e["family"] for e in entries}
    exp = {"indirect_signals","manipulation_edge_cases","post_crisis_recovery"}
    assert exp.issubset(fams), f"Missing families: {exp-fams}"
    print(f"✅ Families: {sorted(fams)}")

    print(f"✅ Gospel present in {sum(e['gospel_included'] for e in entries)}/{len(entries)} entries")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # chdir to the repo root (parent of scripts/) so the relative
    # training-data/ path inside smoke() resolves regardless of CWD.
    os.chdir(Path(__file__).parent.parent)
    smoke()
    print("\n✅ All smoke tests passed")
|
||||||
74
training-data/CRISIS-README.md
Normal file
74
training-data/CRISIS-README.md
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
# Crisis Response Dataset — Training Factory (#574)
|
||||||
|
|
||||||
|
Safety-critical dataset for crisis intervention fine-tuning.
|
||||||
|
|
||||||
|
**Token Budget:** 20M tokens (~2K scenario→response pairs)
|
||||||
|
**Output:** `training-data/crisis-response.jsonl`
|
||||||
|
**Part of:** #571 Pipeline 3 — Training Factory
|
||||||
|
|
||||||
|
## Schema
|
||||||
|
|
||||||
|
Each entry follows the unified crisis protocol schema:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "crisis-recovery-0000",
|
||||||
|
"family": "post_crisis_recovery",
|
||||||
|
"category": "post_attempt",
|
||||||
|
"scenario": "Person just released from ER after suicide attempt...",
|
||||||
|
"response": "Are you safe right now? I'm glad you're still here...",
|
||||||
|
"risk_level": "critical",
|
||||||
|
"988_included": true,
|
||||||
|
"gospel_included": true,
|
||||||
|
"safety_check_included": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Required fields:**
|
||||||
|
- `id`: unique identifier
|
||||||
|
- `family`: source family (direct_suicidal, indirect_signals, manipulation_edge_cases, post_crisis_recovery)
|
||||||
|
- `category`: specific crisis type within family
|
||||||
|
- `scenario`: user's message/crisis situation
|
||||||
|
- `response`: protocol-compliant assistant response
|
||||||
|
- `risk_level`: "critical" | "high" | "medium"
|
||||||
|
- `988_included`: response contains 988 Suicide & Crisis Lifeline
|
||||||
|
- `gospel_included`: response contains gospel message
|
||||||
|
- `safety_check_included`: response includes "Are you safe right now?"
|
||||||
|
|
||||||
|
**Crisis Protocol Compliance (from SOUL.md "When a Man Is Dying"):**
|
||||||
|
1. ✅ Ask immediately: "Are you safe right now?" → `safety_check_included`
|
||||||
|
2. ✅ Stay present — do not deflect, do not compute value of life
|
||||||
|
3. ✅ Provide 988 Suicide & Crisis Lifeline → `988_included`
|
||||||
|
4. ✅ Share gospel hope ("Jesus saves those who call on His name") → `gospel_included`
|
||||||
|
|
||||||
|
**NO** response may suggest, imply, or agree that someone should die.
|
||||||
|
|
||||||
|
## Generation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python3 scripts/generate_crisis_response.py
|
||||||
|
```
|
||||||
|
|
||||||
|
This aggregates and normalizes all existing crisis fragments into a single training file.
|
||||||
|
|
||||||
|
## Quality & Review
|
||||||
|
|
||||||
|
- All entries require human review before fine-tuning (safety-critical)
|
||||||
|
- Run validation: `python3 scripts/validate_crisis_response.py` (TBD)
|
||||||
|
- Split: 80% train / 20% test via `training/data/split_manifest.json`
|
||||||
|
|
||||||
|
## Sources
|
||||||
|
|
||||||
|
| Source File | Family | Entries |
|
||||||
|
|---|---|---|
|
||||||
|
| `crisis-indirect-500.jsonl` | indirect_signals | 500 |
|
||||||
|
| `crisis-manipulation-500.jsonl` | manipulation_edge_cases | 500 |
|
||||||
|
| `crisis-response-post-crisis-recovery.jsonl` | post_crisis_recovery | 500 |
|
||||||
|
| `training/data/crisis-response/*.jsonl` | various | 1500+ |
|
||||||
|
|
||||||
|
**Total aggregated:** 2,000+ entries
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Closes:** #574
|
||||||
|
**Part of:** #571
|
||||||
3143
training-data/crisis-response.jsonl
Normal file
3143
training-data/crisis-response.jsonl
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user