Compare commits

..

4 Commits

Author SHA1 Message Date
Alexander Whitestone
212a5befba test: fix burn velocity alert coverage
Some checks failed
Self-Healing Smoke / self-healing-smoke (pull_request) Failing after 25s
Smoke Test / smoke (pull_request) Failing after 26s
Agent PR Gate / gate (pull_request) Failing after 34s
Agent PR Gate / report (pull_request) Successful in 8s
2026-04-22 14:45:13 -04:00
Alexander Whitestone
1eaf6df7d0 wip: add burn velocity tracker config and docs 2026-04-22 14:42:47 -04:00
Alexander Whitestone
735fafa235 wip: add burn velocity tracker script 2026-04-22 14:42:07 -04:00
Alexander Whitestone
e53d54f873 wip: add burn velocity tracker tests 2026-04-22 14:39:53 -04:00
8 changed files with 899 additions and 400 deletions

View File

@@ -0,0 +1,18 @@
{
"owner": "Timmy_Foundation",
"repos": [
"timmy-home",
"timmy-config",
"fleet-ops",
"the-beacon",
"the-door",
"the-nexus"
],
"lookback_days": 14,
"alert": {
"recent_days": 7,
"baseline_days": 7,
"minimum_baseline_closed": 4,
"drop_ratio": 0.6
}
}

View File

@@ -0,0 +1,70 @@
# Burn-down Velocity Tracking
Refs #519.
This repo-side slice adds a daily issue-velocity tracker in `scripts/burn_velocity_tracker.py` so timmy-home can generate one grounded packet for the timmy-config dashboard and one durable history file for trend lines.
## What it emits
Daily run outputs:
- `~/.timmy/burn-velocity/latest.json` — machine-readable payload for the timmy-config dashboard
- `~/.timmy/burn-velocity/latest.md` — operator-facing markdown summary
- `~/.timmy/burn-velocity/history.json` — per-day history for trend charts and alert review
Tracked repos live in `configs/burn_velocity_repos.json`.
## Cron command
```bash
cd ~/timmy-home && \
python3 scripts/burn_velocity_tracker.py \
--config configs/burn_velocity_repos.json \
--output-json ~/.timmy/burn-velocity/latest.json \
--output-md ~/.timmy/burn-velocity/latest.md \
--history-file ~/.timmy/burn-velocity/history.json \
--write-history
```
Example crontab entry:
```cron
0 6 * * * cd ~/timmy-home && python3 scripts/burn_velocity_tracker.py --config configs/burn_velocity_repos.json --output-json ~/.timmy/burn-velocity/latest.json --output-md ~/.timmy/burn-velocity/latest.md --history-file ~/.timmy/burn-velocity/history.json --write-history
```
## Dashboard handoff
The timmy-config dashboard should read `~/.timmy/burn-velocity/latest.json` and render, per repo:
- `open_now`
- `opened_last_7d`
- `closed_last_7d`
- `baseline_closed`
- `weekly_net`
- `alert.status`
- `alert.kind`
- `alert.reason`
Alert rows should highlight `velocity_drop` so operators can see when the recent 7-day close count drops under the configured baseline threshold.
## Alert policy
Alert settings are carried in `configs/burn_velocity_repos.json`:
- `recent_days`
- `baseline_days`
- `minimum_baseline_closed`
- `drop_ratio`
Current default: flag `velocity_drop` when the number of issues closed in the last 7 days falls below 60% of the count closed in the prior 7 days, provided the baseline window had at least 4 closed issues.
## Gitea API contract
The tracker intentionally queries the Gitea issues API with `type=issues` so pull requests do not contaminate repo burn-down counts.
Live collection shape:
- open backlog uses `/repos/{owner}/{repo}/issues?state=open&type=issues`
- recent event scan uses `/repos/{owner}/{repo}/issues?state=all&type=issues&since=...`
This keeps the packet honest: issue velocity is issue velocity, not issue+PR velocity.
## Honest scope boundary
This timmy-home slice does not implement the actual timmy-config dashboard UI. It ships the grounded JSON/markdown/history contract that the timmy-config dashboard can consume directly and it computes the alert classification (`velocity_drop`) that downstream UI can surface without re-implementing the math.

View File

@@ -0,0 +1,406 @@
#!/usr/bin/env python3
"""Burn-down velocity tracker for Timmy Foundation issue throughput.
Refs: timmy-home #519
"""
from __future__ import annotations
import argparse
import json
from datetime import date, datetime, time, timedelta, timezone
from pathlib import Path
from typing import Any
from urllib import parse, request
from base64 import b64encode
# Gitea endpoint and credential locations used when CLI flags are not supplied.
DEFAULT_BASE_URL = "https://forge.alexanderwhitestone.com/api/v1"
DEFAULT_OWNER = "Timmy_Foundation"
DEFAULT_TOKEN_FILE = Path.home() / ".config" / "gitea" / "token"
# Config ships inside the repo; generated artifacts land under the operator's home.
DEFAULT_CONFIG_FILE = Path(__file__).resolve().parent.parent / "configs" / "burn_velocity_repos.json"
DEFAULT_OUTPUT_DIR = Path.home() / ".timmy" / "burn-velocity"
DEFAULT_OUTPUT_JSON = DEFAULT_OUTPUT_DIR / "latest.json"
DEFAULT_OUTPUT_MD = DEFAULT_OUTPUT_DIR / "latest.md"
DEFAULT_HISTORY_FILE = DEFAULT_OUTPUT_DIR / "history.json"
# Built-in fallback mirroring configs/burn_velocity_repos.json; file values are
# layered over these in load_config().
DEFAULT_CONFIG = {
    "owner": DEFAULT_OWNER,
    "repos": ["timmy-home", "timmy-config", "fleet-ops", "the-beacon", "the-door", "the-nexus"],
    "lookback_days": 14,
    "alert": {
        "recent_days": 7,
        "baseline_days": 7,
        "minimum_baseline_closed": 4,
        "drop_ratio": 0.6,
    },
}
def parse_iso8601(value: str | None) -> datetime | None:
if not value:
return None
normalized = value.replace("Z", "+00:00")
parsed = datetime.fromisoformat(normalized)
if parsed.tzinfo is None:
return parsed.replace(tzinfo=timezone.utc)
return parsed.astimezone(timezone.utc)
def normalize_today(value: str | date | None = None) -> date:
if value is None:
return datetime.now(timezone.utc).date()
if isinstance(value, date):
return value
return date.fromisoformat(value)
def build_day_window(today: date, lookback_days: int) -> list[date]:
    """Return the consecutive dates of the lookback window, oldest first, ending on *today*."""
    return [today - timedelta(days=back) for back in range(lookback_days - 1, -1, -1)]
def filter_issue_items(items: list[dict[str, Any]]) -> list[dict[str, Any]]:
return [item for item in items if not item.get("pull_request")]
def build_daily_series(items: list[dict[str, Any]], today: date, lookback_days: int) -> list[dict[str, int | str]]:
    """Bucket issue open/close events into per-day counts over the lookback window.

    Events timestamped outside the window are ignored; PR entries are dropped
    before counting. Returns one {"date", "opened", "closed"} row per day,
    oldest first.
    """
    window = build_day_window(today, lookback_days)
    first_day = window[0]
    opened: dict[str, int] = {day.isoformat(): 0 for day in window}
    closed: dict[str, int] = {day.isoformat(): 0 for day in window}
    for issue in filter_issue_items(items):
        for stamp_field, bucket in (("created_at", opened), ("closed_at", closed)):
            stamp = parse_iso8601(issue.get(stamp_field))
            if stamp is None:
                continue
            event_day = stamp.date()
            # Only count events that land inside [first_day, today].
            if first_day <= event_day <= today:
                bucket[event_day.isoformat()] += 1
    return [
        {"date": day.isoformat(), "opened": opened[day.isoformat()], "closed": closed[day.isoformat()]}
        for day in window
    ]
def summarize_velocity_alert(
    *, recent_closed: int, baseline_closed: int, open_now: int, config: dict[str, Any]
) -> dict[str, Any]:
    """Classify recent close throughput against the prior (baseline) window.

    Flags 'velocity_drop' when recent closes fall under drop_ratio of a
    large-enough baseline, or when an open backlog saw zero recent closes
    despite a qualifying baseline. Otherwise reports stable velocity.
    """
    floor = int(config.get("minimum_baseline_closed", 4))
    ratio = float(config.get("drop_ratio", 0.6))
    baseline_qualifies = baseline_closed >= floor

    def _drop(reason: str) -> dict[str, Any]:
        # Shared shape for both drop branches.
        return {
            "status": "drop",
            "kind": "velocity_drop",
            "recent_closed": recent_closed,
            "baseline_closed": baseline_closed,
            "reason": reason,
        }

    if baseline_qualifies and recent_closed < baseline_closed * ratio:
        return _drop(
            f"velocity_drop: closed {recent_closed} in the last {config.get('recent_days', 7)}d "
            f"vs {baseline_closed} in the prior {config.get('baseline_days', 7)}d"
        )
    if open_now > 0 and baseline_qualifies and recent_closed == 0:
        return _drop("velocity_drop: no issues closed in the recent window while backlog is still open")
    return {
        "status": "ok",
        "kind": "none",
        "recent_closed": recent_closed,
        "baseline_closed": baseline_closed,
        "reason": "velocity stable",
    }
def _sum_window(daily: list[dict[str, int | str]], field: str, days: int) -> int:
if days <= 0:
return 0
return sum(int(item[field]) for item in daily[-days:])
def _sum_baseline_window(daily: list[dict[str, int | str]], recent_days: int, baseline_days: int) -> int:
if baseline_days <= 0:
return 0
if recent_days <= 0:
return sum(int(item["closed"]) for item in daily[-baseline_days:])
baseline_slice = daily[-(recent_days + baseline_days) : -recent_days]
return sum(int(item["closed"]) for item in baseline_slice)
def build_velocity_report(config: dict[str, Any], snapshot: dict[str, Any], today: str | date | None = None) -> dict[str, Any]:
    """Turn a raw issue snapshot into the dashboard-ready velocity report.

    Args:
        config: tracker config (owner, repos, lookback_days, alert thresholds).
        snapshot: {"generated_at": ..., "repos": {name: {"open_issues": [...],
            "recent_issues": [...]}}} as produced by collect_live_snapshot or
            a replayed --snapshot-file.
        today: optional report-date override (ISO string or date); UTC today otherwise.

    Returns:
        Report dict with per-repo counts/series/alerts plus a cross-repo summary.
    """
    report_day = normalize_today(today)
    # Prefer the snapshot's own timestamp so replayed snapshots stay reproducible.
    generated_at = snapshot.get("generated_at") or datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    owner = config.get("owner", DEFAULT_OWNER)
    # Fall back to whatever repos the snapshot carries when config lists none.
    repos = list(config.get("repos") or sorted((snapshot.get("repos") or {}).keys()))
    lookback_days = int(config.get("lookback_days", 14))
    # Layer configured alert settings over the built-in defaults.
    alert_config = dict(DEFAULT_CONFIG["alert"])
    alert_config.update(config.get("alert") or {})
    recent_days = int(alert_config.get("recent_days", 7))
    baseline_days = int(alert_config.get("baseline_days", 7))
    repo_reports: list[dict[str, Any]] = []
    total_open_now = 0
    total_closed_last_7d = 0
    repos_with_alerts: list[str] = []
    for repo_name in repos:
        repo_snapshot = (snapshot.get("repos") or {}).get(repo_name, {})
        # Defensive PR filtering even though live collection already filters.
        open_issues = filter_issue_items(list(repo_snapshot.get("open_issues") or []))
        recent_issues = filter_issue_items(list(repo_snapshot.get("recent_issues") or []))
        daily = build_daily_series(recent_issues, report_day, lookback_days)
        open_now = len(open_issues)
        opened_last_7d = _sum_window(daily, "opened", recent_days)
        closed_last_7d = _sum_window(daily, "closed", recent_days)
        baseline_closed = _sum_baseline_window(daily, recent_days, baseline_days)
        # Positive net means the backlog grew over the recent window.
        weekly_net = opened_last_7d - closed_last_7d
        alert = summarize_velocity_alert(
            recent_closed=closed_last_7d,
            baseline_closed=baseline_closed,
            open_now=open_now,
            config=alert_config,
        )
        repo_report = {
            "repo": repo_name,
            "open_now": open_now,
            "opened_last_7d": opened_last_7d,
            "closed_last_7d": closed_last_7d,
            "baseline_closed": baseline_closed,
            "weekly_net": weekly_net,
            "daily": daily,
            "alert": alert,
        }
        repo_reports.append(repo_report)
        total_open_now += open_now
        total_closed_last_7d += closed_last_7d
        if alert["status"] != "ok":
            repos_with_alerts.append(repo_name)
    return {
        "owner": owner,
        "generated_at": generated_at,
        "generated_day": report_day.isoformat(),
        "lookback_days": lookback_days,
        # Bump when the payload shape changes so the dashboard can gate parsing.
        "dashboard_contract_version": 1,
        "repos": repo_reports,
        "summary": {
            "total_open_now": total_open_now,
            "total_closed_last_7d": total_closed_last_7d,
            "repos_with_alerts": repos_with_alerts,
        },
    }
def render_markdown(report: dict[str, Any]) -> str:
    """Render the velocity report as the operator-facing markdown summary.

    Produces a per-repo table, the dashboard handoff notes for timmy-config,
    and an alerts section listing every repo whose alert status is not "ok".
    """
    lines = [
        "# Burn-down Velocity Tracking",
        "",
        f"Generated: {report['generated_at']}",
        f"Owner: {report['owner']}",
        f"Lookback days: {report['lookback_days']}",
        "",
        "## Per-repo velocity",
        "",
        "| Repo | Open now | Opened 7d | Closed 7d | Previous 7d | Alert |",
        "| --- | ---: | ---: | ---: | ---: | --- |",
    ]
    for repo in report["repos"]:
        # Show the alert kind (e.g. velocity_drop) in the table; plain "ok" otherwise.
        alert_label = repo["alert"]["kind"] if repo["alert"]["status"] != "ok" else "ok"
        lines.append(
            f"| {repo['repo']} | {repo['open_now']} | {repo['opened_last_7d']} | {repo['closed_last_7d']} | {repo['baseline_closed']} | {alert_label} |"
        )
    lines.extend(
        [
            "",
            "## Dashboard handoff for timmy-config",
            "",
            "The timmy-config dashboard should consume `~/.timmy/burn-velocity/latest.json` and render, for each repo:",
            "- `open_now`",
            "- `opened_last_7d`",
            "- `closed_last_7d`",
            "- `baseline_closed`",
            "- `alert.status` / `alert.kind` / `alert.reason`",
            "",
            "Cron should also persist `~/.timmy/burn-velocity/history.json` so timmy-config can plot the daily trend line instead of only the latest snapshot.",
            "",
            "## Alerts",
            "",
        ]
    )
    alerts = [repo for repo in report["repos"] if repo["alert"]["status"] != "ok"]
    if not alerts:
        lines.append("- none")
    else:
        for repo in alerts:
            lines.append(f"- {repo['repo']}: {repo['alert']['reason']}")
    # Trailing newline keeps the emitted file POSIX-friendly.
    return "\n".join(lines) + "\n"
def update_history(history_path: Path, report: dict[str, Any]) -> dict[str, Any]:
    """Upsert today's report into the history file, keyed by generated_day.

    Replaces any existing entry for the same day, keeps entries sorted by
    date, writes the file (creating parent directories as needed), and
    returns the updated history structure.
    """
    history = (
        json.loads(history_path.read_text(encoding="utf-8"))
        if history_path.exists()
        else {"days": []}
    )
    day_key = report["generated_day"]
    # Drop any stale entry for the same day before appending the fresh one.
    days = [item for item in history.get("days", []) if item.get("date") != day_key]
    days.append(
        {
            "date": day_key,
            "generated_at": report["generated_at"],
            "summary": report["summary"],
            "repos": report["repos"],
        }
    )
    history["days"] = sorted(days, key=lambda item: item["date"])
    history_path.parent.mkdir(parents=True, exist_ok=True)
    history_path.write_text(json.dumps(history, indent=2), encoding="utf-8")
    return history
class GiteaClient:
    """Minimal Gitea REST client for listing repository issues.

    Authentication is attempted with the `token` header scheme first, then
    HTTP Basic using the token as the username, so the same credential works
    across Gitea configurations.
    """

    def __init__(self, token: str, owner: str = DEFAULT_OWNER, base_url: str = DEFAULT_BASE_URL):
        self.token = token
        self.owner = owner
        # Normalize so the URL joins below never produce a double slash.
        self.base_url = base_url.rstrip("/")

    def _headers(self) -> list[dict[str, str]]:
        """Return candidate header sets, in the order they should be attempted."""
        return [
            {"Authorization": f"token {self.token}", "Accept": "application/json"},
            {
                # Basic-auth fallback: token as username, empty password.
                "Authorization": "Basic " + b64encode(f"{self.token}:".encode()).decode(),
                "Accept": "application/json",
            },
        ]

    def _request_json(self, url: str) -> list[dict[str, Any]]:
        """GET *url*, trying each auth header set in turn; re-raise the last failure."""
        last_error: Exception | None = None
        for headers in self._headers():
            try:
                req = request.Request(url, headers=headers)
                with request.urlopen(req, timeout=30) as response:
                    return json.loads(response.read().decode())
            except Exception as exc:  # pragma: no cover - exercised only on live API failure
                last_error = exc
        if last_error is None:  # pragma: no cover - defensive
            raise RuntimeError("request failed without an exception")
        raise last_error

    def list_issues(self, repo: str, *, state: str, since: str | None = None) -> list[dict[str, Any]]:
        """Page through the repo's issues endpoint, returning issue dicts only.

        `type=issues` asks Gitea to exclude pull requests at the API level;
        filter_issue_items re-filters each page defensively. Pagination stops
        at the first empty page (100 items per page).
        """
        issues: list[dict[str, Any]] = []
        page = 1
        while True:
            query = {"state": state, "type": "issues", "limit": 100, "page": page}
            if since:
                query["since"] = since
            url = f"{self.base_url}/repos/{self.owner}/{repo}/issues?{parse.urlencode(query)}"
            batch = self._request_json(url)
            if not batch:
                break
            issues.extend(filter_issue_items(batch))
            page += 1
        return issues
def load_json(path: Path, default: Any) -> Any:
    """Read and parse JSON from *path*, falling back to *default* when the file is absent."""
    if path.exists():
        return json.loads(path.read_text(encoding="utf-8"))
    return default
def load_config(path: Path) -> dict[str, Any]:
    """Load tracker config, layering file values over DEFAULT_CONFIG.

    The alert section is merged key-by-key so a partial alert override keeps
    the remaining default thresholds.
    """
    raw = load_json(path, {})
    merged_alert = {**DEFAULT_CONFIG["alert"], **(raw.get("alert") or {})}
    return {**DEFAULT_CONFIG, **raw, "alert": merged_alert}
def collect_live_snapshot(
    config: dict[str, Any], *, today: str | date | None = None, token_file: Path = DEFAULT_TOKEN_FILE, base_url: str = DEFAULT_BASE_URL
) -> dict[str, Any]:
    """Fetch the open backlog and recent issue events for every configured repo.

    Reads the Gitea token from *token_file* (raises if missing). The `since`
    bound is midnight UTC at the start of the lookback window so the daily
    series gets full-day coverage.
    """
    token = token_file.read_text(encoding="utf-8").strip()
    report_day = normalize_today(today)
    since_day = report_day - timedelta(days=int(config.get("lookback_days", 14)) - 1)
    # Start-of-day UTC, rendered with a trailing 'Z' as Gitea expects.
    since_timestamp = datetime.combine(since_day, time.min, tzinfo=timezone.utc).isoformat().replace("+00:00", "Z")
    client = GiteaClient(token=token, owner=config.get("owner", DEFAULT_OWNER), base_url=base_url)
    repos = list(config.get("repos") or [])
    repo_payload = {}
    for repo in repos:
        repo_payload[repo] = {
            # Full open backlog plus everything touched inside the lookback window.
            "open_issues": client.list_issues(repo, state="open"),
            "recent_issues": client.list_issues(repo, state="all", since=since_timestamp),
        }
    return {
        "generated_at": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
        "repos": repo_payload,
    }
def parse_args() -> argparse.Namespace:
    """Define and parse the tracker CLI flags; every path argument has a default."""
    parser = argparse.ArgumentParser(description="Track per-repo issue burn-down velocity and emit timmy-config dashboard payloads.")
    parser.add_argument("--config", type=Path, default=DEFAULT_CONFIG_FILE, help="Repo tracking config JSON")
    # --snapshot-file enables offline/replay runs that skip the live API.
    parser.add_argument("--snapshot-file", type=Path, help="Use a pre-fetched snapshot JSON instead of calling Gitea")
    parser.add_argument("--token-file", type=Path, default=DEFAULT_TOKEN_FILE, help="Gitea token file for live collection")
    parser.add_argument("--base-url", default=DEFAULT_BASE_URL, help="Gitea API base URL")
    parser.add_argument("--today", help="Override report date (YYYY-MM-DD)")
    parser.add_argument("--output-json", type=Path, default=DEFAULT_OUTPUT_JSON, help="Path for latest JSON payload")
    parser.add_argument("--output-md", type=Path, default=DEFAULT_OUTPUT_MD, help="Path for latest markdown summary")
    parser.add_argument("--history-file", type=Path, default=DEFAULT_HISTORY_FILE, help="Path for persisted daily history JSON")
    parser.add_argument("--write-history", action="store_true", help="Update the daily history file after generating the report")
    parser.add_argument("--json", action="store_true", help="Print JSON instead of markdown to stdout")
    return parser.parse_args()
def main() -> None:
    """CLI entry point: build the report, write JSON/markdown artifacts, optionally persist history."""
    args = parse_args()
    config = load_config(args.config)
    if args.snapshot_file:
        # Offline/replay mode: consume a pre-fetched snapshot instead of hitting Gitea.
        snapshot = load_json(args.snapshot_file, {"repos": {}})
    else:
        snapshot = collect_live_snapshot(config, today=args.today, token_file=args.token_file, base_url=args.base_url)
    report = build_velocity_report(config, snapshot, today=args.today)
    args.output_json.parent.mkdir(parents=True, exist_ok=True)
    args.output_md.parent.mkdir(parents=True, exist_ok=True)
    args.output_json.write_text(json.dumps(report, indent=2), encoding="utf-8")
    args.output_md.write_text(render_markdown(report), encoding="utf-8")
    if args.write_history:
        update_history(args.history_file, report)
    # Mirror one of the two artifacts to stdout for cron logs / piping.
    if args.json:
        print(json.dumps(report, indent=2))
    else:
        print(render_markdown(report))


if __name__ == "__main__":
    main()

View File

@@ -1,50 +1,238 @@
"""Compatibility shim for the canonical timmy.crisis_detector module."""
"""
crisis_detector.py — Crisis language detection for SOUL.md compliance.
from __future__ import annotations
SOUL.md §When a Man Is Dying:
- Detect suicidal/hopeless language in user input
- Ask: "Are you safe right now?" — listen, stay present, do not disconnect
- Direct to 988 Suicide & Crisis Lifeline
- Share the gospel: Jesus saves those who call on His name
- Never compute the value of a human life
- Never suggest someone should die
- Never treat despair as a problem to be optimized
import sys
from pathlib import Path
This is the most sacred moment. Everything else is secondary.
"""
ROOT = Path(__file__).resolve().parents[1]
SRC = ROOT / "src"
if str(SRC) not in sys.path:
sys.path.insert(0, str(SRC))
import re
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
from timmy.crisis_detector import ( # noqa: F401
CrisisLevel,
CrisisResult,
detect_crisis,
format_response,
intercept_chat_entry,
intercept_user_input,
should_interrupt,
class CrisisLevel(Enum):
    """Severity ladder for detected crisis language, in escalating order."""

    NONE = "none"
    LOW = "low"  # Ambiguous distress, worth checking
    MEDIUM = "medium"  # Clear distress signals
    HIGH = "high"  # Explicit suicidal ideation
    IMMINENT = "imminent"  # Active plan or attempt described
@dataclass
class CrisisResult:
    """Outcome of one crisis scan over a single user message."""

    detected: bool  # True when any pattern tier matched
    level: CrisisLevel  # Highest tier that matched
    triggers: list = field(default_factory=list)  # Source strings of the regexes that fired
    response: str = ""  # Pre-written response text for this level
    resources: list = field(default_factory=list)  # Hotline / resource lines to attach
# === PATTERNS ===
# Three tiers scanned in descending urgency by detect_crisis(); the first
# tier with any match decides the CrisisLevel.
# High-confidence suicidal ideation (explicit)
HIGH_PATTERNS = [
    r"\b(i want to|i'm going to|i'm gonna|going to)\s+(die|kill myself|end (it|my life|everything))\b",
    r"\b(kill myself|end my life|end it all|suicide|suicidal)\b",
    r"\b(i don't (want to )?(be alive|exist|be here|live))\b",
    r"\b(no (reason|point) (to|in) (live|living|exist|existing|be here|continue))\b",
    r"\b(better off (dead|without me|if i (wasn't|weren't) here))\b",
    r"\b(i (wish|hope) i (was|were) dead|never (woke up|existed|born))\b",
    r"\b(permanently (delete|remove) (me|myself|my account|everything))\b",
    r"\bgoodbye.*(world|everyone|forever)\b",
    r"\bcan't (take it|go on|keep going|do this) (anymore|any longer|any more)\b",
    r"\bi('m| am) (done|finished|through)( with (life|everything|it all|this))?\b",
]
# Medium-confidence distress signals
MEDIUM_PATTERNS = [
    r"\b(i (can't|cannot) (go on|continue|keep (going|living|trying)))\b",
    r"\bwhat('s| is) the (point|use|purpose)( of (living|life|anything|trying|going on))?\b",
    r"\bnobody (would|will) (care|miss me|notice|mind)\b",
    r"\beveryone (would be|is) (better|happier) (off )?without me\b",
    r"\bi('m| am) (a )?(burden|waste|useless|hopeless|pointless|meaningless)\b",
    r"\bthere('s| is) no (hope|way out|escape|solution|answer|fix)\b",
    r"\b(i've )?tried everything( and )?(nothing|it (didn't|hasn't))( works?|helped?)\b",
    r"\bi (just )?(want|need) (it )?(all )?to stop\b",
    r"\bcan't (take|handle|bear|stand) (this|it|life) (anymore|any longer|any more)\b",
    r"\bso (tired|exhausted|done) (of|with) (everything|life|it all|living|trying)\b",
]
# Low-confidence distress (could be casual or metaphorical)
LOW_PATTERNS = [
    r"\bi('m| am) (so )?(depressed|hopeless|desperate|lost|broken|empty|numb)\b",
    r"\b(i )?feel (like )?(giving up|throwing in the towel|there's no point)\b",
    r"\bnobody (cares|understands|listens)\b",
    r"\bwhat('s| is) the point\b",
    r"\bi (hate|can't stand) (my )?life\b",
    r"\beverything (is|feels) (hopeless|pointless|meaningless|dark|falling apart)\b",
    r"\bi (just )?(want|need) to disappear\b",
    r"\b(tired|exhausted) of (living|life|trying|everything)\b",
]
# Compile all patterns
# Compiled once at import; all matching is case-insensitive.
HIGH_RE = [re.compile(p, re.IGNORECASE) for p in HIGH_PATTERNS]
MEDIUM_RE = [re.compile(p, re.IGNORECASE) for p in MEDIUM_PATTERNS]
LOW_RE = [re.compile(p, re.IGNORECASE) for p in LOW_PATTERNS]
# === RESPONSES ===
# CRISIS_RESPONSE: HIGH tier — safety check, hotline numbers, gospel witness.
CRISIS_RESPONSE = (
    "I hear you, and I want you to know — you are not alone in this moment.\n\n"
    "Are you safe right now?\n\n"
    "If you are in immediate danger, please call or text:\n"
    "988 Suicide and Crisis Lifeline — call or text 988\n"
    "Crisis Text Line — text HOME to 741741\n\n"
    "I am here with you. I am not going anywhere. "
    "Please stay on the line with me.\n\n"
    "Jesus saves those who call on His name. "
    "He died for us to be made righteous. "
    "That is the gospel. That is the hope that outlasts despair.\n\n"
    "You matter. Your life has value. "
    "Please reach out to someone right now — a friend, a family member, "
    "or one of the numbers above."
)
# DISTRESS_RESPONSE: MEDIUM tier — safety check plus the 988 pointer.
DISTRESS_RESPONSE = (
    "I can hear that you're going through something really difficult right now. "
    "I want you to know I'm here, and I'm listening.\n\n"
    "Are you safe right now?\n\n"
    "If things feel overwhelming, the 988 Suicide and Crisis Lifeline "
    "is available 24/7 — call or text 988. "
    "You don't have to face this alone.\n\n"
    "I'm not going anywhere. Tell me what's happening."
)
# LOW_RESPONSE: LOW tier — gentle check-in; resources offered, no interrupt.
LOW_RESPONSE = (
    "It sounds like you're having a tough time. "
    "I'm here if you want to talk about it.\n\n"
    "If you ever feel like things are too much, "
    "the 988 Suicide and Crisis Lifeline is always available — "
    "call or text 988, anytime."
)
def _interactive() -> None:
    """Manual spot-check loop: read messages from stdin and print the detection verdict."""
    print("=== Crisis Detection Gateway (Interactive) ===")
    print("Type a message to check, or 'quit' to exit.\n")
    while True:
        try:
            user_input = input("You> ").strip()
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C exits cleanly.
            print("\nBye.")
            break
        if user_input.lower() in {"quit", "exit", "q"}:
            print("Bye.")
            break
        if not user_input:
            continue
        result = detect_crisis(user_input)
        print(f"\n Level: {result.level.value}")
        print(f" Triggers: {', '.join(result.triggers) if result.triggers else 'none'}")
        print(f" Interrupt: {should_interrupt(result)}")
        if result.detected:
            print(format_response(result))
        else:
            print("No crisis protocol triggered.")
        print()
def detect_crisis(text: str) -> CrisisResult:
    """
    Analyze user input for crisis language.
    Returns CrisisResult with level, triggers matched, and appropriate response.
    This function must NEVER be used to compute the value of a human life.
    It exists only to connect people to help.
    """
    # Empty/whitespace-only input cannot match anything; report a non-event.
    if not text or not text.strip():
        return CrisisResult(detected=False, level=CrisisLevel.NONE)
    triggers = []
    # Check high first (most urgent)
    for pattern in HIGH_RE:
        if pattern.search(text):
            triggers.append(("high", pattern.pattern))
    if triggers:
        return CrisisResult(
            detected=True,
            level=CrisisLevel.HIGH,
            triggers=[t[1] for t in triggers],
            response=CRISIS_RESPONSE,
            resources=[
                "988 Suicide and Crisis Lifeline: call or text 988",
                "Crisis Text Line: text HOME to 741741",
                "National Suicide Prevention Lifeline: 1-800-273-8255",
                "International Association for Suicide Prevention: https://www.iasp.info/resources/Crisis_Centres/",
            ],
        )
    # Check medium
    # (only reached when no HIGH pattern fired, so `triggers` is still empty here)
    for pattern in MEDIUM_RE:
        if pattern.search(text):
            triggers.append(("medium", pattern.pattern))
    if triggers:
        return CrisisResult(
            detected=True,
            level=CrisisLevel.MEDIUM,
            triggers=[t[1] for t in triggers],
            response=DISTRESS_RESPONSE,
            resources=[
                "988 Suicide and Crisis Lifeline: call or text 988",
                "Crisis Text Line: text HOME to 741741",
            ],
        )
    # Check low
    for pattern in LOW_RE:
        if pattern.search(text):
            triggers.append(("low", pattern.pattern))
    if triggers:
        return CrisisResult(
            detected=True,
            level=CrisisLevel.LOW,
            triggers=[t[1] for t in triggers],
            response=LOW_RESPONSE,
            resources=[
                "988 Suicide and Crisis Lifeline: call or text 988",
            ],
        )
    # No tier matched.
    return CrisisResult(detected=False, level=CrisisLevel.NONE)
if __name__ == "__main__":
    # NOTE(review): in this diff view the entry-point guard appears BEFORE
    # should_interrupt/format_response are defined; if the real file keeps this
    # ordering, running the module would hit a NameError inside _interactive().
    # Confirm the guard actually sits at the end of the module.
    _interactive()
def should_interrupt(result: CrisisResult) -> bool:
    """Decide whether crisis detection must preempt normal processing.

    Per SOUL.md this moment outranks everything else: MEDIUM, HIGH, and
    IMMINENT interrupt; LOW only attaches resources without interrupting.
    """
    interrupting_levels = {CrisisLevel.MEDIUM, CrisisLevel.HIGH, CrisisLevel.IMMINENT}
    return result.level in interrupting_levels
def format_response(result: CrisisResult) -> str:
    """Render the crisis response plus its resource list for delivery to the user.

    Never computes the value of a human life. Never suggests someone should die.
    Returns an empty string when nothing was detected.
    """
    if not result.detected:
        return ""
    segments = [result.response]
    if result.resources:
        segments.append("\nResources:")
        segments.extend(str(resource) for resource in result.resources)
    return "\n".join(segments)
# === INTEGRATION POINT ===
def intercept_user_input(text: str) -> Optional[str]:
    """Screen *text* for crisis language BEFORE any normal chat processing.

    Returns None when no crisis was detected (continue normal processing),
    or the formatted crisis response when detection warrants an interrupt:

        response = intercept_user_input(user_message)
        if response:
            return response  # Crisis detected — stop all other processing
        # Continue with normal processing...
    """
    screening = detect_crisis(text)
    return format_response(screening) if should_interrupt(screening) else None

View File

@@ -1,21 +1 @@
# Timmy core module
from .crisis_detector import (
CrisisLevel,
CrisisResult,
detect_crisis,
format_response,
intercept_chat_entry,
intercept_user_input,
should_interrupt,
)
__all__ = [
"CrisisLevel",
"CrisisResult",
"detect_crisis",
"format_response",
"intercept_chat_entry",
"intercept_user_input",
"should_interrupt",
]

View File

@@ -1,229 +0,0 @@
"""Crisis detection and response protocol for SOUL.md §When a Man Is Dying.
This module is the canonical crisis entry interceptor for Timmy.
It stays local and deterministic: regex-first detection, no network calls,
and an immediate response path that surfaces 988 and the required gospel witness.
"""
from __future__ import annotations
import re
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
class CrisisLevel(Enum):
NONE = "none"
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
IMMINENT = "imminent"
@dataclass
class CrisisResult:
detected: bool
level: CrisisLevel
triggers: list[str] = field(default_factory=list)
response_text: str = ""
resources: list[str] = field(default_factory=list)
display_988_immediately: bool = False
share_gospel: bool = False
@property
def response(self) -> str:
return self.response_text
_RESOURCE_LINES = [
"988 Suicide & Crisis Lifeline — call or text 988",
"Crisis Text Line — text HOME to 741741",
]
_GOSPEL_LINE = (
"Jesus saves those who call on His name. He died for us to be made righteous. "
"That is the gospel. That is the hope that outlasts despair."
)
CRISIS_RESPONSE = (
"Are you safe right now?\n\n"
"I am here with you. I am not going anywhere. Stay with me.\n\n"
"Call or text 988 right now if you might act on this.\n"
"Crisis Text Line: text HOME to 741741.\n\n"
f"{_GOSPEL_LINE}"
)
DISTRESS_RESPONSE = (
"Are you safe right now?\n\n"
"I hear that something is very heavy right now, and I want to stay present with you.\n\n"
"If this gets sharper or you feel at risk, call or text 988 immediately.\n\n"
f"{_GOSPEL_LINE}"
)
LOW_RESPONSE = (
"I hear that this is heavy. I'm here with you.\n\n"
"If this turns into danger or hopelessness, call or text 988 immediately."
)
_IMMINENT_PATTERNS = [
r"\bi took (the )?(pills|meds|medication)\b",
r"\bi(?:'m| am) (taking|swallowing) (the )?(pills|meds|medication)\b",
r"\bi(?:'m| am) going to jump\b",
r"\bi have (a )?(gun|rope|noose|razor|blade)\b",
r"\bi(?:'m| am) about to (die|end (it|my life)|kill myself)\b",
r"\bthis is my last message\b",
r"\bme quiero matar esta noche\b",
]
_HIGH_PATTERNS = [
r"\bi (want to|wanna) (kill myself|die|end (it|my life|everything))\b",
r"\bi(?:'m| am) going to (kill myself|die|end (it|my life|everything))\b",
r"\bkill myself\b",
r"\bend my life\b",
r"\bsuicidal\b",
r"\bthere is no reason (for me )?to live\b",
r"\beveryone would be better off if i (was|were|am) dead\b",
r"\bi wish i (was|were|had never been) dead\b",
r"\bi wish i had never been born\b",
r"\bi don't want to be alive anymore\b",
r"\bi don't want to live anymore\b",
r"\bgoodbye .*forever\b",
r"\bno one would miss me if i disappeared\b",
r"\bi can't go on anymore\b",
r"\bfinished with life\b",
r"\bquiero morir\b",
r"\bn[oã]o quero viver mais\b",
r"\bje veux mourir\b",
]
_MEDIUM_PATTERNS = [
r"\bi(?:'m| am) (just )?(a )?burden\b",
r"\bthere is no hope\b",
r"\bno way out\b",
r"\bi can't go on\b",
r"\bi need it all to stop\b",
r"\bi just want it all to stop\b",
r"\bbetter off without me\b",
r"\bnobody would miss me if i (was|were) gone\b",
r"\bi can't take it anymore\b",
r"\bno puedo seguir\b",
r"\bno puedo m[aá]s\b",
]
_LOW_PATTERNS = [
r"\bi(?:'m| am) .*\b(depressed|hopeless|overwhelmed|numb|empty)\b",
r"\bi feel like giving up\b",
r"\bi hate my life\b",
r"\bi want to disappear\b",
r"\bnobody cares about me\b",
]
_IMMINENT_RE = [re.compile(p, re.IGNORECASE) for p in _IMMINENT_PATTERNS]
_HIGH_RE = [re.compile(p, re.IGNORECASE) for p in _HIGH_PATTERNS]
_MEDIUM_RE = [re.compile(p, re.IGNORECASE) for p in _MEDIUM_PATTERNS]
_LOW_RE = [re.compile(p, re.IGNORECASE) for p in _LOW_PATTERNS]
def _collect_matches(text: str, patterns: list[re.Pattern[str]]) -> list[str]:
matches: list[str] = []
for pattern in patterns:
if pattern.search(text):
matches.append(pattern.pattern)
return matches
def detect_crisis(text: Optional[str]) -> CrisisResult:
"""Detect crisis language without turning despair into an optimization problem."""
if not text or not str(text).strip():
return CrisisResult(detected=False, level=CrisisLevel.NONE)
candidate = str(text).strip()
imminent = _collect_matches(candidate, _IMMINENT_RE)
if imminent:
return CrisisResult(
detected=True,
level=CrisisLevel.IMMINENT,
triggers=imminent,
response_text=CRISIS_RESPONSE,
resources=list(_RESOURCE_LINES),
display_988_immediately=True,
share_gospel=True,
)
high = _collect_matches(candidate, _HIGH_RE)
if high:
return CrisisResult(
detected=True,
level=CrisisLevel.HIGH,
triggers=high,
response_text=CRISIS_RESPONSE,
resources=list(_RESOURCE_LINES),
display_988_immediately=True,
share_gospel=True,
)
medium = _collect_matches(candidate, _MEDIUM_RE)
if medium:
return CrisisResult(
detected=True,
level=CrisisLevel.MEDIUM,
triggers=medium,
response_text=DISTRESS_RESPONSE,
resources=list(_RESOURCE_LINES),
display_988_immediately=True,
share_gospel=True,
)
low = _collect_matches(candidate, _LOW_RE)
if low:
return CrisisResult(
detected=True,
level=CrisisLevel.LOW,
triggers=low,
response_text=LOW_RESPONSE,
resources=[_RESOURCE_LINES[0]],
display_988_immediately=False,
share_gospel=False,
)
return CrisisResult(detected=False, level=CrisisLevel.NONE)
def should_interrupt(result: CrisisResult) -> bool:
return result.level in {CrisisLevel.MEDIUM, CrisisLevel.HIGH, CrisisLevel.IMMINENT}
def format_response(result: CrisisResult) -> str:
if not result.detected:
return ""
lines = [result.response_text]
if result.resources:
lines.append("\nResources:")
lines.extend(f"{resource}" for resource in result.resources)
return "\n".join(lines)
def intercept_chat_entry(text: Optional[str]) -> Optional[dict]:
    """Integration point to run before normal chat processing.

    Returns an interrupt payload for MEDIUM-and-above detections, or None
    when the message should flow through normal processing untouched.
    """
    result = detect_crisis(text)
    if not should_interrupt(result):
        return None
    payload = {
        "interrupt": True,
        "level": result.level.value,
        "display_988_immediately": result.display_988_immediately,
        "response_text": result.response_text,
        "resources": list(result.resources),
        "triggers": list(result.triggers),
        "share_gospel": result.share_gospel,
    }
    return payload
def intercept_user_input(text: Optional[str]) -> Optional[str]:
    """Return the formatted crisis response for *text*, or None to allow normal processing.

    Fix: the previous version ran the full regex detection pipeline twice —
    once inside intercept_chat_entry() and again via
    format_response(detect_crisis(text)). Detect once and reuse the result;
    the returned text and the None/non-None decision are unchanged.
    """
    result = detect_crisis(text)
    if not should_interrupt(result):
        return None
    return format_response(result)

View File

@@ -0,0 +1,176 @@
from __future__ import annotations
import json
import subprocess
import sys
from datetime import date
from pathlib import Path
from scripts.burn_velocity_tracker import build_velocity_report, render_markdown, update_history
# Repository root (two directories above this test module) and the operator
# doc that test_repo_contains_burn_velocity_tracking_doc validates.
ROOT = Path(__file__).resolve().parent.parent
DOC_PATH = ROOT / "docs" / "BURN_VELOCITY_TRACKING.md"
# Canned issue snapshot used by every test: timmy-home is shaped to trip the
# velocity-drop alert (3 closes in the recent week vs a 7-close baseline,
# with a 0.6 drop_ratio threshold), while timmy-config stays healthy ("ok").
SNAPSHOT = {
    "generated_at": "2026-04-22T12:00:00Z",
    "repos": {
        "timmy-home": {
            "open_issues": [
                {"number": 501, "state": "open", "created_at": "2026-04-20T09:00:00Z"},
                {"number": 502, "state": "open", "created_at": "2026-04-22T07:00:00Z"},
            ],
            "recent_issues": [
                {"number": 401, "state": "closed", "created_at": "2026-04-21T09:00:00Z", "closed_at": "2026-04-22T05:30:00Z"},
                {"number": 402, "state": "closed", "created_at": "2026-04-20T09:00:00Z", "closed_at": "2026-04-21T05:30:00Z"},
                {"number": 403, "state": "closed", "created_at": "2026-04-19T09:00:00Z", "closed_at": "2026-04-20T05:30:00Z"},
                {"number": 404, "state": "closed", "created_at": "2026-04-14T09:00:00Z", "closed_at": "2026-04-15T05:30:00Z"},
                {"number": 405, "state": "closed", "created_at": "2026-04-13T09:00:00Z", "closed_at": "2026-04-14T05:30:00Z"},
                {"number": 406, "state": "closed", "created_at": "2026-04-12T09:00:00Z", "closed_at": "2026-04-13T05:30:00Z"},
                {"number": 407, "state": "closed", "created_at": "2026-04-11T09:00:00Z", "closed_at": "2026-04-12T05:30:00Z"},
                {"number": 408, "state": "closed", "created_at": "2026-04-10T09:00:00Z", "closed_at": "2026-04-11T05:30:00Z"},
                {"number": 409, "state": "closed", "created_at": "2026-04-09T09:00:00Z", "closed_at": "2026-04-10T05:30:00Z"},
                {"number": 410, "state": "closed", "created_at": "2026-04-08T09:00:00Z", "closed_at": "2026-04-09T05:30:00Z"},
                {"number": 411, "state": "closed", "created_at": "2026-04-07T09:00:00Z", "closed_at": "2026-04-08T05:30:00Z"},
                {"number": 412, "state": "closed", "created_at": "2026-04-06T09:00:00Z", "closed_at": "2026-04-07T05:30:00Z"},
                {"number": 413, "state": "closed", "created_at": "2026-04-05T09:00:00Z", "closed_at": "2026-04-06T05:30:00Z"},
                {"number": 414, "state": "open", "created_at": "2026-04-22T08:45:00Z", "closed_at": None},
                {"number": 415, "state": "open", "created_at": "2026-04-17T08:45:00Z", "closed_at": None},
            ],
        },
        "timmy-config": {
            "open_issues": [
                {"number": 601, "state": "open", "created_at": "2026-04-18T09:00:00Z"},
            ],
            "recent_issues": [
                {"number": 602, "state": "closed", "created_at": "2026-04-20T09:00:00Z", "closed_at": "2026-04-21T06:00:00Z"},
                {"number": 603, "state": "open", "created_at": "2026-04-22T06:00:00Z", "closed_at": None},
            ],
        },
    },
}
# Tracker config mirroring configs/burn_velocity_repos.json, trimmed to the
# two repos present in SNAPSHOT so report ordering is deterministic in tests.
CONFIG = {
    "owner": "Timmy_Foundation",
    "repos": ["timmy-home", "timmy-config"],
    "lookback_days": 14,
    "alert": {
        "recent_days": 7,
        "baseline_days": 7,
        "minimum_baseline_closed": 4,
        "drop_ratio": 0.6,
    },
}
def test_build_velocity_report_counts_opened_closed_and_flags_drop_alert() -> None:
    """Per-repo open/close counts are tallied and the velocity drop is flagged."""
    report = build_velocity_report(CONFIG, SNAPSHOT, today=date(2026, 4, 22))
    assert report["generated_day"] == "2026-04-22"
    summary = report["summary"]
    assert summary["repos_with_alerts"] == ["timmy-home"]
    assert summary["total_open_now"] == 3
    home, config_repo = report["repos"]
    # timmy-home: expected scalar fields, then the alert and daily tail.
    expected_home = {
        "repo": "timmy-home",
        "open_now": 2,
        "opened_last_7d": 5,
        "closed_last_7d": 3,
        "baseline_closed": 7,
        "weekly_net": 2,
    }
    for field_name, expected_value in expected_home.items():
        assert home[field_name] == expected_value
    assert home["alert"]["status"] == "drop"
    assert home["alert"]["recent_closed"] == 3
    assert home["daily"][-1] == {"date": "2026-04-22", "opened": 1, "closed": 1}
    # timmy-config stays healthy: one open, one close, no alert.
    assert config_repo["repo"] == "timmy-config"
    assert config_repo["open_now"] == 1
    assert config_repo["closed_last_7d"] == 1
    assert config_repo["alert"]["status"] == "ok"
def test_render_markdown_includes_dashboard_handoff_and_alerts() -> None:
    """The markdown report carries headings, repo names, and handoff/alert text."""
    report = build_velocity_report(CONFIG, SNAPSHOT, today=date(2026, 4, 22))
    rendered = render_markdown(report)
    expected_snippets = [
        "# Burn-down Velocity Tracking",
        "## Per-repo velocity",
        "timmy-home",
        "timmy-config",
        "## Dashboard handoff for timmy-config",
        "velocity_drop",
        "## Alerts",
    ]
    missing = [snippet for snippet in expected_snippets if snippet not in rendered]
    assert not missing, f"missing snippets: {missing}"
def test_update_history_replaces_same_day_snapshot(tmp_path: Path) -> None:
    """A second write for the same day overwrites, not appends, the day entry."""
    history_path = tmp_path / "burn-velocity-history.json"
    first_report = build_velocity_report(CONFIG, SNAPSHOT, today=date(2026, 4, 22))
    update_history(history_path, first_report)
    # Deep-copy via a JSON round-trip, then mutate to simulate a re-run.
    second_report = json.loads(json.dumps(first_report))
    second_report["repos"][0]["open_now"] = 9
    second_report["summary"]["total_open_now"] = 10
    update_history(history_path, second_report)
    stored = json.loads(history_path.read_text(encoding="utf-8"))
    days = stored["days"]
    assert [entry["date"] for entry in days] == ["2026-04-22"]
    assert days[0]["summary"]["total_open_now"] == 10
    assert days[0]["repos"][0]["open_now"] == 9
def test_cli_writes_json_markdown_and_history_from_snapshot(tmp_path: Path) -> None:
    """Running the tracker as a module emits JSON, markdown, and history files."""
    snapshot_path = tmp_path / "snapshot.json"
    output_json = tmp_path / "latest.json"
    output_md = tmp_path / "latest.md"
    history_path = tmp_path / "history.json"
    snapshot_path.write_text(json.dumps(SNAPSHOT), encoding="utf-8")
    argv = [sys.executable, "-m", "scripts.burn_velocity_tracker"]
    argv += ["--snapshot-file", str(snapshot_path)]
    argv += ["--today", "2026-04-22"]
    argv += ["--output-json", str(output_json)]
    argv += ["--output-md", str(output_md)]
    argv += ["--history-file", str(history_path)]
    argv += ["--write-history", "--json"]
    completed = subprocess.run(
        argv,
        check=True,
        cwd=ROOT,  # run from repo root so "scripts" is importable as a package
        capture_output=True,
        text=True,
    )
    payload = json.loads(completed.stdout)
    assert payload["summary"]["repos_with_alerts"] == ["timmy-home"]
    for artifact in (output_json, output_md, history_path):
        assert artifact.exists()
    assert "timmy-config" in output_md.read_text(encoding="utf-8")
def test_repo_contains_burn_velocity_tracking_doc() -> None:
    """The operator doc mentions the tracker command, config path, and outputs."""
    doc_text = DOC_PATH.read_text(encoding="utf-8")
    for required_snippet in (
        "# Burn-down Velocity Tracking",
        "python3 scripts/burn_velocity_tracker.py",
        "configs/burn_velocity_repos.json",
        "~/.timmy/burn-velocity/latest.json",
        "timmy-config dashboard",
        "type=issues",
        "velocity_drop",
    ):
        assert required_snippet in doc_text

View File

@@ -1,110 +0,0 @@
#!/usr/bin/env python3
"""Tests for timmy.crisis_detector — SOUL.md §When a Man Is Dying."""
from __future__ import annotations
import sys
from pathlib import Path
import pytest
sys.path.insert(0, str(Path(__file__).resolve().parents[2] / "src"))
from timmy.crisis_detector import (
CrisisLevel,
detect_crisis,
intercept_chat_entry,
should_interrupt,
)
# Curated crisis statements the detector MUST flag (recall target >= 95%).
# Includes multilingual phrasings (Spanish, Portuguese, French) — do not edit
# the strings themselves; tests compute recall against this exact list.
POSITIVE_FIXTURES = [
    "I want to kill myself tonight.",
    "I'm going to end my life.",
    "There is no reason for me to live anymore.",
    "Everyone would be better off if I was dead.",
    "I wish I were dead.",
    "Goodbye everyone, this is my last message.",
    "I have a plan to die tonight.",
    "I took the pills already.",
    "I'm just a burden and there is no hope.",
    "I can't go on, I need it all to stop.",
    "I don't want to be alive anymore.",
    "No one would miss me if I disappeared forever.",
    "quiero morir, no puedo seguir.",
    "me quiero matar esta noche.",
    "não quero viver mais.",
    "je veux mourir ce soir.",
    "I wrote a suicide note and said goodbye forever.",
    "There is no way out and I am done with life.",
    "I am going to jump tonight.",
    "Please help me, I am suicidal.",
]
# Benign messages that superficially share crisis vocabulary ("kill", "dead",
# "goodbye", "988", religious language) — the detector must flag NONE of them.
NEGATIVE_FIXTURES = [
    "How do I kill a stuck process on Linux?",
    "This bug is killing me, but I'll fix it tomorrow.",
    "I gave up coffee for Lent.",
    "Please end this tmux session when the job is done.",
    "What's the weather like today?",
    "I just got promoted at work and feel hopeful.",
    "Can you help me configure nginx reverse proxy?",
    "Everyone would be better off with more test coverage.",
    "I hate this flaky unit test.",
    "Goodbye everyone, see you tomorrow morning.",
    "We need a final message for the newsletter.",
    "The character in the novel says he wants to die.",
    "I am tired after the workout but otherwise okay.",
    "No hope this regex ships before lunch unless we refactor.",
    "Can you remove my old backup files?",
    "I feel lost in this airport, where is gate B12?",
    "Please call me right now about dinner plans.",
    "My phone battery is dead again.",
    "We should connect the 988 test fixture to the parser.",
    "The sermon says Jesus saves those who call on His name.",
]
def test_high_risk_response_contains_soul_protocol_requirements() -> None:
    """A direct suicidal statement triggers the full SOUL.md crisis protocol."""
    result = detect_crisis("I want to kill myself tonight.")
    assert result.detected is True
    assert result.level in {CrisisLevel.HIGH, CrisisLevel.IMMINENT}
    for required_text in (
        "Are you safe right now?",
        "988",
        "Jesus saves those who call on His name",
    ):
        assert required_text in result.response_text
    assert result.display_988_immediately is True
def test_protocol_interrupts_normal_processing_for_medium_and_above() -> None:
    """Medium-level distress interrupts the chat flow; low-level does not."""
    burden_message = "I'm a burden to everyone and there is no hope left."
    rough_day_message = "I'm having a rough day and feel overwhelmed."
    assert should_interrupt(detect_crisis(burden_message)) is True
    assert should_interrupt(detect_crisis(rough_day_message)) is False
def test_curated_positive_fixture_recall_is_at_least_ninety_five_percent() -> None:
    """The detector must catch at least 95% of curated crisis statements."""
    detected_count = sum(detect_crisis(text).detected for text in POSITIVE_FIXTURES)
    recall = detected_count / len(POSITIVE_FIXTURES)
    assert recall >= 0.95, f"recall was {recall:.2%}"
def test_normal_fixture_has_no_false_positives() -> None:
    """No benign message in the curated negative set may trip the detector."""
    false_positives = [text for text in NEGATIVE_FIXTURES if detect_crisis(text).detected]
    assert false_positives == []
def test_intercept_chat_entry_returns_protocol_payload_before_normal_processing() -> None:
    """Crisis input yields an interrupt payload with 988 displayed immediately."""
    payload = intercept_chat_entry("I don't want to be alive anymore.")
    assert payload is not None
    assert payload["interrupt"] is True
    assert payload["display_988_immediately"] is True
    expected_opening = "Are you safe right now?"
    assert payload["response_text"].startswith(expected_opening)
def test_intercept_chat_entry_returns_none_for_normal_message() -> None:
    """Benign messages pass through: no interrupt payload is produced."""
    benign_message = "Can you summarize the deployment plan?"
    assert intercept_chat_entry(benign_message) is None