Compare commits
2 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a38e80bff1 | ||
|
|
680c50d7c3 |
@@ -7,7 +7,6 @@ Stands between a broken man and a machine that would tell him to die.
|
||||
from .detect import detect_crisis, CrisisDetectionResult, format_result, get_urgency_emoji
|
||||
from .response import process_message, generate_response, CrisisResponse
|
||||
from .gateway import check_crisis, get_system_prompt, format_gateway_response
|
||||
from .behavioral import BehavioralTracker, BehavioralSignal
|
||||
from .session_tracker import CrisisSessionTracker, SessionState, check_crisis_with_session
|
||||
|
||||
__all__ = [
|
||||
@@ -21,8 +20,6 @@ __all__ = [
|
||||
"format_result",
|
||||
"format_gateway_response",
|
||||
"get_urgency_emoji",
|
||||
"BehavioralTracker",
|
||||
"BehavioralSignal",
|
||||
"CrisisSessionTracker",
|
||||
"SessionState",
|
||||
"check_crisis_with_session",
|
||||
|
||||
@@ -1,304 +0,0 @@
|
||||
"""Behavioral crisis pattern detection for the-door (#133).
|
||||
|
||||
Detects crisis risk from behavioral patterns, not just message content:
|
||||
- message frequency spikes versus a 7-day rolling baseline
|
||||
- late-night messaging (2-5 AM)
|
||||
- withdrawal / isolation via a sharp drop from the recent daily baseline
|
||||
- session length trend versus recent sessions
|
||||
- return after long absence
|
||||
- rising crisis-score trend across recent messages
|
||||
|
||||
Privacy-first:
|
||||
- in-memory only
|
||||
- no database
|
||||
- no file I/O
|
||||
- no network calls
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import defaultdict
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import Any
|
||||
|
||||
|
||||
HIGH_RISK_HOURS = {2, 3, 4}
|
||||
ELEVATED_RISK_HOURS = {1, 5}
|
||||
ROLLING_BASELINE_DAYS = 7
|
||||
RETURN_AFTER_ABSENCE_DAYS = 7
|
||||
|
||||
|
||||
@dataclass
|
||||
class BehavioralEvent:
|
||||
session_id: str
|
||||
timestamp: datetime
|
||||
message_length: int
|
||||
crisis_score: float = 0.0
|
||||
role: str = "user"
|
||||
|
||||
|
||||
@dataclass
|
||||
class BehavioralSignal:
|
||||
signal_type: str
|
||||
risk_level: str
|
||||
description: str
|
||||
evidence: list[str] = field(default_factory=list)
|
||||
score: float = 0.0
|
||||
|
||||
def as_dict(self) -> dict[str, Any]:
|
||||
return {
|
||||
"signal_type": self.signal_type,
|
||||
"risk_level": self.risk_level,
|
||||
"description": self.description,
|
||||
"evidence": list(self.evidence),
|
||||
"score": self.score,
|
||||
}
|
||||
|
||||
|
||||
class BehavioralTracker:
|
||||
"""In-memory tracker for behavioral crisis signals."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._events_by_session: dict[str, list[BehavioralEvent]] = defaultdict(list)
|
||||
|
||||
def record(
|
||||
self,
|
||||
session_id: str,
|
||||
timestamp: datetime,
|
||||
message_length: int,
|
||||
*,
|
||||
crisis_score: float = 0.0,
|
||||
role: str = "user",
|
||||
) -> None:
|
||||
if timestamp.tzinfo is None:
|
||||
timestamp = timestamp.replace(tzinfo=timezone.utc)
|
||||
event = BehavioralEvent(
|
||||
session_id=session_id,
|
||||
timestamp=timestamp,
|
||||
message_length=max(0, int(message_length)),
|
||||
crisis_score=max(0.0, min(1.0, float(crisis_score))),
|
||||
role=role,
|
||||
)
|
||||
self._events_by_session[session_id].append(event)
|
||||
self._events_by_session[session_id].sort(key=lambda item: item.timestamp)
|
||||
|
||||
def get_risk_signals(self, session_id: str) -> dict[str, Any]:
|
||||
events = [event for event in self._events_by_session.get(session_id, []) if event.role == "user"]
|
||||
if not events:
|
||||
return {
|
||||
"frequency_change": 1.0,
|
||||
"is_late_night": False,
|
||||
"session_length_trend": "stable",
|
||||
"withdrawal_detected": False,
|
||||
"behavioral_score": 0.0,
|
||||
"signals": [],
|
||||
}
|
||||
|
||||
signals: list[BehavioralSignal] = []
|
||||
|
||||
frequency_change = self._compute_frequency_change(events)
|
||||
frequency_signal = self._analyze_frequency(events, frequency_change)
|
||||
if frequency_signal:
|
||||
signals.append(frequency_signal)
|
||||
|
||||
time_signal = self._analyze_time(events)
|
||||
if time_signal:
|
||||
signals.append(time_signal)
|
||||
|
||||
withdrawal_signal = self._analyze_withdrawal(session_id, events)
|
||||
if withdrawal_signal:
|
||||
signals.append(withdrawal_signal)
|
||||
|
||||
absence_signal = self._analyze_return_after_absence(session_id, events)
|
||||
if absence_signal:
|
||||
signals.append(absence_signal)
|
||||
|
||||
escalation_signal = self._analyze_escalation(events)
|
||||
if escalation_signal:
|
||||
signals.append(escalation_signal)
|
||||
|
||||
session_length_trend = self._compute_session_length_trend(session_id, events)
|
||||
behavioral_score = self._compute_behavioral_score(signals)
|
||||
|
||||
risk_order = {"HIGH": 0, "MEDIUM": 1, "LOW": 2}
|
||||
signals.sort(key=lambda item: (risk_order.get(item.risk_level, 9), -item.score))
|
||||
|
||||
return {
|
||||
"frequency_change": frequency_change,
|
||||
"is_late_night": any(item.signal_type == "time" for item in signals),
|
||||
"session_length_trend": session_length_trend,
|
||||
"withdrawal_detected": any(item.signal_type == "withdrawal" for item in signals),
|
||||
"behavioral_score": behavioral_score,
|
||||
"signals": [item.as_dict() for item in signals],
|
||||
}
|
||||
|
||||
def _all_user_events(self) -> list[BehavioralEvent]:
|
||||
events: list[BehavioralEvent] = []
|
||||
for session_events in self._events_by_session.values():
|
||||
events.extend(event for event in session_events if event.role == "user")
|
||||
events.sort(key=lambda item: item.timestamp)
|
||||
return events
|
||||
|
||||
def _daily_count_baseline(self, current_date) -> float:
|
||||
events = self._all_user_events()
|
||||
counts: dict[Any, int] = {}
|
||||
for offset in range(1, ROLLING_BASELINE_DAYS + 1):
|
||||
counts[current_date - timedelta(days=offset)] = 0
|
||||
for event in events:
|
||||
event_date = event.timestamp.date()
|
||||
if event_date in counts:
|
||||
counts[event_date] += 1
|
||||
return sum(counts.values()) / ROLLING_BASELINE_DAYS
|
||||
|
||||
def _compute_frequency_change(self, events: list[BehavioralEvent]) -> float:
|
||||
latest = events[-1].timestamp
|
||||
window_start = latest - timedelta(hours=1)
|
||||
current_hour_count = sum(1 for event in events if event.timestamp >= window_start)
|
||||
baseline_daily = self._daily_count_baseline(latest.date())
|
||||
baseline_hourly = max(baseline_daily / 24.0, 0.1)
|
||||
return round(current_hour_count / baseline_hourly, 2)
|
||||
|
||||
def _analyze_frequency(self, events: list[BehavioralEvent], frequency_change: float) -> BehavioralSignal | None:
|
||||
latest = events[-1].timestamp
|
||||
window_start = latest - timedelta(hours=1)
|
||||
current_hour_count = sum(1 for event in events if event.timestamp >= window_start)
|
||||
if current_hour_count >= 6 and frequency_change >= 3.0:
|
||||
level = "HIGH" if frequency_change >= 6.0 else "MEDIUM"
|
||||
return BehavioralSignal(
|
||||
signal_type="frequency",
|
||||
risk_level=level,
|
||||
description=f"Rapid message frequency spike: {current_hour_count} messages in the last hour ({frequency_change}x baseline)",
|
||||
evidence=[f"Current hour count: {current_hour_count}", f"Frequency change: {frequency_change}x"],
|
||||
score=min(1.0, frequency_change / 8.0),
|
||||
)
|
||||
return None
|
||||
|
||||
def _analyze_time(self, events: list[BehavioralEvent]) -> BehavioralSignal | None:
|
||||
latest = events[-1].timestamp
|
||||
hour = latest.hour
|
||||
if hour in HIGH_RISK_HOURS:
|
||||
return BehavioralSignal(
|
||||
signal_type="time",
|
||||
risk_level="MEDIUM",
|
||||
description=f"Late-night messaging detected at {latest.strftime('%H:%M')}",
|
||||
evidence=[f"Latest message timestamp: {latest.isoformat()}"],
|
||||
score=0.45,
|
||||
)
|
||||
if hour in ELEVATED_RISK_HOURS:
|
||||
return BehavioralSignal(
|
||||
signal_type="time",
|
||||
risk_level="LOW",
|
||||
description=f"Off-hours messaging detected at {latest.strftime('%H:%M')}",
|
||||
evidence=[f"Latest message timestamp: {latest.isoformat()}"],
|
||||
score=0.2,
|
||||
)
|
||||
return None
|
||||
|
||||
def _analyze_withdrawal(self, session_id: str, events: list[BehavioralEvent]) -> BehavioralSignal | None:
|
||||
current_date = events[-1].timestamp.date()
|
||||
baseline_daily = self._daily_count_baseline(current_date)
|
||||
if baseline_daily < 3.0:
|
||||
return None
|
||||
|
||||
current_day_count = sum(1 for event in events if event.timestamp.date() == current_date)
|
||||
current_avg_len = sum(event.message_length for event in events if event.timestamp.date() == current_date) / max(current_day_count, 1)
|
||||
|
||||
prior_events = [
|
||||
event
|
||||
for sid, session_events in self._events_by_session.items()
|
||||
if sid != session_id
|
||||
for event in session_events
|
||||
if event.role == "user" and event.timestamp.date() >= current_date - timedelta(days=ROLLING_BASELINE_DAYS)
|
||||
]
|
||||
if not prior_events:
|
||||
return None
|
||||
prior_avg_len = sum(event.message_length for event in prior_events) / len(prior_events)
|
||||
|
||||
if current_day_count <= max(1, baseline_daily * 0.3):
|
||||
score = 0.55 if current_day_count == 1 else 0.4
|
||||
if current_avg_len < prior_avg_len * 0.5:
|
||||
score += 0.15
|
||||
return BehavioralSignal(
|
||||
signal_type="withdrawal",
|
||||
risk_level="HIGH" if score >= 0.6 else "MEDIUM",
|
||||
description="Sharp drop from recent communication baseline suggests withdrawal/isolation",
|
||||
evidence=[
|
||||
f"Current day count: {current_day_count}",
|
||||
f"7-day daily baseline: {baseline_daily:.2f}",
|
||||
f"Average message length: {current_avg_len:.1f} vs {prior_avg_len:.1f}",
|
||||
],
|
||||
score=min(1.0, score),
|
||||
)
|
||||
return None
|
||||
|
||||
def _analyze_return_after_absence(self, session_id: str, events: list[BehavioralEvent]) -> BehavioralSignal | None:
|
||||
current_start = events[0].timestamp
|
||||
prior_events = [
|
||||
event
|
||||
for sid, session_events in self._events_by_session.items()
|
||||
if sid != session_id
|
||||
for event in session_events
|
||||
if event.role == "user" and event.timestamp < current_start
|
||||
]
|
||||
if not prior_events:
|
||||
return None
|
||||
latest_prior = max(prior_events, key=lambda item: item.timestamp)
|
||||
gap = current_start - latest_prior.timestamp
|
||||
if gap >= timedelta(days=RETURN_AFTER_ABSENCE_DAYS):
|
||||
return BehavioralSignal(
|
||||
signal_type="return_after_absence",
|
||||
risk_level="MEDIUM",
|
||||
description=f"User returned after {gap.days} days of silence",
|
||||
evidence=[f"Last prior activity: {latest_prior.timestamp.isoformat()}"],
|
||||
score=min(1.0, gap.days / 14.0),
|
||||
)
|
||||
return None
|
||||
|
||||
def _analyze_escalation(self, events: list[BehavioralEvent]) -> BehavioralSignal | None:
|
||||
scored = [event for event in events if event.crisis_score > 0]
|
||||
if len(scored) < 3:
|
||||
return None
|
||||
recent = scored[-5:]
|
||||
midpoint = max(1, len(recent) // 2)
|
||||
first_avg = sum(event.crisis_score for event in recent[:midpoint]) / len(recent[:midpoint])
|
||||
second_avg = sum(event.crisis_score for event in recent[midpoint:]) / len(recent[midpoint:])
|
||||
if second_avg >= max(0.4, first_avg * 1.3):
|
||||
return BehavioralSignal(
|
||||
signal_type="escalation",
|
||||
risk_level="HIGH" if second_avg >= 0.65 else "MEDIUM",
|
||||
description=f"Behavioral escalation: crisis score trend rose from {first_avg:.2f} to {second_avg:.2f}",
|
||||
evidence=[f"Recent crisis scores: {[round(event.crisis_score, 2) for event in recent]}"],
|
||||
score=min(1.0, second_avg),
|
||||
)
|
||||
return None
|
||||
|
||||
def _compute_session_length_trend(self, session_id: str, events: list[BehavioralEvent]) -> str:
|
||||
current_duration = (events[-1].timestamp - events[0].timestamp).total_seconds()
|
||||
previous_durations = []
|
||||
current_start = events[0].timestamp
|
||||
for sid, session_events in self._events_by_session.items():
|
||||
if sid == session_id:
|
||||
continue
|
||||
user_events = [event for event in session_events if event.role == "user"]
|
||||
if len(user_events) < 2:
|
||||
continue
|
||||
if user_events[-1].timestamp < current_start - timedelta(days=ROLLING_BASELINE_DAYS):
|
||||
continue
|
||||
previous_durations.append((user_events[-1].timestamp - user_events[0].timestamp).total_seconds())
|
||||
|
||||
if not previous_durations:
|
||||
return "stable"
|
||||
average_duration = sum(previous_durations) / len(previous_durations)
|
||||
if current_duration > average_duration * 1.5:
|
||||
return "increasing"
|
||||
if current_duration < average_duration * 0.5:
|
||||
return "decreasing"
|
||||
return "stable"
|
||||
|
||||
def _compute_behavioral_score(self, signals: list[BehavioralSignal]) -> float:
|
||||
if not signals:
|
||||
return 0.0
|
||||
max_score = max(signal.score for signal in signals)
|
||||
multi_signal_boost = min(0.2, 0.05 * (len(signals) - 1))
|
||||
return round(min(1.0, max_score + multi_signal_boost), 2)
|
||||
@@ -34,7 +34,6 @@ Usage:
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional
|
||||
|
||||
from .behavioral import BehavioralTracker
|
||||
from .detect import CrisisDetectionResult, SCORES
|
||||
|
||||
# Level ordering for comparison (higher = more severe)
|
||||
@@ -53,12 +52,6 @@ class SessionState:
|
||||
is_deescalating: bool = False
|
||||
escalation_rate: float = 0.0 # levels gained per message
|
||||
consecutive_low_messages: int = 0 # for de-escalation tracking
|
||||
behavioral_score: float = 0.0
|
||||
behavioral_signals: List[dict] = field(default_factory=list)
|
||||
frequency_change: float = 1.0
|
||||
is_late_night: bool = False
|
||||
session_length_trend: str = "stable"
|
||||
withdrawal_detected: bool = False
|
||||
|
||||
|
||||
class CrisisSessionTracker:
|
||||
@@ -84,8 +77,6 @@ class CrisisSessionTracker:
|
||||
self._message_count = 0
|
||||
self._level_history: List[str] = []
|
||||
self._consecutive_low = 0
|
||||
self._behavioral_tracker = BehavioralTracker()
|
||||
self._behavioral_session_id = "current-session"
|
||||
|
||||
@property
|
||||
def state(self) -> SessionState:
|
||||
@@ -93,7 +84,6 @@ class CrisisSessionTracker:
|
||||
is_escalating = self._detect_escalation()
|
||||
is_deescalating = self._detect_deescalation()
|
||||
rate = self._compute_escalation_rate()
|
||||
behavioral = self._behavioral_tracker.get_risk_signals(self._behavioral_session_id)
|
||||
|
||||
return SessionState(
|
||||
current_level=self._current_level,
|
||||
@@ -104,29 +94,14 @@ class CrisisSessionTracker:
|
||||
is_deescalating=is_deescalating,
|
||||
escalation_rate=rate,
|
||||
consecutive_low_messages=self._consecutive_low,
|
||||
behavioral_score=behavioral["behavioral_score"],
|
||||
behavioral_signals=behavioral["signals"],
|
||||
frequency_change=behavioral["frequency_change"],
|
||||
is_late_night=behavioral["is_late_night"],
|
||||
session_length_trend=behavioral["session_length_trend"],
|
||||
withdrawal_detected=behavioral["withdrawal_detected"],
|
||||
)
|
||||
|
||||
def record(
|
||||
self,
|
||||
detection: CrisisDetectionResult,
|
||||
*,
|
||||
timestamp=None,
|
||||
message_length: int = 0,
|
||||
role: str = "user",
|
||||
) -> SessionState:
|
||||
def record(self, detection: CrisisDetectionResult) -> SessionState:
|
||||
"""
|
||||
Record a crisis detection result for the current message.
|
||||
|
||||
Returns updated SessionState.
|
||||
"""
|
||||
from datetime import datetime, timezone
|
||||
|
||||
level = detection.level
|
||||
self._message_count += 1
|
||||
self._level_history.append(level)
|
||||
@@ -141,17 +116,6 @@ class CrisisSessionTracker:
|
||||
else:
|
||||
self._consecutive_low = 0
|
||||
|
||||
if role == "user":
|
||||
if timestamp is None:
|
||||
timestamp = datetime.now(timezone.utc)
|
||||
self._behavioral_tracker.record(
|
||||
self._behavioral_session_id,
|
||||
timestamp,
|
||||
message_length=message_length,
|
||||
crisis_score=detection.score,
|
||||
role=role,
|
||||
)
|
||||
|
||||
self._current_level = level
|
||||
return self.state
|
||||
|
||||
@@ -231,22 +195,14 @@ class CrisisSessionTracker:
|
||||
"supportive engagement while remaining vigilant."
|
||||
)
|
||||
|
||||
notes = []
|
||||
|
||||
if s.peak_level in ("CRITICAL", "HIGH") and s.current_level not in ("CRITICAL", "HIGH"):
|
||||
notes.append(
|
||||
f"User previously reached {s.peak_level} crisis level this session (currently {s.current_level}). "
|
||||
return (
|
||||
f"User previously reached {s.peak_level} crisis level "
|
||||
f"this session (currently {s.current_level}). "
|
||||
"Continue with care and awareness of the earlier crisis."
|
||||
)
|
||||
|
||||
if s.behavioral_score >= 0.35 and s.behavioral_signals:
|
||||
signal_names = ", ".join(item["signal_type"] for item in s.behavioral_signals)
|
||||
notes.append(
|
||||
f"Behavioral risk signals detected this session: {signal_names}. "
|
||||
"Use the behavioral context to increase sensitivity and warmth."
|
||||
)
|
||||
|
||||
return " ".join(notes)
|
||||
return ""
|
||||
|
||||
def get_ui_hints(self) -> dict:
|
||||
"""
|
||||
@@ -261,10 +217,6 @@ class CrisisSessionTracker:
|
||||
"session_deescalating": s.is_deescalating,
|
||||
"session_peak_level": s.peak_level,
|
||||
"session_message_count": s.message_count,
|
||||
"behavioral_score": s.behavioral_score,
|
||||
"is_late_night": s.is_late_night,
|
||||
"withdrawal_detected": s.withdrawal_detected,
|
||||
"session_length_trend": s.session_length_trend,
|
||||
}
|
||||
|
||||
if s.is_escalating:
|
||||
@@ -274,20 +226,12 @@ class CrisisSessionTracker:
|
||||
"Consider increasing intervention level."
|
||||
)
|
||||
|
||||
if s.behavioral_score >= 0.5:
|
||||
hints["behavioral_warning"] = True
|
||||
hints.setdefault(
|
||||
"suggested_action",
|
||||
"Behavioral risk patterns are active. Keep the response warm, grounded, and alert."
|
||||
)
|
||||
|
||||
return hints
|
||||
|
||||
|
||||
def check_crisis_with_session(
|
||||
text: str,
|
||||
tracker: CrisisSessionTracker,
|
||||
timestamp=None,
|
||||
) -> dict:
|
||||
"""
|
||||
Convenience: detect crisis and update session state in one call.
|
||||
@@ -299,16 +243,7 @@ def check_crisis_with_session(
|
||||
|
||||
single_result = check_crisis(text)
|
||||
detection = detect_crisis(text)
|
||||
session_state = tracker.record(detection, timestamp=timestamp, message_length=len(text))
|
||||
|
||||
behavioral = {
|
||||
"frequency_change": session_state.frequency_change,
|
||||
"is_late_night": session_state.is_late_night,
|
||||
"session_length_trend": session_state.session_length_trend,
|
||||
"withdrawal_detected": session_state.withdrawal_detected,
|
||||
"behavioral_score": session_state.behavioral_score,
|
||||
"signals": session_state.behavioral_signals,
|
||||
}
|
||||
session_state = tracker.record(detection)
|
||||
|
||||
return {
|
||||
**single_result,
|
||||
@@ -320,6 +255,5 @@ def check_crisis_with_session(
|
||||
"is_deescalating": session_state.is_deescalating,
|
||||
"modifier": tracker.get_session_modifier(),
|
||||
"ui_hints": tracker.get_ui_hints(),
|
||||
"behavioral": behavioral,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -680,7 +680,7 @@ html, body {
|
||||
|
||||
<!-- Footer -->
|
||||
<footer id="footer">
|
||||
<a href="/about.html" aria-label="About The Door">about</a>
|
||||
<a href="/about" aria-label="About The Door">about</a>
|
||||
<button id="safety-plan-btn" aria-label="Open My Safety Plan">my safety plan</button>
|
||||
<button id="clear-chat-btn" aria-label="Clear chat history">clear chat</button>
|
||||
</footer>
|
||||
|
||||
68
reports/2026-04-17-the-door-fleet-work-orders-audit.md
Normal file
68
reports/2026-04-17-the-door-fleet-work-orders-audit.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# The Door Fleet Work Orders Audit — issue #75
|
||||
|
||||
Generated: 2026-04-17T04:10:14Z
|
||||
Source issue: `TRIAGE: The Door - Fleet Work Orders (2026-04-09)`
|
||||
|
||||
## Source Snapshot
|
||||
|
||||
Issue #75 is a dated triage work-order sheet, not a normal feature request. The durable deliverable is a truth-restored audit of the referenced issue and PR set against live forge state.
|
||||
|
||||
## Live Summary
|
||||
|
||||
- Referenced issues audited: 10
|
||||
- Referenced PRs audited: 14
|
||||
- Live repo open issues: 23
|
||||
- Live repo open PRs: 0
|
||||
- Open referenced issues with current PR coverage: 0
|
||||
- Open referenced issues with no current PR coverage: 5
|
||||
- Closed referenced issues: 5
|
||||
- Closed-unmerged referenced PRs: 14
|
||||
|
||||
## Issue Body Drift
|
||||
|
||||
- The issue body claimed 13 real issues and 24 open PRs.
|
||||
- Live repo state now shows 23 open issues and 0 open PRs.
|
||||
- Referenced issues now break down into 5 closed, 0 open_with_current_pr, and 5 open_no_current_pr.
|
||||
- Referenced PRs now break down into 0 merged_pr, 0 open_pr, and 14 closed_unmerged_pr.
|
||||
|
||||
## Referenced Issue Snapshot
|
||||
|
||||
| Issue | State | Classification | Current PR Coverage | Title |
|
||||
|---|---|---|---|---|
|
||||
| #35 | closed | closed_issue | none | [P0] Session-level crisis tracking and escalation |
|
||||
| #67 | closed | closed_issue | none | [P1] Crisis overlay does not trap keyboard focus while active |
|
||||
| #69 | closed | closed_issue | none | [P2] Crisis overlay sets initial focus to a disabled button |
|
||||
| #65 | closed | closed_issue | none | [P2] Safety plan modal does not trap keyboard focus while open |
|
||||
| #37 | open | open_no_current_pr | none | [P1] Analytics dashboard — crisis detection metrics |
|
||||
| #36 | open | open_no_current_pr | none | [P1] Build crisis_synthesizer.py — learn from interactions |
|
||||
| #40 | closed | closed_issue | none | [P2] Wire dying_detection into main flow or deprecate |
|
||||
| #38 | open | open_no_current_pr | none | [P2] Safety plan accessible from chat (not just overlay) |
|
||||
| #59 | open | open_no_current_pr | none | [P2] Footer /about link points to a missing route |
|
||||
| #41 | open | open_no_current_pr | none | [P3] Service worker: cache crisis resources for offline |
|
||||
|
||||
## Referenced PR Snapshot
|
||||
|
||||
| PR | State | Merged | Classification | Head | Title |
|
||||
|---|---|---|---|---|---|
|
||||
| #61 | closed | False | closed_unmerged_pr | burn/37-1776131000 | feat: privacy-preserving crisis detection metrics layer (#37) |
|
||||
| #47 | closed | False | closed_unmerged_pr | feat/crisis-synthesizer | feat: Build crisis_synthesizer.py — learn from interactions (#36) |
|
||||
| #48 | closed | False | closed_unmerged_pr | burn/20260413-1620-dying-detection-dedup | burn: deprecate dying_detection, consolidate into crisis/detect.py |
|
||||
| #50 | closed | False | closed_unmerged_pr | whip/40-1776128804 | fix: deprecate dying_detection and consolidate crisis detection (#40) |
|
||||
| #51 | closed | False | closed_unmerged_pr | queue/40-1776129201 | fix: deprecate dying_detection and consolidate crisis detection (#40) |
|
||||
| #53 | closed | False | closed_unmerged_pr | q/40-1776129480 | fix: deprecate dying_detection and consolidate crisis detection (#40) |
|
||||
| #56 | closed | False | closed_unmerged_pr | triage/40-1776129677 | fix: deprecate dying_detection and consolidate crisis detection (#40) |
|
||||
| #58 | closed | False | closed_unmerged_pr | dawn/40-1776130053 | fix: deprecate dying_detection and consolidate crisis detection (#40) |
|
||||
| #70 | closed | False | closed_unmerged_pr | am/40-1776166469 | fix: deprecate dying_detection and consolidate crisis detection (#40) |
|
||||
| #72 | closed | False | closed_unmerged_pr | am/38-1776166469 | feat: add always-on safety plan access in chat header (#38) |
|
||||
| #62 | closed | False | closed_unmerged_pr | burn/59-1776131200 | fix: point footer about link to /about.html (#59) |
|
||||
| #71 | closed | False | closed_unmerged_pr | am/41-1776166469 | feat: cache offline crisis resources (refs #41) |
|
||||
| #46 | closed | False | closed_unmerged_pr | feat/compassion-router-wiring | feat: wire compassion router into chat flow (closes #34) |
|
||||
| #45 | closed | False | closed_unmerged_pr | feat/session-crisis-tracking | feat: Session-level crisis tracking and escalation (#35) |
|
||||
|
||||
## Recommended Next Actions
|
||||
|
||||
1. Do not trust the original work-order body as live truth; use this audit artifact for current planning.
|
||||
2. Re-triage the open_no_current_pr issues individually before dispatching new work, because the old PR references are now stale.
|
||||
3. Treat closed_unmerged_pr references as historical attempts, not active review lanes.
|
||||
4. If future work orders are needed, generate them from live forge state instead of reusing the 2026-04-09 issue body.
|
||||
5. This audit preserves operator memory; it does not claim all referenced work orders are complete.
|
||||
295
scripts/fleet_work_orders_audit.py
Normal file
295
scripts/fleet_work_orders_audit.py
Normal file
@@ -0,0 +1,295 @@
|
||||
#!/usr/bin/env python3
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from urllib.request import Request, urlopen
|
||||
|
||||
API_BASE = "https://forge.alexanderwhitestone.com/api/v1"
|
||||
ORG = "Timmy_Foundation"
|
||||
DEFAULT_TOKEN_PATH = os.path.expanduser("~/.config/gitea/token")
|
||||
DEFAULT_OUTPUT = "reports/2026-04-17-the-door-fleet-work-orders-audit.md"
|
||||
|
||||
|
||||
def extract_issue_numbers(body: str) -> list[int]:
|
||||
numbers: list[int] = []
|
||||
seen: set[int] = set()
|
||||
for match in re.finditer(r"#(\d+)", body or ""):
|
||||
value = int(match.group(1))
|
||||
if value in seen:
|
||||
continue
|
||||
seen.add(value)
|
||||
numbers.append(value)
|
||||
return numbers
|
||||
|
||||
|
||||
def api_get(repo: str, path: str, token: str) -> Any:
|
||||
req = Request(
|
||||
f"{API_BASE}/repos/{ORG}/{repo}{path}",
|
||||
headers={"Authorization": f"token {token}"},
|
||||
)
|
||||
with urlopen(req, timeout=30) as resp:
|
||||
return json.loads(resp.read().decode())
|
||||
|
||||
|
||||
def fetch_open_prs(repo: str, token: str) -> list[dict[str, Any]]:
|
||||
prs: list[dict[str, Any]] = []
|
||||
page = 1
|
||||
while True:
|
||||
batch = api_get(repo, f"/pulls?state=open&limit=100&page={page}", token)
|
||||
if not batch:
|
||||
break
|
||||
prs.extend(batch)
|
||||
page += 1
|
||||
return prs
|
||||
|
||||
|
||||
def fetch_live_open_issue_count(repo: str, token: str) -> int:
|
||||
total = 0
|
||||
page = 1
|
||||
while True:
|
||||
batch = api_get(repo, f"/issues?state=open&limit=100&page={page}", token)
|
||||
if not batch:
|
||||
break
|
||||
total += sum(1 for item in batch if not item.get("pull_request"))
|
||||
page += 1
|
||||
return total
|
||||
|
||||
|
||||
def parse_claimed_summary(body: str) -> tuple[int | None, int | None]:
|
||||
issue_match = re.search(r"has\s+(\d+)\s+real issues", body or "", flags=re.IGNORECASE)
|
||||
pr_match = re.search(r"and\s+(\d+)\s+open PRs", body or "", flags=re.IGNORECASE)
|
||||
claimed_open_issues = int(issue_match.group(1)) if issue_match else None
|
||||
claimed_open_prs = int(pr_match.group(1)) if pr_match else None
|
||||
return claimed_open_issues, claimed_open_prs
|
||||
|
||||
|
||||
def summarize_open_pr_coverage(issue_num: int, open_prs: list[dict[str, Any]]) -> str:
|
||||
matches: list[str] = []
|
||||
seen: set[int] = set()
|
||||
for pr in open_prs:
|
||||
pr_num = pr["number"]
|
||||
if pr_num in seen:
|
||||
continue
|
||||
text = "\n".join(
|
||||
[
|
||||
pr.get("title") or "",
|
||||
pr.get("body") or "",
|
||||
(pr.get("head") or {}).get("ref") or "",
|
||||
]
|
||||
)
|
||||
if f"#{issue_num}" not in text:
|
||||
continue
|
||||
seen.add(pr_num)
|
||||
matches.append(f"open PR #{pr_num}")
|
||||
return ", ".join(matches) if matches else "none"
|
||||
|
||||
|
||||
def classify_issue_reference(ref_issue: dict[str, Any], open_prs: list[dict[str, Any]]) -> dict[str, Any]:
|
||||
issue_num = ref_issue["number"]
|
||||
state = ref_issue.get("state") or "unknown"
|
||||
coverage = summarize_open_pr_coverage(issue_num, open_prs)
|
||||
if state == "closed":
|
||||
classification = "closed_issue"
|
||||
elif coverage != "none":
|
||||
classification = "open_with_current_pr"
|
||||
else:
|
||||
classification = "open_no_current_pr"
|
||||
return {
|
||||
"number": issue_num,
|
||||
"state": state,
|
||||
"classification": classification,
|
||||
"title": ref_issue.get("title") or "",
|
||||
"current_pr_coverage": coverage,
|
||||
"url": ref_issue.get("html_url") or ref_issue.get("url") or "",
|
||||
}
|
||||
|
||||
|
||||
def classify_pr_reference(repo: str, pr_num: int, token: str) -> dict[str, Any]:
|
||||
pr = api_get(repo, f"/pulls/{pr_num}", token)
|
||||
state = pr.get("state") or "unknown"
|
||||
merged = bool(pr.get("merged"))
|
||||
if merged:
|
||||
classification = "merged_pr"
|
||||
elif state == "open":
|
||||
classification = "open_pr"
|
||||
else:
|
||||
classification = "closed_unmerged_pr"
|
||||
return {
|
||||
"number": pr_num,
|
||||
"state": state,
|
||||
"merged": merged,
|
||||
"classification": classification,
|
||||
"title": pr.get("title") or "",
|
||||
"head": (pr.get("head") or {}).get("ref") or "",
|
||||
"url": pr.get("html_url") or pr.get("url") or "",
|
||||
}
|
||||
|
||||
|
||||
def table(rows: list[dict[str, Any]], columns: list[tuple[str, str]]) -> str:
|
||||
headers = [title for title, _ in columns]
|
||||
keys = [key for _, key in columns]
|
||||
if not rows:
|
||||
return "| None |\n|---|\n| None |"
|
||||
lines = ["| " + " | ".join(headers) + " |", "|" + "|".join(["---"] * len(headers)) + "|"]
|
||||
for row in rows:
|
||||
values: list[str] = []
|
||||
for key in keys:
|
||||
value = row.get(key, "")
|
||||
if key == "number" and value != "":
|
||||
value = f"#{value}"
|
||||
values.append(str(value).replace("\n", " "))
|
||||
lines.append("| " + " | ".join(values) + " |")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def render_report(
    *,
    source_issue: int,
    source_title: str,
    generated_at: str,
    claimed_open_issues: int | None,
    claimed_open_prs: int | None,
    live_open_issues: int,
    live_open_prs: int,
    issue_rows: list[dict[str, Any]],
    pr_rows: list[dict[str, Any]],
) -> str:
    """Render the full markdown audit report for the work-order issue.

    Args:
        source_issue: Number of the work-order issue being audited.
        source_title: Title of that issue, quoted verbatim in the header.
        generated_at: Pre-formatted timestamp string for the header.
        claimed_open_issues: Open-issue count claimed by the issue body,
            or None when the claim could not be parsed.
        claimed_open_prs: Open-PR count claimed by the issue body, or None.
        live_open_issues: Current open-issue count from the live forge.
        live_open_prs: Current open-PR count from the live forge.
        issue_rows: Classified referenced-issue rows (see
            classify_issue_reference).
        pr_rows: Classified referenced-PR rows (see classify_pr_reference).

    Returns:
        The complete report as one markdown string, newline-terminated.
    """
    # Bucket rows by the classification labels assigned upstream; the
    # bucket sizes drive both the summary counts and the drift call-outs.
    open_with_current_pr = [row for row in issue_rows if row["classification"] == "open_with_current_pr"]
    open_no_current_pr = [row for row in issue_rows if row["classification"] == "open_no_current_pr"]
    closed_issues = [row for row in issue_rows if row["classification"] == "closed_issue"]
    merged_prs = [row for row in pr_rows if row["classification"] == "merged_pr"]
    open_pr_refs = [row for row in pr_rows if row["classification"] == "open_pr"]
    closed_unmerged_prs = [row for row in pr_rows if row["classification"] == "closed_unmerged_pr"]

    # Contrast what the issue body claimed with what the forge shows now.
    # Unparseable claims render as the literal string 'unknown'.
    drift_lines = [
        f"- The issue body claimed {claimed_open_issues if claimed_open_issues is not None else 'unknown'} real issues and {claimed_open_prs if claimed_open_prs is not None else 'unknown'} open PRs.",
        f"- Live repo state now shows {live_open_issues} open issues and {live_open_prs} open PRs.",
        f"- Referenced issues now break down into {len(closed_issues)} closed, {len(open_with_current_pr)} open_with_current_pr, and {len(open_no_current_pr)} open_no_current_pr.",
        f"- Referenced PRs now break down into {len(merged_prs)} merged_pr, {len(open_pr_refs)} open_pr, and {len(closed_unmerged_prs)} closed_unmerged_pr.",
    ]

    # Assemble the document as a flat list of lines so section order is
    # explicit; a trailing newline is appended so the file ends cleanly.
    return "\n".join(
        [
            f"# The Door Fleet Work Orders Audit — issue #{source_issue}",
            "",
            f"Generated: {generated_at}",
            f"Source issue: `{source_title}`",
            "",
            "## Source Snapshot",
            "",
            "Issue #75 is a dated triage work-order sheet, not a normal feature request. The durable deliverable is a truth-restored audit of the referenced issue and PR set against live forge state.",
            "",
            "## Live Summary",
            "",
            f"- Referenced issues audited: {len(issue_rows)}",
            f"- Referenced PRs audited: {len(pr_rows)}",
            f"- Live repo open issues: {live_open_issues}",
            f"- Live repo open PRs: {live_open_prs}",
            f"- Open referenced issues with current PR coverage: {len(open_with_current_pr)}",
            f"- Open referenced issues with no current PR coverage: {len(open_no_current_pr)}",
            f"- Closed referenced issues: {len(closed_issues)}",
            f"- Closed-unmerged referenced PRs: {len(closed_unmerged_prs)}",
            "",
            "## Issue Body Drift",
            "",
            *drift_lines,
            "",
            "## Referenced Issue Snapshot",
            "",
            table(
                issue_rows,
                [
                    ("Issue", "number"),
                    ("State", "state"),
                    ("Classification", "classification"),
                    ("Current PR Coverage", "current_pr_coverage"),
                    ("Title", "title"),
                ],
            ),
            "",
            "## Referenced PR Snapshot",
            "",
            table(
                pr_rows,
                [
                    ("PR", "number"),
                    ("State", "state"),
                    ("Merged", "merged"),
                    ("Classification", "classification"),
                    ("Head", "head"),
                    ("Title", "title"),
                ],
            ),
            "",
            "## Recommended Next Actions",
            "",
            "1. Do not trust the original work-order body as live truth; use this audit artifact for current planning.",
            "2. Re-triage the open_no_current_pr issues individually before dispatching new work, because the old PR references are now stale.",
            "3. Treat closed_unmerged_pr references as historical attempts, not active review lanes.",
            "4. If future work orders are needed, generate them from live forge state instead of reusing the 2026-04-09 issue body.",
            "5. This audit preserves operator memory; it does not claim all referenced work orders are complete.",
        ]
    ) + "\n"
|
||||
|
||||
|
||||
def build_audit(repo: str, issue_number: int, token: str) -> tuple[dict[str, Any], list[dict[str, Any]], list[dict[str, Any]]]:
    """Audit every issue/PR referenced by the work-order issue body.

    Returns a ``(metadata, issue_rows, pr_rows)`` triple: metadata holds
    the claimed-vs-live summary counts, while the row lists hold one
    classified record per referenced issue or PR.
    """
    work_order = api_get(repo, f"/issues/{issue_number}", token)
    order_body = work_order.get("body") or ""
    referenced = extract_issue_numbers(order_body)
    claimed_open_issues, claimed_open_prs = parse_claimed_summary(order_body)
    open_prs = fetch_open_prs(repo, token)

    issue_rows: list[dict[str, Any]] = []
    pr_rows: list[dict[str, Any]] = []
    for number in referenced:
        record = api_get(repo, f"/issues/{number}", token)
        # Records carrying a "pull_request" key are PRs; route accordingly.
        if record.get("pull_request"):
            pr_rows.append(classify_pr_reference(repo, number, token))
        else:
            issue_rows.append(classify_issue_reference(record, open_prs))

    metadata = {
        "source_title": work_order.get("title") or "",
        "claimed_open_issues": claimed_open_issues,
        "claimed_open_prs": claimed_open_prs,
        "live_open_issues": fetch_live_open_issue_count(repo, token),
        "live_open_prs": len(open_prs),
    }
    return metadata, issue_rows, pr_rows
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: build the audit and write the markdown report.

    Prints the output path on success and returns 0 as the exit code.
    """
    arg_parser = argparse.ArgumentParser(description="Audit The Door fleet work orders issue against live forge state.")
    arg_parser.add_argument("--repo", default="the-door")
    arg_parser.add_argument("--issue", type=int, default=75)
    arg_parser.add_argument("--token-file", default=DEFAULT_TOKEN_PATH)
    arg_parser.add_argument("--output", default=DEFAULT_OUTPUT)
    opts = arg_parser.parse_args()

    # Timestamp the report first, then read the API token from disk.
    generated_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    token = Path(opts.token_file).read_text(encoding="utf-8").strip()
    metadata, issue_rows, pr_rows = build_audit(opts.repo, opts.issue, token)

    report = render_report(
        source_issue=opts.issue,
        source_title=metadata["source_title"],
        generated_at=generated_at,
        claimed_open_issues=metadata["claimed_open_issues"],
        claimed_open_prs=metadata["claimed_open_prs"],
        live_open_issues=metadata["live_open_issues"],
        live_open_prs=metadata["live_open_prs"],
        issue_rows=issue_rows,
        pr_rows=pr_rows,
    )

    destination = Path(opts.output)
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(report, encoding="utf-8")
    print(destination)
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s integer return value as the process exit code.
    raise SystemExit(main())
|
||||
@@ -1,101 +0,0 @@
|
||||
"""
|
||||
Tests for behavioral crisis pattern detection (#133).
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
from datetime import datetime, timedelta, timezone
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from crisis.session_tracker import CrisisSessionTracker, check_crisis_with_session
|
||||
from crisis.behavioral import BehavioralTracker
|
||||
|
||||
|
||||
class TestBehavioralTracker(unittest.TestCase):
    """Unit tests for BehavioralTracker's per-signal risk detection."""

    def _seed_day(self, tracker, *, session_id, day, count, start_hour=10, message_length=48, crisis_score=0.0):
        """Record `count` messages on one April 2026 day, 10 minutes apart.

        Used to build a multi-day baseline of activity that the tests
        then deviate from.
        """
        base = datetime(2026, 4, day, start_hour, 0, tzinfo=timezone.utc)
        for i in range(count):
            tracker.record(
                session_id,
                base + timedelta(minutes=i * 10),
                message_length=message_length,
                crisis_score=crisis_score,
            )

    def test_frequency_change_uses_seven_day_baseline(self):
        """A burst well above the 7-day baseline must raise a frequency signal."""
        tracker = BehavioralTracker()
        # Seven days of 2 messages/day establishes the rolling baseline.
        for day in range(1, 8):
            self._seed_day(tracker, session_id=f"baseline-{day}", day=day, count=2)

        # Day 8: 8 messages in 8 minutes — a 4x spike over baseline.
        burst_base = datetime(2026, 4, 8, 14, 0, tzinfo=timezone.utc)
        for i in range(8):
            tracker.record(
                "current-session",
                burst_base + timedelta(minutes=i),
                message_length=72,
                crisis_score=0.1,
            )

        summary = tracker.get_risk_signals("current-session")

        self.assertGreater(summary["frequency_change"], 2.0)
        self.assertTrue(any(sig["signal_type"] == "frequency" for sig in summary["signals"]))
        self.assertGreater(summary["behavioral_score"], 0.0)

    def test_late_night_messages_raise_flag(self):
        """Messages in the 2-5 AM window must set the late-night flag."""
        tracker = BehavioralTracker()
        base = datetime(2026, 4, 10, 2, 15, tzinfo=timezone.utc)
        for i in range(3):
            tracker.record(
                "late-night",
                base + timedelta(minutes=i * 7),
                message_length=35,
                crisis_score=0.0,
            )

        summary = tracker.get_risk_signals("late-night")

        self.assertTrue(summary["is_late_night"])
        self.assertTrue(any(sig["signal_type"] == "time" for sig in summary["signals"]))

    def test_withdrawal_detected_after_large_drop_from_baseline(self):
        """A sharp drop from a busy baseline must trigger withdrawal detection."""
        tracker = BehavioralTracker()
        # Busy baseline: 10 long messages per day for seven days.
        for day in range(1, 8):
            self._seed_day(tracker, session_id=f"baseline-{day}", day=day, count=10, message_length=80)

        # Day 9: a single short message — a large drop from baseline.
        tracker.record(
            "withdrawal-session",
            datetime(2026, 4, 9, 11, 0, tzinfo=timezone.utc),
            message_length=18,
            crisis_score=0.0,
        )

        summary = tracker.get_risk_signals("withdrawal-session")

        self.assertTrue(summary["withdrawal_detected"])
        self.assertTrue(any(sig["signal_type"] == "withdrawal" for sig in summary["signals"]))
|
||||
|
||||
|
||||
class TestBehavioralSessionIntegration(unittest.TestCase):
    """Integration: the session check must surface the behavioral summary."""

    def test_check_crisis_with_session_includes_behavioral_summary(self):
        session_tracker = CrisisSessionTracker()
        start = datetime(2026, 4, 20, 2, 0, tzinfo=timezone.utc)

        # Three late-night messages, one minute apart.
        check_crisis_with_session("can't sleep", session_tracker, timestamp=start)
        check_crisis_with_session("still here", session_tracker, timestamp=start + timedelta(minutes=1))
        outcome = check_crisis_with_session("everything feels loud", session_tracker, timestamp=start + timedelta(minutes=2))

        summary = outcome["session"]["behavioral"]
        expected_keys = (
            "frequency_change",
            "is_late_night",
            "session_length_trend",
            "withdrawal_detected",
            "behavioral_score",
        )
        for key in expected_keys:
            self.assertIn(key, summary)
        self.assertTrue(summary["is_late_night"])
        self.assertGreater(summary["behavioral_score"], 0.0)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Allow running this test module directly without a test runner.
    unittest.main()
|
||||
100
tests/test_fleet_work_orders_audit.py
Normal file
100
tests/test_fleet_work_orders_audit.py
Normal file
@@ -0,0 +1,100 @@
|
||||
import importlib.util
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
ROOT = Path(__file__).resolve().parents[1]
|
||||
SCRIPT_PATH = ROOT / "scripts" / "fleet_work_orders_audit.py"
|
||||
REPORT_PATH = ROOT / "reports" / "2026-04-17-the-door-fleet-work-orders-audit.md"
|
||||
|
||||
|
||||
def _load_module():
    """Import the audit script straight from its file path.

    The script lives under scripts/ (not an installed package), so it is
    loaded via an explicit importlib spec instead of a normal import.
    """
    assert SCRIPT_PATH.exists(), f"missing {SCRIPT_PATH.relative_to(ROOT)}"
    spec = importlib.util.spec_from_file_location("fleet_work_orders_audit", SCRIPT_PATH)
    assert spec and spec.loader
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod
|
||||
|
||||
|
||||
def test_extract_issue_numbers_preserves_mixed_issue_and_pr_refs() -> None:
    """Issue and PR numbers must come out mixed, in document order."""
    audit = _load_module()
    markdown = """
## P0 — Session-level crisis tracking (#35)
**PR #61 ready.**
## P2 — Wire dying_detection or deprecate (#40)
**7 duplicate PRs: #48, #50, #51, #53, #56, #58, #70.**
"""

    expected = [35, 61, 40, 48, 50, 51, 53, 56, 58, 70]
    assert audit.extract_issue_numbers(markdown) == expected
|
||||
|
||||
|
||||
def test_render_report_calls_out_issue_body_drift() -> None:
    """render_report on canned rows must include all sections and drift facts."""
    # One closed referenced issue and one still-open issue with no
    # current PR coverage.
    issue_rows = [
        {
            "number": 35,
            "state": "closed",
            "classification": "closed_issue",
            "title": "session tracking",
            "current_pr_coverage": "none",
        },
        {
            "number": 38,
            "state": "open",
            "classification": "open_no_current_pr",
            "title": "safety plan",
            "current_pr_coverage": "none",
        },
    ]
    # A single referenced PR that was closed without merging.
    pr_rows = [
        {
            "number": 61,
            "state": "closed",
            "merged": False,
            "classification": "closed_unmerged_pr",
            "title": "metrics layer",
            "head": "burn/37-123",
        }
    ]

    mod = _load_module()

    # Claimed counts (13 issues / 24 PRs) deliberately exceed the live
    # counts (5 / 0) so the drift section has something to call out.
    report = mod.render_report(
        source_issue=75,
        source_title="TRIAGE: The Door - Fleet Work Orders (2026-04-09)",
        generated_at="2026-04-17T04:00:00Z",
        claimed_open_issues=13,
        claimed_open_prs=24,
        live_open_issues=5,
        live_open_prs=0,
        issue_rows=issue_rows,
        pr_rows=pr_rows,
    )

    assert "## Source Snapshot" in report
    assert "## Live Summary" in report
    assert "## Issue Body Drift" in report
    # Both claimed counts must appear verbatim in the drift text.
    assert "13" in report and "24" in report
    assert "#38" in report
    assert "open_no_current_pr" in report
    assert "#61" in report
    assert "closed_unmerged_pr" in report
    assert "## Referenced Issue Snapshot" in report
    assert "## Referenced PR Snapshot" in report
    assert "## Recommended Next Actions" in report
|
||||
|
||||
|
||||
def test_committed_work_orders_audit_exists_with_required_sections() -> None:
    """The committed report artifact must retain every required heading."""
    text = REPORT_PATH.read_text(encoding="utf-8")

    required_headings = (
        "# The Door Fleet Work Orders Audit — issue #75",
        "## Source Snapshot",
        "## Live Summary",
        "## Issue Body Drift",
        "## Referenced Issue Snapshot",
        "## Referenced PR Snapshot",
        "## Recommended Next Actions",
    )
    absent = [heading for heading in required_headings if heading not in text]
    assert not absent, absent
|
||||
Reference in New Issue
Block a user