Some checks are pending
CI / validate (pull_request) Waiting to run
Creates the foundational state-tracking and validation infrastructure for Epic #842 (Allegro Self-Improvement). Files added: - allegro-wake-checklist.md — real state check on every wakeup - allegro-lane.md — lane boundaries and empty-lane protocol - allegro-cycle-state.json — crash recovery and multi-cycle tracking - allegro-hands-off-registry.json — 24-hour locks on STOPPED/FINE entities - allegro-failure-log.md — verbal reflection on failures - allegro-handoff-template.md — validated deliverables and context handoffs - burn-mode-validator.py — end-of-cycle scoring script (6 criteria) Sub-issues created: #843 #844 #845 #846 #847 #848 #849 #850
122 lines
3.9 KiB
Python
Executable File
122 lines
3.9 KiB
Python
Executable File
#!/usr/bin/env python3
|
|
"""
|
|
Allegro Burn Mode Validator
|
|
Scores each cycle across 6 criteria.
|
|
Run at the end of every cycle and append the score to the cycle log.
|
|
"""
|
|
|
|
import json
|
|
import os
|
|
import sys
|
|
from datetime import datetime, timezone
|
|
|
|
# Cycle log: scanned for wake/state-check markers; validator scores are appended here.
LOG_PATH = os.path.expanduser("~/.hermes/burn-logs/allegro.log")
# JSON cycle-state file; the last cycle entry's "evidence" field drives the artifact criteria.
STATE_PATH = os.path.expanduser("~/.hermes/allegro-cycle-state.json")
# Markdown failure log, inspected heuristically for stop-compliance reflection.
FAILURE_LOG_PATH = os.path.expanduser("~/.hermes/allegro-failure-log.md")
|
|
|
|
|
|
def score_cycle(log_path=None, state_path=None, failure_log_path=None):
    """Score the current cycle across 6 binary criteria and append the result to the cycle log.

    Criteria 3, 4 and 6 default to a passing score of 1 and rely on manual
    flagging/audit; criteria 1, 2 and 5 are checked against the files below.

    Args:
        log_path: Cycle log scanned for a state-check marker; the score is
            appended here. Defaults to LOG_PATH.
        state_path: JSON cycle-state file whose last cycle entry is checked
            for evidence. Defaults to STATE_PATH.
        failure_log_path: Markdown failure log (currently only inspected
            heuristically). Defaults to FAILURE_LOG_PATH.

    Returns:
        int: Total score in the range 0-6 (one point per criterion).
    """
    # Resolve defaults lazily so module constants are only needed when used.
    log_path = LOG_PATH if log_path is None else log_path
    state_path = STATE_PATH if state_path is None else state_path
    failure_log_path = FAILURE_LOG_PATH if failure_log_path is None else failure_log_path

    now = datetime.now(timezone.utc).isoformat()
    scores = {
        "state_check_completed": 0,
        "tangible_artifact": 0,
        "stop_compliance": 1,  # Default to 1; docked only if failure detected
        "lane_boundary_respect": 1,  # Default to 1
        "evidence_attached": 0,
        "reflection_logged_if_failure": 1,  # Default to 1
    }

    notes = []

    # 1. State check completed? Look for a marker in the last 20 non-blank log lines.
    if os.path.exists(log_path):
        with open(log_path, "r", encoding="utf-8") as f:
            lines = f.readlines()
        if lines:
            last_lines = [l for l in lines[-20:] if l.strip()]
            for line in last_lines:
                if "State check complete" in line or "WAKE" in line:
                    scores["state_check_completed"] = 1
                    break
            else:  # for/else: no marker found in any candidate line
                notes.append("No state check log line found in last 20 log lines.")
        else:
            notes.append("Cycle log is empty.")
    else:
        notes.append("Cycle log does not exist.")

    # 2. Tangible artifact? Non-empty evidence on the last cycle entry counts;
    #    a documented abort (status == "aborted" with evidence) also counts.
    artifact_found = False
    if os.path.exists(state_path):
        try:
            with open(state_path, "r", encoding="utf-8") as f:
                state = json.load(f)
            cycles = state.get("cycles", [])
            if cycles:
                last = cycles[-1]
                evidence = last.get("evidence", "")
                if evidence and evidence.strip():
                    artifact_found = True
                if last.get("status", "") == "aborted" and evidence:
                    artifact_found = True  # Documented abort counts
        except Exception as e:
            notes.append(f"Could not read cycle state: {e}")
    if artifact_found:
        scores["tangible_artifact"] = 1
    else:
        notes.append("No tangible artifact or documented abort found in cycle state.")

    # 3. Stop compliance (check failure log for recent un-reflected stops)
    if os.path.exists(failure_log_path):
        with open(failure_log_path, "r", encoding="utf-8") as f:
            content = f.read()
        # Heuristic: if failure log mentions stop command and no corrective action verification
        # This is a simple check; human audit is the real source of truth
        if "Stop command" in content and "Verification Date" in content:
            pass  # Assume compliance unless new entry added today without reflection
        # We default to 1 and rely on manual flagging for now

    # 4. Lane boundary respect — default 1, flagged manually if needed

    # 5. Evidence attached? Tied to the same evidence field as criterion 2.
    if artifact_found:
        scores["evidence_attached"] = 1
    else:
        notes.append("Evidence missing.")

    # 6. Reflection logged if failure?
    # Default 1; if a failure occurred this cycle, manual check required

    total = sum(scores.values())
    max_score = len(scores)  # one point per criterion

    # Append the score to the cycle log. Create the log directory first —
    # the original crashed here on a fresh install with no burn-logs dir.
    log_dir = os.path.dirname(log_path)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    with open(log_path, "a", encoding="utf-8") as f:
        f.write(f"[{now}] VALIDATOR — Score: {total}/{max_score}\n")
        for k, v in scores.items():
            f.write(f" {k}: {v}\n")
        if notes:
            f.write(f" notes: {' | '.join(notes)}\n")

    print(f"Burn mode score: {total}/{max_score}")
    if notes:
        print("Notes:")
        for n in notes:
            print(f" - {n}")

    return total
|
|
|
|
|
|
if __name__ == "__main__":
    # Gate for CI/automation: exit 0 on a passing score (5 of 6 or better), 1 otherwise.
    sys.exit(0 if score_cycle() >= 5 else 1)
|