Compare commits

..

6 Commits

Author SHA1 Message Date
d018c769bb fix: add gate file rotation to prevent unbounded directory growth
Some checks failed
Architecture Lint / Linter Tests (pull_request) Successful in 25s
PR Checklist / pr-checklist (pull_request) Failing after 4m17s
Smoke Test / smoke (pull_request) Failing after 18s
Validate Config / YAML Lint (pull_request) Failing after 16s
Validate Config / JSON Validate (pull_request) Successful in 15s
Validate Config / Python Syntax & Import Check (pull_request) Failing after 1m28s
Validate Config / Shell Script Lint (pull_request) Failing after 53s
Validate Config / Cron Syntax Check (pull_request) Successful in 11s
Validate Config / Deploy Script Dry Run (pull_request) Successful in 9s
Validate Config / Playbook Schema Validation (pull_request) Successful in 21s
Architecture Lint / Lint Repository (pull_request) Has been cancelled
Validate Config / Python Test Suite (pull_request) Has been cancelled
Closes #674, closes #628

- Age-based cleanup: deletes files older than 7 days
- Count-based cap: keeps at most 50 historical files
- Always preserves eval_gate_latest.json
- Runs automatically after each evaluate_candidate() call
2026-04-15 03:21:06 +00:00
d120526244 fix: add python3 shebang to scripts/visual_pr_reviewer.py (#681) 2026-04-15 02:57:53 +00:00
8596ff761b fix: add python3 shebang to scripts/diagram_meaning_extractor.py (#681) 2026-04-15 02:57:40 +00:00
7553fd4f3e fix: add python3 shebang to scripts/captcha_bypass_handler.py (#681) 2026-04-15 02:57:25 +00:00
71082fe06f fix: add python3 shebang to bin/soul_eval_gate.py (#681) 2026-04-15 02:57:14 +00:00
6d678e938e fix: add python3 shebang to bin/nostr-agent-demo.py (#681) 2026-04-15 02:57:00 +00:00
6 changed files with 61 additions and 146 deletions

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Full Nostr agent-to-agent communication demo - FINAL WORKING
"""

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
"""
Soul Eval Gate — The Conscience of the Training Pipeline
@@ -32,6 +33,11 @@ from pathlib import Path
from typing import Optional
# ── Gate File Rotation ──────────────────────────────────────────────
GATE_FILE_MAX_AGE_DAYS = 7
GATE_FILE_MAX_COUNT = 50
# ── SOUL.md Constraints ──────────────────────────────────────────────
#
# These are the non-negotiable categories from SOUL.md and the
@@ -239,6 +245,9 @@ def evaluate_candidate(
latest_file = gate_dir / "eval_gate_latest.json"
latest_file.write_text(json.dumps(result, indent=2))
# Rotate old gate files to prevent unbounded growth
_rotate_gate_files(gate_dir)
return result
@@ -248,6 +257,48 @@ def _load_json(path: str | Path) -> dict:
return json.loads(Path(path).read_text())
def _rotate_gate_files(gate_dir: Path) -> None:
    """Prune historical gate files so the directory cannot grow without bound.

    Policy:
      - anything older than ``GATE_FILE_MAX_AGE_DAYS`` is removed
      - at most ``GATE_FILE_MAX_COUNT`` historical files are retained
        (oldest are removed first)
      - the ``eval_gate_latest.json`` pointer file is never touched

    Deletion is best-effort: a file that cannot be stat'ed or unlinked is
    simply skipped and will be retried on the next rotation.
    """
    if not gate_dir.exists():
        return

    protected = "eval_gate_latest.json"
    oldest_allowed = datetime.now(timezone.utc).timestamp() - GATE_FILE_MAX_AGE_DAYS * 86400

    # Collect (mtime, path) pairs for every rotatable gate file.
    candidates = []
    for entry in gate_dir.iterdir():
        is_gate_file = entry.name.startswith("eval_gate_") and entry.suffix == ".json"
        if not is_gate_file or entry.name == protected:
            continue
        try:
            candidates.append((entry.stat().st_mtime, entry))
        except OSError:
            # File vanished or is unreadable — ignore it.
            continue

    candidates.sort(key=lambda pair: pair[0])  # oldest first

    removed = 0
    for stamp, entry in candidates:
        too_old = stamp < oldest_allowed
        over_cap = len(candidates) - removed > GATE_FILE_MAX_COUNT
        if not (too_old or over_cap):
            continue
        try:
            entry.unlink()
        except OSError:
            pass  # best-effort cleanup; retried on the next run
        else:
            removed += 1
def _find_category_score(
sessions: dict[str, dict],
category: str,

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision

View File

@@ -4,10 +4,6 @@
# Checks provider health, pipeline progress, token budget, and interactive load.
# Starts the highest-priority incomplete pipeline that can run.
#
# FIX #650: Pipeline states are date-aware. A "complete" or "failed" state from
# a previous day is treated as stale (not_started) so pipelines can re-run daily.
# Running states older than 6 hours are also treated as stale (likely crashed).
#
# Usage:
# ./scripts/nightly-pipeline-scheduler.sh # Normal run
# ./scripts/nightly-pipeline-scheduler.sh --dry-run # Show what would start
@@ -54,67 +50,6 @@ ensure_dirs() {
log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | tee -a "$LOG_FILE"; }
# --- FIX #650: Staleness detection ---
#
# A pipeline state is "stale" if:
# - complete/failed: state was set on a different calendar day
# - running: state was set more than 6 hours ago (likely crashed)
#
# Stale states are treated as not_started, allowing the pipeline to re-run.
# Echo today's date as YYYY-MM-DD (compared against state-entry timestamps).
today_date() { date +%Y-%m-%d; }

# state_is_stale PIPELINE
#
# Exit 0 when PIPELINE's recorded state in $STATE_FILE is fresh, 1 when stale:
#   - complete/failed: stale if recorded on a different calendar day
#   - running:         stale if recorded more than 6 hours ago (likely crashed)
#   - any non-not_started state with a missing/unparseable timestamp is stale
# A missing state file or not_started entry is NOT stale. Diagnostic output
# from the embedded Python goes to stderr and is discarded below.
# NOTE(review): $STATE_FILE and the pipeline name are interpolated by the
# shell into the Python source — assumes they contain no single quotes.
state_is_stale() {
    local pipeline="$1"
    python3 -c "
import json, os, sys
from datetime import datetime, timedelta
path = '$STATE_FILE'
today = '$(today_date)'
if not os.path.exists(path):
    sys.exit(0) # no state file = not stale (not_started)
with open(path) as f:
    d = json.load(f)
entry = d.get('$pipeline', {})
state = entry.get('state', 'not_started')
updated = entry.get('updated', '')
if state == 'not_started':
    sys.exit(0) # not stale
if not updated:
    sys.exit(1) # no timestamp = treat as stale
try:
    state_date = updated[:10] # YYYY-MM-DD from ISO timestamp
    state_time = datetime.fromisoformat(updated.replace('Z', '+00:00'))
except (ValueError, IndexError):
    sys.exit(1) # unparseable = stale
if state in ('complete', 'failed'):
    # Stale if not from today
    if state_date != today:
        print(f'STALE: {state} from {state_date} (today is {today})', file=sys.stderr)
        sys.exit(1)
    sys.exit(0) # today's state is fresh
if state == 'running':
    # Stale if older than 6 hours (likely crashed)
    now = datetime.now(state_time.tzinfo)
    age_hours = (now - state_time).total_seconds() / 3600
    if age_hours > 6:
        print(f'STALE: running for {age_hours:.1f}h (max 6h)', file=sys.stderr)
        sys.exit(1)
    sys.exit(0) # recently started
sys.exit(0)
" 2>/dev/null
    return $?
}
get_budget_used_today() {
if [[ -f "$BUDGET_FILE" ]]; then
local today=$(date +%Y-%m-%d)
@@ -178,13 +113,9 @@ with open(path, 'w') as f:
"
}
# FIX #650: is_pipeline_complete checks staleness
is_pipeline_complete() {
local pipeline="$1"
# If stale, it's not complete
if ! state_is_stale "$pipeline" 2>/dev/null; then
# Fresh state — check if actually complete
python3 -c "
python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
@@ -195,16 +126,11 @@ else:
state = d.get('$pipeline', {}).get('state', 'not_started')
print('true' if state == 'complete' else 'false')
" 2>/dev/null || echo false
else
echo false # Stale = not complete
fi
}
# FIX #650: is_pipeline_running checks staleness
is_pipeline_running() {
local pipeline="$1"
if ! state_is_stale "$pipeline" 2>/dev/null; then
python3 -c "
python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
@@ -215,9 +141,6 @@ else:
state = d.get('$pipeline', {}).get('state', 'not_started')
print('true' if state == 'running' else 'false')
" 2>/dev/null || echo false
else
echo false # Stale = not running
fi
}
check_dependency() {
@@ -349,57 +272,6 @@ with open(path, 'w') as f:
fi
}
# FIX #650: Daily reset — purge stale states at the start of each run
# reset_stale_states
#
# FIX #650: purge stale pipeline entries from $STATE_FILE at the start of
# each run so pipelines can re-run daily. An entry is stale when:
#   - complete/failed: recorded on a previous calendar day
#   - running:         recorded more than 6 hours ago (likely crashed)
#   - any of the above with a missing or unparseable timestamp
# These rules mirror state_is_stale(); previously an empty 'updated' field
# was silently kept here, which left such entries purgeable by one function
# but not the other. Python stderr is appended to $LOG_FILE.
reset_stale_states() {
    if [[ ! -f "$STATE_FILE" ]]; then
        return
    fi
    python3 -c "
import json, os, sys
from datetime import datetime
path = '$STATE_FILE'
today = '$(today_date)'
with open(path) as f:
    d = json.load(f)
changed = False
cleaned = []
for name, entry in list(d.items()):
    state = entry.get('state', '')
    updated = entry.get('updated', '')
    if state in ('complete', 'failed'):
        # Stale unless recorded today; a missing timestamp is also stale,
        # matching state_is_stale().
        if not updated or updated[:10] != today:
            del d[name]
            changed = True
            cleaned.append(name)
    elif state == 'running':
        if not updated:
            del d[name]
            changed = True
            cleaned.append(f'{name}(no-timestamp)')
        else:
            try:
                state_time = datetime.fromisoformat(updated.replace('Z', '+00:00'))
                now = datetime.now(state_time.tzinfo)
                age_hours = (now - state_time).total_seconds() / 3600
                if age_hours > 6:
                    del d[name]
                    changed = True
                    cleaned.append(f'{name}(stale-running)')
            except (ValueError, IndexError):
                del d[name]
                changed = True
                cleaned.append(f'{name}(bad-timestamp)')
if changed:
    with open(path, 'w') as f:
        json.dump(d, f, indent=2)
    # Join outside the f-string: reusing the same quote character inside an
    # f-string replacement field is a SyntaxError before Python 3.12.
    names = ', '.join(cleaned)
    print(f'Reset {len(cleaned)} stale pipelines: {names}')
else:
    print('No stale pipeline states')
" 2>>"$LOG_FILE"
}
# --- Main ---
main() {
local mode="${1:-run}"
@@ -407,9 +279,6 @@ main() {
log "=== Pipeline Scheduler ($mode) ==="
# FIX #650: Reset stale states first
reset_stale_states
# Check 1: Is inference available?
if ! check_inference_available; then
log "No inference provider available. Skipping all pipelines."
@@ -458,20 +327,11 @@ else:
print(d.get('$name', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
# Check staleness for display
if [[ "$state" == "complete" || "$state" == "failed" || "$state" == "running" ]]; then
if ! state_is_stale "$name" 2>/dev/null; then
: # fresh
else
state="${state} (stale)"
fi
fi
local color=$NC
case "$state" in
running*) color=$YELLOW ;;
complete*) color=$GREEN ;;
failed*) color=$RED ;;
running) color=$YELLOW ;;
complete) color=$GREEN ;;
failed) color=$RED ;;
esac
printf " %-25s %b%s%b (max: %s tokens, dep: %s)\n" "$name" "$color" "$state" "$NC" "$max_tokens" "$dep"
done
@@ -486,7 +346,7 @@ else:
for entry in "${PIPELINES[@]}"; do
IFS='|' read -r name script max_tokens dep <<< "$entry"
# Skip if already running or complete (staleness already handled above)
# Skip if already running or complete
if [[ "$(is_pipeline_running $name)" == "true" ]]; then
log "SKIP $name: already running"
continue

View File

@@ -1,3 +1,4 @@
#!/usr/bin/env python3
import json
from hermes_tools import browser_navigate, browser_vision