Compare commits


1 Commit

Author SHA1 Message Date
acf1ea0d78 fix: pipeline_state.json daily reset — timestamp-based staleness (#650)
Some checks failed
Architecture Lint / Linter Tests (pull_request) Failing after 19s
Smoke Test / smoke (pull_request) Failing after 17s
Validate Config / YAML Lint (pull_request) Failing after 17s
Validate Config / JSON Validate (pull_request) Successful in 27s
Validate Config / Python Syntax & Import Check (pull_request) Failing after 2m17s
PR Checklist / pr-checklist (pull_request) Failing after 4m3s
Validate Config / Cron Syntax Check (pull_request) Successful in 8s
Validate Config / Shell Script Lint (pull_request) Failing after 48s
Validate Config / Deploy Script Dry Run (pull_request) Successful in 8s
Validate Config / Playbook Schema Validation (pull_request) Successful in 16s
Architecture Lint / Lint Repository (pull_request) Has been cancelled
Validate Config / Python Test Suite (pull_request) Has been cancelled
Pipeline states from previous days are now treated as stale:
- complete/failed from yesterday → not_started (allows re-run)
- running for >6h → not_started (likely crashed)

Changes:
- Added state_is_stale() function checking state date vs today
- Added reset_stale_states() called at start of each scheduler run
- is_pipeline_complete() and is_pipeline_running() check staleness
- --status shows '(stale)' indicator for outdated states
- Running states auto-expire after 6 hours (crash recovery)
2026-04-15 01:29:18 +00:00
3 changed files with 146 additions and 227 deletions
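A minimal standalone sketch of the staleness rules described in the commit message, assuming the state-file shape visible in the diff below ({name: {"state": ..., "updated": <ISO-8601 timestamp>}}). This is an illustration, not code from the commit: the sample entries are invented, and the date comparison here uses UTC where the script itself compares against the host's local date.

# Sketch (assumed helper, not part of the commit): mirrors the state_is_stale logic.
from datetime import datetime, timezone

def is_stale(entry, now):
    """True = ignore this entry and treat the pipeline as not_started."""
    state = entry.get("state", "not_started")
    updated = entry.get("updated", "")
    if state == "not_started":
        return False
    if not updated:
        return True  # no timestamp = treat as stale
    ts = datetime.fromisoformat(updated.replace("Z", "+00:00"))
    if state in ("complete", "failed"):
        return ts.date() != now.date()  # yesterday's result -> eligible to re-run
    if state == "running":
        return (now - ts).total_seconds() > 6 * 3600  # >6h = likely crashed
    return False

now = datetime(2026, 4, 15, 1, 0, tzinfo=timezone.utc)
print(is_stale({"state": "complete", "updated": "2026-04-14T23:50:00Z"}, now))  # True: from yesterday
print(is_stale({"state": "running", "updated": "2026-04-14T18:00:00Z"}, now))   # True: running for 7h
print(is_stale({"state": "running", "updated": "2026-04-14T22:00:00Z"}, now))   # False: running for 3h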

scripts/nightly-pipeline-scheduler.sh

@@ -4,6 +4,10 @@
# Checks provider health, pipeline progress, token budget, and interactive load.
# Starts the highest-priority incomplete pipeline that can run.
#
# FIX #650: Pipeline states are date-aware. A "complete" or "failed" state from
# a previous day is treated as stale (not_started) so pipelines can re-run daily.
# Running states older than 6 hours are also treated as stale (likely crashed).
#
# Usage:
#   ./scripts/nightly-pipeline-scheduler.sh             # Normal run
#   ./scripts/nightly-pipeline-scheduler.sh --dry-run   # Show what would start
@@ -50,6 +54,67 @@ ensure_dirs() {
log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | tee -a "$LOG_FILE"; }
# --- FIX #650: Staleness detection ---
#
# A pipeline state is "stale" if:
#   - complete/failed: state was set on a different calendar day
#   - running: state was set more than 6 hours ago (likely crashed)
#
# Stale states are treated as not_started, allowing the pipeline to re-run.

today_date() { date +%Y-%m-%d; }

# Returns 0 (true) if the state is stale, 1 (false) if it is fresh,
# matching how the is_pipeline_* callers below test it.
state_is_stale() {
    local pipeline="$1"
    python3 -c "
import json, os, sys
from datetime import datetime

path = '$STATE_FILE'
today = '$(today_date)'
if not os.path.exists(path):
    sys.exit(1)  # no state file = not stale (not_started)
with open(path) as f:
    d = json.load(f)
entry = d.get('$pipeline', {})
state = entry.get('state', 'not_started')
updated = entry.get('updated', '')
if state == 'not_started':
    sys.exit(1)  # not stale
if not updated:
    sys.exit(0)  # no timestamp = treat as stale
try:
    state_date = updated[:10]  # YYYY-MM-DD from ISO timestamp
    state_time = datetime.fromisoformat(updated.replace('Z', '+00:00'))
except ValueError:
    sys.exit(0)  # unparseable = stale
if state in ('complete', 'failed'):
    # Stale if not from today
    if state_date != today:
        print(f'STALE: {state} from {state_date} (today is {today})', file=sys.stderr)
        sys.exit(0)
    sys.exit(1)  # today's state is fresh
if state == 'running':
    # Stale if older than 6 hours (likely crashed)
    now = datetime.now(state_time.tzinfo)
    age_hours = (now - state_time).total_seconds() / 3600
    if age_hours > 6:
        print(f'STALE: running for {age_hours:.1f}h (max 6h)', file=sys.stderr)
        sys.exit(0)
    sys.exit(1)  # recently started
sys.exit(1)
" 2>/dev/null
}
get_budget_used_today() {
    if [[ -f "$BUDGET_FILE" ]]; then
        local today=$(date +%Y-%m-%d)
@@ -113,9 +178,13 @@ with open(path, 'w') as f:
"
}
# FIX #650: is_pipeline_complete checks staleness
is_pipeline_complete() {
    local pipeline="$1"
    # If stale, it's not complete
    if ! state_is_stale "$pipeline" 2>/dev/null; then
        # Fresh state — check if actually complete
        python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
@@ -126,11 +195,16 @@ else:
    state = d.get('$pipeline', {}).get('state', 'not_started')
    print('true' if state == 'complete' else 'false')
" 2>/dev/null || echo false
    else
        echo false  # Stale = not complete
    fi
}
# FIX #650: is_pipeline_running checks staleness
is_pipeline_running() {
    local pipeline="$1"
    if ! state_is_stale "$pipeline" 2>/dev/null; then
        python3 -c "
import json, os
path = '$STATE_FILE'
if not os.path.exists(path):
@@ -141,6 +215,9 @@ else:
    state = d.get('$pipeline', {}).get('state', 'not_started')
    print('true' if state == 'running' else 'false')
" 2>/dev/null || echo false
    else
        echo false  # Stale = not running
    fi
}
check_dependency() {
@@ -272,6 +349,57 @@ with open(path, 'w') as f:
    fi
}

# FIX #650: Daily reset — purge stale states at the start of each run
reset_stale_states() {
    if [[ ! -f "$STATE_FILE" ]]; then
        return
    fi
    python3 -c "
import json
from datetime import datetime

path = '$STATE_FILE'
today = '$(today_date)'
with open(path) as f:
    d = json.load(f)
changed = False
cleaned = []
for name, entry in list(d.items()):
    state = entry.get('state', '')
    updated = entry.get('updated', '')
    if state in ('complete', 'failed') and updated:
        state_date = updated[:10]
        if state_date != today:
            del d[name]
            changed = True
            cleaned.append(name)
    elif state == 'running' and updated:
        try:
            state_time = datetime.fromisoformat(updated.replace('Z', '+00:00'))
            now = datetime.now(state_time.tzinfo)
            age_hours = (now - state_time).total_seconds() / 3600
            if age_hours > 6:
                del d[name]
                changed = True
                cleaned.append(f'{name}(stale-running)')
        except ValueError:
            del d[name]
            changed = True
            cleaned.append(f'{name}(bad-timestamp)')
if changed:
    with open(path, 'w') as f:
        json.dump(d, f, indent=2)
    names = ', '.join(cleaned)
    print(f'Reset {len(cleaned)} stale pipelines: {names}')
else:
    print('No stale pipeline states')
" 2>>"$LOG_FILE"
}
# --- Main ---
main() {
    local mode="${1:-run}"
@@ -279,6 +407,9 @@ main() {
    log "=== Pipeline Scheduler ($mode) ==="

    # FIX #650: Reset stale states first
    reset_stale_states

    # Check 1: Is inference available?
    if ! check_inference_available; then
        log "No inference provider available. Skipping all pipelines."
@@ -327,11 +458,20 @@ else:
    print(d.get('$name', {}).get('state', 'not_started'))
" 2>/dev/null || echo "not_started")
        # Check staleness for display
        if [[ "$state" == "complete" || "$state" == "failed" || "$state" == "running" ]]; then
            if state_is_stale "$name" 2>/dev/null; then
                state="${state} (stale)"
            fi
        fi
        local color=$NC
        case "$state" in
            running*)  color=$YELLOW ;;
            complete*) color=$GREEN ;;
            failed*)   color=$RED ;;
        esac
        printf " %-25s %b%s%b (max: %s tokens, dep: %s)\n" "$name" "$color" "$state" "$NC" "$max_tokens" "$dep"
    done
@@ -346,7 +486,7 @@ else:
    for entry in "${PIPELINES[@]}"; do
        IFS='|' read -r name script max_tokens dep <<< "$entry"

        # Skip if already running or complete (staleness already handled above)
        if [[ "$(is_pipeline_running "$name")" == "true" ]]; then
            log "SKIP $name: already running"
            continue

scripts/pr_triage.py (deleted)

@@ -1,176 +0,0 @@
#!/usr/bin/env python3
"""PR Triage Automation -- Categorize, deduplicate, report (#659)."""
import argparse, json, os, re, sys, subprocess
from collections import Counter, defaultdict
from datetime import datetime
from urllib.request import Request, urlopen
from urllib.error import HTTPError


def _token():
    t = os.environ.get("GITEA_TOKEN", "")
    if not t:
        p = os.path.expanduser("~/.config/gitea/token")
        if os.path.exists(p):
            t = open(p).read().strip()
    return t


def _api(url, token, method="GET", data=None):
    h = {"Authorization": "token " + token, "Accept": "application/json"}
    body = json.dumps(data).encode() if data else None
    if data:
        h["Content-Type"] = "application/json"
    req = Request(url, data=body, headers=h, method=method)
    try:
        return json.loads(urlopen(req, timeout=30).read())
    except HTTPError:
        return None


def fetch_prs(base, token, owner, repo):
    prs, page = [], 1
    while True:
        b = _api(base + "/api/v1/repos/" + owner + "/" + repo + "/pulls?state=open&limit=50&page=" + str(page), token)
        if not b:
            break
        prs.extend(b)
        if len(b) < 50:
            break
        page += 1
    return prs


def fetch_issues(base, token, owner, repo):
    iss, page = {}, 1
    while True:
        b = _api(base + "/api/v1/repos/" + owner + "/" + repo + "/issues?state=open&limit=50&page=" + str(page), token)
        if not b:
            break
        for i in b:
            if "pull_request" not in i:
                iss[i["number"]] = i
        if len(b) < 50:
            break
        page += 1
    return iss


def categorize(pr):
    c = (pr.get("title", "") + " " + pr.get("body", "") + " " + " ".join(l.get("name", "") for l in pr.get("labels", []))).lower()
    for kw, cat in [("training data", "training-data"), ("dpo", "training-data"), ("grpo", "training-data"),
                    ("fix:", "bug-fix"), ("bug", "bug-fix"), ("hotfix", "bug-fix"),
                    ("feat:", "feature"), ("feature", "feature"),
                    ("refactor", "maintenance"), ("cleanup", "maintenance"),
                    ("doc", "documentation"), ("test", "testing"), ("infra", "infrastructure")]:
        if kw in c:
            return cat
    return "other"


def refs(pr):
    return [int(m) for m in re.findall(r"#(\d+)", pr.get("title", "") + " " + pr.get("body", ""))]


def find_duplicates(prs):
    by = defaultdict(list)
    for p in prs:
        for r in refs(p):
            by[r].append(p)
    return [g for g in by.values() if len(g) > 1]


def health(pr, issues):
    r = refs(pr)
    created = datetime.fromisoformat(pr["created_at"].replace("Z", "+00:00"))
    updated = datetime.fromisoformat(pr["updated_at"].replace("Z", "+00:00"))
    now = datetime.now(created.tzinfo)
    return {
        "pr": pr["number"], "title": pr["title"], "head": pr["head"]["ref"],
        "category": categorize(pr), "refs": r,
        "open": [x for x in r if x in issues], "closed": [x for x in r if x not in issues],
        "age": (now - created).days, "stale": (now - updated).days,
        "mergeable": pr.get("mergeable"), "author": pr.get("user", {}).get("login", ""),
    }


def report(repo, checks, dups):
    lines = ["# PR Triage -- " + repo,
             "Generated: " + datetime.now().strftime("%Y-%m-%d %H:%M"),
             "Open PRs: " + str(len(checks)), "", "## Summary", ""]
    cats = Counter(h["category"] for h in checks)
    lines.append("| Category | Count |")
    lines.append("|----------|-------|")
    for c, n in cats.most_common():
        lines.append("| " + c + " | " + str(n) + " |")
    stale = [h for h in checks if h["stale"] > 7]
    lines.extend(["", "Stale (>7d): " + str(len(stale)),
                  "Duplicate groups: " + str(len(dups)), ""])
    if dups:
        lines.append("## Duplicates")
        for g in dups:
            rs = set()
            for p in g:
                rs.update(refs(p))
            lines.append("Issues " + ", ".join("#" + str(r) for r in sorted(rs)) + ":")
            for p in g:
                lines.append(" - #" + str(p["number"]) + ": " + p["title"])
            lines.append("")
    if stale:
        lines.append("## Stale (>7d)")
        for h in sorted(stale, key=lambda x: x["stale"], reverse=True):
            lines.append("- #" + str(h["pr"]) + ": " + h["title"] + " -- " + str(h["stale"]) + "d")
        lines.append("")
    lines.append("## All PRs")
    lines.append("| # | Title | Category | Age | Stale | Merge |")
    lines.append("|---|-------|----------|-----|-------|-------|")
    for h in sorted(checks, key=lambda x: x["pr"]):
        m = "Y" if h["mergeable"] else ("N" if h["mergeable"] is False else "?")
        s = str(h["stale"]) + "d" if h["stale"] > 7 else "-"
        lines.append("| " + str(h["pr"]) + " | " + h["title"][:50] + " | " + h["category"] +
                     " | " + str(h["age"]) + "d | " + s + " | " + m + " |")
    return "\n".join(lines)


def main():
    p = argparse.ArgumentParser(description="PR Triage Automation")
    p.add_argument("--base-url", default="https://forge.alexanderwhitestone.com")
    p.add_argument("--owner", default="Timmy_Foundation")
    p.add_argument("--repo", default="")
    p.add_argument("--json", action="store_true", dest="js")
    p.add_argument("--output", default="")
    a = p.parse_args()
    token = _token()
    if not token:
        print("No token"); sys.exit(1)
    repo = a.repo
    if not repo:
        try:
            remote = subprocess.check_output(["git", "remote", "get-url", "origin"], text=True).strip()
            m = re.search(r"[/:](\w[\w-]*)/(\w[\w-]*?)(?:\.git)?$", remote)
            if m:
                a.owner, repo = m.group(1), m.group(2)
        except Exception:
            pass
    if not repo:
        print("No repo specified"); sys.exit(1)
    print("Triaging " + a.owner + "/" + repo + "...", file=sys.stderr)
    prs = fetch_prs(a.base_url, token, a.owner, repo)
    issues = fetch_issues(a.base_url, token, a.owner, repo)
    checks = [health(pr, issues) for pr in prs]
    dups = find_duplicates(prs)
    if a.js:
        print(json.dumps({"repo": repo, "prs": checks,
                          "duplicates": [[{"number": p["number"], "title": p["title"]} for p in g] for g in dups]},
                         indent=2))
    else:
        r = report(repo, checks, dups)
        print(r)
        if a.output:
            with open(a.output, "w") as f:
                f.write(r)
    print("\n" + str(len(checks)) + " PRs, " + str(len(dups)) + " duplicate groups", file=sys.stderr)


if __name__ == "__main__":
    main()
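For context, a worked example of the duplicate detection the deleted script performed: refs() and find_duplicates() are copied verbatim from the file above, while the sample PRs and issue numbers are invented.

# refs() extracts every "#N" issue reference from a PR's title and body;
# find_duplicates() groups open PRs by referenced issue and reports any
# issue claimed by more than one PR.
import re
from collections import defaultdict

def refs(pr):
    return [int(m) for m in re.findall(r"#(\d+)", pr.get("title", "") + " " + pr.get("body", ""))]

def find_duplicates(prs):
    by = defaultdict(list)
    for p in prs:
        for r in refs(p):
            by[r].append(p)
    return [g for g in by.values() if len(g) > 1]

prs = [
    {"number": 61, "title": "fix: daily reset", "body": "Fixes #650"},   # invented sample
    {"number": 62, "title": "fix: stale states", "body": "Closes #650"}, # invented sample
    {"number": 63, "title": "docs cleanup", "body": "Refs #648"},        # invented sample
]
for group in find_duplicates(prs):
    print(sorted(p["number"] for p in group))  # [61, 62] -- both reference #650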

Tests for scripts/pr_triage (deleted)

@@ -1,45 +0,0 @@
"""Tests for PR triage automation (#659)."""
import pytest
class TestCategorize:
def _pr(self, title="", body=""):
return {"title": title, "body": body, "labels": []}
def test_training(self):
from scripts.pr_triage import categorize
assert categorize(self._pr("Add DPO pairs")) == "training-data"
def test_bug(self):
from scripts.pr_triage import categorize
assert categorize(self._pr("fix: crash")) == "bug-fix"
def test_feature(self):
from scripts.pr_triage import categorize
assert categorize(self._pr("feat: dark mode")) == "feature"
def test_other(self):
from scripts.pr_triage import categorize
assert categorize(self._pr("random")) == "other"
class TestRefs:
def test_simple(self):
from scripts.pr_triage import refs
assert 123 in refs({"title": "Fix #123", "body": ""})
def test_multiple(self):
from scripts.pr_triage import refs
r = refs({"title": "", "body": "Closes #100, Refs #200"})
assert 100 in r and 200 in r
class TestDuplicates:
def test_found(self):
from scripts.pr_triage import find_duplicates
prs = [{"title": "", "body": "Fix #1", "number": 1, "head": {"ref": "a"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}},
{"title": "", "body": "Refs #1", "number": 2, "head": {"ref": "b"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}}]
assert len(find_duplicates(prs)) == 1
def test_none(self):
from scripts.pr_triage import find_duplicates
prs = [{"title": "", "body": "Fix #1", "number": 1, "head": {"ref": "a"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}},
{"title": "", "body": "Fix #2", "number": 2, "head": {"ref": "b"}, "created_at": "2026-01-01T00:00:00Z", "updated_at": "2026-01-01T00:00:00Z", "user": {}}]
assert find_duplicates(prs) == []