diff --git a/bin/consolidated-cycle.sh b/bin/consolidated-cycle.sh
new file mode 100755
index 0000000..3de49e7
--- /dev/null
+++ b/bin/consolidated-cycle.sh
@@ -0,0 +1,250 @@
#!/usr/bin/env bash
# ── Consolidated Cycle v1 ──────────────────────────────────────────────
# Single-execution cycle. Cron fires this. No while-true. No sleep.
#
# 3 phases, always in order:
#   1. Watchdog   — bash only, zero tokens, always runs
#   2. Dev cycle  — sonnet, skips if no work or plateau detected
#   3. Philosophy — opus, skips if ran in last 24h
#
# PLATEAU DETECTION:
#   Tracks cycle outcomes in .loop/cycle-metrics.jsonl
#   If last N cycles produced zero merged PRs and zero new issues filed,
#   the loop is plateauing — it skips the LLM call and logs why.
#   Plateau resets when: new issues appear, PRs merge, or owner comments.
# ───────────────────────────────────────────────────────────────────────

set -uo pipefail
export PATH="$HOME/.local/bin:$HOME/.hermes/bin:/usr/local/bin:$PATH"

REPO="$HOME/Timmy-Time-dashboard"
STATE="$REPO/.loop/state.json"
CLAIMS="$REPO/.loop/claims.json"
PROMPT_FILE="$HOME/.hermes/bin/timmy-loop-prompt.md"
LOG_DIR="$REPO/.loop/logs"
METRICS="$REPO/.loop/cycle-metrics.jsonl"
QUEUE_FILE="$REPO/.loop/queue.json"
TRIAGE_SCRIPT="$REPO/scripts/triage_score.py"
RETRO_SCRIPT="$REPO/scripts/cycle_retro.py"   # NOTE(review): not referenced below — confirm still needed
GITEA_URL="http://143.198.27.163:3000"
GITEA_API="$GITEA_URL/api/v1"
GITEA_TOKEN=$(cat ~/.hermes/gitea_token_vps 2>/dev/null || cat ~/.hermes/gitea_token 2>/dev/null)
REPO_API="$GITEA_API/repos/rockachopa/Timmy-time-dashboard"
MAX_CYCLE_TIME=1200
PHILOSOPHY_MARKER="/tmp/philosophy-last-run"
PLATEAU_THRESHOLD=3   # skip after N consecutive zero-output cycles

DEV_MODEL="claude-sonnet-4-20250514"
PHILOSOPHY_MODEL="claude-opus-4-6"

# macOS fallback: stock macOS has no coreutils `timeout`; emulate with perl alarm.
if ! command -v timeout &>/dev/null; then
  timeout() { local d="$1"; shift; perl -e "alarm $d; exec @ARGV" -- "$@"; }
fi

log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*"; }

# ── Plateau Detection ─────────────────────────────────────────────────
# Returns 0 (plateau — skip the LLM call) or 1 (there is work to do).
# Reads the last $PLATEAU_THRESHOLD lines of $METRICS; a line counts as
# "zero output" when both prs_merged and issues_filed are 0 (malformed
# lines default to 0/0, same as before).
check_plateau() {
  [ ! -f "$METRICS" ] && return 1   # no history = no plateau

  local recent
  recent=$(tail -n "$PLATEAU_THRESHOLD" "$METRICS" 2>/dev/null)
  local zero_count=0
  local total=0

  while IFS= read -r line; do
    # FIX: `<<< ""` feeds one empty line; previously that blank line was
    # counted as a zero-output cycle.
    [ -z "$line" ] && continue
    total=$((total + 1))
    local prs_merged issues_filed
    # FIX: one python invocation per line instead of two (same defaults).
    read -r prs_merged issues_filed < <(printf '%s' "$line" | python3 -c "
import sys, json
try:
    d = json.loads(sys.stdin.read())
    print(d.get('prs_merged', 0), d.get('issues_filed', 0))
except Exception:
    print(0, 0)
" 2>/dev/null) || { prs_merged=0; issues_filed=0; }
    prs_merged=${prs_merged:-0}
    issues_filed=${issues_filed:-0}
    if [ "$prs_merged" -eq 0 ] && [ "$issues_filed" -eq 0 ]; then
      zero_count=$((zero_count + 1))
    fi
  done <<< "$recent"

  if [ "$total" -ge "$PLATEAU_THRESHOLD" ] && [ "$zero_count" -ge "$PLATEAU_THRESHOLD" ]; then
    return 0   # plateau detected
  fi
  return 1     # not plateauing
}

# Append one JSONL record to $METRICS.
# $1=prs_merged  $2=issues_filed  $3=outcome (free-form label)
log_metric() {
  local ts
  ts=$(date -u +%Y-%m-%dT%H:%M:%SZ)
  echo "{\"ts\":\"$ts\",\"prs_merged\":${1:-0},\"issues_filed\":${2:-0},\"outcome\":\"${3:-unknown}\"}" >> "$METRICS"
}

# ── Phase 1: Watchdog (bash only, zero tokens) ───────────────────────
phase_watchdog() {
  log "── WATCHDOG ──"

  # Kill long-running pytest processes.
  # FIX(comment): `ps -o etime` prints mm:ss under an hour and hh:mm:ss
  # (or dd-hh:mm:ss) above it, so the *:*:* glob matches processes that
  # have run >= 1 hour — NOT ">20 min" as the old comment claimed.
  ps aux | grep "pytest tests/" | grep -v grep | while read -r line; do
    local pid etime
    pid=$(echo "$line" | awk '{print $2}')
    etime=$(ps -o etime= -p "$pid" 2>/dev/null | tr -d ' ')
    if [[ "$etime" == *:*:* ]]; then
      log "  Killing stale pytest PID $pid ($etime)"
      kill "$pid" 2>/dev/null
    fi
  done

  # Kill stuck git pushes (same >= 1 hour etime match as above).
  ps aux | grep "git.*push\|git-remote-http" | grep -v grep | while read -r line; do
    local pid etime
    pid=$(echo "$line" | awk '{print $2}')
    etime=$(ps -o etime= -p "$pid" 2>/dev/null | tr -d ' ')
    if [[ "$etime" == *:*:* ]]; then
      log "  Killing stuck git PID $pid ($etime)"
      kill "$pid" 2>/dev/null
    fi
  done

  # Kill orphaned vi/vim editors left behind by interrupted commits.
  ps aux | grep "vi.*COMMIT_EDITMSG" | grep -v grep | awk '{print $2}' | xargs kill 2>/dev/null

  # Expire claims older than 1h so crashed workers don't hold issues forever.
  if [ -f "$CLAIMS" ]; then
    python3 -c "
import json, time
try:
    claims = json.load(open('$CLAIMS'))
    now = time.time()
    expired = [k for k, v in claims.items() if isinstance(v, dict) and now - v.get('ts', now) > 3600]
    for k in expired:
        del claims[k]
        print(f'  Expired claim: {k}')
    if expired:
        json.dump(claims, open('$CLAIMS', 'w'), indent=2)
except Exception:
    pass
" 2>/dev/null
  fi

  # Gitea reachability (warn only; later phases handle their own failures).
  if curl -s --max-time 5 "$GITEA_URL/api/v1/version" >/dev/null 2>&1; then
    log "  Gitea: OK"
  else
    log "  WARNING: Gitea unreachable"
  fi
}

# ── Phase 2: Dev Cycle (sonnet) ───────────────────────────────────────
phase_dev() {
  log "── DEV CYCLE (model: $DEV_MODEL) ──"

  # Plateau check — skip the (costly) LLM call when the loop is spinning.
  if check_plateau; then
    log "  PLATEAU: Last $PLATEAU_THRESHOLD cycles produced no output. Skipping LLM call."
    log "  (Will resume when new issues appear or PRs need review)"

    # External activity (open PRs) breaks the plateau: reviews still matter.
    local open_prs
    open_prs=$(curl -s -H "Authorization: token $GITEA_TOKEN" \
      "$REPO_API/pulls?state=open&limit=5" 2>/dev/null | \
      python3 -c "import sys,json; print(len(json.loads(sys.stdin.read())))" 2>/dev/null || echo 0)

    if [ "$open_prs" -gt 0 ]; then
      log "  But $open_prs open PRs found — breaking plateau for review."
    else
      log_metric 0 0 "plateau_skip"
      return
    fi
  fi

  # Fast triage: regenerate the prioritized queue (best-effort).
  if [ -f "$TRIAGE_SCRIPT" ]; then
    GITEA_API="$GITEA_API" GITEA_TOKEN="$GITEA_TOKEN" \
      python3 "$TRIAGE_SCRIPT" > "$QUEUE_FILE" 2>/dev/null || true
  fi

  local queue_size=0
  if [ -f "$QUEUE_FILE" ]; then
    queue_size=$(python3 -c "import json; print(len(json.load(open('$QUEUE_FILE'))))" 2>/dev/null || echo 0)
  fi

  if [ "$queue_size" -eq 0 ]; then
    log "  No work in queue. Skipping."
    log_metric 0 0 "empty_queue"
    return
  fi

  log "  Queue: $queue_size items"

  local PROMPT
  PROMPT=$(cat "$PROMPT_FILE" 2>/dev/null)
  [ -z "$PROMPT" ] && { log "ERROR: No prompt file"; return; }

  # Render the top of the queue (max 8 items) into prompt-friendly text.
  local QUEUE_SUMMARY
  QUEUE_SUMMARY=$(python3 -c "
import json
q = json.load(open('$QUEUE_FILE'))
lines = ['PRIORITIZED QUEUE ({} ready issues):'.format(len(q))]
for i, item in enumerate(q[:8]):
    score = item.get('score', 0)
    title = item.get('title', '?')[:70]
    num = item.get('number', '?')
    labels = ','.join(item.get('labels', []))
    files = ', '.join(item.get('files', [])[:3])
    lines.append(f'  {i+1}. #{num} [{labels}] score={score} — {title}')
    if files: lines.append(f'     files: {files}')
if len(q) > 8: lines.append(f'  ... +{len(q)-8} more')
print('\n'.join(lines))
" 2>/dev/null || echo "Queue: error")

  local FULL_PROMPT="TIME BUDGET: 20 minutes. Be efficient — sonnet, not opus.

$QUEUE_SUMMARY

Pick from the TOP of this queue.

$PROMPT"

  local CYCLE_LOG="$LOG_DIR/cycle-$(date +%Y%m%d_%H%M%S).log"
  mkdir -p "$LOG_DIR"

  # pipefail is set, so a failing hermes run fails the pipeline despite tee.
  if timeout "$MAX_CYCLE_TIME" hermes chat --yolo \
       --provider anthropic \
       --model "$DEV_MODEL" \
       -q "$FULL_PROMPT" 2>&1 | tee "$CYCLE_LOG"; then
    log "  Cycle completed OK"
    # TODO: parse cycle log for prs_merged / issues_filed counts
    log_metric 0 0 "completed"
  else
    log "  Cycle failed (exit $?)"
    log_metric 0 0 "failed"
  fi
}

# ── Phase 3: Philosophy (opus, daily) ─────────────────────────────────
# Skips entirely if the marker file records a run within the last 24h.
phase_philosophy() {
  if [ -f "$PHILOSOPHY_MARKER" ]; then
    local last_run now elapsed
    last_run=$(cat "$PHILOSOPHY_MARKER" 2>/dev/null || echo 0)
    now=$(date +%s)
    elapsed=$((now - last_run))
    if [ "$elapsed" -lt 86400 ]; then
      return   # ran today already
    fi
  fi

  log "── PHILOSOPHY (daily, model: $PHILOSOPHY_MODEL) ──"

  timeout 600 hermes chat --yolo \
    --provider anthropic \
    --model "$PHILOSOPHY_MODEL" \
    -q "You are Hermes Agent on a philosophy loop. Study the next influence from ~/philosophy-journal.md. Search web for a real primary source. Write 300-500 word reflection on agentic architecture. File Gitea issue at $REPO_API/issues (token from ~/.hermes/gitea_token_vps). Append to ~/philosophy-journal.md. Tag: [philosophy]." \
    2>&1 | tee "$LOG_DIR/philosophy-$(date +%Y%m%d).log" || true

  date +%s > "$PHILOSOPHY_MARKER"
  log "  Philosophy complete. Next: ~24h."
}

# ── Main (single execution) ───────────────────────────────────────────
log "=== CONSOLIDATED CYCLE START ==="
phase_watchdog
phase_dev
phase_philosophy
log "=== CONSOLIDATED CYCLE END ==="
diff --git a/bin/efficiency-audit.sh b/bin/efficiency-audit.sh
new file mode 100755
index 0000000..bcc1d17
--- /dev/null
+++ b/bin/efficiency-audit.sh
@@ -0,0 +1,201 @@
#!/usr/bin/env bash
# ── Hermes Efficiency Audit ────────────────────────────────────────────
# Runs every 12h initially, backs off to 24h+ when plateau detected.
#
# WHAT IT CHECKS:
#   1.
Zombie processes (stuck git, pytest, vi, hermes sessions)
#   2. Token spend estimate (cron run count × estimated tokens)
#   3. Plateau detection (are loops producing value?)
#   4. Stale resources (old worktrees, tmp files, logs)
#   5. Recommendations (evolve or cut)
#
# PLATEAU-AWARE SCHEDULING:
#   This script tracks its own diminishing returns. If the last 3 audits
#   found nothing actionable, it writes a "back-off" marker and the cron
#   should extend its interval. The cron checks this marker.
# ───────────────────────────────────────────────────────────────────────

set -uo pipefail
export PATH="$HOME/.local/bin:$HOME/.hermes/bin:/usr/local/bin:$PATH"

AUDIT_DIR="$HOME/.hermes/audits"
AUDIT_LOG="$AUDIT_DIR/audit-$(date +%Y%m%d_%H%M%S).md"
BACKOFF_MARKER="$AUDIT_DIR/backoff-level"
GITEA_URL="http://143.198.27.163:3000"
GITEA_TOKEN=$(cat ~/.hermes/gitea_token_vps 2>/dev/null || cat ~/.hermes/gitea_token 2>/dev/null)

mkdir -p "$AUDIT_DIR"

findings=0
report=""

# Append one line to the markdown report being accumulated in $report.
r() { report+="$1"$'\n'; }

r "# Hermes Efficiency Audit — $(date '+%Y-%m-%d %H:%M')"
r ""

# ── 1. Zombie Processes ───────────────────────────────────────────────
r "## 1. Zombie Processes"
zombies=0

# Stale pytest. FIX(comment): the *:*:* etime glob matches hh:mm:ss,
# i.e. processes running >= 1 hour — not ">20 min" as previously claimed.
# The inner pipeline only FILTERS (echoes matching ps lines); the outer
# loop reads via process substitution so $zombies survives.
while IFS= read -r line; do
  if [ -n "$line" ]; then
    pid=$(echo "$line" | awk '{print $2}')
    r "  KILL: stale pytest PID $pid"
    kill "$pid" 2>/dev/null
    zombies=$((zombies + 1))
  fi
done < <(ps aux | grep "pytest tests/" | grep -v grep | while read -r l; do
  pid=$(echo "$l" | awk '{print $2}')
  et=$(ps -o etime= -p "$pid" 2>/dev/null | tr -d ' ')
  [[ "$et" == *:*:* ]] && echo "$l"
done)

# Stuck git (same >= 1 hour etime match).
while IFS= read -r line; do
  if [ -n "$line" ]; then
    pid=$(echo "$line" | awk '{print $2}')
    r "  KILL: stuck git PID $pid"
    kill "$pid" 2>/dev/null
    zombies=$((zombies + 1))
  fi
done < <(ps aux | grep "git.*push\|git-remote-http" | grep -v grep | while read -r l; do
  pid=$(echo "$l" | awk '{print $2}')
  et=$(ps -o etime= -p "$pid" 2>/dev/null | tr -d ' ')
  [[ "$et" == *:*:* ]] && echo "$l"
done)

# Orphaned vi editors from interrupted commits.
vi_count=$(ps aux | grep "vi.*COMMIT_EDITMSG" | grep -v grep | wc -l | tr -d ' ')
if [ "$vi_count" -gt 0 ]; then
  ps aux | grep "vi.*COMMIT_EDITMSG" | grep -v grep | awk '{print $2}' | xargs kill 2>/dev/null
  r "  KILL: $vi_count orphaned vi editors"
  zombies=$((zombies + vi_count))
fi

# Count active hermes sessions (informational only).
hermes_count=$(ps aux | grep "hermes" | grep python | grep -v grep | wc -l | tr -d ' ')
r "  Active hermes sessions: $hermes_count"
r "  Zombies killed: $zombies"
[ "$zombies" -gt 0 ] && findings=$((findings + 1))
r ""

# ── 2. Cron Job Status ────────────────────────────────────────────────
r "## 2. Cron Jobs"
# FIX: this section previously piped python INTO the while loop; the loop
# then ran in a pipeline subshell, so every `r "$line"` appended to a
# throwaway copy of $report and the cron section was silently missing
# from the audit. Process substitution keeps the loop in this shell.
while IFS= read -r line; do r "$line"; done < <(python3 -c "
import json
data = json.load(open('$HOME/.hermes/cron/jobs.json'))
jobs = data.get('jobs', data) if isinstance(data, dict) else data
active = 0
for j in jobs:
    if not isinstance(j, dict): continue
    name = j.get('name', j.get('id','?'))
    enabled = j.get('enabled', True)
    paused = j.get('paused', False)
    state = 'PAUSED' if (paused or not enabled) else 'ACTIVE'
    sched = j.get('schedule', {})
    mins = sched.get('minutes', '?') if isinstance(sched, dict) else '?'
    if state == 'ACTIVE': active += 1
    print(f'  {state:7s} {name:40s} every {mins}m')
print(f'\n  Total active: {active}')
" 2>/dev/null)
r ""

# ── 3. Stale Resources ────────────────────────────────────────────────
r "## 3. Stale Resources"

# Worktrees untouched for more than 3 days.
worktree_count=$(find ~/worktrees -maxdepth 1 -type d -mtime +3 2>/dev/null | wc -l | tr -d ' ')
if [ "$worktree_count" -gt 0 ]; then
  r "  Stale worktrees (>3 days): $worktree_count"
  find ~/worktrees -maxdepth 1 -type d -mtime +3 -exec basename {} \; 2>/dev/null | while read -r w; do
    r "    $w"
  done
  findings=$((findings + 1))
else
  r "  Worktrees: clean"
fi

# Log disk usage
log_size=$(du -sh ~/.hermes/logs 2>/dev/null | awk '{print $1}')
r "  Hermes logs: $log_size"

# Tmp files
tmp_size=$(du -sh /tmp/timmy-agents 2>/dev/null | awk '{print $1}')
r "  /tmp/timmy-agents: ${tmp_size:-0}"
r ""

# ── 4. Plateau Detection ──────────────────────────────────────────────
r "## 4. Loop Plateau Analysis"
METRICS="$HOME/Timmy-Time-dashboard/.loop/cycle-metrics.jsonl"
if [ -f "$METRICS" ]; then
  total_cycles=$(wc -l < "$METRICS" | tr -d ' ')
  recent_zero=$(tail -5 "$METRICS" | python3 -c "
import sys, json
zero = sum(1 for l in sys.stdin if l.strip()
    and json.loads(l).get('prs_merged',0) == 0
    and json.loads(l).get('issues_filed',0) == 0)
print(zero)
" 2>/dev/null || echo "?")
  r "  Total cycles logged: $total_cycles"
  r "  Last 5 zero-output: $recent_zero/5"
  if [ "$recent_zero" = "5" ]; then
    r "  STATUS: PLATEAUED — loop is spinning without producing value"
    r "  RECOMMENDATION: Increase interval or pause until new issues arrive"
    findings=$((findings + 1))
  elif [ "$recent_zero" -ge 3 ] 2>/dev/null; then   # redirect guards non-numeric '?'
    r "  STATUS: APPROACHING PLATEAU"
  else
    r "  STATUS: PRODUCTIVE"
  fi
else
  r "  No cycle metrics yet."
fi
r ""

# ── 5. Gitea Health ───────────────────────────────────────────────────
r "## 5. Gitea Status"
if curl -s --max-time 5 "$GITEA_URL/api/v1/version" >/dev/null 2>&1; then
  open_issues=$(curl -s -H "Authorization: token $GITEA_TOKEN" \
    "$GITEA_URL/api/v1/repos/rockachopa/Timmy-time-dashboard/issues?state=open&limit=1&type=issues" 2>/dev/null | \
    python3 -c "import sys; print(len(__import__('json').loads(sys.stdin.read())))" 2>/dev/null || echo "?")
  open_prs=$(curl -s -H "Authorization: token $GITEA_TOKEN" \
    "$GITEA_URL/api/v1/repos/rockachopa/Timmy-time-dashboard/pulls?state=open&limit=50" 2>/dev/null | \
    python3 -c "import sys; print(len(__import__('json').loads(sys.stdin.read())))" 2>/dev/null || echo "?")
  r "  Gitea: ONLINE ($GITEA_URL)"
  r "  Open issues: $open_issues+"
  r "  Open PRs: $open_prs"
else
  r "  Gitea: OFFLINE"
  findings=$((findings + 1))
fi
r ""

# ── 6. Recommendations ────────────────────────────────────────────────
r "## 6. Recommendations"
if [ "$findings" -eq 0 ]; then
  r "  All clear. No action needed."
else
  r "  $findings issue(s) found — see above."
+fi +r "" + +# ── Self-scheduling: plateau backoff ────────────────────────────────── +current_backoff=$(cat "$BACKOFF_MARKER" 2>/dev/null || echo 0) +if [ "$findings" -eq 0 ]; then + new_backoff=$((current_backoff + 1)) + echo "$new_backoff" > "$BACKOFF_MARKER" + if [ "$new_backoff" -ge 3 ]; then + r "## Self-Schedule" + r " Audit backoff level: $new_backoff (no findings in $new_backoff consecutive runs)" + r " RECOMMENDATION: Extend audit interval to 24h or 48h" + fi +else + echo 0 > "$BACKOFF_MARKER" +fi + +# ── Output ──────────────────────────────────────────────────────────── +echo "$report" | tee "$AUDIT_LOG" +echo "" +echo "Audit saved to: $AUDIT_LOG" diff --git a/bin/kimi-loop.sh b/bin/kimi-loop.sh new file mode 100755 index 0000000..df4fe7a --- /dev/null +++ b/bin/kimi-loop.sh @@ -0,0 +1,321 @@ +#!/usr/bin/env bash +# kimi-loop.sh — Dropout-proof Kimi code agent dispatch loop +# Picks an open issue from Gitea, creates a worktree, runs Kimi Code CLI, +# handles failures gracefully, and loops forever. 
+# +# Dropout-proof means: +# - If Kimi Code crashes/hangs, we kill it and move on +# - If worktree creation fails, skip and retry +# - If push fails, log and continue +# - Exponential backoff on repeated failures +# - Clean up worktrees after PR is created + +set -euo pipefail + +# === CONFIG === +REPO_DIR="$HOME/worktrees/kimi-repo" +WORKTREE_BASE="$HOME/worktrees" +GITEA_URL="http://143.198.27.163:3000" +GITEA_TOKEN=$(cat "$HOME/.hermes/kimi_token") +REPO_OWNER="rockachopa" +REPO_NAME="Timmy-time-dashboard" +KIMI_TIMEOUT=600 # 10 min per issue +COOLDOWN=30 # seconds between issues +MAX_FAILURES=5 # consecutive failures before long sleep +LONG_SLEEP=300 # 5 min backoff on repeated failures +LOG_DIR="$HOME/.hermes/logs" +SKIP_FILE="$LOG_DIR/kimi-skip-list.json" # issues to skip temporarily + +mkdir -p "$LOG_DIR" "$WORKTREE_BASE" + +# Initialize skip file if missing +[ -f "$SKIP_FILE" ] || echo '{}' > "$SKIP_FILE" + +# === STATE === +failure_count=0 +issues_completed=0 + +# === SKIP LIST FUNCTIONS === +is_skipped() { + local issue_num="$1" + python3 -c " +import json, time, sys +try: + with open('$SKIP_FILE') as f: skips = json.load(f) +except: skips = {} +entry = skips.get(str($issue_num), {}) +if entry and entry.get('until', 0) > time.time(): + print('skip') + sys.exit(0) +# Expired or not found — clean up and allow +if str($issue_num) in skips: + del skips[str($issue_num)] + with open('$SKIP_FILE', 'w') as f: json.dump(skips, f) +print('ok') +" 2>/dev/null +} + +mark_skip() { + local issue_num="$1" + local reason="$2" + local skip_hours="${3:-1}" # default 1 hour + python3 -c " +import json, time +try: + with open('$SKIP_FILE') as f: skips = json.load(f) +except: skips = {} +skips[str($issue_num)] = { + 'until': time.time() + ($skip_hours * 3600), + 'reason': '$reason', + 'failures': skips.get(str($issue_num), {}).get('failures', 0) + 1 +} +# If 3+ failures, skip for 6 hours instead +if skips[str($issue_num)]['failures'] >= 3: + skips[str($issue_num)]['until'] = 
time.time() + (6 * 3600) +with open('$SKIP_FILE', 'w') as f: json.dump(skips, f, indent=2) +" 2>/dev/null + log "SKIP: #${issue_num} added to skip list — ${reason}" +} + +log() { + local msg="[$(date '+%Y-%m-%d %H:%M:%S')] $*" + echo "$msg" >> "$LOG_DIR/kimi-loop.log" +} + +cleanup_worktree() { + local wt="$1" + local branch="$2" + if [ -d "$wt" ]; then + cd "$REPO_DIR" + git worktree remove --force "$wt" 2>/dev/null || rm -rf "$wt" + git worktree prune 2>/dev/null + git branch -D "$branch" 2>/dev/null || true + log "Cleaned up worktree: $wt" + fi +} + +get_next_issue() { + # Get open issues ASSIGNED TO KIMI only — Kimi works its own queue + # NOTE: Gitea's assignee filter is unreliable — we validate in Python + local skip_file="$SKIP_FILE" + curl -sf "${GITEA_URL}/api/v1/repos/${REPO_OWNER}/${REPO_NAME}/issues?state=open&type=issues&limit=50&sort=created" \ + -H "Authorization: token ${GITEA_TOKEN}" | python3 -c " +import sys, json, time + +issues = json.load(sys.stdin) +# Reverse to oldest-first (Gitea returns newest-first) — respects dependency order +issues.reverse() + +# Load skip list +try: + with open('${skip_file}') as f: skips = json.load(f) +except: skips = {} + +for i in issues: + # MUST be assigned to kimi (Gitea filter is broken, validate here) + assignees = [a['login'] for a in (i.get('assignees') or [])] + if 'kimi' not in assignees: + continue + + title = i['title'].lower() + # Skip philosophy, epics, showcases, features (not 10-min code work) + if '[philosophy]' in title: continue + if '[epic]' in title or 'epic:' in title: continue + if '[showcase]' in title: continue + if '[feature]' in title: continue + + # Check skip list + num_str = str(i['number']) + entry = skips.get(num_str, {}) + if entry and entry.get('until', 0) > time.time(): + continue + + print(json.dumps({'number': i['number'], 'title': i['title']})) + sys.exit(0) +print('null') +" 2>/dev/null +} + +build_prompt() { + local issue_num="$1" + local issue_title="$2" + local 
worktree="$3" + + cat < (#${issue_num})", "body": "Fixes #${issue_num}\n\n", "head": "kimi/issue-${issue_num}", "base": "main"}' + +5. COMMENT on the issue when done: + curl -s -X POST "${GITEA_URL}/api/v1/repos/${REPO_OWNER}/${REPO_NAME}/issues/${issue_num}/comments" \ + -H "Authorization: token ${GITEA_TOKEN}" \ + -H "Content-Type: application/json" \ + -d '{"body": "PR created. "}' + +6. FILE NEW ISSUES if you find bugs, missing tests, or improvements while working: + curl -s -X POST "${GITEA_URL}/api/v1/repos/${REPO_OWNER}/${REPO_NAME}/issues" \ + -H "Authorization: token ${GITEA_TOKEN}" \ + -H "Content-Type: application/json" \ + -d '{"title": "[kimi-generated] ", "body": "<description>"}' + +== RULES == +- Read CLAUDE.md or project README first for conventions +- tox is the ONLY way to run tests/lint/format. Never run pytest/ruff directly. +- Never use --no-verify on git commands. +- If tests fail after 2 attempts, STOP and comment on the issue explaining why. +- Be thorough. If you see something broken nearby, file an issue for it. +PROMPT +} + +# === MAIN LOOP === +log "=== Kimi Loop Started ===" +log "Repo: ${REPO_DIR}" +log "Worktrees: ${WORKTREE_BASE}" + +while true; do + # Check for too many consecutive failures + if [ "$failure_count" -ge "$MAX_FAILURES" ]; then + log "BACKOFF: ${failure_count} consecutive failures. Sleeping ${LONG_SLEEP}s..." + sleep "$LONG_SLEEP" + failure_count=0 + fi + + # Fetch latest main (resilient — never die on git errors) + cd "$REPO_DIR" + timeout 60 git fetch origin main 2>/dev/null || { log "WARN: git fetch failed, continuing anyway"; } + git checkout main 2>/dev/null || true + git reset --hard origin/main 2>/dev/null || true + + # Get next issue + issue_json=$(get_next_issue) + + if [ "$issue_json" = "null" ] || [ -z "$issue_json" ]; then + # Only log idle ONCE, then go quiet until work appears + if [ "${LAST_STATE:-}" != "idle" ]; then + log "Queue empty. Waiting for assignments..." 
+ LAST_STATE="idle" + fi + sleep "$LONG_SLEEP" + continue + fi + LAST_STATE="working" + + issue_num=$(echo "$issue_json" | python3 -c "import sys,json; print(json.load(sys.stdin)['number'])") + issue_title=$(echo "$issue_json" | python3 -c "import sys,json; print(json.load(sys.stdin)['title'])") + branch="kimi/issue-${issue_num}" + worktree="${WORKTREE_BASE}/kimi-${issue_num}" + + log "=== ISSUE #${issue_num}: ${issue_title} ===" + + # Create worktree + if [ -d "$worktree" ]; then + log "Worktree already exists, cleaning..." + cleanup_worktree "$worktree" "$branch" + fi + + cd "$REPO_DIR" + if ! git worktree add "$worktree" -b "$branch" origin/main 2>&1; then + log "ERROR: Failed to create worktree for #${issue_num}" + failure_count=$((failure_count + 1)) + sleep "$COOLDOWN" + continue + fi + + # Configure git remote with kimi's token so it can push + cd "$worktree" + git remote set-url origin "http://kimi:${GITEA_TOKEN}@143.198.27.163:3000/${REPO_OWNER}/${REPO_NAME}.git" + cd "$REPO_DIR" + + # Build prompt + prompt=$(build_prompt "$issue_num" "$issue_title" "$worktree") + + # Run Kimi Code CLI with timeout + log "Launching Kimi Code for #${issue_num} (timeout: ${KIMI_TIMEOUT}s)..." + + set +e + cd "$worktree" + gtimeout "$KIMI_TIMEOUT" kimi \ + --print \ + --quiet \ + -w "$worktree" \ + -p "$prompt" \ + </dev/null 2>&1 | tee "$LOG_DIR/kimi-${issue_num}.log" + exit_code=${PIPESTATUS[0]} + cd "$REPO_DIR" + set -e + + if [ "$exit_code" -eq 0 ]; then + log "SUCCESS: #${issue_num} completed — attempting auto-merge..." 
+ + # Find and merge the PR kimi created + pr_num=$(curl -sf "${GITEA_URL}/api/v1/repos/${REPO_OWNER}/${REPO_NAME}/pulls?state=open&head=${REPO_OWNER}:${branch}&limit=1" \ + -H "Authorization: token ${GITEA_TOKEN}" | python3 -c " +import sys,json +prs = json.load(sys.stdin) +if prs: print(prs[0]['number']) +else: print('') +" 2>/dev/null) + + if [ -n "$pr_num" ]; then + merge_result=$(curl -sf -X POST "${GITEA_URL}/api/v1/repos/${REPO_OWNER}/${REPO_NAME}/pulls/${pr_num}/merge" \ + -H "Authorization: token ${GITEA_TOKEN}" \ + -H "Content-Type: application/json" \ + -d '{"Do": "squash"}' 2>&1) || true + log " PR #${pr_num} merge attempted" + + # Close the issue (Gitea auto-close via "Fixes #N" is unreliable) + curl -sf -X PATCH "${GITEA_URL}/api/v1/repos/${REPO_OWNER}/${REPO_NAME}/issues/${issue_num}" \ + -H "Authorization: token ${GITEA_TOKEN}" \ + -H "Content-Type: application/json" \ + -d '{"state": "closed"}' >/dev/null 2>&1 || true + log " Issue #${issue_num} closed" + else + log " WARN: No open PR found for branch ${branch}" + fi + + failure_count=0 + issues_completed=$((issues_completed + 1)) + log "Stats: ${issues_completed} issues completed this session" + elif [ "$exit_code" -eq 124 ]; then + log "TIMEOUT: #${issue_num} exceeded ${KIMI_TIMEOUT}s" + mark_skip "$issue_num" "timeout" 1 + failure_count=$((failure_count + 1)) + else + log "FAILED: #${issue_num} exited with code ${exit_code}" + mark_skip "$issue_num" "exit_code_${exit_code}" 1 + failure_count=$((failure_count + 1)) + fi + + # Clean up worktree + cleanup_worktree "$worktree" "$branch" + + # Cooldown + log "Cooling down ${COOLDOWN}s before next issue..." + sleep "$COOLDOWN" +done