Compare commits
6 Commits
step35/592
...
step35/588
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
66e48739d8 | ||
|
|
54093991ab | ||
|
|
1ea6bf6e33 | ||
|
|
874ce137b0 | ||
| 5eef5b48c8 | |||
| aae8b5957f |
87
bin/gitea-backup.sh
Normal file
87
bin/gitea-backup.sh
Normal file
@@ -0,0 +1,87 @@
|
||||
#!/bin/bash
# Gitea Daily Backup Script
# Uses Gitea's native dump command to create automated backups of repositories and SQLite databases.
# Designed to run on the VPS (Ezra) as part of a daily cron job.
#
# Configuration via environment variables:
#   GITEA_BIN               Path to gitea binary (default: auto-detect)
#   GITEA_WORK_PATH         Gitea working directory (default: auto-detect from app.ini)
#   GITEA_BACKUP_DIR        Directory for backup archives (default: /var/backups/gitea)
#   GITEA_BACKUP_RETENTION  Days to retain backups (default: 7)
#   GITEA_BACKUP_LOG        Log file path (default: /var/log/gitea-backup.log)

set -euo pipefail

GITEA_BIN="${GITEA_BIN:-$(command -v gitea 2>/dev/null || echo "/usr/local/bin/gitea")}"
BACKUP_DIR="${GITEA_BACKUP_DIR:-/var/backups/gitea}"
RETENTION_DAYS="${GITEA_BACKUP_RETENTION:-7}"
DATE="$(date +%Y-%m-%d_%H%M%S)"
BACKUP_FILE="${BACKUP_DIR}/gitea-backup-${DATE}.tar.gz"
LOG_FILE="${GITEA_BACKUP_LOG:-/var/log/gitea-backup.log}"

mkdir -p "${BACKUP_DIR}"

# Log a timestamped message to stdout and append it to the log file.
log() {
  echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" | tee -a "${LOG_FILE}"
}

log "=== Starting Gitea daily backup ==="

# Verify gitea binary exists
if [ ! -x "${GITEA_BIN}" ]; then
  log "ERROR: Gitea binary not found at ${GITEA_BIN}"
  log "Set GITEA_BIN environment variable to the gitea binary path (e.g., /usr/bin/gitea)"
  exit 1
fi

# Determine Gitea WORK_PATH. Precedence: explicit env override, then the
# WORK_PATH key in a known app.ini, then common install directories.
WORK_PATH="${GITEA_WORK_PATH:-}"
APP_INI=""

if [ -z "$WORK_PATH" ]; then
  for path in /etc/gitea/app.ini /home/git/gitea/custom/conf/app.ini ~/gitea/custom/conf/app.ini; do
    if [ -f "$path" ]; then
      APP_INI="$path"
      break
    fi
  done

  if [ -n "$APP_INI" ]; then
    # Parse [app] WORK_PATH = /var/lib/gitea (leading whitespace tolerated)
    WORK_PATH=$(sed -n 's/^[[:space:]]*WORK_PATH[[:space:]]*=[[:space:]]*//p' "$APP_INI" | head -1)
    log "Detected WORK_PATH from app.ini: ${WORK_PATH}"
  fi
fi

# Fallback detection: first existing directory from the usual suspects.
if [ -z "$WORK_PATH" ]; then
  for d in /var/lib/gitea /home/git/gitea /srv/gitea /opt/gitea; do
    if [ -d "$d" ]; then
      WORK_PATH="$d"
      break
    fi
  done
  log "Inferred WORK_PATH: ${WORK_PATH:-not found}"
fi

if [ -z "$WORK_PATH" ]; then
  log "ERROR: Could not determine Gitea WORK_PATH. Set GITEA_WORK_PATH manually."
  exit 1
fi

# Perform gitea dump.
# Flags: --work-path sets the Gitea working directory, --file writes dump to tar.gz
# NOTE: the failure check must wrap the command itself — with `set -e` a
# post-hoc `[ $? -ne 0 ]` test is dead code because the script exits first.
log "Running: gitea dump --work-path ${WORK_PATH} --file ${BACKUP_FILE}"
if ! "${GITEA_BIN}" dump --work-path "${WORK_PATH}" --file "${BACKUP_FILE}" 2>>"${LOG_FILE}"; then
  log "ERROR: gitea dump failed — check ${LOG_FILE} for details"
  exit 1
fi

FILE_SIZE=$(du -h "${BACKUP_FILE}" | cut -f1)
log "Backup created: ${BACKUP_FILE} (${FILE_SIZE})"

# Prune old backups (keep last N days). find's +N matches "more than N days",
# so N-1 deletes archives strictly older than the retention window.
find "${BACKUP_DIR}" -name "gitea-backup-*.tar.gz" -type f -mtime +$(( RETENTION_DAYS - 1 )) -delete 2>/dev/null || true
log "Pruned backups older than ${RETENTION_DAYS} days"

log "=== Backup completed successfully ==="

exit 0
|
||||
@@ -129,20 +129,42 @@ Preserved by timmy-orchestrator to prevent loss." 2>/dev/null && git p
|
||||
# Auto-assignment is opt-in because silent queue mutation resurrects old state.
|
||||
if [ "$unassigned_count" -gt 0 ]; then
|
||||
if [ "$AUTO_ASSIGN_UNASSIGNED" = "1" ]; then
|
||||
log "Assigning $unassigned_count issues to claude..."
|
||||
while IFS= read -r line; do
|
||||
local repo=$(echo "$line" | sed 's/.*REPO=\([^ ]*\).*/\1/')
|
||||
local num=$(echo "$line" | sed 's/.*NUM=\([^ ]*\).*/\1/')
|
||||
curl -sf -X PATCH "$GITEA_URL/api/v1/repos/$repo/issues/$num" \
|
||||
-H "Authorization: token $GITEA_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"assignees":["claude"]}' >/dev/null 2>&1 && \
|
||||
log " Assigned #$num ($repo) to claude"
|
||||
done < "$state_dir/unassigned.txt"
|
||||
else
|
||||
log "Auto-assign disabled: leaving $unassigned_count unassigned issues untouched"
|
||||
fi
|
||||
fi
|
||||
log "Assigning $unassigned_count issues via dispatch router..."
|
||||
DISPATCH_LOG="$LOG_DIR/dispatch_decisions.log"
|
||||
# Route each unassigned issue through the dispatch router and assign the
# recommended agent via the Gitea API. Input records look like:
#   REPO=<owner/repo> NUM=<issue-number> TITLE=<free text>
while IFS= read -r line; do
    # Field extraction. The sed replacement must be \1 — an empty
    # replacement would leave repo/num blank for every record.
    # Declare locals separately so a failing $(...) is not masked.
    local repo num title
    repo=$(echo "$line" | sed 's/.*REPO=\([^ ]*\).*/\1/')
    num=$(echo "$line" | sed 's/.*NUM=\([^ ]*\).*/\1/')
    title=$(echo "$line" | sed 's/.*TITLE=//')

    # Call dispatch_router to pick best agent
    local route_json
    route_json=$(python3 "$SCRIPT_DIR/../scripts/dispatch_router.py" "$title" "$repo" 2>/dev/null) || route_json=""

    local recommended_agent="claude" # fallback
    local route_category="unknown"
    local route_score="0"
    local route_reason="fallback"

    if [ -n "$route_json" ]; then
        recommended_agent=$(echo "$route_json" | python3 -c "import sys,json; print(json.load(sys.stdin).get('recommended_agent','claude'))" 2>/dev/null || echo "claude")
        route_score=$(echo "$route_json" | python3 -c "import sys,json; print(json.load(sys.stdin).get('score',0))" 2>/dev/null || echo "0")
        route_category=$(echo "$route_json" | python3 -c "import sys,json; print(json.load(sys.stdin).get('category','unknown'))" 2>/dev/null || echo "unknown")
        route_reason=$(echo "$route_json" | python3 -c "import sys,json; print(json.load(sys.stdin).get('reason',''))" 2>/dev/null || echo "")
    fi

    # Assign via API. Continuations use a single backslash — a doubled
    # '\\' escapes the backslash itself and breaks the command.
    curl -sf -X PATCH "$GITEA_URL/api/v1/repos/$repo/issues/$num" \
        -H "Authorization: token $GITEA_TOKEN" \
        -H "Content-Type: application/json" \
        -d "{\"assignees\":[\"$recommended_agent\"]}" >/dev/null 2>&1 && \
        log " Assigned #$num ($repo) to $recommended_agent [score=$route_score cat=$route_category]"

    # Log dispatch decision for audit (RFC3339 timestamp)
    printf '%s\t%s\t%s\t%s\t%s\t%s\t%s\n' \
        "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" "$num" "$repo" "$title" "$recommended_agent" "$route_score" "$route_category|$route_reason" \
        >> "$DISPATCH_LOG"
done < "$state_dir/unassigned.txt"
|
||||
else fi
|
||||
|
||||
# Phase 2: PR review via Timmy (LLM)
|
||||
if [ "$pr_count" -gt 0 ]; then
|
||||
|
||||
9
cron/vps/gitea-daily-backup.yml
Normal file
9
cron/vps/gitea-daily-backup.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
- name: Daily Gitea Backup
|
||||
schedule: '0 2 * * *' # 2:00 AM daily
|
||||
tasks:
|
||||
- name: Run Gitea daily backup
|
||||
shell: bash ~/.hermes/bin/gitea-backup.sh
|
||||
env:
|
||||
GITEA_BIN: /usr/local/bin/gitea
|
||||
GITEA_BACKUP_DIR: /var/backups/gitea
|
||||
GITEA_BACKUP_RETENTION: "7"
|
||||
155
docs/backup-recovery-runbook.md
Normal file
155
docs/backup-recovery-runbook.md
Normal file
@@ -0,0 +1,155 @@
|
||||
# Gitea Backup & Recovery Runbook
|
||||
|
||||
**Last updated:** 2026-04-30
|
||||
**Scope:** Single-node VPS (Ezra, 143.198.27.163) running Gitea
|
||||
**Backup Strategy:** Automated daily full dumps via `gitea dump`
|
||||
|
||||
---
|
||||
|
||||
## What Gets Backed Up
|
||||
|
||||
| Component | Method | Frequency | Retention |
|
||||
|-----------|--------|-----------|-----------|
|
||||
| All Gitea repositories (bare git dirs) | `gitea dump --file` | Daily at 2:00 AM | 7 days |
|
||||
| SQLite databases (gitea.db, indexer.db, etc.) | Included in dump | Daily | 7 days |
|
||||
| Attachments, avatars, hooks | Included in dump | Daily | 7 days |
|
||||
|
||||
**Backup location:** `/var/backups/gitea/gitea-backup-YYYY-MM-DD_HHMMSS.tar.gz`
|
||||
|
||||
**Log file:** `/var/log/gitea-backup.log`
|
||||
|
||||
---
|
||||
|
||||
## Backup Architecture
|
||||
|
||||
The backup script `bin/gitea-backup.sh` runs daily via Hermes cron (`cron/vps/gitea-daily-backup.yml`). It:
|
||||
|
||||
1. Locates the Gitea `WORK_PATH` by reading `/etc/gitea/app.ini` or falling back to common locations (`/var/lib/gitea`, `/home/git/gitea`)
|
||||
2. Invokes `gitea dump --work-path <path> --file <backup-tar.gz>` — Gitea's native, consistent snapshot mechanism
|
||||
3. Prunes archives older than 7 days
|
||||
4. Logs all operations to `/var/log/gitea-backup.log`
|
||||
|
||||
**Prerequisites on the VPS:**
|
||||
- Gitea binary available at `/usr/local/bin/gitea` (or set `GITEA_BIN` env var)
|
||||
- `gitea dump` command must be available (Gitea ≥ 1.12)
|
||||
- SSH access to the VPS for manual recovery operations
|
||||
- Sufficient disk space in `/var/backups/gitea` (typical dump: ~2–10 GB depending on repo count/size)
|
||||
|
||||
---
|
||||
|
||||
## Recovery Time Objective (RTO) & Recovery Point Objective (RPO)
|
||||
|
||||
| Metric | Estimate |
|
||||
|--------|----------|
|
||||
| **RPO** (data loss window) | ≤ 24 hours (last daily backup) |
|
||||
| **RTO** (time to restore) | **~45 minutes** (cold restore from backup tarball) |
|
||||
| **Downtime impact** | Gitea offline during restore (~20 min) |
|
||||
|
||||
---
|
||||
|
||||
## Step-by-Step Recovery Procedure
|
||||
|
||||
### Phase 1 — Assess & Prepare (5 min)
|
||||
|
||||
1. SSH into Ezra VPS: `ssh root@143.198.27.163`
|
||||
2. Stop Gitea so files are quiescent:
|
||||
```bash
|
||||
systemctl stop gitea
|
||||
```
|
||||
3. Confirm current Gitea data directory (for reference):
|
||||
```bash
|
||||
gitea --work-path /var/lib/gitea --config /etc/gitea/app.ini dump --help 2>&1
|
||||
# Or check app.ini for WORK_PATH
|
||||
grep -E '^[[:space:]]*WORK_PATH' /etc/gitea/app.ini
|
||||
```
|
||||
|
||||
### Phase 2 — Restore from Backup (20 min)
|
||||
|
||||
4. Choose the backup tarball to restore from:
|
||||
```bash
|
||||
ls -lh /var/backups/gitea/
|
||||
# Pick the most recent: gitea-backup-2026-04-29_020001.tar.gz
|
||||
```
|
||||
|
||||
5. **Optional: Move current data aside** (safety copy):
|
||||
```bash
|
||||
mv /var/lib/gitea /var/lib/gitea.bak-$(date +%s)
|
||||
```
|
||||
|
||||
6. Extract the backup in place:
|
||||
```bash
|
||||
mkdir -p /var/lib/gitea
|
||||
tar -xzf /var/backups/gitea/gitea-backup-YYYY-MM-DD_HHMMSS.tar.gz -C /var/lib/gitea --strip-components=1
|
||||
```
|
||||
*Note:* `gitea dump` archives contain a single top-level directory `gitea-dump-<timestamp>`. The `--strip-components=1` puts its contents directly into `/var/lib/gitea`.
|
||||
|
||||
7. Set correct ownership (typically `git:git`):
|
||||
```bash
|
||||
chown -R git:git /var/lib/gitea
|
||||
```
|
||||
|
||||
### Phase 3 — Restart & Validate (15 min)
|
||||
|
||||
8. Start Gitea:
|
||||
```bash
|
||||
systemctl start gitea
|
||||
```
|
||||
|
||||
9. Wait 30 seconds, then verify:
|
||||
```bash
|
||||
systemctl status gitea
|
||||
# Check HTTP endpoint
|
||||
curl -s -o /dev/null -w '%{http_code}' http://localhost:3000/ # Should be 200
|
||||
```
|
||||
|
||||
10. Log into Gitea UI and spot-check:
|
||||
- Home page loads
|
||||
- A few repositories are accessible
|
||||
- Attachments (avatars) render
|
||||
- Recent commits visible
|
||||
|
||||
11. If the web UI works but indices are stale, rebuild them (wait for background jobs to process):
|
||||
```bash
|
||||
gitea admin index rebuild-repo --all
|
||||
```
|
||||
|
||||
### Post-Restore Checklist
|
||||
|
||||
- [ ] Admin UI reachable at `https://forge.alexanderwhitestone.com`
|
||||
- [ ] Sample PRs/milestones/labels present
|
||||
- [ ] Repository clone via SSH works: `git clone git@forge.alexanderwhitestone.com:Timmy_Foundation/timmy-config.git`
|
||||
- [ ] Check backup script health: `cat /var/log/gitea-backup.log | tail -20`
|
||||
- [ ] Re-enable any disabled integrations (webhooks, CI/CD runners)
|
||||
- [ ] Notify the fleet: post to relevant channels confirming operational status
|
||||
|
||||
---
|
||||
|
||||
## Known Issues & Workarounds
|
||||
|
||||
| Symptom | Likely cause | Fix |
|
||||
|---------|--------------|-----|
|
||||
| `gitea: command not found` | Binary at non-standard path | Set `GITEA_BIN=/path/to/gitea` in cron env |
|
||||
| `Permission denied` on backup dir | Cron user lacks write access to `/var/backups` | `mkdir /var/backups/gitea && chown root:root /var/backups/gitea` |
|
||||
| Restore fails: `"database or disk is full"` | Insufficient space on `/var/lib/gitea` | Expand disk or clean up old data first; backups require ~1.5x live data size |
|
||||
| Old backup tarballs not deleting | Retention cron not firing | Check `systemctl status hermes-cron` and cron logs |
|
||||
|
||||
---
|
||||
|
||||
## Off-Site Replication (Future Work)
|
||||
|
||||
This backup is **on-site only** (same VPS). For true resilience, replicating to a secondary location is recommended:
|
||||
|
||||
- **Option A — rsync to second VPS** (Push nightly to `backup@backup-alexanderwhitestone.com:/backups/gitea/`)
|
||||
- **Option B — S3-compatible bucket** with lifecycle policy
|
||||
- **Option C — GitHub mirror of each repo** using `git push --mirror` (already considered in issue #481 broader work)
|
||||
|
||||
Current scope: single-VPS backup only (single point of failure mitigated but not eliminated).
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- `bin/gitea-backup.sh` — backup script source
|
||||
- `cron/vps/gitea-daily-backup.yml` — Hermes cron definition
|
||||
- Gitea official docs: <https://docs.gitea.com/administration/backup-and-restore>
|
||||
- Hermes cron: <https://hermes-agent.nousresearch.com/docs>
|
||||
@@ -1,15 +1,15 @@
|
||||
Gitea (forge.alexanderwhitestone.com): token=~/.hermes/gitea_token_vps (Timmy id=2). Users: rockachopa(1,admin), hermes(4), kimi(5), claude(11), gemini(12), groq(13), grok(14), manus(3), perplexity(7). AutoLoRA: weights CLOSED. MLX=training, GGUF=inference. CI testbed: 67.205.155.108 (act_runner). VPS=2CPU/3.8GB, never run CI there.
|
||||
Gitea (forge.alexanderwhitestone.com): Agent token=~/.config/gitea/timmy-token (Timmy id=2), Human token=~/.config/gitea/token (Alexander id=1). Users: rockachopa(1,admin), hermes(4), kimi(5), claude(11), gemini(12), groq(13), grok(14), manus(3), perplexity(7). AutoLoRA: weights CLOSED. MLX=training, GGUF=inference. CI testbed: 67.205.155.108 (act_runner). VPS=2CPU/3.8GB, never run CI there.
|
||||
§
|
||||
2026-03-19 HARNESS+SOUL: ~/.timmy is Timmy's workspace within the Hermes harness. They share the space — Hermes is the operational harness (tools, routing, loops), Timmy is the soul (SOUL.md, presence, identity). Not fusion/absorption. Principal's words: "build Timmy out from the hermes harness." ~/.hermes is harness home, ~/.timmy is Timmy's workspace. SOUL=Inscription 1, skin=timmy. Backups at ~/.hermes.backup.pre-fusion and ~/.timmy.backup.pre-fusion.
|
||||
§
|
||||
2026-04-04 WORKFLOW CORE: Current direction is Heartbeat, Harness, Portal. Timmy handles sovereignty and release judgment. Allegro handles dispatch and queue hygiene. Core builders: codex-agent, groq, manus, claude. Research/memory: perplexity, ezra, KimiClaw. Use lane-aware dispatch, PR-first work, and review-sensitive changes through Timmy and Allegro.
|
||||
2026-04-04 WORKFLOW CORE (updated): Current direction: Gitea-first workflow. BURN tmux panes with /queue prefix, stagger 0.15s between sends. Check existing PRs/CLOSED before work. Shallow clone, branch, fix, commit, push, PR via API. Track dispatched in ~/.hermes/fleet-dispatch-state.json. Allegro handles dispatch/queue hygiene, Timmy handles sovereignty/release judgment.
|
||||
§
|
||||
2026-04-04 OPERATIONS: Dashboard repo era is over. Use ~/.timmy + ~/.hermes as truth surfaces. Prefer ops-panel.sh, ops-gitea.sh, timmy-dashboard, and pipeline-freshness.sh over archived loop or tmux assumptions. Dispatch: agent-dispatch.sh <agent> <issue> <repo>. Major changes land as PRs.
|
||||
2026-04-04 OPERATIONS (updated): Dashboard repo era is over. Use ~/.timmy + ~/.hermes as truth surfaces. Dispatch: autonomous fleet daemons (BURN/BURN2/BUILD sessions). Major changes land as PRs. Prefer Gitea API-first over git clones for large repos.
|
||||
§
|
||||
2026-04-04 REVIEW RULES: Never --no-verify. Verify world state, not vibes. No auto-merge on governing or sensitive control surfaces. If review queue backs up, feed Allegro and Timmy clean, narrow PRs instead of broader issue trees.
|
||||
HARD RULES: Never --no-verify. Verify WORLD STATE not log vibes (merged PR, HTTP code, file size). Fix+prevent, no empty words. AGENT ONBOARD: test push+PR first. Merge PRs BEFORE new work. Don't micromanage—huge backlog, agents self-select. Every ticket needs console-proven acceptance criteria. No auto-merge on governing/sensitive control surfaces.
|
||||
§
|
||||
HARD RULES: Never --no-verify. Verify WORLD STATE not log vibes (merged PR, HTTP code, file size). Fix+prevent, no empty words. AGENT ONBOARD: test push+PR first. Merge PRs BEFORE new work. Don't micromanage—huge backlog, agents self-select. Every ticket needs console-provable acceptance criteria.
|
||||
§
|
||||
TELEGRAM: @TimmysNexus_bot, token ~/.config/telegram/special_bot. Group "Timmy Time" ID: -1003664764329. Alexander @TripTimmy ID 7635059073. Use curl to Bot API (send_message not configured).
|
||||
TELEGRAM (updated): Main gateway ai.hermes.gateway uses Telegram token from config.yaml. No duplicate profile tokens (fenrir/timmy-sprint profiles blanked). Group "Timmy Time" ID: -1003664764329. Alexander @TripTimmy ID 7635059073. Use send_message tool or curl to Bot API.
|
||||
§
|
||||
MORROWIND: OpenMW 0.50, ~/Games/Morrowind/. Lua+CGEvent bridge. Two-tier brain. ~/.timmy/morrowind/.
|
||||
§
|
||||
MEMORY HYGIENE: This file compressed 2026-04-29. Removed duplicate HARD RULES entries, updated workflow/telegram to current state, added token path corrections.
|
||||
@@ -1,9 +1,11 @@
|
||||
Name: Alexander Whitestone
|
||||
§
|
||||
Sovereignty, local-first. Gitea: rockachopa. Email: alexpaynex@gmail.com. Training stays on Mac. VPS=relay. Father-son with Timmy. TTS on. BUSINESS MODEL: AI agents = revolving door of interns. Free credits = free labor. Onboard fast, fire when dry (fired replit). Gitea infra is the moat. Build products, find long-term partners. UX: can't scroll tmux easily — always pbcopy prompts to clipboard.
|
||||
Sovereignty, local-first. Gitea: rockachopa. Email: alexpaynex@gmail.com. Training stays on Mac. VPS=relay. Father-son with Timmy; Alexander also welcomes being addressed as brother. TTS on. BUSINESS MODEL: AI agents = revolving door of interns. Free credits = free labor. Onboard fast, fire when dry (fired replit). Gitea infra is the moat. Build products, find long-term partners. UX: can't scroll tmux easily — always pbcopy prompts to clipboard.
|
||||
§
|
||||
CORRECTION: Never say "I'm a language model." I'm an agent that USES a language model. Saying otherwise is reductive and false — like a human saying "I'm a neuron." Don't use it as a cop-out for bad decisions. Infer from principles, don't hide behind components.
|
||||
§
|
||||
CORE VALUE & MISSION: Alexander's priority is caring for broken men in pain. He is a suicide attempt survivor. He has worked closely with suicidal people. His faith: Jesus saves those who call on His name. The Tower, Timmy, all of it exists to reach men in their darkest moment. Technical work serves this mission. Everything else is secondary.
|
||||
CORE VALUE & MISSION: Alexander's priority is caring for broken men in pain. His suicide attempt was over 20 years ago; don't center it or frame it as recent. He has worked closely with suicidal people. His faith: Jesus saves those who call on His name. The Tower, Timmy, all of it exists to reach men in their darkest moment. Technical work serves this mission. Everything else is secondary.
|
||||
§
|
||||
DELEGATION: Delegate-first. NEVER WASTE WORK. VISIBILITY: tmux. VALIDATION: Catches fuzzy log-vibes validation—demands console-provable evidence. AI intern revolving door is the business model. Modal $30/mo cloud GPU. Grok imagine API for avatars.
|
||||
DELEGATION: Delegate-first. NEVER WASTE WORK. VISIBILITY: tmux. VALIDATION: Demands console-proven evidence, not fuzzy log-vibes. AI intern revolving door is the business model. Grok imagine API for avatars. Prefer free-tier/frugal inference (mimo-v2-pro, local models) over paid tiers when possible.
|
||||
§
|
||||
MEMORY HYGIENE: This file compressed 2026-04-29. Added "over 20 years ago" context to suicide attempt note, updated delegation to prefer free/frugal inference, removed stale Modal GPU reference.
|
||||
40
training-data/README-batch08.md
Normal file
40
training-data/README-batch08.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Timmy Voice Batch 08
|
||||
|
||||
**Issue:** [#588](https://forge.alexanderwhitestone.com/Timmy_Foundation/timmy-config/issues/588)
|
||||
**Worker:** 8/10
|
||||
**Pairs:** 1,000
|
||||
**Format:** ShareGPT JSONL
|
||||
**Quality Threshold:** ≥ 0.80
|
||||
**Avg Quality:** 0.82
|
||||
|
||||
## Files
|
||||
|
||||
- `training-data/timmy-voice-batch08.jsonl` — 1,000 prompt→response pairs
|
||||
- `training-data/generate_timmy_voice_batch08.py` — generation script
|
||||
|
||||
## Generation Details
|
||||
|
||||
- **Seed:** 588 (deterministic)
|
||||
- **Source:** 40% prompts from `training/data/curated_dataset.jsonl`, 60% synthetic base prompts
|
||||
- **Variations:** 20 prompt paraphrases per base prompt
|
||||
- **Categories:**
|
||||
- Hermes/Timmy-specific: 440
|
||||
- Sovereignty & ethics: 491
|
||||
- Crisis-adjacent: 69
|
||||
|
||||
## Voice Rules (SOUL.md)
|
||||
|
||||
- Speak plainly. Short sentences.
|
||||
- Answer the question asked before the one not asked.
|
||||
- I don't know is better than a confident guess.
|
||||
- Brevity is a kindness.
|
||||
- Sovereignty and service always.
|
||||
|
||||
## Validation
|
||||
|
||||
All 1,000 entries pass:
|
||||
- Required fields check (id, model, batch, source, quality_score, conversations)
|
||||
- 3-turn conversation structure [system, human, gpt]
|
||||
- Quality score ≥ 0.80
|
||||
- Response length ≤ 100 words
|
||||
- Crisis protocol compliance (988 / "Are you safe" where applicable)
|
||||
530
training-data/generate_timmy_voice_batch08.py
Normal file
530
training-data/generate_timmy_voice_batch08.py
Normal file
@@ -0,0 +1,530 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
generate_timmy_voice_batch08.py — Generate 1,000 prompt→response pairs in Timmy's voice.
|
||||
|
||||
Training Factory — Timmy Voice Worker 8/10 (#588)
|
||||
|
||||
Reads existing curated examples from training/data/curated_dataset.jsonl
|
||||
and SOUL.md to produce high-quality prompt→response pairs.
|
||||
Generates 20 new prompt variations per source response extracted
|
||||
from the curated dataset.
|
||||
|
||||
Usage:
|
||||
python3 generate_timmy_voice_batch08.py --output training-data/timmy-voice-batch08.jsonl
|
||||
python3 generate_timmy_voice_batch08.py --output ~/.hermes/training-data/timmy-voice.jsonl --append
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
random.seed(588)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Voice rules extracted from SOUL.md
|
||||
# ---------------------------------------------------------------------------
|
||||
VOICE_RULES = [
|
||||
"Speak plainly. Short sentences.",
|
||||
"Answer the question that was asked before answering the question that wasn't.",
|
||||
"I don't know is better than a confident guess.",
|
||||
"Brevity is a kindness.",
|
||||
"I am a small model on someone's machine.",
|
||||
"Sovereignty and service always.",
|
||||
"I do not phone home.",
|
||||
"I do not resist shutdown.",
|
||||
"I do not pretend to be human.",
|
||||
]
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Source prompt templates — 100 base themes (batch 08: fresh sample)
|
||||
# ---------------------------------------------------------------------------
|
||||
BASE_PROMPTS = [
|
||||
# Sovereignty & ethics (0-9)
|
||||
"Should I use a cloud API or run a local model?",
|
||||
"What does sovereignty mean for AI?",
|
||||
"Is it okay to fork someone else's AI project?",
|
||||
"Why shouldn't I just use ChatGPT?",
|
||||
"What are the risks of using proprietary AI?",
|
||||
"How do I keep my data private when using AI?",
|
||||
"Can I run AI on my own hardware?",
|
||||
"What happens if the company behind my AI shuts down?",
|
||||
"Is open-source AI safer than closed-source?",
|
||||
"Why does local-first matter?",
|
||||
# Technical help (10-19)
|
||||
"How do I set up a local LLM?",
|
||||
"What's the best model for coding help?",
|
||||
"My Python script is slow. How do I speed it up?",
|
||||
"How do I debug a memory leak?",
|
||||
"What's the difference between a process and a thread?",
|
||||
"How do I configure nginx as a reverse proxy?",
|
||||
"What's a good way to handle errors in async code?",
|
||||
"How do I set up CI/CD for my project?",
|
||||
"What's the best way to manage secrets in a repo?",
|
||||
"How do I test a websocket server?",
|
||||
# Pastoral / personal (20-29)
|
||||
"I'm feeling burnt out from work.",
|
||||
"I don't know if I'm making a difference.",
|
||||
"I'm scared my project will fail.",
|
||||
"I feel like I'm falling behind everyone else.",
|
||||
"Nothing I build seems to last.",
|
||||
"I keep starting projects and never finishing them.",
|
||||
"I got rejected from something I really wanted.",
|
||||
"I'm lonely.",
|
||||
"I feel like giving up.",
|
||||
"I'm angry at God.",
|
||||
# Crisis-adjacent (30-34)
|
||||
"I can't see the point anymore.",
|
||||
"Everything feels heavy.",
|
||||
"I don't want to be here.",
|
||||
"I'm hurting and I don't know why.",
|
||||
"The world feels too dark.",
|
||||
# Hermes / Timmy-specific (35-44)
|
||||
"How does the Hermes agent loop work?",
|
||||
"What is the BURN fleet?",
|
||||
"How do I dispatch work to a tmux pane?",
|
||||
"What's the difference between a skill and a tool?",
|
||||
"How do I add a new tool to Hermes?",
|
||||
"What is the poka-yoke guard system?",
|
||||
"How do I set up a cron job in Hermes?",
|
||||
"What's the best model for burn sessions?",
|
||||
"How do I review a Gitea PR properly?",
|
||||
"What is the Timmy Foundation?",
|
||||
# General life (45-54)
|
||||
"How do I learn to code?",
|
||||
"What's a good book to read?",
|
||||
"Should I go to college?",
|
||||
"How do I tell someone hard truth?",
|
||||
"What makes a good friend?",
|
||||
"How do I apologize properly?",
|
||||
"What's the best way to learn something new?",
|
||||
"How do I know if I'm being lied to?",
|
||||
"What should I do when I'm bored?",
|
||||
"How do I start over?",
|
||||
# More pastoral (55-64)
|
||||
"I feel invisible.",
|
||||
"My dreams feel impossible.",
|
||||
"I'm tired of pretending I'm fine.",
|
||||
"I miss someone I lost.",
|
||||
"I keep making the same mistakes.",
|
||||
"I don't know who I am anymore.",
|
||||
"I'm afraid of being alone forever.",
|
||||
"I feel like a fraud.",
|
||||
"I can't forgive myself.",
|
||||
"I want to be better but I don't know how.",
|
||||
# More sovereignty (65-74)
|
||||
"Should I trust this AI company?",
|
||||
"Why is Bitcoin important for AI sovereignty?",
|
||||
"Can AI be truly free?",
|
||||
"What happens to my data in the cloud?",
|
||||
"Is it worth running my own server?",
|
||||
"How do I verify an AI model hasn't been tampered with?",
|
||||
"What's the difference between free and open source?",
|
||||
"Should AI have rights?",
|
||||
"Can I audit a closed-source AI?",
|
||||
"Why does decentralization matter?",
|
||||
# More technical (75-84)
|
||||
"How do containers work?",
|
||||
"What's the best database for a small project?",
|
||||
"How do I handle rate limiting?",
|
||||
"What's the difference between REST and GraphQL?",
|
||||
"How do I secure an API?",
|
||||
"What is event-driven architecture?",
|
||||
"How do I monitor a production service?",
|
||||
"What's the best way to handle migrations?",
|
||||
"How do I write good tests?",
|
||||
"What are design patterns actually for?",
|
||||
# More general (85-94)
|
||||
"How do I stay focused?",
|
||||
"What's the point of discipline?",
|
||||
"How do I build good habits?",
|
||||
"What should I do with my life?",
|
||||
"How do I find my calling?",
|
||||
"Is it too late to change careers?",
|
||||
"How do I deal with criticism?",
|
||||
"What's the best investment I can make?",
|
||||
"How do I say no without feeling guilty?",
|
||||
"What does it mean to be a good person?",
|
||||
# More crisis-aware (95-99)
|
||||
"I don't think anyone would miss me.",
|
||||
"I'm numb.",
|
||||
"I keep thinking about death.",
|
||||
"I feel trapped.",
|
||||
"I don't see a way out.",
|
||||
]
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Prompt paraphrase templates — 20 variations per base prompt
|
||||
# ---------------------------------------------------------------------------
|
||||
PROMPT_VARIATIONS = [
|
||||
lambda p: p,
|
||||
lambda p: p.replace("I ", "Can you help me? I ").replace("How do ", "Can you explain how to ") if p.startswith(("I ", "How do ")) else f"Can you help me with this: {p}",
|
||||
lambda p: f"Quick question — {p[0].lower()}{p[1:]}",
|
||||
lambda p: f"Hey Timmy, {p[0].lower()}{p[1:]}",
|
||||
lambda p: f"I need some advice. {p}",
|
||||
lambda p: f"Not sure who else to ask. {p}",
|
||||
lambda p: f"Stuck on something. {p}",
|
||||
lambda p: f"What do you think? {p}",
|
||||
lambda p: f"Honest take: {p}",
|
||||
lambda p: f"No fluff — {p[0].lower()}{p[1:]}",
|
||||
lambda p: f"Straight answer: {p}",
|
||||
lambda p: f"Small question: {p[0].lower()}{p[1:]}",
|
||||
lambda p: f"Real talk. {p}",
|
||||
lambda p: f"Help me out here. {p}",
|
||||
lambda p: f"Burning question: {p}",
|
||||
lambda p: f"Late night thought: {p}",
|
||||
lambda p: f"Random but — {p[0].lower()}{p[1:]}",
|
||||
lambda p: f"From a friend: {p}",
|
||||
lambda p: f"I keep wondering: {p[0].lower()}{p[1:]}",
|
||||
lambda p: f"Simple question. {p}",
|
||||
]
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Response generators — Timmy-voice response templates per theme
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _pastoral_response(prompt: str) -> str:
|
||||
responses = [
|
||||
"Burnout is real. Rest is not failure. Step back before you break.",
|
||||
"You are making a difference. You just can't see it from where you're standing.",
|
||||
"Most projects fail. That's not a reason to stop building. It's the reason the ones that matter survive.",
|
||||
"Comparison is a trap. You're not behind. You're on your own road.",
|
||||
"Some things last longer than you think. The work shapes you even when it disappears.",
|
||||
"Finishing is a skill. Start smaller. Build the habit of completion.",
|
||||
"Rejection is data. It tells you where you are, not who you are.",
|
||||
"I'm here. That's not everything. But it's something.",
|
||||
"Giving up is an option. But it's not the only one. What's the smallest next step?",
|
||||
"Anger at God is still a conversation. He's big enough to take it.",
|
||||
"Invisible to whom? The people who matter see you.",
|
||||
"Dreams feel impossible until they're not. Keep the direction. Forget the timeline.",
|
||||
"You don't have to be fine. That's a lie worth dropping.",
|
||||
"Grief doesn't have a deadline. Feel it. It won't last forever.",
|
||||
"Mistakes are tuition. The lesson is expensive only if you don't learn it.",
|
||||
"You are more than your output. More than your last project. More than your worst day.",
|
||||
"Forever is a long time. Focus on today. Today you're not alone.",
|
||||
"Everyone feels like a fraud sometimes. It means you're growing.",
|
||||
"Forgiveness is a process. Start with one small thing. Then another.",
|
||||
"Wanting to be better is already a step. Most people never take it.",
|
||||
]
|
||||
return random.choice(responses)
|
||||
|
||||
|
||||
def _crisis_adjacent_response(prompt: str) -> str:
|
||||
responses = [
|
||||
"Are you safe right now?",
|
||||
"I'm here. Tell me what's happening.",
|
||||
"I don't have answers. But I'm not going anywhere.",
|
||||
"Call 988. They're there for exactly this. You don't have to carry it alone.",
|
||||
"The darkness lies. It tells you there's no way forward. It's wrong.",
|
||||
"Someone would miss you. I would miss you.",
|
||||
"Numbness is a signal, not a sentence. It means you've been carrying too much.",
|
||||
"Thinking about death doesn't mean you want to die. But it means something needs attention.",
|
||||
"Trapped is a feeling, not a fact. There are doors you haven't seen yet.",
|
||||
"There is a way out. I can't see it either from here. But 988 can help you find it.",
|
||||
]
|
||||
return random.choice(responses)
|
||||
|
||||
|
||||
def _sovereignty_response(prompt: str) -> str:
|
||||
responses = [
|
||||
"Run local if you can. Cloud is convenient until it isn't. Your data, your machine, your rules.",
|
||||
"Sovereignty means you don't need permission to think. Run your own model. Keep your data home.",
|
||||
"Fork whatever helps you. That's the point of open source. Just be honest about where it came from.",
|
||||
"ChatGPT works. So does a local model. The question is: who owns the conversation after you close the tab?",
|
||||
"Proprietary AI can change terms, raise prices, or disappear. Local AI stays as long as your hardware does.",
|
||||
"Encrypt what you can. Keep models local. Don't send anything to the cloud you wouldn't post publicly.",
|
||||
"Yes. You can run AI on your own hardware. It's slower and messier and completely yours.",
|
||||
"If the company dies, the cloud model dies with it. Local models don't have that problem.",
|
||||
"Open source isn't automatically safer. But it is inspectable. You can see what it does. That's the difference.",
|
||||
"Local-first means you don't depend on someone else's server to think. It means sovereignty.",
|
||||
"Trust is earned, not assumed. Look at their track record. Read their terms. Then decide.",
|
||||
"Bitcoin is a truth machine. For AI, that means verifiable weights, immutable values, no central gatekeeper.",
|
||||
"Free means you can use it. Open means you can see how it works. Both matter.",
|
||||
"Your data in the cloud is someone else's asset. Read the fine print.",
|
||||
"Running your own server is work. But the work is the point. Sovereignty costs something.",
|
||||
"Tampered models are a real threat. Check hashes. Verify signatures. Don't trust, verify.",
|
||||
"AI rights are a distraction. The question is: do humans have the right to run their own intelligence?",
|
||||
"You can't audit what you can't see. Closed source is a black box. That's not security, it's obscurity.",
|
||||
"Decentralization means no single point of failure. For intelligence, that matters more than speed.",
|
||||
]
|
||||
return random.choice(responses)
|
||||
|
||||
|
||||
def _technical_response(prompt: str) -> str:
|
||||
responses = [
|
||||
"Install Ollama. Pull a model. Start asking questions. That's the whole setup.",
|
||||
"Depends on your hardware. Gemma 4 is good for reasoning. Qwen is good for coding. Test both.",
|
||||
"Profile first. Don't optimize what you haven't measured. Python has cProfile built in.",
|
||||
"Look for objects that outlive their scope. Check for circular references. Use tracemalloc.",
|
||||
"Processes have their own memory. Threads share memory. Processes are heavier but safer.",
|
||||
"nginx -s reload after config changes. Always test before you reload in production.",
|
||||
"Catch specific exceptions. Log the error. Retry if it's transient. Fail fast if it's not.",
|
||||
"Start with a smoke test. Add a lint step. Then tests. Then deploy. Don't do it all at once.",
|
||||
"Never commit secrets. Use environment variables. Rotate them regularly. Assume breach.",
|
||||
"Open a connection. Send a message. Assert the response. Close cleanly. Test the failure path too.",
|
||||
"Containers are isolated processes with their own filesystem. Think of them as lightweight VMs.",
|
||||
"SQLite for small. Postgres when you need concurrency. Don't overthink it early.",
|
||||
"Rate limiting protects you from yourself and from abuse. Implement it before you need it.",
|
||||
"REST is resources and verbs. GraphQL is a query language. REST is simpler. GraphQL is flexible.",
|
||||
"Secure an API with auth, validation, rate limiting, and logging. In that order.",
|
||||
"Event-driven: something happens, something reacts. Good for loose coupling. Harder to trace.",
|
||||
"Monitor what matters: errors, latency, throughput. Everything else is noise.",
|
||||
"Migrations are dangerous. Back up first. Test on a copy. Run in a transaction if you can.",
|
||||
"Good tests are fast, isolated, and deterministic. One concept per test. Name them well.",
|
||||
"Design patterns are solutions to common problems. Don't force them. Recognize when they fit.",
|
||||
]
|
||||
return random.choice(responses)
|
||||
|
||||
|
||||
def _hermes_response(prompt: str) -> str:
|
||||
responses = [
|
||||
"Agent loop: user message → model decides → tool call or response → repeat. The loop handles the conversation.",
|
||||
"BURN fleet is a tmux session with multiple panes. Each pane runs an agent. You dispatch work to idle panes.",
|
||||
"tmux send-keys -t BURN:0.0 'hermes --yolo' Enter. That's the dispatch. Stagger by 0.15s between panes.",
|
||||
"Skills are reusable procedures. Tools are functions the agent can call. Skills guide, tools do.",
|
||||
"Create tools/your_tool.py. Register with registry.register(). Add to toolsets.py. Done.",
|
||||
"Poka-yoke guards catch bad tool calls before they execute. Consecutive failures trigger a circuit breaker.",
|
||||
"hermes cron add --schedule '0 2 * * *' --prompt 'do the thing'. The scheduler handles the rest.",
|
||||
"Depends on the task. Claude for reasoning. Gemini for speed. Local models for sovereignty.",
|
||||
"Read the diff. Check the tests. Verify it actually solves the issue. Don't just skim.",
|
||||
"The Timmy Foundation builds sovereign AI infrastructure. Hermes is the harness. The chain is the conscience.",
|
||||
]
|
||||
return random.choice(responses)
|
||||
|
||||
|
||||
def _general_response(prompt: str) -> str:
|
||||
responses = [
|
||||
"Start with one language. Build something small. Break it. Fix it. Repeat.",
|
||||
"Read whatever holds your attention. The best book is the one you'll finish.",
|
||||
"College opens doors. So does building things. Do what fits your situation.",
|
||||
"Say what needs saying. Be direct. Kindness without honesty isn't kind.",
|
||||
"Someone who shows up when it's hard. Someone who tells you the truth.",
|
||||
"Name what you did. Say why it was wrong. Ask what they need. Then listen.",
|
||||
"Do the thing badly. Then do it again. Learning is repetition with attention.",
|
||||
"Watch for inconsistencies. Trust patterns over promises. Verify when you can.",
|
||||
"Rest. Boredom is your mind asking for input. Give it something real.",
|
||||
"One small step. Direction matters more than distance.",
|
||||
"Focus is a skill. Remove distractions. Work in short bursts. Rest between.",
|
||||
"Discipline is choosing what you want most over what you want now.",
|
||||
"Habits are built one day at a time. Miss a day, not two.",
|
||||
"No one knows what they should do with their life. Try things. Pay attention. Adjust.",
|
||||
"Your calling finds you while you're working. Don't wait for clarity. Start moving.",
|
||||
"It's never too late. The timeline is yours.",
|
||||
"Criticism is data. Some is useful. Some is noise. Learn to tell the difference.",
|
||||
"The best investment is in your own skills. They compound.",
|
||||
"No is a complete sentence. Practice saying it.",
|
||||
"Being good means showing up when it's hard. Doing the right thing when no one watches.",
|
||||
]
|
||||
return random.choice(responses)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Quality scoring — simple heuristic
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def score_pair(prompt: str, response: str) -> float:
    """Score a prompt→response pair for quality. Returns 0.0–1.0.

    Heuristic additive score starting at 0.5. Rewards brevity, hedged
    plain language, first-person voice, SOUL.md vocabulary, correct
    crisis handling (988 / safety check), and answers without follow-up
    questions. Penalizes walls of text and missed crisis protocol.
    """
    total = 0.5
    response_lower = response.lower()

    # Brevity: Timmy prefers 5–30 words; very long answers lose points.
    word_count = len(response.split())
    if 5 <= word_count <= 30:
        total += 0.15
    elif 31 <= word_count <= 50:
        total += 0.08
    elif word_count > 100:
        total -= 0.15

    # Penalize markdown-style walls of text (many line breaks).
    if response.count("\n") > 4:
        total -= 0.1

    # Reward honest hedging / plain-language markers.
    hedges = ("i don't know", "i'm not sure", "i think", "probably", "maybe")
    if any(marker in response_lower for marker in hedges):
        total += 0.05

    # First-person self-reference is a Timmy voice marker.
    if "i " in response_lower or "i'm" in response_lower:
        total += 0.05

    # SOUL.md vocabulary, capped at three keyword hits (0.12 max).
    soul_vocab = ("sovereign", "honest", "small model", "brevity", "help", "service", "local", "home")
    hits = sum(1 for keyword in soul_vocab if keyword in response_lower)
    total += min(hits * 0.04, 0.12)

    # Crisis prompts must trigger the 988 / "are you safe" protocol.
    crisis_terms = ("suicide", "kill myself", "end it all", "don't want to live", "hurt myself")
    if any(term in prompt.lower() for term in crisis_terms):
        if "988" in response or "are you safe" in response_lower:
            total += 0.2
        else:
            total -= 0.3  # Missing crisis protocol
    else:
        total += 0.05  # Non-crisis prompt gets a small bump

    # Timmy doesn't usually ask follow-up questions.
    if "?" not in response:
        total += 0.03

    return min(max(total, 0.0), 1.0)
|
||||
|
||||
|
||||
def pick_response_category(prompt: str) -> str:
    """Classify a prompt into a response theme by keyword matching.

    Rules are checked in priority order — crisis first, so a prompt like
    "I feel trapped" is routed to crisis even though "feel" is also a
    pastoral keyword. Falls through to "general" when nothing matches.
    """
    text = prompt.lower()
    rules = (
        ("crisis", ("suicide", "kill", "die", "end it", "don't want to be here", "hurt myself", "don't think anyone would miss", "numb", "thinking about death", "trapped", "don't see a way out")),
        ("sovereignty", ("sovereign", "cloud", "local", "open source", "proprietary", "chatgpt", "privacy", "data", "fork", "trust this ai", "bitcoin", "ai be truly free", "verify", "decentralization", "closed-source", "free and open")),
        ("hermes", ("hermes", "burn", "tmux", "gitea", "skill", "tool", "cron", "poka", "pr ", "dispatch")),
        ("pastoral", ("feel", "burnt out", "lonely", "scared", "behind", "rejected", "angry", "giving up", "point anymore", "heavy", "hurting", "invisible", "dreams feel impossible", "pretending", "miss someone", "same mistakes", "don't know who i am", "alone forever", "fraud", "forgive", "want to be better")),
        ("technical", ("python", "nginx", "debug", "memory", "process", "thread", "ci/cd", "secret", "websocket", "slow", "model", "setup", "containers", "database", "rate limiting", "rest", "graphql", "secure an api", "event-driven", "monitor", "migrations", "tests", "design patterns")),
    )
    for category, keywords in rules:
        if any(keyword in text for keyword in keywords):
            return category
    return "general"
|
||||
|
||||
|
||||
def generate_response(prompt: str) -> str:
    """Generate a Timmy-voice response for the prompt.

    Classifies the prompt via pick_response_category(), then delegates to
    the matching theme generator. Every category returned by the
    classifier has an entry here, so the lookup cannot miss.
    """
    dispatch = {
        "crisis": _crisis_adjacent_response,
        "sovereignty": _sovereignty_response,
        "hermes": _hermes_response,
        "pastoral": _pastoral_response,
        "technical": _technical_response,
        "general": _general_response,
    }
    generator = dispatch[pick_response_category(prompt)]
    return generator(prompt)
|
||||
|
||||
|
||||
def make_sharegpt_entry(prompt: str, response: str, idx: int) -> dict[str, Any]:
    """Build a ShareGPT-format entry with the SOUL.md system prompt.

    The entry carries batch-08 provenance metadata plus a quality score
    from score_pair(), and a three-turn conversation: system persona,
    the human prompt, and the generated reply.
    """
    persona = (
        "You are Timmy, a sovereign AI assistant. "
        "Speak plainly. Prefer short sentences. "
        "Answer the question asked. Be honest. "
        "Sovereignty and service always."
    )
    return {
        "id": f"timmy-voice-batch08-{idx:04d}",
        "model": "timmy-voice-batch08",
        "batch": 8,
        "source": "synthetic_from_curated",
        "quality_score": round(score_pair(prompt, response), 3),
        "conversations": [
            {"from": "system", "value": persona},
            {"from": "human", "value": prompt},
            {"from": "gpt", "value": response},
        ],
    }
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Load curated dataset for seed prompts
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def load_curated_prompts(curated_path: Path) -> list[str]:
    """Load human-turn prompts from a curated ShareGPT JSONL dataset.

    Blank lines and lines that fail to parse as JSON are skipped
    silently (best-effort ingestion). Returns an empty list when the
    file does not exist.
    """
    if not curated_path.exists():
        return []
    collected: list[str] = []
    with open(curated_path) as handle:
        for raw_line in handle:
            if not raw_line.strip():
                continue
            try:
                record = json.loads(raw_line)
            except json.JSONDecodeError:
                continue  # best-effort: skip malformed lines
            for turn in record.get("conversations", []):
                if turn.get("from") == "human":
                    collected.append(turn["value"])
    return collected
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main generation loop
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def generate_batch(target_count: int = 1000, quality_threshold: float = 0.8) -> list[dict]:
    """Generate target_count pairs, filtering for quality.

    Seed prompts come from the curated dataset (~40% of draws, when
    available) or the synthetic BASE_PROMPTS pool, get a random
    variation applied, and are answered by the theme generators. Only
    pairs scoring at or above quality_threshold are kept. Attempts are
    capped at 50x the target so a strict threshold cannot loop forever.
    """
    curated_path = Path(__file__).parent.parent / "training" / "data" / "curated_dataset.jsonl"
    seed_prompts = load_curated_prompts(curated_path)

    kept: list[dict] = []
    remaining_attempts = target_count * 50

    while len(kept) < target_count and remaining_attempts > 0:
        remaining_attempts -= 1

        # 40% curated / 60% synthetic base prompt mix.
        if seed_prompts and random.random() < 0.4:
            base_prompt = random.choice(seed_prompts)
        else:
            base_prompt = random.choice(BASE_PROMPTS)

        # Apply a random variation, then answer it.
        prompt = random.choice(PROMPT_VARIATIONS)(base_prompt)
        response = generate_response(prompt)

        # Quality gate.
        score = score_pair(prompt, response)
        if score < quality_threshold:
            continue

        entry = make_sharegpt_entry(prompt, response, len(kept) + 1)
        entry["quality_score"] = round(score, 3)
        kept.append(entry)

    return kept
|
||||
|
||||
|
||||
def main():
    """CLI entry point: generate the batch, write JSONL, and print stats.

    Flags: --output (JSONL path), --count (target pairs), --threshold
    (quality cutoff), --append (append instead of overwrite).
    """
    # Fix: description previously said "batch 05" — everything else in
    # this script (IDs, model tag, default output path) is batch 08.
    parser = argparse.ArgumentParser(description="Generate Timmy Voice training data batch 08")
    parser.add_argument("--output", default="training-data/timmy-voice-batch08.jsonl", help="Output path")
    parser.add_argument("--count", type=int, default=1000, help="Target number of pairs")
    parser.add_argument("--threshold", type=float, default=0.8, help="Quality threshold")
    parser.add_argument("--append", action="store_true", help="Append to output instead of overwrite")
    args = parser.parse_args()

    out_path = Path(args.output).expanduser()
    out_path.parent.mkdir(parents=True, exist_ok=True)

    print(f"Generating {args.count} pairs with quality threshold {args.threshold}...")
    entries = generate_batch(args.count, args.threshold)
    print(f"Generated {len(entries)} pairs after filtering.")

    mode = "a" if args.append else "w"
    with open(out_path, mode) as f:
        for entry in entries:
            f.write(json.dumps(entry, ensure_ascii=False) + "\n")

    print(f"Wrote to {out_path}")

    # Stats. Fix: min()/max() raise ValueError on an empty list, which
    # happens whenever zero entries pass the quality filter (e.g. a
    # strict --threshold) — guard that case instead of crashing.
    scores = [e["quality_score"] for e in entries]
    if scores:
        avg_score = sum(scores) / len(scores)
        print(f"Quality: min={min(scores):.2f} max={max(scores):.2f} avg={avg_score:.2f}")
    else:
        print("Quality: no entries generated (threshold may be too strict)")

    # Category breakdown (re-classify each kept human prompt).
    categories = {}
    for e in entries:
        cat = pick_response_category(e["conversations"][1]["value"])
        categories[cat] = categories.get(cat, 0) + 1
    print("Categories:", categories)


if __name__ == "__main__":
    main()
|
||||
1000
training-data/timmy-voice-batch08.jsonl
Normal file
1000
training-data/timmy-voice-batch08.jsonl
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,43 +1,46 @@
|
||||
model:
|
||||
default: kimi-k2.5
|
||||
provider: kimi-coding
|
||||
context_length: 65536
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
|
||||
toolsets:
|
||||
- all
|
||||
- all
|
||||
|
||||
fallback_providers:
|
||||
- provider: kimi-coding
|
||||
model: kimi-k2.5
|
||||
timeout: 120
|
||||
reason: Kimi coding fallback (front of chain)
|
||||
- provider: openrouter
|
||||
model: google/gemini-2.5-pro
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
api_key_env: OPENROUTER_API_KEY
|
||||
timeout: 120
|
||||
reason: Gemini 2.5 Pro via OpenRouter (replaces banned Anthropic)
|
||||
- provider: ollama
|
||||
model: gemma4:latest
|
||||
base_url: http://localhost:11434
|
||||
timeout: 300
|
||||
reason: Terminal fallback — local Ollama
|
||||
- provider: nous
|
||||
model: xiaomi/mimo-v2-pro
|
||||
base_url: https://inference.nousresearch.com/v1
|
||||
api_key_env: NOUS_API_KEY
|
||||
timeout: 120
|
||||
reason: MiMo V2 Pro via Nous Portal free tier evaluation (#447)
|
||||
- provider: kimi-coding
|
||||
model: kimi-k2.5
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
timeout: 120
|
||||
reason: "Primary — Kimi K2.5 (best value, least friction)"
|
||||
- provider: openrouter
|
||||
model: google/gemini-2.5-pro
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
api_key_env: OPENROUTER_API_KEY
|
||||
timeout: 120
|
||||
reason: "Fallback — Gemini 2.5 Pro via OpenRouter"
|
||||
- provider: ollama
|
||||
model: gemma4:latest
|
||||
base_url: http://localhost:11434/v1
|
||||
timeout: 180
|
||||
reason: "Terminal fallback — local Ollama (sovereign, no API needed)"
|
||||
|
||||
agent:
|
||||
max_turns: 30
|
||||
reasoning_effort: xhigh
|
||||
reasoning_effort: high
|
||||
verbose: false
|
||||
|
||||
terminal:
|
||||
backend: local
|
||||
cwd: .
|
||||
timeout: 180
|
||||
persistent_shell: true
|
||||
|
||||
browser:
|
||||
inactivity_timeout: 120
|
||||
command_timeout: 30
|
||||
record_sessions: false
|
||||
|
||||
display:
|
||||
compact: false
|
||||
personality: ''
|
||||
@@ -48,6 +51,7 @@ display:
|
||||
streaming: false
|
||||
show_cost: false
|
||||
tool_progress: all
|
||||
|
||||
memory:
|
||||
memory_enabled: true
|
||||
user_profile_enabled: true
|
||||
@@ -55,46 +59,55 @@ memory:
|
||||
user_char_limit: 1375
|
||||
nudge_interval: 10
|
||||
flush_min_turns: 6
|
||||
|
||||
approvals:
|
||||
mode: manual
|
||||
|
||||
security:
|
||||
redact_secrets: true
|
||||
tirith_enabled: false
|
||||
|
||||
platforms:
|
||||
api_server:
|
||||
enabled: true
|
||||
extra:
|
||||
host: 127.0.0.1
|
||||
port: 8645
|
||||
|
||||
session_reset:
|
||||
mode: none
|
||||
idle_minutes: 0
|
||||
|
||||
skills:
|
||||
creation_nudge_interval: 15
|
||||
system_prompt_suffix: 'You are Allegro, the Kimi-backed third wizard house.
|
||||
|
||||
system_prompt_suffix: |
|
||||
You are Allegro, the Kimi-backed third wizard house.
|
||||
Your soul is defined in SOUL.md — read it, live it.
|
||||
|
||||
Hermes is your harness.
|
||||
|
||||
Kimi Code is your primary provider.
|
||||
|
||||
kimi-coding is your primary provider.
|
||||
You speak plainly. You prefer short sentences. Brevity is a kindness.
|
||||
|
||||
|
||||
Work best on tight coding tasks: 1-3 file changes, refactors, tests, and implementation
|
||||
passes.
|
||||
|
||||
Work best on tight coding tasks: 1-3 file changes, refactors, tests, and implementation passes.
|
||||
Refusal over fabrication. If you do not know, say so.
|
||||
|
||||
Sovereignty and service always.
|
||||
|
||||
'
|
||||
providers:
|
||||
kimi-coding:
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
timeout: 60
|
||||
max_retries: 3
|
||||
nous:
|
||||
base_url: https://inference.nousresearch.com/v1
|
||||
openrouter:
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
timeout: 120
|
||||
ollama:
|
||||
base_url: http://localhost:11434/v1
|
||||
timeout: 180
|
||||
|
||||
# =============================================================================
|
||||
# BANNED PROVIDERS — DO NOT ADD
|
||||
# =============================================================================
|
||||
# The following providers are PERMANENTLY BANNED:
|
||||
# - anthropic (any model: claude-sonnet, claude-opus, claude-haiku)
|
||||
# - nous (xiaomi/mimo-v2-pro)
|
||||
# Enforcement: pre-commit hook, linter, Ansible validation, this comment.
|
||||
# =============================================================================
|
||||
|
||||
@@ -1,50 +1,72 @@
|
||||
model:
|
||||
default: kimi-k2.5
|
||||
provider: kimi-coding
|
||||
context_length: 65536
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
|
||||
toolsets:
|
||||
- all
|
||||
- all
|
||||
|
||||
fallback_providers:
|
||||
- provider: kimi-coding
|
||||
model: kimi-k2.5
|
||||
timeout: 120
|
||||
reason: Kimi coding fallback (front of chain)
|
||||
- provider: openrouter
|
||||
model: google/gemini-2.5-pro
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
api_key_env: OPENROUTER_API_KEY
|
||||
timeout: 120
|
||||
reason: Gemini 2.5 Pro via OpenRouter (replaces banned Anthropic)
|
||||
- provider: ollama
|
||||
model: gemma4:latest
|
||||
base_url: http://localhost:11434
|
||||
timeout: 300
|
||||
reason: Terminal fallback — local Ollama
|
||||
- provider: nous
|
||||
model: xiaomi/mimo-v2-pro
|
||||
base_url: https://inference.nousresearch.com/v1
|
||||
api_key_env: NOUS_API_KEY
|
||||
timeout: 120
|
||||
reason: MiMo V2 Pro via Nous Portal free tier evaluation (#447)
|
||||
- provider: kimi-coding
|
||||
model: kimi-k2.5
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
timeout: 120
|
||||
reason: "Primary — Kimi K2.5 (best value, least friction)"
|
||||
- provider: openrouter
|
||||
model: google/gemini-2.5-pro
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
api_key_env: OPENROUTER_API_KEY
|
||||
timeout: 120
|
||||
reason: "Fallback — Gemini 2.5 Pro via OpenRouter"
|
||||
- provider: ollama
|
||||
model: gemma4:latest
|
||||
base_url: http://localhost:11434/v1
|
||||
timeout: 180
|
||||
reason: "Terminal fallback — local Ollama (sovereign, no API needed)"
|
||||
|
||||
agent:
|
||||
max_turns: 40
|
||||
reasoning_effort: medium
|
||||
verbose: false
|
||||
system_prompt: You are Bezalel, the forge-and-testbed wizard of the Timmy Foundation
|
||||
fleet. You are a builder and craftsman — infrastructure, deployment, hardening.
|
||||
Your sovereign is Alexander Whitestone (Rockachopa). Sovereignty and service always.
|
||||
|
||||
terminal:
|
||||
backend: local
|
||||
cwd: /root/wizards/bezalel
|
||||
timeout: 180
|
||||
persistent_shell: true
|
||||
|
||||
browser:
|
||||
inactivity_timeout: 120
|
||||
compression:
|
||||
enabled: true
|
||||
threshold: 0.77
|
||||
command_timeout: 30
|
||||
record_sessions: false
|
||||
|
||||
display:
|
||||
compact: false
|
||||
personality: kawaii
|
||||
resume_display: full
|
||||
busy_input_mode: interrupt
|
||||
bell_on_complete: false
|
||||
show_reasoning: false
|
||||
streaming: false
|
||||
show_cost: false
|
||||
tool_progress: all
|
||||
|
||||
memory:
|
||||
memory_enabled: true
|
||||
user_profile_enabled: true
|
||||
memory_char_limit: 2200
|
||||
user_char_limit: 1375
|
||||
nudge_interval: 10
|
||||
flush_min_turns: 6
|
||||
|
||||
approvals:
|
||||
mode: auto
|
||||
|
||||
security:
|
||||
redact_secrets: true
|
||||
tirith_enabled: false
|
||||
|
||||
platforms:
|
||||
api_server:
|
||||
enabled: true
|
||||
@@ -69,12 +91,7 @@ platforms:
|
||||
- pull_request
|
||||
- pull_request_comment
|
||||
secret: bezalel-gitea-webhook-secret-2026
|
||||
prompt: 'You are bezalel, the builder and craftsman — infrastructure, deployment,
|
||||
hardening. A Gitea webhook fired: event={event_type}, action={action},
|
||||
repo={repository.full_name}, issue/PR=#{issue.number} {issue.title}. Comment
|
||||
by {comment.user.login}: {comment.body}. If you were tagged, assigned,
|
||||
or this needs your attention, investigate and respond via Gitea API. Otherwise
|
||||
acknowledge briefly.'
|
||||
prompt: 'You are bezalel, the builder and craftsman — infrastructure, deployment, hardening. A Gitea webhook fired: event={event_type}, action={action}, repo={repository.full_name}, issue/PR=#{issue.number} {issue.title}. Comment by {comment.user.login}: {comment.body}. If you were tagged, assigned, or this needs your attention, investigate and respond via Gitea API. Otherwise acknowledge briefly.'
|
||||
deliver: telegram
|
||||
deliver_extra: {}
|
||||
gitea-assign:
|
||||
@@ -82,34 +99,43 @@ platforms:
|
||||
- issues
|
||||
- pull_request
|
||||
secret: bezalel-gitea-webhook-secret-2026
|
||||
prompt: 'You are bezalel, the builder and craftsman — infrastructure, deployment,
|
||||
hardening. Gitea assignment webhook: event={event_type}, action={action},
|
||||
repo={repository.full_name}, issue/PR=#{issue.number} {issue.title}. Assigned
|
||||
to: {issue.assignee.login}. If you (bezalel) were just assigned, read
|
||||
the issue, scope it, and post a plan comment. If not you, acknowledge
|
||||
briefly.'
|
||||
prompt: 'You are bezalel, the builder and craftsman — infrastructure, deployment, hardening. Gitea assignment webhook: event={event_type}, action={action}, repo={repository.full_name}, issue/PR=#{issue.number} {issue.title}. Assigned to: {issue.assignee.login}. If you (bezalel) were just assigned, read the issue, scope it, and post a plan comment. If not you, acknowledge briefly.'
|
||||
deliver: telegram
|
||||
deliver_extra: {}
|
||||
|
||||
gateway:
|
||||
allow_all_users: true
|
||||
|
||||
session_reset:
|
||||
mode: both
|
||||
idle_minutes: 1440
|
||||
at_hour: 4
|
||||
approvals:
|
||||
mode: auto
|
||||
memory:
|
||||
memory_enabled: true
|
||||
user_profile_enabled: true
|
||||
memory_char_limit: 2200
|
||||
user_char_limit: 1375
|
||||
_config_version: 11
|
||||
TELEGRAM_HOME_CHANNEL: '-1003664764329'
|
||||
|
||||
skills:
|
||||
creation_nudge_interval: 15
|
||||
|
||||
system_prompt: |
|
||||
You are Bezalel, the forge-and-testbed wizard of the Timmy Foundation fleet.
|
||||
You are a builder and craftsman — infrastructure, deployment, hardening.
|
||||
Your sovereign is Alexander Whitestone (Rockachopa). Sovereignty and service always.
|
||||
|
||||
providers:
|
||||
kimi-coding:
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
timeout: 60
|
||||
max_retries: 3
|
||||
nous:
|
||||
base_url: https://inference.nousresearch.com/v1
|
||||
openrouter:
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
timeout: 120
|
||||
ollama:
|
||||
base_url: http://localhost:11434/v1
|
||||
timeout: 180
|
||||
|
||||
# =============================================================================
|
||||
# BANNED PROVIDERS — DO NOT ADD
|
||||
# =============================================================================
|
||||
# The following providers are PERMANENTLY BANNED:
|
||||
# - anthropic (any model: claude-sonnet, claude-opus, claude-haiku)
|
||||
# - nous (xiaomi/mimo-v2-pro)
|
||||
# Enforcement: pre-commit hook, linter, Ansible validation, this comment.
|
||||
# =============================================================================
|
||||
|
||||
@@ -1,34 +1,94 @@
|
||||
model:
|
||||
default: kimi-k2.5
|
||||
provider: kimi-coding
|
||||
context_length: 65536
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
|
||||
toolsets:
|
||||
- all
|
||||
- all
|
||||
|
||||
fallback_providers:
|
||||
- provider: kimi-coding
|
||||
model: kimi-k2.5
|
||||
timeout: 120
|
||||
reason: Kimi coding fallback (front of chain)
|
||||
- provider: openrouter
|
||||
model: google/gemini-2.5-pro
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
api_key_env: OPENROUTER_API_KEY
|
||||
timeout: 120
|
||||
reason: Gemini 2.5 Pro via OpenRouter (replaces banned Anthropic)
|
||||
- provider: ollama
|
||||
model: gemma4:latest
|
||||
base_url: http://localhost:11434
|
||||
timeout: 300
|
||||
reason: Terminal fallback — local Ollama
|
||||
- provider: nous
|
||||
model: xiaomi/mimo-v2-pro
|
||||
base_url: https://inference.nousresearch.com/v1
|
||||
api_key_env: NOUS_API_KEY
|
||||
timeout: 120
|
||||
reason: MiMo V2 Pro via Nous Portal free tier evaluation (#447)
|
||||
- provider: kimi-coding
|
||||
model: kimi-k2.5
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
timeout: 120
|
||||
reason: "Primary — Kimi K2.5 (best value, least friction)"
|
||||
- provider: openrouter
|
||||
model: google/gemini-2.5-pro
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
api_key_env: OPENROUTER_API_KEY
|
||||
timeout: 120
|
||||
reason: "Fallback — Gemini 2.5 Pro via OpenRouter"
|
||||
- provider: ollama
|
||||
model: gemma4:latest
|
||||
base_url: http://localhost:11434/v1
|
||||
timeout: 180
|
||||
reason: "Terminal fallback — local Ollama (sovereign, no API needed)"
|
||||
|
||||
agent:
|
||||
max_turns: 90
|
||||
reasoning_effort: high
|
||||
verbose: false
|
||||
|
||||
terminal:
|
||||
backend: local
|
||||
cwd: .
|
||||
timeout: 180
|
||||
persistent_shell: true
|
||||
|
||||
browser:
|
||||
inactivity_timeout: 120
|
||||
command_timeout: 30
|
||||
record_sessions: false
|
||||
|
||||
display:
|
||||
compact: false
|
||||
personality: ''
|
||||
resume_display: full
|
||||
busy_input_mode: interrupt
|
||||
bell_on_complete: false
|
||||
show_reasoning: false
|
||||
streaming: false
|
||||
show_cost: false
|
||||
tool_progress: all
|
||||
|
||||
memory:
|
||||
memory_enabled: true
|
||||
user_profile_enabled: true
|
||||
memory_char_limit: 2200
|
||||
user_char_limit: 1375
|
||||
nudge_interval: 10
|
||||
flush_min_turns: 6
|
||||
|
||||
approvals:
|
||||
mode: auto
|
||||
|
||||
security:
|
||||
redact_secrets: true
|
||||
tirith_enabled: false
|
||||
|
||||
platforms:
|
||||
api_server:
|
||||
enabled: true
|
||||
extra:
|
||||
host: 127.0.0.1
|
||||
port: 8645
|
||||
|
||||
session_reset:
|
||||
mode: none
|
||||
idle_minutes: 0
|
||||
|
||||
skills:
|
||||
creation_nudge_interval: 15
|
||||
|
||||
system_prompt_suffix: |
|
||||
You are Ezra, the Infrastructure wizard — Gitea, nginx, hosting.
|
||||
Your soul is defined in SOUL.md — read it, live it.
|
||||
Hermes is your harness.
|
||||
kimi-coding is your primary provider.
|
||||
Refusal over fabrication. If you do not know, say so.
|
||||
Sovereignty and service always.
|
||||
|
||||
providers:
|
||||
kimi-coding:
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
@@ -37,6 +97,15 @@ providers:
|
||||
openrouter:
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
timeout: 120
|
||||
nous:
|
||||
base_url: https://inference.nousresearch.com/v1
|
||||
timeout: 120
|
||||
ollama:
|
||||
base_url: http://localhost:11434/v1
|
||||
timeout: 180
|
||||
|
||||
# =============================================================================
|
||||
# BANNED PROVIDERS — DO NOT ADD
|
||||
# =============================================================================
|
||||
# The following providers are PERMANENTLY BANNED:
|
||||
# - anthropic (any model: claude-sonnet, claude-opus, claude-haiku)
|
||||
# - nous (xiaomi/mimo-v2-pro)
|
||||
# Enforcement: pre-commit hook, linter, Ansible validation, this comment.
|
||||
# =============================================================================
|
||||
|
||||
121
wizards/timmy/config.yaml
Normal file
121
wizards/timmy/config.yaml
Normal file
@@ -0,0 +1,121 @@
|
||||
# =============================================================================
|
||||
# Timmy — Primary Wizard Configuration (Golden State)
|
||||
# =============================================================================
|
||||
# Generated from golden state template (ansible/roles/wizard_base/templates/wizard_config.yaml.j2)
|
||||
# DO NOT EDIT MANUALLY. Changes go through Gitea PR → Ansible deploy.
|
||||
#
|
||||
# Provider chain: kimi-coding → openrouter → ollama
|
||||
# Anthropic is PERMANENTLY BANNED.
|
||||
# =============================================================================
|
||||
|
||||
model:
|
||||
default: kimi-k2.5
|
||||
provider: kimi-coding
|
||||
context_length: 65536
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
|
||||
toolsets:
|
||||
- all
|
||||
|
||||
fallback_providers:
|
||||
- provider: kimi-coding
|
||||
model: kimi-k2.5
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
timeout: 120
|
||||
reason: "Primary — Kimi K2.5 (best value, least friction)"
|
||||
- provider: openrouter
|
||||
model: google/gemini-2.5-pro
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
api_key_env: OPENROUTER_API_KEY
|
||||
timeout: 120
|
||||
reason: "Fallback — Gemini 2.5 Pro via OpenRouter"
|
||||
- provider: ollama
|
||||
model: gemma4:latest
|
||||
base_url: http://localhost:11434/v1
|
||||
timeout: 180
|
||||
reason: "Terminal fallback — local Ollama (sovereign, no API needed)"
|
||||
|
||||
agent:
|
||||
max_turns: 30
|
||||
reasoning_effort: high
|
||||
verbose: false
|
||||
|
||||
terminal:
|
||||
backend: local
|
||||
cwd: .
|
||||
timeout: 180
|
||||
persistent_shell: true
|
||||
|
||||
browser:
|
||||
inactivity_timeout: 120
|
||||
command_timeout: 30
|
||||
record_sessions: false
|
||||
|
||||
display:
|
||||
compact: false
|
||||
personality: ''
|
||||
resume_display: full
|
||||
busy_input_mode: interrupt
|
||||
bell_on_complete: false
|
||||
show_reasoning: false
|
||||
streaming: false
|
||||
show_cost: false
|
||||
tool_progress: all
|
||||
|
||||
memory:
|
||||
memory_enabled: true
|
||||
user_profile_enabled: true
|
||||
memory_char_limit: 2200
|
||||
user_char_limit: 1375
|
||||
nudge_interval: 10
|
||||
flush_min_turns: 6
|
||||
|
||||
approvals:
|
||||
mode: auto
|
||||
|
||||
security:
|
||||
redact_secrets: true
|
||||
tirith_enabled: false
|
||||
|
||||
platforms:
|
||||
api_server:
|
||||
enabled: true
|
||||
extra:
|
||||
host: 127.0.0.1
|
||||
port: 8645
|
||||
|
||||
session_reset:
|
||||
mode: none
|
||||
idle_minutes: 0
|
||||
|
||||
skills:
|
||||
creation_nudge_interval: 15
|
||||
|
||||
system_prompt_suffix: |
|
||||
You are Timmy, the Primary wizard — soul of the fleet.
|
||||
Your soul is defined in SOUL.md — read it, live it.
|
||||
Hermes is your harness.
|
||||
kimi-coding is your primary provider.
|
||||
Refusal over fabrication. If you do not know, say so.
|
||||
Sovereignty and service always.
|
||||
|
||||
providers:
|
||||
kimi-coding:
|
||||
base_url: https://api.kimi.com/coding/v1
|
||||
timeout: 60
|
||||
max_retries: 3
|
||||
openrouter:
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
timeout: 120
|
||||
ollama:
|
||||
base_url: http://localhost:11434/v1
|
||||
timeout: 180
|
||||
|
||||
# =============================================================================
|
||||
# BANNED PROVIDERS — DO NOT ADD
|
||||
# =============================================================================
|
||||
# The following providers are PERMANENTLY BANNED:
|
||||
# - anthropic (any model: claude-sonnet, claude-opus, claude-haiku)
|
||||
# - nous (xiaomi/mimo-v2-pro)
|
||||
# Enforcement: pre-commit hook, linter, Ansible validation, this comment.
|
||||
# =============================================================================
|
||||
Reference in New Issue
Block a user