Compare commits
2 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
477ec86467 | ||
|
|
f83fdb7d55 |
96
docs/BEZALEL_TAILSCALE_BOOTSTRAP.md
Normal file
96
docs/BEZALEL_TAILSCALE_BOOTSTRAP.md
Normal file
@@ -0,0 +1,96 @@
|
||||
# Bezalel Tailscale Bootstrap
|
||||
|
||||
Refs #535
|
||||
|
||||
This is the repo-side operator packet for installing Tailscale on the Bezalel VPS and verifying the internal network path for federation work.
|
||||
|
||||
Important context — the Bezalel host address has drifted across sources:
|
||||
- issue #535 names `104.131.15.18`
|
||||
- older Bezalel control-plane docs also mention `159.203.146.185`
|
||||
- the current source of truth in this repo is `ansible/inventory/hosts.ini`, which currently resolves `bezalel` to `67.205.155.108`
|
||||
|
||||
Because of that drift, `scripts/bezalel_tailscale_bootstrap.py` now resolves the target host from `ansible/inventory/hosts.ini` by default instead of trusting a stale hardcoded IP.
|
||||
|
||||
## What the script does
|
||||
|
||||
`python3 scripts/bezalel_tailscale_bootstrap.py`
|
||||
|
||||
Safe by default:
|
||||
- builds the remote bootstrap script
|
||||
- writes it locally to `/tmp/bezalel_tailscale_bootstrap.sh`
|
||||
- prints the SSH command needed to run it
|
||||
- does **not** touch the VPS unless `--apply` is passed
|
||||
|
||||
When applied, the remote script does all of the issue’s repo-side bootstrap steps:
|
||||
- installs Tailscale
|
||||
- runs `tailscale up --ssh --hostname bezalel`
|
||||
- appends the provided Mac SSH public key to `~/.ssh/authorized_keys`
|
||||
- prints `tailscale status --json`
|
||||
- pings the expected peer targets:
|
||||
- Mac: `100.124.176.28`
|
||||
- Ezra: `100.126.61.75`
|
||||
|
||||
## Required secrets / inputs
|
||||
|
||||
- Tailscale auth key
|
||||
- Mac SSH public key
|
||||
|
||||
Provide them either directly or through files:
|
||||
- `--auth-key` or `--auth-key-file`
|
||||
- `--ssh-public-key` or `--ssh-public-key-file`
|
||||
|
||||
## Dry-run example
|
||||
|
||||
```bash
|
||||
python3 scripts/bezalel_tailscale_bootstrap.py \
|
||||
--auth-key-file ~/.config/tailscale/auth_key \
|
||||
--ssh-public-key-file ~/.ssh/id_ed25519.pub \
|
||||
--json
|
||||
```
|
||||
|
||||
This prints:
|
||||
- resolved host
|
||||
- host source (`inventory:<path>` when pulled from `ansible/inventory/hosts.ini`)
|
||||
- local script path
|
||||
- SSH command to execute
|
||||
- peer targets
|
||||
|
||||
## Apply example
|
||||
|
||||
```bash
|
||||
python3 scripts/bezalel_tailscale_bootstrap.py \
|
||||
--auth-key-file ~/.config/tailscale/auth_key \
|
||||
--ssh-public-key-file ~/.ssh/id_ed25519.pub \
|
||||
--apply \
|
||||
--json
|
||||
```
|
||||
|
||||
## Verifying success after apply
|
||||
|
||||
The script now parses the remote stdout into structured verification data:
|
||||
- `verification.tailscale.self.tailscale_ips`
|
||||
- `verification.tailscale.self.dns_name`
|
||||
- `verification.peers`
|
||||
- `verification.ping_ok`
|
||||
|
||||
A successful run should show:
|
||||
- at least one Bezalel Tailscale IP under `tailscale_ips`
|
||||
- `ping_ok.mac = 100.124.176.28`
|
||||
- `ping_ok.ezra = 100.126.61.75`
|
||||
|
||||
## Expected remote install commands
|
||||
|
||||
```bash
|
||||
curl -fsSL https://tailscale.com/install.sh | sh
|
||||
tailscale up --ssh --hostname bezalel
|
||||
install -d -m 700 ~/.ssh
|
||||
touch ~/.ssh/authorized_keys && chmod 600 ~/.ssh/authorized_keys
|
||||
tailscale status --json
|
||||
```
|
||||
|
||||
## Why this PR does not claim live completion
|
||||
|
||||
This repo can safely ship the bootstrap script, host resolution logic, structured proof parsing, and operator packet.
|
||||
It cannot honestly claim that Bezalel was actually joined to the tailnet unless a human/operator runs the script with a real auth key and real SSH access to the VPS.
|
||||
|
||||
That means the correct PR language for #535 is advancement, not pretend closure.
|
||||
@@ -14,6 +14,7 @@ Quick-reference index for common operational tasks across the Timmy Foundation i
|
||||
| Agent scorecard | fleet-ops | `python3 scripts/agent_scorecard.py` |
|
||||
| View fleet manifest | fleet-ops | `cat manifest.yaml` |
|
||||
| Run nightly codebase genome pass | timmy-home | `python3 scripts/codebase_genome_nightly.py --dry-run` |
|
||||
| Prepare Bezalel Tailscale bootstrap | timmy-home | `python3 scripts/bezalel_tailscale_bootstrap.py --auth-key-file <path> --ssh-public-key-file <path> --json` |
|
||||
|
||||
## the-nexus (Frontend + Brain)
|
||||
|
||||
|
||||
@@ -16,11 +16,14 @@ import argparse
|
||||
import json
|
||||
import shlex
|
||||
import subprocess
|
||||
import re
|
||||
from json import JSONDecoder
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
DEFAULT_HOST = "159.203.146.185"
|
||||
DEFAULT_HOST = "67.205.155.108"
|
||||
DEFAULT_HOSTNAME = "bezalel"
|
||||
DEFAULT_INVENTORY_PATH = Path(__file__).resolve().parents[1] / "ansible" / "inventory" / "hosts.ini"
|
||||
DEFAULT_PEERS = {
|
||||
"mac": "100.124.176.28",
|
||||
"ezra": "100.126.61.75",
|
||||
@@ -66,6 +69,37 @@ def parse_tailscale_status(payload: dict[str, Any]) -> dict[str, Any]:
|
||||
}
|
||||
|
||||
|
||||
def resolve_host(host: str | None, inventory_path: Path = DEFAULT_INVENTORY_PATH, hostname: str = DEFAULT_HOSTNAME) -> tuple[str, str]:
    """Resolve the target host and a label describing where it came from.

    Priority order: an explicitly supplied host, then an ``ansible_host=``
    entry for *hostname* in the inventory file, then DEFAULT_HOST.

    Returns:
        (host, source) where source is "explicit", "inventory:<path>",
        or "default".
    """
    if host:
        return host, "explicit"
    if inventory_path.exists():
        # Match lines like: "bezalel ansible_host=67.205.155.108 ansible_user=root"
        host_re = re.compile(rf"^{re.escape(hostname)}\s+.*ansible_host=([^\s]+)")
        for raw_line in inventory_path.read_text().splitlines():
            hit = host_re.match(raw_line.strip())
            if hit is not None:
                return hit.group(1), f"inventory:{inventory_path}"
    return DEFAULT_HOST, "default"
|
||||
|
||||
|
||||
def parse_apply_output(stdout: str) -> dict[str, Any]:
    """Parse remote bootstrap stdout into structured verification data.

    Args:
        stdout: Captured stdout of the remote bootstrap script (may be None/empty).

    Returns:
        Dict with:
          - "tailscale": parsed ``tailscale status --json`` summary, or None
            when no valid JSON object is found.
          - "ping_ok": mapping of peer name -> IP for each ``PING_OK:<name>:<ip>``
            marker line.
    """
    result: dict[str, Any] = {"tailscale": None, "ping_ok": {}}
    text = stdout or ""

    # The remote script dumps the raw status JSON somewhere in stdout; locate
    # the first "{" and raw_decode so trailing marker lines don't break parsing.
    start = text.find("{")
    if start != -1:
        try:
            payload, _ = JSONDecoder().raw_decode(text[start:])
            if isinstance(payload, dict):
                result["tailscale"] = parse_tailscale_status(payload)
        except Exception:
            # Best-effort: malformed JSON simply leaves "tailscale" as None.
            pass

    for line in text.splitlines():
        if line.startswith("PING_OK:"):
            # Fix: a truncated marker such as "PING_OK:" used to raise
            # ValueError on the 3-way unpack; skip malformed lines instead.
            parts = line.split(":", 2)
            if len(parts) == 3 and parts[1]:
                result["ping_ok"][parts[1]] = parts[2]
    return result
|
||||
|
||||
|
||||
def build_ssh_command(host: str, remote_script_path: str = "/tmp/bezalel_tailscale_bootstrap.sh") -> list[str]:
    """Return the argv list that runs the uploaded bootstrap script over SSH."""
    # Quote the remote path so spaces/metacharacters survive the remote shell.
    remote_invocation = f"bash {shlex.quote(remote_script_path)}"
    return ["ssh", host, remote_invocation]
|
||||
|
||||
@@ -89,8 +123,9 @@ def parse_peer_args(items: list[str]) -> dict[str, str]:
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
parser = argparse.ArgumentParser(description="Prepare or execute Tailscale bootstrap for the Bezalel VPS.")
|
||||
parser.add_argument("--host", default=DEFAULT_HOST)
|
||||
parser.add_argument("--host")
|
||||
parser.add_argument("--hostname", default=DEFAULT_HOSTNAME)
|
||||
parser.add_argument("--inventory-path", type=Path, default=DEFAULT_INVENTORY_PATH)
|
||||
parser.add_argument("--auth-key", help="Tailscale auth key")
|
||||
parser.add_argument("--auth-key-file", type=Path, help="Path to file containing the Tailscale auth key")
|
||||
parser.add_argument("--ssh-public-key", help="SSH public key to append to authorized_keys")
|
||||
@@ -116,6 +151,7 @@ def main() -> None:
|
||||
auth_key = _read_secret(args.auth_key, args.auth_key_file)
|
||||
ssh_public_key = _read_secret(args.ssh_public_key, args.ssh_public_key_file)
|
||||
peers = parse_peer_args(args.peer)
|
||||
resolved_host, host_source = resolve_host(args.host, args.inventory_path, args.hostname)
|
||||
|
||||
if not auth_key:
|
||||
raise SystemExit("Missing Tailscale auth key. Use --auth-key or --auth-key-file.")
|
||||
@@ -126,28 +162,31 @@ def main() -> None:
|
||||
write_script(args.script_out, script)
|
||||
|
||||
payload: dict[str, Any] = {
|
||||
"host": args.host,
|
||||
"host": resolved_host,
|
||||
"host_source": host_source,
|
||||
"hostname": args.hostname,
|
||||
"inventory_path": str(args.inventory_path),
|
||||
"script_out": str(args.script_out),
|
||||
"remote_script_path": args.remote_script_path,
|
||||
"ssh_command": build_ssh_command(args.host, args.remote_script_path),
|
||||
"ssh_command": build_ssh_command(resolved_host, args.remote_script_path),
|
||||
"peer_targets": peers,
|
||||
"applied": False,
|
||||
}
|
||||
|
||||
if args.apply:
|
||||
result = run_remote(args.host, args.remote_script_path)
|
||||
result = run_remote(resolved_host, args.remote_script_path)
|
||||
payload["applied"] = True
|
||||
payload["exit_code"] = result.returncode
|
||||
payload["stdout"] = result.stdout
|
||||
payload["stderr"] = result.stderr
|
||||
payload["verification"] = parse_apply_output(result.stdout)
|
||||
|
||||
if args.json:
|
||||
print(json.dumps(payload, indent=2))
|
||||
return
|
||||
|
||||
print("--- Bezalel Tailscale Bootstrap ---")
|
||||
print(f"Host: {args.host}")
|
||||
print(f"Host: {resolved_host} ({host_source})")
|
||||
print(f"Local script: {args.script_out}")
|
||||
print("SSH command: " + " ".join(payload["ssh_command"]))
|
||||
if args.apply:
|
||||
|
||||
@@ -1,313 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Cross-agent quality audit — #518
|
||||
|
||||
Fetches all PRs across Timmy_Foundation repos, classifies by agent,
|
||||
and produces a merge-rate scorecard.
|
||||
|
||||
Usage:
|
||||
python scripts/cross_agent_quality_audit.py
|
||||
python scripts/cross_agent_quality_audit.py --scorecard timmy-config/agent-quality-scorecard.md
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import requests
|
||||
|
||||
# Gitea API root and the organization whose repos are audited.
GITEA_BASE = "https://forge.alexanderwhitestone.com/api/v1"
ORG = "Timmy_Foundation"
# Auth token: $GITEA_TOKEN wins; otherwise read the on-disk token file.
# NOTE(review): if the env var is unset AND the file is missing, this raises
# FileNotFoundError at import time — confirm that is acceptable for a CLI tool.
TOKEN = os.environ.get("GITEA_TOKEN") or (
    Path.home() / ".config" / "gitea" / "token"
).read_text().strip()

HEADERS = {"Authorization": f"token {TOKEN}"}

# Repos to audit (active code repos)
DEFAULT_REPOS = [
    "timmy-home",
    "hermes-agent",
    "the-nexus",
    "the-door",
    "fleet-ops",
    "burn-fleet",
    "the-playground",
    "compounding-intelligence",
    "the-beacon",
    "second-son-of-timmy",
    "timmy-academy",
    "timmy-config",
]
|
||||
|
||||
|
||||
class AgentClassifier:
    """Classify PRs by agent identity.

    Signals are tried in priority order: explicit title tag, agent name
    embedded in the branch, then the git user login.
    """

    # PR title prefixes that explicitly name an agent, e.g. "[Claude] fix x".
    AGENT_TITLE_RE = re.compile(
        r"^\[(?P<agent>Claude|Ezra|Allegro|Bezalel|Timmy|Gemini|Kimi|Manus|Codex)\]",
        re.IGNORECASE,
    )

    # Branch patterns that embed agent names, e.g. "claude/issue-123".
    AGENT_BRANCH_RE = re.compile(
        r"(?P<agent>claude|ezra|allegro|bezalel|timmy|gemini|kimi|manus|codex)",
        re.IGNORECASE,
    )

    @classmethod
    def classify(cls, pr: Dict[str, Any]) -> str:
        """Return the lowercase agent name for *pr*, or "unknown"."""
        pr_title = pr.get("title", "")
        head_ref = pr.get("head", {}).get("ref", "")
        author = pr.get("user", {}).get("login", "").lower()

        # 1) Explicit title tag, 2) agent name embedded in branch — in that order.
        for hit in (cls.AGENT_TITLE_RE.match(pr_title), cls.AGENT_BRANCH_RE.search(head_ref)):
            if hit:
                return hit.group("agent").lower()

        # 3) Fall back to the git user mapping.
        if author == "claude":
            return "claude"
        if author == "rockachopa":
            # Rockachopa is the human / orchestrator — map to "burn-loop"
            return "burn-loop"

        return "unknown"
|
||||
|
||||
|
||||
def fetch_prs(repo: str, state: str = "all", per_page: int = 50) -> List[Dict[str, Any]]:
    """Paginate through all PRs for a repo.

    Args:
        repo: Repository name under the ORG organization.
        state: PR state filter passed to the Gitea API ("all", "open", "closed").
        per_page: Page size requested per API call.

    Returns:
        All PR payload dicts across every page.

    Raises:
        requests.HTTPError: On any non-2xx API response.
    """
    prs: List[Dict[str, Any]] = []
    page = 1
    while True:
        url = f"{GITEA_BASE}/repos/{ORG}/{repo}/pulls?state={state}&limit={per_page}&page={page}"
        resp = requests.get(url, headers=HEADERS, timeout=30)
        resp.raise_for_status()
        batch = resp.json()
        if not batch:
            break
        prs.extend(batch)
        # A short page means this was the last one; stop without another call.
        if len(batch) < per_page:
            break
        page += 1
    return prs
|
||||
|
||||
|
||||
def parse_datetime(dt_str: Optional[str]) -> Optional[datetime]:
    """Parse an ISO-8601 timestamp (Gitea uses a trailing "Z"), or return None."""
    if not dt_str:
        return None
    # fromisoformat (pre-3.11) does not accept "Z"; normalize to an explicit offset.
    normalized = dt_str.replace("Z", "+00:00")
    try:
        return datetime.fromisoformat(normalized)
    except ValueError:
        return None
|
||||
|
||||
|
||||
def hours_between(start: Optional[str], end: Optional[str]) -> Optional[float]:
    """Return elapsed hours between two ISO timestamps, or None if either is missing/invalid."""
    started = parse_datetime(start)
    ended = parse_datetime(end)
    if started is None or ended is None:
        return None
    return (ended - started).total_seconds() / 3600
|
||||
|
||||
|
||||
def audit_repos(repos: List[str]) -> Dict[str, Any]:
    """Run the audit and return aggregated stats.

    Fetches every PR for every repo in *repos*, classifies each PR by agent,
    and aggregates per-agent and per-repo merge statistics.

    Returns:
        Dict with keys: audited_at, repos_audited, repo_stats, agent_summary,
        raw_prs.
    """
    # Per-agent accumulator; defaultdict lazily creates a row per agent name.
    agent_stats: Dict[str, Dict[str, Any]] = defaultdict(
        lambda: {
            "total": 0,
            "merged": 0,
            "closed_unmerged": 0,
            "open": 0,
            "hours_to_merge": [],
            "hours_to_close": [],
            "repos": set(),
            "prs": [],
        }
    )

    repo_stats: Dict[str, Dict[str, Any]] = {}

    for repo in repos:
        print(f"Fetching PRs for {repo} ...", file=sys.stderr)
        try:
            prs = fetch_prs(repo)
        except requests.HTTPError as exc:
            # A missing/forbidden repo should not abort the whole audit.
            print(f"  SKIP {repo}: {exc}", file=sys.stderr)
            continue

        repo_merged = 0
        repo_total = len(prs)
        for pr in prs:
            agent = AgentClassifier.classify(pr)
            s = agent_stats[agent]
            s["total"] += 1
            s["repos"].add(repo)
            # Keep the raw per-PR record for the "raw_prs" section of the output.
            s["prs"].append(
                {
                    "repo": repo,
                    "number": pr["number"],
                    "title": pr["title"],
                    "state": pr["state"],
                    "merged": pr.get("merged", False),
                    "created_at": pr.get("created_at"),
                    "merged_at": pr.get("merged_at"),
                    "closed_at": pr.get("closed_at"),
                }
            )

            # Bucket the PR: merged, closed-without-merge, or still open.
            if pr.get("merged"):
                s["merged"] += 1
                repo_merged += 1
                h = hours_between(pr.get("created_at"), pr.get("merged_at"))
                if h is not None:
                    s["hours_to_merge"].append(h)
            elif pr["state"] == "closed":
                s["closed_unmerged"] += 1
                h = hours_between(pr.get("created_at"), pr.get("closed_at"))
                if h is not None:
                    s["hours_to_close"].append(h)
            else:
                s["open"] += 1

        repo_stats[repo] = {
            "total": repo_total,
            "merged": repo_merged,
            "merge_rate": round(repo_merged / repo_total, 2) if repo_total else 0,
        }

    # Compute derived metrics, ordered by total PRs descending so the summary
    # dict iterates from most- to least-active agent.
    summary = {}
    for agent, s in sorted(agent_stats.items(), key=lambda x: -x[1]["total"]):
        total = s["total"]
        merged = s["merged"]
        closed = s["closed_unmerged"]
        # "resolved" excludes open PRs; rates are computed over resolved only.
        resolved = merged + closed
        merge_rate = round(merged / resolved, 3) if resolved else 0
        avg_merge_hours = (
            round(sum(s["hours_to_merge"]) / len(s["hours_to_merge"]), 1)
            if s["hours_to_merge"]
            else None
        )
        avg_close_hours = (
            round(sum(s["hours_to_close"]) / len(s["hours_to_close"]), 1)
            if s["hours_to_close"]
            else None
        )
        summary[agent] = {
            "total_prs": total,
            "merged": merged,
            "closed_unmerged": closed,
            "open": s["open"],
            "merge_rate": merge_rate,
            "rejection_rate": round(closed / resolved, 3) if resolved else 0,
            "avg_hours_to_merge": avg_merge_hours,
            "avg_hours_to_close": avg_close_hours,
            "repos": sorted(s["repos"]),
        }

    return {
        "audited_at": datetime.now(timezone.utc).isoformat(),
        "repos_audited": repos,
        "repo_stats": repo_stats,
        "agent_summary": summary,
        "raw_prs": {a: s["prs"] for a, s in agent_stats.items()},
    }
|
||||
|
||||
|
||||
def render_scorecard(data: Dict[str, Any]) -> str:
    """Render a markdown scorecard.

    Args:
        data: Aggregated audit dict as produced by ``audit_repos``.

    Returns:
        Complete markdown document text ending with a trailing newline.
    """
    lines = [
        "# Cross-Agent Quality Scorecard",
        "",
        f"**Audited at:** {data['audited_at']}",
        f"**Repos audited:** {', '.join(data['repos_audited'])}",
        "",
        "## Per-Agent Summary",
        "",
        "| Agent | Total PRs | Merged | Closed (unmerged) | Open | Merge Rate | Rejection Rate | Avg Hours to Merge | Avg Hours to Close |",
        "|---|---|---:|---:|---:|---:|---:|---:|---:|",
    ]

    # agent_summary preserves the by-total ordering set in audit_repos.
    for agent, s in data["agent_summary"].items():
        # Use an em dash for agents with no merged/closed PRs (no timing data).
        merge_hours = f"{s['avg_hours_to_merge']:.1f}" if s["avg_hours_to_merge"] is not None else "—"
        close_hours = f"{s['avg_hours_to_close']:.1f}" if s["avg_hours_to_close"] is not None else "—"
        lines.append(
            f"| {agent} | {s['total_prs']} | {s['merged']} | {s['closed_unmerged']} | "
            f"{s['open']} | {s['merge_rate']:.1%} | {s['rejection_rate']:.1%} | "
            f"{merge_hours} | {close_hours} |"
        )

    lines.extend([
        "",
        "## Per-Repo Merge Rate",
        "",
        "| Repo | Total PRs | Merged | Merge Rate |",
        "|---|---|---:|---:|",
    ])

    # Repos sorted by total PR count, busiest first.
    for repo, s in sorted(data["repo_stats"].items(), key=lambda x: -x[1]["total"]):
        lines.append(
            f"| {repo} | {s['total']} | {s['merged']} | {s['merge_rate']:.1%} |"
        )

    lines.extend([
        "",
        "## Methodology",
        "",
        "- **Agent classification** uses three signals in priority order:",
        "  1. Explicit title tag (e.g. `[Claude]`, `[Ezra]`)",
        "  2. Branch name containing agent name (e.g. `claude/issue-123`)",
        "  3. Git user (`claude` → claude, `Rockachopa` → burn-loop)",
        "- **Merge rate** = merged / (merged + closed_unmerged). Open PRs are excluded.",
        "- **Rejection rate** = closed_unmerged / (merged + closed_unmerged).",
        "- **Time metrics** are computed from created_at to merged_at / closed_at.",
        "",
        "## Raw Data",
        "",
        "```json",
        json.dumps(data["agent_summary"], indent=2),
        "```",
        "",
    ])

    return "\n".join(lines) + "\n"
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: run the audit, write the scorecard and optional raw JSON."""

    def _emit(path_str: str, content: str, label: str) -> None:
        # Ensure parent dirs exist, write the file, and log to stderr.
        out = Path(path_str)
        out.parent.mkdir(parents=True, exist_ok=True)
        out.write_text(content)
        print(f"{label} written to {out}", file=sys.stderr)

    parser = argparse.ArgumentParser(description="Cross-agent quality audit")
    parser.add_argument("--repos", nargs="+", default=DEFAULT_REPOS, help="Repos to audit")
    parser.add_argument("--scorecard", default="timmy-config/agent-quality-scorecard.md", help="Output path")
    parser.add_argument("--json", default=None, help="Also write raw JSON to path")
    args = parser.parse_args()

    data = audit_repos(args.repos)

    _emit(args.scorecard, render_scorecard(data), "Scorecard")
    if args.json:
        _emit(args.json, json.dumps(data, indent=2, default=str), "Raw JSON")

    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
@@ -2,9 +2,12 @@ from scripts.bezalel_tailscale_bootstrap import (
|
||||
DEFAULT_PEERS,
|
||||
build_remote_script,
|
||||
build_ssh_command,
|
||||
parse_apply_output,
|
||||
parse_peer_args,
|
||||
parse_tailscale_status,
|
||||
resolve_host,
|
||||
)
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def test_build_remote_script_contains_install_up_and_key_append():
|
||||
@@ -78,3 +81,46 @@ def test_parse_peer_args_merges_overrides_into_defaults():
|
||||
"ezra": "100.126.61.76",
|
||||
"forge": "100.70.0.9",
|
||||
}
|
||||
|
||||
|
||||
def test_resolve_host_prefers_inventory_over_stale_default(tmp_path: Path):
    """An inventory-resolved ansible_host must win over the module DEFAULT_HOST."""
    inventory = tmp_path / "hosts.ini"
    inventory.write_text(
        "[fleet]\n"
        "ezra ansible_host=143.198.27.163 ansible_user=root\n"
        "bezalel ansible_host=67.205.155.108 ansible_user=root\n"
    )

    # host=None forces inventory lookup.
    host, source = resolve_host(None, inventory)

    assert host == "67.205.155.108"
    assert source == f"inventory:{inventory}"
|
||||
|
||||
|
||||
def test_parse_apply_output_extracts_status_and_ping_markers():
    """Both the embedded status JSON and the PING_OK marker lines are parsed."""
    # Simulated remote stdout: tailscale status JSON followed by marker lines.
    stdout = (
        '{"Self": {"HostName": "bezalel", "DNSName": "bezalel.tailnet.ts.net", "TailscaleIPs": ["100.90.0.10"]}, '
        '"Peer": {"node-1": {"HostName": "ezra", "TailscaleIPs": ["100.126.61.75"]}}}'
        "\nPING_OK:mac:100.124.176.28\n"
        "PING_OK:ezra:100.126.61.75\n"
    )

    result = parse_apply_output(stdout)

    assert result["tailscale"]["self"]["tailscale_ips"] == ["100.90.0.10"]
    assert result["ping_ok"] == {"mac": "100.124.176.28", "ezra": "100.126.61.75"}
|
||||
|
||||
|
||||
def test_runbook_doc_exists_and_mentions_inventory_auth_and_peer_checks():
    """The operator doc and runbook index must cover the bootstrap essentials."""
    doc = Path("docs/BEZALEL_TAILSCALE_BOOTSTRAP.md")
    assert doc.exists(), "missing docs/BEZALEL_TAILSCALE_BOOTSTRAP.md"
    text = doc.read_text()
    # Core content: inventory source of truth, tailscale join, key append,
    # and both expected peer IPs.
    assert "ansible/inventory/hosts.ini" in text
    assert "tailscale up" in text
    assert "authorized_keys" in text
    assert "100.124.176.28" in text
    assert "100.126.61.75" in text

    # The runbook index must link the new bootstrap entry.
    runbook = Path("docs/RUNBOOK_INDEX.md").read_text()
    assert "Prepare Bezalel Tailscale bootstrap" in runbook
    assert "scripts/bezalel_tailscale_bootstrap.py" in runbook
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
"""Tests for cross_agent_quality_audit.py — #518."""
|
||||
|
||||
import pytest
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
|
||||
|
||||
from cross_agent_quality_audit import AgentClassifier, hours_between
|
||||
|
||||
|
||||
class TestAgentClassifier:
    """Unit tests covering each classification signal in priority order."""

    def test_title_tag_claude(self):
        """Explicit [Claude] title tag wins regardless of branch/user."""
        pr = {"title": "[Claude] fix auth middleware", "head": {"ref": "fix/123"}, "user": {"login": "rockachopa"}}
        assert AgentClassifier.classify(pr) == "claude"

    def test_title_tag_ezra(self):
        """Explicit [Ezra] title tag maps to "ezra"."""
        pr = {"title": "[Ezra] tmux fleet launcher", "head": {"ref": "burn/10"}, "user": {"login": "rockachopa"}}
        assert AgentClassifier.classify(pr) == "ezra"

    def test_branch_name_claude(self):
        """Agent name embedded in the branch ref is the second signal."""
        pr = {"title": "fix auth", "head": {"ref": "claude/issue-1695"}, "user": {"login": "rockachopa"}}
        assert AgentClassifier.classify(pr) == "claude"

    def test_user_mapping(self):
        """The git user "claude" maps to "claude" when no tag/branch matches."""
        pr = {"title": "some fix", "head": {"ref": "fix/1"}, "user": {"login": "claude"}}
        assert AgentClassifier.classify(pr) == "claude"

    def test_rockachopa_maps_to_burn_loop(self):
        """The human orchestrator account maps to the "burn-loop" bucket."""
        pr = {"title": "some fix", "head": {"ref": "fix/1"}, "user": {"login": "Rockachopa"}}
        assert AgentClassifier.classify(pr) == "burn-loop"

    def test_unknown_fallback(self):
        """Anything unrecognized falls through to "unknown"."""
        pr = {"title": "some fix", "head": {"ref": "fix/1"}, "user": {"login": "random"}}
        assert AgentClassifier.classify(pr) == "unknown"
|
||||
|
||||
|
||||
class TestHoursBetween:
    """Tests for the hours_between timestamp-delta helper."""

    def test_same_day(self):
        """Two timestamps two hours apart yield 2.0."""
        h = hours_between("2026-04-22T10:00:00Z", "2026-04-22T12:00:00Z")
        assert h == 2.0

    def test_none_returns_none(self):
        """A missing endpoint on either side yields None rather than raising."""
        assert hours_between(None, "2026-04-22T12:00:00Z") is None
        assert hours_between("2026-04-22T10:00:00Z", None) is None
|
||||
@@ -1,244 +0,0 @@
|
||||
# Cross-Agent Quality Scorecard
|
||||
|
||||
**Audited at:** 2026-04-22T06:17:43.574309+00:00
|
||||
**Repos audited:** timmy-home, hermes-agent, the-nexus, the-door, fleet-ops, burn-fleet, the-playground, compounding-intelligence, the-beacon, second-son-of-timmy, timmy-academy, timmy-config
|
||||
|
||||
## Per-Agent Summary
|
||||
|
||||
| Agent | Total PRs | Merged | Closed (unmerged) | Open | Merge Rate | Rejection Rate | Avg Hours to Merge | Avg Hours to Close |
|
||||
|---|---|---:|---:|---:|---:|---:|---:|---:|
|
||||
| burn-loop | 1733 | 346 | 1239 | 148 | 21.8% | 78.2% | 18.9 | 20.6 |
|
||||
| unknown | 843 | 598 | 214 | 31 | 73.6% | 26.4% | 2.3 | 11.3 |
|
||||
| claude | 264 | 138 | 121 | 5 | 53.3% | 46.7% | 3.3 | 6.2 |
|
||||
| gemini | 95 | 24 | 70 | 1 | 25.5% | 74.5% | 0.5 | 11.3 |
|
||||
| timmy | 28 | 15 | 11 | 2 | 57.7% | 42.3% | 9.8 | 20.2 |
|
||||
| bezalel | 21 | 11 | 9 | 1 | 55.0% | 45.0% | 2.7 | 8.0 |
|
||||
| allegro | 21 | 7 | 11 | 3 | 38.9% | 61.1% | 31.1 | 20.2 |
|
||||
| ezra | 8 | 2 | 3 | 3 | 40.0% | 60.0% | 4.4 | 16.8 |
|
||||
| kimi | 6 | 3 | 3 | 0 | 50.0% | 50.0% | 39.5 | 0.5 |
|
||||
| manus | 6 | 5 | 1 | 0 | 83.3% | 16.7% | 0.0 | 18.8 |
|
||||
| codex | 2 | 2 | 0 | 0 | 100.0% | 0.0% | 2.3 | — |
|
||||
|
||||
## Per-Repo Merge Rate
|
||||
|
||||
| Repo | Total PRs | Merged | Merge Rate |
|
||||
|---|---|---:|---:|
|
||||
| the-nexus | 985 | 501 | 51.0% |
|
||||
| hermes-agent | 519 | 128 | 25.0% |
|
||||
| timmy-config | 404 | 140 | 35.0% |
|
||||
| timmy-home | 270 | 104 | 39.0% |
|
||||
| fleet-ops | 266 | 84 | 32.0% |
|
||||
| the-beacon | 175 | 62 | 35.0% |
|
||||
| the-door | 153 | 31 | 20.0% |
|
||||
| second-son-of-timmy | 111 | 82 | 74.0% |
|
||||
| compounding-intelligence | 50 | 9 | 18.0% |
|
||||
| the-playground | 44 | 2 | 5.0% |
|
||||
| burn-fleet | 38 | 2 | 5.0% |
|
||||
| timmy-academy | 12 | 6 | 50.0% |
|
||||
|
||||
## Methodology
|
||||
|
||||
- **Agent classification** uses three signals in priority order:
|
||||
1. Explicit title tag (e.g. `[Claude]`, `[Ezra]`)
|
||||
2. Branch name containing agent name (e.g. `claude/issue-123`)
|
||||
3. Git user (`claude` → claude, `Rockachopa` → burn-loop)
|
||||
- **Merge rate** = merged / (merged + closed_unmerged). Open PRs are excluded.
|
||||
- **Rejection rate** = closed_unmerged / (merged + closed_unmerged).
|
||||
- **Time metrics** are computed from created_at to merged_at / closed_at.
|
||||
|
||||
## Raw Data
|
||||
|
||||
```json
|
||||
{
|
||||
"burn-loop": {
|
||||
"total_prs": 1733,
|
||||
"merged": 346,
|
||||
"closed_unmerged": 1239,
|
||||
"open": 148,
|
||||
"merge_rate": 0.218,
|
||||
"rejection_rate": 0.782,
|
||||
"avg_hours_to_merge": 18.9,
|
||||
"avg_hours_to_close": 20.6,
|
||||
"repos": [
|
||||
"burn-fleet",
|
||||
"compounding-intelligence",
|
||||
"fleet-ops",
|
||||
"hermes-agent",
|
||||
"second-son-of-timmy",
|
||||
"the-beacon",
|
||||
"the-door",
|
||||
"the-nexus",
|
||||
"the-playground",
|
||||
"timmy-academy",
|
||||
"timmy-config",
|
||||
"timmy-home"
|
||||
]
|
||||
},
|
||||
"unknown": {
|
||||
"total_prs": 843,
|
||||
"merged": 598,
|
||||
"closed_unmerged": 214,
|
||||
"open": 31,
|
||||
"merge_rate": 0.736,
|
||||
"rejection_rate": 0.264,
|
||||
"avg_hours_to_merge": 2.3,
|
||||
"avg_hours_to_close": 11.3,
|
||||
"repos": [
|
||||
"fleet-ops",
|
||||
"hermes-agent",
|
||||
"second-son-of-timmy",
|
||||
"the-beacon",
|
||||
"the-door",
|
||||
"the-nexus",
|
||||
"timmy-academy",
|
||||
"timmy-config",
|
||||
"timmy-home"
|
||||
]
|
||||
},
|
||||
"claude": {
|
||||
"total_prs": 264,
|
||||
"merged": 138,
|
||||
"closed_unmerged": 121,
|
||||
"open": 5,
|
||||
"merge_rate": 0.533,
|
||||
"rejection_rate": 0.467,
|
||||
"avg_hours_to_merge": 3.3,
|
||||
"avg_hours_to_close": 6.2,
|
||||
"repos": [
|
||||
"hermes-agent",
|
||||
"the-nexus",
|
||||
"timmy-config",
|
||||
"timmy-home"
|
||||
]
|
||||
},
|
||||
"gemini": {
|
||||
"total_prs": 95,
|
||||
"merged": 24,
|
||||
"closed_unmerged": 70,
|
||||
"open": 1,
|
||||
"merge_rate": 0.255,
|
||||
"rejection_rate": 0.745,
|
||||
"avg_hours_to_merge": 0.5,
|
||||
"avg_hours_to_close": 11.3,
|
||||
"repos": [
|
||||
"hermes-agent",
|
||||
"the-nexus",
|
||||
"timmy-config",
|
||||
"timmy-home"
|
||||
]
|
||||
},
|
||||
"timmy": {
|
||||
"total_prs": 28,
|
||||
"merged": 15,
|
||||
"closed_unmerged": 11,
|
||||
"open": 2,
|
||||
"merge_rate": 0.577,
|
||||
"rejection_rate": 0.423,
|
||||
"avg_hours_to_merge": 9.8,
|
||||
"avg_hours_to_close": 20.2,
|
||||
"repos": [
|
||||
"burn-fleet",
|
||||
"hermes-agent",
|
||||
"the-nexus",
|
||||
"timmy-config",
|
||||
"timmy-home"
|
||||
]
|
||||
},
|
||||
"bezalel": {
|
||||
"total_prs": 21,
|
||||
"merged": 11,
|
||||
"closed_unmerged": 9,
|
||||
"open": 1,
|
||||
"merge_rate": 0.55,
|
||||
"rejection_rate": 0.45,
|
||||
"avg_hours_to_merge": 2.7,
|
||||
"avg_hours_to_close": 8.0,
|
||||
"repos": [
|
||||
"burn-fleet",
|
||||
"hermes-agent",
|
||||
"the-beacon",
|
||||
"the-nexus",
|
||||
"timmy-config",
|
||||
"timmy-home"
|
||||
]
|
||||
},
|
||||
"allegro": {
|
||||
"total_prs": 21,
|
||||
"merged": 7,
|
||||
"closed_unmerged": 11,
|
||||
"open": 3,
|
||||
"merge_rate": 0.389,
|
||||
"rejection_rate": 0.611,
|
||||
"avg_hours_to_merge": 31.1,
|
||||
"avg_hours_to_close": 20.2,
|
||||
"repos": [
|
||||
"burn-fleet",
|
||||
"hermes-agent",
|
||||
"the-beacon",
|
||||
"the-nexus",
|
||||
"timmy-config",
|
||||
"timmy-home"
|
||||
]
|
||||
},
|
||||
"ezra": {
|
||||
"total_prs": 8,
|
||||
"merged": 2,
|
||||
"closed_unmerged": 3,
|
||||
"open": 3,
|
||||
"merge_rate": 0.4,
|
||||
"rejection_rate": 0.6,
|
||||
"avg_hours_to_merge": 4.4,
|
||||
"avg_hours_to_close": 16.8,
|
||||
"repos": [
|
||||
"burn-fleet",
|
||||
"fleet-ops",
|
||||
"timmy-config",
|
||||
"timmy-home"
|
||||
]
|
||||
},
|
||||
"kimi": {
|
||||
"total_prs": 6,
|
||||
"merged": 3,
|
||||
"closed_unmerged": 3,
|
||||
"open": 0,
|
||||
"merge_rate": 0.5,
|
||||
"rejection_rate": 0.5,
|
||||
"avg_hours_to_merge": 39.5,
|
||||
"avg_hours_to_close": 0.5,
|
||||
"repos": [
|
||||
"hermes-agent",
|
||||
"the-nexus",
|
||||
"timmy-home"
|
||||
]
|
||||
},
|
||||
"manus": {
|
||||
"total_prs": 6,
|
||||
"merged": 5,
|
||||
"closed_unmerged": 1,
|
||||
"open": 0,
|
||||
"merge_rate": 0.833,
|
||||
"rejection_rate": 0.167,
|
||||
"avg_hours_to_merge": 0.0,
|
||||
"avg_hours_to_close": 18.8,
|
||||
"repos": [
|
||||
"the-nexus",
|
||||
"timmy-config"
|
||||
]
|
||||
},
|
||||
"codex": {
|
||||
"total_prs": 2,
|
||||
"merged": 2,
|
||||
"closed_unmerged": 0,
|
||||
"open": 0,
|
||||
"merge_rate": 1.0,
|
||||
"rejection_rate": 0.0,
|
||||
"avg_hours_to_merge": 2.3,
|
||||
"avg_hours_to_close": null,
|
||||
"repos": [
|
||||
"timmy-config",
|
||||
"timmy-home"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Reference in New Issue
Block a user