Compare commits
2 Commits
fix/520
...
step35/512
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4638aec983 | ||
|
|
aa69610a9b |
111
scripts/agent-dispatch.sh
Executable file
111
scripts/agent-dispatch.sh
Executable file
@@ -0,0 +1,111 @@
|
||||
#!/bin/bash
# ============================================================================
# Agent Dispatch — One-shot prompt generator for fleet workers
# ============================================================================
# Refs: timmy-home #512
#
# Packages context, token, repo, issue, and Git/Gitea commands into a
# copy-pasteable prompt for any agent (Claude, Sonnet, Kimi, Grok, etc.).
#
# Usage:
#   scripts/agent-dispatch.sh <agent> <repo> <issue#> [<org>]
#
# Supported agents:
#   sonnet, claude, kimi, grok, gemini, ezra, bezalel, allegro, timmy
#
# Example:
#   scripts/agent-dispatch.sh sonnet the-nexus 844 Timmy_Foundation
# ============================================================================

set -euo pipefail

AGENT="${1:-}"
REPO="${2:-}"
ISSUE="${3:-}"
ORG="${4:-Timmy_Foundation}"

# Token resolution: environment variable wins, else the on-disk token file.
TOKEN="${GITEA_TOKEN:-$(cat ~/.config/gitea/token 2>/dev/null || true)}"
FORGE="https://forge.alexanderwhitestone.com"

if [ -z "$AGENT" ] || [ -z "$REPO" ] || [ -z "$ISSUE" ]; then
    echo "Usage: $0 <agent> <repo> <issue#> [<org>]"
    echo ""
    echo "Supported agents:"
    echo "  sonnet  — Anthropic Claude Sonnet (cloud, high-reasoning)"
    echo "  claude  — Anthropic Claude (general)"
    echo "  kimi    — Moonshot Kimi K2.5 (cloud, long-context)"
    echo "  grok    — xAI Grok (cloud, real-time)"
    echo "  gemini  — Google Gemini (cloud, multimodal)"
    echo "  ezra    — Local archivist house (read-before-write)"
    echo "  bezalel — Local artificer house (proof-required)"
    echo "  allegro — Local dispatch house (tempo-and-routing)"
    echo "  timmy   — Local sovereign house (final review)"
    exit 1
fi

# FIX: the issue number is interpolated into API URLs and branch names below;
# reject anything non-numeric early instead of generating a broken prompt.
case "$ISSUE" in
    *[!0-9]*)
        echo "ERROR: issue number must be numeric, got '$ISSUE'"
        exit 1
        ;;
esac

# Validate agent
VALID_AGENTS="sonnet claude kimi grok gemini ezra bezalel allegro timmy"
if ! echo "$VALID_AGENTS" | grep -qw "$AGENT"; then
    echo "ERROR: Unknown agent '$AGENT'"
    echo "Valid agents: $VALID_AGENTS"
    exit 1
fi

# Fetch issue details (best effort — blank on any failure).
if [ -n "$TOKEN" ]; then
    ISSUE_JSON=$(curl -s -H "Authorization: token ${TOKEN}" \
        "${FORGE}/api/v1/repos/${ORG}/${REPO}/issues/${ISSUE}" 2>/dev/null || true)
    ISSUE_TITLE=$(echo "$ISSUE_JSON" | python3 -c "import sys,json; d=json.load(sys.stdin); print(d.get('title',''))" 2>/dev/null || true)
    ISSUE_BODY=$(echo "$ISSUE_JSON" | python3 -c "import sys,json; d=json.load(sys.stdin); print(d.get('body',''))" 2>/dev/null || true)
else
    echo "WARNING: No Gitea token found. Issue details will be blank."
    ISSUE_TITLE=""
    ISSUE_BODY=""
fi

# FIX: issue titles may contain double quotes or newlines, which would corrupt
# the generated `git commit -m "..."` command and the single-line PR JSON.
# Use a sanitized variant wherever the title lands inside a command; the raw
# title is still shown in the human-readable header.
SAFE_TITLE=$(printf '%s' "$ISSUE_TITLE" | tr '\n' ' ' | sed 's/"/\\"/g')

# Unquoted EOF is deliberate: dispatch values expand now, while \${TOKEN} and
# \${GITEA_TOKEN} stay literal so the receiving agent substitutes its own token.
cat <<EOF
================================================================================
DISPATCH PROMPT — ${AGENT} → ${ORG}/${REPO}#${ISSUE}
================================================================================

Agent: ${AGENT}
Repo:  ${ORG}/${REPO}
Issue: #${ISSUE}
Title: ${ISSUE_TITLE}

--- ISSUE BODY ---
${ISSUE_BODY}

--- INSTRUCTIONS ---

1. Clone the repo:
   git clone --depth 1 "https://\${TOKEN}@forge.alexanderwhitestone.com/${ORG}/${REPO}.git"
   cd ${REPO}

2. Create branch:
   git checkout -b ${AGENT}/${REPO}-${ISSUE}

3. Read the issue, implement the fix or feature.

4. Test your changes locally.

5. Commit and push:
   git add -A
   git commit -m "[${AGENT}] ${SAFE_TITLE} (#${ISSUE})"
   git push origin ${AGENT}/${REPO}-${ISSUE}

6. Open PR via Gitea API:
   curl -X POST \\
     -H "Authorization: token \${TOKEN}" \\
     -H "Content-Type: application/json" \\
     "${FORGE}/api/v1/repos/${ORG}/${REPO}/pulls" \\
     -d '{"title":"[${AGENT}] ${SAFE_TITLE}","head":"${AGENT}/${REPO}-${ISSUE}","base":"main","body":"Closes #${ISSUE}"}'

7. File new issues for anything discovered.

Token: \${GITEA_TOKEN} or ~/.config/gitea/token
Forge: ${FORGE}

Sovereignty and service always.
================================================================================
EOF
|
||||
@@ -1,245 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Fleet cost report generator.
|
||||
|
||||
Reads Timmy's sovereignty metrics database and estimates paid API spend by
|
||||
agent/provider lane. Default output targets the local timmy-config reports
|
||||
folder so the cost report can be filed from the sidecar repo.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import sqlite3
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Iterable
|
||||
|
||||
# Location of Timmy's sovereignty metrics database (sqlite).
DB_PATH = Path.home() / ".timmy" / "metrics" / "model_metrics.db"


# Known paid lanes. Order matters: `match_lane` returns the first entry whose
# `patterns` substrings match a (lowercased) model name, and the report's
# inventory section lists lanes in this order.
AGENT_LANES = (
    {
        "agent": "Timmy Cloud Lane",
        "provider": "OpenRouter",
        "patterns": ("openrouter/", "google/", "deepseek/", "x-ai/", "mistral/"),
        "notes": "Cloud fallback and external reasoning routed through OpenRouter-compatible lanes.",
    },
    {
        "agent": "Ezra",
        "provider": "Anthropic",
        "patterns": ("claude-", "anthropic/claude"),
        "notes": "Archivist / long-form reasoning house on Claude-family models.",
    },
    {
        "agent": "Bezalel",
        "provider": "OpenAI",
        "patterns": ("gpt-", "openai/", "codex"),
        "notes": "Forge / implementation house on Codex/OpenAI-backed execution lanes.",
    },
    {
        "agent": "Allegro",
        "provider": "Kimi / Moonshot",
        "patterns": ("kimi", "moonshot"),
        "notes": "Tempo-and-dispatch house on Kimi / Moonshot direct API lanes.",
    },
)
|
||||
|
||||
|
||||
def default_report_path(report_date: str | None = None) -> Path:
|
||||
if report_date is None:
|
||||
report_date = datetime.now().strftime("%Y-%m-%d")
|
||||
return Path.home() / "code" / "timmy-config" / "reports" / "production" / f"{report_date}-fleet-cost-report.md"
|
||||
|
||||
|
||||
def match_lane(model: str) -> dict | None:
    """Return the first AGENT_LANES entry whose patterns match *model*, else None.

    Matching is case-insensitive substring matching; None/empty model names
    match nothing.
    """
    name = (model or "").lower()
    return next(
        (lane for lane in AGENT_LANES if any(pat in name for pat in lane["patterns"])),
        None,
    )
|
||||
|
||||
|
||||
def load_cost_rows(days: int = 30, db_path: Path = DB_PATH) -> list[tuple[str, int, int, int, float]]:
    """Load per-model paid-session aggregates from the metrics database.

    Returns (model, sessions, messages, tool_calls, est_cost_usd) tuples for
    non-local models observed within the last *days* days, costliest first.
    A missing database yields an empty list rather than an error.
    """
    if not db_path.exists():
        return []
    # SQLite stores epoch-seconds timestamps; compute the window edge once.
    cutoff = (datetime.now() - timedelta(days=days)).timestamp()
    with sqlite3.connect(str(db_path)) as conn:
        raw = conn.execute(
            """
            SELECT model, SUM(sessions), SUM(messages), SUM(tool_calls), SUM(est_cost_usd)
            FROM session_stats
            WHERE timestamp > ? AND is_local = 0
            GROUP BY model
            ORDER BY SUM(est_cost_usd) DESC, model ASC
            """,
            (cutoff,),
        ).fetchall()
    # Normalize NULL aggregates to zeros so downstream math never sees None.
    normalized: list[tuple[str, int, int, int, float]] = []
    for model, sessions, messages, tool_calls, cost in raw:
        normalized.append(
            (model, int(sessions or 0), int(messages or 0), int(tool_calls or 0), float(cost or 0.0))
        )
    return normalized
|
||||
|
||||
|
||||
def summarize_rows(rows: Iterable[tuple[str, int, int, int, float]], days: int = 30) -> dict:
    """Aggregate per-model cost rows into per-agent buckets plus fleet totals.

    Each row is (model, sessions, messages, tool_calls, monthly_cost_usd).
    Models that match no lane land in an "Unassigned" bucket, which is only
    surfaced when it actually collected spend.
    """
    observed = list(rows)

    def fresh_bucket(provider: str, notes: str) -> dict:
        # One accumulator per agent house; daily cost is derived at the end.
        return {
            "provider": provider,
            "models": [],
            "sessions": 0,
            "messages": 0,
            "tool_calls": 0,
            "monthly_cost_usd": 0.0,
            "daily_cost_usd": 0.0,
            "notes": notes,
        }

    inventory = [
        {"agent": lane["agent"], "provider": lane["provider"], "notes": lane["notes"]}
        for lane in AGENT_LANES
    ]
    agents: dict[str, dict] = {
        lane["agent"]: fresh_bucket(lane["provider"], lane["notes"]) for lane in AGENT_LANES
    }
    unassigned = fresh_bucket(
        "Unassigned",
        "Observed paid-model spend not yet mapped to a named wizard house.",
    )
    providers_seen: set[str] = set()

    for model, sessions, messages, tool_calls, monthly_cost in observed:
        lane = match_lane(model)
        if lane is not None:
            bucket = agents[lane["agent"]]
            providers_seen.add(lane["provider"])
        else:
            bucket = unassigned
        bucket["models"].append(
            {
                "model": model,
                "sessions": sessions,
                "messages": messages,
                "tool_calls": tool_calls,
                "monthly_cost_usd": round(monthly_cost, 4),
            }
        )
        bucket["sessions"] += sessions
        bucket["messages"] += messages
        bucket["tool_calls"] += tool_calls
        bucket["monthly_cost_usd"] += monthly_cost

    # Finalize rounding and derive daily burn; max(days, 1) guards days=0.
    for bucket in [*agents.values(), unassigned]:
        bucket["monthly_cost_usd"] = round(bucket["monthly_cost_usd"], 4)
        bucket["daily_cost_usd"] = round(bucket["monthly_cost_usd"] / max(days, 1), 4)

    if unassigned["models"]:
        agents["Unassigned"] = unassigned
        providers_seen.add("Unassigned")

    total_monthly = round(sum(item["monthly_cost_usd"] for item in agents.values()), 4)
    total_daily = round(sum(item["daily_cost_usd"] for item in agents.values()), 4)

    # Stable alphabetical provider order, with the catch-all pushed to the end.
    provider_order = sorted(p for p in providers_seen if p != "Unassigned")
    if "Unassigned" in providers_seen:
        provider_order.append("Unassigned")

    return {
        "days": days,
        "providers": provider_order,
        "inventory": inventory,
        "agents": agents,
        "total_monthly_cost_usd": total_monthly,
        "total_daily_cost_usd": total_daily,
    }
|
||||
|
||||
|
||||
def render_markdown(summary: dict, report_date: str | None = None) -> str:
|
||||
if report_date is None:
|
||||
report_date = datetime.now().strftime("%Y-%m-%d")
|
||||
lines = [
|
||||
f"# Fleet Cost Report — {report_date}",
|
||||
"",
|
||||
f"Window: last {summary['days']} days of paid-model session stats from `~/.timmy/metrics/model_metrics.db`.",
|
||||
"",
|
||||
"## Paid API inventory",
|
||||
"",
|
||||
"| Agent | Provider | Notes |",
|
||||
"| --- | --- | --- |",
|
||||
]
|
||||
for item in summary["inventory"]:
|
||||
lines.append(f"| {item['agent']} | {item['provider']} | {item['notes']} |")
|
||||
|
||||
lines.extend(
|
||||
[
|
||||
"",
|
||||
"## Estimated cost per agent per day",
|
||||
"",
|
||||
"| Agent | Provider | Daily cost | Monthly estimate | Sessions | Messages | Tool calls |",
|
||||
"| --- | --- | ---: | ---: | ---: | ---: | ---: |",
|
||||
]
|
||||
)
|
||||
for agent, data in summary["agents"].items():
|
||||
lines.append(
|
||||
f"| {agent} | {data['provider']} | ${data['daily_cost_usd']:.2f} | ${data['monthly_cost_usd']:.2f} | {data['sessions']} | {data['messages']} | {data['tool_calls']} |"
|
||||
)
|
||||
|
||||
lines.extend(
|
||||
[
|
||||
"",
|
||||
f"Total estimated daily paid spend: ${summary['total_daily_cost_usd']:.2f}",
|
||||
f"Total estimated monthly paid spend: ${summary['total_monthly_cost_usd']:.2f}",
|
||||
"",
|
||||
"## Model evidence",
|
||||
"",
|
||||
]
|
||||
)
|
||||
for agent, data in summary["agents"].items():
|
||||
lines.append(f"### {agent}")
|
||||
if not data["models"]:
|
||||
lines.append("- No paid-model sessions observed in the selected window.")
|
||||
else:
|
||||
for model in data["models"]:
|
||||
lines.append(
|
||||
f"- `{model['model']}` — {model['sessions']} sessions / {model['messages']} messages / {model['tool_calls']} tool calls / ${model['monthly_cost_usd']:.2f} est."
|
||||
)
|
||||
lines.append("")
|
||||
|
||||
lines.append("Generated by `python3 scripts/fleet_cost_report.py --days 30`. Default output path targets the local timmy-config report lane.")
|
||||
lines.append("")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def write_report(output_path: Path, summary: dict, report_date: str | None = None) -> Path:
    """Render *summary* to markdown and write it to *output_path* (UTF-8).

    Parent directories are created as needed; the destination path is returned.
    """
    rendered = render_markdown(summary, report_date=report_date)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(rendered, encoding="utf-8")
    return output_path
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: parse args, summarize spend, write and print the report path."""
    parser = argparse.ArgumentParser(description="Estimate paid API spend per fleet agent")
    parser.add_argument("--days", type=int, default=30, help="Lookback window in days")
    parser.add_argument("--db-path", default=str(DB_PATH), help="Path to model_metrics.db")
    parser.add_argument("--output", help="Optional markdown output path")
    parser.add_argument("--date", help="Override report date (YYYY-MM-DD)")
    args = parser.parse_args()

    summary = summarize_rows(
        load_cost_rows(days=args.days, db_path=Path(args.db_path).expanduser()),
        days=args.days,
    )
    report_date = args.date or datetime.now().strftime("%Y-%m-%d")
    if args.output:
        destination = Path(args.output).expanduser()
    else:
        destination = default_report_path(report_date)
    write_report(destination, summary, report_date=report_date)
    # Print the destination so callers/scripts can pick up the file location.
    print(destination)
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
195
scripts/sonnet-smoke-test.sh
Executable file
195
scripts/sonnet-smoke-test.sh
Executable file
@@ -0,0 +1,195 @@
|
||||
#!/bin/bash
# ============================================================================
# Sonnet Workforce Smoke Test
# ============================================================================
# Refs: timmy-home #512
#
# Validates that the Sonnet workforce agent can perform the full
# clone → code → commit → push → PR workflow via Gitea HTTP.
#
# Usage:
#   scripts/sonnet-smoke-test.sh [--cleanup]
#
# Exit codes:
#   0 — all checks passed
#   1 — one or more checks failed
# ============================================================================

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
TOKEN="${GITEA_TOKEN:-$(cat ~/.config/gitea/token 2>/dev/null || true)}"
FORGE="https://forge.alexanderwhitestone.com"
ORG="Timmy_Foundation"
REPO="timmy-home"
TEST_BRANCH="smoke/sonnet-$(date +%s)"

# Colors. FIX: single-backslash escapes — doubled backslashes made `echo -e`
# print a literal "\033[0;32m" instead of coloring the output.
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[0;33m'
NC='\033[0m'

PASS=0
FAIL=0

log_pass() { echo -e "${GREEN}✓${NC} $1"; PASS=$((PASS + 1)); }
log_fail() { echo -e "${RED}✗${NC} $1"; FAIL=$((FAIL + 1)); }
log_info() { echo -e "${YELLOW}▶${NC} $1"; }

# ── Prerequisites ───────────────────────────────────────────────────────────

log_info "Checking prerequisites..."

if [ -z "$TOKEN" ]; then
    log_fail "Gitea token not found (checked GITEA_TOKEN env and ~/.config/gitea/token)"
    exit 1
fi

# FIX: identical tool checks collapsed into one loop; messages unchanged.
for tool in git curl python3; do
    if ! command -v "$tool" &>/dev/null; then
        log_fail "$tool not installed"
        exit 1
    fi
done

log_pass "Prerequisites OK"

# ── 1. Clone via Gitea HTTP ─────────────────────────────────────────────────

log_info "Step 1: Clone repo via Gitea HTTP..."

# FIX: do not clobber the special TMPDIR environment variable (it redirects
# mktemp and every child process). Use a private name, and clean it up on
# every exit path with a trap instead of repeating `rm -rf` before each exit.
WORK_DIR=$(mktemp -d)
trap 'rm -rf "$WORK_DIR"' EXIT

cd "$WORK_DIR"
# FIX: dropped unused CLONE_URL variable; the authenticated URL is built here.
if git clone --depth 1 "https://${TOKEN}@${FORGE#https://}/${ORG}/${REPO}.git" smoke-clone 2>/dev/null; then
    log_pass "Clone via Gitea HTTP"
else
    log_fail "Clone via Gitea HTTP"
    exit 1
fi

# ── 2. Commit ───────────────────────────────────────────────────────────────

log_info "Step 2: Create branch and commit..."

cd "$WORK_DIR/smoke-clone"
git checkout -b "$TEST_BRANCH" 2>/dev/null || true

# Make a harmless change
printf "# Sonnet smoke test marker\n# timestamp: %s\n" "$(date -u +%Y-%m-%dT%H:%M:%SZ)" > SONNET_SMOKE_MARKER.md
git add SONNET_SMOKE_MARKER.md

if git -c user.email="sonnet@timmy.local" -c user.name="Sonnet Smoke Test" \
    commit -m "test: sonnet smoke test marker" 2>/dev/null; then
    log_pass "Commit created"
else
    log_fail "Commit failed"
    exit 1
fi

# ── 3. Push ─────────────────────────────────────────────────────────────────

log_info "Step 3: Push branch..."

if git push origin "$TEST_BRANCH" 2>/dev/null; then
    log_pass "Push to origin"
else
    log_fail "Push to origin"
    exit 1
fi

# ── 4. Create PR ────────────────────────────────────────────────────────────

log_info "Step 4: Create PR via Gitea API..."

PR_RESPONSE=$(curl -s -X POST \
    -H "Authorization: token ${TOKEN}" \
    -H "Content-Type: application/json" \
    "${FORGE}/api/v1/repos/${ORG}/${REPO}/pulls" \
    -d "{
        \"title\": \"test: sonnet smoke test ${TEST_BRANCH}\",
        \"head\": \"${TEST_BRANCH}\",
        \"base\": \"main\",
        \"body\": \"Automated smoke test verifying Sonnet can clone, commit, push, and open a PR.\\n\\nRefs #512\"
    }" 2>/dev/null)

# FIX: guard the JSON parse — under `set -euo pipefail` a malformed API
# response previously aborted the script here before the failure was logged.
PR_NUMBER=$(echo "$PR_RESPONSE" | python3 -c "import sys,json; d=json.load(sys.stdin); print(d.get('number',''))" 2>/dev/null || true)

if [ -n "$PR_NUMBER" ] && [ "$PR_NUMBER" != "None" ]; then
    log_pass "PR created (#${PR_NUMBER})"
    PR_URL="${FORGE}/${ORG}/${REPO}/pulls/${PR_NUMBER}"
    echo "    URL: $PR_URL"
else
    log_fail "PR creation failed"
    echo "    Response: $PR_RESPONSE"
    exit 1
fi

# ── 5. Verify PR exists ─────────────────────────────────────────────────────

log_info "Step 5: Verify PR exists via API..."

PR_CHECK=$(curl -s -H "Authorization: token ${TOKEN}" \
    "${FORGE}/api/v1/repos/${ORG}/${REPO}/pulls/${PR_NUMBER}" 2>/dev/null)

# FIX: same parse guard as step 4 so a bad response is reported, not fatal.
PR_STATE=$(echo "$PR_CHECK" | python3 -c "import sys,json; d=json.load(sys.stdin); print(d.get('state',''))" 2>/dev/null || true)

if [ "$PR_STATE" = "open" ]; then
    log_pass "PR verified open via API"
else
    log_fail "PR state is '$PR_STATE', expected 'open'"
fi

# ── Cleanup (optional) ──────────────────────────────────────────────────────

if [ "${1:-}" = "--cleanup" ]; then
    log_info "Cleaning up smoke test artifacts..."
    curl -s -X PATCH -H "Authorization: token ${TOKEN}" \
        -H "Content-Type: application/json" \
        "${FORGE}/api/v1/repos/${ORG}/${REPO}/pulls/${PR_NUMBER}" \
        -d '{"state":"closed"}' >/dev/null 2>&1 || true
    git push origin --delete "$TEST_BRANCH" 2>/dev/null || true
    log_pass "Cleanup complete"
fi

# ── Summary ─────────────────────────────────────────────────────────────────

echo ""
echo "================================================================"
echo "  Sonnet Smoke Test Summary"
echo "================================================================"
echo -e "  Passed: ${GREEN}${PASS}${NC}"
echo -e "  Failed: ${RED}${FAIL}${NC}"
echo ""

if [ "$FAIL" -gt 0 ]; then
    echo -e "${RED}RESULT: FAILED${NC}"
    exit 1
else
    echo -e "${GREEN}RESULT: PASSED${NC}"
    echo ""
    echo "Sonnet workforce is verified end-to-end:"
    echo "  ✓ Clone via Gitea HTTP"
    echo "  ✓ Branch + commit"
    echo "  ✓ Push to origin"
    echo "  ✓ Open PR via API"
    echo "  ✓ Verify PR state"
    exit 0
fi
|
||||
@@ -1,77 +0,0 @@
|
||||
from importlib.util import module_from_spec, spec_from_file_location
|
||||
from pathlib import Path
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
|
||||
# Project root (tests live one level below it) and the script under test,
# which is loaded by path because scripts/ is not an importable package.
ROOT = Path(__file__).resolve().parents[1]
SCRIPT_PATH = ROOT / "scripts" / "fleet_cost_report.py"
|
||||
|
||||
|
||||
def load_module():
    """Load scripts/fleet_cost_report.py directly from its file path.

    A fresh module object is built per call so each test sees clean state.
    """
    spec = spec_from_file_location("fleet_cost_report", SCRIPT_PATH)
    mod = module_from_spec(spec)
    assert spec.loader is not None
    spec.loader.exec_module(mod)
    return mod
|
||||
|
||||
|
||||
class TestFleetCostReport(unittest.TestCase):
    """Behavioral checks for the fleet cost report generator script."""

    def test_default_output_targets_timmy_config_report_path(self):
        mod = load_module()
        target = mod.default_report_path("2026-04-22")
        self.assertIn("timmy-config", str(target))
        self.assertTrue(str(target).endswith("2026-04-22-fleet-cost-report.md"))

    def test_summary_groups_paid_costs_by_agent_and_provider(self):
        mod = load_module()
        sample_rows = [
            ("claude-sonnet-4-6", 12, 120, 24, 6.0),
            ("gpt-5.4", 6, 60, 12, 3.0),
            ("openrouter/google/gemini-2.5-pro", 4, 40, 8, 2.0),
            ("kimi-k2", 2, 20, 4, 1.0),
        ]
        summary = mod.summarize_rows(sample_rows, days=30)

        self.assertEqual(summary["providers"], ["Anthropic", "Kimi / Moonshot", "OpenAI", "OpenRouter"])
        self.assertAlmostEqual(summary["agents"]["Ezra"]["monthly_cost_usd"], 6.0)
        self.assertAlmostEqual(summary["agents"]["Bezalel"]["monthly_cost_usd"], 3.0)
        self.assertAlmostEqual(summary["agents"]["Timmy Cloud Lane"]["monthly_cost_usd"], 2.0)
        self.assertAlmostEqual(summary["agents"]["Allegro"]["monthly_cost_usd"], 1.0)
        self.assertAlmostEqual(summary["agents"]["Ezra"]["daily_cost_usd"], 0.2)

    def test_report_render_mentions_inventory_and_agent_costs(self):
        mod = load_module()
        sample_rows = [
            ("claude-sonnet-4-6", 12, 120, 24, 6.0),
            ("gpt-5.4", 6, 60, 12, 3.0),
            ("openrouter/google/gemini-2.5-pro", 4, 40, 8, 2.0),
        ]
        rendered = mod.render_markdown(
            mod.summarize_rows(sample_rows, days=30), report_date="2026-04-22"
        )

        for needle in (
            "# Fleet Cost Report — 2026-04-22",
            "## Paid API inventory",
            "Anthropic",
            "OpenRouter",
            "OpenAI",
            "## Estimated cost per agent per day",
            "Timmy Cloud Lane",
            "Ezra",
            "Bezalel",
        ):
            self.assertIn(needle, rendered)

    def test_write_report_creates_markdown_file(self):
        mod = load_module()
        summary = mod.summarize_rows([("claude-sonnet-4-6", 1, 10, 2, 0.5)], days=30)
        with tempfile.TemporaryDirectory() as tmpdir:
            dest = Path(tmpdir) / "fleet-cost.md"
            mod.write_report(dest, summary, report_date="2026-04-22")
            self.assertTrue(dest.exists())
            content = dest.read_text()
            self.assertIn("Fleet Cost Report", content)
            self.assertIn("Ezra", content)
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
@@ -38,6 +38,7 @@ class House(Enum):
|
||||
EZRA = "ezra" # Archivist, reader
|
||||
BEZALEL = "bezalel" # Artificer, builder
|
||||
ALLEGRO = "allegro" # Tempo-and-dispatch, connected
|
||||
SONNET = "sonnet" # Anthropic Claude Sonnet (cloud, high-reasoning)
|
||||
|
||||
|
||||
class Mode(Enum):
|
||||
|
||||
Reference in New Issue
Block a user