Compare commits

2 Commits

| Author | SHA1 | Date |
|---|---|---|
| | e7b9ec8c50 | |
| | 2f490e7087 | |
@@ -62,24 +62,6 @@ Writes:

## Usage

### Timmy Mac wiring helper

Use the dedicated Timmy helper when you want to wire a real RunPod or Vertex-style endpoint into the local Mac Hermes config:

```bash
python3 scripts/timmy_gemma4_mac.py --base-url https://your-openai-bridge.example/v1 --write-config
python3 scripts/timmy_gemma4_mac.py --vertex-base-url https://your-vertex-bridge.example --write-config
python3 scripts/timmy_gemma4_mac.py --pod-id <runpod-id> --write-config --verify-chat
```

The helper writes to `~/.hermes/config.yaml` by default and prints the prove-it command:

```bash
hermes chat --model gemma4 --provider big_brain
```
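
The exact YAML written is owned by `update_config_text` (imported from `scripts/bezalel_gemma4_vps.py`), so the sketch below is only a hypothetical shape of the wired result, not the literal format:

```yaml
# Hypothetical shape of ~/.hermes/config.yaml after --write-config;
# key names are illustrative, update_config_text defines the real layout.
providers:
  big_brain:
    base_url: https://your-openai-bridge.example/v1
    model: gemma4
```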

### Generic verification

```bash
python3 scripts/verify_big_brain.py
python3 scripts/big_brain_manager.py
```

scripts/fleet_cost_report.py (new file, 245 lines)

@@ -0,0 +1,245 @@
#!/usr/bin/env python3
"""Fleet cost report generator.

Reads Timmy's sovereignty metrics database and estimates paid API spend by
agent/provider lane. Default output targets the local timmy-config reports
folder so the cost report can be filed from the sidecar repo.
"""

from __future__ import annotations

import argparse
import sqlite3
from datetime import datetime, timedelta
from pathlib import Path
from typing import Iterable

DB_PATH = Path.home() / ".timmy" / "metrics" / "model_metrics.db"


AGENT_LANES = (
    {
        "agent": "Timmy Cloud Lane",
        "provider": "OpenRouter",
        "patterns": ("openrouter/", "google/", "deepseek/", "x-ai/", "mistral/"),
        "notes": "Cloud fallback and external reasoning routed through OpenRouter-compatible lanes.",
    },
    {
        "agent": "Ezra",
        "provider": "Anthropic",
        "patterns": ("claude-", "anthropic/claude"),
        "notes": "Archivist / long-form reasoning house on Claude-family models.",
    },
    {
        "agent": "Bezalel",
        "provider": "OpenAI",
        "patterns": ("gpt-", "openai/", "codex"),
        "notes": "Forge / implementation house on Codex/OpenAI-backed execution lanes.",
    },
    {
        "agent": "Allegro",
        "provider": "Kimi / Moonshot",
        "patterns": ("kimi", "moonshot"),
        "notes": "Tempo-and-dispatch house on Kimi / Moonshot direct API lanes.",
    },
)


def default_report_path(report_date: str | None = None) -> Path:
    if report_date is None:
        report_date = datetime.now().strftime("%Y-%m-%d")
    return Path.home() / "code" / "timmy-config" / "reports" / "production" / f"{report_date}-fleet-cost-report.md"


def match_lane(model: str) -> dict | None:
    # First lane whose substring patterns hit the lowercased model id wins.
    lowered = (model or "").lower()
    for lane in AGENT_LANES:
        if any(pattern in lowered for pattern in lane["patterns"]):
            return lane
    return None


def load_cost_rows(days: int = 30, db_path: Path = DB_PATH) -> list[tuple[str, int, int, int, float]]:
    if not db_path.exists():
        return []
    # Only paid (is_local = 0) sessions inside the lookback window count.
    cutoff = (datetime.now() - timedelta(days=days)).timestamp()
    with sqlite3.connect(str(db_path)) as conn:
        rows = conn.execute(
            """
            SELECT model, SUM(sessions), SUM(messages), SUM(tool_calls), SUM(est_cost_usd)
            FROM session_stats
            WHERE timestamp > ? AND is_local = 0
            GROUP BY model
            ORDER BY SUM(est_cost_usd) DESC, model ASC
            """,
            (cutoff,),
        ).fetchall()
    return [
        (model, int(sessions or 0), int(messages or 0), int(tool_calls or 0), float(cost or 0.0))
        for model, sessions, messages, tool_calls, cost in rows
    ]


def summarize_rows(rows: Iterable[tuple[str, int, int, int, float]], days: int = 30) -> dict:
    rows = list(rows)
    agents: dict[str, dict] = {}
    providers_seen: set[str] = set()
    inventory = [
        {
            "agent": lane["agent"],
            "provider": lane["provider"],
            "notes": lane["notes"],
        }
        for lane in AGENT_LANES
    ]

    for lane in AGENT_LANES:
        agents[lane["agent"]] = {
            "provider": lane["provider"],
            "models": [],
            "sessions": 0,
            "messages": 0,
            "tool_calls": 0,
            "monthly_cost_usd": 0.0,
            "daily_cost_usd": 0.0,
            "notes": lane["notes"],
        }

    # Catch-all bucket for paid spend that matches no configured lane.
    unassigned = {
        "provider": "Unassigned",
        "models": [],
        "sessions": 0,
        "messages": 0,
        "tool_calls": 0,
        "monthly_cost_usd": 0.0,
        "daily_cost_usd": 0.0,
        "notes": "Observed paid-model spend not yet mapped to a named wizard house.",
    }

    for model, sessions, messages, tool_calls, monthly_cost in rows:
        lane = match_lane(model)
        if lane is None:
            bucket = unassigned
        else:
            bucket = agents[lane["agent"]]
            providers_seen.add(lane["provider"])
        bucket["models"].append(
            {
                "model": model,
                "sessions": sessions,
                "messages": messages,
                "tool_calls": tool_calls,
                "monthly_cost_usd": round(monthly_cost, 4),
            }
        )
        bucket["sessions"] += sessions
        bucket["messages"] += messages
        bucket["tool_calls"] += tool_calls
        bucket["monthly_cost_usd"] += monthly_cost

    for bucket in list(agents.values()) + [unassigned]:
        bucket["monthly_cost_usd"] = round(bucket["monthly_cost_usd"], 4)
        bucket["daily_cost_usd"] = round(bucket["monthly_cost_usd"] / max(days, 1), 4)

    if unassigned["models"]:
        agents["Unassigned"] = unassigned
        providers_seen.add("Unassigned")

    total_monthly = round(sum(item["monthly_cost_usd"] for item in agents.values()), 4)
    total_daily = round(sum(item["daily_cost_usd"] for item in agents.values()), 4)

    # Keep provider names sorted, with the catch-all bucket listed last.
    provider_order = sorted(providers_seen)
    if "Unassigned" in provider_order:
        provider_order = [p for p in provider_order if p != "Unassigned"] + ["Unassigned"]

    return {
        "days": days,
        "providers": provider_order,
        "inventory": inventory,
        "agents": agents,
        "total_monthly_cost_usd": total_monthly,
        "total_daily_cost_usd": total_daily,
    }


def render_markdown(summary: dict, report_date: str | None = None) -> str:
    if report_date is None:
        report_date = datetime.now().strftime("%Y-%m-%d")
    lines = [
        f"# Fleet Cost Report — {report_date}",
        "",
        f"Window: last {summary['days']} days of paid-model session stats from `~/.timmy/metrics/model_metrics.db`.",
        "",
        "## Paid API inventory",
        "",
        "| Agent | Provider | Notes |",
        "| --- | --- | --- |",
    ]
    for item in summary["inventory"]:
        lines.append(f"| {item['agent']} | {item['provider']} | {item['notes']} |")

    lines.extend(
        [
            "",
            "## Estimated cost per agent per day",
            "",
            "| Agent | Provider | Daily cost | Monthly estimate | Sessions | Messages | Tool calls |",
            "| --- | --- | ---: | ---: | ---: | ---: | ---: |",
        ]
    )
    for agent, data in summary["agents"].items():
        lines.append(
            f"| {agent} | {data['provider']} | ${data['daily_cost_usd']:.2f} | ${data['monthly_cost_usd']:.2f} | {data['sessions']} | {data['messages']} | {data['tool_calls']} |"
        )

    lines.extend(
        [
            "",
            f"Total estimated daily paid spend: ${summary['total_daily_cost_usd']:.2f}",
            f"Total estimated monthly paid spend: ${summary['total_monthly_cost_usd']:.2f}",
            "",
            "## Model evidence",
            "",
        ]
    )
    for agent, data in summary["agents"].items():
        lines.append(f"### {agent}")
        if not data["models"]:
            lines.append("- No paid-model sessions observed in the selected window.")
        else:
            for model in data["models"]:
                lines.append(
                    f"- `{model['model']}` — {model['sessions']} sessions / {model['messages']} messages / {model['tool_calls']} tool calls / ${model['monthly_cost_usd']:.2f} est."
                )
        lines.append("")

    lines.append("Generated by `python3 scripts/fleet_cost_report.py --days 30`. Default output path targets the local timmy-config report lane.")
    lines.append("")
    return "\n".join(lines)


def write_report(output_path: Path, summary: dict, report_date: str | None = None) -> Path:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(render_markdown(summary, report_date=report_date), encoding="utf-8")
    return output_path


def main() -> int:
    parser = argparse.ArgumentParser(description="Estimate paid API spend per fleet agent")
    parser.add_argument("--days", type=int, default=30, help="Lookback window in days")
    parser.add_argument("--db-path", default=str(DB_PATH), help="Path to model_metrics.db")
    parser.add_argument("--output", help="Optional markdown output path")
    parser.add_argument("--date", help="Override report date (YYYY-MM-DD)")
    args = parser.parse_args()

    rows = load_cost_rows(days=args.days, db_path=Path(args.db_path).expanduser())
    summary = summarize_rows(rows, days=args.days)
    report_date = args.date or datetime.now().strftime("%Y-%m-%d")
    output_path = Path(args.output).expanduser() if args.output else default_report_path(report_date)
    write_report(output_path, summary, report_date=report_date)
    print(output_path)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
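
As a quick way to exercise the new report lane end to end, the sketch below builds a throwaway metrics database and runs the module against it. The `session_stats` schema here is inferred from the script's own `SELECT` (columns `timestamp`, `is_local`, `model`, `sessions`, `messages`, `tool_calls`, `est_cost_usd`); the column types are assumptions, not taken from Timmy's real database.

```python
# Minimal smoke test for fleet_cost_report.py, run from the repo root.
import sqlite3
import tempfile
import time
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

spec = spec_from_file_location("fleet_cost_report", Path("scripts/fleet_cost_report.py"))
module = module_from_spec(spec)
spec.loader.exec_module(module)

with tempfile.TemporaryDirectory() as tmpdir:
    db_path = Path(tmpdir) / "model_metrics.db"
    with sqlite3.connect(db_path) as conn:
        # Columns inferred from the script's SELECT; types are assumptions.
        conn.execute(
            """CREATE TABLE session_stats (
                timestamp REAL, is_local INTEGER, model TEXT, sessions INTEGER,
                messages INTEGER, tool_calls INTEGER, est_cost_usd REAL)"""
        )
        conn.execute(
            "INSERT INTO session_stats VALUES (?, 0, 'claude-sonnet-4-6', 3, 30, 6, 1.5)",
            (time.time(),),
        )
    rows = module.load_cost_rows(days=30, db_path=db_path)
    summary = module.summarize_rows(rows, days=30)
    print(module.render_markdown(summary, report_date="2026-04-22"))
```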

scripts/timmy_gemma4_mac.py (deleted file, 164 lines)

@@ -1,164 +0,0 @@
#!/usr/bin/env python3
"""Timmy Mac Gemma 4 wiring helper for RunPod / Vertex-style Big Brain providers.

Refs: timmy-home #543

Safe by default:
- computes a Big Brain base URL from an explicit URL, Vertex bridge URL, or RunPod pod id
- can provision a RunPod pod when --apply-runpod is used and a token is available
- can write the resolved endpoint into a Hermes config when --write-config is used
- can verify an OpenAI-compatible chat endpoint when --verify-chat is used
"""

from __future__ import annotations

import argparse
import json
from pathlib import Path
from typing import Any
from urllib import request

from scripts.bezalel_gemma4_vps import (
    DEFAULT_CLOUD_TYPE,
    DEFAULT_GPU_TYPE,
    DEFAULT_MODEL,
    DEFAULT_PROVIDER_NAME,
    build_runpod_endpoint,
    deploy_runpod,
    update_config_text,
)

DEFAULT_TOKEN_FILE = Path.home() / ".config" / "runpod" / "access_key"
DEFAULT_CONFIG_PATH = Path.home() / ".hermes" / "config.yaml"


def _normalize_openai_base(base_url: str | None) -> str:
    # Normalize any bridge URL to exactly one trailing /v1 segment.
    if not base_url:
        return ""
    cleaned = str(base_url).strip().rstrip("/")
    return cleaned if cleaned.endswith("/v1") else f"{cleaned}/v1"


def choose_base_url(*, vertex_base_url: str | None = None, base_url: str | None = None, pod_id: str | None = None) -> str:
    # Precedence: Vertex bridge, then explicit URL, then RunPod pod id.
    if vertex_base_url:
        return _normalize_openai_base(vertex_base_url)
    if base_url:
        return _normalize_openai_base(base_url)
    if pod_id:
        return build_runpod_endpoint(pod_id)
    return "https://YOUR_BIG_BRAIN_HOST/v1"


def write_config_file(config_path: Path, *, base_url: str, model: str = DEFAULT_MODEL, provider_name: str = DEFAULT_PROVIDER_NAME) -> str:
    original = config_path.read_text() if config_path.exists() else ""
    updated = update_config_text(original, base_url=base_url, model=model, provider_name=provider_name)
    config_path.parent.mkdir(parents=True, exist_ok=True)
    config_path.write_text(updated)
    return updated


def verify_openai_chat(base_url: str, *, model: str = DEFAULT_MODEL, prompt: str = "Say READY") -> str:
    payload = json.dumps(
        {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "stream": False,
            "max_tokens": 16,
        }
    ).encode()
    req = request.Request(
        f"{base_url.rstrip('/')}/chat/completions",
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with request.urlopen(req, timeout=30) as resp:
        data = json.loads(resp.read().decode())
    return data["choices"][0]["message"]["content"]


def build_summary(*, base_url: str, model: str, provider_name: str = DEFAULT_PROVIDER_NAME, config_path: Path = DEFAULT_CONFIG_PATH) -> dict[str, Any]:
    return {
        "provider_name": provider_name,
        "base_url": base_url,
        "model": model,
        "config_path": str(config_path),
        "verification_commands": [
            "python3 scripts/verify_big_brain.py",
            f"python3 scripts/timmy_gemma4_mac.py --base-url {base_url} --write-config --verify-chat",
            "hermes chat --model gemma4 --provider big_brain",
        ],
    }


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Wire a RunPod/Vertex Gemma 4 endpoint into Timmy's Mac Hermes config.")
    parser.add_argument("--pod-name", default="timmy-gemma4")
    parser.add_argument("--gpu-type", default=DEFAULT_GPU_TYPE)
    parser.add_argument("--cloud-type", default=DEFAULT_CLOUD_TYPE)
    parser.add_argument("--model", default=DEFAULT_MODEL)
    parser.add_argument("--provider-name", default=DEFAULT_PROVIDER_NAME)
    parser.add_argument("--token-file", type=Path, default=DEFAULT_TOKEN_FILE)
    parser.add_argument("--config-path", type=Path, default=DEFAULT_CONFIG_PATH)
    parser.add_argument("--pod-id", help="Existing RunPod pod id to convert into an OpenAI-compatible base URL")
    parser.add_argument("--base-url", help="Explicit OpenAI-compatible base URL")
    parser.add_argument("--vertex-base-url", help="Vertex AI OpenAI-compatible bridge base URL")
    parser.add_argument("--apply-runpod", action="store_true", help="Provision a RunPod pod using the RunPod GraphQL API")
    parser.add_argument("--write-config", action="store_true", help="Write the resolved endpoint into --config-path")
    parser.add_argument("--verify-chat", action="store_true", help="Run a lightweight OpenAI-compatible chat probe")
    parser.add_argument("--json", action="store_true", help="Emit machine-readable JSON")
    return parser.parse_args()


def main() -> None:
    args = parse_args()
    summary: dict[str, Any] = {
        "pod_name": args.pod_name,
        "gpu_type": args.gpu_type,
        "cloud_type": args.cloud_type,
        "model": args.model,
        "provider_name": args.provider_name,
        "actions": [],
    }

    base_url = choose_base_url(vertex_base_url=args.vertex_base_url, base_url=args.base_url, pod_id=args.pod_id)

    if args.apply_runpod:
        if not args.token_file.exists():
            raise SystemExit(f"RunPod token file not found: {args.token_file}")
        api_key = args.token_file.read_text().strip()
        deployed = deploy_runpod(api_key=api_key, name=args.pod_name, gpu_type=args.gpu_type, cloud_type=args.cloud_type, model=args.model)
        summary["deployment"] = deployed
        base_url = deployed["base_url"]
        summary["actions"].append("deployed_runpod_pod")

    summary.update(build_summary(base_url=base_url, model=args.model, provider_name=args.provider_name, config_path=args.config_path))

    if args.write_config:
        write_config_file(args.config_path, base_url=base_url, model=args.model, provider_name=args.provider_name)
        summary["actions"].append("wrote_config")

    if args.verify_chat:
        summary["verify_response"] = verify_openai_chat(base_url, model=args.model)
        summary["actions"].append("verified_chat")

    if args.json:
        print(json.dumps(summary, indent=2))
        return

    print("--- Timmy Gemma4 Mac Wiring ---")
    print(f"Provider: {args.provider_name}")
    print(f"Base URL: {base_url}")
    print(f"Model: {args.model}")
    print(f"Config path: {args.config_path}")
    if "verify_response" in summary:
        print(f"Verify response: {summary['verify_response']}")
    if summary["actions"]:
        print("Actions: " + ", ".join(summary["actions"]))
    print("Verification commands:")
    for command in summary["verification_commands"]:
        print(f" - {command}")


if __name__ == "__main__":
    main()
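
Because the helper keeps its wiring logic import-friendly, the summary payload can be inspected offline against the pre-removal revision of the file; a minimal sketch, assuming the repo root is on `sys.path` so the `scripts.bezalel_gemma4_vps` import inside the helper resolves:

```python
# Sketch: print the prove-it commands the helper would emit for a given bridge
# (pre-removal revision of scripts/timmy_gemma4_mac.py).
from scripts.timmy_gemma4_mac import build_summary

summary = build_summary(base_url="https://your-vertex-bridge.example/v1", model="gemma4:latest")
for command in summary["verification_commands"]:
    print(command)
```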

tests/test_fleet_cost_report.py (new file, 77 lines)

@@ -0,0 +1,77 @@
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
import tempfile
import unittest


ROOT = Path(__file__).resolve().parent.parent
SCRIPT_PATH = ROOT / "scripts" / "fleet_cost_report.py"


def load_module():
    # Import the script by path so tests do not require scripts/ on sys.path.
    spec = spec_from_file_location("fleet_cost_report", SCRIPT_PATH)
    module = module_from_spec(spec)
    assert spec.loader is not None
    spec.loader.exec_module(module)
    return module


class TestFleetCostReport(unittest.TestCase):
    def test_default_output_targets_timmy_config_report_path(self):
        module = load_module()
        output_path = module.default_report_path("2026-04-22")
        self.assertIn("timmy-config", str(output_path))
        self.assertTrue(str(output_path).endswith("2026-04-22-fleet-cost-report.md"))

    def test_summary_groups_paid_costs_by_agent_and_provider(self):
        module = load_module()
        rows = [
            ("claude-sonnet-4-6", 12, 120, 24, 6.0),
            ("gpt-5.4", 6, 60, 12, 3.0),
            ("openrouter/google/gemini-2.5-pro", 4, 40, 8, 2.0),
            ("kimi-k2", 2, 20, 4, 1.0),
        ]
        summary = module.summarize_rows(rows, days=30)

        self.assertEqual(summary["providers"], ["Anthropic", "Kimi / Moonshot", "OpenAI", "OpenRouter"])
        self.assertAlmostEqual(summary["agents"]["Ezra"]["monthly_cost_usd"], 6.0)
        self.assertAlmostEqual(summary["agents"]["Bezalel"]["monthly_cost_usd"], 3.0)
        self.assertAlmostEqual(summary["agents"]["Timmy Cloud Lane"]["monthly_cost_usd"], 2.0)
        self.assertAlmostEqual(summary["agents"]["Allegro"]["monthly_cost_usd"], 1.0)
        self.assertAlmostEqual(summary["agents"]["Ezra"]["daily_cost_usd"], 0.2)

    def test_report_render_mentions_inventory_and_agent_costs(self):
        module = load_module()
        rows = [
            ("claude-sonnet-4-6", 12, 120, 24, 6.0),
            ("gpt-5.4", 6, 60, 12, 3.0),
            ("openrouter/google/gemini-2.5-pro", 4, 40, 8, 2.0),
        ]
        summary = module.summarize_rows(rows, days=30)
        report = module.render_markdown(summary, report_date="2026-04-22")

        self.assertIn("# Fleet Cost Report — 2026-04-22", report)
        self.assertIn("## Paid API inventory", report)
        self.assertIn("Anthropic", report)
        self.assertIn("OpenRouter", report)
        self.assertIn("OpenAI", report)
        self.assertIn("## Estimated cost per agent per day", report)
        self.assertIn("Timmy Cloud Lane", report)
        self.assertIn("Ezra", report)
        self.assertIn("Bezalel", report)

    def test_write_report_creates_markdown_file(self):
        module = load_module()
        rows = [("claude-sonnet-4-6", 1, 10, 2, 0.5)]
        summary = module.summarize_rows(rows, days=30)
        with tempfile.TemporaryDirectory() as tmpdir:
            dest = Path(tmpdir) / "fleet-cost.md"
            module.write_report(dest, summary, report_date="2026-04-22")
            self.assertTrue(dest.exists())
            text = dest.read_text()
            self.assertIn("Fleet Cost Report", text)
            self.assertIn("Ezra", text)


if __name__ == "__main__":
    unittest.main()
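
These tests are plain `unittest`, so a standard discovery run from the repo root covers them; the exact invocation below assumes `tests/` is importable from the root, so adjust if the layout differs:

```bash
python3 -m unittest discover -s tests -p "test_fleet_cost_report.py" -v
```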

Deleted test file for scripts/timmy_gemma4_mac.py (85 lines)

@@ -1,85 +0,0 @@
from __future__ import annotations

import importlib.util
import json
import sys
from pathlib import Path
from unittest.mock import patch


ROOT = Path(__file__).resolve().parent.parent
SCRIPT = ROOT / "scripts" / "timmy_gemma4_mac.py"
README = ROOT / "scripts" / "README_big_brain.md"


def load_module():
    # Load the helper by file path and register it so patch() can target it.
    spec = importlib.util.spec_from_file_location("timmy_gemma4_mac", str(SCRIPT))
    mod = importlib.util.module_from_spec(spec)
    sys.modules["timmy_gemma4_mac"] = mod
    spec.loader.exec_module(mod)
    return mod


class _FakeResponse:
    def __init__(self, payload: dict):
        self._payload = json.dumps(payload).encode()

    def read(self) -> bytes:
        return self._payload

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        return False


def test_script_exists() -> None:
    assert SCRIPT.exists(), "scripts/timmy_gemma4_mac.py must exist"


def test_default_paths_target_timmy_mac_hermes() -> None:
    mod = load_module()
    assert mod.DEFAULT_CONFIG_PATH == Path.home() / ".hermes" / "config.yaml"
    assert mod.DEFAULT_TOKEN_FILE == Path.home() / ".config" / "runpod" / "access_key"


def test_choose_base_url_prefers_vertex_then_explicit_then_runpod() -> None:
    mod = load_module()
    assert mod.choose_base_url(vertex_base_url="https://vertex-proxy.example/v1") == "https://vertex-proxy.example/v1"
    assert mod.choose_base_url(base_url="https://custom-endpoint/v1") == "https://custom-endpoint/v1"
    assert mod.choose_base_url(pod_id="abc123") == "https://abc123-11434.proxy.runpod.net/v1"


def test_build_summary_includes_prove_it_commands() -> None:
    mod = load_module()
    summary = mod.build_summary(base_url="https://vertex-proxy.example/v1", model="gemma4:latest")
    assert summary["verification_commands"][0] == "python3 scripts/verify_big_brain.py"
    assert any("hermes chat --model gemma4 --provider big_brain" in cmd for cmd in summary["verification_commands"])


def test_verify_openai_chat_targets_chat_completions() -> None:
    mod = load_module()
    response_payload = {
        "choices": [{"message": {"content": "READY"}}]
    }

    with patch("timmy_gemma4_mac.request.urlopen", return_value=_FakeResponse(response_payload)) as mocked:
        result = mod.verify_openai_chat("https://vertex-proxy.example/v1", model="gemma4:latest", prompt="say READY")

    assert result == "READY"
    req = mocked.call_args.args[0]
    assert req.full_url == "https://vertex-proxy.example/v1/chat/completions"


def test_readme_mentions_timmy_mac_wiring_flow() -> None:
    text = README.read_text(encoding="utf-8")
    required = [
        "scripts/timmy_gemma4_mac.py",
        "--vertex-base-url",
        "--write-config",
        "python3 scripts/verify_big_brain.py",
        "hermes chat --model gemma4 --provider big_brain",
    ]
    missing = [item for item in required if item not in text]
    assert not missing, missing