253 lines
8.6 KiB
Python
Executable File
#!/usr/bin/env python3
|
|
"""Timmy Model Dashboard — where are my models, what are they doing.
|
|
|
|
Usage:
|
|
timmy-dashboard # one-shot
|
|
timmy-dashboard --watch # live refresh every 30s
|
|
timmy-dashboard --hours=48 # look back 48h
|
|
"""
|
|
|
|
import json
|
|
import os
|
|
import subprocess
|
|
import sys
|
|
import time
|
|
import urllib.request
|
|
from datetime import datetime, timezone, timedelta
|
|
from pathlib import Path
|
|
|
|
# Filesystem roots for the two agent ecosystems this dashboard inspects.
HERMES_HOME = Path.home() / ".hermes"   # Hermes session store lives under here
TIMMY_HOME = Path.home() / ".timmy"     # Timmy heartbeat ticks + metrics root
# Local inference metrics are appended as METRICS_DIR/local_*.jsonl files.
METRICS_DIR = TIMMY_HOME / "metrics"
|
|
|
|
# ── Data Sources ──────────────────────────────────────────────────────
|
|
|
|
def get_ollama_models():
    """Return the model list Ollama reports at /api/tags, or [] on any failure."""
    url = "http://localhost:11434/api/tags"
    try:
        with urllib.request.urlopen(urllib.request.Request(url), timeout=5) as resp:
            payload = json.loads(resp.read())
    except Exception:
        # Ollama down or unreachable — the dashboard degrades gracefully.
        return []
    return payload.get("models", [])
|
|
|
|
|
|
def get_loaded_models():
    """Return models currently loaded in Ollama (/api/ps), or [] on any failure."""
    url = "http://localhost:11434/api/ps"
    try:
        with urllib.request.urlopen(urllib.request.Request(url), timeout=5) as resp:
            payload = json.loads(resp.read())
    except Exception:
        # Same graceful-degradation contract as get_ollama_models().
        return []
    return payload.get("models", [])
|
|
|
|
|
|
def get_huey_pid():
    """Return the first PID (as a string) of a running huey_consumer, else None."""
    try:
        proc = subprocess.run(
            ["pgrep", "-f", "huey_consumer"],
            capture_output=True, text=True, timeout=5,
        )
    except Exception:
        # pgrep missing or timed out — report the consumer as not found.
        return None
    if proc.returncode != 0:
        return None
    # pgrep emits one PID per line; only the first match matters here.
    return proc.stdout.strip().split("\n")[0]
|
|
|
|
|
|
def get_hermes_sessions():
    """Return all Hermes session records, or [] when the store is absent/unreadable."""
    store = HERMES_HOME / "sessions" / "sessions.json"
    if not store.exists():
        return []
    try:
        # sessions.json maps session-id -> record; callers only need the records.
        return list(json.loads(store.read_text()).values())
    except Exception:
        return []
|
|
|
|
|
|
def get_heartbeat_ticks(date_str=None):
    """Parse one day's heartbeat tick JSONL (default: today, local time).

    `date_str` is YYYYMMDD. Blank and malformed lines are skipped silently.
    """
    day = date_str if date_str else datetime.now().strftime("%Y%m%d")
    path = TIMMY_HOME / "heartbeat" / f"ticks_{day}.jsonl"
    if not path.exists():
        return []
    parsed = []
    for raw in path.read_text().strip().split("\n"):
        if not raw.strip():
            continue
        try:
            parsed.append(json.loads(raw))
        except Exception:
            pass  # corrupt line — keep going, partial history is still useful
    return parsed
|
|
|
|
|
|
def get_local_metrics(hours=24):
    """Read local inference metrics from jsonl files.

    Scans METRICS_DIR/local_*.jsonl and returns every record whose ISO-8601
    "timestamp" falls within the last `hours` hours. Blank lines, malformed
    JSON, and records without a parseable timestamp are skipped.
    """
    records = []
    cutoff = datetime.now(timezone.utc) - timedelta(hours=hours)
    if not METRICS_DIR.exists():
        return records
    for f in sorted(METRICS_DIR.glob("local_*.jsonl")):
        for line in f.read_text().strip().split("\n"):
            if not line.strip():
                continue
            try:
                r = json.loads(line)
                ts = datetime.fromisoformat(r["timestamp"])
                # Bug fix: a naive timestamp compared against the aware
                # `cutoff` raised TypeError, which the blanket except then
                # swallowed — silently dropping valid records. Treat naive
                # timestamps as UTC instead.
                if ts.tzinfo is None:
                    ts = ts.replace(tzinfo=timezone.utc)
                if ts >= cutoff:
                    records.append(r)
            except Exception:
                continue
    return records
|
|
|
|
|
|
def get_cron_jobs():
    """Get Hermes cron job status via `hermes cron list --json`; [] on failure."""
    try:
        proc = subprocess.run(
            ["hermes", "cron", "list", "--json"],
            capture_output=True, text=True, timeout=10,
        )
        if proc.returncode == 0:
            return json.loads(proc.stdout).get("jobs", [])
    except Exception:
        # hermes binary missing, timeout, or bad JSON — fall through to [].
        pass
    return []
|
|
|
|
|
|
# ── Rendering ─────────────────────────────────────────────────────────
|
|
|
|
# ANSI escape sequences used by render() for terminal styling.
DIM = "\033[2m"        # faint text
BOLD = "\033[1m"       # bold text
GREEN = "\033[32m"
YELLOW = "\033[33m"
RED = "\033[31m"
CYAN = "\033[36m"
RST = "\033[0m"        # reset all attributes
CLR = "\033[2J\033[H"  # clear screen and move cursor to home
|
|
|
|
|
|
def render(hours=24):
    """Print one full dashboard frame to stdout.

    Gathers every data source (Ollama tags/ps, Huey PID, heartbeat ticks,
    local inference metrics, Hermes sessions) and prints the sectioned ANSI
    report. `hours` bounds the local-inference lookback window.
    """
    models = get_ollama_models()
    loaded = get_loaded_models()
    huey_pid = get_huey_pid()
    ticks = get_heartbeat_ticks()
    metrics = get_local_metrics(hours)
    sessions = get_hermes_sessions()

    loaded_names = {m.get("name", "") for m in loaded}
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    print(CLR, end="")
    print(f"{BOLD}{'=' * 70}")
    print(f" TIMMY MODEL DASHBOARD")
    # Bug fix: the old f-string nested the color conditional incorrectly and
    # printed a green "PID None" (followed by red DOWN) whenever Huey was
    # down. Build the status string first, then interpolate it.
    huey_status = f"{GREEN}PID {huey_pid}{RST}" if huey_pid else f"{RED}DOWN{RST}"
    print(f" {now} | Huey: {huey_status}")
    print(f"{'=' * 70}{RST}")

    # ── LOCAL MODELS ──
    print(f"\n {BOLD}LOCAL MODELS (Ollama){RST}")
    print(f" {DIM}{'-' * 55}{RST}")
    if models:
        for m in models:
            name = m.get("name", "?")
            size_gb = m.get("size", 0) / 1e9  # Ollama reports size in bytes
            if name in loaded_names:
                status = f"{GREEN}IN VRAM{RST}"
            else:
                status = f"{DIM}on disk{RST}"
            print(f" {name:35s} {size_gb:5.1f}GB {status}")
    else:
        print(f" {RED}(Ollama not responding){RST}")

    # ── LOCAL INFERENCE ACTIVITY ──
    print(f"\n {BOLD}LOCAL INFERENCE ({len(metrics)} calls, last {hours}h){RST}")
    print(f" {DIM}{'-' * 55}{RST}")
    if metrics:
        # Per-caller success/error tally.
        by_caller = {}
        for r in metrics:
            caller = r.get("caller", "unknown")
            if caller not in by_caller:
                by_caller[caller] = {"count": 0, "success": 0, "errors": 0}
            by_caller[caller]["count"] += 1
            if r.get("success"):
                by_caller[caller]["success"] += 1
            else:
                by_caller[caller]["errors"] += 1
        for caller, stats in by_caller.items():
            err = f" {RED}err:{stats['errors']}{RST}" if stats["errors"] else ""
            print(f" {caller:25s} calls:{stats['count']:4d} "
                  f"{GREEN}ok:{stats['success']}{RST}{err}")

        # Per-model call counts, most-used first.
        by_model = {}
        for r in metrics:
            model = r.get("model", "unknown")
            by_model[model] = by_model.get(model, 0) + 1
        print(f"\n {DIM}Models used:{RST}")
        for model, count in sorted(by_model.items(), key=lambda x: -x[1]):
            print(f" {model:30s} {count} calls")
    else:
        print(f" {DIM}(no local calls recorded yet){RST}")

    # ── HEARTBEAT STATUS ──
    print(f"\n {BOLD}HEARTBEAT ({len(ticks)} ticks today){RST}")
    print(f" {DIM}{'-' * 55}{RST}")
    if ticks:
        last = ticks[-1]
        # Newer ticks carry a "decision" dict; older ones only "actions".
        decision = last.get("decision", last.get("actions", {}))
        if isinstance(decision, dict):
            severity = decision.get("severity", "unknown")
            reasoning = decision.get("reasoning", "")
            sev_color = GREEN if severity == "ok" else YELLOW if severity == "warning" else RED
            print(f" Last tick: {last.get('tick_id', '?')}")
            print(f" Severity: {sev_color}{severity}{RST}")
            if reasoning:
                print(f" Reasoning: {reasoning[:65]}")
        else:
            print(f" Last tick: {last.get('tick_id', '?')}")
            actions = last.get("actions", [])
            print(f" Actions: {actions if actions else 'none'}")

        # Split today's ticks into model-made decisions vs fallbacks.
        model_decisions = sum(1 for t in ticks
                              if isinstance(t.get("decision"), dict)
                              and t["decision"].get("severity") != "fallback")
        fallback = len(ticks) - model_decisions
        print(f" {CYAN}Model: {model_decisions}{RST} | {DIM}Fallback: {fallback}{RST}")
    else:
        print(f" {DIM}(no ticks today){RST}")

    # ── HERMES SESSIONS ──
    # A session pointed at localhost:11434 is served by local Ollama.
    local_sessions = [s for s in sessions
                      if "localhost:11434" in str(s.get("base_url", ""))]
    cloud_sessions = [s for s in sessions if s not in local_sessions]
    print(f"\n {BOLD}HERMES SESSIONS{RST}")
    print(f" {DIM}{'-' * 55}{RST}")
    print(f" Total: {len(sessions)} | "
          f"{GREEN}Local: {len(local_sessions)}{RST} | "
          f"{YELLOW}Cloud: {len(cloud_sessions)}{RST}")

    # ── ACTIVE LOOPS ── (static description of the scheduled loops)
    print(f"\n {BOLD}ACTIVE LOOPS{RST}")
    print(f" {DIM}{'-' * 55}{RST}")
    print(f" {CYAN}heartbeat_tick{RST} 10m hermes4:14b DECIDE phase")
    print(f" {DIM}model_health{RST} 5m (local check) Ollama ping")
    print(f" {DIM}gemini_worker{RST} 20m gemini-2.5-pro aider")
    print(f" {DIM}grok_worker{RST} 20m grok-3-fast opencode")
    print(f" {DIM}cross_review{RST} 30m gemini+grok PR review")

    print(f"\n{BOLD}{'=' * 70}{RST}")
    print(f" {DIM}Refresh: timmy-dashboard --watch | History: --hours=N{RST}")
|
|
|
|
|
|
if __name__ == "__main__":
    # Minimal hand-rolled CLI: --watch loops forever, --hours=N sets lookback.
    cli_args = sys.argv[1:]
    watch = "--watch" in cli_args
    hours = 24
    for arg in cli_args:
        if arg.startswith("--hours="):
            hours = int(arg.split("=")[1])

    if not watch:
        render(hours)
    else:
        try:
            while True:
                render(hours)
                time.sleep(30)  # refresh cadence for --watch mode
        except KeyboardInterrupt:
            print(f"\n{DIM}Dashboard stopped.{RST}")
|