Compare commits
1 Commits
fix/708
...
fix/issue-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4c069fe82e |
253
scripts/llama_server_watchdog.py
Executable file
253
scripts/llama_server_watchdog.py
Executable file
@@ -0,0 +1,253 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
llama-server watchdog — monitors llama-server on port 8081 and auto-restarts
|
||||
if it goes down. Designed to run as a cron job or systemd timer.
|
||||
|
||||
Fix for #713: llama-server DOWN on port 8081 — local inference broken.
|
||||
|
||||
Usage:
|
||||
python scripts/llama_server_watchdog.py # one-shot check
|
||||
python scripts/llama_server_watchdog.py --daemon # continuous monitor
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import urllib.request
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
# Module-wide logging: timestamped lines tagged [llama-watchdog] so entries
# are easy to grep out of a shared cron/systemd journal.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [llama-watchdog] %(levelname)s %(message)s",
)
logger = logging.getLogger("llama-watchdog")


# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
# Every knob is overridable via environment variables so the same script can
# run unmodified on different hosts.

# Where the llama-server /health endpoint is expected to answer.
LLAMA_PORT = int(os.getenv("LLAMA_SERVER_PORT", "8081"))
LLAMA_HOST = os.getenv("LLAMA_SERVER_HOST", "127.0.0.1")
HEALTH_URL = f"http://{LLAMA_HOST}:{LLAMA_PORT}/health"
CHECK_INTERVAL = int(os.getenv("LLAMA_WATCHDOG_INTERVAL", "60")) # seconds

# Model path — override via env or auto-detect
LLAMA_MODEL_PATH = os.getenv("LLAMA_MODEL_PATH", "")
LLAMA_SERVER_BIN = os.getenv("LLAMA_SERVER_BIN", "llama-server")
LLAMA_CTX_SIZE = int(os.getenv("LLAMA_CTX_SIZE", "8192"))
LLAMA_GPU_LAYERS = int(os.getenv("LLAMA_GPU_LAYERS", "99"))
LLAMA_ALIAS = os.getenv("LLAMA_ALIAS", "hermes3")

# State file for tracking restarts (restart count, timestamps) between runs.
STATE_FILE = Path(os.getenv("HERMES_HOME", str(Path.home() / ".hermes"))) / "llama-watchdog-state.json"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Health Check
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def check_health(timeout: int = 5) -> dict:
    """Check if llama-server is responding on the configured port.

    Args:
        timeout: Socket timeout in seconds for the HTTP request.

    Returns:
        ``{"alive": True, "status_code": int, "body": str}`` on success
        (body truncated to 500 chars), or ``{"alive": False, "error": str}``
        on any failure.
    """
    try:
        req = urllib.request.Request(HEALTH_URL, method="GET")
        req.add_header("User-Agent", "llama-watchdog/1.0")
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            body = resp.read().decode("utf-8", errors="replace")
            return {
                "alive": True,
                "status_code": resp.status,
                "body": body[:500],
            }
    except Exception as e:
        # URLError, socket timeouts, connection refused — anything means
        # "down".  The original had a separate URLError clause whose body was
        # identical to this one; the duplicate branch is merged away.
        return {"alive": False, "error": str(e)}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Process Management
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def find_llama_process() -> Optional[int]:
    """Find the PID of a running llama-server bound to LLAMA_PORT, if any.

    Returns:
        The first matching PID, or None when nothing matches, pgrep is
        unavailable, or the lookup times out.
    """
    # pgrep -f matches against the full command line with POSIX extended
    # regexes.  The original pattern used "\s" in a non-raw string — an
    # invalid Python escape (SyntaxWarning on 3.12+) AND a non-portable
    # construct in pgrep's ERE dialect.  Use [[:space:]] instead, and anchor
    # the port number so 8081 cannot match a process on port 80811.
    pattern = f"llama-server.*--port[[:space:]]+{LLAMA_PORT}([[:space:]]|$)"
    try:
        result = subprocess.run(
            ["pgrep", "-f", pattern],
            capture_output=True, text=True, timeout=5,
        )
        if result.returncode == 0 and result.stdout.strip():
            # pgrep may report several PIDs (one per line); take the first.
            return int(result.stdout.strip().split("\n")[0])
    except (subprocess.TimeoutExpired, ValueError, FileNotFoundError):
        # FileNotFoundError: pgrep not installed — treat as "not found"
        # rather than crashing the watchdog.
        pass
    return None
|
||||
|
||||
|
||||
def auto_detect_model_path() -> Optional[str]:
    """Try to find the llama model path from Ollama's model store.

    Heuristic: among Ollama's content-addressed blobs, the largest regular
    file over 1 GB is almost certainly model weights (smaller blobs are
    manifests/metadata).  Note: this picks the *largest* model, not any
    particular named one.

    Returns:
        Path to the candidate blob as a string, or None if nothing suitable
        was found.
    """
    ollama_models = Path.home() / ".ollama" / "models" / "blobs"
    if not ollama_models.exists():
        return None

    def _size(p: Path) -> int:
        # Cache-friendly single stat per call; broken symlinks, directories,
        # and permission errors count as 0 so they are filtered out below.
        try:
            return p.stat().st_size if p.is_file() else 0
        except OSError:
            return 0

    for blob in sorted(ollama_models.iterdir(), key=_size, reverse=True):
        if _size(blob) > 1_000_000_000:  # >1GB, likely a model
            return str(blob)
    return None
|
||||
|
||||
|
||||
def start_llama_server() -> dict:
    """Start llama-server with the configured parameters.

    Spawns the binary detached from our stdio, then polls the health
    endpoint until it answers or a startup deadline passes.  The original
    implementation slept a fixed 3 seconds, which falsely declared large
    (multi-GB) models dead while they were still loading, and leaked the
    spawned process when the health check failed.

    Returns:
        ``{"success": True, "pid": int}`` on success, or
        ``{"success": False, "error": str}`` on failure.
    """
    model_path = LLAMA_MODEL_PATH or auto_detect_model_path()
    if not model_path:
        return {"success": False, "error": "No model path configured and auto-detection failed"}

    cmd = [
        LLAMA_SERVER_BIN,
        "--model", model_path,
        "--port", str(LLAMA_PORT),
        "--host", LLAMA_HOST,
        "--n-gpu-layers", str(LLAMA_GPU_LAYERS),
        "--flash-attn", "on",
        "--ctx-size", str(LLAMA_CTX_SIZE),
        "--alias", LLAMA_ALIAS,
    ]

    # Log only the binary + model args; the rest is static configuration.
    logger.info("Starting llama-server: %s", " ".join(cmd[:4]) + " ...")

    try:
        process = subprocess.Popen(
            cmd,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )

        # Poll the health endpoint for up to 30s instead of a single fixed
        # sleep — model loading time scales with model size.
        deadline = time.monotonic() + 30
        health = {"alive": False, "error": "not checked"}
        while time.monotonic() < deadline:
            # Bail out early if the child already exited (bad args, OOM...).
            if process.poll() is not None:
                return {"success": False,
                        "error": f"llama-server exited with code {process.returncode}"}
            health = check_health(timeout=10)
            if health["alive"]:
                logger.info("llama-server started successfully (PID: %d)", process.pid)
                return {"success": True, "pid": process.pid}
            time.sleep(2)

        # Never became healthy — terminate so we don't leak an orphan that
        # holds the port without serving.
        logger.error("llama-server started but health check failed: %s", health.get("error"))
        process.terminate()
        return {"success": False, "error": "Process started but health check failed"}
    except FileNotFoundError:
        return {"success": False, "error": f"Binary not found: {LLAMA_SERVER_BIN}"}
    except Exception as e:
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# State Management
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def load_state() -> dict:
    """Return the persisted watchdog state, or defaults when unavailable."""
    defaults = {"restarts": 0, "last_restart": None, "last_check": None}
    if not STATE_FILE.exists():
        return defaults
    try:
        return json.loads(STATE_FILE.read_text())
    except Exception:
        # Corrupt or unreadable state file — start fresh rather than crash.
        return defaults
|
||||
|
||||
|
||||
def save_state(state: dict):
    """Persist watchdog state as pretty-printed JSON, creating parent dirs."""
    state_dir = STATE_FILE.parent
    state_dir.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(state, indent=2)
    STATE_FILE.write_text(serialized)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main Logic
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def run_check() -> dict:
    """Run a single health-check cycle.

    If the server answers, just records the check.  Otherwise terminates any
    lingering process on the port and attempts a restart, updating the
    persisted restart counters either way.

    Returns:
        A dict with keys ``timestamp``, ``port``, ``health``, ``action``
        (and ``restart_result`` when a restart was attempted).
    """
    state = load_state()
    health = check_health()
    now = time.strftime("%Y-%m-%dT%H:%M:%S")
    state["last_check"] = now

    result = {"timestamp": now, "port": LLAMA_PORT, "health": health}

    # Happy path: server answered — nothing to do beyond bookkeeping.
    if health["alive"]:
        logger.info("llama-server OK on port %d", LLAMA_PORT)
        result["action"] = "none"
        save_state(state)
        return result

    logger.warning("llama-server DOWN on port %d: %s", LLAMA_PORT, health.get("error"))

    # A process may still hold the port without serving; clear it first.
    zombie_pid = find_llama_process()
    if zombie_pid:
        logger.info("Killing zombie process %d", zombie_pid)
        try:
            os.kill(zombie_pid, 15)  # SIGTERM
            time.sleep(2)
        except ProcessLookupError:
            pass  # already gone between lookup and kill

    restart = start_llama_server()
    result["action"] = "restart"
    result["restart_result"] = restart

    if restart["success"]:
        state["restarts"] = state.get("restarts", 0) + 1
        state["last_restart"] = now
        logger.info("llama-server restarted (total restarts: %d)", state["restarts"])
    else:
        logger.error("Failed to restart llama-server: %s", restart.get("error"))
        result["action"] = "restart_failed"

    save_state(state)
    return result
|
||||
|
||||
|
||||
def daemon_loop():
    """Monitor forever, sleeping CHECK_INTERVAL seconds between cycles."""
    logger.info(
        "Starting llama-server watchdog (port=%d, interval=%ds)",
        LLAMA_PORT,
        CHECK_INTERVAL,
    )
    while True:
        try:
            run_check()
        except Exception as exc:
            # Keep the daemon alive even when a single cycle blows up.
            logger.error("Check cycle error: %s", exc)
        time.sleep(CHECK_INTERVAL)
|
||||
|
||||
|
||||
def _print_status():
    """Print a human-readable status report and exit: 0 alive, 1 down."""
    health = check_health()
    state = load_state()
    print(f"llama-server on port {LLAMA_PORT}:")
    print(f" Alive: {health['alive']}")
    if health['alive']:
        print(f" Response: {health.get('body', '')[:100]}")
    print(f" Restarts: {state.get('restarts', 0)}")
    print(f" Last restart: {state.get('last_restart', 'never')}")
    print(f" Last check: {state.get('last_check', 'never')}")
    sys.exit(0 if health['alive'] else 1)


def main():
    """CLI entry point: one-shot check, --daemon loop, or --status report."""
    parser = argparse.ArgumentParser(description="llama-server watchdog")
    parser.add_argument("--daemon", action="store_true", help="Run continuous monitoring")
    parser.add_argument("--status", action="store_true", help="Show current status")
    args = parser.parse_args()

    if args.status:
        _print_status()

    if args.daemon:
        daemon_loop()
        return

    # Default: single check, machine-readable output, exit code reflects health.
    outcome = run_check()
    print(json.dumps(outcome, indent=2))
    sys.exit(0 if outcome["health"]["alive"] else 1)
|
||||
|
||||
|
||||
# Script entry point — argument handling and dispatch live in main().
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user