Files
hermes-agent/hermes_cli/doctor.py

811 lines
35 KiB
Python
Raw Normal View History

"""
Doctor command for hermes CLI.
Diagnoses issues with Hermes Agent setup.
"""
import os
import sys
import subprocess
import shutil
from hermes_cli.config import get_project_root, get_hermes_home, get_env_path
from hermes_constants import display_hermes_home
PROJECT_ROOT = get_project_root()
HERMES_HOME = get_hermes_home()
_DHH = display_hermes_home() # user-facing display path (e.g. ~/.hermes or ~/.hermes/profiles/coder)
# Load environment variables from ~/.hermes/.env so API key checks work
from dotenv import load_dotenv
_env_path = get_env_path()
if _env_path.exists():
2026-02-25 15:20:42 -08:00
try:
load_dotenv(_env_path, encoding="utf-8")
except UnicodeDecodeError:
load_dotenv(_env_path, encoding="latin-1")
# Also try project .env as dev fallback
2026-02-25 15:20:42 -08:00
load_dotenv(PROJECT_ROOT / ".env", override=False, encoding="utf-8")
2026-02-20 23:23:32 -08:00
from hermes_cli.colors import Colors, color
from hermes_constants import OPENROUTER_MODELS_URL
_PROVIDER_ENV_HINTS = (
"OPENROUTER_API_KEY",
"OPENAI_API_KEY",
"ANTHROPIC_API_KEY",
"ANTHROPIC_TOKEN",
"OPENAI_BASE_URL",
"GLM_API_KEY",
"ZAI_API_KEY",
"Z_AI_API_KEY",
"KIMI_API_KEY",
"MINIMAX_API_KEY",
"MINIMAX_CN_API_KEY",
"KILOCODE_API_KEY",
)
def _has_provider_env_config(content: str) -> bool:
"""Return True when ~/.hermes/.env contains provider auth/base URL settings."""
return any(key in content for key in _PROVIDER_ENV_HINTS)
def _honcho_is_configured_for_doctor() -> bool:
"""Return True when Honcho is configured, even if this process has no active session."""
try:
from honcho_integration.client import HonchoClientConfig
cfg = HonchoClientConfig.from_global_config()
return bool(cfg.enabled and (cfg.api_key or cfg.base_url))
except Exception:
return False
def _apply_doctor_tool_availability_overrides(available: list[str], unavailable: list[dict]) -> tuple[list[str], list[dict]]:
"""Adjust runtime-gated tool availability for doctor diagnostics."""
if not _honcho_is_configured_for_doctor():
return available, unavailable
updated_available = list(available)
updated_unavailable = []
for item in unavailable:
if item.get("name") == "honcho":
if "honcho" not in updated_available:
updated_available.append("honcho")
continue
updated_unavailable.append(item)
return updated_available, updated_unavailable
def check_ok(text: str, detail: str = ""):
print(f" {color('', Colors.GREEN)} {text}" + (f" {color(detail, Colors.DIM)}" if detail else ""))
def check_warn(text: str, detail: str = ""):
print(f" {color('', Colors.YELLOW)} {text}" + (f" {color(detail, Colors.DIM)}" if detail else ""))
def check_fail(text: str, detail: str = ""):
print(f" {color('', Colors.RED)} {text}" + (f" {color(detail, Colors.DIM)}" if detail else ""))
def check_info(text: str):
print(f" {color('', Colors.CYAN)} {text}")
def _check_gateway_service_linger(issues: list[str]) -> None:
    """Warn when a systemd user gateway service will stop after logout.

    Appends a remediation hint to *issues* when linger is known-disabled.
    Silently returns on non-Linux hosts or when no gateway unit is installed.
    """
    try:
        from hermes_cli.gateway import (
            get_systemd_linger_status,
            get_systemd_unit_path,
            is_linux,
        )
    except Exception as e:
        check_warn("Gateway service linger", f"(could not import gateway helpers: {e})")
        return

    # Only relevant for Linux hosts that actually installed the user unit.
    if not is_linux():
        return
    if not get_systemd_unit_path().exists():
        return

    print()
    print(color("◆ Gateway Service", Colors.CYAN, Colors.BOLD))

    enabled, detail = get_systemd_linger_status()
    if enabled is True:
        check_ok("Systemd linger enabled", "(gateway service survives logout)")
        return
    if enabled is False:
        check_warn("Systemd linger disabled", "(gateway may stop after logout)")
        check_info("Run: sudo loginctl enable-linger $USER")
        issues.append("Enable linger for the gateway user service: sudo loginctl enable-linger $USER")
        return
    # enabled is None (or anything else): status could not be determined.
    check_warn("Could not verify systemd linger", f"({detail})")
def run_doctor(args):
"""Run diagnostic checks."""
should_fix = getattr(args, 'fix', False)
# Doctor runs from the interactive CLI, so CLI-gated tool availability
# checks (like cronjob management) should see the same context as `hermes`.
os.environ.setdefault("HERMES_INTERACTIVE", "1")
issues = []
2026-02-22 02:16:11 -08:00
manual_issues = [] # issues that can't be auto-fixed
fixed_count = 0
print()
print(color("┌─────────────────────────────────────────────────────────┐", Colors.CYAN))
print(color("│ 🩺 Hermes Doctor │", Colors.CYAN))
print(color("└─────────────────────────────────────────────────────────┘", Colors.CYAN))
# =========================================================================
# Check: Python version
# =========================================================================
print()
print(color("◆ Python Environment", Colors.CYAN, Colors.BOLD))
py_version = sys.version_info
if py_version >= (3, 11):
check_ok(f"Python {py_version.major}.{py_version.minor}.{py_version.micro}")
elif py_version >= (3, 10):
check_ok(f"Python {py_version.major}.{py_version.minor}.{py_version.micro}")
check_warn("Python 3.11+ recommended for RL Training tools (tinker requires >= 3.11)")
elif py_version >= (3, 8):
check_warn(f"Python {py_version.major}.{py_version.minor}.{py_version.micro}", "(3.10+ recommended)")
else:
check_fail(f"Python {py_version.major}.{py_version.minor}.{py_version.micro}", "(3.10+ required)")
issues.append("Upgrade Python to 3.10+")
# Check if in virtual environment
in_venv = sys.prefix != sys.base_prefix
if in_venv:
check_ok("Virtual environment active")
else:
check_warn("Not in virtual environment", "(recommended)")
# =========================================================================
# Check: Required packages
# =========================================================================
print()
print(color("◆ Required Packages", Colors.CYAN, Colors.BOLD))
required_packages = [
("openai", "OpenAI SDK"),
("rich", "Rich (terminal UI)"),
("dotenv", "python-dotenv"),
("yaml", "PyYAML"),
("httpx", "HTTPX"),
]
optional_packages = [
("croniter", "Croniter (cron expressions)"),
("telegram", "python-telegram-bot"),
("discord", "discord.py"),
]
for module, name in required_packages:
try:
__import__(module)
check_ok(name)
except ImportError:
check_fail(name, "(missing)")
issues.append(f"Install {name}: uv pip install {module}")
for module, name in optional_packages:
try:
__import__(module)
check_ok(name, "(optional)")
except ImportError:
check_warn(name, "(optional, not installed)")
# =========================================================================
# Check: Configuration files
# =========================================================================
print()
print(color("◆ Configuration Files", Colors.CYAN, Colors.BOLD))
# Check ~/.hermes/.env (primary location for user config)
env_path = HERMES_HOME / '.env'
if env_path.exists():
check_ok(f"{_DHH}/.env file exists")
# Check for common issues
content = env_path.read_text()
if _has_provider_env_config(content):
check_ok("API key or custom endpoint configured")
else:
check_warn(f"No API key found in {_DHH}/.env")
issues.append("Run 'hermes setup' to configure API keys")
else:
# Also check project root as fallback
fallback_env = PROJECT_ROOT / '.env'
if fallback_env.exists():
check_ok(".env file exists (in project directory)")
else:
check_fail(f"{_DHH}/.env file missing")
2026-02-22 02:16:11 -08:00
if should_fix:
env_path.parent.mkdir(parents=True, exist_ok=True)
env_path.touch()
check_ok(f"Created empty {_DHH}/.env")
2026-02-22 02:16:11 -08:00
check_info("Run 'hermes setup' to configure API keys")
fixed_count += 1
else:
check_info("Run 'hermes setup' to create one")
issues.append("Run 'hermes setup' to create .env")
# Check ~/.hermes/config.yaml (primary) or project cli-config.yaml (fallback)
config_path = HERMES_HOME / 'config.yaml'
if config_path.exists():
check_ok(f"{_DHH}/config.yaml exists")
else:
fallback_config = PROJECT_ROOT / 'cli-config.yaml'
if fallback_config.exists():
check_ok("cli-config.yaml exists (in project directory)")
else:
2026-02-22 02:16:11 -08:00
example_config = PROJECT_ROOT / 'cli-config.yaml.example'
if should_fix and example_config.exists():
config_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(str(example_config), str(config_path))
check_ok(f"Created {_DHH}/config.yaml from cli-config.yaml.example")
2026-02-22 02:16:11 -08:00
fixed_count += 1
elif should_fix:
check_warn("config.yaml not found and no example to copy from")
manual_issues.append(f"Create {_DHH}/config.yaml manually")
2026-02-22 02:16:11 -08:00
else:
check_warn("config.yaml not found", "(using defaults)")
# =========================================================================
# Check: Auth providers
# =========================================================================
print()
print(color("◆ Auth Providers", Colors.CYAN, Colors.BOLD))
try:
from hermes_cli.auth import get_nous_auth_status, get_codex_auth_status
nous_status = get_nous_auth_status()
if nous_status.get("logged_in"):
check_ok("Nous Portal auth", "(logged in)")
else:
check_warn("Nous Portal auth", "(not logged in)")
codex_status = get_codex_auth_status()
if codex_status.get("logged_in"):
check_ok("OpenAI Codex auth", "(logged in)")
else:
check_warn("OpenAI Codex auth", "(not logged in)")
if codex_status.get("error"):
check_info(codex_status["error"])
except Exception as e:
check_warn("Auth provider status", f"(could not check: {e})")
if shutil.which("codex"):
check_ok("codex CLI")
else:
check_warn("codex CLI not found", "(required for openai-codex login)")
# =========================================================================
# Check: Directory structure
# =========================================================================
print()
print(color("◆ Directory Structure", Colors.CYAN, Colors.BOLD))
hermes_home = HERMES_HOME
if hermes_home.exists():
check_ok(f"{_DHH} directory exists")
else:
2026-02-22 02:16:11 -08:00
if should_fix:
hermes_home.mkdir(parents=True, exist_ok=True)
check_ok(f"Created {_DHH} directory")
2026-02-22 02:16:11 -08:00
fixed_count += 1
else:
check_warn(f"{_DHH} not found", "(will be created on first use)")
2026-02-22 02:16:11 -08:00
# Check expected subdirectories
expected_subdirs = ["cron", "sessions", "logs", "skills", "memories"]
for subdir_name in expected_subdirs:
subdir_path = hermes_home / subdir_name
if subdir_path.exists():
check_ok(f"{_DHH}/{subdir_name}/ exists")
2026-02-22 02:16:11 -08:00
else:
if should_fix:
subdir_path.mkdir(parents=True, exist_ok=True)
check_ok(f"Created {_DHH}/{subdir_name}/")
2026-02-22 02:16:11 -08:00
fixed_count += 1
else:
check_warn(f"{_DHH}/{subdir_name}/ not found", "(will be created on first use)")
# Check for SOUL.md persona file
soul_path = hermes_home / "SOUL.md"
if soul_path.exists():
content = soul_path.read_text(encoding="utf-8").strip()
# Check if it's just the template comments (no real content)
lines = [l for l in content.splitlines() if l.strip() and not l.strip().startswith(("<!--", "-->", "#"))]
if lines:
check_ok(f"{_DHH}/SOUL.md exists (persona configured)")
else:
check_info(f"{_DHH}/SOUL.md exists but is empty — edit it to customize personality")
else:
check_warn(f"{_DHH}/SOUL.md not found", "(create it to give Hermes a custom personality)")
if should_fix:
soul_path.parent.mkdir(parents=True, exist_ok=True)
2026-02-22 02:16:11 -08:00
soul_path.write_text(
"# Hermes Agent Persona\n\n"
"<!-- Edit this file to customize how Hermes communicates. -->\n\n"
"You are Hermes, a helpful AI assistant.\n",
encoding="utf-8",
)
check_ok(f"Created {_DHH}/SOUL.md with basic template")
2026-02-22 02:16:11 -08:00
fixed_count += 1
# Check memory directory
memories_dir = hermes_home / "memories"
if memories_dir.exists():
check_ok(f"{_DHH}/memories/ directory exists")
memory_file = memories_dir / "MEMORY.md"
user_file = memories_dir / "USER.md"
if memory_file.exists():
size = len(memory_file.read_text(encoding="utf-8").strip())
check_ok(f"MEMORY.md exists ({size} chars)")
else:
check_info("MEMORY.md not created yet (will be created when the agent first writes a memory)")
if user_file.exists():
size = len(user_file.read_text(encoding="utf-8").strip())
check_ok(f"USER.md exists ({size} chars)")
else:
check_info("USER.md not created yet (will be created when the agent first writes a memory)")
else:
check_warn(f"{_DHH}/memories/ not found", "(will be created on first use)")
if should_fix:
memories_dir.mkdir(parents=True, exist_ok=True)
check_ok(f"Created {_DHH}/memories/")
2026-02-22 02:16:11 -08:00
fixed_count += 1
# Check SQLite session store
state_db_path = hermes_home / "state.db"
if state_db_path.exists():
try:
import sqlite3
conn = sqlite3.connect(str(state_db_path))
cursor = conn.execute("SELECT COUNT(*) FROM sessions")
count = cursor.fetchone()[0]
conn.close()
check_ok(f"{_DHH}/state.db exists ({count} sessions)")
except Exception as e:
check_warn(f"{_DHH}/state.db exists but has issues: {e}")
else:
check_info(f"{_DHH}/state.db not created yet (will be created on first session)")
_check_gateway_service_linger(issues)
# =========================================================================
# Check: External tools
# =========================================================================
print()
print(color("◆ External Tools", Colors.CYAN, Colors.BOLD))
# Git
if shutil.which("git"):
check_ok("git")
else:
check_warn("git not found", "(optional)")
# ripgrep (optional, for faster file search)
if shutil.which("rg"):
check_ok("ripgrep (rg)", "(faster file search)")
else:
check_warn("ripgrep (rg) not found", "(file search uses grep fallback)")
check_info("Install for faster search: sudo apt install ripgrep")
# Docker (optional)
terminal_env = os.getenv("TERMINAL_ENV", "local")
if terminal_env == "docker":
if shutil.which("docker"):
# Check if docker daemon is running
result = subprocess.run(["docker", "info"], capture_output=True)
if result.returncode == 0:
check_ok("docker", "(daemon running)")
else:
check_fail("docker daemon not running")
issues.append("Start Docker daemon")
else:
check_fail("docker not found", "(required for TERMINAL_ENV=docker)")
issues.append("Install Docker or change TERMINAL_ENV")
else:
if shutil.which("docker"):
check_ok("docker", "(optional)")
else:
check_warn("docker not found", "(optional)")
# SSH (if using ssh backend)
if terminal_env == "ssh":
ssh_host = os.getenv("TERMINAL_SSH_HOST")
if ssh_host:
# Try to connect
result = subprocess.run(
["ssh", "-o", "ConnectTimeout=5", "-o", "BatchMode=yes", ssh_host, "echo ok"],
capture_output=True,
text=True
)
if result.returncode == 0:
check_ok(f"SSH connection to {ssh_host}")
else:
check_fail(f"SSH connection to {ssh_host}")
issues.append(f"Check SSH configuration for {ssh_host}")
else:
check_fail("TERMINAL_SSH_HOST not set", "(required for TERMINAL_ENV=ssh)")
issues.append("Set TERMINAL_SSH_HOST in .env")
# Daytona (if using daytona backend)
if terminal_env == "daytona":
daytona_key = os.getenv("DAYTONA_API_KEY")
if daytona_key:
check_ok("Daytona API key", "(configured)")
else:
check_fail("DAYTONA_API_KEY not set", "(required for TERMINAL_ENV=daytona)")
issues.append("Set DAYTONA_API_KEY environment variable")
try:
from daytona import Daytona # noqa: F401 — SDK presence check
check_ok("daytona SDK", "(installed)")
except ImportError:
check_fail("daytona SDK not installed", "(pip install daytona)")
issues.append("Install daytona SDK: pip install daytona")
# Node.js + agent-browser (for browser automation tools)
if shutil.which("node"):
check_ok("Node.js")
# Check if agent-browser is installed
agent_browser_path = PROJECT_ROOT / "node_modules" / "agent-browser"
if agent_browser_path.exists():
check_ok("agent-browser (Node.js)", "(browser automation)")
else:
check_warn("agent-browser not installed", "(run: npm install)")
else:
check_warn("Node.js not found", "(optional, needed for browser tools)")
# npm audit for all Node.js packages
if shutil.which("npm"):
npm_dirs = [
(PROJECT_ROOT, "Browser tools (agent-browser)"),
(PROJECT_ROOT / "scripts" / "whatsapp-bridge", "WhatsApp bridge"),
]
for npm_dir, label in npm_dirs:
if not (npm_dir / "node_modules").exists():
continue
try:
audit_result = subprocess.run(
["npm", "audit", "--json"],
cwd=str(npm_dir),
capture_output=True, text=True, timeout=30,
)
import json as _json
audit_data = _json.loads(audit_result.stdout) if audit_result.stdout.strip() else {}
vuln_count = audit_data.get("metadata", {}).get("vulnerabilities", {})
critical = vuln_count.get("critical", 0)
high = vuln_count.get("high", 0)
moderate = vuln_count.get("moderate", 0)
total = critical + high + moderate
if total == 0:
check_ok(f"{label} deps", "(no known vulnerabilities)")
elif critical > 0 or high > 0:
check_warn(
f"{label} deps",
f"({critical} critical, {high} high, {moderate} moderate — run: cd {npm_dir} && npm audit fix)"
)
issues.append(f"{label} has {total} npm vulnerability(ies)")
else:
check_ok(f"{label} deps", f"({moderate} moderate vulnerability(ies))")
except Exception:
pass
# =========================================================================
# Check: API connectivity
# =========================================================================
print()
print(color("◆ API Connectivity", Colors.CYAN, Colors.BOLD))
openrouter_key = os.getenv("OPENROUTER_API_KEY")
if openrouter_key:
2026-02-22 02:16:11 -08:00
print(" Checking OpenRouter API...", end="", flush=True)
try:
import httpx
response = httpx.get(
2026-02-20 23:23:32 -08:00
OPENROUTER_MODELS_URL,
headers={"Authorization": f"Bearer {openrouter_key}"},
timeout=10
)
if response.status_code == 200:
2026-02-22 02:16:11 -08:00
print(f"\r {color('', Colors.GREEN)} OpenRouter API ")
elif response.status_code == 401:
2026-02-22 02:16:11 -08:00
print(f"\r {color('', Colors.RED)} OpenRouter API {color('(invalid API key)', Colors.DIM)} ")
issues.append("Check OPENROUTER_API_KEY in .env")
else:
2026-02-22 02:16:11 -08:00
print(f"\r {color('', Colors.RED)} OpenRouter API {color(f'(HTTP {response.status_code})', Colors.DIM)} ")
except Exception as e:
2026-02-22 02:16:11 -08:00
print(f"\r {color('', Colors.RED)} OpenRouter API {color(f'({e})', Colors.DIM)} ")
issues.append("Check network connectivity")
else:
check_warn("OpenRouter API", "(not configured)")
anthropic_key = os.getenv("ANTHROPIC_TOKEN") or os.getenv("ANTHROPIC_API_KEY")
if anthropic_key:
2026-02-22 02:16:11 -08:00
print(" Checking Anthropic API...", end="", flush=True)
try:
import httpx
from agent.anthropic_adapter import _is_oauth_token, _COMMON_BETAS, _OAUTH_ONLY_BETAS
headers = {"anthropic-version": "2023-06-01"}
if _is_oauth_token(anthropic_key):
headers["Authorization"] = f"Bearer {anthropic_key}"
headers["anthropic-beta"] = ",".join(_COMMON_BETAS + _OAUTH_ONLY_BETAS)
else:
headers["x-api-key"] = anthropic_key
response = httpx.get(
"https://api.anthropic.com/v1/models",
headers=headers,
timeout=10
)
if response.status_code == 200:
2026-02-22 02:16:11 -08:00
print(f"\r {color('', Colors.GREEN)} Anthropic API ")
elif response.status_code == 401:
2026-02-22 02:16:11 -08:00
print(f"\r {color('', Colors.RED)} Anthropic API {color('(invalid API key)', Colors.DIM)} ")
else:
2026-02-22 02:16:11 -08:00
msg = "(couldn't verify)"
print(f"\r {color('', Colors.YELLOW)} Anthropic API {color(msg, Colors.DIM)} ")
except Exception as e:
2026-02-22 02:16:11 -08:00
print(f"\r {color('', Colors.YELLOW)} Anthropic API {color(f'({e})', Colors.DIM)} ")
feat: add z.ai/GLM, Kimi/Moonshot, MiniMax as first-class providers Adds 4 new direct API-key providers (zai, kimi-coding, minimax, minimax-cn) to the inference provider system. All use standard OpenAI-compatible chat/completions endpoints with Bearer token auth. Core changes: - auth.py: Extended ProviderConfig with api_key_env_vars and base_url_env_var fields. Added providers to PROVIDER_REGISTRY. Added provider aliases (glm, z-ai, zhipu, kimi, moonshot). Added auto-detection of API-key providers in resolve_provider(). Added resolve_api_key_provider_credentials() and get_api_key_provider_status() helpers. - runtime_provider.py: Added generic API-key provider branch in resolve_runtime_provider() — any provider with auth_type='api_key' is automatically handled. - main.py: Added providers to hermes model menu with generic _model_flow_api_key_provider() flow. Updated _has_any_provider_configured() to check all provider env vars. Updated argparse --provider choices. - setup.py: Added providers to setup wizard with API key prompts and curated model lists. - config.py: Added env vars (GLM_API_KEY, KIMI_API_KEY, MINIMAX_API_KEY, etc.) to OPTIONAL_ENV_VARS. - status.py: Added API key display and provider status section. - doctor.py: Added connectivity checks for each provider endpoint. - cli.py: Updated provider docstrings. Docs: Updated README.md, .env.example, cli-config.yaml.example, cli-commands.md, environment-variables.md, configuration.md. Tests: 50 new tests covering registry, aliases, resolution, auto-detection, credential resolution, and runtime provider dispatch. Inspired by PR #33 (numman-ali) which proposed a provider registry approach. Credit to tars90percent (PR #473) and manuelschipper (PR #420) for related provider improvements merged earlier in this changeset.
2026-03-06 18:55:12 -08:00
# -- API-key providers (Z.AI/GLM, Kimi, MiniMax, MiniMax-CN) --
# Tuple: (name, env_vars, default_url, base_env, supports_models_endpoint)
# If supports_models_endpoint is False, we skip the health check and just show "configured"
feat: add z.ai/GLM, Kimi/Moonshot, MiniMax as first-class providers Adds 4 new direct API-key providers (zai, kimi-coding, minimax, minimax-cn) to the inference provider system. All use standard OpenAI-compatible chat/completions endpoints with Bearer token auth. Core changes: - auth.py: Extended ProviderConfig with api_key_env_vars and base_url_env_var fields. Added providers to PROVIDER_REGISTRY. Added provider aliases (glm, z-ai, zhipu, kimi, moonshot). Added auto-detection of API-key providers in resolve_provider(). Added resolve_api_key_provider_credentials() and get_api_key_provider_status() helpers. - runtime_provider.py: Added generic API-key provider branch in resolve_runtime_provider() — any provider with auth_type='api_key' is automatically handled. - main.py: Added providers to hermes model menu with generic _model_flow_api_key_provider() flow. Updated _has_any_provider_configured() to check all provider env vars. Updated argparse --provider choices. - setup.py: Added providers to setup wizard with API key prompts and curated model lists. - config.py: Added env vars (GLM_API_KEY, KIMI_API_KEY, MINIMAX_API_KEY, etc.) to OPTIONAL_ENV_VARS. - status.py: Added API key display and provider status section. - doctor.py: Added connectivity checks for each provider endpoint. - cli.py: Updated provider docstrings. Docs: Updated README.md, .env.example, cli-config.yaml.example, cli-commands.md, environment-variables.md, configuration.md. Tests: 50 new tests covering registry, aliases, resolution, auto-detection, credential resolution, and runtime provider dispatch. Inspired by PR #33 (numman-ali) which proposed a provider registry approach. Credit to tars90percent (PR #473) and manuelschipper (PR #420) for related provider improvements merged earlier in this changeset.
2026-03-06 18:55:12 -08:00
_apikey_providers = [
("Z.AI / GLM", ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"), "https://api.z.ai/api/paas/v4/models", "GLM_BASE_URL", True),
("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL", True),
# MiniMax APIs don't support /models endpoint — https://github.com/NousResearch/hermes-agent/issues/811
("MiniMax", ("MINIMAX_API_KEY",), None, "MINIMAX_BASE_URL", False),
("MiniMax (China)", ("MINIMAX_CN_API_KEY",), None, "MINIMAX_CN_BASE_URL", False),
("AI Gateway", ("AI_GATEWAY_API_KEY",), "https://ai-gateway.vercel.sh/v1/models", "AI_GATEWAY_BASE_URL", True),
("Kilo Code", ("KILOCODE_API_KEY",), "https://api.kilo.ai/api/gateway/models", "KILOCODE_BASE_URL", True),
feat: add z.ai/GLM, Kimi/Moonshot, MiniMax as first-class providers Adds 4 new direct API-key providers (zai, kimi-coding, minimax, minimax-cn) to the inference provider system. All use standard OpenAI-compatible chat/completions endpoints with Bearer token auth. Core changes: - auth.py: Extended ProviderConfig with api_key_env_vars and base_url_env_var fields. Added providers to PROVIDER_REGISTRY. Added provider aliases (glm, z-ai, zhipu, kimi, moonshot). Added auto-detection of API-key providers in resolve_provider(). Added resolve_api_key_provider_credentials() and get_api_key_provider_status() helpers. - runtime_provider.py: Added generic API-key provider branch in resolve_runtime_provider() — any provider with auth_type='api_key' is automatically handled. - main.py: Added providers to hermes model menu with generic _model_flow_api_key_provider() flow. Updated _has_any_provider_configured() to check all provider env vars. Updated argparse --provider choices. - setup.py: Added providers to setup wizard with API key prompts and curated model lists. - config.py: Added env vars (GLM_API_KEY, KIMI_API_KEY, MINIMAX_API_KEY, etc.) to OPTIONAL_ENV_VARS. - status.py: Added API key display and provider status section. - doctor.py: Added connectivity checks for each provider endpoint. - cli.py: Updated provider docstrings. Docs: Updated README.md, .env.example, cli-config.yaml.example, cli-commands.md, environment-variables.md, configuration.md. Tests: 50 new tests covering registry, aliases, resolution, auto-detection, credential resolution, and runtime provider dispatch. Inspired by PR #33 (numman-ali) which proposed a provider registry approach. Credit to tars90percent (PR #473) and manuelschipper (PR #420) for related provider improvements merged earlier in this changeset.
2026-03-06 18:55:12 -08:00
]
for _pname, _env_vars, _default_url, _base_env, _supports_health_check in _apikey_providers:
feat: add z.ai/GLM, Kimi/Moonshot, MiniMax as first-class providers Adds 4 new direct API-key providers (zai, kimi-coding, minimax, minimax-cn) to the inference provider system. All use standard OpenAI-compatible chat/completions endpoints with Bearer token auth. Core changes: - auth.py: Extended ProviderConfig with api_key_env_vars and base_url_env_var fields. Added providers to PROVIDER_REGISTRY. Added provider aliases (glm, z-ai, zhipu, kimi, moonshot). Added auto-detection of API-key providers in resolve_provider(). Added resolve_api_key_provider_credentials() and get_api_key_provider_status() helpers. - runtime_provider.py: Added generic API-key provider branch in resolve_runtime_provider() — any provider with auth_type='api_key' is automatically handled. - main.py: Added providers to hermes model menu with generic _model_flow_api_key_provider() flow. Updated _has_any_provider_configured() to check all provider env vars. Updated argparse --provider choices. - setup.py: Added providers to setup wizard with API key prompts and curated model lists. - config.py: Added env vars (GLM_API_KEY, KIMI_API_KEY, MINIMAX_API_KEY, etc.) to OPTIONAL_ENV_VARS. - status.py: Added API key display and provider status section. - doctor.py: Added connectivity checks for each provider endpoint. - cli.py: Updated provider docstrings. Docs: Updated README.md, .env.example, cli-config.yaml.example, cli-commands.md, environment-variables.md, configuration.md. Tests: 50 new tests covering registry, aliases, resolution, auto-detection, credential resolution, and runtime provider dispatch. Inspired by PR #33 (numman-ali) which proposed a provider registry approach. Credit to tars90percent (PR #473) and manuelschipper (PR #420) for related provider improvements merged earlier in this changeset.
2026-03-06 18:55:12 -08:00
_key = ""
for _ev in _env_vars:
_key = os.getenv(_ev, "")
if _key:
break
if _key:
_label = _pname.ljust(20)
# Some providers (like MiniMax) don't support /models endpoint
if not _supports_health_check:
print(f" {color('', Colors.GREEN)} {_label} {color('(key configured)', Colors.DIM)}")
continue
feat: add z.ai/GLM, Kimi/Moonshot, MiniMax as first-class providers Adds 4 new direct API-key providers (zai, kimi-coding, minimax, minimax-cn) to the inference provider system. All use standard OpenAI-compatible chat/completions endpoints with Bearer token auth. Core changes: - auth.py: Extended ProviderConfig with api_key_env_vars and base_url_env_var fields. Added providers to PROVIDER_REGISTRY. Added provider aliases (glm, z-ai, zhipu, kimi, moonshot). Added auto-detection of API-key providers in resolve_provider(). Added resolve_api_key_provider_credentials() and get_api_key_provider_status() helpers. - runtime_provider.py: Added generic API-key provider branch in resolve_runtime_provider() — any provider with auth_type='api_key' is automatically handled. - main.py: Added providers to hermes model menu with generic _model_flow_api_key_provider() flow. Updated _has_any_provider_configured() to check all provider env vars. Updated argparse --provider choices. - setup.py: Added providers to setup wizard with API key prompts and curated model lists. - config.py: Added env vars (GLM_API_KEY, KIMI_API_KEY, MINIMAX_API_KEY, etc.) to OPTIONAL_ENV_VARS. - status.py: Added API key display and provider status section. - doctor.py: Added connectivity checks for each provider endpoint. - cli.py: Updated provider docstrings. Docs: Updated README.md, .env.example, cli-config.yaml.example, cli-commands.md, environment-variables.md, configuration.md. Tests: 50 new tests covering registry, aliases, resolution, auto-detection, credential resolution, and runtime provider dispatch. Inspired by PR #33 (numman-ali) which proposed a provider registry approach. Credit to tars90percent (PR #473) and manuelschipper (PR #420) for related provider improvements merged earlier in this changeset.
2026-03-06 18:55:12 -08:00
print(f" Checking {_pname} API...", end="", flush=True)
try:
import httpx
_base = os.getenv(_base_env, "")
# Auto-detect Kimi Code keys (sk-kimi-) → api.kimi.com
if not _base and _key.startswith("sk-kimi-"):
_base = "https://api.kimi.com/coding/v1"
_url = (_base.rstrip("/") + "/models") if _base else _default_url
_headers = {"Authorization": f"Bearer {_key}"}
if "api.kimi.com" in _url.lower():
_headers["User-Agent"] = "KimiCLI/1.0"
_resp = httpx.get(
_url,
headers=_headers,
timeout=10,
)
if _resp.status_code == 200:
print(f"\r {color('', Colors.GREEN)} {_label} ")
elif _resp.status_code == 401:
print(f"\r {color('', Colors.RED)} {_label} {color('(invalid API key)', Colors.DIM)} ")
issues.append(f"Check {_env_vars[0]} in .env")
else:
print(f"\r {color('', Colors.YELLOW)} {_label} {color(f'(HTTP {_resp.status_code})', Colors.DIM)} ")
except Exception as _e:
print(f"\r {color('', Colors.YELLOW)} {_label} {color(f'({_e})', Colors.DIM)} ")
# =========================================================================
# Check: Submodules
# =========================================================================
print()
print(color("◆ Submodules", Colors.CYAN, Colors.BOLD))
# tinker-atropos (RL training backend)
tinker_dir = PROJECT_ROOT / "tinker-atropos"
if tinker_dir.exists() and (tinker_dir / "pyproject.toml").exists():
if py_version >= (3, 11):
try:
__import__("tinker_atropos")
check_ok("tinker-atropos", "(RL training backend)")
except ImportError:
check_warn("tinker-atropos found but not installed", "(run: uv pip install -e ./tinker-atropos)")
issues.append("Install tinker-atropos: uv pip install -e ./tinker-atropos")
else:
check_warn("tinker-atropos requires Python 3.11+", f"(current: {py_version.major}.{py_version.minor})")
else:
check_warn("tinker-atropos not found", "(run: git submodule update --init --recursive)")
# =========================================================================
# Check: Tool Availability
# =========================================================================
print()
print(color("◆ Tool Availability", Colors.CYAN, Colors.BOLD))
try:
# Add project root to path for imports
sys.path.insert(0, str(PROJECT_ROOT))
from model_tools import check_tool_availability, TOOLSET_REQUIREMENTS
available, unavailable = check_tool_availability()
available, unavailable = _apply_doctor_tool_availability_overrides(available, unavailable)
for tid in available:
info = TOOLSET_REQUIREMENTS.get(tid, {})
check_ok(info.get("name", tid))
for item in unavailable:
env_vars = item.get("missing_vars") or item.get("env_vars") or []
if env_vars:
vars_str = ", ".join(env_vars)
check_warn(item["name"], f"(missing {vars_str})")
else:
check_warn(item["name"], "(system dependency not met)")
# Count disabled tools with API key requirements
api_disabled = [u for u in unavailable if (u.get("missing_vars") or u.get("env_vars"))]
if api_disabled:
issues.append("Run 'hermes setup' to configure missing API keys for full tool access")
except Exception as e:
check_warn("Could not check tool availability", f"({e})")
# =========================================================================
# Check: Skills Hub
# =========================================================================
print()
print(color("◆ Skills Hub", Colors.CYAN, Colors.BOLD))
hub_dir = HERMES_HOME / "skills" / ".hub"
if hub_dir.exists():
check_ok("Skills Hub directory exists")
lock_file = hub_dir / "lock.json"
if lock_file.exists():
try:
import json
lock_data = json.loads(lock_file.read_text())
count = len(lock_data.get("installed", {}))
check_ok(f"Lock file OK ({count} hub-installed skill(s))")
except Exception:
check_warn("Lock file", "(corrupted or unreadable)")
quarantine = hub_dir / "quarantine"
q_count = sum(1 for d in quarantine.iterdir() if d.is_dir()) if quarantine.exists() else 0
if q_count > 0:
check_warn(f"{q_count} skill(s) in quarantine", "(pending review)")
else:
check_warn("Skills Hub directory not initialized", "(run: hermes skills list)")
from hermes_cli.config import get_env_value
github_token = get_env_value("GITHUB_TOKEN") or get_env_value("GH_TOKEN")
if github_token:
check_ok("GitHub token configured (authenticated API access)")
else:
check_warn("No GITHUB_TOKEN", f"(60 req/hr rate limit — set in {_DHH}/.env for better rates)")
# =========================================================================
# Honcho memory
# =========================================================================
print()
print(color("◆ Honcho Memory", Colors.CYAN, Colors.BOLD))
try:
from honcho_integration.client import HonchoClientConfig, resolve_config_path
hcfg = HonchoClientConfig.from_global_config()
_honcho_cfg_path = resolve_config_path()
if not _honcho_cfg_path.exists():
check_warn("Honcho config not found", "run: hermes honcho setup")
elif not hcfg.enabled:
check_info(f"Honcho disabled (set enabled: true in {_honcho_cfg_path} to activate)")
elif not (hcfg.api_key or hcfg.base_url):
check_fail("Honcho API key or base URL not set", "run: hermes honcho setup")
issues.append("No Honcho API key — run 'hermes honcho setup'")
else:
from honcho_integration.client import get_honcho_client, reset_honcho_client
reset_honcho_client()
try:
get_honcho_client(hcfg)
check_ok(
"Honcho connected",
f"workspace={hcfg.workspace_id} mode={hcfg.memory_mode} freq={hcfg.write_frequency}",
)
except Exception as _e:
check_fail("Honcho connection failed", str(_e))
issues.append(f"Honcho unreachable: {_e}")
except ImportError:
check_warn("honcho-ai not installed", "pip install honcho-ai")
except Exception as _e:
check_warn("Honcho check failed", str(_e))
# =========================================================================
# Profiles
# =========================================================================
try:
from hermes_cli.profiles import list_profiles, _get_wrapper_dir, profile_exists
import re as _re
named_profiles = [p for p in list_profiles() if not p.is_default]
if named_profiles:
print()
print(color("◆ Profiles", Colors.CYAN, Colors.BOLD))
check_ok(f"{len(named_profiles)} profile(s) found")
wrapper_dir = _get_wrapper_dir()
for p in named_profiles:
parts = []
if p.gateway_running:
parts.append("gateway running")
if p.model:
parts.append(p.model[:30])
if not (p.path / "config.yaml").exists():
parts.append("⚠ missing config")
if not (p.path / ".env").exists():
parts.append("no .env")
wrapper = wrapper_dir / p.name
if not wrapper.exists():
parts.append("no alias")
status = ", ".join(parts) if parts else "configured"
check_ok(f" {p.name}: {status}")
# Check for orphan wrappers
if wrapper_dir.is_dir():
for wrapper in wrapper_dir.iterdir():
if not wrapper.is_file():
continue
try:
content = wrapper.read_text()
if "hermes -p" in content:
_m = _re.search(r"hermes -p (\S+)", content)
if _m and not profile_exists(_m.group(1)):
check_warn(f"Orphan alias: {wrapper.name} → profile '{_m.group(1)}' no longer exists")
except Exception:
pass
except ImportError:
pass
except Exception as _e:
logger.debug("Profile health check failed: %s", _e)
# =========================================================================
# Summary
# =========================================================================
print()
remaining_issues = issues + manual_issues
if should_fix and fixed_count > 0:
print(color("" * 60, Colors.GREEN))
print(color(f" Fixed {fixed_count} issue(s).", Colors.GREEN, Colors.BOLD), end="")
if remaining_issues:
print(color(f" {len(remaining_issues)} issue(s) require manual intervention.", Colors.YELLOW, Colors.BOLD))
else:
print()
print()
if remaining_issues:
for i, issue in enumerate(remaining_issues, 1):
print(f" {i}. {issue}")
print()
elif remaining_issues:
print(color("" * 60, Colors.YELLOW))
print(color(f" Found {len(remaining_issues)} issue(s) to address:", Colors.YELLOW, Colors.BOLD))
print()
for i, issue in enumerate(remaining_issues, 1):
print(f" {i}. {issue}")
print()
if not should_fix:
print(color(" Tip: run 'hermes doctor --fix' to auto-fix what's possible.", Colors.DIM))
else:
print(color("" * 60, Colors.GREEN))
print(color(" All checks passed! 🎉", Colors.GREEN, Colors.BOLD))
print()