2026-02-02 19:01:51 -08:00
|
|
|
"""
|
|
|
|
|
Doctor command for hermes CLI.
|
|
|
|
|
|
|
|
|
|
Diagnoses issues with Hermes Agent setup.
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
import os
|
|
|
|
|
import sys
|
|
|
|
|
import subprocess
|
|
|
|
|
import shutil
|
|
|
|
|
|
2026-02-16 02:38:19 -08:00
|
|
|
from hermes_cli.config import get_project_root, get_hermes_home, get_env_path
|
2026-03-28 23:47:21 -07:00
|
|
|
from hermes_constants import display_hermes_home
|
2026-02-16 02:38:19 -08:00
|
|
|
|
|
|
|
|
# Resolved once at import time so every doctor check sees a consistent view
# of the installation for the lifetime of the command.
PROJECT_ROOT = get_project_root()
HERMES_HOME = get_hermes_home()
_DHH = display_hermes_home()  # user-facing display path (e.g. ~/.hermes or ~/.hermes/profiles/coder)
|
2026-02-16 02:38:19 -08:00
|
|
|
|
|
|
|
|
# Load environment variables from ~/.hermes/.env so API key checks work.
# This runs at import time, before run_doctor() reads os.getenv().
from dotenv import load_dotenv
_env_path = get_env_path()
if _env_path.exists():
    try:
        load_dotenv(_env_path, encoding="utf-8")
    except UnicodeDecodeError:
        # Some editors save .env in a non-UTF-8 encoding; latin-1 accepts any
        # byte sequence, so this retry can never raise a decode error.
        load_dotenv(_env_path, encoding="latin-1")
# Also try project .env as dev fallback (override=False keeps values already
# loaded from ~/.hermes/.env authoritative).
load_dotenv(PROJECT_ROOT / ".env", override=False, encoding="utf-8")
|
2026-02-02 19:01:51 -08:00
|
|
|
|
2026-02-20 23:23:32 -08:00
|
|
|
from hermes_cli.colors import Colors, color
|
|
|
|
|
from hermes_constants import OPENROUTER_MODELS_URL
|
2026-02-02 19:01:51 -08:00
|
|
|
|
2026-03-06 19:47:09 -08:00
|
|
|
|
|
|
|
|
# Environment variable names whose presence in ~/.hermes/.env indicates a
# configured inference provider (API key or custom OpenAI-compatible base URL).
# Matched via plain substring search — see _has_provider_env_config().
_PROVIDER_ENV_HINTS = (
    "OPENROUTER_API_KEY",
    "OPENAI_API_KEY",
    "ANTHROPIC_API_KEY",
    "ANTHROPIC_TOKEN",
    "OPENAI_BASE_URL",
    "GLM_API_KEY",
    "ZAI_API_KEY",
    "Z_AI_API_KEY",
    "KIMI_API_KEY",
    "MINIMAX_API_KEY",
    "MINIMAX_CN_API_KEY",
    "KILOCODE_API_KEY",
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _has_provider_env_config(content: str) -> bool:
    """Return True when ~/.hermes/.env contains provider auth/base URL settings.

    Performs a plain substring scan of *content* against every known provider
    env-var name in _PROVIDER_ENV_HINTS.
    """
    for hint in _PROVIDER_ENV_HINTS:
        if hint in content:
            return True
    return False
|
|
|
|
|
|
|
|
|
|
|
2026-03-12 19:34:19 -07:00
|
|
|
def _honcho_is_configured_for_doctor() -> bool:
|
|
|
|
|
"""Return True when Honcho is configured, even if this process has no active session."""
|
|
|
|
|
try:
|
|
|
|
|
from honcho_integration.client import HonchoClientConfig
|
|
|
|
|
|
|
|
|
|
cfg = HonchoClientConfig.from_global_config()
|
2026-03-28 17:49:56 -07:00
|
|
|
return bool(cfg.enabled and (cfg.api_key or cfg.base_url))
|
2026-03-12 19:34:19 -07:00
|
|
|
except Exception:
|
|
|
|
|
return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _apply_doctor_tool_availability_overrides(available: list[str], unavailable: list[dict]) -> tuple[list[str], list[dict]]:
    """Adjust runtime-gated tool availability for doctor diagnostics.

    When Honcho is configured, entries named "honcho" are promoted from the
    unavailable list into the available list (deduplicated); all other
    unavailable entries pass through untouched. The input lists are not
    mutated — fresh lists are returned.
    """
    if not _honcho_is_configured_for_doctor():
        return available, unavailable

    promoted = list(available)
    remaining: list[dict] = []
    for entry in unavailable:
        if entry.get("name") != "honcho":
            remaining.append(entry)
            continue
        # Promote honcho: doctor should report it as usable when configured.
        if "honcho" not in promoted:
            promoted.append("honcho")
    return promoted, remaining
|
|
|
|
|
|
|
|
|
|
|
2026-02-02 19:01:51 -08:00
|
|
|
def check_ok(text: str, detail: str = ""):
    """Print a green ✓ status line, optionally followed by dimmed detail text."""
    suffix = f" {color(detail, Colors.DIM)}" if detail else ""
    print(f"  {color('✓', Colors.GREEN)} {text}{suffix}")
|
|
|
|
|
|
|
|
|
|
def check_warn(text: str, detail: str = ""):
    """Print a yellow ⚠ status line, optionally followed by dimmed detail text."""
    suffix = f" {color(detail, Colors.DIM)}" if detail else ""
    print(f"  {color('⚠', Colors.YELLOW)} {text}{suffix}")
|
|
|
|
|
|
|
|
|
|
def check_fail(text: str, detail: str = ""):
    """Print a red ✗ status line, optionally followed by dimmed detail text."""
    suffix = f" {color(detail, Colors.DIM)}" if detail else ""
    print(f"  {color('✗', Colors.RED)} {text}{suffix}")
|
|
|
|
|
|
|
|
|
|
def check_info(text: str):
    """Print a cyan → informational line (no pass/fail judgement)."""
    arrow = color('→', Colors.CYAN)
    print(f"  {arrow} {text}")
|
|
|
|
|
|
|
|
|
|
|
2026-03-14 06:11:33 -07:00
|
|
|
def _check_gateway_service_linger(issues: list[str]) -> None:
    """Warn when a systemd user gateway service will stop after logout.

    Appends an actionable item to *issues* when linger is known to be
    disabled. Silently returns on non-Linux hosts or when no systemd unit
    file is installed.
    """
    # Gateway helpers are imported lazily so doctor still runs if that module breaks.
    try:
        from hermes_cli.gateway import (
            get_systemd_linger_status,
            get_systemd_unit_path,
            is_linux,
        )
    except Exception as e:
        check_warn("Gateway service linger", f"(could not import gateway helpers: {e})")
        return

    # Linger is a systemd concept; nothing to check elsewhere.
    if not is_linux():
        return
    # No unit file installed means the gateway service isn't managed by systemd.
    if not get_systemd_unit_path().exists():
        return

    print()
    print(color("◆ Gateway Service", Colors.CYAN, Colors.BOLD))

    enabled, detail = get_systemd_linger_status()
    # Explicit is-True / is-False checks: a None (or other) status means
    # the linger state could not be determined.
    if enabled is False:
        check_warn("Systemd linger disabled", "(gateway may stop after logout)")
        check_info("Run: sudo loginctl enable-linger $USER")
        issues.append("Enable linger for the gateway user service: sudo loginctl enable-linger $USER")
    elif enabled is True:
        check_ok("Systemd linger enabled", "(gateway service survives logout)")
    else:
        check_warn("Could not verify systemd linger", f"({detail})")
|
|
|
|
|
|
|
|
|
|
|
2026-02-02 19:01:51 -08:00
|
|
|
def run_doctor(args):
|
|
|
|
|
"""Run diagnostic checks."""
|
|
|
|
|
should_fix = getattr(args, 'fix', False)
|
2026-03-13 08:51:45 -07:00
|
|
|
|
|
|
|
|
# Doctor runs from the interactive CLI, so CLI-gated tool availability
|
|
|
|
|
# checks (like cronjob management) should see the same context as `hermes`.
|
|
|
|
|
os.environ.setdefault("HERMES_INTERACTIVE", "1")
|
2026-02-02 19:01:51 -08:00
|
|
|
|
|
|
|
|
issues = []
|
2026-02-22 02:16:11 -08:00
|
|
|
manual_issues = [] # issues that can't be auto-fixed
|
|
|
|
|
fixed_count = 0
|
2026-02-02 19:01:51 -08:00
|
|
|
|
|
|
|
|
print()
|
|
|
|
|
print(color("┌─────────────────────────────────────────────────────────┐", Colors.CYAN))
|
|
|
|
|
print(color("│ 🩺 Hermes Doctor │", Colors.CYAN))
|
|
|
|
|
print(color("└─────────────────────────────────────────────────────────┘", Colors.CYAN))
|
|
|
|
|
|
|
|
|
|
# =========================================================================
|
|
|
|
|
# Check: Python version
|
|
|
|
|
# =========================================================================
|
|
|
|
|
print()
|
|
|
|
|
print(color("◆ Python Environment", Colors.CYAN, Colors.BOLD))
|
|
|
|
|
|
|
|
|
|
py_version = sys.version_info
|
2026-02-07 00:05:04 +00:00
|
|
|
if py_version >= (3, 11):
|
2026-02-02 19:01:51 -08:00
|
|
|
check_ok(f"Python {py_version.major}.{py_version.minor}.{py_version.micro}")
|
2026-02-07 00:05:04 +00:00
|
|
|
elif py_version >= (3, 10):
|
|
|
|
|
check_ok(f"Python {py_version.major}.{py_version.minor}.{py_version.micro}")
|
|
|
|
|
check_warn("Python 3.11+ recommended for RL Training tools (tinker requires >= 3.11)")
|
2026-02-02 19:01:51 -08:00
|
|
|
elif py_version >= (3, 8):
|
|
|
|
|
check_warn(f"Python {py_version.major}.{py_version.minor}.{py_version.micro}", "(3.10+ recommended)")
|
|
|
|
|
else:
|
|
|
|
|
check_fail(f"Python {py_version.major}.{py_version.minor}.{py_version.micro}", "(3.10+ required)")
|
|
|
|
|
issues.append("Upgrade Python to 3.10+")
|
|
|
|
|
|
|
|
|
|
# Check if in virtual environment
|
|
|
|
|
in_venv = sys.prefix != sys.base_prefix
|
|
|
|
|
if in_venv:
|
|
|
|
|
check_ok("Virtual environment active")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("Not in virtual environment", "(recommended)")
|
|
|
|
|
|
|
|
|
|
# =========================================================================
|
|
|
|
|
# Check: Required packages
|
|
|
|
|
# =========================================================================
|
|
|
|
|
print()
|
|
|
|
|
print(color("◆ Required Packages", Colors.CYAN, Colors.BOLD))
|
|
|
|
|
|
|
|
|
|
required_packages = [
|
|
|
|
|
("openai", "OpenAI SDK"),
|
|
|
|
|
("rich", "Rich (terminal UI)"),
|
|
|
|
|
("dotenv", "python-dotenv"),
|
|
|
|
|
("yaml", "PyYAML"),
|
|
|
|
|
("httpx", "HTTPX"),
|
|
|
|
|
]
|
|
|
|
|
|
|
|
|
|
optional_packages = [
|
|
|
|
|
("croniter", "Croniter (cron expressions)"),
|
|
|
|
|
("telegram", "python-telegram-bot"),
|
|
|
|
|
("discord", "discord.py"),
|
|
|
|
|
]
|
|
|
|
|
|
|
|
|
|
for module, name in required_packages:
|
|
|
|
|
try:
|
|
|
|
|
__import__(module)
|
|
|
|
|
check_ok(name)
|
|
|
|
|
except ImportError:
|
|
|
|
|
check_fail(name, "(missing)")
|
2026-02-07 23:54:53 +00:00
|
|
|
issues.append(f"Install {name}: uv pip install {module}")
|
2026-02-02 19:01:51 -08:00
|
|
|
|
|
|
|
|
for module, name in optional_packages:
|
|
|
|
|
try:
|
|
|
|
|
__import__(module)
|
|
|
|
|
check_ok(name, "(optional)")
|
|
|
|
|
except ImportError:
|
|
|
|
|
check_warn(name, "(optional, not installed)")
|
|
|
|
|
|
|
|
|
|
# =========================================================================
|
|
|
|
|
# Check: Configuration files
|
|
|
|
|
# =========================================================================
|
|
|
|
|
print()
|
|
|
|
|
print(color("◆ Configuration Files", Colors.CYAN, Colors.BOLD))
|
|
|
|
|
|
2026-02-16 02:38:19 -08:00
|
|
|
# Check ~/.hermes/.env (primary location for user config)
|
|
|
|
|
env_path = HERMES_HOME / '.env'
|
2026-02-02 19:01:51 -08:00
|
|
|
if env_path.exists():
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"{_DHH}/.env file exists")
|
2026-02-02 19:01:51 -08:00
|
|
|
|
|
|
|
|
# Check for common issues
|
|
|
|
|
content = env_path.read_text()
|
2026-03-06 19:47:09 -08:00
|
|
|
if _has_provider_env_config(content):
|
|
|
|
|
check_ok("API key or custom endpoint configured")
|
2026-02-02 19:01:51 -08:00
|
|
|
else:
|
2026-03-28 23:47:21 -07:00
|
|
|
check_warn(f"No API key found in {_DHH}/.env")
|
2026-02-02 19:01:51 -08:00
|
|
|
issues.append("Run 'hermes setup' to configure API keys")
|
|
|
|
|
else:
|
2026-02-16 02:38:19 -08:00
|
|
|
# Also check project root as fallback
|
|
|
|
|
fallback_env = PROJECT_ROOT / '.env'
|
|
|
|
|
if fallback_env.exists():
|
|
|
|
|
check_ok(".env file exists (in project directory)")
|
|
|
|
|
else:
|
2026-03-28 23:47:21 -07:00
|
|
|
check_fail(f"{_DHH}/.env file missing")
|
2026-02-22 02:16:11 -08:00
|
|
|
if should_fix:
|
|
|
|
|
env_path.parent.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
env_path.touch()
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"Created empty {_DHH}/.env")
|
2026-02-22 02:16:11 -08:00
|
|
|
check_info("Run 'hermes setup' to configure API keys")
|
|
|
|
|
fixed_count += 1
|
|
|
|
|
else:
|
|
|
|
|
check_info("Run 'hermes setup' to create one")
|
|
|
|
|
issues.append("Run 'hermes setup' to create .env")
|
2026-02-02 19:01:51 -08:00
|
|
|
|
2026-02-16 02:38:19 -08:00
|
|
|
# Check ~/.hermes/config.yaml (primary) or project cli-config.yaml (fallback)
|
|
|
|
|
config_path = HERMES_HOME / 'config.yaml'
|
2026-02-02 19:01:51 -08:00
|
|
|
if config_path.exists():
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"{_DHH}/config.yaml exists")
|
2026-02-02 19:01:51 -08:00
|
|
|
else:
|
2026-02-16 02:38:19 -08:00
|
|
|
fallback_config = PROJECT_ROOT / 'cli-config.yaml'
|
|
|
|
|
if fallback_config.exists():
|
|
|
|
|
check_ok("cli-config.yaml exists (in project directory)")
|
|
|
|
|
else:
|
2026-02-22 02:16:11 -08:00
|
|
|
example_config = PROJECT_ROOT / 'cli-config.yaml.example'
|
|
|
|
|
if should_fix and example_config.exists():
|
|
|
|
|
config_path.parent.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
shutil.copy2(str(example_config), str(config_path))
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"Created {_DHH}/config.yaml from cli-config.yaml.example")
|
2026-02-22 02:16:11 -08:00
|
|
|
fixed_count += 1
|
|
|
|
|
elif should_fix:
|
|
|
|
|
check_warn("config.yaml not found and no example to copy from")
|
2026-03-28 23:47:21 -07:00
|
|
|
manual_issues.append(f"Create {_DHH}/config.yaml manually")
|
2026-02-22 02:16:11 -08:00
|
|
|
else:
|
|
|
|
|
check_warn("config.yaml not found", "(using defaults)")
|
2026-02-02 19:01:51 -08:00
|
|
|
|
2026-02-25 18:20:38 -08:00
|
|
|
# =========================================================================
|
|
|
|
|
# Check: Auth providers
|
|
|
|
|
# =========================================================================
|
|
|
|
|
print()
|
|
|
|
|
print(color("◆ Auth Providers", Colors.CYAN, Colors.BOLD))
|
|
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
from hermes_cli.auth import get_nous_auth_status, get_codex_auth_status
|
|
|
|
|
|
|
|
|
|
nous_status = get_nous_auth_status()
|
|
|
|
|
if nous_status.get("logged_in"):
|
|
|
|
|
check_ok("Nous Portal auth", "(logged in)")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("Nous Portal auth", "(not logged in)")
|
|
|
|
|
|
|
|
|
|
codex_status = get_codex_auth_status()
|
|
|
|
|
if codex_status.get("logged_in"):
|
|
|
|
|
check_ok("OpenAI Codex auth", "(logged in)")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("OpenAI Codex auth", "(not logged in)")
|
|
|
|
|
if codex_status.get("error"):
|
|
|
|
|
check_info(codex_status["error"])
|
|
|
|
|
except Exception as e:
|
|
|
|
|
check_warn("Auth provider status", f"(could not check: {e})")
|
|
|
|
|
|
|
|
|
|
if shutil.which("codex"):
|
|
|
|
|
check_ok("codex CLI")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("codex CLI not found", "(required for openai-codex login)")
|
|
|
|
|
|
2026-02-02 19:01:51 -08:00
|
|
|
# =========================================================================
|
|
|
|
|
# Check: Directory structure
|
|
|
|
|
# =========================================================================
|
|
|
|
|
print()
|
|
|
|
|
print(color("◆ Directory Structure", Colors.CYAN, Colors.BOLD))
|
|
|
|
|
|
2026-02-26 19:20:30 +11:00
|
|
|
hermes_home = HERMES_HOME
|
2026-02-02 19:01:51 -08:00
|
|
|
if hermes_home.exists():
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"{_DHH} directory exists")
|
2026-02-02 19:01:51 -08:00
|
|
|
else:
|
2026-02-22 02:16:11 -08:00
|
|
|
if should_fix:
|
|
|
|
|
hermes_home.mkdir(parents=True, exist_ok=True)
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"Created {_DHH} directory")
|
2026-02-22 02:16:11 -08:00
|
|
|
fixed_count += 1
|
|
|
|
|
else:
|
2026-03-28 23:47:21 -07:00
|
|
|
check_warn(f"{_DHH} not found", "(will be created on first use)")
|
2026-02-22 02:16:11 -08:00
|
|
|
|
|
|
|
|
# Check expected subdirectories
|
|
|
|
|
expected_subdirs = ["cron", "sessions", "logs", "skills", "memories"]
|
|
|
|
|
for subdir_name in expected_subdirs:
|
|
|
|
|
subdir_path = hermes_home / subdir_name
|
|
|
|
|
if subdir_path.exists():
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"{_DHH}/{subdir_name}/ exists")
|
2026-02-22 02:16:11 -08:00
|
|
|
else:
|
|
|
|
|
if should_fix:
|
|
|
|
|
subdir_path.mkdir(parents=True, exist_ok=True)
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"Created {_DHH}/{subdir_name}/")
|
2026-02-22 02:16:11 -08:00
|
|
|
fixed_count += 1
|
|
|
|
|
else:
|
2026-03-28 23:47:21 -07:00
|
|
|
check_warn(f"{_DHH}/{subdir_name}/ not found", "(will be created on first use)")
|
2026-02-02 19:01:51 -08:00
|
|
|
|
2026-02-16 02:38:19 -08:00
|
|
|
# Check for SOUL.md persona file
|
|
|
|
|
soul_path = hermes_home / "SOUL.md"
|
|
|
|
|
if soul_path.exists():
|
|
|
|
|
content = soul_path.read_text(encoding="utf-8").strip()
|
|
|
|
|
# Check if it's just the template comments (no real content)
|
|
|
|
|
lines = [l for l in content.splitlines() if l.strip() and not l.strip().startswith(("<!--", "-->", "#"))]
|
|
|
|
|
if lines:
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"{_DHH}/SOUL.md exists (persona configured)")
|
2026-02-16 02:38:19 -08:00
|
|
|
else:
|
2026-03-28 23:47:21 -07:00
|
|
|
check_info(f"{_DHH}/SOUL.md exists but is empty — edit it to customize personality")
|
2026-02-16 02:38:19 -08:00
|
|
|
else:
|
2026-03-28 23:47:21 -07:00
|
|
|
check_warn(f"{_DHH}/SOUL.md not found", "(create it to give Hermes a custom personality)")
|
2026-02-16 02:38:19 -08:00
|
|
|
if should_fix:
|
|
|
|
|
soul_path.parent.mkdir(parents=True, exist_ok=True)
|
2026-02-22 02:16:11 -08:00
|
|
|
soul_path.write_text(
|
|
|
|
|
"# Hermes Agent Persona\n\n"
|
|
|
|
|
"<!-- Edit this file to customize how Hermes communicates. -->\n\n"
|
|
|
|
|
"You are Hermes, a helpful AI assistant.\n",
|
|
|
|
|
encoding="utf-8",
|
|
|
|
|
)
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"Created {_DHH}/SOUL.md with basic template")
|
2026-02-22 02:16:11 -08:00
|
|
|
fixed_count += 1
|
2026-02-16 02:38:19 -08:00
|
|
|
|
2026-02-19 00:57:31 -08:00
|
|
|
# Check memory directory
|
|
|
|
|
memories_dir = hermes_home / "memories"
|
|
|
|
|
if memories_dir.exists():
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"{_DHH}/memories/ directory exists")
|
2026-02-19 00:57:31 -08:00
|
|
|
memory_file = memories_dir / "MEMORY.md"
|
|
|
|
|
user_file = memories_dir / "USER.md"
|
|
|
|
|
if memory_file.exists():
|
|
|
|
|
size = len(memory_file.read_text(encoding="utf-8").strip())
|
|
|
|
|
check_ok(f"MEMORY.md exists ({size} chars)")
|
|
|
|
|
else:
|
|
|
|
|
check_info("MEMORY.md not created yet (will be created when the agent first writes a memory)")
|
|
|
|
|
if user_file.exists():
|
|
|
|
|
size = len(user_file.read_text(encoding="utf-8").strip())
|
|
|
|
|
check_ok(f"USER.md exists ({size} chars)")
|
|
|
|
|
else:
|
|
|
|
|
check_info("USER.md not created yet (will be created when the agent first writes a memory)")
|
|
|
|
|
else:
|
2026-03-28 23:47:21 -07:00
|
|
|
check_warn(f"{_DHH}/memories/ not found", "(will be created on first use)")
|
2026-02-19 00:57:31 -08:00
|
|
|
if should_fix:
|
|
|
|
|
memories_dir.mkdir(parents=True, exist_ok=True)
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"Created {_DHH}/memories/")
|
2026-02-22 02:16:11 -08:00
|
|
|
fixed_count += 1
|
2026-02-19 00:57:31 -08:00
|
|
|
|
|
|
|
|
# Check SQLite session store
|
|
|
|
|
state_db_path = hermes_home / "state.db"
|
|
|
|
|
if state_db_path.exists():
|
|
|
|
|
try:
|
|
|
|
|
import sqlite3
|
|
|
|
|
conn = sqlite3.connect(str(state_db_path))
|
|
|
|
|
cursor = conn.execute("SELECT COUNT(*) FROM sessions")
|
|
|
|
|
count = cursor.fetchone()[0]
|
|
|
|
|
conn.close()
|
2026-03-28 23:47:21 -07:00
|
|
|
check_ok(f"{_DHH}/state.db exists ({count} sessions)")
|
2026-02-19 00:57:31 -08:00
|
|
|
except Exception as e:
|
2026-03-28 23:47:21 -07:00
|
|
|
check_warn(f"{_DHH}/state.db exists but has issues: {e}")
|
2026-02-19 00:57:31 -08:00
|
|
|
else:
|
2026-03-28 23:47:21 -07:00
|
|
|
check_info(f"{_DHH}/state.db not created yet (will be created on first session)")
|
2026-03-14 06:11:33 -07:00
|
|
|
|
|
|
|
|
_check_gateway_service_linger(issues)
|
2026-02-19 00:57:31 -08:00
|
|
|
|
2026-02-02 19:01:51 -08:00
|
|
|
# =========================================================================
|
|
|
|
|
# Check: External tools
|
|
|
|
|
# =========================================================================
|
|
|
|
|
print()
|
|
|
|
|
print(color("◆ External Tools", Colors.CYAN, Colors.BOLD))
|
|
|
|
|
|
|
|
|
|
# Git
|
|
|
|
|
if shutil.which("git"):
|
|
|
|
|
check_ok("git")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("git not found", "(optional)")
|
|
|
|
|
|
2026-02-05 03:49:46 -08:00
|
|
|
# ripgrep (optional, for faster file search)
|
|
|
|
|
if shutil.which("rg"):
|
|
|
|
|
check_ok("ripgrep (rg)", "(faster file search)")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("ripgrep (rg) not found", "(file search uses grep fallback)")
|
|
|
|
|
check_info("Install for faster search: sudo apt install ripgrep")
|
|
|
|
|
|
2026-02-02 19:01:51 -08:00
|
|
|
# Docker (optional)
|
|
|
|
|
terminal_env = os.getenv("TERMINAL_ENV", "local")
|
|
|
|
|
if terminal_env == "docker":
|
|
|
|
|
if shutil.which("docker"):
|
|
|
|
|
# Check if docker daemon is running
|
|
|
|
|
result = subprocess.run(["docker", "info"], capture_output=True)
|
|
|
|
|
if result.returncode == 0:
|
|
|
|
|
check_ok("docker", "(daemon running)")
|
|
|
|
|
else:
|
|
|
|
|
check_fail("docker daemon not running")
|
|
|
|
|
issues.append("Start Docker daemon")
|
|
|
|
|
else:
|
|
|
|
|
check_fail("docker not found", "(required for TERMINAL_ENV=docker)")
|
|
|
|
|
issues.append("Install Docker or change TERMINAL_ENV")
|
|
|
|
|
else:
|
|
|
|
|
if shutil.which("docker"):
|
|
|
|
|
check_ok("docker", "(optional)")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("docker not found", "(optional)")
|
|
|
|
|
|
|
|
|
|
# SSH (if using ssh backend)
|
|
|
|
|
if terminal_env == "ssh":
|
|
|
|
|
ssh_host = os.getenv("TERMINAL_SSH_HOST")
|
|
|
|
|
if ssh_host:
|
|
|
|
|
# Try to connect
|
|
|
|
|
result = subprocess.run(
|
|
|
|
|
["ssh", "-o", "ConnectTimeout=5", "-o", "BatchMode=yes", ssh_host, "echo ok"],
|
|
|
|
|
capture_output=True,
|
|
|
|
|
text=True
|
|
|
|
|
)
|
|
|
|
|
if result.returncode == 0:
|
|
|
|
|
check_ok(f"SSH connection to {ssh_host}")
|
|
|
|
|
else:
|
|
|
|
|
check_fail(f"SSH connection to {ssh_host}")
|
|
|
|
|
issues.append(f"Check SSH configuration for {ssh_host}")
|
|
|
|
|
else:
|
|
|
|
|
check_fail("TERMINAL_SSH_HOST not set", "(required for TERMINAL_ENV=ssh)")
|
|
|
|
|
issues.append("Set TERMINAL_SSH_HOST in .env")
|
|
|
|
|
|
2026-03-05 00:44:39 -08:00
|
|
|
# Daytona (if using daytona backend)
|
|
|
|
|
if terminal_env == "daytona":
|
|
|
|
|
daytona_key = os.getenv("DAYTONA_API_KEY")
|
|
|
|
|
if daytona_key:
|
|
|
|
|
check_ok("Daytona API key", "(configured)")
|
|
|
|
|
else:
|
|
|
|
|
check_fail("DAYTONA_API_KEY not set", "(required for TERMINAL_ENV=daytona)")
|
|
|
|
|
issues.append("Set DAYTONA_API_KEY environment variable")
|
|
|
|
|
try:
|
chore: remove ~100 unused imports across 55 files (#3016)
Automated cleanup via pyflakes + autoflake with manual review.
Changes:
- Removed unused stdlib imports (os, sys, json, pathlib.Path, etc.)
- Removed unused typing imports (List, Dict, Any, Optional, Tuple, Set, etc.)
- Removed unused internal imports (hermes_cli.auth, hermes_cli.config, etc.)
- Fixed cli.py: removed 8 shadowed banner imports (imported from hermes_cli.banner
then immediately redefined locally — only build_welcome_banner is actually used)
- Added noqa comments to imports that appear unused but serve a purpose:
- Re-exports (gateway/session.py SessionResetPolicy, tools/terminal_tool.py
is_interrupted/_interrupt_event)
- SDK presence checks in try/except (daytona, fal_client, discord)
- Test mock targets (auxiliary_client.py Path, mcp_config.py get_hermes_home)
Zero behavioral changes. Full test suite passes (6162/6162, 2 pre-existing
streaming test failures unrelated to this change).
2026-03-25 15:02:03 -07:00
|
|
|
from daytona import Daytona # noqa: F401 — SDK presence check
|
2026-03-05 00:44:39 -08:00
|
|
|
check_ok("daytona SDK", "(installed)")
|
|
|
|
|
except ImportError:
|
|
|
|
|
check_fail("daytona SDK not installed", "(pip install daytona)")
|
|
|
|
|
issues.append("Install daytona SDK: pip install daytona")
|
|
|
|
|
|
2026-02-16 02:41:24 -08:00
|
|
|
# Node.js + agent-browser (for browser automation tools)
|
|
|
|
|
if shutil.which("node"):
|
|
|
|
|
check_ok("Node.js")
|
|
|
|
|
# Check if agent-browser is installed
|
|
|
|
|
agent_browser_path = PROJECT_ROOT / "node_modules" / "agent-browser"
|
|
|
|
|
if agent_browser_path.exists():
|
|
|
|
|
check_ok("agent-browser (Node.js)", "(browser automation)")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("agent-browser not installed", "(run: npm install)")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("Node.js not found", "(optional, needed for browser tools)")
|
|
|
|
|
|
2026-02-25 23:47:39 -08:00
|
|
|
# npm audit for all Node.js packages
|
|
|
|
|
if shutil.which("npm"):
|
|
|
|
|
npm_dirs = [
|
|
|
|
|
(PROJECT_ROOT, "Browser tools (agent-browser)"),
|
|
|
|
|
(PROJECT_ROOT / "scripts" / "whatsapp-bridge", "WhatsApp bridge"),
|
|
|
|
|
]
|
|
|
|
|
for npm_dir, label in npm_dirs:
|
|
|
|
|
if not (npm_dir / "node_modules").exists():
|
|
|
|
|
continue
|
|
|
|
|
try:
|
|
|
|
|
audit_result = subprocess.run(
|
|
|
|
|
["npm", "audit", "--json"],
|
|
|
|
|
cwd=str(npm_dir),
|
|
|
|
|
capture_output=True, text=True, timeout=30,
|
|
|
|
|
)
|
|
|
|
|
import json as _json
|
|
|
|
|
audit_data = _json.loads(audit_result.stdout) if audit_result.stdout.strip() else {}
|
|
|
|
|
vuln_count = audit_data.get("metadata", {}).get("vulnerabilities", {})
|
|
|
|
|
critical = vuln_count.get("critical", 0)
|
|
|
|
|
high = vuln_count.get("high", 0)
|
|
|
|
|
moderate = vuln_count.get("moderate", 0)
|
|
|
|
|
total = critical + high + moderate
|
|
|
|
|
if total == 0:
|
|
|
|
|
check_ok(f"{label} deps", "(no known vulnerabilities)")
|
|
|
|
|
elif critical > 0 or high > 0:
|
|
|
|
|
check_warn(
|
|
|
|
|
f"{label} deps",
|
|
|
|
|
f"({critical} critical, {high} high, {moderate} moderate — run: cd {npm_dir} && npm audit fix)"
|
|
|
|
|
)
|
|
|
|
|
issues.append(f"{label} has {total} npm vulnerability(ies)")
|
|
|
|
|
else:
|
|
|
|
|
check_ok(f"{label} deps", f"({moderate} moderate vulnerability(ies))")
|
|
|
|
|
except Exception:
|
|
|
|
|
pass
|
|
|
|
|
|
2026-02-02 19:01:51 -08:00
|
|
|
# =========================================================================
|
|
|
|
|
# Check: API connectivity
|
|
|
|
|
# =========================================================================
|
|
|
|
|
print()
|
|
|
|
|
print(color("◆ API Connectivity", Colors.CYAN, Colors.BOLD))
|
|
|
|
|
|
|
|
|
|
openrouter_key = os.getenv("OPENROUTER_API_KEY")
|
|
|
|
|
if openrouter_key:
|
2026-02-22 02:16:11 -08:00
|
|
|
print(" Checking OpenRouter API...", end="", flush=True)
|
2026-02-02 19:01:51 -08:00
|
|
|
try:
|
|
|
|
|
import httpx
|
|
|
|
|
response = httpx.get(
|
2026-02-20 23:23:32 -08:00
|
|
|
OPENROUTER_MODELS_URL,
|
2026-02-02 19:01:51 -08:00
|
|
|
headers={"Authorization": f"Bearer {openrouter_key}"},
|
|
|
|
|
timeout=10
|
|
|
|
|
)
|
|
|
|
|
if response.status_code == 200:
|
2026-02-22 02:16:11 -08:00
|
|
|
print(f"\r {color('✓', Colors.GREEN)} OpenRouter API ")
|
2026-02-02 19:01:51 -08:00
|
|
|
elif response.status_code == 401:
|
2026-02-22 02:16:11 -08:00
|
|
|
print(f"\r {color('✗', Colors.RED)} OpenRouter API {color('(invalid API key)', Colors.DIM)} ")
|
2026-02-02 19:01:51 -08:00
|
|
|
issues.append("Check OPENROUTER_API_KEY in .env")
|
|
|
|
|
else:
|
2026-02-22 02:16:11 -08:00
|
|
|
print(f"\r {color('✗', Colors.RED)} OpenRouter API {color(f'(HTTP {response.status_code})', Colors.DIM)} ")
|
2026-02-02 19:01:51 -08:00
|
|
|
except Exception as e:
|
2026-02-22 02:16:11 -08:00
|
|
|
print(f"\r {color('✗', Colors.RED)} OpenRouter API {color(f'({e})', Colors.DIM)} ")
|
2026-02-02 19:01:51 -08:00
|
|
|
issues.append("Check network connectivity")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("OpenRouter API", "(not configured)")
|
|
|
|
|
|
2026-03-13 02:09:52 -07:00
|
|
|
    # Anthropic: ANTHROPIC_TOKEN (OAuth) takes precedence over ANTHROPIC_API_KEY.
    anthropic_key = os.getenv("ANTHROPIC_TOKEN") or os.getenv("ANTHROPIC_API_KEY")
    if anthropic_key:
        print("  Checking Anthropic API...", end="", flush=True)
        try:
            import httpx
            from agent.anthropic_adapter import _is_oauth_token, _COMMON_BETAS, _OAUTH_ONLY_BETAS

            headers = {"anthropic-version": "2023-06-01"}
            if _is_oauth_token(anthropic_key):
                # OAuth tokens authenticate via Bearer and require the beta flags.
                headers["Authorization"] = f"Bearer {anthropic_key}"
                headers["anthropic-beta"] = ",".join(_COMMON_BETAS + _OAUTH_ONLY_BETAS)
            else:
                # Plain API keys use the x-api-key header instead.
                headers["x-api-key"] = anthropic_key
            response = httpx.get(
                "https://api.anthropic.com/v1/models",
                headers=headers,
                timeout=10
            )
            if response.status_code == 200:
                print(f"\r  {color('✓', Colors.GREEN)} Anthropic API                              ")
            elif response.status_code == 401:
                print(f"\r  {color('✗', Colors.RED)} Anthropic API {color('(invalid API key)', Colors.DIM)}          ")
            else:
                # Non-200/401 is treated as inconclusive (⚠) rather than a hard failure.
                msg = "(couldn't verify)"
                print(f"\r  {color('⚠', Colors.YELLOW)} Anthropic API {color(msg, Colors.DIM)}          ")
        except Exception as e:
            print(f"\r  {color('⚠', Colors.YELLOW)} Anthropic API {color(f'({e})', Colors.DIM)}          ")
|
feat: add z.ai/GLM, Kimi/Moonshot, MiniMax as first-class providers
Adds 4 new direct API-key providers (zai, kimi-coding, minimax, minimax-cn)
to the inference provider system. All use standard OpenAI-compatible
chat/completions endpoints with Bearer token auth.
Core changes:
- auth.py: Extended ProviderConfig with api_key_env_vars and base_url_env_var
fields. Added providers to PROVIDER_REGISTRY. Added provider aliases
(glm, z-ai, zhipu, kimi, moonshot). Added auto-detection of API-key
providers in resolve_provider(). Added resolve_api_key_provider_credentials()
and get_api_key_provider_status() helpers.
- runtime_provider.py: Added generic API-key provider branch in
resolve_runtime_provider() — any provider with auth_type='api_key'
is automatically handled.
- main.py: Added providers to hermes model menu with generic
_model_flow_api_key_provider() flow. Updated _has_any_provider_configured()
to check all provider env vars. Updated argparse --provider choices.
- setup.py: Added providers to setup wizard with API key prompts and
curated model lists.
- config.py: Added env vars (GLM_API_KEY, KIMI_API_KEY, MINIMAX_API_KEY,
etc.) to OPTIONAL_ENV_VARS.
- status.py: Added API key display and provider status section.
- doctor.py: Added connectivity checks for each provider endpoint.
- cli.py: Updated provider docstrings.
Docs: Updated README.md, .env.example, cli-config.yaml.example,
cli-commands.md, environment-variables.md, configuration.md.
Tests: 50 new tests covering registry, aliases, resolution, auto-detection,
credential resolution, and runtime provider dispatch.
Inspired by PR #33 (numman-ali) which proposed a provider registry approach.
Credit to tars90percent (PR #473) and manuelschipper (PR #420) for related
provider improvements merged earlier in this changeset.
2026-03-06 18:55:12 -08:00
|
|
|
|
|
|
|
|
    # -- API-key providers (Z.AI/GLM, Kimi, MiniMax, MiniMax-CN) --
    # Tuple: (name, env_vars, default_url, base_env, supports_models_endpoint)
    # If supports_models_endpoint is False, we skip the health check and just show "configured"
    _apikey_providers = [
        ("Z.AI / GLM", ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"), "https://api.z.ai/api/paas/v4/models", "GLM_BASE_URL", True),
        ("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL", True),
        # MiniMax APIs don't support /models endpoint — https://github.com/NousResearch/hermes-agent/issues/811
        ("MiniMax", ("MINIMAX_API_KEY",), None, "MINIMAX_BASE_URL", False),
        ("MiniMax (China)", ("MINIMAX_CN_API_KEY",), None, "MINIMAX_CN_BASE_URL", False),
        ("AI Gateway", ("AI_GATEWAY_API_KEY",), "https://ai-gateway.vercel.sh/v1/models", "AI_GATEWAY_BASE_URL", True),
        ("Kilo Code", ("KILOCODE_API_KEY",), "https://api.kilo.ai/api/gateway/models", "KILOCODE_BASE_URL", True),
    ]
    for _pname, _env_vars, _default_url, _base_env, _supports_health_check in _apikey_providers:
        # First non-empty env var wins (some providers accept several aliases).
        _key = ""
        for _ev in _env_vars:
            _key = os.getenv(_ev, "")
            if _key:
                break
        if _key:
            # Fixed-width label keeps the ✓/✗/⚠ columns aligned across providers.
            _label = _pname.ljust(20)
            # Some providers (like MiniMax) don't support /models endpoint
            if not _supports_health_check:
                print(f"  {color('✓', Colors.GREEN)} {_label} {color('(key configured)', Colors.DIM)}")
                continue
            print(f"  Checking {_pname} API...", end="", flush=True)
            try:
                import httpx
                # A user-supplied base URL overrides the provider's default endpoint.
                _base = os.getenv(_base_env, "")
                # Auto-detect Kimi Code keys (sk-kimi-) → api.kimi.com
                if not _base and _key.startswith("sk-kimi-"):
                    _base = "https://api.kimi.com/coding/v1"
                _url = (_base.rstrip("/") + "/models") if _base else _default_url
                _headers = {"Authorization": f"Bearer {_key}"}
                # api.kimi.com expects a CLI-style User-Agent header.
                if "api.kimi.com" in _url.lower():
                    _headers["User-Agent"] = "KimiCLI/1.0"
                _resp = httpx.get(
                    _url,
                    headers=_headers,
                    timeout=10,
                )
                if _resp.status_code == 200:
                    print(f"\r  {color('✓', Colors.GREEN)} {_label}                              ")
                elif _resp.status_code == 401:
                    # Rejected key: point the user at the primary env var name.
                    print(f"\r  {color('✗', Colors.RED)} {_label} {color('(invalid API key)', Colors.DIM)}          ")
                    issues.append(f"Check {_env_vars[0]} in .env")
                else:
                    # Other statuses are inconclusive (⚠), not recorded as issues.
                    print(f"\r  {color('⚠', Colors.YELLOW)} {_label} {color(f'(HTTP {_resp.status_code})', Colors.DIM)}          ")
            except Exception as _e:
                print(f"\r  {color('⚠', Colors.YELLOW)} {_label} {color(f'({_e})', Colors.DIM)}          ")
|
|
|
|
|
|
2026-02-02 19:28:27 -08:00
|
|
|
# =========================================================================
|
2026-02-07 00:05:04 +00:00
|
|
|
# Check: Submodules
|
|
|
|
|
# =========================================================================
|
|
|
|
|
print()
|
|
|
|
|
print(color("◆ Submodules", Colors.CYAN, Colors.BOLD))
|
|
|
|
|
|
|
|
|
|
# tinker-atropos (RL training backend)
|
|
|
|
|
tinker_dir = PROJECT_ROOT / "tinker-atropos"
|
|
|
|
|
if tinker_dir.exists() and (tinker_dir / "pyproject.toml").exists():
|
|
|
|
|
if py_version >= (3, 11):
|
|
|
|
|
try:
|
|
|
|
|
__import__("tinker_atropos")
|
|
|
|
|
check_ok("tinker-atropos", "(RL training backend)")
|
|
|
|
|
except ImportError:
|
2026-02-07 23:54:53 +00:00
|
|
|
check_warn("tinker-atropos found but not installed", "(run: uv pip install -e ./tinker-atropos)")
|
|
|
|
|
issues.append("Install tinker-atropos: uv pip install -e ./tinker-atropos")
|
2026-02-07 00:05:04 +00:00
|
|
|
else:
|
|
|
|
|
check_warn("tinker-atropos requires Python 3.11+", f"(current: {py_version.major}.{py_version.minor})")
|
|
|
|
|
else:
|
|
|
|
|
check_warn("tinker-atropos not found", "(run: git submodule update --init --recursive)")
|
|
|
|
|
|
|
|
|
|
    # =========================================================================
    # Check: Tool Availability
    # =========================================================================
    print()
    print(color("◆ Tool Availability", Colors.CYAN, Colors.BOLD))

    try:
        # Add project root to path for imports
        sys.path.insert(0, str(PROJECT_ROOT))
        from model_tools import check_tool_availability, TOOLSET_REQUIREMENTS

        available, unavailable = check_tool_availability()
        # Apply doctor-specific overrides (helper defined elsewhere in this module).
        available, unavailable = _apply_doctor_tool_availability_overrides(available, unavailable)

        for tid in available:
            # Prefer the human-readable name; fall back to the raw toolset id.
            info = TOOLSET_REQUIREMENTS.get(tid, {})
            check_ok(info.get("name", tid))

        for item in unavailable:
            # "missing_vars" is the newer key; "env_vars" kept as a fallback.
            env_vars = item.get("missing_vars") or item.get("env_vars") or []
            if env_vars:
                vars_str = ", ".join(env_vars)
                check_warn(item["name"], f"(missing {vars_str})")
            else:
                check_warn(item["name"], "(system dependency not met)")

        # Count disabled tools with API key requirements
        api_disabled = [u for u in unavailable if (u.get("missing_vars") or u.get("env_vars"))]
        if api_disabled:
            issues.append("Run 'hermes setup' to configure missing API keys for full tool access")
    except Exception as e:
        # Best-effort check: never let a tooling error abort the whole doctor run.
        check_warn("Could not check tool availability", f"({e})")
|
|
|
|
|
|
Add Skills Hub — universal skill search, install, and management from online registries
Implements the Hermes Skills Hub with agentskills.io spec compliance,
multi-registry skill discovery, security scanning, and user-driven
management via CLI and /skills slash command.
Core features:
- Security scanner (tools/skills_guard.py): 120 threat patterns across
12 categories, trust-aware install policy (builtin/trusted/community),
structural checks, unicode injection detection, LLM audit pass
- Hub client (tools/skills_hub.py): GitHub, ClawHub, Claude Code
marketplace, and LobeHub source adapters with shared GitHubAuth
(PAT + gh CLI + GitHub App), lock file provenance tracking, quarantine
flow, and unified search across all sources
- CLI interface (hermes_cli/skills_hub.py): search, install, inspect,
list, audit, uninstall, publish (GitHub PR), snapshot export/import,
and tap management — powers both `hermes skills` and `/skills`
Spec conformance (Phase 0):
- Upgraded frontmatter parser to yaml.safe_load with fallback
- Migrated 39 SKILL.md files: tags/related_skills to metadata.hermes.*
- Added assets/ directory support and compatibility/metadata fields
- Excluded .hub/ from skill discovery in skills_tool.py
Updated 13 config/doc files including README, AGENTS.md, .env.example,
setup wizard, doctor, status, pyproject.toml, and docs.
2026-02-18 16:09:05 -08:00
|
|
|
    # =========================================================================
    # Check: Skills Hub
    # =========================================================================
    print()
    print(color("◆ Skills Hub", Colors.CYAN, Colors.BOLD))

    hub_dir = HERMES_HOME / "skills" / ".hub"
    if hub_dir.exists():
        check_ok("Skills Hub directory exists")
        # lock.json records provenance for hub-installed skills.
        lock_file = hub_dir / "lock.json"
        if lock_file.exists():
            try:
                import json
                lock_data = json.loads(lock_file.read_text())
                count = len(lock_data.get("installed", {}))
                check_ok(f"Lock file OK ({count} hub-installed skill(s))")
            except Exception:
                check_warn("Lock file", "(corrupted or unreadable)")
        # Each quarantined skill is a subdirectory awaiting manual review.
        quarantine = hub_dir / "quarantine"
        q_count = sum(1 for d in quarantine.iterdir() if d.is_dir()) if quarantine.exists() else 0
        if q_count > 0:
            check_warn(f"{q_count} skill(s) in quarantine", "(pending review)")
    else:
        check_warn("Skills Hub directory not initialized", "(run: hermes skills list)")

    # GitHub token raises the API rate limit for hub search/install operations.
    from hermes_cli.config import get_env_value
    github_token = get_env_value("GITHUB_TOKEN") or get_env_value("GH_TOKEN")
    if github_token:
        check_ok("GitHub token configured (authenticated API access)")
    else:
        check_warn("No GITHUB_TOKEN", f"(60 req/hr rate limit — set in {_DHH}/.env for better rates)")
|
Add Skills Hub — universal skill search, install, and management from online registries
Implements the Hermes Skills Hub with agentskills.io spec compliance,
multi-registry skill discovery, security scanning, and user-driven
management via CLI and /skills slash command.
Core features:
- Security scanner (tools/skills_guard.py): 120 threat patterns across
12 categories, trust-aware install policy (builtin/trusted/community),
structural checks, unicode injection detection, LLM audit pass
- Hub client (tools/skills_hub.py): GitHub, ClawHub, Claude Code
marketplace, and LobeHub source adapters with shared GitHubAuth
(PAT + gh CLI + GitHub App), lock file provenance tracking, quarantine
flow, and unified search across all sources
- CLI interface (hermes_cli/skills_hub.py): search, install, inspect,
list, audit, uninstall, publish (GitHub PR), snapshot export/import,
and tap management — powers both `hermes skills` and `/skills`
Spec conformance (Phase 0):
- Upgraded frontmatter parser to yaml.safe_load with fallback
- Migrated 39 SKILL.md files: tags/related_skills to metadata.hermes.*
- Added assets/ directory support and compatibility/metadata fields
- Excluded .hub/ from skill discovery in skills_tool.py
Updated 13 config/doc files including README, AGENTS.md, .env.example,
setup wizard, doctor, status, pyproject.toml, and docs.
2026-02-18 16:09:05 -08:00
|
|
|
|
feat(honcho): async memory integration with prefetch pipeline and recallMode
Adds full Honcho memory integration to Hermes:
- Session manager with async background writes, memory modes (honcho/hybrid/local),
and dialectic prefetch for first-turn context warming
- Agent integration: prefetch pipeline, tool surface gated by recallMode,
system prompt context injection, SIGTERM/SIGINT flush handlers
- CLI commands: setup, status, mode, tokens, peer, identity, migrate
- recallMode setting (auto | context | tools) for A/B testing retrieval strategies
- Session strategies: per-session, per-repo (git tree root), per-directory, global
- Polymorphic memoryMode config: string shorthand or per-peer object overrides
- 97 tests covering async writes, client config, session resolution, and memory modes
2026-03-09 15:58:22 -04:00
|
|
|
# =========================================================================
|
|
|
|
|
# Honcho memory
|
|
|
|
|
# =========================================================================
|
|
|
|
|
print()
|
|
|
|
|
print(color("◆ Honcho Memory", Colors.CYAN, Colors.BOLD))
|
|
|
|
|
|
|
|
|
|
try:
|
2026-03-21 09:34:00 -07:00
|
|
|
from honcho_integration.client import HonchoClientConfig, resolve_config_path
|
feat(honcho): async memory integration with prefetch pipeline and recallMode
Adds full Honcho memory integration to Hermes:
- Session manager with async background writes, memory modes (honcho/hybrid/local),
and dialectic prefetch for first-turn context warming
- Agent integration: prefetch pipeline, tool surface gated by recallMode,
system prompt context injection, SIGTERM/SIGINT flush handlers
- CLI commands: setup, status, mode, tokens, peer, identity, migrate
- recallMode setting (auto | context | tools) for A/B testing retrieval strategies
- Session strategies: per-session, per-repo (git tree root), per-directory, global
- Polymorphic memoryMode config: string shorthand or per-peer object overrides
- 97 tests covering async writes, client config, session resolution, and memory modes
2026-03-09 15:58:22 -04:00
|
|
|
hcfg = HonchoClientConfig.from_global_config()
|
2026-03-21 09:34:00 -07:00
|
|
|
_honcho_cfg_path = resolve_config_path()
|
feat(honcho): async memory integration with prefetch pipeline and recallMode
Adds full Honcho memory integration to Hermes:
- Session manager with async background writes, memory modes (honcho/hybrid/local),
and dialectic prefetch for first-turn context warming
- Agent integration: prefetch pipeline, tool surface gated by recallMode,
system prompt context injection, SIGTERM/SIGINT flush handlers
- CLI commands: setup, status, mode, tokens, peer, identity, migrate
- recallMode setting (auto | context | tools) for A/B testing retrieval strategies
- Session strategies: per-session, per-repo (git tree root), per-directory, global
- Polymorphic memoryMode config: string shorthand or per-peer object overrides
- 97 tests covering async writes, client config, session resolution, and memory modes
2026-03-09 15:58:22 -04:00
|
|
|
|
2026-03-21 09:34:00 -07:00
|
|
|
if not _honcho_cfg_path.exists():
|
chore: fix 154 f-strings, simplify getattr/URL patterns, remove dead code (#3119)
Three categories of cleanup, all zero-behavioral-change:
1. F-strings without placeholders (154 fixes across 29 files)
- Converted f'...' to '...' where no {expression} was present
- Heaviest files: run_agent.py (24), cli.py (20), honcho_integration/cli.py (34)
2. Simplify defensive patterns in run_agent.py
- Added explicit self._is_anthropic_oauth = False in __init__ (before
the api_mode branch that conditionally sets it)
- Replaced 7x getattr(self, '_is_anthropic_oauth', False) with direct
self._is_anthropic_oauth (attribute always initialized now)
- Added _is_openrouter_url() and _is_anthropic_url() helper methods
- Replaced 3 inline 'openrouter' in self._base_url_lower checks
3. Remove dead code in small files
- hermes_cli/claw.py: removed unused 'total' computation
- tools/fuzzy_match.py: removed unused strip_indent() function and
pattern_stripped variable
Full test suite: 6184 passed, 0 failures
E2E PTY: banner clean, tool calls work, zero garbled ANSI
2026-03-25 19:47:58 -07:00
|
|
|
check_warn("Honcho config not found", "run: hermes honcho setup")
|
feat(honcho): async memory integration with prefetch pipeline and recallMode
Adds full Honcho memory integration to Hermes:
- Session manager with async background writes, memory modes (honcho/hybrid/local),
and dialectic prefetch for first-turn context warming
- Agent integration: prefetch pipeline, tool surface gated by recallMode,
system prompt context injection, SIGTERM/SIGINT flush handlers
- CLI commands: setup, status, mode, tokens, peer, identity, migrate
- recallMode setting (auto | context | tools) for A/B testing retrieval strategies
- Session strategies: per-session, per-repo (git tree root), per-directory, global
- Polymorphic memoryMode config: string shorthand or per-peer object overrides
- 97 tests covering async writes, client config, session resolution, and memory modes
2026-03-09 15:58:22 -04:00
|
|
|
elif not hcfg.enabled:
|
2026-03-21 09:34:00 -07:00
|
|
|
check_info(f"Honcho disabled (set enabled: true in {_honcho_cfg_path} to activate)")
|
2026-03-28 17:49:56 -07:00
|
|
|
elif not (hcfg.api_key or hcfg.base_url):
|
|
|
|
|
check_fail("Honcho API key or base URL not set", "run: hermes honcho setup")
|
feat(honcho): async memory integration with prefetch pipeline and recallMode
Adds full Honcho memory integration to Hermes:
- Session manager with async background writes, memory modes (honcho/hybrid/local),
and dialectic prefetch for first-turn context warming
- Agent integration: prefetch pipeline, tool surface gated by recallMode,
system prompt context injection, SIGTERM/SIGINT flush handlers
- CLI commands: setup, status, mode, tokens, peer, identity, migrate
- recallMode setting (auto | context | tools) for A/B testing retrieval strategies
- Session strategies: per-session, per-repo (git tree root), per-directory, global
- Polymorphic memoryMode config: string shorthand or per-peer object overrides
- 97 tests covering async writes, client config, session resolution, and memory modes
2026-03-09 15:58:22 -04:00
|
|
|
issues.append("No Honcho API key — run 'hermes honcho setup'")
|
|
|
|
|
else:
|
|
|
|
|
from honcho_integration.client import get_honcho_client, reset_honcho_client
|
|
|
|
|
reset_honcho_client()
|
|
|
|
|
try:
|
|
|
|
|
get_honcho_client(hcfg)
|
|
|
|
|
check_ok(
|
|
|
|
|
"Honcho connected",
|
|
|
|
|
f"workspace={hcfg.workspace_id} mode={hcfg.memory_mode} freq={hcfg.write_frequency}",
|
|
|
|
|
)
|
|
|
|
|
except Exception as _e:
|
|
|
|
|
check_fail("Honcho connection failed", str(_e))
|
|
|
|
|
issues.append(f"Honcho unreachable: {_e}")
|
|
|
|
|
except ImportError:
|
|
|
|
|
check_warn("honcho-ai not installed", "pip install honcho-ai")
|
|
|
|
|
except Exception as _e:
|
|
|
|
|
check_warn("Honcho check failed", str(_e))
|
|
|
|
|
|
feat: add profiles — run multiple isolated Hermes instances (#3681)
Each profile is a fully independent HERMES_HOME with its own config,
API keys, memory, sessions, skills, gateway, cron, and state.db.
Core module: hermes_cli/profiles.py (~900 lines)
- Profile CRUD: create, delete, list, show, rename
- Three clone levels: blank, --clone (config), --clone-all (everything)
- Export/import: tar.gz archive for backup and migration
- Wrapper alias scripts (~/.local/bin/<name>)
- Collision detection for alias names
- Sticky default via ~/.hermes/active_profile
- Skill seeding via subprocess (handles module-level caching)
- Auto-stop gateway on delete with disable-before-stop for services
- Tab completion generation for bash and zsh
CLI integration (hermes_cli/main.py):
- _apply_profile_override(): pre-import -p/--profile flag + sticky default
- Full 'hermes profile' subcommand: list, use, create, delete, show,
alias, rename, export, import
- 'hermes completion bash/zsh' command
- Multi-profile skill sync in hermes update
Display (cli.py, banner.py, gateway/run.py):
- CLI prompt: 'coder ❯' when using a non-default profile
- Banner shows profile name
- Gateway startup log includes profile name
Gateway safety:
- Token locks: Discord, Slack, WhatsApp, Signal (extends Telegram pattern)
- Port conflict detection: API server, webhook adapter
Diagnostics (hermes_cli/doctor.py):
- Profile health section: lists profiles, checks config, .env, aliases
- Orphan alias detection: warns when wrapper points to deleted profile
Tests (tests/hermes_cli/test_profiles.py):
- 71 automated tests covering: validation, CRUD, clone levels, rename,
export/import, active profile, isolation, alias collision, completion
- Full suite: 6760 passed, 0 new failures
Documentation:
- website/docs/user-guide/profiles.md: full user guide (12 sections)
- website/docs/reference/profile-commands.md: command reference (12 commands)
- website/docs/reference/faq.md: 6 profile FAQ entries
- website/sidebars.ts: navigation updated
2026-03-29 10:41:20 -07:00
|
|
|
# =========================================================================
|
|
|
|
|
# Profiles
|
|
|
|
|
# =========================================================================
|
|
|
|
|
try:
|
|
|
|
|
from hermes_cli.profiles import list_profiles, _get_wrapper_dir, profile_exists
|
|
|
|
|
import re as _re
|
|
|
|
|
|
|
|
|
|
named_profiles = [p for p in list_profiles() if not p.is_default]
|
|
|
|
|
if named_profiles:
|
|
|
|
|
print()
|
|
|
|
|
print(color("◆ Profiles", Colors.CYAN, Colors.BOLD))
|
|
|
|
|
check_ok(f"{len(named_profiles)} profile(s) found")
|
|
|
|
|
wrapper_dir = _get_wrapper_dir()
|
|
|
|
|
for p in named_profiles:
|
|
|
|
|
parts = []
|
|
|
|
|
if p.gateway_running:
|
|
|
|
|
parts.append("gateway running")
|
|
|
|
|
if p.model:
|
|
|
|
|
parts.append(p.model[:30])
|
|
|
|
|
if not (p.path / "config.yaml").exists():
|
|
|
|
|
parts.append("⚠ missing config")
|
|
|
|
|
if not (p.path / ".env").exists():
|
|
|
|
|
parts.append("no .env")
|
|
|
|
|
wrapper = wrapper_dir / p.name
|
|
|
|
|
if not wrapper.exists():
|
|
|
|
|
parts.append("no alias")
|
|
|
|
|
status = ", ".join(parts) if parts else "configured"
|
|
|
|
|
check_ok(f" {p.name}: {status}")
|
|
|
|
|
|
|
|
|
|
# Check for orphan wrappers
|
|
|
|
|
if wrapper_dir.is_dir():
|
|
|
|
|
for wrapper in wrapper_dir.iterdir():
|
|
|
|
|
if not wrapper.is_file():
|
|
|
|
|
continue
|
|
|
|
|
try:
|
|
|
|
|
content = wrapper.read_text()
|
|
|
|
|
if "hermes -p" in content:
|
|
|
|
|
_m = _re.search(r"hermes -p (\S+)", content)
|
|
|
|
|
if _m and not profile_exists(_m.group(1)):
|
|
|
|
|
check_warn(f"Orphan alias: {wrapper.name} → profile '{_m.group(1)}' no longer exists")
|
|
|
|
|
except Exception:
|
|
|
|
|
pass
|
|
|
|
|
except ImportError:
|
|
|
|
|
pass
|
|
|
|
|
except Exception as _e:
|
|
|
|
|
logger.debug("Profile health check failed: %s", _e)
|
|
|
|
|
|
2026-02-02 19:01:51 -08:00
|
|
|
# =========================================================================
|
|
|
|
|
# Summary
|
|
|
|
|
# =========================================================================
|
|
|
|
|
print()
|
2026-02-22 02:16:11 -08:00
|
|
|
remaining_issues = issues + manual_issues
|
|
|
|
|
if should_fix and fixed_count > 0:
|
|
|
|
|
print(color("─" * 60, Colors.GREEN))
|
|
|
|
|
print(color(f" Fixed {fixed_count} issue(s).", Colors.GREEN, Colors.BOLD), end="")
|
|
|
|
|
if remaining_issues:
|
|
|
|
|
print(color(f" {len(remaining_issues)} issue(s) require manual intervention.", Colors.YELLOW, Colors.BOLD))
|
|
|
|
|
else:
|
|
|
|
|
print()
|
|
|
|
|
print()
|
|
|
|
|
if remaining_issues:
|
|
|
|
|
for i, issue in enumerate(remaining_issues, 1):
|
|
|
|
|
print(f" {i}. {issue}")
|
|
|
|
|
print()
|
|
|
|
|
elif remaining_issues:
|
2026-02-02 19:01:51 -08:00
|
|
|
print(color("─" * 60, Colors.YELLOW))
|
2026-02-22 02:16:11 -08:00
|
|
|
print(color(f" Found {len(remaining_issues)} issue(s) to address:", Colors.YELLOW, Colors.BOLD))
|
2026-02-02 19:01:51 -08:00
|
|
|
print()
|
2026-02-22 02:16:11 -08:00
|
|
|
for i, issue in enumerate(remaining_issues, 1):
|
2026-02-02 19:01:51 -08:00
|
|
|
print(f" {i}. {issue}")
|
|
|
|
|
print()
|
2026-02-22 02:16:11 -08:00
|
|
|
if not should_fix:
|
|
|
|
|
print(color(" Tip: run 'hermes doctor --fix' to auto-fix what's possible.", Colors.DIM))
|
2026-02-02 19:01:51 -08:00
|
|
|
else:
|
|
|
|
|
print(color("─" * 60, Colors.GREEN))
|
|
|
|
|
print(color(" All checks passed! 🎉", Colors.GREEN, Colors.BOLD))
|
|
|
|
|
|
|
|
|
|
print()
|