fix: first-run guard stuck in loop when provider configured via config.yaml (#4298)
The _has_any_provider_configured() guard only checked env vars, .env file, and auth.json — missing config.yaml model.provider/base_url/api_key entirely. Users who configured a provider through setup (saving to config.yaml) but had empty API key placeholders in .env from the install template were permanently blocked by the 'not configured' message. Changes: - _has_any_provider_configured() now checks config.yaml model section for explicit provider, base_url, or api_key — covers custom endpoints and providers that store credentials in config rather than env vars - .env.example: comment out all empty API key placeholders so they don't pollute the environment when copied to .env by the installer - .env.example: mark LLM_MODEL as deprecated (config.yaml is source of truth) - 4 new tests for the config.yaml detection path Reported by OkadoOP on Discord.
This commit is contained in:
43
.env.example
43
.env.example
@@ -7,18 +7,19 @@
|
||||
# OpenRouter provides access to many models through one API
|
||||
# All LLM calls go through OpenRouter - no direct provider keys needed
|
||||
# Get your key at: https://openrouter.ai/keys
|
||||
OPENROUTER_API_KEY=
|
||||
# OPENROUTER_API_KEY=
|
||||
|
||||
# Default model to use (OpenRouter format: provider/model)
|
||||
# Examples: anthropic/claude-opus-4.6, openai/gpt-4o, google/gemini-3-flash-preview, zhipuai/glm-4-plus
|
||||
LLM_MODEL=anthropic/claude-opus-4.6
|
||||
# Default model is configured in ~/.hermes/config.yaml (model.default).
|
||||
# Use 'hermes model' or 'hermes setup' to change it.
|
||||
# LLM_MODEL is no longer read from .env — this line is kept for reference only.
|
||||
# LLM_MODEL=anthropic/claude-opus-4.6
|
||||
|
||||
# =============================================================================
|
||||
# LLM PROVIDER (z.ai / GLM)
|
||||
# =============================================================================
|
||||
# z.ai provides access to ZhipuAI GLM models (GLM-4-Plus, etc.)
|
||||
# Get your key at: https://z.ai or https://open.bigmodel.cn
|
||||
GLM_API_KEY=
|
||||
# GLM_API_KEY=
|
||||
# GLM_BASE_URL=https://api.z.ai/api/paas/v4 # Override default base URL
|
||||
|
||||
# =============================================================================
|
||||
@@ -28,7 +29,7 @@ GLM_API_KEY=
|
||||
# Get your key at: https://platform.kimi.ai (Kimi Code console)
|
||||
# Keys prefixed sk-kimi- use the Kimi Code API (api.kimi.com) by default.
|
||||
# Legacy keys from platform.moonshot.ai need KIMI_BASE_URL override below.
|
||||
KIMI_API_KEY=
|
||||
# KIMI_API_KEY=
|
||||
# KIMI_BASE_URL=https://api.kimi.com/coding/v1 # Default for sk-kimi- keys
|
||||
# KIMI_BASE_URL=https://api.moonshot.ai/v1 # For legacy Moonshot keys
|
||||
# KIMI_BASE_URL=https://api.moonshot.cn/v1 # For Moonshot China keys
|
||||
@@ -38,11 +39,11 @@ KIMI_API_KEY=
|
||||
# =============================================================================
|
||||
# MiniMax provides access to MiniMax models (global endpoint)
|
||||
# Get your key at: https://www.minimax.io
|
||||
MINIMAX_API_KEY=
|
||||
# MINIMAX_API_KEY=
|
||||
# MINIMAX_BASE_URL=https://api.minimax.io/v1 # Override default base URL
|
||||
|
||||
# MiniMax China endpoint (for users in mainland China)
|
||||
MINIMAX_CN_API_KEY=
|
||||
# MINIMAX_CN_API_KEY=
|
||||
# MINIMAX_CN_BASE_URL=https://api.minimaxi.com/v1 # Override default base URL
|
||||
|
||||
# =============================================================================
|
||||
@@ -50,7 +51,7 @@ MINIMAX_CN_API_KEY=
|
||||
# =============================================================================
|
||||
# OpenCode Zen provides curated, tested models (GPT, Claude, Gemini, MiniMax, GLM, Kimi)
|
||||
# Pay-as-you-go pricing. Get your key at: https://opencode.ai/auth
|
||||
OPENCODE_ZEN_API_KEY=
|
||||
# OPENCODE_ZEN_API_KEY=
|
||||
# OPENCODE_ZEN_BASE_URL=https://opencode.ai/zen/v1 # Override default base URL
|
||||
|
||||
# =============================================================================
|
||||
@@ -58,7 +59,7 @@ OPENCODE_ZEN_API_KEY=
|
||||
# =============================================================================
|
||||
# OpenCode Go provides access to open models (GLM-5, Kimi K2.5, MiniMax M2.5)
|
||||
# $10/month subscription. Get your key at: https://opencode.ai/auth
|
||||
OPENCODE_GO_API_KEY=
|
||||
# OPENCODE_GO_API_KEY=
|
||||
|
||||
# =============================================================================
|
||||
# LLM PROVIDER (Hugging Face Inference Providers)
|
||||
@@ -67,7 +68,7 @@ OPENCODE_GO_API_KEY=
|
||||
# Free tier included ($0.10/month), no markup on provider rates.
|
||||
# Get your token at: https://huggingface.co/settings/tokens
|
||||
# Required permission: "Make calls to Inference Providers"
|
||||
HF_TOKEN=
|
||||
# HF_TOKEN=
|
||||
# OPENCODE_GO_BASE_URL=https://opencode.ai/zen/go/v1 # Override default base URL
|
||||
|
||||
# =============================================================================
|
||||
@@ -76,26 +77,26 @@ HF_TOKEN=
|
||||
|
||||
# Exa API Key - AI-native web search and contents
|
||||
# Get at: https://exa.ai
|
||||
EXA_API_KEY=
|
||||
# EXA_API_KEY=
|
||||
|
||||
# Parallel API Key - AI-native web search and extract
|
||||
# Get at: https://parallel.ai
|
||||
PARALLEL_API_KEY=
|
||||
# PARALLEL_API_KEY=
|
||||
|
||||
# Firecrawl API Key - Web search, extract, and crawl
|
||||
# Get at: https://firecrawl.dev/
|
||||
FIRECRAWL_API_KEY=
|
||||
# FIRECRAWL_API_KEY=
|
||||
|
||||
|
||||
# FAL.ai API Key - Image generation
|
||||
# Get at: https://fal.ai/
|
||||
FAL_KEY=
|
||||
# FAL_KEY=
|
||||
|
||||
# Honcho - Cross-session AI-native user modeling (optional)
|
||||
# Builds a persistent understanding of the user across sessions and tools.
|
||||
# Get at: https://app.honcho.dev
|
||||
# Also requires ~/.honcho/config.json with enabled=true (see README).
|
||||
HONCHO_API_KEY=
|
||||
# HONCHO_API_KEY=
|
||||
|
||||
# =============================================================================
|
||||
# TERMINAL TOOL CONFIGURATION
|
||||
@@ -181,10 +182,10 @@ TERMINAL_LIFETIME_SECONDS=300
|
||||
|
||||
# Browserbase API Key - Cloud browser execution
|
||||
# Get at: https://browserbase.com/
|
||||
BROWSERBASE_API_KEY=
|
||||
# BROWSERBASE_API_KEY=
|
||||
|
||||
# Browserbase Project ID - From your Browserbase dashboard
|
||||
BROWSERBASE_PROJECT_ID=
|
||||
# BROWSERBASE_PROJECT_ID=
|
||||
|
||||
# Enable residential proxies for better CAPTCHA solving (default: true)
|
||||
# Routes traffic through residential IPs, significantly improves success rate
|
||||
@@ -216,7 +217,7 @@ BROWSER_INACTIVITY_TIMEOUT=120
|
||||
# Uses OpenAI's API directly (not via OpenRouter).
|
||||
# Named VOICE_TOOLS_OPENAI_KEY to avoid interference with OpenRouter.
|
||||
# Get at: https://platform.openai.com/api-keys
|
||||
VOICE_TOOLS_OPENAI_KEY=
|
||||
# VOICE_TOOLS_OPENAI_KEY=
|
||||
|
||||
# =============================================================================
|
||||
# SLACK INTEGRATION
|
||||
@@ -302,11 +303,11 @@ IMAGE_TOOLS_DEBUG=false
|
||||
|
||||
# Tinker API Key - RL training service
|
||||
# Get at: https://tinker-console.thinkingmachines.ai/keys
|
||||
TINKER_API_KEY=
|
||||
# TINKER_API_KEY=
|
||||
|
||||
# Weights & Biases API Key - Experiment tracking and metrics
|
||||
# Get at: https://wandb.ai/authorize
|
||||
WANDB_API_KEY=
|
||||
# WANDB_API_KEY=
|
||||
|
||||
# RL API Server URL (default: http://localhost:8080)
|
||||
# Change if running the rl-server on a different host/port
|
||||
|
||||
@@ -246,6 +246,17 @@ def _has_any_provider_configured() -> bool:
|
||||
pass
|
||||
|
||||
|
||||
# Check config.yaml — if model is a dict with an explicit provider set,
|
||||
# the user has gone through setup (fresh installs have model as a plain
|
||||
# string). Also covers custom endpoints that store api_key/base_url in
|
||||
# config rather than .env.
|
||||
if isinstance(model_cfg, dict):
|
||||
cfg_provider = (model_cfg.get("provider") or "").strip()
|
||||
cfg_base_url = (model_cfg.get("base_url") or "").strip()
|
||||
cfg_api_key = (model_cfg.get("api_key") or "").strip()
|
||||
if cfg_provider or cfg_base_url or cfg_api_key:
|
||||
return True
|
||||
|
||||
# Check for Claude Code OAuth credentials (~/.claude/.credentials.json)
|
||||
# Only count these if Hermes has been explicitly configured — Claude Code
|
||||
# being installed doesn't mean the user wants Hermes to use their tokens.
|
||||
|
||||
@@ -645,6 +645,83 @@ class TestHasAnyProviderConfigured:
|
||||
from hermes_cli.main import _has_any_provider_configured
|
||||
assert _has_any_provider_configured() is False
|
||||
|
||||
def test_config_provider_counts(self, monkeypatch, tmp_path):
    """config.yaml with model.provider set should count as configured."""
    import yaml
    from hermes_cli import config as config_module

    home = tmp_path / ".hermes"
    home.mkdir()
    payload = {"model": {"default": "anthropic/claude-opus-4.6", "provider": "openrouter"}}
    (home / "config.yaml").write_text(yaml.dump(payload))

    # Point every config lookup at the isolated temp home.
    monkeypatch.setattr(config_module, "get_env_path", lambda: home / ".env")
    monkeypatch.setattr(config_module, "get_hermes_home", lambda: home)
    monkeypatch.setenv("HERMES_HOME", str(home))

    # Clear all provider env vars so only config.yaml can satisfy the guard.
    provider_vars = (
        "OPENROUTER_API_KEY",
        "OPENAI_API_KEY",
        "ANTHROPIC_API_KEY",
        "ANTHROPIC_TOKEN",
        "OPENAI_BASE_URL",
    )
    for name in provider_vars:
        monkeypatch.delenv(name, raising=False)

    from hermes_cli.main import _has_any_provider_configured

    assert _has_any_provider_configured() is True
|
||||
|
||||
def test_config_base_url_counts(self, monkeypatch, tmp_path):
    """config.yaml with model.base_url set (custom endpoint) should count."""
    import yaml
    from hermes_cli import config as config_module

    home = tmp_path / ".hermes"
    home.mkdir()
    payload = {"model": {"default": "my-model", "base_url": "http://localhost:11434/v1"}}
    (home / "config.yaml").write_text(yaml.dump(payload))

    # Point every config lookup at the isolated temp home.
    monkeypatch.setattr(config_module, "get_env_path", lambda: home / ".env")
    monkeypatch.setattr(config_module, "get_hermes_home", lambda: home)
    monkeypatch.setenv("HERMES_HOME", str(home))

    # Clear all provider env vars so only config.yaml can satisfy the guard.
    provider_vars = (
        "OPENROUTER_API_KEY",
        "OPENAI_API_KEY",
        "ANTHROPIC_API_KEY",
        "ANTHROPIC_TOKEN",
        "OPENAI_BASE_URL",
    )
    for name in provider_vars:
        monkeypatch.delenv(name, raising=False)

    from hermes_cli.main import _has_any_provider_configured

    assert _has_any_provider_configured() is True
|
||||
|
||||
def test_config_api_key_counts(self, monkeypatch, tmp_path):
    """config.yaml with model.api_key set should count."""
    import yaml
    from hermes_cli import config as config_module

    home = tmp_path / ".hermes"
    home.mkdir()
    payload = {"model": {"default": "my-model", "api_key": "sk-test-key"}}
    (home / "config.yaml").write_text(yaml.dump(payload))

    # Point every config lookup at the isolated temp home.
    monkeypatch.setattr(config_module, "get_env_path", lambda: home / ".env")
    monkeypatch.setattr(config_module, "get_hermes_home", lambda: home)
    monkeypatch.setenv("HERMES_HOME", str(home))

    # Clear all provider env vars so only config.yaml can satisfy the guard.
    provider_vars = (
        "OPENROUTER_API_KEY",
        "OPENAI_API_KEY",
        "ANTHROPIC_API_KEY",
        "ANTHROPIC_TOKEN",
        "OPENAI_BASE_URL",
    )
    for name in provider_vars:
        monkeypatch.delenv(name, raising=False)

    from hermes_cli.main import _has_any_provider_configured

    assert _has_any_provider_configured() is True
|
||||
|
||||
def test_config_dict_no_provider_no_creds_still_false(self, monkeypatch, tmp_path):
    """config.yaml model dict with only 'default' key and no creds stays false.

    This is the negative case for the config.yaml detection path: a model
    section that is a dict but carries no provider, base_url, or api_key
    must NOT count as configured.

    Fix over the original: the original cleared only five env vars, so any
    other provider credential exported in the developer's shell (e.g.
    GLM_API_KEY, KIMI_API_KEY, MINIMAX_API_KEY, OPENCODE_*_API_KEY,
    HF_TOKEN — all providers listed in this repo's .env.example) could make
    _has_any_provider_configured() return True and the test spuriously
    fail. Clear the full provider set so the assertion is hermetic.
    """
    import yaml
    from hermes_cli import config as config_module

    home = tmp_path / ".hermes"
    home.mkdir()
    (home / "config.yaml").write_text(yaml.dump({
        "model": {"default": "anthropic/claude-opus-4.6"},
    }))

    # Point every config lookup at the isolated temp home.
    monkeypatch.setattr(config_module, "get_env_path", lambda: home / ".env")
    monkeypatch.setattr(config_module, "get_hermes_home", lambda: home)
    monkeypatch.setenv("HERMES_HOME", str(home))

    # Clear ALL provider env vars the guard may inspect, not just the
    # OpenAI/Anthropic/OpenRouter subset, so ambient shell state cannot
    # flip the expected False result.
    for var in (
        "OPENROUTER_API_KEY",
        "OPENAI_API_KEY",
        "ANTHROPIC_API_KEY",
        "ANTHROPIC_TOKEN",
        "OPENAI_BASE_URL",
        "GLM_API_KEY",
        "KIMI_API_KEY",
        "MINIMAX_API_KEY",
        "MINIMAX_CN_API_KEY",
        "OPENCODE_ZEN_API_KEY",
        "OPENCODE_GO_API_KEY",
        "HF_TOKEN",
    ):
        monkeypatch.delenv(var, raising=False)

    from hermes_cli.main import _has_any_provider_configured

    assert _has_any_provider_configured() is False
|
||||
|
||||
def test_claude_code_creds_counted_when_hermes_configured(self, monkeypatch, tmp_path):
|
||||
"""Claude Code credentials should count when Hermes has been explicitly configured."""
|
||||
import yaml
|
||||
|
||||
Reference in New Issue
Block a user