diff --git a/cli-config.yaml.example b/cli-config.yaml.example index acdc4ff2d..036efbc33 100644 --- a/cli-config.yaml.example +++ b/cli-config.yaml.example @@ -7,6 +7,7 @@ # ============================================================================= model: # Default model to use (can be overridden with --model flag) + # Both "default" and "model" work as the key name here. default: "anthropic/claude-opus-4.6" # Inference provider selection: diff --git a/cli.py b/cli.py index 82abba2e6..8aaf1378b 100644 --- a/cli.py +++ b/cli.py @@ -1078,7 +1078,7 @@ class HermesCLI: # authoritative. This avoids conflicts in multi-agent setups where # env vars would stomp each other. _model_config = CLI_CONFIG.get("model", {}) - _config_model = _model_config.get("default", "") if isinstance(_model_config, dict) else (_model_config or "") + _config_model = (_model_config.get("default") or _model_config.get("model") or "") if isinstance(_model_config, dict) else (_model_config or "") _FALLBACK_MODEL = "anthropic/claude-opus-4.6" self.model = model or _config_model or _FALLBACK_MODEL # Auto-detect model from local server if still on fallback diff --git a/gateway/run.py b/gateway/run.py index bd02d1e50..1cebf9760 100644 --- a/gateway/run.py +++ b/gateway/run.py @@ -288,7 +288,7 @@ def _resolve_gateway_model(config: dict | None = None) -> str: if isinstance(model_cfg, str): model = model_cfg elif isinstance(model_cfg, dict): - model = model_cfg.get("default", model) + model = model_cfg.get("default") or model_cfg.get("model") or model return model @@ -2093,7 +2093,7 @@ class GatewayRunner: if isinstance(_model_cfg, str): _hyg_model = _model_cfg elif isinstance(_model_cfg, dict): - _hyg_model = _model_cfg.get("default", _hyg_model) + _hyg_model = _model_cfg.get("default") or _model_cfg.get("model") or _hyg_model # Read explicit context_length override from model config # (same as run_agent.py lines 995-1005) _raw_ctx = _model_cfg.get("context_length") diff --git 
a/hermes_cli/runtime_provider.py b/hermes_cli/runtime_provider.py index 046e7d6d4..0c82805d5 100644 --- a/hermes_cli/runtime_provider.py +++ b/hermes_cli/runtime_provider.py @@ -63,6 +63,9 @@ def _get_model_config() -> Dict[str, Any]: model_cfg = config.get("model") if isinstance(model_cfg, dict): cfg = dict(model_cfg) + # Accept "model" as alias for "default" (users intuitively write model.model) + if not cfg.get("default") and cfg.get("model"): + cfg["default"] = cfg["model"] default = (cfg.get("default") or "").strip() base_url = (cfg.get("base_url") or "").strip() is_local = "localhost" in base_url or "127.0.0.1" in base_url diff --git a/website/docs/user-guide/configuration.md b/website/docs/user-guide/configuration.md index 9c5f5d179..3ebe0f268 100644 --- a/website/docs/user-guide/configuration.md +++ b/website/docs/user-guide/configuration.md @@ -95,6 +95,10 @@ You need at least one way to connect to an LLM. Use `hermes model` to switch pro | **Hugging Face** | `HF_TOKEN` in `~/.hermes/.env` (provider: `huggingface`, aliases: `hf`) | | **Custom Endpoint** | `hermes model` (saved in `config.yaml`) or `OPENAI_BASE_URL` + `OPENAI_API_KEY` in `~/.hermes/.env` | +:::tip Model key alias +In the `model:` config section, you can use either `default:` or `model:` as the key name for your model ID. Both `model: { default: my-model }` and `model: { model: my-model }` work; if both keys are set, `default:` takes precedence. +::: + :::info Codex Note The OpenAI Codex provider authenticates via device code (open a URL, enter a code). Hermes stores the resulting credentials in its own auth store under `~/.hermes/auth.json` and can import existing Codex CLI credentials from `~/.codex/auth.json` when present. No Codex CLI installation is required. :::