Compare commits


2 Commits

Author SHA1 Message Date
Alexander Whitestone
e852ec3533 feat: add ollama as first-class provider for local model inference (#169)
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 2s
Add 'ollama' as a recognized inference provider so local models (Gemma4,
Hermes3, Hermes4) can run through the agent harness without falling back
to OpenRouter.

Changes:
- hermes_cli/auth.py: Add ollama to PROVIDER_REGISTRY with
  base_url=http://localhost:11434/v1, dummy API key fallback (ollama
  needs no auth), remove 'ollama' -> 'custom' alias
- hermes_cli/main.py: Add 'ollama' to --provider choices
- hermes_cli/models.py: Add ollama model catalog (gemma4, hermes3,
  hermes4, llama3.1, qwen2.5-coder, etc.), label, and provider order
- hermes_cli/providers.py: Add HermesOverlay for ollama, remove
  'ollama' -> 'ollama-cloud' alias

Usage:
  hermes chat -m gemma4 --provider ollama
  hermes --profile gemma4-local chat -q 'hello'

Ollama exposes an OpenAI-compatible API at localhost:11434/v1.
No API key required (dummy 'ollama' token used for credential checks).
Override with OLLAMA_BASE_URL or OLLAMA_API_KEY env vars.
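For example (illustrative only; the remote host below is hypothetical), the base-URL
override can be applied for a single run, mirroring the usage above:
  import os, subprocess
  env = dict(os.environ, OLLAMA_BASE_URL="http://gpu-box.local:11434/v1")
  subprocess.run(["hermes", "chat", "-m", "gemma4", "--provider", "ollama"], env=env)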

Closes #169
2026-04-07 02:21:34 -04:00
3fc47a0e2e [claw-code] [CONFIG] Add Kimi model to fallback chain for Allegro and Bezalel (#151) (#177)
Some checks failed
Forge CI / smoke-and-build (push) Failing after 2s
Co-authored-by: claw-code <claw-code@timmy.local>
Co-committed-by: claw-code <claw-code@timmy.local>
2026-04-07 04:14:19 +00:00
4 changed files with 559 additions and 3 deletions

hermes_cli/auth.py

@@ -220,6 +220,14 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
api_key_env_vars=("HF_TOKEN",),
base_url_env_var="HF_BASE_URL",
),
"ollama": ProviderConfig(
id="ollama",
name="Ollama (Local)",
auth_type="api_key",
inference_base_url="http://localhost:11434/v1",
api_key_env_vars=("OLLAMA_API_KEY",),
base_url_env_var="OLLAMA_BASE_URL",
),
}
@@ -330,6 +338,15 @@ def _resolve_api_key_provider_secret(
            pass
        return "", ""

    # Ollama does not require an API key — check env vars first,
    # then fall back to a dummy token so credential checks pass.
    if provider_id == "ollama":
        for env_var in pconfig.api_key_env_vars:
            val = os.getenv(env_var, "").strip()
            if has_usable_secret(val):
                return val, env_var
        return "ollama", "default"
    for env_var in pconfig.api_key_env_vars:
        val = os.getenv(env_var, "").strip()
        if has_usable_secret(val):
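A minimal standalone sketch of the fallback rule in this hunk (illustrative only; the
helper name is invented and a plain truthiness check stands in for has_usable_secret):

import os

def _ollama_secret(env_vars=("OLLAMA_API_KEY",)):
    # An exported key wins; otherwise return the dummy token so
    # credential checks pass without real auth.
    for env_var in env_vars:
        val = os.getenv(env_var, "").strip()
        if val:
            return val, env_var
    return "ollama", "default"

# Unset -> ("ollama", "default"); OLLAMA_API_KEY=abc123 -> ("abc123", "OLLAMA_API_KEY")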
@@ -698,7 +715,7 @@ def resolve_provider(
"kilo": "kilocode", "kilo-code": "kilocode", "kilo-gateway": "kilocode",
# Local server aliases — route through the generic custom provider
"lmstudio": "custom", "lm-studio": "custom", "lm_studio": "custom",
"ollama": "custom", "vllm": "custom", "llamacpp": "custom",
"vllm": "custom", "llamacpp": "custom",
"llama.cpp": "custom", "llama-cpp": "custom",
}
normalized = _PROVIDER_ALIASES.get(normalized, normalized)

hermes_cli/main.py

@@ -3668,7 +3668,7 @@ For more help on a command:
    )
    chat_parser.add_argument(
        "--provider",
        choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode"],
        choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "gemini", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "ollama"],
        default=None,
        help="Inference provider (default: auto)"
    )
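A reduced sketch of what the choices gate does (hypothetical parser, not the real
main.py wiring):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--provider", choices=["auto", "openrouter", "ollama"], default=None)
print(parser.parse_args(["--provider", "ollama"]).provider)  # ollama
# Without "ollama" in choices, the same invocation exits with "invalid choice".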

hermes_cli/models.py

@@ -238,6 +238,20 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"XiaomiMiMo/MiMo-V2-Flash",
"moonshotai/Kimi-K2-Thinking",
],
"ollama": [
"gemma4",
"gemma4:27b",
"hermes3",
"hermes3:70b",
"hermes4",
"llama3.1",
"llama3.1:70b",
"qwen2.5-coder",
"qwen2.5-coder:32b",
"deepseek-r1",
"phi4",
"mistral",
],
}
_PROVIDER_LABELS = {
@@ -258,6 +272,7 @@ _PROVIDER_LABELS = {
"kilocode": "Kilo Code",
"alibaba": "Alibaba Cloud (DashScope)",
"huggingface": "Hugging Face",
"ollama": "Ollama (Local)",
"custom": "Custom endpoint",
}
@@ -331,7 +346,7 @@ def list_available_providers() -> list[dict[str, str]]:
"openrouter", "nous", "openai-codex", "copilot", "copilot-acp",
"huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "anthropic", "alibaba",
"opencode-zen", "opencode-go",
"ai-gateway", "deepseek", "custom",
"ai-gateway", "deepseek", "ollama", "custom",
]
# Build reverse alias map
aliases_for: dict[str, list[str]] = {}
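To see which of these catalog tags are actually available on a local server, the
OpenAI-compatible listing endpoint can be queried; a sketch, assuming the default
localhost:11434 address and that the server exposes /v1/models:

import json
import urllib.request

with urllib.request.urlopen("http://localhost:11434/v1/models") as resp:
    for model in json.load(resp)["data"]:
        print(model["id"])  # e.g. "llama3.1:latest"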

hermes_cli/providers.py Normal file

@@ -0,0 +1,524 @@
"""
Single source of truth for provider identity in Hermes Agent.
Two data sources, merged at runtime:
1. **models.dev catalog** — 109+ providers with base URLs, env vars, display
names, and full model metadata (context, cost, capabilities). This is
the primary database.
2. **Hermes overlays** — transport type, auth patterns, aggregator flags,
and additional env vars that models.dev doesn't track. Small dict,
maintained here.
3. **User config** (``providers:`` section in config.yaml) — user-defined
endpoints and overrides. Merged on top of everything else.
Other modules import from this file. No parallel registries.
"""
from __future__ import annotations
import logging
import os
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple
logger = logging.getLogger(__name__)

# -- Hermes overlay ----------------------------------------------------------
# Hermes-specific metadata that models.dev doesn't provide.

@dataclass(frozen=True)
class HermesOverlay:
    """Hermes-specific provider metadata layered on top of models.dev."""
    transport: str = "openai_chat"        # openai_chat | anthropic_messages | codex_responses
    is_aggregator: bool = False
    auth_type: str = "api_key"            # api_key | oauth_device_code | oauth_external | external_process
    extra_env_vars: Tuple[str, ...] = ()  # env vars models.dev doesn't list
    base_url_override: str = ""           # override if models.dev URL is wrong/missing
    base_url_env_var: str = ""            # env var for user-custom base URL

HERMES_OVERLAYS: Dict[str, HermesOverlay] = {
    "openrouter": HermesOverlay(
        transport="openai_chat",
        is_aggregator=True,
        extra_env_vars=("OPENAI_API_KEY",),
        base_url_env_var="OPENROUTER_BASE_URL",
    ),
    "nous": HermesOverlay(
        transport="openai_chat",
        auth_type="oauth_device_code",
        base_url_override="https://inference-api.nousresearch.com/v1",
    ),
    "openai-codex": HermesOverlay(
        transport="codex_responses",
        auth_type="oauth_external",
        base_url_override="https://chatgpt.com/backend-api/codex",
    ),
    "copilot-acp": HermesOverlay(
        transport="codex_responses",
        auth_type="external_process",
        base_url_override="acp://copilot",
        base_url_env_var="COPILOT_ACP_BASE_URL",
    ),
    "github-copilot": HermesOverlay(
        transport="openai_chat",
        extra_env_vars=("COPILOT_GITHUB_TOKEN", "GH_TOKEN"),
    ),
    "anthropic": HermesOverlay(
        transport="anthropic_messages",
        extra_env_vars=("ANTHROPIC_TOKEN", "CLAUDE_CODE_OAUTH_TOKEN"),
    ),
    "zai": HermesOverlay(
        transport="openai_chat",
        extra_env_vars=("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"),
        base_url_env_var="GLM_BASE_URL",
    ),
    "kimi-for-coding": HermesOverlay(
        transport="openai_chat",
        base_url_env_var="KIMI_BASE_URL",
    ),
    "minimax": HermesOverlay(
        transport="openai_chat",
        base_url_env_var="MINIMAX_BASE_URL",
    ),
    "minimax-cn": HermesOverlay(
        transport="openai_chat",
        base_url_env_var="MINIMAX_CN_BASE_URL",
    ),
    "deepseek": HermesOverlay(
        transport="openai_chat",
        base_url_env_var="DEEPSEEK_BASE_URL",
    ),
    "alibaba": HermesOverlay(
        transport="openai_chat",
        base_url_env_var="DASHSCOPE_BASE_URL",
    ),
    "vercel": HermesOverlay(
        transport="openai_chat",
        is_aggregator=True,
    ),
    "opencode": HermesOverlay(
        transport="openai_chat",
        is_aggregator=True,
        base_url_env_var="OPENCODE_ZEN_BASE_URL",
    ),
    "opencode-go": HermesOverlay(
        transport="openai_chat",
        is_aggregator=True,
        base_url_env_var="OPENCODE_GO_BASE_URL",
    ),
    "kilo": HermesOverlay(
        transport="openai_chat",
        is_aggregator=True,
        base_url_env_var="KILOCODE_BASE_URL",
    ),
    "huggingface": HermesOverlay(
        transport="openai_chat",
        is_aggregator=True,
        base_url_env_var="HF_BASE_URL",
    ),
    "ollama": HermesOverlay(
        transport="openai_chat",
        is_aggregator=False,
        base_url_env_var="OLLAMA_BASE_URL",
    ),
}

# -- Resolved provider -------------------------------------------------------
# The merged result of models.dev + overlay + user config.

@dataclass
class ProviderDef:
    """Complete provider definition — merged from all sources."""
    id: str
    name: str
    transport: str                     # openai_chat | anthropic_messages | codex_responses
    api_key_env_vars: Tuple[str, ...]  # all env vars to check for API key
    base_url: str = ""
    base_url_env_var: str = ""
    is_aggregator: bool = False
    auth_type: str = "api_key"
    doc: str = ""
    source: str = ""                   # "models.dev", "hermes", "user-config"

    @property
    def is_user_defined(self) -> bool:
        return self.source == "user-config"

# -- Aliases ------------------------------------------------------------------
# Maps human-friendly / legacy names to canonical provider IDs.
# Uses models.dev IDs where possible.

ALIASES: Dict[str, str] = {
    # openrouter
    "openai": "openrouter",  # bare "openai" → route through aggregator
    # zai
    "glm": "zai",
    "z-ai": "zai",
    "z.ai": "zai",
    "zhipu": "zai",
    # kimi-for-coding (models.dev ID)
    "kimi": "kimi-for-coding",
    "kimi-coding": "kimi-for-coding",
    "moonshot": "kimi-for-coding",
    # minimax-cn
    "minimax-china": "minimax-cn",
    "minimax_cn": "minimax-cn",
    # anthropic
    "claude": "anthropic",
    "claude-code": "anthropic",
    # github-copilot (models.dev ID)
    "copilot": "github-copilot",
    "github": "github-copilot",
    "github-copilot-acp": "copilot-acp",
    # vercel (models.dev ID for AI Gateway)
    "ai-gateway": "vercel",
    "aigateway": "vercel",
    "vercel-ai-gateway": "vercel",
    # opencode (models.dev ID for OpenCode Zen)
    "opencode-zen": "opencode",
    "zen": "opencode",
    # opencode-go
    "go": "opencode-go",
    "opencode-go-sub": "opencode-go",
    # kilo (models.dev ID for KiloCode)
    "kilocode": "kilo",
    "kilo-code": "kilo",
    "kilo-gateway": "kilo",
    # deepseek
    "deep-seek": "deepseek",
    # alibaba
    "dashscope": "alibaba",
    "aliyun": "alibaba",
    "qwen": "alibaba",
    "alibaba-cloud": "alibaba",
    # huggingface
    "hf": "huggingface",
    "hugging-face": "huggingface",
    "huggingface-hub": "huggingface",
    # Local server aliases → virtual "local" concept (resolved via user config)
    "lmstudio": "lmstudio",
    "lm-studio": "lmstudio",
    "lm_studio": "lmstudio",
    # ollama is now a first-class provider (issue #169)
    "vllm": "local",
    "llamacpp": "local",
    "llama.cpp": "local",
    "llama-cpp": "local",
}

# -- Display labels -----------------------------------------------------------
# Built dynamically from models.dev + overlays. Fallback for providers
# not in the catalog.

_LABEL_OVERRIDES: Dict[str, str] = {
    "nous": "Nous Portal",
    "openai-codex": "OpenAI Codex",
    "copilot-acp": "GitHub Copilot ACP",
    "local": "Local endpoint",
}


# -- Transport → API mode mapping ---------------------------------------------

TRANSPORT_TO_API_MODE: Dict[str, str] = {
    "openai_chat": "chat_completions",
    "anthropic_messages": "anthropic_messages",
    "codex_responses": "codex_responses",
}

# -- Helper functions ---------------------------------------------------------

def normalize_provider(name: str) -> str:
    """Resolve aliases and normalise casing to a canonical provider id.

    Returns the canonical id string. Does *not* validate that the id
    corresponds to a known provider.
    """
    key = name.strip().lower()
    return ALIASES.get(key, key)
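
# Illustrative behaviour of the helper above (examples only, not executed here):
#   normalize_provider("Kimi")   -> "kimi-for-coding"  (alias table)
#   normalize_provider("vllm")   -> "local"            (local-server alias)
#   normalize_provider("foobar") -> "foobar"           (unknown ids pass through unvalidated)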

def get_overlay(provider_id: str) -> Optional[HermesOverlay]:
    """Get Hermes overlay for a provider, if one exists."""
    canonical = normalize_provider(provider_id)
    return HERMES_OVERLAYS.get(canonical)

def get_provider(name: str) -> Optional[ProviderDef]:
    """Look up a provider by id or alias, merging all data sources.

    Resolution order:
    1. Hermes overlays (for providers not in models.dev: nous, openai-codex, etc.)
    2. models.dev catalog + Hermes overlay
    3. User-defined providers from config (TODO: Phase 4)

    Returns a fully-resolved ProviderDef or None.
    """
    canonical = normalize_provider(name)
    # Try to get models.dev data
    try:
        from agent.models_dev import get_provider_info as _mdev_provider
        mdev_info = _mdev_provider(canonical)
    except Exception:
        mdev_info = None
    overlay = HERMES_OVERLAYS.get(canonical)
    if mdev_info is not None:
        # Merge models.dev + overlay
        transport = overlay.transport if overlay else "openai_chat"
        is_agg = overlay.is_aggregator if overlay else False
        auth = overlay.auth_type if overlay else "api_key"
        base_url_env = overlay.base_url_env_var if overlay else ""
        base_url_override = overlay.base_url_override if overlay else ""
        # Combine env vars: models.dev env + hermes extra
        env_vars = list(mdev_info.env)
        if overlay and overlay.extra_env_vars:
            for ev in overlay.extra_env_vars:
                if ev not in env_vars:
                    env_vars.append(ev)
        return ProviderDef(
            id=canonical,
            name=mdev_info.name,
            transport=transport,
            api_key_env_vars=tuple(env_vars),
            base_url=base_url_override or mdev_info.api,
            base_url_env_var=base_url_env,
            is_aggregator=is_agg,
            auth_type=auth,
            doc=mdev_info.doc,
            source="models.dev",
        )
    if overlay is not None:
        # Hermes-only provider (not in models.dev)
        return ProviderDef(
            id=canonical,
            name=_LABEL_OVERRIDES.get(canonical, canonical),
            transport=overlay.transport,
            api_key_env_vars=overlay.extra_env_vars,
            base_url=overlay.base_url_override,
            base_url_env_var=overlay.base_url_env_var,
            is_aggregator=overlay.is_aggregator,
            auth_type=overlay.auth_type,
            source="hermes",
        )
    return None
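
# Merge rule, illustrated with the "ollama" entry: the Hermes overlay supplies
# the transport ("openai_chat") and the OLLAMA_BASE_URL env var, the models.dev
# record (when present) supplies the display name, API URL and key env vars, and
# a base_url_override (unset for ollama) would take precedence over the catalog URL.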

def get_label(provider_id: str) -> str:
    """Get a human-readable display name for a provider."""
    canonical = normalize_provider(provider_id)
    # Check label overrides first
    if canonical in _LABEL_OVERRIDES:
        return _LABEL_OVERRIDES[canonical]
    # Try models.dev
    pdef = get_provider(canonical)
    if pdef:
        return pdef.name
    return canonical

# Build LABELS dict for backward compat
def _build_labels() -> Dict[str, str]:
    """Build labels dict from overlays + overrides."""
    labels: Dict[str, str] = {}
    for pid in HERMES_OVERLAYS:
        labels[pid] = get_label(pid)
    labels.update(_LABEL_OVERRIDES)
    return labels


# For direct-import compat, expose LABELS as a plain module-level dict.
# (A lazily built module-level @property is not possible -- property only works
# on classes -- so the table below is static; get_label() is the preferred API
# and _build_labels() can regenerate it if needed.)
LABELS: Dict[str, str] = {
    # Static entries for backward compat — get_label() is the proper API
    "openrouter": "OpenRouter",
    "nous": "Nous Portal",
    "openai-codex": "OpenAI Codex",
    "copilot-acp": "GitHub Copilot ACP",
    "github-copilot": "GitHub Copilot",
    "anthropic": "Anthropic",
    "zai": "Z.AI / GLM",
    "kimi-for-coding": "Kimi / Moonshot",
    "minimax": "MiniMax",
    "minimax-cn": "MiniMax (China)",
    "deepseek": "DeepSeek",
    "alibaba": "Alibaba Cloud (DashScope)",
    "vercel": "Vercel AI Gateway",
    "opencode": "OpenCode Zen",
    "opencode-go": "OpenCode Go",
    "kilo": "Kilo Gateway",
    "huggingface": "Hugging Face",
    "local": "Local endpoint",
    "custom": "Custom endpoint",
    # Legacy Hermes IDs (point to same providers)
    "ai-gateway": "Vercel AI Gateway",
    "kilocode": "Kilo Gateway",
    "copilot": "GitHub Copilot",
    "kimi-coding": "Kimi / Moonshot",
    "opencode-zen": "OpenCode Zen",
}

def is_aggregator(provider: str) -> bool:
    """Return True when the provider is a multi-model aggregator."""
    pdef = get_provider(provider)
    return pdef.is_aggregator if pdef else False

def determine_api_mode(provider: str, base_url: str = "") -> str:
    """Determine the API mode (wire protocol) for a provider/endpoint.

    Resolution order:
    1. Known provider → transport → TRANSPORT_TO_API_MODE.
    2. URL heuristics for unknown / custom providers.
    3. Default: 'chat_completions'.
    """
    pdef = get_provider(provider)
    if pdef is not None:
        return TRANSPORT_TO_API_MODE.get(pdef.transport, "chat_completions")
    # URL-based heuristics for custom / unknown providers
    if base_url:
        url_lower = base_url.rstrip("/").lower()
        if url_lower.endswith("/anthropic") or "api.anthropic.com" in url_lower:
            return "anthropic_messages"
        if "api.openai.com" in url_lower:
            return "codex_responses"
    return "chat_completions"

# -- Provider from user config ------------------------------------------------

def resolve_user_provider(name: str, user_config: Dict[str, Any]) -> Optional[ProviderDef]:
    """Resolve a provider from the user's config.yaml ``providers:`` section.

    Args:
        name: Provider name as given by the user.
        user_config: The ``providers:`` dict from config.yaml.

    Returns:
        ProviderDef if found, else None.
    """
    if not user_config or not isinstance(user_config, dict):
        return None
    entry = user_config.get(name)
    if not isinstance(entry, dict):
        return None
    # Extract fields
    display_name = entry.get("name", "") or name
    api_url = entry.get("api", "") or entry.get("url", "") or entry.get("base_url", "") or ""
    key_env = entry.get("key_env", "") or ""
    transport = entry.get("transport", "openai_chat") or "openai_chat"
    env_vars: List[str] = []
    if key_env:
        env_vars.append(key_env)
    return ProviderDef(
        id=name,
        name=display_name,
        transport=transport,
        api_key_env_vars=tuple(env_vars),
        base_url=api_url,
        is_aggregator=False,
        auth_type="api_key",
        source="user-config",
    )

def resolve_provider_full(
    name: str,
    user_providers: Optional[Dict[str, Any]] = None,
) -> Optional[ProviderDef]:
    """Full resolution chain: built-in → models.dev → user config.

    This is the main entry point for --provider flag resolution.

    Args:
        name: Provider name or alias.
        user_providers: The ``providers:`` dict from config.yaml (optional).

    Returns:
        ProviderDef if found, else None.
    """
    canonical = normalize_provider(name)
    # 1. Built-in (models.dev + overlays)
    pdef = get_provider(canonical)
    if pdef is not None:
        return pdef
    # 2. User-defined providers from config
    if user_providers:
        # Try canonical name
        user_pdef = resolve_user_provider(canonical, user_providers)
        if user_pdef is not None:
            return user_pdef
        # Try original name (in case alias didn't match)
        user_pdef = resolve_user_provider(name.strip().lower(), user_providers)
        if user_pdef is not None:
            return user_pdef
    # 3. Try models.dev directly (for providers not in our ALIASES)
    try:
        from agent.models_dev import get_provider_info as _mdev_provider
        mdev_info = _mdev_provider(canonical)
        if mdev_info is not None:
            return ProviderDef(
                id=canonical,
                name=mdev_info.name,
                transport="openai_chat",
                api_key_env_vars=mdev_info.env,
                base_url=mdev_info.api,
                source="models.dev",
            )
    except Exception:
        pass
    return None
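
# Resolution walk-through (hypothetical config values, not shipped defaults):
#   user_providers = {"ollama-remote": {"url": "http://gpu-box:11434/v1",
#                                       "key_env": "OLLAMA_REMOTE_KEY"}}
#   resolve_provider_full("ollama")                        -> built-in definition
#   resolve_provider_full("ollama-remote", user_providers) -> user-config definition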