Compare commits
10 Commits
fix/kimi-f
...
burn/20260
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
511eacb573 | ||
| 7d2421a15f | |||
|
|
5a942d71a1 | ||
|
|
044f0f8951 | ||
| 61c59ce332 | |||
| 01ce8ae889 | |||
|
|
b179250ab8 | ||
| 01a3f47a5b | |||
|
|
4538e11f97 | ||
| 7936483ffc |
@@ -47,6 +47,11 @@ jobs:
|
||||
source .venv/bin/activate
|
||||
python scripts/syntax_guard.py
|
||||
|
||||
- name: No duplicate models
|
||||
run: |
|
||||
source .venv/bin/activate
|
||||
python scripts/check_no_duplicate_models.py
|
||||
|
||||
- name: Green-path E2E
|
||||
run: |
|
||||
source .venv/bin/activate
|
||||
|
||||
@@ -922,6 +922,7 @@ def _resolve_forced_provider(forced: str) -> Tuple[Optional[OpenAI], Optional[st
|
||||
_AUTO_PROVIDER_LABELS = {
|
||||
"_try_openrouter": "openrouter",
|
||||
"_try_nous": "nous",
|
||||
"_try_ollama": "ollama",
|
||||
"_try_custom_endpoint": "local/custom",
|
||||
"_try_codex": "openai-codex",
|
||||
"_resolve_api_key_provider": "api-key",
|
||||
@@ -930,6 +931,18 @@ _AUTO_PROVIDER_LABELS = {
|
||||
_AGGREGATOR_PROVIDERS = frozenset({"openrouter", "nous"})
|
||||
|
||||
|
||||
def _try_ollama() -> Tuple[Optional[OpenAI], Optional[str]]:
    """Probe for a local Ollama server and build a client for it.

    Returns:
        (client, model) when an Ollama server answers at the configured
        base URL; (None, None) when no Ollama server is detected.
    """
    # Base URL from env, falling back to Ollama's default port; normalize
    # trailing slash and ensure the OpenAI-compatible /v1 suffix.
    url = (os.getenv("OLLAMA_BASE_URL", "") or "http://localhost:11434").strip().rstrip("/")
    if not url.endswith("/v1"):
        url = url + "/v1"
    from agent.model_metadata import detect_local_server_type
    # Bail out unless the probe positively identifies an Ollama server.
    if detect_local_server_type(url) != "ollama":
        return None, None
    # Ollama ignores the API key, but the OpenAI client requires one.
    key = (os.getenv("OLLAMA_API_KEY", "") or "ollama").strip()
    chosen_model = _read_main_model() or "gemma4:12b"
    return OpenAI(api_key=key, base_url=url), chosen_model
|
||||
|
||||
|
||||
def _get_provider_chain() -> List[tuple]:
|
||||
"""Return the ordered provider detection chain.
|
||||
|
||||
@@ -939,6 +952,7 @@ def _get_provider_chain() -> List[tuple]:
|
||||
return [
|
||||
("openrouter", _try_openrouter),
|
||||
("nous", _try_nous),
|
||||
("ollama", _try_ollama),
|
||||
("local/custom", _try_custom_endpoint),
|
||||
("openai-codex", _try_codex),
|
||||
("api-key", _resolve_api_key_provider),
|
||||
@@ -988,6 +1002,7 @@ def _try_payment_fallback(
|
||||
# Map common resolved_provider values back to chain labels.
|
||||
_alias_to_label = {"openrouter": "openrouter", "nous": "nous",
|
||||
"openai-codex": "openai-codex", "codex": "openai-codex",
|
||||
"ollama": "ollama",
|
||||
"custom": "local/custom", "local/custom": "local/custom"}
|
||||
skip_chain_labels = {_alias_to_label.get(s, s) for s in skip_labels}
|
||||
|
||||
@@ -1195,6 +1210,15 @@ def resolve_provider_client(
|
||||
return (_to_async_client(client, final_model) if async_mode
|
||||
else (client, final_model))
|
||||
|
||||
# ── Ollama (first-class local provider) ──────────────────────────
|
||||
if provider == "ollama":
|
||||
base_url = (explicit_base_url or os.getenv("OLLAMA_BASE_URL", "") or "http://localhost:11434").strip().rstrip("/")
|
||||
base_url = base_url + "/v1" if not base_url.endswith("/v1") else base_url
|
||||
api_key = (explicit_api_key or os.getenv("OLLAMA_API_KEY", "") or "ollama").strip()
|
||||
final_model = model or _read_main_model() or "gemma4:12b"
|
||||
client = OpenAI(api_key=api_key, base_url=base_url)
|
||||
return (_to_async_client(client, final_model) if async_mode else (client, final_model))
|
||||
|
||||
# ── Custom endpoint (OPENAI_BASE_URL + OPENAI_API_KEY) ───────────
|
||||
if provider == "custom":
|
||||
if explicit_base_url:
|
||||
@@ -1335,6 +1359,7 @@ def get_async_text_auxiliary_client(task: str = ""):
|
||||
_VISION_AUTO_PROVIDER_ORDER = (
|
||||
"openrouter",
|
||||
"nous",
|
||||
"ollama",
|
||||
"openai-codex",
|
||||
"anthropic",
|
||||
"custom",
|
||||
|
||||
@@ -26,7 +26,7 @@ _PROVIDER_PREFIXES: frozenset[str] = frozenset({
|
||||
"openrouter", "nous", "openai-codex", "copilot", "copilot-acp",
|
||||
"gemini", "zai", "kimi-coding", "minimax", "minimax-cn", "anthropic", "deepseek",
|
||||
"opencode-zen", "opencode-go", "ai-gateway", "kilocode", "alibaba",
|
||||
"custom", "local",
|
||||
"ollama", "custom", "local",
|
||||
# Common aliases
|
||||
"google", "google-gemini", "google-ai-studio",
|
||||
"glm", "z-ai", "z.ai", "zhipu", "github", "github-copilot",
|
||||
@@ -102,9 +102,12 @@ DEFAULT_CONTEXT_LENGTHS = {
|
||||
"gpt-4": 128000,
|
||||
# Google
|
||||
"gemini": 1048576,
|
||||
# Gemma (open models served via AI Studio)
|
||||
# Gemma (open models — Ollama / AI Studio)
|
||||
"gemma-4-31b": 256000,
|
||||
"gemma-4-26b": 256000,
|
||||
"gemma-4-12b": 256000,
|
||||
"gemma-4-4b": 256000,
|
||||
"gemma-4-1b": 256000,
|
||||
"gemma-3": 131072,
|
||||
"gemma": 8192, # fallback for older gemma models
|
||||
# DeepSeek
|
||||
@@ -187,6 +190,8 @@ _URL_TO_PROVIDER: Dict[str, str] = {
|
||||
"api.githubcopilot.com": "copilot",
|
||||
"models.github.ai": "copilot",
|
||||
"api.fireworks.ai": "fireworks",
|
||||
"localhost": "ollama",
|
||||
"127.0.0.1": "ollama",
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -148,7 +148,7 @@ PROVIDER_TO_MODELS_DEV: Dict[str, str] = {
|
||||
"openrouter": "openrouter",
|
||||
"anthropic": "anthropic",
|
||||
"zai": "zai",
|
||||
"kimi-coding": "kimi-for-coding",
|
||||
"kimi-coding": "kimi-k2.5",
|
||||
"minimax": "minimax",
|
||||
"minimax-cn": "minimax-cn",
|
||||
"deepseek": "deepseek",
|
||||
|
||||
@@ -6,7 +6,7 @@ model: anthropic/claude-opus-4.6
|
||||
# Fallback chain: Anthropic -> Kimi -> Ollama (local)
|
||||
fallback_providers:
|
||||
- provider: kimi-coding
|
||||
model: kimi-for-coding
|
||||
model: kimi-k2.5
|
||||
timeout: 60
|
||||
reason: "Primary fallback when Anthropic quota limited"
|
||||
|
||||
|
||||
335
docs/memory-architecture.md
Normal file
335
docs/memory-architecture.md
Normal file
@@ -0,0 +1,335 @@
|
||||
# Memory Architecture Guide
|
||||
|
||||
How Hermes Agent remembers things across sessions — the stores, the tools, the data flow, and how to configure it all.
|
||||
|
||||
## Overview
|
||||
|
||||
Hermes has a multi-layered memory system. It is not one thing — it is several independent systems that complement each other:
|
||||
|
||||
1. **Persistent Memory** (MEMORY.md / USER.md) — bounded, curated notes injected into every system prompt
|
||||
2. **Session Search** — full-text search across all past conversation transcripts
|
||||
3. **Skills** — procedural memory: reusable workflows stored as SKILL.md files
|
||||
4. **External Memory Providers** — optional plugins (Honcho, Holographic, Mem0, etc.) for deeper recall
|
||||
|
||||
All built-in memory lives on disk under `~/.hermes/` (or `$HERMES_HOME`). No memory data leaves the machine unless you explicitly configure an external cloud provider.
|
||||
|
||||
## Memory Types in Detail
|
||||
|
||||
### 1. Persistent Memory (MEMORY.md and USER.md)
|
||||
|
||||
The core memory system. Two files in `~/.hermes/memories/`:
|
||||
|
||||
| File | Purpose | Default Char Limit |
|
||||
|------|---------|--------------------|
|
||||
| `MEMORY.md` | Agent's personal notes — environment facts, project conventions, tool quirks, lessons learned | 2,200 chars (~800 tokens) |
|
||||
| `USER.md` | User profile — name, preferences, communication style, pet peeves | 1,375 chars (~500 tokens) |
|
||||
|
||||
**How it works:**
|
||||
|
||||
- Loaded from disk at session start and injected into the system prompt as a frozen snapshot
|
||||
- The agent uses the `memory` tool to add, replace, or remove entries during a session
|
||||
- Mid-session writes go to disk immediately (durable) but do NOT update the system prompt — this preserves the LLM's prefix cache for performance
|
||||
- The snapshot refreshes on the next session start
|
||||
- Entries are delimited by `§` (section sign) and can be multiline
|
||||
|
||||
**System prompt appearance:**
|
||||
|
||||
```
|
||||
══════════════════════════════════════════════
|
||||
MEMORY (your personal notes) [67% — 1,474/2,200 chars]
|
||||
══════════════════════════════════════════════
|
||||
User's project is a Rust web service at ~/code/myapi using Axum + SQLx
|
||||
§
|
||||
This machine runs Ubuntu 22.04, has Docker and Podman installed
|
||||
§
|
||||
User prefers concise responses, dislikes verbose explanations
|
||||
```
|
||||
|
||||
**Memory tool actions:**
|
||||
|
||||
- `add` — append a new entry (rejected if it would exceed the char limit)
|
||||
- `replace` — find an entry by substring match and replace it
|
||||
- `remove` — find an entry by substring match and delete it
|
||||
|
||||
Substring matching means you only need a unique fragment of the entry, not the full text. If the fragment matches multiple entries, the tool returns an error asking for a more specific match.
|
||||
|
||||
### 2. Session Search
|
||||
|
||||
Cross-session conversation recall via SQLite FTS5 full-text search.
|
||||
|
||||
- All CLI and messaging sessions are stored in `~/.hermes/state.db`
|
||||
- The `session_search` tool finds relevant past conversations by keyword
|
||||
- Top matching sessions are summarized by Gemini Flash (cheap, fast) before being returned to the main model
|
||||
- Returns focused summaries, not raw transcripts
|
||||
|
||||
**When to use session_search vs. memory:**
|
||||
|
||||
| Feature | Persistent Memory | Session Search |
|
||||
|---------|------------------|----------------|
|
||||
| Capacity | ~3,575 chars total | Unlimited (all sessions) |
|
||||
| Speed | Instant (in system prompt) | Requires search + LLM summarization |
|
||||
| Use case | Key facts always in context | "What did we discuss about X last week?" |
|
||||
| Management | Manually curated by the agent | Automatic — all sessions stored |
|
||||
| Token cost | Fixed per session (~1,300 tokens) | On-demand (searched when needed) |
|
||||
|
||||
**Rule of thumb:** Memory is for facts that should *always* be available. Session search is for recalling specific past conversations on demand. Don't save task progress or session outcomes to memory — use session_search to find those.
|
||||
|
||||
### 3. Skills (Procedural Memory)
|
||||
|
||||
Skills are reusable workflows stored as `SKILL.md` files in `~/.hermes/skills/` (and optionally external skill directories).
|
||||
|
||||
- Organized by category: `skills/github/github-pr-workflow/SKILL.md`
|
||||
- YAML frontmatter with name, description, version, platform restrictions
|
||||
- Progressive disclosure: metadata shown in skill list, full content loaded on demand via `skill_view`
|
||||
- The agent creates skills proactively after complex tasks (5+ tool calls) using the `skill_manage` tool
|
||||
- Skills can be patched when found outdated — stale skills are a liability
|
||||
|
||||
Skills are *not* injected into the system prompt by default. The agent sees a compact index of available skills and loads them on demand. This keeps the prompt lean while giving access to deep procedural knowledge.
|
||||
|
||||
**Skills vs. Memory:**
|
||||
|
||||
- **Memory:** compact facts ("User's project uses Go 1.22 with chi router")
|
||||
- **Skills:** detailed procedures ("How to deploy the staging server: step 1, step 2, ...")
|
||||
|
||||
### 4. External Memory Providers
|
||||
|
||||
Optional plugins that add deeper, structured memory alongside the built-in system. Only one external provider can be active at a time.
|
||||
|
||||
| Provider | Storage | Key Feature |
|
||||
|----------|---------|-------------|
|
||||
| Honcho | Cloud | Dialectic user modeling with semantic search |
|
||||
| OpenViking | Self-hosted | Filesystem-style knowledge hierarchy |
|
||||
| Mem0 | Cloud | Server-side LLM fact extraction |
|
||||
| Hindsight | Cloud/Local | Knowledge graph with entity resolution |
|
||||
| Holographic | Local SQLite | HRR algebraic reasoning + trust scoring |
|
||||
| RetainDB | Cloud | Hybrid search with delta compression |
|
||||
| ByteRover | Local/Cloud | Hierarchical knowledge tree with CLI |
|
||||
| Supermemory | Cloud | Context fencing + session graph ingest |
|
||||
|
||||
External providers run **alongside** built-in memory (never replacing it). They receive hooks for:
|
||||
- System prompt injection (provider context)
|
||||
- Pre-turn memory prefetch
|
||||
- Post-turn conversation sync
|
||||
- Session-end extraction
|
||||
- Built-in memory write mirroring
|
||||
|
||||
Setup: `hermes memory setup` or set `memory.provider` in `~/.hermes/config.yaml`.
|
||||
|
||||
See `website/docs/user-guide/features/memory-providers.md` for full provider details.
|
||||
|
||||
## How the Systems Interact
|
||||
|
||||
```
|
||||
Session Start
|
||||
|
|
||||
+--> Load MEMORY.md + USER.md from disk --> frozen snapshot into system prompt
|
||||
+--> Provider: system_prompt_block() --> injected into system prompt
|
||||
+--> Skills index --> injected into system prompt (compact metadata only)
|
||||
|
|
||||
v
|
||||
Each Turn
|
||||
|
|
||||
+--> Provider: prefetch(query) --> relevant recalled context
|
||||
+--> Agent sees: system prompt (memory + provider context + skills index)
|
||||
+--> Agent can call: memory tool, session_search tool, skill tools, provider tools
|
||||
|
|
||||
v
|
||||
After Each Response
|
||||
|
|
||||
+--> Provider: sync_turn(user, assistant) --> persist conversation
|
||||
|
|
||||
v
|
||||
Periodic (every N turns, default 10)
|
||||
|
|
||||
+--> Memory nudge: agent prompted to review and update memory
|
||||
|
|
||||
v
|
||||
Session End / Compression
|
||||
|
|
||||
+--> Memory flush: agent saves important facts before context is discarded
|
||||
+--> Provider: on_session_end(messages) --> final extraction
|
||||
+--> Provider: on_pre_compress(messages) --> save insights before compression
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### What to Save
|
||||
|
||||
Save proactively — don't wait for the user to ask:
|
||||
|
||||
- **User preferences:** "I prefer TypeScript over JavaScript" → `user` target
|
||||
- **Corrections:** "Don't use sudo for Docker, I'm in the docker group" → `memory` target
|
||||
- **Environment facts:** "This server runs Debian 12 with PostgreSQL 16" → `memory` target
|
||||
- **Conventions:** "Project uses tabs, 120-char lines, Google docstrings" → `memory` target
|
||||
- **Explicit requests:** "Remember that my API key rotation is monthly" → `memory` target
|
||||
|
||||
### What NOT to Save
|
||||
|
||||
- **Task progress or session outcomes** — use session_search to recall these
|
||||
- **Trivially re-discoverable facts** — "Python 3.12 supports f-strings" (web search this)
|
||||
- **Raw data dumps** — large code blocks, log files, data tables
|
||||
- **Session-specific ephemera** — temporary file paths, one-off debugging context
|
||||
- **Content already in SOUL.md or AGENTS.md** — those are already in context
|
||||
|
||||
### Writing Good Entries
|
||||
|
||||
Compact, information-dense entries work best:
|
||||
|
||||
```
|
||||
# Good — packs multiple related facts
|
||||
User runs macOS 14 Sonoma, uses Homebrew, has Docker Desktop and Podman. Shell: zsh. Editor: VS Code with Vim bindings.
|
||||
|
||||
# Good — specific, actionable convention
|
||||
Project ~/code/api uses Go 1.22, sqlc for DB, chi router. Tests: make test. CI: GitHub Actions.
|
||||
|
||||
# Bad — too vague
|
||||
User has a project.
|
||||
|
||||
# Bad — too verbose
|
||||
On January 5th, 2026, the user asked me to look at their project which is
|
||||
located at ~/code/api. I discovered it uses Go version 1.22 and...
|
||||
```
|
||||
|
||||
### Capacity Management
|
||||
|
||||
When memory is above 80% capacity (visible in the system prompt header), consolidate before adding. Merge related entries into shorter, denser versions. The tool will reject additions that would exceed the limit — use `replace` to consolidate first.
|
||||
|
||||
Priority order for what stays in memory:
|
||||
1. User preferences and corrections (highest — prevents repeated steering)
|
||||
2. Environment facts and project conventions
|
||||
3. Tool quirks and workarounds
|
||||
4. Lessons learned (lowest — can often be rediscovered)
|
||||
|
||||
### Memory Nudge
|
||||
|
||||
Every N turns (default: 10), the agent receives a nudge prompting it to review and update its memory. This is a lightweight prompt injected into the conversation — not a separate API call. The agent can choose to update memory or skip if nothing has changed.
|
||||
|
||||
## Privacy and Data Locality
|
||||
|
||||
**Built-in memory is fully local.** MEMORY.md and USER.md are plain text files in `~/.hermes/memories/`. No network calls are made in the memory read/write path. The memory tool scans entries for prompt injection and exfiltration patterns before accepting them.
|
||||
|
||||
**Session search is local.** The SQLite database (`~/.hermes/state.db`) stays on disk. FTS5 search is a local operation. However, the summarization step uses Gemini Flash (via the auxiliary LLM client) — conversation snippets are sent to Google's API for summarization. If this is a concern, session_search can be disabled.
|
||||
|
||||
**External providers may send data off-machine.** Cloud providers (Honcho, Mem0, RetainDB, Supermemory) send data to their respective APIs. Self-hosted providers (OpenViking, Hindsight local mode, Holographic, ByteRover local mode) keep everything on your machine. Check the provider's documentation for specifics.
|
||||
|
||||
**Security scanning.** All content written to memory (via the `memory` tool) is scanned for:
|
||||
- Prompt injection patterns ("ignore previous instructions", role hijacking, etc.)
|
||||
- Credential exfiltration attempts (curl/wget with secrets, reading .env files)
|
||||
- SSH backdoor patterns
|
||||
- Invisible unicode characters (used for steganographic injection)
|
||||
|
||||
Blocked content is rejected with a descriptive error message.
|
||||
|
||||
## Configuration
|
||||
|
||||
In `~/.hermes/config.yaml`:
|
||||
|
||||
```yaml
|
||||
memory:
|
||||
# Enable/disable the two built-in memory stores
|
||||
memory_enabled: true # MEMORY.md
|
||||
user_profile_enabled: true # USER.md
|
||||
|
||||
# Character limits (not tokens — model-independent)
|
||||
memory_char_limit: 2200 # ~800 tokens at 2.75 chars/token
|
||||
user_char_limit: 1375 # ~500 tokens at 2.75 chars/token
|
||||
|
||||
# External memory provider (empty string = built-in only)
|
||||
# Options: "honcho", "openviking", "mem0", "hindsight",
|
||||
# "holographic", "retaindb", "byterover", "supermemory"
|
||||
provider: ""
|
||||
```
|
||||
|
||||
Additional settings are read from `run_agent.py` defaults:
|
||||
|
||||
| Setting | Default | Description |
|
||||
|---------|---------|-------------|
|
||||
| `nudge_interval` | 10 | Turns between memory review nudges (0 = disabled) |
|
||||
| `flush_min_turns` | 6 | Minimum user turns before memory flush on session end/compression (0 = never flush) |
|
||||
|
||||
These are set under the `memory` key in config.yaml:
|
||||
|
||||
```yaml
|
||||
memory:
|
||||
nudge_interval: 10
|
||||
flush_min_turns: 6
|
||||
```
|
||||
|
||||
### Disabling Memory
|
||||
|
||||
To disable memory entirely, set both to false:
|
||||
|
||||
```yaml
|
||||
memory:
|
||||
memory_enabled: false
|
||||
user_profile_enabled: false
|
||||
```
|
||||
|
||||
The `memory` tool will not appear in the tool list, and no memory blocks are injected into the system prompt.
|
||||
|
||||
You can also disable memory per-invocation with `skip_memory=True` in the AIAgent constructor (used by cron jobs and flush agents).
|
||||
|
||||
## File Locations
|
||||
|
||||
```
|
||||
~/.hermes/
|
||||
├── memories/
|
||||
│ ├── MEMORY.md # Agent's persistent notes
|
||||
│ ├── USER.md # User profile
|
||||
│ ├── MEMORY.md.lock # File lock (auto-created)
|
||||
│ └── USER.md.lock # File lock (auto-created)
|
||||
├── state.db # SQLite session store (FTS5)
|
||||
├── config.yaml # Memory config + provider selection
|
||||
└── .env # API keys for external providers
|
||||
```
|
||||
|
||||
All paths respect `$HERMES_HOME` — if you use Hermes profiles, each profile has its own isolated memory directory.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Memory full" errors
|
||||
|
||||
The tool returns an error when adding would exceed the character limit. The response includes current entries so the agent can consolidate. Fix by:
|
||||
1. Replacing multiple related entries with one denser entry
|
||||
2. Removing entries that are no longer relevant
|
||||
3. Increasing `memory_char_limit` in config (at the cost of larger system prompts)
|
||||
|
||||
### Stale memory entries
|
||||
|
||||
If the agent seems to have outdated information:
|
||||
- Check `~/.hermes/memories/MEMORY.md` directly — you can edit it by hand
|
||||
- The frozen snapshot pattern means changes only take effect on the next session start
|
||||
- If the agent wrote something wrong mid-session, it persists on disk but won't affect the current session's system prompt
|
||||
|
||||
### Memory not appearing in system prompt
|
||||
|
||||
- Verify `memory_enabled: true` in config.yaml
|
||||
- Check that `~/.hermes/memories/MEMORY.md` exists and has content
|
||||
- The file might be empty if all entries were removed — add entries with the `memory` tool
|
||||
|
||||
### Session search returns no results
|
||||
|
||||
- Session search requires sessions to be stored in `state.db` — new installations have no history
|
||||
- FTS5 indexes are built automatically but may lag behind on very large databases
|
||||
- The summarization step requires the auxiliary LLM client to be configured (API key for Gemini Flash)
|
||||
|
||||
### Skill drift
|
||||
|
||||
Skills that haven't been updated can become wrong or incomplete. The agent is prompted to patch skills when it finds them outdated during use (`skill_manage(action='patch')`). If you notice stale skills:
|
||||
- Use `/skills` to browse and review installed skills
|
||||
- Delete or update skills in `~/.hermes/skills/` directly
|
||||
- The agent creates skills after complex tasks — review and prune periodically
|
||||
|
||||
### Provider not activating
|
||||
|
||||
- Run `hermes memory status` to check provider state
|
||||
- Verify the provider plugin is installed in `~/.hermes/plugins/memory/`
|
||||
- Check that required API keys are set in `~/.hermes/.env`
|
||||
- Start a new session after changing provider config — existing sessions use the old provider
|
||||
|
||||
### Concurrent write conflicts
|
||||
|
||||
The memory tool uses file locking (`fcntl.flock`) and atomic file replacement (`os.replace`) to handle concurrent writes from multiple sessions. If you see corrupted memory files:
|
||||
- Check for stale `.lock` files in `~/.hermes/memories/`
|
||||
- Restart any hung Hermes processes
|
||||
- The atomic write pattern means readers always see either the old or new file — never a partial write
|
||||
@@ -820,10 +820,11 @@ def resolve_provider(
|
||||
"hf": "huggingface", "hugging-face": "huggingface", "huggingface-hub": "huggingface",
|
||||
"go": "opencode-go", "opencode-go-sub": "opencode-go",
|
||||
"kilo": "kilocode", "kilo-code": "kilocode", "kilo-gateway": "kilocode",
|
||||
# Local server aliases — route through the generic custom provider
|
||||
# Local server aliases
|
||||
"lmstudio": "custom", "lm-studio": "custom", "lm_studio": "custom",
|
||||
"ollama": "custom", "vllm": "custom", "llamacpp": "custom",
|
||||
"vllm": "custom", "llamacpp": "custom",
|
||||
"llama.cpp": "custom", "llama-cpp": "custom",
|
||||
"ollama": "ollama",
|
||||
}
|
||||
normalized = _PROVIDER_ALIASES.get(normalized, normalized)
|
||||
|
||||
|
||||
@@ -2126,9 +2126,8 @@ def _model_flow_kimi(config, current_model=""):
|
||||
|
||||
# Step 3: Model selection — show appropriate models for the endpoint
|
||||
if is_coding_plan:
|
||||
# Coding Plan models (kimi-for-coding first)
|
||||
# Coding Plan models (kimi-k2.5 first)
|
||||
model_list = [
|
||||
"kimi-for-coding",
|
||||
"kimi-k2.5",
|
||||
"kimi-k2-thinking",
|
||||
"kimi-k2-thinking-turbo",
|
||||
@@ -4206,7 +4205,7 @@ For more help on a command:
|
||||
)
|
||||
chat_parser.add_argument(
|
||||
"--provider",
|
||||
choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "gemini", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode"],
|
||||
choices=["auto", "openrouter", "nous", "openai-codex", "copilot-acp", "copilot", "anthropic", "gemini", "huggingface", "zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "ollama"],
|
||||
default=None,
|
||||
help="Inference provider (default: auto)"
|
||||
)
|
||||
|
||||
@@ -130,7 +130,6 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
|
||||
"glm-4.5-flash",
|
||||
],
|
||||
"kimi-coding": [
|
||||
"kimi-for-coding",
|
||||
"kimi-k2.5",
|
||||
"kimi-k2-thinking",
|
||||
"kimi-k2-thinking-turbo",
|
||||
@@ -568,7 +567,7 @@ def list_available_providers() -> list[dict[str, str]]:
|
||||
"gemini", "huggingface",
|
||||
"zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "anthropic", "alibaba",
|
||||
"opencode-zen", "opencode-go",
|
||||
"ai-gateway", "deepseek", "custom",
|
||||
"ai-gateway", "deepseek", "ollama", "custom",
|
||||
]
|
||||
# Build reverse alias map
|
||||
aliases_for: dict[str, list[str]] = {}
|
||||
|
||||
@@ -78,7 +78,7 @@ HERMES_OVERLAYS: Dict[str, HermesOverlay] = {
|
||||
extra_env_vars=("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"),
|
||||
base_url_env_var="GLM_BASE_URL",
|
||||
),
|
||||
"kimi-for-coding": HermesOverlay(
|
||||
"kimi-k2.5": HermesOverlay(
|
||||
transport="openai_chat",
|
||||
base_url_env_var="KIMI_BASE_URL",
|
||||
),
|
||||
@@ -162,10 +162,10 @@ ALIASES: Dict[str, str] = {
|
||||
"z.ai": "zai",
|
||||
"zhipu": "zai",
|
||||
|
||||
# kimi-for-coding (models.dev ID)
|
||||
"kimi": "kimi-for-coding",
|
||||
"kimi-coding": "kimi-for-coding",
|
||||
"moonshot": "kimi-for-coding",
|
||||
# kimi-k2.5 (models.dev ID)
|
||||
"kimi": "kimi-k2.5",
|
||||
"kimi-coding": "kimi-k2.5",
|
||||
"moonshot": "kimi-k2.5",
|
||||
|
||||
# minimax-cn
|
||||
"minimax-china": "minimax-cn",
|
||||
@@ -376,7 +376,7 @@ LABELS: Dict[str, str] = {
|
||||
"github-copilot": "GitHub Copilot",
|
||||
"anthropic": "Anthropic",
|
||||
"zai": "Z.AI / GLM",
|
||||
"kimi-for-coding": "Kimi / Moonshot",
|
||||
"kimi-k2.5": "Kimi / Moonshot",
|
||||
"minimax": "MiniMax",
|
||||
"minimax-cn": "MiniMax (China)",
|
||||
"deepseek": "DeepSeek",
|
||||
|
||||
@@ -235,7 +235,7 @@ The Hermes Agent framework serves as both the delivery platform and the portfoli
|
||||
|
||||
| House | Host | Model / Provider | Gateway Status |
|
||||
|-------|------|------------------|----------------|
|
||||
| Ezra | Hermes VPS | `kimi-for-coding` (Kimi K2.5) | API `8658`, webhook `8648` — Active |
|
||||
| Ezra | Hermes VPS | `kimi-k2.5` (Kimi K2.5) | API `8658`, webhook `8648` — Active |
|
||||
| Bezalel | Hermes VPS | Claude Opus 4.6 (Anthropic) | Port `8645` — Active |
|
||||
| Allegro-Primus | Hermes VPS | Kimi K2.5 | Port `8644` — Requires restart |
|
||||
| Bilbo | External | Gemma 4B (local) | Telegram dual-mode — Active |
|
||||
|
||||
74
scripts/check_no_duplicate_models.py
Executable file
74
scripts/check_no_duplicate_models.py
Executable file
@@ -0,0 +1,74 @@
|
||||
#!/usr/bin/env python3
|
||||
"""CI check: ensure no duplicate model IDs exist in provider configs.
|
||||
|
||||
Catches the class of bugs where a rename introduces a duplicate entry
|
||||
(e.g. PR #225 kimi-for-coding -> kimi-k2.5 when kimi-k2.5 already existed).
|
||||
|
||||
Runtime target: < 2 seconds.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Allow running from repo root
|
||||
REPO_ROOT = Path(__file__).parent.parent
|
||||
sys.path.insert(0, str(REPO_ROOT))
|
||||
|
||||
|
||||
def check_openrouter_models() -> list[str]:
    """Report duplicate model IDs in OPENROUTER_MODELS.

    Returns:
        A list of human-readable error strings, one per duplicate;
        empty when the module cannot be imported or no duplicates exist.
    """
    try:
        from hermes_cli.models import OPENROUTER_MODELS
    except ImportError:
        # Soft-skip: the check is a no-op when the package isn't importable.
        return []

    problems: list[str] = []
    first_seen: dict[str, int] = {}
    for idx, (model_id, _desc) in enumerate(OPENROUTER_MODELS):
        # setdefault records the first index; a differing return means a repeat.
        prev = first_seen.setdefault(model_id, idx)
        if prev != idx:
            problems.append(
                f" OPENROUTER_MODELS: duplicate '{model_id}' "
                f"(index {prev} and {idx})"
            )
    return problems
||||
|
||||
|
||||
def check_provider_models() -> list[str]:
    """Report duplicate model IDs within each provider list in _PROVIDER_MODELS.

    Returns:
        A list of human-readable error strings, one per duplicate;
        empty when the module cannot be imported or no duplicates exist.
    """
    # Consistency fix: guard the import like check_openrouter_models does,
    # so a missing/unimportable hermes_cli soft-skips instead of crashing
    # the whole CI script with an unrelated traceback.
    try:
        from hermes_cli.models import _PROVIDER_MODELS
    except ImportError:
        return []

    errors: list[str] = []
    for provider, models in _PROVIDER_MODELS.items():
        seen: dict[str, int] = {}
        for i, model_id in enumerate(models):
            if model_id in seen:
                errors.append(
                    f" _PROVIDER_MODELS['{provider}']: duplicate '{model_id}' "
                    f"(index {seen[model_id]} and {i})"
                )
            else:
                seen[model_id] = i
    return errors
|
||||
|
||||
|
||||
def main() -> int:
    """Run every duplicate-model check.

    Returns:
        Process exit code: 0 when clean, 1 when duplicates were found.
    """
    problems = check_openrouter_models() + check_provider_models()

    if not problems:
        print("OK: no duplicate model entries")
        return 0

    print(f"FAIL: {len(problems)} duplicate model(s) found:")
    for problem in problems:
        print(problem)
    return 1


if __name__ == "__main__":
    raise SystemExit(main())
|
||||
@@ -895,7 +895,7 @@ class TestKimiMoonshotModelListIsolation:
|
||||
def test_moonshot_list_excludes_coding_plan_only_models(self):
|
||||
from hermes_cli.main import _PROVIDER_MODELS
|
||||
moonshot_models = _PROVIDER_MODELS["moonshot"]
|
||||
coding_plan_only = {"kimi-for-coding", "kimi-k2-thinking-turbo"}
|
||||
coding_plan_only = {"kimi-k2.5", "kimi-k2-thinking-turbo"}
|
||||
leaked = set(moonshot_models) & coding_plan_only
|
||||
assert not leaked, f"Moonshot list contains Coding Plan-only models: {leaked}"
|
||||
|
||||
@@ -908,7 +908,7 @@ class TestKimiMoonshotModelListIsolation:
|
||||
def test_coding_plan_list_contains_plan_specific_models(self):
|
||||
from hermes_cli.main import _PROVIDER_MODELS
|
||||
coding_models = _PROVIDER_MODELS["kimi-coding"]
|
||||
assert "kimi-for-coding" in coding_models
|
||||
assert "kimi-k2.5" in coding_models
|
||||
assert "kimi-k2-thinking-turbo" in coding_models
|
||||
|
||||
|
||||
|
||||
@@ -142,7 +142,7 @@ hermes chat --provider zai --model glm-5
|
||||
# Requires: GLM_API_KEY in ~/.hermes/.env
|
||||
|
||||
# Kimi / Moonshot AI
|
||||
hermes chat --provider kimi-coding --model kimi-for-coding
|
||||
hermes chat --provider kimi-coding --model kimi-k2.5
|
||||
# Requires: KIMI_API_KEY in ~/.hermes/.env
|
||||
|
||||
# MiniMax (global endpoint)
|
||||
|
||||
Reference in New Issue
Block a user