feat: add Honcho AI-native memory integration
Opt-in persistent cross-session user modeling via Honcho. Reads ~/.honcho/config.json as the single source of truth (shared with Claude Code, Cursor, and other Honcho-enabled tools). Zero impact when disabled or unconfigured.

- honcho_integration/ package (client, session manager, peer resolution)
- Host-based config resolution matching the claude-honcho/cursor-honcho pattern
- Prefetch user context into the system prompt per conversation turn
- Sync user/assistant messages to Honcho after each exchange
- query_user_context tool for mid-conversation dialectic reasoning
- Gated activation: requires ~/.honcho/config.json with enabled=true
This commit is contained in:
1
cli.py
1
cli.py
@@ -960,6 +960,7 @@ class HermesCLI:
|
||||
platform="cli",
|
||||
session_db=self._session_db,
|
||||
clarify_callback=self._clarify_callback,
|
||||
honcho_session_key=self.session_id,
|
||||
)
|
||||
return True
|
||||
except Exception as e:
|
||||
|
||||
@@ -1444,6 +1444,7 @@ class GatewayRunner:
|
||||
session_id=session_id,
|
||||
tool_progress_callback=progress_callback if tool_progress_enabled else None,
|
||||
platform=platform_key,
|
||||
honcho_session_key=session_key,
|
||||
)
|
||||
|
||||
# Store agent reference for interrupt support
|
||||
|
||||
@@ -127,6 +127,11 @@ DEFAULT_CONFIG = {
|
||||
# Never saved to sessions, logs, or trajectories.
|
||||
"prefill_messages_file": "",
|
||||
|
||||
# Honcho AI-native memory -- reads ~/.honcho/config.json as single source of truth.
|
||||
# This section is only needed for hermes-specific overrides; everything else
|
||||
# (apiKey, workspace, peerName, sessions, enabled) comes from the global config.
|
||||
"honcho": {},
|
||||
|
||||
# Permanently allowed dangerous command patterns (added via "always" approval)
|
||||
"command_allowlist": [],
|
||||
|
||||
@@ -229,6 +234,16 @@ OPTIONAL_ENV_VARS = {
|
||||
"category": "tool",
|
||||
},
|
||||
|
||||
# ── Honcho ──
|
||||
"HONCHO_API_KEY": {
|
||||
"description": "Honcho API key for AI-native persistent memory",
|
||||
"prompt": "Honcho API key",
|
||||
"url": "https://app.honcho.dev",
|
||||
"tools": ["query_user_context"],
|
||||
"password": True,
|
||||
"category": "tool",
|
||||
},
|
||||
|
||||
# ── Messaging platforms ──
|
||||
"TELEGRAM_BOT_TOKEN": {
|
||||
"description": "Telegram bot token from @BotFather",
|
||||
@@ -769,7 +784,7 @@ def set_config_value(key: str, value: str):
|
||||
'FAL_KEY', 'TELEGRAM_BOT_TOKEN', 'DISCORD_BOT_TOKEN',
|
||||
'TERMINAL_SSH_HOST', 'TERMINAL_SSH_USER', 'TERMINAL_SSH_KEY',
|
||||
'SUDO_PASSWORD', 'SLACK_BOT_TOKEN', 'SLACK_APP_TOKEN',
|
||||
'GITHUB_TOKEN',
|
||||
'GITHUB_TOKEN', 'HONCHO_API_KEY',
|
||||
]
|
||||
|
||||
if key.upper() in api_keys or key.upper().startswith('TERMINAL_SSH'):
|
||||
|
||||
9
honcho_integration/__init__.py
Normal file
9
honcho_integration/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
||||
"""Honcho integration for AI-native memory.
|
||||
|
||||
This package is only active when honcho.enabled=true in config and
|
||||
HONCHO_API_KEY is set. All honcho-ai imports are deferred to avoid
|
||||
ImportError when the package is not installed.
|
||||
|
||||
Named ``honcho_integration`` (not ``honcho``) to avoid shadowing the
|
||||
``honcho`` package installed by the ``honcho-ai`` SDK.
|
||||
"""
|
||||
191
honcho_integration/client.py
Normal file
191
honcho_integration/client.py
Normal file
@@ -0,0 +1,191 @@
|
||||
"""Honcho client initialization and configuration.
|
||||
|
||||
Reads the global ~/.honcho/config.json when available, falling back
|
||||
to environment variables.
|
||||
|
||||
Resolution order for host-specific settings:
|
||||
1. Explicit host block fields (always win)
|
||||
2. Flat/global fields from config root
|
||||
3. Defaults (host name as workspace/peer)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from honcho import Honcho
|
||||
|
||||
logger = logging.getLogger(__name__)

# Global Honcho config shared by every Honcho-enabled tool on this machine
# (claude-honcho, cursor-honcho, hermes). Single source of truth for keys,
# identity, and session mappings.
GLOBAL_CONFIG_PATH = Path.home() / ".honcho" / "config.json"
# Key identifying this tool inside the config's "hosts" block.
HOST = "hermes"


@dataclass
class HonchoClientConfig:
    """Configuration for Honcho client, resolved for a specific host.

    Resolution order for host-specific settings (workspace, aiPeer, peerName):
      1. Explicit host block fields (always win)
      2. Flat/global fields from config root
      3. Defaults (host name as workspace/peer)
    """

    host: str = HOST
    workspace_id: str = "hermes"
    api_key: str | None = None
    environment: str = "production"
    # Identity
    peer_name: str | None = None
    ai_peer: str = "hermes"
    linked_hosts: list[str] = field(default_factory=list)
    # Toggles
    enabled: bool = False
    save_messages: bool = True
    # Session resolution
    session_strategy: str = "per-directory"
    session_peer_prefix: bool = False
    sessions: dict[str, str] = field(default_factory=dict)
    # Raw global config for anything else consumers need
    raw: dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_env(cls, workspace_id: str = "hermes") -> HonchoClientConfig:
        """Create config from environment variables (fallback).

        Marked enabled=True because setting HONCHO_API_KEY in the
        environment is itself the opt-in signal when no config file exists.
        """
        return cls(
            workspace_id=workspace_id,
            api_key=os.environ.get("HONCHO_API_KEY"),
            environment=os.environ.get("HONCHO_ENVIRONMENT", "production"),
            enabled=True,
        )

    @classmethod
    def from_global_config(
        cls,
        host: str = HOST,
        config_path: Path | None = None,
    ) -> HonchoClientConfig:
        """Create config from ~/.honcho/config.json.

        Falls back to environment variables if the file doesn't exist
        or cannot be parsed.

        Args:
            host: Host key to resolve inside the config's "hosts" block.
            config_path: Override path for testing; defaults to
                GLOBAL_CONFIG_PATH.

        Returns:
            A resolved HonchoClientConfig for *host*.
        """
        path = config_path or GLOBAL_CONFIG_PATH
        if not path.exists():
            logger.debug("No global Honcho config at %s, falling back to env", path)
            return cls.from_env()

        try:
            raw = json.loads(path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError) as e:
            logger.warning("Failed to read %s: %s, falling back to env", path, e)
            return cls.from_env()

        host_block = (raw.get("hosts") or {}).get(host, {})

        # Explicit host block fields win, then flat/global, then defaults
        workspace = (
            host_block.get("workspace")
            or raw.get("workspace")
            or host
        )
        ai_peer = (
            host_block.get("aiPeer")
            or raw.get("aiPeer")
            or host
        )
        # Same precedence for the user's peer name: the host block may
        # override the global identity (previously only the root-level
        # "peerName" was consulted, contradicting the documented order).
        peer_name = host_block.get("peerName") or raw.get("peerName")
        linked_hosts = host_block.get("linkedHosts", [])

        return cls(
            host=host,
            workspace_id=workspace,
            api_key=raw.get("apiKey") or os.environ.get("HONCHO_API_KEY"),
            environment=raw.get("environment", "production"),
            peer_name=peer_name,
            ai_peer=ai_peer,
            linked_hosts=linked_hosts,
            enabled=raw.get("enabled", False),
            save_messages=raw.get("saveMessages", True),
            session_strategy=raw.get("sessionStrategy", "per-directory"),
            session_peer_prefix=raw.get("sessionPeerPrefix", False),
            sessions=raw.get("sessions", {}),
            raw=raw,
        )

    def resolve_session_name(self, cwd: str | None = None) -> str | None:
        """Resolve session name for a directory.

        Checks manual overrides (the "sessions" map, keyed by absolute
        path) first, then derives from the directory basename, optionally
        prefixed with the user's peer name.

        Args:
            cwd: Directory to resolve for; defaults to the process cwd.

        Returns:
            The session name for *cwd*.
        """
        if not cwd:
            cwd = os.getcwd()

        # Manual override
        manual = self.sessions.get(cwd)
        if manual:
            return manual

        # Derive from directory basename
        base = Path(cwd).name
        if self.session_peer_prefix and self.peer_name:
            return f"{self.peer_name}-{base}"
        return base

    def get_linked_workspaces(self) -> list[str]:
        """Resolve linked host keys to workspace names.

        Skips any linked host that maps to this config's own workspace
        so a host never "links to itself".
        """
        hosts = self.raw.get("hosts", {})
        workspaces = []
        for host_key in self.linked_hosts:
            block = hosts.get(host_key, {})
            ws = block.get("workspace") or host_key
            if ws != self.workspace_id:
                workspaces.append(ws)
        return workspaces
|
||||
|
||||
|
||||
# Module-level singleton; created lazily by get_honcho_client().
_honcho_client: Honcho | None = None


def get_honcho_client(config: HonchoClientConfig | None = None) -> Honcho:
    """Get or create the Honcho client singleton.

    When no config is provided, attempts to load ~/.honcho/config.json
    first, falling back to environment variables.

    Args:
        config: Pre-resolved client configuration. When None, resolved
            via HonchoClientConfig.from_global_config().

    Returns:
        The shared Honcho client instance.

    Raises:
        ValueError: If no API key can be resolved.
        ImportError: If the honcho-ai SDK is not installed.
    """
    global _honcho_client

    if _honcho_client is not None:
        return _honcho_client

    if config is None:
        config = HonchoClientConfig.from_global_config()

    if not config.api_key:
        raise ValueError(
            "Honcho API key not found. Set it in ~/.honcho/config.json "
            "or the HONCHO_API_KEY environment variable. "
            "Get an API key from https://app.honcho.dev"
        )

    # Deferred import so the rest of the integration can be imported
    # without the optional honcho-ai dependency installed.
    try:
        from honcho import Honcho
    except ImportError as exc:
        # Chain the original error so the underlying module-not-found
        # detail survives in the traceback.
        raise ImportError(
            "honcho-ai is required for Honcho integration. "
            "Install it with: pip install honcho-ai"
        ) from exc

    logger.info("Initializing Honcho client (host: %s, workspace: %s)", config.host, config.workspace_id)

    _honcho_client = Honcho(
        workspace_id=config.workspace_id,
        api_key=config.api_key,
        environment=config.environment,
    )

    return _honcho_client
|
||||
|
||||
|
||||
def reset_honcho_client() -> None:
    """Drop the cached Honcho client singleton.

    Primarily for tests: the next get_honcho_client() call will
    re-resolve configuration and build a fresh client.
    """
    global _honcho_client
    _honcho_client = None
|
||||
538
honcho_integration/session.py
Normal file
538
honcho_integration/session.py
Normal file
@@ -0,0 +1,538 @@
|
||||
"""Honcho-based session management for conversation history."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
from honcho_integration.client import get_honcho_client
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from honcho import Honcho
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class HonchoSession:
|
||||
"""
|
||||
A conversation session backed by Honcho.
|
||||
|
||||
Provides a local message cache that syncs to Honcho's
|
||||
AI-native memory system for user modeling.
|
||||
"""
|
||||
|
||||
key: str # channel:chat_id
|
||||
user_peer_id: str # Honcho peer ID for the user
|
||||
assistant_peer_id: str # Honcho peer ID for the assistant
|
||||
honcho_session_id: str # Honcho session ID
|
||||
messages: list[dict[str, Any]] = field(default_factory=list)
|
||||
created_at: datetime = field(default_factory=datetime.now)
|
||||
updated_at: datetime = field(default_factory=datetime.now)
|
||||
metadata: dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def add_message(self, role: str, content: str, **kwargs: Any) -> None:
|
||||
"""Add a message to the local cache."""
|
||||
msg = {
|
||||
"role": role,
|
||||
"content": content,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
**kwargs,
|
||||
}
|
||||
self.messages.append(msg)
|
||||
self.updated_at = datetime.now()
|
||||
|
||||
def get_history(self, max_messages: int = 50) -> list[dict[str, Any]]:
|
||||
"""Get message history for LLM context."""
|
||||
recent = (
|
||||
self.messages[-max_messages:]
|
||||
if len(self.messages) > max_messages
|
||||
else self.messages
|
||||
)
|
||||
return [{"role": m["role"], "content": m["content"]} for m in recent]
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clear all messages in the session."""
|
||||
self.messages = []
|
||||
self.updated_at = datetime.now()
|
||||
|
||||
|
||||
class HonchoSessionManager:
    """
    Manages conversation sessions using Honcho.

    Runs alongside hermes' existing SQLite state and file-based memory,
    adding persistent cross-session user modeling via Honcho's AI-native memory.

    Three per-instance caches:
      - ``_cache``: caller-facing session key -> local HonchoSession wrapper
      - ``_peers_cache``: sanitized peer id -> Honcho peer handle
      - ``_sessions_cache``: sanitized session id -> Honcho session handle
    """

    def __init__(
        self,
        honcho: Honcho | None = None,
        context_tokens: int | None = None,
        config: Any | None = None,
    ):
        """
        Initialize the session manager.

        Args:
            honcho: Optional Honcho client. If not provided, uses the singleton.
            context_tokens: Max tokens for context() calls (None = Honcho default).
            config: HonchoClientConfig from global config (provides peer_name, ai_peer, etc.).
        """
        self._honcho = honcho
        self._context_tokens = context_tokens
        self._config = config
        # Local session wrappers keyed by caller-facing key (e.g. "telegram:123").
        self._cache: dict[str, HonchoSession] = {}
        # Honcho peer handles keyed by sanitized peer id.
        self._peers_cache: dict[str, Any] = {}
        # Honcho session handles keyed by sanitized session id.
        self._sessions_cache: dict[str, Any] = {}

    @property
    def honcho(self) -> Honcho:
        """Get the Honcho client, initializing if needed.

        Lazy so the manager can be constructed without touching the
        network / API key until first real use.
        """
        if self._honcho is None:
            self._honcho = get_honcho_client()
        return self._honcho

    def _get_or_create_peer(self, peer_id: str) -> Any:
        """
        Get or create a Honcho peer.

        Peers are lazy -- no API call until first use.
        Observation settings are controlled per-session via SessionPeerConfig.

        Args:
            peer_id: Sanitized Honcho peer id.

        Returns:
            The (possibly cached) peer handle.
        """
        if peer_id in self._peers_cache:
            return self._peers_cache[peer_id]

        peer = self.honcho.peer(peer_id)
        self._peers_cache[peer_id] = peer
        return peer

    def _get_or_create_honcho_session(
        self, session_id: str, user_peer: Any, assistant_peer: Any
    ) -> tuple[Any, list]:
        """
        Get or create a Honcho session with peers configured.

        Args:
            session_id: Sanitized Honcho session id.
            user_peer: Peer handle for the user.
            assistant_peer: Peer handle for the assistant.

        Returns:
            Tuple of (honcho_session, existing_messages). On a cache hit the
            message list is empty -- existing messages were already loaded
            when the session was first created.
        """
        if session_id in self._sessions_cache:
            logger.debug("Honcho session '%s' retrieved from cache", session_id)
            return self._sessions_cache[session_id], []

        session = self.honcho.session(session_id)

        # Configure peer observation settings: model the user but not the
        # assistant; both observe the other side of the conversation.
        from honcho.session import SessionPeerConfig
        user_config = SessionPeerConfig(observe_me=True, observe_others=True)
        ai_config = SessionPeerConfig(observe_me=False, observe_others=True)

        session.add_peers([(user_peer, user_config), (assistant_peer, ai_config)])

        # Load existing messages via context() - single call for messages + metadata
        existing_messages = []
        try:
            ctx = session.context(summary=True, tokens=self._context_tokens)
            existing_messages = ctx.messages or []

            # Verify chronological ordering; downstream history replay assumes it.
            if existing_messages and len(existing_messages) > 1:
                timestamps = [m.created_at for m in existing_messages if m.created_at]
                if timestamps and timestamps != sorted(timestamps):
                    logger.warning(
                        "Honcho messages not chronologically ordered for session '%s', sorting",
                        session_id,
                    )
                    # Messages without a created_at sort first via datetime.min.
                    existing_messages = sorted(
                        existing_messages,
                        key=lambda m: m.created_at or datetime.min,
                    )

            if existing_messages:
                logger.info(
                    "Honcho session '%s' retrieved (%d existing messages)",
                    session_id, len(existing_messages),
                )
            else:
                logger.info("Honcho session '%s' created (new)", session_id)
        except Exception as e:
            # Best-effort: a context fetch failure still yields a usable session,
            # just without prior history.
            logger.warning(
                "Honcho session '%s' loaded (failed to fetch context: %s)",
                session_id, e,
            )

        self._sessions_cache[session_id] = session
        return session, existing_messages

    def _sanitize_id(self, id_str: str) -> str:
        """Sanitize an ID to match Honcho's pattern: ^[a-zA-Z0-9_-]+"""
        return re.sub(r'[^a-zA-Z0-9_-]', '-', id_str)

    def get_or_create(self, key: str) -> HonchoSession:
        """
        Get an existing session or create a new one.

        Args:
            key: Session key (usually channel:chat_id).

        Returns:
            The session.
        """
        if key in self._cache:
            logger.debug("Local session cache hit: %s", key)
            return self._cache[key]

        # Use peer names from global config when available
        if self._config and self._config.peer_name:
            user_peer_id = self._sanitize_id(self._config.peer_name)
        else:
            # Fallback: derive from session key
            parts = key.split(":", 1)
            channel = parts[0] if len(parts) > 1 else "default"
            chat_id = parts[1] if len(parts) > 1 else key
            user_peer_id = self._sanitize_id(f"user-{channel}-{chat_id}")

        assistant_peer_id = (
            self._config.ai_peer if self._config else "hermes-assistant"
        )

        # Sanitize session ID for Honcho
        honcho_session_id = self._sanitize_id(key)

        # Get or create peers
        user_peer = self._get_or_create_peer(user_peer_id)
        assistant_peer = self._get_or_create_peer(assistant_peer_id)

        # Get or create Honcho session
        honcho_session, existing_messages = self._get_or_create_honcho_session(
            honcho_session_id, user_peer, assistant_peer
        )

        # Convert Honcho messages to local format; anything not authored by
        # the assistant peer is attributed to the user.
        local_messages = []
        for msg in existing_messages:
            role = "assistant" if msg.peer_id == assistant_peer_id else "user"
            local_messages.append({
                "role": role,
                "content": msg.content,
                "timestamp": msg.created_at.isoformat() if msg.created_at else "",
                "_synced": True,  # Already in Honcho
            })

        # Create local session wrapper with existing messages
        session = HonchoSession(
            key=key,
            user_peer_id=user_peer_id,
            assistant_peer_id=assistant_peer_id,
            honcho_session_id=honcho_session_id,
            messages=local_messages,
        )

        self._cache[key] = session
        return session

    def save(self, session: HonchoSession) -> None:
        """
        Save messages to Honcho.

        Syncs only new (unsynced) messages from the local cache. On
        failure the messages remain unsynced so a later save() retries them.
        """
        if not session.messages:
            return

        # Get the Honcho session and peers
        user_peer = self._get_or_create_peer(session.user_peer_id)
        assistant_peer = self._get_or_create_peer(session.assistant_peer_id)
        honcho_session = self._sessions_cache.get(session.honcho_session_id)

        if not honcho_session:
            honcho_session, _ = self._get_or_create_honcho_session(
                session.honcho_session_id, user_peer, assistant_peer
            )

        # Only send new messages (those without a '_synced' flag)
        new_messages = [m for m in session.messages if not m.get("_synced")]

        if not new_messages:
            return

        # Non-user roles (assistant, system, tool, ...) are all attributed
        # to the assistant peer.
        honcho_messages = []
        for msg in new_messages:
            peer = user_peer if msg["role"] == "user" else assistant_peer
            honcho_messages.append(peer.message(msg["content"]))

        try:
            honcho_session.add_messages(honcho_messages)
            for msg in new_messages:
                msg["_synced"] = True
            logger.debug("Synced %d messages to Honcho for %s", len(honcho_messages), session.key)
        except Exception as e:
            # Mark explicitly unsynced so the next save() picks them up again.
            for msg in new_messages:
                msg["_synced"] = False
            logger.error("Failed to sync messages to Honcho: %s", e)

        # Update cache
        self._cache[session.key] = session

    def delete(self, key: str) -> bool:
        """Delete a session from local cache.

        Does not delete anything from Honcho itself.

        Returns:
            True if a cached session was removed, False otherwise.
        """
        if key in self._cache:
            del self._cache[key]
            return True
        return False

    def new_session(self, key: str) -> HonchoSession:
        """
        Create a new session, preserving the old one for user modeling.

        Creates a fresh session with a new ID while keeping the old
        session's data in Honcho for continued user modeling.

        Args:
            key: The original (caller-facing) session key.

        Returns:
            The fresh session, cached under both *key* and the
            timestamp-suffixed key.
        """
        import time

        # Remove old session from caches (but don't delete from Honcho)
        old_session = self._cache.pop(key, None)
        if old_session:
            self._sessions_cache.pop(old_session.honcho_session_id, None)

        # Create new session with timestamp suffix
        timestamp = int(time.time())
        new_key = f"{key}:{timestamp}"

        # get_or_create will create a fresh session
        session = self.get_or_create(new_key)

        # Cache under both original key and timestamped key so lookups by
        # the original key resolve to the fresh session.
        self._cache[key] = session
        self._cache[new_key] = session

        logger.info("Created new session for %s (honcho: %s)", key, session.honcho_session_id)
        return session

    def get_user_context(self, session_key: str, query: str) -> str:
        """
        Query Honcho's dialectic chat for user context.

        Args:
            session_key: The session key to get context for.
            query: Natural language question about the user.

        Returns:
            Honcho's response about the user, or an explanatory string on
            failure (this method never raises).
        """
        session = self._cache.get(session_key)
        if not session:
            return "No session found for this context."

        user_peer = self._get_or_create_peer(session.user_peer_id)

        try:
            return user_peer.chat(query)
        except Exception as e:
            logger.error("Failed to get user context from Honcho: %s", e)
            return f"Unable to retrieve user context: {e}"

    def get_prefetch_context(self, session_key: str, user_message: str | None = None) -> dict[str, str]:
        """
        Pre-fetch user context using Honcho's context() method.

        Single API call that returns the user's representation
        and peer card, using semantic search based on the user's message.

        Args:
            session_key: The session key to get context for.
            user_message: The user's message for semantic search.

        Returns:
            Dictionary with 'representation' and 'card' keys; empty dict
            when the session (local or Honcho) is not cached or the fetch fails.
        """
        session = self._cache.get(session_key)
        if not session:
            return {}

        honcho_session = self._sessions_cache.get(session.honcho_session_id)
        if not honcho_session:
            return {}

        try:
            ctx = honcho_session.context(
                summary=False,
                tokens=self._context_tokens,
                peer_target=session.user_peer_id,
                search_query=user_message,
            )
            # peer_card is list[str] in SDK v2, join for prompt injection
            card = ctx.peer_card or []
            card_str = "\n".join(card) if isinstance(card, list) else str(card)
            return {
                "representation": ctx.peer_representation or "",
                "card": card_str,
            }
        except Exception as e:
            logger.warning("Failed to fetch context from Honcho: %s", e)
            return {}

    def migrate_local_history(self, session_key: str, messages: list[dict[str, Any]]) -> bool:
        """
        Upload local session history to Honcho as a file.

        Used when Honcho activates mid-conversation to preserve prior context.

        Args:
            session_key: The session key (e.g., "telegram:123456").
            messages: Local messages (dicts with role, content, timestamp).

        Returns:
            True if upload succeeded, False otherwise.
        """
        sanitized = self._sanitize_id(session_key)
        honcho_session = self._sessions_cache.get(sanitized)
        if not honcho_session:
            logger.warning("No Honcho session cached for '%s', skipping migration", session_key)
            return False

        # Resolve user peer for attribution
        # NOTE(review): this always derives the peer id from the session key,
        # while get_or_create() prefers config.peer_name when set -- confirm the
        # cached-peer lookup below still succeeds when a peerName is configured.
        parts = session_key.split(":", 1)
        channel = parts[0] if len(parts) > 1 else "default"
        chat_id = parts[1] if len(parts) > 1 else session_key
        user_peer_id = self._sanitize_id(f"user-{channel}-{chat_id}")
        user_peer = self._peers_cache.get(user_peer_id)
        if not user_peer:
            logger.warning("No user peer cached for '%s', skipping migration", user_peer_id)
            return False

        content_bytes = self._format_migration_transcript(session_key, messages)
        # Backdate the upload to the first local message when available.
        first_ts = messages[0].get("timestamp") if messages else None

        try:
            honcho_session.upload_file(
                file=("prior_history.txt", content_bytes, "text/plain"),
                peer=user_peer,
                metadata={"source": "local_jsonl", "count": len(messages)},
                created_at=first_ts,
            )
            logger.info("Migrated %d local messages to Honcho for %s", len(messages), session_key)
            return True
        except Exception as e:
            logger.error("Failed to upload local history to Honcho for %s: %s", session_key, e)
            return False

    @staticmethod
    def _format_migration_transcript(session_key: str, messages: list[dict[str, Any]]) -> bytes:
        """Format local messages as an XML transcript for Honcho file upload.

        Returns:
            UTF-8 encoded transcript bytes.
        """
        timestamps = [m.get("timestamp", "") for m in messages]
        time_range = f"{timestamps[0]} to {timestamps[-1]}" if timestamps else "unknown"

        lines = [
            "<prior_conversation_history>",
            "<context>",
            "This conversation history occurred BEFORE the Honcho memory system was activated.",
            "These messages are the preceding elements of this conversation session and should",
            "be treated as foundational context for all subsequent interactions. The user and",
            "assistant have already established rapport through these exchanges.",
            "</context>",
            "",
            f'<transcript session_key="{session_key}" message_count="{len(messages)}"',
            f' time_range="{time_range}">',
            "",
        ]
        for msg in messages:
            ts = msg.get("timestamp", "?")
            role = msg.get("role", "unknown")
            content = msg.get("content", "")
            lines.append(f"[{ts}] {role}: {content}")

        lines.append("")
        lines.append("</transcript>")
        lines.append("</prior_conversation_history>")

        return "\n".join(lines).encode("utf-8")

    def migrate_memory_files(self, session_key: str, memory_dir: str) -> bool:
        """
        Upload MEMORY.md and USER.md to Honcho as files.

        Used when Honcho activates on an instance that already has locally
        consolidated memory. Backwards compatible -- skips if files don't exist.

        Args:
            session_key: The session key to associate files with.
            memory_dir: Path to the memories directory (~/.hermes/memories/).

        Returns:
            True if at least one file was uploaded, False otherwise.
        """
        from pathlib import Path
        memory_path = Path(memory_dir)

        if not memory_path.exists():
            return False

        sanitized = self._sanitize_id(session_key)
        honcho_session = self._sessions_cache.get(sanitized)
        if not honcho_session:
            logger.warning("No Honcho session cached for '%s', skipping memory migration", session_key)
            return False

        # Resolve user peer for attribution
        # NOTE(review): same key-derived peer-id lookup as migrate_local_history;
        # see the note there about configured peer names.
        parts = session_key.split(":", 1)
        channel = parts[0] if len(parts) > 1 else "default"
        chat_id = parts[1] if len(parts) > 1 else session_key
        user_peer_id = self._sanitize_id(f"user-{channel}-{chat_id}")
        user_peer = self._peers_cache.get(user_peer_id)
        if not user_peer:
            logger.warning("No user peer cached for '%s', skipping memory migration", user_peer_id)
            return False

        uploaded = False
        files = [
            ("MEMORY.md", "consolidated_memory.md", "Long-term agent notes and preferences"),
            ("USER.md", "user_profile.md", "User profile and preferences"),
        ]

        for filename, upload_name, description in files:
            filepath = memory_path / filename
            if not filepath.exists():
                continue
            content = filepath.read_text(encoding="utf-8").strip()
            if not content:
                continue

            wrapped = (
                f"<prior_memory_file>\n"
                f"<context>\n"
                f"This file was consolidated from local conversations BEFORE Honcho was activated.\n"
                f"{description}. Treat as foundational context for this user.\n"
                f"</context>\n"
                f"\n"
                f"{content}\n"
                f"</prior_memory_file>\n"
            )

            try:
                honcho_session.upload_file(
                    file=(upload_name, wrapped.encode("utf-8"), "text/plain"),
                    peer=user_peer,
                    metadata={"source": "local_memory", "original_file": filename},
                )
                logger.info("Uploaded %s to Honcho for %s", filename, session_key)
                uploaded = True
            except Exception as e:
                # Per-file best effort: one failed upload doesn't abort the rest.
                logger.error("Failed to upload %s to Honcho: %s", filename, e)

        return uploaded

    def list_sessions(self) -> list[dict[str, Any]]:
        """List all cached sessions.

        Returns:
            One summary dict (key, created_at, updated_at, message_count)
            per locally cached session.
        """
        return [
            {
                "key": s.key,
                "created_at": s.created_at.isoformat(),
                "updated_at": s.updated_at.isoformat(),
                "message_count": len(s.messages),
            }
            for s in self._cache.values()
        ]
|
||||
@@ -93,6 +93,7 @@ def _discover_tools():
|
||||
"tools.delegate_tool",
|
||||
"tools.process_registry",
|
||||
"tools.send_message_tool",
|
||||
"tools.honcho_tools",
|
||||
]
|
||||
import importlib
|
||||
for mod_name in _modules:
|
||||
|
||||
@@ -46,6 +46,7 @@ slack = ["slack-bolt>=1.18.0", "slack-sdk>=3.27.0"]
|
||||
cli = ["simple-term-menu"]
|
||||
tts-premium = ["elevenlabs"]
|
||||
pty = ["ptyprocess>=0.7.0"]
|
||||
honcho = ["honcho-ai>=2.0.1"]
|
||||
all = [
|
||||
"hermes-agent[modal]",
|
||||
"hermes-agent[messaging]",
|
||||
@@ -55,6 +56,7 @@ all = [
|
||||
"hermes-agent[tts-premium]",
|
||||
"hermes-agent[slack]",
|
||||
"hermes-agent[pty]",
|
||||
"hermes-agent[honcho]",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
@@ -65,7 +67,7 @@ hermes-agent = "run_agent:main"
|
||||
py-modules = ["run_agent", "model_tools", "toolsets", "batch_runner", "trajectory_compressor", "toolset_distributions", "cli", "hermes_constants"]
|
||||
|
||||
[tool.setuptools.packages.find]
|
||||
include = ["tools", "hermes_cli", "gateway", "cron"]
|
||||
include = ["tools", "hermes_cli", "gateway", "cron", "honcho_integration"]
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
testpaths = ["tests"]
|
||||
|
||||
103
run_agent.py
103
run_agent.py
@@ -131,6 +131,7 @@ class AIAgent:
|
||||
skip_context_files: bool = False,
|
||||
skip_memory: bool = False,
|
||||
session_db=None,
|
||||
honcho_session_key: str = None,
|
||||
):
|
||||
"""
|
||||
Initialize the AI Agent.
|
||||
@@ -168,6 +169,8 @@ class AIAgent:
|
||||
skip_context_files (bool): If True, skip auto-injection of SOUL.md, AGENTS.md, and .cursorrules
|
||||
into the system prompt. Use this for batch processing and data generation to avoid
|
||||
polluting trajectories with user-specific persona or project instructions.
|
||||
honcho_session_key (str): Session key for Honcho integration (e.g., "telegram:123456" or CLI session_id).
|
||||
When provided and Honcho is enabled in config, enables persistent cross-session user modeling.
|
||||
"""
|
||||
self.model = model
|
||||
self.max_iterations = max_iterations
|
||||
@@ -418,6 +421,45 @@ class AIAgent:
|
||||
except Exception:
|
||||
pass # Memory is optional -- don't break agent init
|
||||
|
||||
# Honcho AI-native memory (cross-session user modeling)
|
||||
# Reads ~/.honcho/config.json as the single source of truth.
|
||||
self._honcho = None # HonchoSessionManager | None
|
||||
self._honcho_session_key = honcho_session_key
|
||||
if not skip_memory:
|
||||
try:
|
||||
from honcho_integration.client import HonchoClientConfig, get_honcho_client
|
||||
hcfg = HonchoClientConfig.from_global_config()
|
||||
if hcfg.enabled and hcfg.api_key:
|
||||
from honcho_integration.session import HonchoSessionManager
|
||||
client = get_honcho_client(hcfg)
|
||||
self._honcho = HonchoSessionManager(
|
||||
honcho=client,
|
||||
config=hcfg,
|
||||
)
|
||||
# Resolve session key: explicit arg > global sessions map > fallback
|
||||
if not self._honcho_session_key:
|
||||
self._honcho_session_key = (
|
||||
hcfg.resolve_session_name()
|
||||
or "hermes-default"
|
||||
)
|
||||
# Ensure session exists in Honcho
|
||||
self._honcho.get_or_create(self._honcho_session_key)
|
||||
# Inject session context into the honcho tool module
|
||||
from tools.honcho_tools import set_session_context
|
||||
set_session_context(self._honcho, self._honcho_session_key)
|
||||
logger.info(
|
||||
"Honcho active (session: %s, user: %s, workspace: %s)",
|
||||
self._honcho_session_key, hcfg.peer_name, hcfg.workspace_id,
|
||||
)
|
||||
else:
|
||||
if not hcfg.enabled:
|
||||
logger.debug("Honcho disabled in global config")
|
||||
elif not hcfg.api_key:
|
||||
logger.debug("Honcho enabled but no API key configured")
|
||||
except Exception as e:
|
||||
logger.debug("Honcho init failed (non-fatal): %s", e)
|
||||
self._honcho = None
|
||||
|
||||
# Skills config: nudge interval for skill creation reminders
|
||||
self._skill_nudge_interval = 15
|
||||
try:
|
||||
@@ -1056,7 +1098,46 @@ class AIAgent:
|
||||
def is_interrupted(self) -> bool:
|
||||
"""Check if an interrupt has been requested."""
|
||||
return self._interrupt_requested
|
||||
|
||||
|
||||
# ── Honcho integration helpers ──
|
||||
|
||||
def _honcho_prefetch(self, user_message: str) -> str:
    """Fetch user context from Honcho for system prompt injection.

    Asks the session manager for a prefetch context keyed on the active
    session and the incoming user message, then renders the non-empty
    sections as a single markdown block.

    Returns:
        A formatted "# Honcho User Context" block, or "" when Honcho is
        inactive, returns nothing useful, or errors (always non-fatal).
    """
    if not (self._honcho and self._honcho_session_key):
        return ""
    try:
        ctx = self._honcho.get_prefetch_context(self._honcho_session_key, user_message)
        if not ctx:
            return ""
        # Keep only the non-empty sections, preserving a fixed order:
        # the user representation first, then the card summary.
        sections = [
            section
            for section in (ctx.get("representation", ""), ctx.get("card", ""))
            if section
        ]
        if not sections:
            return ""
        return "# Honcho User Context\n" + "\n\n".join(sections)
    except Exception as exc:
        # Prefetch is best-effort; never let it break a conversation turn.
        logger.debug("Honcho prefetch failed (non-fatal): %s", exc)
        return ""
|
||||
|
||||
def _honcho_sync(self, user_content: str, assistant_content: str) -> None:
    """Sync the user/assistant message pair to Honcho.

    No-op when Honcho is inactive; any persistence failure is logged at
    debug level and swallowed so the agent loop is never disrupted.
    """
    if not (self._honcho and self._honcho_session_key):
        return
    try:
        session = self._honcho.get_or_create(self._honcho_session_key)
        # Record the exchange in conversational order: user first.
        for role, content in (("user", user_content), ("assistant", assistant_content)):
            session.add_message(role, content)
        self._honcho.save(session)
    except Exception as exc:
        logger.debug("Honcho sync failed (non-fatal): %s", exc)
|
||||
|
||||
def _build_system_prompt(self, system_message: str = None) -> str:
|
||||
"""
|
||||
Assemble the full system prompt from all layers.
|
||||
@@ -1711,6 +1792,10 @@ class AIAgent:
|
||||
# Track user turns for memory flush and periodic nudge logic
|
||||
self._user_turn_count += 1
|
||||
|
||||
# Preserve the original user message before nudge injection.
|
||||
# Honcho should receive the actual user input, not system nudges.
|
||||
original_user_message = user_message
|
||||
|
||||
# Periodic memory nudge: remind the model to consider saving memories.
|
||||
# Counter resets whenever the memory tool is actually used.
|
||||
if (self._memory_nudge_interval > 0
|
||||
@@ -1735,6 +1820,14 @@ class AIAgent:
|
||||
)
|
||||
self._iters_since_skill = 0
|
||||
|
||||
# Honcho prefetch: retrieve user context for system prompt injection
|
||||
self._honcho_context = ""
|
||||
if self._honcho and self._honcho_session_key:
|
||||
try:
|
||||
self._honcho_context = self._honcho_prefetch(user_message)
|
||||
except Exception as e:
|
||||
logger.debug("Honcho prefetch failed (non-fatal): %s", e)
|
||||
|
||||
# Add user message
|
||||
user_msg = {"role": "user", "content": user_message}
|
||||
messages.append(user_msg)
|
||||
@@ -1813,6 +1906,8 @@ class AIAgent:
|
||||
effective_system = active_system_prompt or ""
|
||||
if self.ephemeral_system_prompt:
|
||||
effective_system = (effective_system + "\n\n" + self.ephemeral_system_prompt).strip()
|
||||
if self._honcho_context:
|
||||
effective_system = (effective_system + "\n\n" + self._honcho_context).strip()
|
||||
if effective_system:
|
||||
api_messages = [{"role": "system", "content": effective_system}] + api_messages
|
||||
|
||||
@@ -2471,7 +2566,11 @@ class AIAgent:
|
||||
|
||||
# Persist session to both JSON log and SQLite
|
||||
self._persist_session(messages, conversation_history)
|
||||
|
||||
|
||||
# Sync conversation to Honcho for user modeling
|
||||
if final_response and not interrupted:
|
||||
self._honcho_sync(original_user_message, final_response)
|
||||
|
||||
# Build result with interrupt info if applicable
|
||||
result = {
|
||||
"final_response": final_response,
|
||||
|
||||
102
tools/honcho_tools.py
Normal file
102
tools/honcho_tools.py
Normal file
@@ -0,0 +1,102 @@
|
||||
"""Honcho tool for querying user context via dialectic reasoning.
|
||||
|
||||
Registers ``query_user_context`` -- an LLM-callable tool that asks Honcho
|
||||
about the current user's history, preferences, goals, and communication
|
||||
style. The session key is injected at runtime by the agent loop via
|
||||
``set_session_context()``.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ── Module-level state (injected by AIAgent at init time) ──

# Both values are set together by set_session_context() and cleared together
# by clear_session_context(); the tool handler treats "either is None" as
# "Honcho inactive for this process".
_session_manager: "HonchoSessionManager | None" = None  # HonchoSessionManager instance
_session_key: str | None = None  # Current session key (e.g., "telegram:123456")
|
||||
|
||||
|
||||
def set_session_context(session_manager, session_key: str) -> None:
    """Install the active Honcho session manager and key as module state.

    AIAgent.__init__ invokes this once Honcho is confirmed active, which
    makes the ``query_user_context`` tool handler operational.
    """
    global _session_manager, _session_key
    _session_key = session_key
    _session_manager = session_manager
|
||||
|
||||
|
||||
def clear_session_context() -> None:
    """Drop the module state so the tool reports Honcho as inactive.

    Intended for shutdown paths and for test isolation between cases.
    """
    global _session_manager, _session_key
    _session_key = None
    _session_manager = None
|
||||
|
||||
|
||||
# ── Tool schema ──
|
||||
|
||||
# The long prose strings are assembled separately so the schema literal
# itself stays compact and easy to scan.
_TOOL_DESCRIPTION = (
    "Query Honcho to retrieve relevant context about the user based on their "
    "history and preferences. Use this when you need to understand the user's "
    "background, preferences, past interactions, or goals. This helps you "
    "personalize your responses and provide more relevant assistance."
)

_QUERY_PARAM_DESCRIPTION = (
    "A natural language question about the user. Examples: "
    "'What are this user's main goals?', "
    "'What communication style does this user prefer?', "
    "'What topics has this user discussed recently?', "
    "'What is this user's technical expertise level?'"
)

# JSON-Schema-style tool declaration consumed by the tool registry.
HONCHO_TOOL_SCHEMA = {
    "name": "query_user_context",
    "description": _TOOL_DESCRIPTION,
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": _QUERY_PARAM_DESCRIPTION,
            },
        },
        "required": ["query"],
    },
}
|
||||
|
||||
|
||||
# ── Tool handler ──
|
||||
|
||||
def _handle_query_user_context(args: dict, **kw) -> str:
    """Run a dialectic query against Honcho for the active session.

    Args:
        args: Tool-call arguments; requires a non-empty "query" string.

    Returns:
        A JSON string: ``{"result": ...}`` on success, otherwise
        ``{"error": ...}`` (missing parameter, inactive session, or
        a query failure).
    """
    query = args.get("query", "")
    if not query:
        return json.dumps({"error": "Missing required parameter: query"})
    if not (_session_manager and _session_key):
        return json.dumps({"error": "Honcho is not active for this session."})

    try:
        answer = _session_manager.get_user_context(_session_key, query)
    except Exception as exc:
        logger.error("Error querying Honcho user context: %s", exc)
        return json.dumps({"error": f"Failed to query user context: {exc}"})
    return json.dumps({"result": answer})
|
||||
|
||||
|
||||
# ── Availability check ──
|
||||
|
||||
def _check_honcho_available() -> bool:
    """Gate the tool: available only once AIAgent has injected session state."""
    return all(value is not None for value in (_session_manager, _session_key))
|
||||
|
||||
|
||||
# ── Registration ──

# NOTE(review): this import is deliberately placed at the bottom of the
# module — presumably to avoid a circular import with tools.registry;
# confirm before hoisting it to the top-of-file import block.
from tools.registry import registry

# The tool is registered unconditionally at import time, but check_fn gates
# its visibility: it only appears once AIAgent calls set_session_context().
registry.register(
    name="query_user_context",
    toolset="honcho",
    schema=HONCHO_TOOL_SCHEMA,
    handler=_handle_query_user_context,
    check_fn=_check_honcho_available,
)
|
||||
@@ -60,6 +60,8 @@ _HERMES_CORE_TOOLS = [
|
||||
"schedule_cronjob", "list_cronjobs", "remove_cronjob",
|
||||
# Cross-platform messaging (gated on gateway running via check_fn)
|
||||
"send_message",
|
||||
# Honcho user context (gated on honcho being active via check_fn)
|
||||
"query_user_context",
|
||||
]
|
||||
|
||||
|
||||
@@ -185,6 +187,12 @@ TOOLSETS = {
|
||||
"tools": ["delegate_task"],
|
||||
"includes": []
|
||||
},
|
||||
|
||||
"honcho": {
|
||||
"description": "Honcho AI-native memory for persistent cross-session user modeling",
|
||||
"tools": ["query_user_context"],
|
||||
"includes": []
|
||||
},
|
||||
|
||||
|
||||
# Scenario-specific toolsets
|
||||
|
||||
Reference in New Issue
Block a user