fix(honcho): plugin drift overhaul -- observation config, chunking, setup wizard, docs, dead code cleanup

Salvaged from PR #5045 by erosika.

- Replace memoryMode/peer_memory_modes with granular per-peer observation config
- Add message chunking for Honcho API limits (25k chars default)
- Add dialectic input guard (10k chars default)
- Add dialecticDynamic toggle for reasoning level auto-bump
- Rewrite setup wizard with cloud/local deployment picker
- Switch peer card/profile/search from session.context() to direct peer APIs
- Add server-side observation sync via get_peer_configuration()
- Fix base_url/baseUrl config mismatch for self-hosted setups
- Fix local auth leak (cloud API keys no longer sent to local instances)
- Remove dead code: memoryMode, peer_memory_modes, linkedHosts, suppress flags, SOUL.md aiPeer sync
- Add post_setup hook to memory_setup.py for provider-specific setup wizards
- Comprehensive README rewrite with full config reference
- New optional skill: autonomous-ai-agents/honcho
- Expanded memory-providers.md with multi-profile docs
- 9 new tests (chunking, dialectic guard, peer lookups), 14 dead tests removed
- Fix 2 pre-existing TestResolveConfigPath filesystem isolation failures
This commit is contained in:
erosika
2026-04-05 12:03:15 -07:00
committed by Teknium
parent 12724e6295
commit c02c3dc723
12 changed files with 1265 additions and 443 deletions

View File

@@ -86,7 +86,7 @@ class HonchoSessionManager:
honcho: Optional Honcho client. If not provided, uses the singleton.
context_tokens: Max tokens for context() calls (None = Honcho default).
config: HonchoClientConfig from global config (provides peer_name, ai_peer,
write_frequency, memory_mode, etc.).
write_frequency, observation, etc.).
"""
self._honcho = honcho
self._context_tokens = context_tokens
@@ -107,11 +107,25 @@ class HonchoSessionManager:
self._dialectic_reasoning_level: str = (
config.dialectic_reasoning_level if config else "low"
)
self._dialectic_dynamic: bool = (
config.dialectic_dynamic if config else True
)
self._dialectic_max_chars: int = (
config.dialectic_max_chars if config else 600
)
self._observation_mode: str = (
config.observation_mode if config else "unified"
config.observation_mode if config else "directional"
)
# Per-peer observation booleans (granular, from config)
self._user_observe_me: bool = config.user_observe_me if config else True
self._user_observe_others: bool = config.user_observe_others if config else True
self._ai_observe_me: bool = config.ai_observe_me if config else True
self._ai_observe_others: bool = config.ai_observe_others if config else True
self._message_max_chars: int = (
config.message_max_chars if config else 25000
)
self._dialectic_max_input_chars: int = (
config.dialectic_max_input_chars if config else 10000
)
# Async write queue — started lazily on first enqueue
@@ -162,20 +176,43 @@ class HonchoSessionManager:
session = self.honcho.session(session_id)
# Configure peer observation settings based on observation_mode.
# Unified: user peer observes self, AI peer passive — all agents share
# one observation pool via user self-observations.
# Directional: AI peer observes user — each agent keeps its own view.
# Configure per-peer observation from granular booleans.
# These map 1:1 to Honcho's SessionPeerConfig toggles.
try:
from honcho.session import SessionPeerConfig
if self._observation_mode == "directional":
user_config = SessionPeerConfig(observe_me=True, observe_others=False)
ai_config = SessionPeerConfig(observe_me=False, observe_others=True)
else: # unified (default)
user_config = SessionPeerConfig(observe_me=True, observe_others=False)
ai_config = SessionPeerConfig(observe_me=False, observe_others=False)
user_config = SessionPeerConfig(
observe_me=self._user_observe_me,
observe_others=self._user_observe_others,
)
ai_config = SessionPeerConfig(
observe_me=self._ai_observe_me,
observe_others=self._ai_observe_others,
)
session.add_peers([(user_peer, user_config), (assistant_peer, ai_config)])
# Sync back: server-side config (set via Honcho UI) wins over
# local defaults. Read the effective config after add_peers.
# Note: observation booleans are manager-scoped, not per-session.
# Last session init wins. Fine for CLI; gateway should scope per-session.
try:
server_user = session.get_peer_configuration(user_peer)
server_ai = session.get_peer_configuration(assistant_peer)
if server_user.observe_me is not None:
self._user_observe_me = server_user.observe_me
if server_user.observe_others is not None:
self._user_observe_others = server_user.observe_others
if server_ai.observe_me is not None:
self._ai_observe_me = server_ai.observe_me
if server_ai.observe_others is not None:
self._ai_observe_others = server_ai.observe_others
logger.debug(
"Honcho observation synced from server: user(me=%s,others=%s) ai(me=%s,others=%s)",
self._user_observe_me, self._user_observe_others,
self._ai_observe_me, self._ai_observe_others,
)
except Exception as e:
logger.debug("Honcho get_peer_configuration failed (using local config): %s", e)
except Exception as e:
logger.warning(
"Honcho session '%s' add_peers failed (non-fatal): %s",
@@ -451,17 +488,22 @@ class HonchoSessionManager:
def _dynamic_reasoning_level(self, query: str) -> str:
"""
Pick a reasoning level based on message complexity.
Pick a reasoning level for a dialectic query.
Uses the configured default as a floor; bumps up for longer or
more complex messages so Honcho applies more inference where it matters.
When dialecticDynamic is true (default), auto-bumps based on query
length so Honcho applies more inference where it matters:
< 120 chars default (typically "low")
120–400 chars → one level above default (cap at "high")
> 400 chars → two levels above default (cap at "high")
< 120 chars -> configured default (typically "low")
120-400 chars -> +1 level above default (cap at "high")
> 400 chars -> +2 levels above default (cap at "high")
"max" is never selected automatically reserve it for explicit config.
"max" is never selected automatically -- reserve it for explicit config.
When dialecticDynamic is false, always returns the configured level.
"""
if not self._dialectic_dynamic:
return self._dialectic_reasoning_level
levels = self._REASONING_LEVELS
default_idx = levels.index(self._dialectic_reasoning_level) if self._dialectic_reasoning_level in levels else 1
n = len(query)
@@ -501,11 +543,15 @@ class HonchoSessionManager:
if not session:
return ""
# Guard: truncate query to Honcho's dialectic input limit
if len(query) > self._dialectic_max_input_chars:
query = query[:self._dialectic_max_input_chars].rsplit(" ", 1)[0]
level = reasoning_level or self._dynamic_reasoning_level(query)
try:
if self._observation_mode == "directional":
# AI peer queries about the user (cross-observation)
if self._ai_observe_others:
# AI peer can observe user — use cross-observation routing
if peer == "ai":
ai_peer_obj = self._get_or_create_peer(session.assistant_peer_id)
result = ai_peer_obj.chat(query, reasoning_level=level) or ""
@@ -517,7 +563,7 @@ class HonchoSessionManager:
reasoning_level=level,
) or ""
else:
# Unified: user peer queries self, or AI peer queries self
# AI can't observe others — each peer queries self
peer_id = session.assistant_peer_id if peer == "ai" else session.user_peer_id
target_peer = self._get_or_create_peer(peer_id)
result = target_peer.chat(query, reasoning_level=level) or ""
@@ -618,35 +664,19 @@ class HonchoSessionManager:
if not session:
return {}
honcho_session = self._sessions_cache.get(session.honcho_session_id)
if not honcho_session:
return {}
result: dict[str, str] = {}
try:
ctx = honcho_session.context(
summary=False,
tokens=self._context_tokens,
peer_target=session.user_peer_id,
peer_perspective=session.assistant_peer_id,
)
card = ctx.peer_card or []
result["representation"] = ctx.peer_representation or ""
result["card"] = "\n".join(card) if isinstance(card, list) else str(card)
user_ctx = self._fetch_peer_context(session.user_peer_id)
result["representation"] = user_ctx["representation"]
result["card"] = "\n".join(user_ctx["card"])
except Exception as e:
logger.warning("Failed to fetch user context from Honcho: %s", e)
# Also fetch AI peer's own representation so Hermes knows itself.
try:
ai_ctx = honcho_session.context(
summary=False,
tokens=self._context_tokens,
peer_target=session.assistant_peer_id,
peer_perspective=session.user_peer_id,
)
ai_card = ai_ctx.peer_card or []
result["ai_representation"] = ai_ctx.peer_representation or ""
result["ai_card"] = "\n".join(ai_card) if isinstance(ai_card, list) else str(ai_card)
ai_ctx = self._fetch_peer_context(session.assistant_peer_id)
result["ai_representation"] = ai_ctx["representation"]
result["ai_card"] = "\n".join(ai_ctx["card"])
except Exception as e:
logger.debug("Failed to fetch AI peer context from Honcho: %s", e)
@@ -823,6 +853,64 @@ class HonchoSessionManager:
return uploaded
@staticmethod
def _normalize_card(card: Any) -> list[str]:
    """Coerce a Honcho card payload into a flat list of non-empty strings.

    Accepts None/empty (-> []), a list (falsy entries dropped, the rest
    stringified), or any other scalar (wrapped as a one-item list).
    """
    if not card:
        return []
    if not isinstance(card, list):
        return [str(card)]
    normalized: list[str] = []
    for entry in card:
        if entry:
            normalized.append(str(entry))
    return normalized
def _fetch_peer_card(self, peer_id: str) -> list[str]:
    """Fetch a peer card straight from the peer object.

    This sidesteps session.context(), which can report an empty
    peer_card for per-session messaging sessions even when the peer
    itself has a populated card. Probes the peer's own accessors in
    preference order: the current ``get_card`` method first, then the
    legacy ``card`` method.

    Returns an empty list when the peer exposes neither accessor.
    """
    peer = self._get_or_create_peer(peer_id)
    # First callable accessor wins; non-callable attributes are ignored.
    for attr_name in ("get_card", "card"):
        accessor = getattr(peer, attr_name, None)
        if callable(accessor):
            return self._normalize_card(accessor())
    return []
def _fetch_peer_context(self, peer_id: str, search_query: str | None = None) -> dict[str, Any]:
    """Fetch representation + peer card directly from a peer object.

    Tries peer.context() first (scoped to ``search_query`` when given);
    each missing piece then falls back independently — representation
    via peer.representation(), card via _fetch_peer_card(). Every step
    is best-effort with debug-level logging, so the returned dict always
    carries both keys (possibly empty).
    """
    peer = self._get_or_create_peer(peer_id)
    result: dict[str, Any] = {"representation": "", "card": []}
    try:
        if search_query:
            ctx = peer.context(search_query=search_query)
        else:
            ctx = peer.context()
        # Newer SDKs expose `representation`; older ones `peer_representation`.
        rep = getattr(ctx, "representation", None) or getattr(ctx, "peer_representation", None)
        result["representation"] = rep or ""
        result["card"] = self._normalize_card(getattr(ctx, "peer_card", None))
    except Exception as e:
        logger.debug("Direct peer.context() failed for '%s': %s", peer_id, e)
    if not result["representation"]:
        try:
            result["representation"] = peer.representation() or ""
        except Exception as e:
            logger.debug("Direct peer.representation() failed for '%s': %s", peer_id, e)
    if not result["card"]:
        try:
            result["card"] = self._fetch_peer_card(peer_id)
        except Exception as e:
            logger.debug("Direct peer card fetch failed for '%s': %s", peer_id, e)
    return result
def get_peer_card(self, session_key: str) -> list[str]:
"""
Fetch the user peer's card — a curated list of key facts.
@@ -835,19 +923,8 @@ class HonchoSessionManager:
if not session:
return []
honcho_session = self._sessions_cache.get(session.honcho_session_id)
if not honcho_session:
return []
try:
ctx = honcho_session.context(
summary=False,
tokens=200,
peer_target=session.user_peer_id,
peer_perspective=session.assistant_peer_id,
)
card = ctx.peer_card or []
return card if isinstance(card, list) else [str(card)]
return self._fetch_peer_card(session.user_peer_id)
except Exception as e:
logger.debug("Failed to fetch peer card from Honcho: %s", e)
return []
@@ -872,25 +949,14 @@ class HonchoSessionManager:
if not session:
return ""
honcho_session = self._sessions_cache.get(session.honcho_session_id)
if not honcho_session:
return ""
try:
ctx = honcho_session.context(
summary=False,
tokens=max_tokens,
peer_target=session.user_peer_id,
peer_perspective=session.assistant_peer_id,
search_query=query,
)
ctx = self._fetch_peer_context(session.user_peer_id, search_query=query)
parts = []
if ctx.peer_representation:
parts.append(ctx.peer_representation)
card = ctx.peer_card or []
if ctx["representation"]:
parts.append(ctx["representation"])
card = ctx["card"] or []
if card:
facts = card if isinstance(card, list) else [str(card)]
parts.append("\n".join(f"- {f}" for f in facts))
parts.append("\n".join(f"- {f}" for f in card))
return "\n\n".join(parts)
except Exception as e:
logger.debug("Honcho search_context failed: %s", e)
@@ -919,12 +985,12 @@ class HonchoSessionManager:
return False
try:
if self._observation_mode == "directional":
if self._ai_observe_others:
# AI peer creates conclusion about user (cross-observation)
assistant_peer = self._get_or_create_peer(session.assistant_peer_id)
conclusions_scope = assistant_peer.conclusions_of(session.user_peer_id)
else:
# Unified: user peer creates self-conclusion
# AI can't observe others — user peer creates self-conclusion
user_peer = self._get_or_create_peer(session.user_peer_id)
conclusions_scope = user_peer.conclusions_of(session.user_peer_id)
@@ -994,21 +1060,11 @@ class HonchoSessionManager:
if not session:
return {"representation": "", "card": ""}
honcho_session = self._sessions_cache.get(session.honcho_session_id)
if not honcho_session:
return {"representation": "", "card": ""}
try:
ctx = honcho_session.context(
summary=False,
tokens=self._context_tokens,
peer_target=session.assistant_peer_id,
peer_perspective=session.user_peer_id,
)
ai_card = ctx.peer_card or []
ctx = self._fetch_peer_context(session.assistant_peer_id)
return {
"representation": ctx.peer_representation or "",
"card": "\n".join(ai_card) if isinstance(ai_card, list) else str(ai_card),
"representation": ctx["representation"] or "",
"card": "\n".join(ctx["card"]),
}
except Exception as e:
logger.debug("Failed to fetch AI representation: %s", e)