Compare commits

...

1 Commits

Author SHA1 Message Date
Timmy
fbc91dea82 fix(#296): poka-yoke — prevent silent context overflow with mandatory compression
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m5s
## Problem
When the context approaches the model limit, there is no guaranteed fail-safe. The agent
could ignore compression warnings, causing silent data loss when the context overflows.

## Solution (Poka-yoke: Mitigation)
1. Hard token budget check at 85% of context length — auto-compression
2. At 95%, BLOCK tool calls and force compression before continuing
3. Add /context-status slash command (aliases: /ctx, /context)
4. Log context overflow events separately from normal compression

## Changes

### agent/context_compressor.py
- WARNING_THRESHOLD (0.85), CRITICAL_THRESHOLD (0.95)
- should_auto_compress(), should_block_tools(), get_usage_level()
- Extended get_status() with usage_level and thresholds

### run_agent.py
- 85%/95% checks before compression in main loop
- overflow_triggered param on _compress_context()
- Separate CONTEXT WARNING/CRITICAL logging

### hermes_cli/commands.py
- CommandDef("context-status", ...) with aliases (ctx, context)

### cli.py
- _show_context_status() with progress bar and threshold display

Refs #296
2026-04-13 21:38:13 -04:00
4 changed files with 165 additions and 6 deletions

View File

@@ -138,6 +138,48 @@ class ContextCompressor:
rough_estimate = estimate_messages_tokens_rough(messages)
return rough_estimate >= self.threshold_tokens
# ── Poka-yoke: Hard context overflow safeguards (#296) ──────────────
# Fixed (non-configurable) pressure thresholds, expressed as fractions of
# context_length. Unlike the user-configurable threshold_percent used by
# should_compress(), these are hard-coded on purpose: the fail-safe must
# not be tunable away. Presumably class attributes of ContextCompressor
# (the class header is outside this hunk — confirm placement).
WARNING_THRESHOLD = 0.85 # 85% of context — auto-compression trigger
CRITICAL_THRESHOLD = 0.95 # 95% of context — block tool calls, force compression
def get_context_usage_percent(self, prompt_tokens: int = None) -> float:
"""Return context usage as a percentage of total context length (0-100)."""
tokens = prompt_tokens if prompt_tokens is not None else self.last_prompt_tokens
if self.context_length <= 0:
return 0.0
return min(100.0, (tokens / self.context_length) * 100)
def get_usage_level(self, prompt_tokens: int = None) -> str:
"""Return the current context pressure level: 'normal', 'warning', 'critical'."""
pct = self.get_context_usage_percent(prompt_tokens) / 100.0
if pct >= self.CRITICAL_THRESHOLD:
return "critical"
elif pct >= self.WARNING_THRESHOLD:
return "warning"
return "normal"
def should_auto_compress(self, prompt_tokens: int = None) -> bool:
"""Check if context exceeds the WARNING threshold (85% of context length).
This is a HARD trigger — unlike should_compress() which uses the
configurable threshold_percent, this always fires at 85% regardless
of configuration. Poka-yoke: don't trust voluntary behavior.
"""
tokens = prompt_tokens if prompt_tokens is not None else self.last_prompt_tokens
warning_tokens = int(self.context_length * self.WARNING_THRESHOLD)
return tokens >= warning_tokens
def should_block_tools(self, prompt_tokens: int = None) -> bool:
"""Check if context exceeds the CRITICAL threshold (95% of context length).
When True, the agent MUST NOT make further tool calls until compression
completes. Poka-yoke: enforce the constraint mechanically.
"""
tokens = prompt_tokens if prompt_tokens is not None else self.last_prompt_tokens
critical_tokens = int(self.context_length * self.CRITICAL_THRESHOLD)
return tokens >= critical_tokens
def get_status(self) -> Dict[str, Any]:
"""Get current compression status for display/logging."""
return {
@@ -146,6 +188,9 @@ class ContextCompressor:
"context_length": self.context_length,
"usage_percent": min(100, (self.last_prompt_tokens / self.context_length * 100)) if self.context_length else 0,
"compression_count": self.compression_count,
"usage_level": self.get_usage_level(),
"warning_threshold_tokens": int(self.context_length * self.WARNING_THRESHOLD),
"critical_threshold_tokens": int(self.context_length * self.CRITICAL_THRESHOLD),
}
# ------------------------------------------------------------------

46
cli.py
View File

@@ -4658,6 +4658,8 @@ def _upload_0x0st(content: str) -> str | None:
self._handle_reasoning_command(cmd_original)
elif canonical == "compress":
self._manual_compress()
elif canonical == "context-status":
self._show_context_status()
elif canonical == "usage":
self._show_usage()
elif canonical == "insights":
@@ -5474,6 +5476,50 @@ def _upload_0x0st(content: str) -> str | None:
except Exception as e:
print(f" ❌ Compression failed: {e}")
def _show_context_status(self):
    """Show context usage, compression history, and remaining budget.

    Prints a status summary for the active agent's context compressor:
    pressure level, token usage vs. context length, the configurable and
    hard (85%/95%) thresholds, compression count, a progress bar, and the
    remaining token budget. No-op (with a message) when no agent or
    compressor is available.
    """
    if not self.agent:
        print("(._.) No active agent -- send a message first.")
        return
    compressor = getattr(self.agent, "context_compressor", None)
    if not compressor:
        print("(._.) Context compressor not initialized.")
        return
    status = compressor.get_status()
    # Prefer the compressor's last observed prompt token count; fall back
    # to a rough estimate of the conversation history when unset.
    real_tokens = status.get("last_prompt_tokens", 0)
    if not real_tokens and self.conversation_history:
        # Lazy import: only needed on this fallback path, and keeps the
        # command usable even if the estimate helper ever moves.
        from agent.model_metadata import estimate_messages_tokens_rough
        real_tokens = estimate_messages_tokens_rough(self.conversation_history)
    context_length = status.get("context_length", 0)
    usage_percent = (real_tokens / context_length * 100) if context_length > 0 else 0
    usage_level = status.get("usage_level", "normal")
    # Format usage level with emoji
    level_emoji = {"normal": "", "warning": "⚠️", "critical": "🔴"}.get(usage_level, "")
    print("\n📊 Context Status")
    print(f" {level_emoji} Status: {usage_level.upper()}")
    print(f" Usage: {real_tokens:,} / {context_length:,} tokens ({usage_percent:.1f}%)")
    # Explicit guard replaces the ambiguous one-line conditional print;
    # the threshold line is meaningless without a known context length.
    if context_length:
        threshold_tokens = status.get('threshold_tokens', 0)
        print(f" Compression threshold: {threshold_tokens:,} tokens ({threshold_tokens / context_length * 100:.0f}%)")
    print(f" Warning threshold (85%): {status.get('warning_threshold_tokens', 0):,} tokens")
    print(f" Critical threshold (95%): {status.get('critical_threshold_tokens', 0):,} tokens")
    print(f" Compressions: {status.get('compression_count', 0)}")
    # Progress bar — restore the block glyphs (█ filled / ░ empty); the
    # originals were lost to mojibake, leaving an invisible bar.
    bar_length = 40
    filled = int(bar_length * min(usage_percent, 100) / 100)
    bar = "█" * filled + "░" * (bar_length - filled)
    print(f" [{bar}] {usage_percent:.1f}%")
    # Remaining budget
    remaining = max(0, context_length - real_tokens)
    print(f" Remaining: {remaining:,} tokens")
def _show_usage(self):
"""Show cumulative token usage for the current session."""
if not self.agent:

View File

@@ -60,6 +60,8 @@ COMMAND_REGISTRY: list[CommandDef] = [
CommandDef("branch", "Branch the current session (explore a different path)", "Session",
aliases=("fork",), args_hint="[name]"),
CommandDef("compress", "Manually compress conversation context", "Session"),
CommandDef("context-status", "Show context usage, compression history, and remaining budget", "Session",
aliases=("ctx", "context")),
CommandDef("rollback", "List or restore filesystem checkpoints", "Session",
args_hint="[number]"),
CommandDef("stop", "Kill all running background processes", "Session"),

View File

@@ -5931,18 +5931,28 @@ class AIAgent:
if messages and messages[-1].get("_flush_sentinel") == _sentinel:
messages.pop()
def _compress_context(self, messages: list, system_message: str, *, approx_tokens: int = None, task_id: str = "default") -> tuple:
def _compress_context(self, messages: list, system_message: str, *, approx_tokens: int = None, task_id: str = "default", overflow_triggered: bool = False) -> tuple:
"""Compress conversation context and split the session in SQLite.
Returns:
(compressed_messages, new_system_prompt) tuple
"""
_pre_msg_count = len(messages)
logger.info(
"context compression started: session=%s messages=%d tokens=~%s model=%s",
self.session_id or "none", _pre_msg_count,
f"{approx_tokens:,}" if approx_tokens else "unknown", self.model,
)
# Log overflow events separately (#296)
if overflow_triggered:
logger.warning(
"CONTEXT OVERFLOW COMPRESSION: session=%s messages=%d tokens=~%s model=%s "
"reason=overflow_triggered",
self.session_id or "none", _pre_msg_count,
f"{approx_tokens:,}" if approx_tokens else "unknown", self.model,
)
else:
logger.info(
"context compression started: session=%s messages=%d tokens=~%s model=%s",
self.session_id or "none", _pre_msg_count,
f"{approx_tokens:,}" if approx_tokens else "unknown", self.model,
)
# Pre-compression memory flush: let the model save memories before they're lost
self.flush_memories(messages, min_turns=0)
@@ -9001,6 +9011,62 @@ class AIAgent:
self._context_pressure_warned = True
self._emit_context_pressure(_compaction_progress, _compressor)
# ── Poka-yoke: Hard context overflow safeguards (#296) ──────────
_usage_level = _compressor.get_usage_level(_real_tokens)
# Log context overflow events separately
if _usage_level == "critical" and not getattr(self, '_context_critical_logged', False):
self._context_critical_logged = True
logger.warning(
"CONTEXT CRITICAL: %.1f%% of context used (%d/%d tokens). "
"Tool calls BLOCKED until compression completes.",
_compressor.get_context_usage_percent(_real_tokens),
_real_tokens, _compressor.context_length,
)
elif _usage_level == "warning" and not getattr(self, '_context_warning_logged', False):
self._context_warning_logged = True
logger.warning(
"CONTEXT WARNING: %.1f%% of context used (%d/%d tokens). "
"Auto-compression triggered.",
_compressor.get_context_usage_percent(_real_tokens),
_real_tokens, _compressor.context_length,
)
# Reset log flags when context drops below warning
if _usage_level == "normal":
self._context_critical_logged = False
self._context_warning_logged = False
# CRITICAL (95%): Block tool calls — force compression
if self.compression_enabled and _compressor.should_block_tools(_real_tokens):
logger.warning(
"CONTEXT CRITICAL: Blocking tool calls and forcing compression. "
"%.1f%% of context used.",
_compressor.get_context_usage_percent(_real_tokens),
)
messages, active_system_prompt = self._compress_context(
messages, system_message,
approx_tokens=_real_tokens,
task_id=effective_task_id,
)
conversation_history = None
self._session_messages = messages
self._save_session_log(messages)
continue
# WARNING (85%): Auto-compression trigger (hard-coded threshold)
if self.compression_enabled and _compressor.should_auto_compress(_real_tokens):
messages, active_system_prompt = self._compress_context(
messages, system_message,
approx_tokens=_real_tokens,
task_id=effective_task_id,
)
conversation_history = None
self._session_messages = messages
self._save_session_log(messages)
continue
# Standard compression (configurable threshold_percent)
if self.compression_enabled and _compressor.should_compress(_real_tokens):
messages, active_system_prompt = self._compress_context(
messages, system_message,