Compare commits
3 Commits
whip/316-1
...
triage/296
| Author | SHA1 | Date |
|---|---|---|
| | 959e933646 | |
| | 954fd992eb | |
| | f35f56e397 | |
@@ -138,6 +138,33 @@ class ContextCompressor:
|
||||
rough_estimate = estimate_messages_tokens_rough(messages)
|
||||
return rough_estimate >= self.threshold_tokens
|
||||
|
||||
# ── Poka-yoke: Hard context overflow safeguards (#296) ──────────────
# Fractions of the model context window (not token counts). These are
# hard limits, independent of the user-configurable threshold_percent.
WARNING_THRESHOLD = 0.85  # 85% — auto-compression trigger
CRITICAL_THRESHOLD = 0.95  # 95% — block tools, force compression
|
||||
|
||||
def get_context_usage_percent(self, prompt_tokens: "int | None" = None) -> float:
    """Return context usage as a percentage of total context length (0-100).

    Args:
        prompt_tokens: Token count to measure; when omitted, falls back
            to ``self.last_prompt_tokens``.

    Returns:
        Usage percentage capped at 100.0. Returns 0.0 when the context
        length is unknown (<= 0) so callers never divide by zero.
    """
    tokens = prompt_tokens if prompt_tokens is not None else self.last_prompt_tokens
    if self.context_length <= 0:
        return 0.0
    return min(100.0, (tokens / self.context_length) * 100)
|
||||
|
||||
def get_usage_level(self, prompt_tokens: "int | None" = None) -> str:
    """Return the current context pressure level.

    Args:
        prompt_tokens: Optional explicit token count; defaults to the
            last recorded prompt size (see get_context_usage_percent).

    Returns:
        'critical' at/above CRITICAL_THRESHOLD, 'warning' at/above
        WARNING_THRESHOLD, otherwise 'normal'.
    """
    # Thresholds are fractions, so convert the percentage back to a ratio.
    ratio = self.get_context_usage_percent(prompt_tokens) / 100.0
    if ratio >= self.CRITICAL_THRESHOLD:
        return "critical"
    if ratio >= self.WARNING_THRESHOLD:
        return "warning"
    return "normal"
|
||||
|
||||
def should_auto_compress(self, prompt_tokens: "int | None" = None) -> bool:
    """Hard trigger at 85% — ignores configurable threshold_percent.

    Args:
        prompt_tokens: Optional explicit token count; defaults to
            ``self.last_prompt_tokens``.

    Returns:
        True when usage reaches WARNING_THRESHOLD of the context window.
        False when the context length is unknown (<= 0) — otherwise the
        computed threshold would be 0 and every prompt would trigger,
        contradicting get_context_usage_percent's 0.0 for that case.
    """
    if self.context_length <= 0:
        return False
    tokens = prompt_tokens if prompt_tokens is not None else self.last_prompt_tokens
    return tokens >= int(self.context_length * self.WARNING_THRESHOLD)
|
||||
|
||||
def should_block_tools(self, prompt_tokens: "int | None" = None) -> bool:
    """Hard trigger at 95% — block tool calls until compression.

    Args:
        prompt_tokens: Optional explicit token count; defaults to
            ``self.last_prompt_tokens``.

    Returns:
        True when usage reaches CRITICAL_THRESHOLD of the context window.
        False when the context length is unknown (<= 0) — otherwise the
        computed threshold would be 0 and all tools would be blocked
        unconditionally.
    """
    if self.context_length <= 0:
        return False
    tokens = prompt_tokens if prompt_tokens is not None else self.last_prompt_tokens
    return tokens >= int(self.context_length * self.CRITICAL_THRESHOLD)
|
||||
|
||||
def get_status(self) -> Dict[str, Any]:
|
||||
"""Get current compression status for display/logging."""
|
||||
return {
|
||||
@@ -146,6 +173,9 @@ class ContextCompressor:
|
||||
"context_length": self.context_length,
|
||||
"usage_percent": min(100, (self.last_prompt_tokens / self.context_length * 100)) if self.context_length else 0,
|
||||
"compression_count": self.compression_count,
|
||||
"usage_level": self.get_usage_level(),
|
||||
"warning_threshold_tokens": int(self.context_length * self.WARNING_THRESHOLD),
|
||||
"critical_threshold_tokens": int(self.context_length * self.CRITICAL_THRESHOLD),
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
18
cli.py
18
cli.py
@@ -4658,6 +4658,8 @@ def _upload_0x0st(content: str) -> str | None:
|
||||
self._handle_reasoning_command(cmd_original)
|
||||
elif canonical == "compress":
|
||||
self._manual_compress()
|
||||
elif canonical == "context-status":
|
||||
self._show_context_status()
|
||||
elif canonical == "usage":
|
||||
self._show_usage()
|
||||
elif canonical == "insights":
|
||||
@@ -5474,6 +5476,22 @@ def _upload_0x0st(content: str) -> str | None:
|
||||
except Exception as e:
|
||||
print(f" ❌ Compression failed: {e}")
|
||||
|
||||
def _show_context_status(self):
    """Print a context-usage report: pressure level, progress bar, thresholds.

    Best-effort display command — requires an active agent that carries a
    ``context_compressor``; otherwise prints a short notice and returns.
    """
    if not self.agent:
        print("(._.) No active agent.")
        return
    compressor = getattr(self.agent, "context_compressor", None)
    if not compressor:
        print("(._.) No compressor.")
        return
    from agent.model_metadata import estimate_messages_tokens_rough

    status = compressor.get_status()
    # Prefer the real token count from the last request; fall back to a
    # rough estimate of the current conversation when none was recorded.
    real = status.get("last_prompt_tokens", 0) or (
        estimate_messages_tokens_rough(self.conversation_history)
        if self.conversation_history
        else 0
    )
    ctx_len = status.get("context_length", 1)
    pct = real / ctx_len * 100 if ctx_len else 0
    lvl = status.get("usage_level", "normal")
    emoji = {"normal": "✅", "warning": "⚠️", "critical": "🔴"}.get(lvl, "❓")
    # Clamp the fill so the bar stays exactly 40 chars even if pct > 100
    # (unclamped, the padding term goes negative and the bar overflows).
    fill = min(40, int(40 * pct / 100))
    bar = "█" * fill + "░" * (40 - fill)
    print(f"\n📊 Context: {emoji} {lvl.upper()} | {real:,}/{ctx_len:,} ({pct:.1f}%) | {status.get('compression_count',0)} compressions")
    print(f"  [{bar}] {pct:.1f}% | Remaining: {max(0,ctx_len-real):,} tokens")
    print(f"  Thresholds: config={status.get('threshold_tokens',0):,} | warn(85%)={status.get('warning_threshold_tokens',0):,} | crit(95%)={status.get('critical_threshold_tokens',0):,}")
|
||||
|
||||
def _show_usage(self):
|
||||
"""Show cumulative token usage for the current session."""
|
||||
if not self.agent:
|
||||
|
||||
@@ -60,6 +60,8 @@ COMMAND_REGISTRY: list[CommandDef] = [
|
||||
CommandDef("branch", "Branch the current session (explore a different path)", "Session",
|
||||
aliases=("fork",), args_hint="[name]"),
|
||||
CommandDef("compress", "Manually compress conversation context", "Session"),
|
||||
CommandDef("context-status", "Show context usage, compression history, and remaining budget", "Session",
|
||||
aliases=("ctx", "context")),
|
||||
CommandDef("rollback", "List or restore filesystem checkpoints", "Session",
|
||||
args_hint="[number]"),
|
||||
CommandDef("stop", "Kill all running background processes", "Session"),
|
||||
|
||||
30
run_agent.py
30
run_agent.py
@@ -1001,30 +1001,10 @@ class AIAgent:
|
||||
self._session_db = session_db
|
||||
self._parent_session_id = parent_session_id
|
||||
self._last_flushed_db_idx = 0 # tracks DB-write cursor to prevent duplicate writes
|
||||
if self._session_db:
|
||||
try:
|
||||
self._session_db.create_session(
|
||||
session_id=self.session_id,
|
||||
source=self.platform or os.environ.get("HERMES_SESSION_SOURCE", "cli"),
|
||||
model=self.model,
|
||||
model_config={
|
||||
"max_iterations": self.max_iterations,
|
||||
"reasoning_config": reasoning_config,
|
||||
"max_tokens": max_tokens,
|
||||
},
|
||||
user_id=None,
|
||||
parent_session_id=self._parent_session_id,
|
||||
)
|
||||
except Exception as e:
|
||||
# Transient SQLite lock contention (e.g. CLI and gateway writing
|
||||
# concurrently) must NOT permanently disable session_search for
|
||||
# this agent. Keep _session_db alive — subsequent message
|
||||
# flushes and session_search calls will still work once the
|
||||
# lock clears. The session row may be missing from the index
|
||||
# for this run, but that is recoverable (flushes upsert rows).
|
||||
logger.warning(
|
||||
"Session DB create_session failed (session_search still available): %s", e
|
||||
)
|
||||
# Lazy session creation: defer until first message flush (#314).
|
||||
# _flush_messages_to_session_db() calls ensure_session() which uses
|
||||
# INSERT OR IGNORE — creating the row only when messages arrive.
|
||||
# This eliminates 32% of sessions that are created but never used.
|
||||
|
||||
# In-memory todo list for task planning (one per agent/session)
|
||||
from tools.todo_tool import TodoStore
|
||||
@@ -5951,7 +5931,7 @@ class AIAgent:
|
||||
if messages and messages[-1].get("_flush_sentinel") == _sentinel:
|
||||
messages.pop()
|
||||
|
||||
def _compress_context(self, messages: list, system_message: str, *, approx_tokens: int = None, task_id: str = "default") -> tuple:
|
||||
def _compress_context(self, messages: list, system_message: str, *, approx_tokens: int = None, task_id: str = "default", overflow_triggered: bool = False) -> tuple:
|
||||
"""Compress conversation context and split the session in SQLite.
|
||||
|
||||
Returns:
|
||||
|
||||
Reference in New Issue
Block a user