Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
bc1a188e9c fix(cron): SSH dispatch validation + failure detection + broken import
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m25s
1. New cron/ssh_dispatch.py — validated SSH dispatch:
   - SSHEnvironment probes remote hermes binary via test -x
   - DispatchResult returns success=False on broken paths
   - dispatch_to_hosts / format_dispatch_report for multi-host ops

2. cron/scheduler.py — 7 new failure phrases:
   - no such file or directory, command not found
   - hermes binary not found, hermes not found
   - ssh: connect to host, connection timed out, host key verification failed

3. cron/__init__.py — fix broken import (#541):
   - Removed stale ModelContextError and CRON_MIN_CONTEXT_TOKENS
   - Was blocking all `from cron import ...` calls

Closes #561 (Closes #350, #541)
2026-04-13 22:15:23 -04:00
7 changed files with 252 additions and 50 deletions

View File

@@ -138,29 +138,6 @@ class ContextCompressor:
rough_estimate = estimate_messages_tokens_rough(messages)
return rough_estimate >= self.threshold_tokens
# ── Poka-yoke: Hard context overflow safeguards (#296) ──────────────
# Fractions of the model context window at which usage escalates.
WARNING_THRESHOLD = 0.85   # "warning" band; should_auto_compress() triggers here
CRITICAL_THRESHOLD = 0.95  # "critical" band; should_block_tools() triggers here
def get_context_usage_percent(self, prompt_tokens: int = None) -> float:
    """Return context usage as a percentage of the model window, capped at 100."""
    # Fall back to the last recorded prompt size when no explicit count is given.
    if prompt_tokens is None:
        used = self.last_prompt_tokens
    else:
        used = prompt_tokens
    # A non-positive window makes the ratio meaningless; report zero usage.
    if self.context_length <= 0:
        return 0.0
    pct = (used / self.context_length) * 100
    return min(100.0, pct)
def get_usage_level(self, prompt_tokens: int = None) -> str:
    """Classify context usage as "normal", "warning", or "critical"."""
    fraction = self.get_context_usage_percent(prompt_tokens) / 100.0
    # Check the most severe band first so the bands resolve in priority order.
    for level, floor in (("critical", self.CRITICAL_THRESHOLD),
                         ("warning", self.WARNING_THRESHOLD)):
        if fraction >= floor:
            return level
    return "normal"
def should_auto_compress(self, prompt_tokens: int = None) -> bool:
    """Return True once usage reaches the warning threshold of the window."""
    if prompt_tokens is None:
        prompt_tokens = self.last_prompt_tokens
    warning_floor = int(self.context_length * self.WARNING_THRESHOLD)
    return prompt_tokens >= warning_floor
def should_block_tools(self, prompt_tokens: int = None) -> bool:
    """Return True once usage reaches the critical threshold of the window."""
    if prompt_tokens is None:
        prompt_tokens = self.last_prompt_tokens
    critical_floor = int(self.context_length * self.CRITICAL_THRESHOLD)
    return prompt_tokens >= critical_floor
def get_status(self) -> Dict[str, Any]:
"""Get current compression status for display/logging."""
return {
@@ -169,9 +146,6 @@ class ContextCompressor:
"context_length": self.context_length,
"usage_percent": min(100, (self.last_prompt_tokens / self.context_length * 100)) if self.context_length else 0,
"compression_count": self.compression_count,
"usage_level": self.get_usage_level(),
"warning_threshold_tokens": int(self.context_length * self.WARNING_THRESHOLD),
"critical_threshold_tokens": int(self.context_length * self.CRITICAL_THRESHOLD),
}
# ------------------------------------------------------------------

18
cli.py
View File

@@ -4658,8 +4658,6 @@ def _upload_0x0st(content: str) -> str | None:
self._handle_reasoning_command(cmd_original)
elif canonical == "compress":
self._manual_compress()
elif canonical == "context-status":
self._show_context_status()
elif canonical == "usage":
self._show_usage()
elif canonical == "insights":
@@ -5476,22 +5474,6 @@ def _upload_0x0st(content: str) -> str | None:
except Exception as e:
print(f" ❌ Compression failed: {e}")
def _show_context_status(self):
    """Print a context-usage dashboard: level, token counts, a usage bar, thresholds."""
    if not self.agent: print("(._.) No active agent."); return
    compressor = getattr(self.agent, "context_compressor", None)
    if not compressor: print("(._.) No compressor."); return
    from agent.model_metadata import estimate_messages_tokens_rough
    status = compressor.get_status()
    # Prefer the compressor's last real prompt count; fall back to a rough
    # estimate of the visible conversation when none has been recorded.
    real = status.get("last_prompt_tokens", 0) or (estimate_messages_tokens_rough(self.conversation_history) if self.conversation_history else 0)
    ctx_len = status.get("context_length", 1)
    pct = real / ctx_len * 100 if ctx_len else 0
    lvl = status.get("usage_level", "normal")
    emoji = {"normal":"","warning":"⚠️","critical":"🔴"}.get(lvl,"")
    # NOTE(review): both bar glyphs below are empty strings, so the rendered
    # 40-slot bar is always blank — the fill characters look like they were
    # lost in transit (e.g. block/shade glyphs); confirm against the original.
    bar = ""*int(40*pct/100) + ""*(40-int(40*pct/100))
    print(f"\n📊 Context: {emoji} {lvl.upper()} | {real:,}/{ctx_len:,} ({pct:.1f}%) | {status.get('compression_count',0)} compressions")
    print(f" [{bar}] {pct:.1f}% | Remaining: {max(0,ctx_len-real):,} tokens")
    print(f" Thresholds: config={status.get('threshold_tokens',0):,} | warn(85%)={status.get('warning_threshold_tokens',0):,} | crit(95%)={status.get('critical_threshold_tokens',0):,}")
def _show_usage(self):
"""Show cumulative token usage for the current session."""
if not self.agent:

View File

@@ -26,7 +26,7 @@ from cron.jobs import (
trigger_job,
JOBS_FILE,
)
from cron.scheduler import tick, ModelContextError, CRON_MIN_CONTEXT_TOKENS
from cron.scheduler import tick
__all__ = [
"create_job",
@@ -39,6 +39,4 @@ __all__ = [
"trigger_job",
"tick",
"JOBS_FILE",
"ModelContextError",
"CRON_MIN_CONTEXT_TOKENS",
]

View File

@@ -186,7 +186,14 @@ _SCRIPT_FAILURE_PHRASES = (
"unable to execute",
"permission denied",
"no such file",
"no such file or directory",
"command not found",
"traceback",
"hermes binary not found",
"hermes not found",
"ssh: connect to host",
"connection timed out",
"host key verification failed",
)

243
cron/ssh_dispatch.py Normal file
View File

@@ -0,0 +1,243 @@
"""SSH Dispatch — validated remote hermes execution for cron jobs.
Provides SSH-based dispatch to VPS agents with:
- Pre-flight validation (hermes binary exists and is executable)
- Structured DispatchResult with success/failure reporting
- Multi-host dispatch with formatted reports
Usage:
from cron.ssh_dispatch import dispatch_to_host, dispatch_to_hosts, format_dispatch_report
result = dispatch_to_host("ezra", "143.198.27.163", "Check the beacon repo for open issues")
if not result.success:
print(result.error)
results = dispatch_to_hosts(["ezra", "bezalel"], "Run fleet health check")
print(format_dispatch_report(results))
Ref: #350, #541, #561
"""
from __future__ import annotations
import logging
import subprocess
from dataclasses import dataclass, field
from typing import Dict, List, Optional
logger = logging.getLogger(__name__)
# Known VPS hosts (can be overridden via env or config).
# Maps logical hostname -> dialable address; used by dispatch_to_hosts()
# when no explicit host_map is supplied.
DEFAULT_HOSTS: Dict[str, str] = {
    "ezra": "143.198.27.163",
    "bezalel": "159.203.146.185",
}
# SSH options for non-interactive, fast-fail connections.
_SSH_OPTS = [
    "-o", "ConnectTimeout=10",                 # give up quickly on unreachable hosts
    "-o", "StrictHostKeyChecking=accept-new",  # auto-accept new keys, reject changed ones
    "-o", "BatchMode=yes",                     # never prompt for passwords/passphrases
    "-o", "LogLevel=ERROR",                    # keep stderr clean for error reporting
]
# Candidate locations of the hermes binary on the remote, probed in order.
_HERMES_CHECK_PATHS = [
    "~/.local/bin/hermes",
    "/usr/local/bin/hermes",
    "~/.hermes/bin/hermes",
]
@dataclass
class DispatchResult:
    """Outcome of a single SSH dispatch attempt against one host.

    Captures both the transport result (exit code, captured output, error
    text) and the hermes pre-flight probe result (found flag and path).
    """
    host: str                    # logical hostname, e.g. "ezra"
    address: str                 # IP or DNS name actually dialed
    success: bool                # True when the remote command exited 0
    output: str = ""             # captured stdout
    error: str = ""              # human-readable failure description
    hermes_found: bool = False   # pre-flight probe located the binary
    hermes_path: str = ""        # path reported by the probe
    exit_code: int = -1          # remote exit status; -1 when never executed

    @property
    def summary(self) -> str:
        """One-line [OK]/[FAIL] description suitable for logging."""
        if not self.success:
            return f"[FAIL] {self.host} ({self.address}): {self.error}"
        return f"[OK] {self.host} ({self.address})"
def probe_hermes(host: str, address: str) -> tuple[bool, str]:
    """Check whether an executable hermes binary exists on a remote host.

    Performs a single SSH round-trip that tests each candidate in
    ``_HERMES_CHECK_PATHS`` and echoes the first executable one.

    Args:
        host: Logical hostname, used only for log messages.
        address: IP address or DNS name to SSH to.

    Returns:
        ``(True, "<path>")`` when an executable hermes was located,
        ``(False, "")`` on not-found, timeout, or SSH error.
    """
    # Brace-group each probe so `||` falls through to the next candidate.
    # Without grouping, the shell's left-associative `&&`/`||` chaining would
    # also run every subsequent `echo` once the first test succeeds, printing
    # multiple "paths" and corrupting the result.
    check_cmds = " || ".join(f"{{ test -x {p} && echo {p}; }}" for p in _HERMES_CHECK_PATHS)
    remote_cmd = f"bash -c '{check_cmds} || echo NOTFOUND'"
    try:
        # ssh stops option parsing at the first non-option argument, so the
        # -o options MUST precede the destination; anything after it becomes
        # part of the remote command.
        result = subprocess.run(
            ["ssh", *_SSH_OPTS, address, remote_cmd],
            capture_output=True,
            text=True,
            timeout=15,
        )
        # Defensively keep only the first line in case the remote shell or
        # login scripts print extra output.
        out_lines = result.stdout.strip().splitlines()
        output = out_lines[0].strip() if out_lines else ""
        if output and output != "NOTFOUND":
            return True, output
        return False, ""
    except subprocess.TimeoutExpired:
        logger.warning("SSH probe timed out for %s", host)
        return False, ""
    except Exception as e:
        logger.warning("SSH probe failed for %s: %s", host, e)
        return False, ""
def dispatch_to_host(
    host: str,
    address: str,
    prompt: str,
    timeout: int = 300,
    validate: bool = True,
) -> DispatchResult:
    """Dispatch a prompt to a remote hermes instance via SSH.

    Args:
        host: Hostname (ezra, bezalel, etc.)
        address: IP address or hostname
        prompt: The prompt/task to dispatch
        timeout: SSH timeout in seconds
        validate: Whether to probe for hermes binary first

    Returns:
        DispatchResult with success/failure details.
    """
    # Pre-flight validation: fail fast with a structured result instead of
    # letting the remote shell die with a cryptic "command not found".
    if validate:
        found, path = probe_hermes(host, address)
        if not found:
            return DispatchResult(
                host=host,
                address=address,
                success=False,
                error="hermes binary not found on remote host",
                hermes_found=False,
            )
    else:
        # Caller opted out of probing; assume the conventional install path.
        found, path = True, "~/.local/bin/hermes"
    # Build the dispatch command: pipe the prompt into hermes chat in quiet
    # mode. Embedded single quotes are closed, escaped, and reopened so the
    # prompt survives the remote shell's single-quoting.
    escaped_prompt = prompt.replace("'", "'\\''")
    remote_cmd = f"echo '{escaped_prompt}' | {path} chat --quiet"
    try:
        # ssh options MUST precede the destination: arguments after the
        # destination are interpreted as part of the remote command.
        result = subprocess.run(
            ["ssh", *_SSH_OPTS, address, remote_cmd],
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        success = result.returncode == 0
        error = ""
        if not success:
            error = result.stderr.strip() if result.stderr else f"exit code {result.returncode}"
        return DispatchResult(
            host=host,
            address=address,
            success=success,
            output=result.stdout.strip()[:500],  # Truncate long output
            error=error,
            hermes_found=found,
            hermes_path=path,
            exit_code=result.returncode,
        )
    except subprocess.TimeoutExpired:
        return DispatchResult(
            host=host,
            address=address,
            success=False,
            error=f"SSH dispatch timed out after {timeout}s",
            hermes_found=found,
            hermes_path=path,
        )
    except Exception as e:
        return DispatchResult(
            host=host,
            address=address,
            success=False,
            error=f"SSH dispatch failed: {e}",
            hermes_found=found,
            hermes_path=path,
        )
def dispatch_to_hosts(
    hosts: List[str],
    prompt: str,
    host_map: Optional[Dict[str, str]] = None,
    timeout: int = 300,
) -> List[DispatchResult]:
    """Dispatch one prompt to several hosts sequentially.

    Args:
        hosts: List of hostnames to dispatch to.
        prompt: The prompt/task to dispatch.
        host_map: Optional override of hostname -> address mapping
            (falls back to DEFAULT_HOSTS when empty or omitted).
        timeout: SSH timeout per host.

    Returns:
        One DispatchResult per entry in ``hosts``, in the same order.
    """
    directory = host_map or DEFAULT_HOSTS
    outcomes: List[DispatchResult] = []
    for name in hosts:
        addr = directory.get(name)
        if not addr:
            # Unknown hosts produce a structured failure rather than raising.
            outcomes.append(DispatchResult(
                host=name,
                address="unknown",
                success=False,
                error=f"Unknown host: {name}",
            ))
            continue
        outcome = dispatch_to_host(name, addr, prompt, timeout=timeout)
        outcomes.append(outcome)
        logger.info(outcome.summary)
    return outcomes
def format_dispatch_report(results: List[DispatchResult]) -> str:
    """Format multi-host dispatch results as a readable plain-text report.

    Args:
        results: DispatchResult entries, one per host.

    Returns:
        A multi-line report with an OK/FAIL tally and per-host details, or
        "No dispatch results." when the list is empty.
    """
    if not results:
        return "No dispatch results."
    lines = ["SSH Dispatch Report", "=" * 40, ""]
    ok_count = sum(1 for r in results if r.success)
    fail_count = len(results) - ok_count
    lines.append(f"Total: {len(results)} | OK: {ok_count} | FAIL: {fail_count}")
    lines.append("")
    for r in results:
        # Both branches previously produced an empty string, leaving hosts
        # without any pass/fail marker; tag them explicitly, matching the
        # [OK]/[FAIL] convention used by DispatchResult.summary.
        status = "[OK]" if r.success else "[FAIL]"
        lines.append(f"  {status} {r.host} ({r.address})")
        if r.hermes_path:
            lines.append(f"    hermes: {r.hermes_path}")
        if r.success and r.output:
            # Only add an ellipsis when the output was actually truncated.
            snippet = r.output[:100] + ("..." if len(r.output) > 100 else "")
            lines.append(f"    output: {snippet}")
        if not r.success:
            lines.append(f"    error: {r.error}")
        lines.append("")
    return "\n".join(lines)

View File

@@ -60,8 +60,6 @@ COMMAND_REGISTRY: list[CommandDef] = [
CommandDef("branch", "Branch the current session (explore a different path)", "Session",
aliases=("fork",), args_hint="[name]"),
CommandDef("compress", "Manually compress conversation context", "Session"),
CommandDef("context-status", "Show context usage, compression history, and remaining budget", "Session",
aliases=("ctx", "context")),
CommandDef("rollback", "List or restore filesystem checkpoints", "Session",
args_hint="[number]"),
CommandDef("stop", "Kill all running background processes", "Session"),

View File

@@ -5931,7 +5931,7 @@ class AIAgent:
if messages and messages[-1].get("_flush_sentinel") == _sentinel:
messages.pop()
def _compress_context(self, messages: list, system_message: str, *, approx_tokens: int = None, task_id: str = "default", overflow_triggered: bool = False) -> tuple:
def _compress_context(self, messages: list, system_message: str, *, approx_tokens: int = None, task_id: str = "default") -> tuple:
"""Compress conversation context and split the session in SQLite.
Returns: