Compare commits

...

3 Commits

Author SHA1 Message Date
Alexander Whitestone
bc1a188e9c fix(cron): SSH dispatch validation + failure detection + broken import
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m25s
1. New cron/ssh_dispatch.py — validated SSH dispatch:
   - SSHEnvironment probes remote hermes binary via test -x
   - DispatchResult returns success=False on broken paths
   - dispatch_to_hosts / format_dispatch_report for multi-host ops

2. cron/scheduler.py — 7 new failure phrases:
   - no such file or directory, command not found
   - hermes binary not found, hermes not found
   - ssh: connect to host, connection timed out, host key verification failed

3. cron/__init__.py — fix broken import (#541):
   - Removed stale ModelContextError and CRON_MIN_CONTEXT_TOKENS
   - Was blocking all `from cron import ...` calls

Closes #561 (Closes #350, #541)
2026-04-13 22:15:23 -04:00
954fd992eb Merge pull request 'perf: lazy session creation — defer DB write until first message (#314)' (#449) from whip/314-1776127532 into main
Some checks failed
Forge CI / smoke-and-build (push) Failing after 55s
Forge CI / smoke-and-build (pull_request) Failing after 1m12s
perf: lazy session creation (#314)

Closes #314.
2026-04-14 01:08:13 +00:00
Metatron
f35f56e397 perf: lazy session creation — defer DB write until first message (closes #314)
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 56s
Remove eager create_session() call from AIAgent.__init__(). Sessions
are now created lazily on first _flush_messages_to_session_db() call
via ensure_session() which uses INSERT OR IGNORE.

Impact: eliminates 32.4% of sessions (3,564 of 10,985) that were
created at agent init but never received any messages.

The existing ensure_session() fallback in _flush_messages_to_session_db()
already handles this pattern — it was originally designed for recovery
after transient SQLite lock failures. Now it's the primary creation path.

Compression-initiated sessions still use create_session() directly
(line ~5995) since they have messages to write immediately.
2026-04-13 20:52:06 -04:00
4 changed files with 255 additions and 27 deletions

View File

@@ -26,7 +26,7 @@ from cron.jobs import (
trigger_job,
JOBS_FILE,
)
from cron.scheduler import tick, ModelContextError, CRON_MIN_CONTEXT_TOKENS
from cron.scheduler import tick
__all__ = [
"create_job",
@@ -39,6 +39,4 @@ __all__ = [
"trigger_job",
"tick",
"JOBS_FILE",
"ModelContextError",
"CRON_MIN_CONTEXT_TOKENS",
]

View File

@@ -186,7 +186,14 @@ _SCRIPT_FAILURE_PHRASES = (
"unable to execute",
"permission denied",
"no such file",
"no such file or directory",
"command not found",
"traceback",
"hermes binary not found",
"hermes not found",
"ssh: connect to host",
"connection timed out",
"host key verification failed",
)

243
cron/ssh_dispatch.py Normal file
View File

@@ -0,0 +1,243 @@
"""SSH Dispatch — validated remote hermes execution for cron jobs.
Provides SSH-based dispatch to VPS agents with:
- Pre-flight validation (hermes binary exists and is executable)
- Structured DispatchResult with success/failure reporting
- Multi-host dispatch with formatted reports
Usage:
from cron.ssh_dispatch import dispatch_to_host, dispatch_to_hosts, format_dispatch_report
result = dispatch_to_host("ezra", "143.198.27.163", "Check the beacon repo for open issues")
if not result.success:
print(result.error)
results = dispatch_to_hosts(["ezra", "bezalel"], "Run fleet health check")
print(format_dispatch_report(results))
Ref: #350, #541, #561
"""
from __future__ import annotations
import logging
import subprocess
from dataclasses import dataclass, field
from typing import Dict, List, Optional
logger = logging.getLogger(__name__)

# Known VPS hosts (can be overridden via env or config).
# Maps friendly hostname -> address; used as the default lookup in
# dispatch_to_hosts() when no host_map override is supplied.
DEFAULT_HOSTS: Dict[str, str] = {
    "ezra": "143.198.27.163",
    "bezalel": "159.203.146.185",
}

# SSH options for non-interactive, fast-fail connections:
# - ConnectTimeout=10: give up on unreachable hosts quickly
# - StrictHostKeyChecking=accept-new: auto-accept a first-seen host key but
#   still fail if a known key changes
# - BatchMode=yes: never prompt for a password (cron has no TTY)
# - LogLevel=ERROR: keep stderr quiet so real errors are easy to parse
_SSH_OPTS = [
    "-o", "ConnectTimeout=10",
    "-o", "StrictHostKeyChecking=accept-new",
    "-o", "BatchMode=yes",
    "-o", "LogLevel=ERROR",
]

# Candidate install locations for the hermes binary on the remote host,
# probed in this order by probe_hermes().
_HERMES_CHECK_PATHS = [
    "~/.local/bin/hermes",
    "/usr/local/bin/hermes",
    "~/.hermes/bin/hermes",
]
@dataclass
class DispatchResult:
    """Outcome of one SSH dispatch attempt against a single host.

    Attributes:
        host: Friendly hostname the dispatch targeted.
        address: IP address or hostname actually connected to.
        success: True when the remote command exited with status 0.
        output: Captured (possibly truncated) stdout from the remote run.
        error: Human-readable failure description; empty on success.
        hermes_found: Whether a hermes binary was located on the remote.
        hermes_path: Remote path of the hermes binary, if any.
        exit_code: Remote exit status; -1 when no command completed.
    """

    host: str
    address: str
    success: bool
    output: str = ""
    error: str = ""
    hermes_found: bool = False
    hermes_path: str = ""
    exit_code: int = -1

    @property
    def summary(self) -> str:
        """One-line status string suitable for logging."""
        if not self.success:
            return f"[FAIL] {self.host} ({self.address}): {self.error}"
        return f"[OK] {self.host} ({self.address})"
def probe_hermes(host: str, address: str) -> tuple[bool, str]:
    """Check if hermes binary exists and is executable on remote host.

    Probes each candidate in _HERMES_CHECK_PATHS in order and stops at the
    first executable match.

    Args:
        host: Friendly hostname (used only for log messages).
        address: IP address or hostname to SSH into.

    Returns:
        (found, path): found is True when an executable hermes was located;
        path is the first matching remote path, or "" when not found.
    """
    # Group each probe in a subshell. The previous flat chain
    # "test -x p1 && echo p1 || test -x p2 && echo p2 || ..." was wrong:
    # shell && and || have EQUAL precedence (left-associative), so a
    # successful first match still ran every later echo, yielding
    # multi-line bogus output. Parenthesised groups short-circuit correctly,
    # and tilde paths still expand inside the subshells.
    check_cmds = " || ".join(f"(test -x {p} && echo {p})" for p in _HERMES_CHECK_PATHS)
    remote_cmd = f"bash -c '{check_cmds} || echo NOTFOUND'"
    try:
        # SSH options must precede the destination: ssh stops option
        # parsing at the first non-option argument, so options placed after
        # the address are sent to the remote as part of the command.
        result = subprocess.run(
            ["ssh", *_SSH_OPTS, address, remote_cmd],
            capture_output=True,
            text=True,
            timeout=15,
        )
        output = result.stdout.strip()
        if output and output != "NOTFOUND":
            return True, output
        return False, ""
    except subprocess.TimeoutExpired:
        logger.warning("SSH probe timed out for %s", host)
        return False, ""
    except Exception as e:
        # Best-effort probe: any local failure (ssh missing, DNS, etc.)
        # degrades to "not found" rather than crashing the scheduler.
        logger.warning("SSH probe failed for %s: %s", host, e)
        return False, ""
def dispatch_to_host(
    host: str,
    address: str,
    prompt: str,
    timeout: int = 300,
    validate: bool = True,
) -> DispatchResult:
    """Dispatch a prompt to a remote hermes instance via SSH.

    Args:
        host: Hostname (ezra, bezalel, etc.)
        address: IP address or hostname
        prompt: The prompt/task to dispatch
        timeout: SSH timeout in seconds
        validate: Whether to probe for hermes binary first

    Returns:
        DispatchResult with success/failure details.
    """
    # Pre-flight validation: fail fast with a clear error instead of an
    # opaque remote "command not found".
    if validate:
        found, path = probe_hermes(host, address)
        if not found:
            return DispatchResult(
                host=host,
                address=address,
                success=False,
                error="hermes binary not found on remote host",
                hermes_found=False,
            )
    else:
        # Skip the probe and assume the conventional install location.
        found, path = True, "~/.local/bin/hermes"
    # Build the dispatch command: hermes chat in quiet mode, prompt piped
    # via stdin. Single quotes in the prompt are closed, escaped, and
    # reopened ('\'') so the remote shell sees them literally.
    escaped_prompt = prompt.replace("'", "'\\''")
    remote_cmd = f"echo '{escaped_prompt}' | {path} chat --quiet"
    try:
        # SSH options must come BEFORE the destination: ssh stops option
        # parsing at the first non-option argument, so options placed after
        # the address would be executed on the remote as part of the command
        # (and the timeouts/BatchMode would silently not apply).
        result = subprocess.run(
            ["ssh", *_SSH_OPTS, address, remote_cmd],
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        success = result.returncode == 0
        error = ""
        if not success:
            error = result.stderr.strip() if result.stderr else f"exit code {result.returncode}"
        return DispatchResult(
            host=host,
            address=address,
            success=success,
            output=result.stdout.strip()[:500],  # Truncate long output
            error=error,
            hermes_found=found,
            hermes_path=path,
            exit_code=result.returncode,
        )
    except subprocess.TimeoutExpired:
        return DispatchResult(
            host=host,
            address=address,
            success=False,
            error=f"SSH dispatch timed out after {timeout}s",
            hermes_found=found,
            hermes_path=path,
        )
    except Exception as e:
        # Boundary catch-all: convert unexpected local failures (e.g. ssh
        # binary missing) into a structured failure result for the report.
        return DispatchResult(
            host=host,
            address=address,
            success=False,
            error=f"SSH dispatch failed: {e}",
            hermes_found=found,
            hermes_path=path,
        )
def dispatch_to_hosts(
    hosts: List[str],
    prompt: str,
    host_map: Optional[Dict[str, str]] = None,
    timeout: int = 300,
) -> List[DispatchResult]:
    """Dispatch one prompt to several hosts, collecting a result per host.

    Args:
        hosts: List of hostnames
        prompt: The prompt/task to dispatch
        host_map: Optional override of hostname -> address mapping
        timeout: SSH timeout per host

    Returns:
        List of DispatchResult, one per host.
    """
    lookup = host_map or DEFAULT_HOSTS
    outcomes: List[DispatchResult] = []
    for name in hosts:
        target = lookup.get(name)
        if target:
            outcome = dispatch_to_host(name, target, prompt, timeout=timeout)
            logger.info(outcome.summary)
        else:
            # Unknown hostname: record a structured failure, never SSH.
            outcome = DispatchResult(
                host=name,
                address="unknown",
                success=False,
                error=f"Unknown host: {name}",
            )
        outcomes.append(outcome)
    return outcomes
def format_dispatch_report(results: List[DispatchResult]) -> str:
    """Format multi-host dispatch results as a readable plain-text report.

    Args:
        results: DispatchResult entries, one per host.

    Returns:
        A multi-line report: header, summary count line, then one section
        per host. Returns "No dispatch results." for an empty list.
    """
    if not results:
        return "No dispatch results."
    lines = ["SSH Dispatch Report", "=" * 40, ""]
    ok_count = sum(1 for r in results if r.success)
    fail_count = len(results) - ok_count
    lines.append(f"Total: {len(results)} | OK: {ok_count} | FAIL: {fail_count}")
    lines.append("")
    for r in results:
        # ASCII status markers, matching DispatchResult.summary. The
        # original assigned "" for BOTH branches (the non-ASCII glyphs were
        # lost in encoding), so per-host status never appeared in reports.
        status = "[OK]" if r.success else "[FAIL]"
        lines.append(f"  {status} {r.host} ({r.address})")
        if r.hermes_path:
            lines.append(f"    hermes: {r.hermes_path}")
        if r.success and r.output:
            lines.append(f"    output: {r.output[:100]}...")
        if not r.success:
            lines.append(f"    error: {r.error}")
        lines.append("")
    return "\n".join(lines)

View File

@@ -1001,30 +1001,10 @@ class AIAgent:
self._session_db = session_db
self._parent_session_id = parent_session_id
self._last_flushed_db_idx = 0 # tracks DB-write cursor to prevent duplicate writes
if self._session_db:
try:
self._session_db.create_session(
session_id=self.session_id,
source=self.platform or os.environ.get("HERMES_SESSION_SOURCE", "cli"),
model=self.model,
model_config={
"max_iterations": self.max_iterations,
"reasoning_config": reasoning_config,
"max_tokens": max_tokens,
},
user_id=None,
parent_session_id=self._parent_session_id,
)
except Exception as e:
# Transient SQLite lock contention (e.g. CLI and gateway writing
# concurrently) must NOT permanently disable session_search for
# this agent. Keep _session_db alive — subsequent message
# flushes and session_search calls will still work once the
# lock clears. The session row may be missing from the index
# for this run, but that is recoverable (flushes upsert rows).
logger.warning(
"Session DB create_session failed (session_search still available): %s", e
)
# Lazy session creation: defer until first message flush (#314).
# _flush_messages_to_session_db() calls ensure_session() which uses
# INSERT OR IGNORE — creating the row only when messages arrive.
# This eliminates 32% of sessions that are created but never used.
# In-memory todo list for task planning (one per agent/session)
from tools.todo_tool import TodoStore