Compare commits

..

1 Commit

Author SHA1 Message Date
Alexander Whitestone
bc1a188e9c fix(cron): SSH dispatch validation + failure detection + broken import
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m25s
1. New cron/ssh_dispatch.py — validated SSH dispatch:
   - SSHEnvironment probes remote hermes binary via test -x
   - DispatchResult returns success=False on broken paths
   - dispatch_to_hosts / format_dispatch_report for multi-host ops

2. cron/scheduler.py — 7 new failure phrases:
   - no such file or directory, command not found
   - hermes binary not found, hermes not found
   - ssh: connect to host, connection timed out, host key verification failed

3. cron/__init__.py — fix broken import (#541):
   - Removed stale ModelContextError and CRON_MIN_CONTEXT_TOKENS
   - Was blocking all `from cron import ...` calls

Closes #561 (Closes #350, #541)
2026-04-13 22:15:23 -04:00
5 changed files with 251 additions and 693 deletions

View File

@@ -26,7 +26,7 @@ from cron.jobs import (
trigger_job,
JOBS_FILE,
)
from cron.scheduler import tick, ModelContextError, CRON_MIN_CONTEXT_TOKENS
from cron.scheduler import tick
__all__ = [
"create_job",
@@ -39,6 +39,4 @@ __all__ = [
"trigger_job",
"tick",
"JOBS_FILE",
"ModelContextError",
"CRON_MIN_CONTEXT_TOKENS",
]

View File

@@ -186,7 +186,14 @@ _SCRIPT_FAILURE_PHRASES = (
"unable to execute",
"permission denied",
"no such file",
"no such file or directory",
"command not found",
"traceback",
"hermes binary not found",
"hermes not found",
"ssh: connect to host",
"connection timed out",
"host key verification failed",
)

243
cron/ssh_dispatch.py Normal file
View File

@@ -0,0 +1,243 @@
"""SSH Dispatch — validated remote hermes execution for cron jobs.
Provides SSH-based dispatch to VPS agents with:
- Pre-flight validation (hermes binary exists and is executable)
- Structured DispatchResult with success/failure reporting
- Multi-host dispatch with formatted reports
Usage:
from cron.ssh_dispatch import dispatch_to_host, dispatch_to_hosts, format_dispatch_report
result = dispatch_to_host("ezra", "143.198.27.163", "Check the beacon repo for open issues")
if not result.success:
print(result.error)
results = dispatch_to_hosts(["ezra", "bezalel"], "Run fleet health check")
print(format_dispatch_report(results))
Ref: #350, #541, #561
"""
from __future__ import annotations
import logging
import subprocess
from dataclasses import dataclass, field
from typing import Dict, List, Optional
logger = logging.getLogger(__name__)
# Known VPS hosts (can be overridden via env or config).
# Maps logical hostname -> IP address used for the SSH connection.
DEFAULT_HOSTS: Dict[str, str] = {
    "ezra": "143.198.27.163",
    "bezalel": "159.203.146.185",
}
# SSH options for non-interactive, fast-fail connections:
# BatchMode disables interactive password prompts; accept-new trusts a
# previously unseen host key but still rejects a *changed* one.
_SSH_OPTS = [
    "-o", "ConnectTimeout=10",
    "-o", "StrictHostKeyChecking=accept-new",
    "-o", "BatchMode=yes",
    "-o", "LogLevel=ERROR",
]
# Candidate install locations probed (in order) for the hermes binary
# on the remote host; the first executable match wins.
_HERMES_CHECK_PATHS = [
    "~/.local/bin/hermes",
    "/usr/local/bin/hermes",
    "~/.hermes/bin/hermes",
]
@dataclass
class DispatchResult:
    """Outcome of one SSH dispatch attempt against a single host."""

    host: str               # logical hostname, e.g. "ezra"
    address: str            # IP address or DNS name used to connect
    success: bool           # True when the remote command exited 0
    output: str = ""        # captured (possibly truncated) stdout
    error: str = ""         # human-readable failure reason, empty on success
    hermes_found: bool = False   # whether the remote hermes binary was located
    hermes_path: str = ""        # path of the remote hermes binary, if found
    exit_code: int = -1          # remote exit status; -1 means "never ran"

    @property
    def summary(self) -> str:
        """One-line human-readable status for this host."""
        if not self.success:
            return f"[FAIL] {self.host} ({self.address}): {self.error}"
        return f"[OK] {self.host} ({self.address})"
def probe_hermes(host: str, address: str) -> tuple[bool, str]:
    """Check if hermes binary exists and is executable on remote host.

    Probes each candidate in _HERMES_CHECK_PATHS and returns (found, path)
    for the first executable match, or (False, "") when none match or the
    SSH connection fails/times out.
    """
    # BUG FIX: each candidate must be grouped. The old
    # `test -x p1 && echo p1 || test -x p2 && echo p2 || ...` chain is
    # left-associative, so after a successful match the *next* echo still
    # ran, producing multi-line output that was returned as the "path".
    check_cmds = " || ".join(
        f"{{ test -x {p} && echo {p}; }}" for p in _HERMES_CHECK_PATHS
    )
    remote_cmd = f"bash -c '{check_cmds} || echo NOTFOUND'"
    try:
        # BUG FIX: SSH options must precede the destination — anything
        # after the hostname is treated as part of the remote command.
        result = subprocess.run(
            ["ssh", *_SSH_OPTS, address, remote_cmd],
            capture_output=True,
            text=True,
            timeout=15,
        )
        # Defensively take only the first line even if the remote shell
        # printed more (e.g. stray profile output).
        stdout_lines = result.stdout.strip().splitlines()
        output = stdout_lines[0].strip() if stdout_lines else ""
        if output and output != "NOTFOUND":
            return True, output
        return False, ""
    except subprocess.TimeoutExpired:
        logger.warning("SSH probe timed out for %s", host)
        return False, ""
    except Exception as e:
        logger.warning("SSH probe failed for %s: %s", host, e)
        return False, ""
def dispatch_to_host(
    host: str,
    address: str,
    prompt: str,
    timeout: int = 300,
    validate: bool = True,
) -> DispatchResult:
    """Dispatch a prompt to a remote hermes instance via SSH.

    Args:
        host: Hostname (ezra, bezalel, etc.)
        address: IP address or hostname
        prompt: The prompt/task to dispatch
        timeout: SSH timeout in seconds
        validate: Whether to probe for hermes binary first

    Returns:
        DispatchResult with success/failure details.
    """
    # Pre-flight validation: refuse to dispatch when the remote binary is
    # missing, so the failure is explicit rather than a cryptic shell error.
    if validate:
        found, path = probe_hermes(host, address)
        if not found:
            return DispatchResult(
                host=host,
                address=address,
                success=False,
                error="hermes binary not found on remote host",
                hermes_found=False,
            )
    else:
        # Caller opted out of probing; assume the conventional install path.
        found, path = True, "~/.local/bin/hermes"
    # Build the dispatch command.
    # Use hermes chat in quiet mode, pipe prompt via stdin.
    # Single quotes in the prompt become '\'' so the remote shell treats
    # the whole prompt as one literal string.
    escaped_prompt = prompt.replace("'", "'\\''")
    remote_cmd = f"echo '{escaped_prompt}' | {path} chat --quiet"
    try:
        # BUG FIX: SSH options must come before the destination; placed
        # after it, ssh treats them as part of the remote command.
        result = subprocess.run(
            ["ssh", *_SSH_OPTS, address, remote_cmd],
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        success = result.returncode == 0
        error = ""
        if not success:
            error = result.stderr.strip() if result.stderr else f"exit code {result.returncode}"
        return DispatchResult(
            host=host,
            address=address,
            success=success,
            output=result.stdout.strip()[:500],  # Truncate long output
            error=error,
            hermes_found=found,
            hermes_path=path,
            exit_code=result.returncode,
        )
    except subprocess.TimeoutExpired:
        return DispatchResult(
            host=host,
            address=address,
            success=False,
            error=f"SSH dispatch timed out after {timeout}s",
            hermes_found=found,
            hermes_path=path,
        )
    except Exception as e:
        return DispatchResult(
            host=host,
            address=address,
            success=False,
            error=f"SSH dispatch failed: {e}",
            hermes_found=found,
            hermes_path=path,
        )
def dispatch_to_hosts(
    hosts: List[str],
    prompt: str,
    host_map: Optional[Dict[str, str]] = None,
    timeout: int = 300,
) -> List[DispatchResult]:
    """Dispatch one prompt to several hosts, collecting per-host results.

    Args:
        hosts: List of hostnames
        prompt: The prompt/task to dispatch
        host_map: Optional override of hostname -> address mapping
        timeout: SSH timeout per host

    Returns:
        List of DispatchResult, one per host.
    """
    # Fall back to the module defaults when no (non-empty) map is given.
    lookup = host_map or DEFAULT_HOSTS
    outcomes: List[DispatchResult] = []
    for name in hosts:
        addr = lookup.get(name)
        if not addr:
            # Unknown hostname: record a synthetic failure instead of raising,
            # so one bad entry doesn't abort the whole fleet dispatch.
            outcomes.append(
                DispatchResult(
                    host=name,
                    address="unknown",
                    success=False,
                    error=f"Unknown host: {name}",
                )
            )
            continue
        outcome = dispatch_to_host(name, addr, prompt, timeout=timeout)
        outcomes.append(outcome)
        logger.info(outcome.summary)
    return outcomes
def format_dispatch_report(results: List[DispatchResult]) -> str:
    """Format multi-host dispatch results as a readable report.

    Args:
        results: DispatchResult entries, one per host.

    Returns:
        A multi-line report: header, an aggregate count line, then a
        per-host section; or a placeholder string when *results* is empty.
    """
    if not results:
        return "No dispatch results."
    lines = ["SSH Dispatch Report", "=" * 40, ""]
    ok_count = sum(1 for r in results if r.success)
    fail_count = len(results) - ok_count
    lines.append(f"Total: {len(results)} | OK: {ok_count} | FAIL: {fail_count}")
    lines.append("")
    for r in results:
        # BUG FIX: both status markers were empty strings (likely stripped
        # emoji), so every row rendered without a status. Use the same
        # [OK]/[FAIL] tags as DispatchResult.summary for consistency.
        status = "[OK]" if r.success else "[FAIL]"
        lines.append(f" {status} {r.host} ({r.address})")
        if r.hermes_path:
            lines.append(f" hermes: {r.hermes_path}")
        if r.success and r.output:
            lines.append(f" output: {r.output[:100]}...")
        if not r.success:
            lines.append(f" error: {r.error}")
        lines.append("")
    return "\n".join(lines)

View File

@@ -5258,80 +5258,6 @@ For more help on a command:
sessions_parser.set_defaults(func=cmd_sessions)
# Warm session command
warm_parser = subparsers.add_parser(
"warm",
help="Warm session provisioning",
description="Create pre-contextualized sessions from templates"
)
warm_subparsers = warm_parser.add_subparsers(dest="warm_command")
# Extract command
warm_extract = warm_subparsers.add_parser("extract", help="Extract template from session")
warm_extract.add_argument("session_id", help="Session ID to extract from")
warm_extract.add_argument("--name", "-n", required=True, help="Template name")
warm_extract.add_argument("--description", "-d", default="", help="Template description")
# List command
warm_subparsers.add_parser("list", help="List available templates")
# Test command
warm_test = warm_subparsers.add_parser("test", help="Test warm session creation")
warm_test.add_argument("template_id", help="Template ID")
warm_test.add_argument("message", help="Test message")
# Delete command
warm_delete = warm_subparsers.add_parser("delete", help="Delete a template")
warm_delete.add_argument("template_id", help="Template ID to delete")
warm_parser.set_defaults(func=cmd_warm)
# A/B testing command
ab_parser = subparsers.add_parser(
"ab-test",
help="A/B test warm vs cold sessions",
description="Framework for comparing warm and cold session performance"
)
ab_subparsers = ab_parser.add_subparsers(dest="ab_command")
# Create test
ab_create = ab_subparsers.add_parser("create", help="Create a new A/B test")
ab_create.add_argument("--task-id", required=True, help="Task ID")
ab_create.add_argument("--description", required=True, help="Task description")
ab_create.add_argument("--prompt", required=True, help="Test prompt")
ab_create.add_argument("--category", default="general", help="Task category")
ab_create.add_argument("--difficulty", default="medium", choices=["easy", "medium", "hard"])
# List tests
ab_subparsers.add_parser("list", help="List all A/B tests")
# Show test
ab_show = ab_subparsers.add_parser("show", help="Show test details")
ab_show.add_argument("test_id", help="Test ID")
# Analyze test
ab_analyze = ab_subparsers.add_parser("analyze", help="Analyze test results")
ab_analyze.add_argument("test_id", help="Test ID")
# Add result
ab_add = ab_subparsers.add_parser("add-result", help="Add a test result")
ab_add.add_argument("test_id", help="Test ID")
ab_add.add_argument("--session-type", required=True, choices=["cold", "warm"])
ab_add.add_argument("--session-id", required=True, help="Session ID")
ab_add.add_argument("--tool-calls", type=int, default=0)
ab_add.add_argument("--successful-calls", type=int, default=0)
ab_add.add_argument("--completion-time", type=float, default=0.0)
ab_add.add_argument("--success", action="store_true")
ab_add.add_argument("--notes", default="")
# Delete test
ab_delete = ab_subparsers.add_parser("delete", help="Delete a test")
ab_delete.add_argument("test_id", help="Test ID")
ab_parser.set_defaults(func=cmd_ab_test)
# =========================================================================
# insights command
# =========================================================================
@@ -5672,102 +5598,3 @@ Examples:
# Script entry point: delegate to the CLI's main() when run directly.
if __name__ == "__main__":
    main()
def cmd_warm(args):
    """Handle `hermes warm ...` by forwarding to the warm_session CLI."""
    from hermes_cli.colors import Colors, color

    subcmd = getattr(args, 'warm_command', None)
    if subcmd is None:
        # No subcommand given: show usage and exit successfully.
        print(color("Warm Session Provisioning", Colors.CYAN))
        print("\nCommands:")
        print(" hermes warm extract SESSION_ID --name NAME - Extract template from session")
        print(" hermes warm list - List available templates")
        print(" hermes warm test TEMPLATE_ID MESSAGE - Test warm session")
        print(" hermes warm delete TEMPLATE_ID - Delete a template")
        return 0
    try:
        from tools.warm_session import warm_session_cli

        # Translate the argparse namespace into the flat argv-style list
        # that warm_session_cli expects.
        forwarded = []
        if subcmd == "extract":
            forwarded = ["extract", args.session_id, "--name", args.name]
            if args.description:
                forwarded += ["--description", args.description]
        elif subcmd == "list":
            forwarded = ["list"]
        elif subcmd == "test":
            forwarded = ["test", args.template_id, args.message]
        elif subcmd == "delete":
            forwarded = ["delete", args.template_id]
        return warm_session_cli(forwarded)
    except ImportError as e:
        print(color(f"Error: Cannot import warm_session module: {e}", Colors.RED))
        return 1
    except Exception as e:
        print(color(f"Error: {e}", Colors.RED))
        return 1
def cmd_ab_test(args):
    """Handle A/B testing commands.

    Translates the argparse namespace into an argv-style list and forwards
    it to tools.session_ab_testing.ab_test_cli. Returns a process exit
    code (0 on success / usage display, 1 on error).
    """
    from hermes_cli.colors import Colors, color
    subcmd = getattr(args, 'ab_command', None)
    if subcmd is None:
        # No subcommand given: print usage and exit successfully.
        print(color("A/B Testing Framework for Warm vs Cold Sessions", Colors.CYAN))
        print("\nCommands:")
        print(" hermes ab-test create --task-id ID --description DESC --prompt PROMPT")
        print(" hermes ab-test list")
        print(" hermes ab-test show TEST_ID")
        print(" hermes ab-test analyze TEST_ID")
        print(" hermes ab-test add-result TEST_ID --session-type TYPE --session-id ID")
        print(" hermes ab-test delete TEST_ID")
        return 0
    try:
        from tools.session_ab_testing import ab_test_cli
        args_list = []
        if subcmd == "create":
            args_list = ["create", "--task-id", args.task_id, "--description", args.description, "--prompt", args.prompt]
            if args.category:
                args_list.extend(["--category", args.category])
            if args.difficulty:
                args_list.extend(["--difficulty", args.difficulty])
        elif subcmd == "list":
            args_list = ["list"]
        elif subcmd == "show":
            args_list = ["show", args.test_id]
        elif subcmd == "analyze":
            args_list = ["analyze", args.test_id]
        elif subcmd == "add-result":
            args_list = ["add-result", args.test_id, "--session-type", args.session_type, "--session-id", args.session_id]
            # NOTE(review): falsy values (0, 0.0, "") are not forwarded;
            # the downstream CLI defaults match, so nothing is lost — but
            # confirm if those defaults ever diverge.
            if args.tool_calls:
                args_list.extend(["--tool-calls", str(args.tool_calls)])
            if args.successful_calls:
                args_list.extend(["--successful-calls", str(args.successful_calls)])
            if args.completion_time:
                args_list.extend(["--completion-time", str(args.completion_time)])
            if args.success:
                args_list.append("--success")
            if args.notes:
                args_list.extend(["--notes", args.notes])
        elif subcmd == "delete":
            args_list = ["delete", args.test_id]
        return ab_test_cli(args_list)
    except ImportError as e:
        print(color(f"Error: Cannot import session_ab_testing module: {e}", Colors.RED))
        return 1
    except Exception as e:
        print(color(f"Error: {e}", Colors.RED))
        return 1

View File

@@ -1,517 +0,0 @@
"""
Warm Session A/B Testing Framework
Framework for comparing warm vs cold session performance.
Addresses research questions from issue #327.
Issue: #327
"""
import json
import logging
import time
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from dataclasses import dataclass, asdict, field
from enum import Enum
import statistics
logger = logging.getLogger(__name__)
class SessionType(Enum):
    """Which flavor of session a test run used."""

    # Fresh session, no warm-up
    COLD = "cold"
    # Session with warm-up context
    WARM = "warm"
@dataclass
class TestTask:
    """One benchmark task to be run in both cold and warm sessions."""

    task_id: str
    description: str
    prompt: str
    # Tools we expect the agent to invoke while solving the task.
    expected_tools: List[str] = field(default_factory=list)
    # Free-form criteria used to judge whether the run succeeded.
    success_criteria: Dict[str, Any] = field(default_factory=dict)
    category: str = "general"
    difficulty: str = "medium"  # one of: easy, medium, hard
@dataclass
class SessionResult:
    """Metrics captured from a single session run of a test task."""

    session_id: str
    session_type: SessionType
    task_id: str
    start_time: str
    end_time: Optional[str] = None
    message_count: int = 0
    tool_calls: int = 0
    successful_tool_calls: int = 0
    errors: List[str] = field(default_factory=list)
    completion_time_seconds: float = 0.0
    user_corrections: int = 0
    success: bool = False
    notes: str = ""

    @property
    def error_rate(self) -> float:
        """Fraction of tool calls that failed; 0.0 when none were made."""
        if not self.tool_calls:
            return 0.0
        failed = self.tool_calls - self.successful_tool_calls
        return failed / self.tool_calls

    @property
    def success_rate(self) -> float:
        """Fraction of tool calls that succeeded; 0.0 when none were made."""
        if not self.tool_calls:
            return 0.0
        return self.successful_tool_calls / self.tool_calls

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict, including the derived rates."""
        return {
            "session_id": self.session_id,
            "session_type": self.session_type.value,
            "task_id": self.task_id,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "message_count": self.message_count,
            "tool_calls": self.tool_calls,
            "successful_tool_calls": self.successful_tool_calls,
            "errors": self.errors,
            "completion_time_seconds": self.completion_time_seconds,
            "user_corrections": self.user_corrections,
            "success": self.success,
            "error_rate": self.error_rate,
            "success_rate": self.success_rate,
            "notes": self.notes
        }
@dataclass
class ABTestResult:
    """Results from an A/B test: one task, N cold runs vs N warm runs."""
    test_id: str
    task: TestTask
    cold_results: List[SessionResult] = field(default_factory=list)
    warm_results: List[SessionResult] = field(default_factory=list)
    created_at: str = field(default_factory=lambda: datetime.now().isoformat())

    def add_result(self, result: SessionResult):
        """Add a session result, routing it to the cold or warm bucket."""
        if result.session_type == SessionType.COLD:
            self.cold_results.append(result)
        else:
            self.warm_results.append(result)

    def get_summary(self) -> Dict[str, Any]:
        """Get summary statistics comparing the cold and warm arms."""
        def calc_stats(results: List[SessionResult]) -> Dict[str, Any]:
            # Per-arm aggregates; an empty arm reports only its count.
            if not results:
                return {"count": 0}
            error_rates = [r.error_rate for r in results]
            success_rates = [r.success_rate for r in results]
            # Zero completion times are treated as "not recorded" and excluded.
            completion_times = [r.completion_time_seconds for r in results if r.completion_time_seconds > 0]
            message_counts = [r.message_count for r in results]
            return {
                "count": len(results),
                "avg_error_rate": statistics.mean(error_rates) if error_rates else 0,
                "avg_success_rate": statistics.mean(success_rates) if success_rates else 0,
                "avg_completion_time": statistics.mean(completion_times) if completion_times else 0,
                "avg_messages": statistics.mean(message_counts) if message_counts else 0,
                "success_count": sum(1 for r in results if r.success)
            }
        cold_stats = calc_stats(self.cold_results)
        warm_stats = calc_stats(self.warm_results)
        # Relative improvement of warm over cold; only computed when both
        # arms have data and the cold baseline is non-zero.
        improvement = {}
        if cold_stats.get("count", 0) > 0 and warm_stats.get("count", 0) > 0:
            cold_error = cold_stats.get("avg_error_rate", 0)
            warm_error = warm_stats.get("avg_error_rate", 0)
            if cold_error > 0:
                # Positive value = warm reduced the error rate.
                improvement["error_rate"] = (cold_error - warm_error) / cold_error
            cold_success = cold_stats.get("avg_success_rate", 0)
            warm_success = warm_stats.get("avg_success_rate", 0)
            if cold_success > 0:
                # Positive value = warm improved the success rate.
                improvement["success_rate"] = (warm_success - cold_success) / cold_success
        return {
            "task_id": self.task.task_id,
            "cold": cold_stats,
            "warm": warm_stats,
            "improvement": improvement,
            "recommendation": self._get_recommendation(cold_stats, warm_stats)
        }

    def _get_recommendation(self, cold_stats: Dict, warm_stats: Dict) -> str:
        """Generate a human-readable recommendation from the two arms' stats."""
        # Require a minimum sample size before recommending anything.
        if cold_stats.get("count", 0) < 3 or warm_stats.get("count", 0) < 3:
            return "Insufficient data (need at least 3 tests each)"
        cold_error = cold_stats.get("avg_error_rate", 0)
        warm_error = warm_stats.get("avg_error_rate", 0)
        if warm_error < cold_error * 0.8:  # 20% improvement
            return "WARM recommended: Significant error reduction"
        elif warm_error > cold_error * 1.2:  # 20% worse
            return "COLD recommended: Warm sessions performed worse"
        else:
            return "No significant difference detected"

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the test (including a freshly computed summary) for JSON storage."""
        return {
            "test_id": self.test_id,
            "task": asdict(self.task),
            "cold_results": [r.to_dict() for r in self.cold_results],
            "warm_results": [r.to_dict() for r in self.warm_results],
            "created_at": self.created_at,
            "summary": self.get_summary()
        }
class ABTestManager:
    """Persist and manage A/B tests as JSON files on disk."""

    def __init__(self, test_dir: Optional[Path] = None):
        """Create a manager rooted at *test_dir* (default: ~/.hermes/ab_tests)."""
        self.test_dir = test_dir or Path.home() / ".hermes" / "ab_tests"
        self.test_dir.mkdir(parents=True, exist_ok=True)

    def create_test(self, task: TestTask) -> ABTestResult:
        """Create, persist, and return a new A/B test for *task*."""
        test_id = f"test_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{task.task_id}"
        result = ABTestResult(
            test_id=test_id,
            task=task
        )
        self.save_test(result)
        return result

    def save_test(self, test: ABTestResult):
        """Write the full test (including computed summary) to <test_id>.json."""
        path = self.test_dir / f"{test.test_id}.json"
        with open(path, 'w') as f:
            json.dump(test.to_dict(), f, indent=2)

    def load_test(self, test_id: str) -> Optional[ABTestResult]:
        """Load a test by id; returns None when missing or unreadable."""
        path = self.test_dir / f"{test_id}.json"
        if not path.exists():
            return None
        try:
            with open(path, 'r') as f:
                data = json.load(f)
            task = TestTask(**data["task"])
            test = ABTestResult(
                test_id=data["test_id"],
                task=task,
                created_at=data.get("created_at", "")
            )
            # BUG FIX: SessionResult.to_dict() serializes the derived
            # "error_rate"/"success_rate" properties; feeding them back into
            # SessionResult(**r) raised TypeError (unexpected keyword), which
            # the broad except turned into a silent None for every test that
            # had results. Strip the derived keys before reconstructing.
            for r in data.get("cold_results", []):
                r = {k: v for k, v in r.items() if k not in ("error_rate", "success_rate")}
                r["session_type"] = SessionType(r["session_type"])
                test.cold_results.append(SessionResult(**r))
            for r in data.get("warm_results", []):
                r = {k: v for k, v in r.items() if k not in ("error_rate", "success_rate")}
                r["session_type"] = SessionType(r["session_type"])
                test.warm_results.append(SessionResult(**r))
            return test
        except Exception as e:
            logger.error(f"Failed to load test: {e}")
            return None

    def list_tests(self) -> List[Dict[str, Any]]:
        """Summarize every *.json test file; unreadable files are skipped."""
        tests = []
        for path in self.test_dir.glob("*.json"):
            try:
                with open(path, 'r') as f:
                    data = json.load(f)
                tests.append({
                    "test_id": data.get("test_id"),
                    "task_id": data.get("task", {}).get("task_id"),
                    "description": data.get("task", {}).get("description", ""),
                    "cold_count": len(data.get("cold_results", [])),
                    "warm_count": len(data.get("warm_results", [])),
                    "created_at": data.get("created_at")
                })
            except (OSError, json.JSONDecodeError, AttributeError) as e:
                # BUG FIX: was a bare `except: pass`, which swallowed even
                # KeyboardInterrupt/SystemExit and hid corrupt files entirely.
                logger.warning("Skipping unreadable test file %s: %s", path, e)
        return tests

    def delete_test(self, test_id: str) -> bool:
        """Delete the stored test; returns True when a file was removed."""
        path = self.test_dir / f"{test_id}.json"
        if path.exists():
            path.unlink()
            return True
        return False
class ABTestRunner:
    """Run A/B tests and analyze their stored results."""
    def __init__(self, manager: ABTestManager = None):
        # Falls back to a default manager rooted at ~/.hermes/ab_tests.
        self.manager = manager or ABTestManager()

    def run_comparison(
        self,
        task: TestTask,
        cold_messages: List[Dict],
        warm_messages: List[Dict],
        session_db=None
    ) -> Tuple[SessionResult, SessionResult]:
        """
        Run a comparison between cold and warm sessions.

        NOTE: scaffolding only — returns empty result shells and does NOT
        execute the task; *cold_messages*/*warm_messages*/*session_db* are
        currently unused (see the step list below).

        Returns:
            Tuple of (cold_result, warm_result)
        """
        # This is a framework - actual execution would depend on
        # integration with the agent system
        cold_result = SessionResult(
            session_id=f"cold_{task.task_id}_{int(time.time())}",
            session_type=SessionType.COLD,
            task_id=task.task_id,
            start_time=datetime.now().isoformat()
        )
        warm_result = SessionResult(
            session_id=f"warm_{task.task_id}_{int(time.time())}",
            session_type=SessionType.WARM,
            task_id=task.task_id,
            start_time=datetime.now().isoformat()
        )
        # In a real implementation, this would:
        # 1. Start a cold session with cold_messages
        # 2. Execute the task and collect metrics
        # 3. Start a warm session with warm_messages
        # 4. Execute the same task and collect metrics
        # 5. Return both results
        return cold_result, warm_result

    def analyze_results(self, test_id: str) -> Dict[str, Any]:
        """Analyze test results.

        Returns the test summary, augmented with rough variance-based
        significance notes when both arms have >= 3 runs, or
        {"error": ...} when the test id is unknown.
        """
        test = self.manager.load_test(test_id)
        if not test:
            return {"error": "Test not found"}
        summary = test.get_summary()
        # Add statistical significance check
        if (summary["cold"].get("count", 0) >= 3 and
                summary["warm"].get("count", 0) >= 3):
            # Simple t-test approximation
            cold_errors = [r.error_rate for r in test.cold_results]
            warm_errors = [r.error_rate for r in test.warm_results]
            if len(cold_errors) >= 2 and len(warm_errors) >= 2:
                cold_std = statistics.stdev(cold_errors) if len(cold_errors) > 1 else 0
                warm_std = statistics.stdev(warm_errors) if len(warm_errors) > 1 else 0
                summary["statistical_notes"] = {
                    "cold_std_dev": cold_std,
                    "warm_std_dev": warm_std,
                    # High variance in either arm -> lower confidence.
                    "significance": "low" if max(cold_std, warm_std) > 0.2 else "medium"
                }
        return summary
# CLI Interface
def ab_test_cli(args: List[str]) -> int:
    """CLI interface for A/B testing.

    Parses *args* (an argv-style list, excluding the program name) and
    executes one of: create, list, show, analyze, delete, add-result.
    Returns a process exit code: 0 on success, 1 on error or no command.
    """
    import argparse
    parser = argparse.ArgumentParser(description="Warm session A/B testing")
    subparsers = parser.add_subparsers(dest="command")
    # Create test
    create_parser = subparsers.add_parser("create", help="Create a new test")
    create_parser.add_argument("--task-id", required=True, help="Task ID")
    create_parser.add_argument("--description", required=True, help="Task description")
    create_parser.add_argument("--prompt", required=True, help="Test prompt")
    create_parser.add_argument("--category", default="general", help="Task category")
    create_parser.add_argument("--difficulty", default="medium", choices=["easy", "medium", "hard"])
    # List tests
    subparsers.add_parser("list", help="List all tests")
    # Show test results
    show_parser = subparsers.add_parser("show", help="Show test results")
    show_parser.add_argument("test_id", help="Test ID")
    # Analyze test
    analyze_parser = subparsers.add_parser("analyze", help="Analyze test results")
    analyze_parser.add_argument("test_id", help="Test ID")
    # Delete test
    delete_parser = subparsers.add_parser("delete", help="Delete a test")
    delete_parser.add_argument("test_id", help="Test ID")
    # Add result
    add_parser = subparsers.add_parser("add-result", help="Add a test result")
    add_parser.add_argument("test_id", help="Test ID")
    add_parser.add_argument("--session-type", required=True, choices=["cold", "warm"])
    add_parser.add_argument("--session-id", required=True, help="Session ID")
    add_parser.add_argument("--tool-calls", type=int, default=0)
    add_parser.add_argument("--successful-calls", type=int, default=0)
    add_parser.add_argument("--completion-time", type=float, default=0.0)
    add_parser.add_argument("--success", action="store_true")
    add_parser.add_argument("--notes", default="")
    parsed = parser.parse_args(args)
    if not parsed.command:
        # No subcommand: show help and signal failure.
        parser.print_help()
        return 1
    manager = ABTestManager()
    runner = ABTestRunner(manager)
    if parsed.command == "create":
        task = TestTask(
            task_id=parsed.task_id,
            description=parsed.description,
            prompt=parsed.prompt,
            category=parsed.category,
            difficulty=parsed.difficulty
        )
        test = manager.create_test(task)
        print(f"Created test: {test.test_id}")
        print(f"Task: {task.description}")
        return 0
    elif parsed.command == "list":
        tests = manager.list_tests()
        if not tests:
            print("No tests found.")
            return 0
        print("\n=== A/B Tests ===\n")
        for t in tests:
            print(f"ID: {t['test_id']}")
            print(f" Task: {t['description']}")
            print(f" Cold tests: {t['cold_count']}, Warm tests: {t['warm_count']}")
            print(f" Created: {t['created_at']}")
            print()
        return 0
    elif parsed.command == "show":
        test = manager.load_test(parsed.test_id)
        if not test:
            print(f"Test {parsed.test_id} not found")
            return 1
        print(f"\n=== Test: {test.test_id} ===\n")
        print(f"Task: {test.task.description}")
        print(f"Prompt: {test.task.prompt}")
        print(f"Category: {test.task.category}, Difficulty: {test.task.difficulty}")
        print(f"\nCold sessions: {len(test.cold_results)}")
        for r in test.cold_results:
            print(f" {r.session_id}: {r.success_rate:.0%} success, {r.error_rate:.0%} errors")
        print(f"\nWarm sessions: {len(test.warm_results)}")
        for r in test.warm_results:
            print(f" {r.session_id}: {r.success_rate:.0%} success, {r.error_rate:.0%} errors")
        return 0
    elif parsed.command == "analyze":
        analysis = runner.analyze_results(parsed.test_id)
        if "error" in analysis:
            print(f"Error: {analysis['error']}")
            return 1
        print(f"\n=== Analysis: {parsed.test_id} ===\n")
        cold = analysis.get("cold", {})
        warm = analysis.get("warm", {})
        print("Cold Sessions:")
        print(f" Count: {cold.get('count', 0)}")
        print(f" Avg error rate: {cold.get('avg_error_rate', 0):.1%}")
        print(f" Avg success rate: {cold.get('avg_success_rate', 0):.1%}")
        print(f" Avg completion time: {cold.get('avg_completion_time', 0):.1f}s")
        print("\nWarm Sessions:")
        print(f" Count: {warm.get('count', 0)}")
        print(f" Avg error rate: {warm.get('avg_error_rate', 0):.1%}")
        print(f" Avg success rate: {warm.get('avg_success_rate', 0):.1%}")
        print(f" Avg completion time: {warm.get('avg_completion_time', 0):.1f}s")
        improvement = analysis.get("improvement", {})
        if improvement:
            print("\nImprovement:")
            if "error_rate" in improvement:
                print(f" Error rate: {improvement['error_rate']:+.1%}")
            if "success_rate" in improvement:
                print(f" Success rate: {improvement['success_rate']:+.1%}")
        print(f"\nRecommendation: {analysis.get('recommendation', 'N/A')}")
        return 0
    elif parsed.command == "delete":
        if manager.delete_test(parsed.test_id):
            print(f"Deleted test: {parsed.test_id}")
            return 0
        else:
            print(f"Test {parsed.test_id} not found")
            return 1
    elif parsed.command == "add-result":
        test = manager.load_test(parsed.test_id)
        if not test:
            print(f"Test {parsed.test_id} not found")
            return 1
        # Build the result from CLI flags; timestamps are both "now" since
        # the run itself happened out-of-band.
        result = SessionResult(
            session_id=parsed.session_id,
            session_type=SessionType(parsed.session_type),
            task_id=test.task.task_id,
            start_time=datetime.now().isoformat(),
            end_time=datetime.now().isoformat(),
            tool_calls=parsed.tool_calls,
            successful_tool_calls=parsed.successful_calls,
            completion_time_seconds=parsed.completion_time,
            success=parsed.success,
            notes=parsed.notes
        )
        test.add_result(result)
        manager.save_test(test)
        print(f"Added {parsed.session_type} result to test {parsed.test_id}")
        print(f" Session: {parsed.session_id}")
        print(f" Success rate: {result.success_rate:.0%}")
        return 0
    return 1
# Script entry point: forward CLI args and exit with the CLI's return code.
if __name__ == "__main__":
    import sys
    sys.exit(ab_test_cli(sys.argv[1:]))