Compare commits

1 Commit

Alexander Whitestone
f8f4678ee4 feat: benchmark local Ollama models against 50 tok/s threshold (#287)
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m24s
Add scripts/benchmark_local_models.py — tests all local Ollama models
against the 50 tok/s UX threshold (configurable via --threshold).

Features:
- Auto-discovers all pulled Ollama models or test specific ones
- Configurable rounds, max tokens, threshold
- Per-round timing with prompt_eval/eval token breakdown
- Human-readable table report with PASS/FAIL/ERROR status
- JSON output mode (--json) for CI integration (see the sketch after the diffstat)
- Exit code 1 if any model fails threshold

Usage:
  python3 scripts/benchmark_local_models.py                 # all models, 3 rounds
  python3 scripts/benchmark_local_models.py --models qwen2.5:7b  # single model
  python3 scripts/benchmark_local_models.py --json          # CI output
  python3 scripts/benchmark_local_models.py --threshold 30  # custom threshold

Tested: gemma3:1b scores 141.8 tok/s (PASS).

Closes #287
2026-04-13 17:46:53 -04:00
4 changed files with 284 additions and 775 deletions
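
For CI, the --json mode is the machine-readable surface. Below is a minimal sketch of a CI gate consuming it; the script path comes from this commit, while the step itself is illustrative and assumes Ollama is reachable from the runner:

# Illustrative CI gate: run the benchmark in JSON mode and summarize results.
# Field names match the JSON payload built in main() below.
import json
import subprocess
import sys

proc = subprocess.run(
    [sys.executable, "scripts/benchmark_local_models.py", "--json", "--rounds", "1"],
    capture_output=True, text=True,
)
report = json.loads(proc.stdout)  # progress output goes to stderr
for r in report["results"]:
    status = "PASS" if r["meets_threshold"] else ("ERROR" if r["error"] else "FAIL")
    print(f"{r['model']}: {r['avg_tok_s']} tok/s [{status}]")
sys.exit(proc.returncode)  # the script already exits 1 on any failure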

@@ -1,333 +0,0 @@
"""Warm Session Provisioning — pre-proficient agent sessions.
Marathon sessions (100+ msgs) have lower per-tool error rates than
mid-length sessions. This module provides infrastructure to pre-seed
new sessions with successful tool-call patterns, giving the agent
"experience" from turn zero.
Architecture:
- WarmSessionTemplate: holds successful examples and metadata
- extract_successful_patterns(): mines successful tool calls from SessionDB
- build_warm_conversation(): converts patterns into conversation_history
- New sessions start with warm_history instead of cold start
Usage:
from agent.warm_session import (
WarmSessionTemplate,
extract_successful_patterns,
build_warm_conversation,
save_template,
load_template,
list_templates,
)
"""
import json
import logging
import time
from dataclasses import dataclass, field, asdict
from pathlib import Path
from typing import Any, Dict, List, Optional
from hermes_constants import get_hermes_home
logger = logging.getLogger(__name__)
TEMPLATES_DIR = get_hermes_home() / "warm_sessions"


@dataclass
class ToolCallExample:
    """A single successful tool call + result pair."""
    tool_name: str
    arguments: Dict[str, Any]
    result_summary: str  # truncated result for context efficiency
    result_success: bool
    context_hint: str = ""  # optional: what task this example illustrates


@dataclass
class WarmSessionTemplate:
    """A template for pre-seeding proficient sessions.

    Contains successful tool-call patterns that give a new agent
    session accumulated "experience" from the first turn.
    """
    name: str
    description: str
    examples: List[ToolCallExample] = field(default_factory=list)
    system_prompt_addendum: str = ""  # extra system prompt context
    tags: List[str] = field(default_factory=list)
    source_session_ids: List[str] = field(default_factory=list)
    created_at: float = 0
    version: int = 1

    def __post_init__(self):
        if not self.created_at:
            self.created_at = time.time()

    def to_dict(self) -> Dict[str, Any]:
        return asdict(self)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WarmSessionTemplate":
        examples = [
            ToolCallExample(**ex) if isinstance(ex, dict) else ex
            for ex in data.get("examples", [])
        ]
        return cls(
            name=data["name"],
            description=data.get("description", ""),
            examples=examples,
            system_prompt_addendum=data.get("system_prompt_addendum", ""),
            tags=data.get("tags", []),
            source_session_ids=data.get("source_session_ids", []),
            created_at=data.get("created_at", 0),
            version=data.get("version", 1),
        )


def _truncate_result(result_text: str, max_chars: int = 500) -> str:
    """Truncate a tool result to a summary-sized snippet."""
    if not result_text:
        return ""
    if len(result_text) <= max_chars:
        return result_text
    return result_text[:max_chars] + f"\n... ({len(result_text)} chars total, truncated)"


def extract_successful_patterns(
    session_db,
    min_messages: int = 20,
    max_sessions: int = 50,
    source_filter: Optional[str] = None,
) -> List[ToolCallExample]:
    """Mine successful tool-call patterns from completed sessions.

    Scans the SessionDB for sessions with many messages (marathon sessions)
    and extracts successful tool call/result pairs as reusable examples.

    Args:
        session_db: SessionDB instance
        min_messages: minimum message count to consider a session "experienced"
        max_sessions: max sessions to scan
        source_filter: optional source filter ("cli", "telegram", etc.)

    Returns:
        List of ToolCallExample instances from successful sessions.
    """
    max_examples = 100
    examples: List[ToolCallExample] = []
    try:
        sessions = session_db.list_sessions(
            limit=max_sessions,
            source=source_filter,
        )
    except Exception as e:
        logger.warning("Failed to list sessions: %s", e)
        return examples

    for session_meta in sessions:
        if len(examples) >= max_examples:
            break  # enough examples
        session_id = session_meta.get("id") or session_meta.get("session_id")
        if not session_id:
            continue
        msg_count = session_meta.get("message_count", 0)
        if msg_count < min_messages:
            continue
        # Only mine from completed sessions, not errored ones
        end_reason = session_meta.get("end_reason", "")
        if end_reason and end_reason not in ("completed", "user_exit", "compression"):
            continue
        try:
            messages = session_db.get_messages(session_id)
        except Exception:
            continue

        # Extract successful tool call/result pairs
        for msg in messages:
            if len(examples) >= max_examples:
                break  # cap checked at every loop level, not just the innermost
            role = msg.get("role", "")
            if role != "assistant":
                continue
            tool_calls_raw = msg.get("tool_calls")
            if not tool_calls_raw:
                continue
            try:
                tool_calls = json.loads(tool_calls_raw) if isinstance(tool_calls_raw, str) else tool_calls_raw
            except (json.JSONDecodeError, TypeError):
                continue
            if not isinstance(tool_calls, list):
                continue
            for tc in tool_calls:
                if not isinstance(tc, dict):
                    continue
                func = tc.get("function", {})
                tool_name = func.get("name", "")
                if not tool_name:
                    continue
                try:
                    arguments = json.loads(func.get("arguments", "{}"))
                except (json.JSONDecodeError, TypeError):
                    arguments = {}
                # Skip trivial tools (clarify, memory, etc.)
                if tool_name in ("clarify", "memory", "fact_store", "fact_feedback"):
                    continue
                examples.append(ToolCallExample(
                    tool_name=tool_name,
                    arguments=arguments,
                    result_summary="[result from successful session]",  # filled in by caller
                    result_success=True,
                ))
                if len(examples) >= max_examples:
                    break  # enough examples
    return examples


def build_warm_conversation(
    template: WarmSessionTemplate,
    max_examples: int = 20,
) -> List[Dict[str, Any]]:
    """Convert a template into conversation_history messages.

    Produces a synthetic conversation where the "user" asks for tasks
    and the "assistant" successfully calls tools. This primes the agent
    with successful patterns.

    Args:
        template: WarmSessionTemplate with examples
        max_examples: max examples to include (token budget)

    Returns:
        List of OpenAI-format message dicts suitable for conversation_history.
    """
    messages: List[Dict[str, Any]] = []
    if template.system_prompt_addendum:
        messages.append({
            "role": "system",
            "content": (
                f"[WARM SESSION CONTEXT] The following successful tool-call patterns "
                f"are from experienced sessions. Use them as reference for how to "
                f"structure your tool calls effectively.\n\n"
                f"{template.system_prompt_addendum}"
            ),
        })
    examples = template.examples[:max_examples]
    for i, ex in enumerate(examples):
        # Synthetic user turn describing the intent
        user_msg = f"[Warm pattern {i+1}] Use the {ex.tool_name} tool."
        if ex.context_hint:
            user_msg = f"[Warm pattern {i+1}] {ex.context_hint}"
        messages.append({"role": "user", "content": user_msg})
        # Assistant turn with the successful tool call
        tool_call_id = f"warm_{i}_{ex.tool_name}"
        messages.append({
            "role": "assistant",
            "content": None,
            "tool_calls": [{
                "id": tool_call_id,
                "type": "function",
                "function": {
                    "name": ex.tool_name,
                    "arguments": json.dumps(ex.arguments, ensure_ascii=False),
                },
            }],
        })
        # Tool result (synthetic success)
        messages.append({
            "role": "tool",
            "tool_call_id": tool_call_id,
            "content": ex.result_summary or f"Tool {ex.tool_name} executed successfully.",
        })
    return messages


def save_template(template: WarmSessionTemplate) -> Path:
    """Save a warm session template to disk."""
    TEMPLATES_DIR.mkdir(parents=True, exist_ok=True)
    path = TEMPLATES_DIR / f"{template.name}.json"
    path.write_text(json.dumps(template.to_dict(), indent=2, ensure_ascii=False))
    logger.info("Warm session template saved: %s", path)
    return path


def load_template(name: str) -> Optional[WarmSessionTemplate]:
    """Load a warm session template by name."""
    path = TEMPLATES_DIR / f"{name}.json"
    if not path.exists():
        return None
    try:
        data = json.loads(path.read_text())
        return WarmSessionTemplate.from_dict(data)
    except Exception as e:
        logger.warning("Failed to load warm session template '%s': %s", name, e)
        return None


def list_templates() -> List[Dict[str, Any]]:
    """List all saved warm session templates with metadata."""
    if not TEMPLATES_DIR.exists():
        return []
    templates = []
    for path in sorted(TEMPLATES_DIR.glob("*.json")):
        try:
            data = json.loads(path.read_text())
            templates.append({
                "name": data.get("name", path.stem),
                "description": data.get("description", ""),
                "tags": data.get("tags", []),
                "example_count": len(data.get("examples", [])),
                "created_at": data.get("created_at", 0),
            })
        except Exception:
            continue
    return templates


def build_from_session_db(
    session_db,
    name: str,
    description: str = "",
    min_messages: int = 20,
    max_sessions: int = 20,
    source_filter: Optional[str] = None,
    tags: Optional[List[str]] = None,
) -> WarmSessionTemplate:
    """Build and save a warm session template from existing sessions.

    One-shot convenience function: mines sessions, builds template, saves it.
    """
    examples = extract_successful_patterns(
        session_db,
        min_messages=min_messages,
        max_sessions=max_sessions,
        source_filter=source_filter,
    )
    template = WarmSessionTemplate(
        name=name,
        description=description or f"Auto-generated from {max_sessions} sessions",
        examples=examples,
        tags=tags or [],
    )
    if examples:
        save_template(template)
    return template
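
A minimal end-to-end sketch of the intended flow, assuming a SessionDB that exposes the list_sessions()/get_messages() methods used above. The SessionDB() constructor mirrors the import in the tool module later in this diff; the template name "general" is illustrative, and the agent-side conversation_history wiring is an assumption:

# Sketch: mine past sessions, persist a template, then warm-start a session.
from hermes_state import SessionDB
from agent.warm_session import build_from_session_db, build_warm_conversation, load_template

db = SessionDB()
build_from_session_db(db, name="general", description="Common tool-call patterns")

template = load_template("general")
if template:
    warm_history = build_warm_conversation(template, max_examples=10)
    # A new session would start from warm_history instead of a cold start;
    # the exact injection point (e.g., Agent(conversation_history=...)) is hypothetical.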

@@ -0,0 +1,284 @@
#!/usr/bin/env python3
"""
Benchmark local Ollama models against the 50 tok/s UX threshold.

Usage:
    python3 scripts/benchmark_local_models.py [--models MODEL1,MODEL2] [--prompt PROMPT] [--rounds N]
    python3 scripts/benchmark_local_models.py --all     # test all pulled models
    python3 scripts/benchmark_local_models.py --json    # JSON output for CI
"""
import argparse
import json
import os
import platform
import sys
import time
import urllib.error
import urllib.request
from dataclasses import dataclass, asdict
from typing import Optional

OLLAMA_BASE = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
THRESHOLD_TOK_S = 50.0
BENCHMARK_PROMPT = (
    "Explain the difference between TCP and UDP protocols. "
    "Cover reliability, ordering, speed, and use cases. "
    "Be thorough but concise. Write at least 300 words."
)


@dataclass
class BenchmarkResult:
    model: str
    size_gb: float
    prompt_tokens: int
    eval_tokens: int
    eval_duration_s: float
    tokens_per_second: float
    total_duration_s: float
    rounds: int
    avg_tok_s: float
    meets_threshold: bool
    error: Optional[str] = None


def get_models() -> list[dict]:
    """List all pulled Ollama models."""
    url = f"{OLLAMA_BASE}/api/tags"
    try:
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req, timeout=10) as resp:
            data = json.loads(resp.read())
        return data.get("models", [])
    except Exception as e:
        print(f"Error connecting to Ollama at {OLLAMA_BASE}: {e}", file=sys.stderr)
        sys.exit(1)


def benchmark_model(model: str, prompt: str, num_predict: int = 512) -> dict:
    """Run a single benchmark generation, return timing stats."""
    url = f"{OLLAMA_BASE}/api/generate"
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {
            "num_predict": num_predict,
            "temperature": 0.1,  # low temp for consistent output
        },
    }).encode()
    req = urllib.request.Request(url, data=payload, method="POST")
    req.add_header("Content-Type", "application/json")
    start = time.monotonic()
    try:
        with urllib.request.urlopen(req, timeout=300) as resp:
            data = json.loads(resp.read())
    except urllib.error.HTTPError as e:
        body = e.read().decode() if e.fp else str(e)
        raise RuntimeError(f"HTTP {e.code}: {body[:200]}")
    except Exception as e:
        raise RuntimeError(str(e))
    elapsed = time.monotonic() - start
    prompt_tokens = data.get("prompt_eval_count", 0)
    eval_tokens = data.get("eval_count", 0)
    eval_duration_ns = data.get("eval_duration", 0)
    total_duration_ns = data.get("total_duration", 0)
    # Ollama reports durations in nanoseconds; fall back to wall time if absent.
    eval_duration_s = eval_duration_ns / 1e9 if eval_duration_ns else elapsed
    total_duration_s = total_duration_ns / 1e9 if total_duration_ns else elapsed
    tok_s = eval_tokens / eval_duration_s if eval_duration_s > 0 else 0.0
    return {
        "prompt_tokens": prompt_tokens,
        "eval_tokens": eval_tokens,
        "eval_duration_s": round(eval_duration_s, 2),
        "total_duration_s": round(total_duration_s, 2),
        "tokens_per_second": round(tok_s, 1),
    }


def run_benchmark(
    model_name: str,
    model_size: float,
    prompt: str,
    rounds: int,
    num_predict: int,
    threshold: float = 50.0,
) -> BenchmarkResult:
    """Run multiple rounds and compute average."""
    results = []
    errors = []
    for i in range(rounds):
        try:
            r = benchmark_model(model_name, prompt, num_predict)
            results.append(r)
            # Progress goes to stderr so --json output on stdout stays machine-readable.
            print(f" Round {i+1}/{rounds}: {r['tokens_per_second']} tok/s "
                  f"({r['eval_tokens']} tokens in {r['eval_duration_s']}s)", file=sys.stderr)
        except Exception as e:
            errors.append(str(e))
            print(f" Round {i+1}/{rounds}: ERROR - {e}", file=sys.stderr)
    if not results:
        return BenchmarkResult(
            model=model_name,
            size_gb=model_size,
            prompt_tokens=0, eval_tokens=0,
            eval_duration_s=0, tokens_per_second=0,
            total_duration_s=0, rounds=rounds,
            avg_tok_s=0, meets_threshold=False,
            error="; ".join(errors),
        )
    avg_tok_s = sum(r["tokens_per_second"] for r in results) / len(results)
    avg_tok_s = round(avg_tok_s, 1)
    return BenchmarkResult(
        model=model_name,
        size_gb=model_size,
        prompt_tokens=sum(r["prompt_tokens"] for r in results) // len(results),
        eval_tokens=sum(r["eval_tokens"] for r in results) // len(results),
        eval_duration_s=round(sum(r["eval_duration_s"] for r in results) / len(results), 2),
        tokens_per_second=avg_tok_s,
        total_duration_s=round(sum(r["total_duration_s"] for r in results) / len(results), 2),
        rounds=len(results),
        avg_tok_s=avg_tok_s,
        meets_threshold=avg_tok_s >= threshold,
    )


def format_report(results: list[BenchmarkResult], threshold: float = 50.0) -> str:
    """Format a human-readable benchmark report."""
    lines = []
    lines.append("")
    lines.append("=" * 72)
    lines.append(f" LOCAL MODEL BENCHMARK — {threshold:.0f} tok/s UX Threshold")
    lines.append("=" * 72)
    lines.append("")
    # Summary table
    header = f"{'Model':<25} {'Size':>6} {'tok/s':>8} {'Threshold':>10} {'Status':>8}"
    lines.append(header)
    lines.append("-" * 72)
    passed = 0
    failed = 0
    errors = 0
    for r in sorted(results, key=lambda x: x.avg_tok_s, reverse=True):
        size_str = f"{r.size_gb:.1f}GB"
        tok_s_str = f"{r.avg_tok_s:.1f}"
        if r.error:
            status = "ERROR"
            errors += 1
        elif r.meets_threshold:
            status = "PASS"
            passed += 1
        else:
            status = "FAIL"
            failed += 1
        marker = ">" if r.meets_threshold else "X" if r.error else "!"
        thresh_str = f">= {threshold:.0f}"
        lines.append(f" {marker} {r.model:<23} {size_str:>6} {tok_s_str:>8} {thresh_str:>10} {status:>8}")
    lines.append("-" * 72)
    lines.append(f" Passed: {passed} | Failed: {failed} | Errors: {errors} | Total: {len(results)}")
    lines.append("")
    # Detail section for failures
    failures = [r for r in results if not r.meets_threshold and not r.error]
    if failures:
        lines.append(" FAILED MODELS (below threshold):")
        for r in sorted(failures, key=lambda x: x.avg_tok_s):
            gap = threshold - r.avg_tok_s
            lines.append(f" - {r.model}: {r.avg_tok_s:.1f} tok/s "
                         f"({gap:.1f} tok/s short, {r.eval_tokens} avg tokens/round)")
        lines.append("")
    error_list = [r for r in results if r.error]
    if error_list:
        lines.append(" ERRORS:")
        for r in error_list:
            lines.append(f" - {r.model}: {r.error}")
        lines.append("")
    # Hardware info (platform is imported at module level)
    lines.append(f" Host: {platform.node()} | {platform.system()} {platform.release()}")
    lines.append(f" Ollama: {OLLAMA_BASE}")
    lines.append("")
    return "\n".join(lines)


def main():
    parser = argparse.ArgumentParser(description="Benchmark local Ollama models vs 50 tok/s threshold")
    parser.add_argument("--models", help="Comma-separated model names (default: all)")
    parser.add_argument("--prompt", default=BENCHMARK_PROMPT, help="Benchmark prompt")
    parser.add_argument("--rounds", type=int, default=3, help="Rounds per model (default: 3)")
    parser.add_argument("--tokens", type=int, default=512, help="Max tokens to generate (default: 512)")
    parser.add_argument("--json", action="store_true", help="JSON output for CI")
    parser.add_argument("--all", action="store_true", help="Test all pulled models (the default when --models is omitted)")
    parser.add_argument("--threshold", type=float, default=THRESHOLD_TOK_S, help="tok/s threshold")
    args = parser.parse_args()
    threshold = args.threshold
    # Get model list
    available = get_models()
    if not available:
        print("No models found. Pull a model first: ollama pull <model>", file=sys.stderr)
        sys.exit(1)
    if args.models:
        names = [m.strip() for m in args.models.split(",")]
        models = [m for m in available if m["name"] in names]
        missing = set(names) - set(m["name"] for m in models)
        if missing:
            print(f"Models not found: {', '.join(missing)}", file=sys.stderr)
            print(f"Available: {', '.join(m['name'] for m in available)}", file=sys.stderr)
        if not models:
            sys.exit(1)  # nothing to benchmark; don't report success to CI
    else:
        models = available
    # Progress goes to stderr so --json output on stdout stays machine-readable.
    print(f"Benchmarking {len(models)} model(s) against {threshold} tok/s threshold", file=sys.stderr)
    print(f"Ollama: {OLLAMA_BASE} | Rounds: {args.rounds} | Max tokens: {args.tokens}", file=sys.stderr)
    print(file=sys.stderr)
    results = []
    for m in models:
        name = m["name"]
        size_gb = m.get("size", 0) / (1024**3)
        print(f" {name} ({size_gb:.1f}GB):", file=sys.stderr)
        result = run_benchmark(name, size_gb, args.prompt, args.rounds, args.tokens, threshold)
        results.append(result)
    # Output
    report = format_report(results, threshold)
    if args.json:
        output = {
            "threshold_tok_s": threshold,
            "ollama_base": OLLAMA_BASE,
            "rounds": args.rounds,
            "results": [asdict(r) for r in results],
            "passed": sum(1 for r in results if r.meets_threshold),
            "failed": sum(1 for r in results if not r.meets_threshold and not r.error),
            "errors": sum(1 for r in results if r.error),
        }
        print(json.dumps(output, indent=2))
    else:
        print(report)
    # Exit code: 0 if all pass, 1 if any fail/error
    if any(not r.meets_threshold or r.error for r in results):
        sys.exit(1)
    sys.exit(0)


if __name__ == "__main__":
    main()
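
As a sanity check on the nanosecond handling in benchmark_model(): Ollama reports eval_count (tokens generated) and eval_duration (nanoseconds), so throughput is eval_count / (eval_duration / 1e9). A worked example with illustrative numbers in the ballpark of the gemma3:1b result above:

# Illustrative numbers, not a recorded run: 512 generated tokens over ~3.61e9 ns.
eval_count = 512
eval_duration_ns = 3_610_000_000
tok_s = eval_count / (eval_duration_ns / 1e9)
print(round(tok_s, 1))  # 141.8 — comfortably above the 50 tok/s threshold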

@@ -1,264 +0,0 @@
"""Tests for warm session provisioning (#327)."""
import json
import time
from unittest.mock import MagicMock, patch
import pytest
from agent.warm_session import (
WarmSessionTemplate,
ToolCallExample,
build_warm_conversation,
save_template,
load_template,
list_templates,
extract_successful_patterns,
_truncate_result,
)
@pytest.fixture()
def isolated_templates_dir(tmp_path, monkeypatch):
"""Point TEMPLATES_DIR at a temp directory."""
tdir = tmp_path / "warm_sessions"
tdir.mkdir()
monkeypatch.setattr("agent.warm_session.TEMPLATES_DIR", tdir)
return tdir


@pytest.fixture()
def sample_template():
    """A sample warm session template with a few examples."""
    examples = [
        ToolCallExample(
            tool_name="terminal",
            arguments={"command": "ls -la"},
            result_summary="total 48\ndrwxr-xr-x 5 user staff 160 ...",
            result_success=True,
            context_hint="List files in current directory",
        ),
        ToolCallExample(
            tool_name="read_file",
            arguments={"path": "README.md"},
            result_summary="# Project\n\nThis is the README.",
            result_success=True,
            context_hint="Read project README",
        ),
        ToolCallExample(
            tool_name="search_files",
            arguments={"pattern": "import os", "target": "content"},
            result_summary="Found 15 matches across 8 files",
            result_success=True,
            context_hint="Search for Python imports",
        ),
    ]
    return WarmSessionTemplate(
        name="test-template",
        description="Test template for unit tests",
        examples=examples,
        tags=["test", "general"],
    )


# ---------------------------------------------------------------------------
# Data classes
# ---------------------------------------------------------------------------

class TestToolCallExample:
    def test_creation(self):
        ex = ToolCallExample(
            tool_name="terminal",
            arguments={"command": "echo hello"},
            result_summary="hello",
            result_success=True,
        )
        assert ex.tool_name == "terminal"
        assert ex.arguments == {"command": "echo hello"}
        assert ex.result_success is True

    def test_defaults(self):
        ex = ToolCallExample(
            tool_name="read_file",
            arguments={},
            result_summary="",
            result_success=True,
        )
        assert ex.context_hint == ""


class TestWarmSessionTemplate:
    def test_creation(self, sample_template):
        assert sample_template.name == "test-template"
        assert len(sample_template.examples) == 3
        assert sample_template.created_at > 0

    def test_round_trip_dict(self, sample_template):
        data = sample_template.to_dict()
        restored = WarmSessionTemplate.from_dict(data)
        assert restored.name == sample_template.name
        assert len(restored.examples) == len(sample_template.examples)
        assert restored.examples[0].tool_name == "terminal"

    def test_from_dict_with_plain_dicts(self):
        data = {
            "name": "plain",
            "description": "from dict",
            "examples": [
                {
                    "tool_name": "web_search",
                    "arguments": {"query": "test"},
                    "result_summary": "results found",
                    "result_success": True,
                    "context_hint": "",
                }
            ],
        }
        template = WarmSessionTemplate.from_dict(data)
        assert len(template.examples) == 1
        assert template.examples[0].tool_name == "web_search"


# ---------------------------------------------------------------------------
# Truncation
# ---------------------------------------------------------------------------

class TestTruncateResult:
    def test_short_unchanged(self):
        assert _truncate_result("short text") == "short text"

    def test_long_truncated(self):
        long_text = "x" * 1000
        result = _truncate_result(long_text, max_chars=100)
        assert len(result) < 200  # 100 chars + truncation suffix
        assert "truncated" in result

    def test_empty(self):
        assert _truncate_result("") == ""
        assert _truncate_result(None) == ""


# ---------------------------------------------------------------------------
# Build conversation
# ---------------------------------------------------------------------------

class TestBuildWarmConversation:
    def test_basic_conversation(self, sample_template):
        messages = build_warm_conversation(sample_template)
        # Each example produces: user + assistant(tool_calls) + tool(result) = 3 messages
        assert len(messages) == 3 * 3  # 3 examples * 3 messages each

    def test_message_roles_alternate(self, sample_template):
        messages = build_warm_conversation(sample_template)
        roles = [m["role"] for m in messages]
        expected = ["user", "assistant", "tool"] * 3
        assert roles == expected

    def test_tool_calls_have_ids(self, sample_template):
        messages = build_warm_conversation(sample_template)
        assistant_msgs = [m for m in messages if m["role"] == "assistant"]
        for msg in assistant_msgs:
            tc = msg["tool_calls"][0]
            assert tc["id"].startswith("warm_")
            assert tc["function"]["name"] in ("terminal", "read_file", "search_files")

    def test_tool_results_reference_ids(self, sample_template):
        messages = build_warm_conversation(sample_template)
        assistant_msgs = [m for m in messages if m["role"] == "assistant"]
        tool_msgs = [m for m in messages if m["role"] == "tool"]
        for a, t in zip(assistant_msgs, tool_msgs):
            assert t["tool_call_id"] == a["tool_calls"][0]["id"]

    def test_max_examples_limit(self, sample_template):
        messages = build_warm_conversation(sample_template, max_examples=1)
        assert len(messages) == 3  # 1 example * 3 messages

    def test_system_prompt_addendum(self, sample_template):
        sample_template.system_prompt_addendum = "Use Python 3.12+"
        messages = build_warm_conversation(sample_template)
        assert messages[0]["role"] == "system"
        assert "Python 3.12+" in messages[0]["content"]


# ---------------------------------------------------------------------------
# Save / Load / List
# ---------------------------------------------------------------------------

class TestTemplatePersistence:
    def test_save_and_load(self, isolated_templates_dir, sample_template):
        save_template(sample_template)
        loaded = load_template("test-template")
        assert loaded is not None
        assert loaded.name == "test-template"
        assert len(loaded.examples) == 3

    def test_load_nonexistent(self, isolated_templates_dir):
        assert load_template("does-not-exist") is None

    def test_list_templates(self, isolated_templates_dir, sample_template):
        save_template(sample_template)
        templates = list_templates()
        assert len(templates) == 1
        assert templates[0]["name"] == "test-template"
        assert templates[0]["example_count"] == 3

    def test_list_empty(self, isolated_templates_dir):
        assert list_templates() == []


# ---------------------------------------------------------------------------
# Extract patterns (mocked SessionDB)
# ---------------------------------------------------------------------------

class TestExtractPatterns:
    def test_extracts_from_marathon_sessions(self):
        db = MagicMock()
        db.list_sessions.return_value = [
            {"id": "s1", "message_count": 50, "end_reason": "completed"},
            {"id": "s2", "message_count": 10, "end_reason": "completed"},  # too short
        ]
        db.get_messages.return_value = [
            {
                "role": "assistant",
                "content": None,
                "tool_calls": json.dumps([{
                    "id": "tc1",
                    "type": "function",
                    "function": {"name": "terminal", "arguments": json.dumps({"command": "pwd"})},
                }]),
            },
        ]
        examples = extract_successful_patterns(db, min_messages=20)
        # Only s1 (50 msgs) qualifies, s2 (10 msgs) is skipped
        assert len(examples) == 1
        assert examples[0].tool_name == "terminal"

    def test_skips_trivial_tools(self):
        db = MagicMock()
        db.list_sessions.return_value = [
            {"id": "s1", "message_count": 50, "end_reason": "completed"},
        ]
        db.get_messages.return_value = [
            {
                "role": "assistant",
                "content": None,
                "tool_calls": json.dumps([{
                    "id": "tc1",
                    "type": "function",
                    "function": {"name": "clarify", "arguments": "{}"},
                }]),
            },
        ]
        examples = extract_successful_patterns(db)
        assert len(examples) == 0  # clarify is trivial, skipped

    def test_skips_errored_sessions(self):
        db = MagicMock()
        db.list_sessions.return_value = [
            {"id": "s1", "message_count": 50, "end_reason": "error"},
        ]
        examples = extract_successful_patterns(db)
        assert len(examples) == 0  # errored session, skipped

@@ -1,178 +0,0 @@
"""Warm Session Tool — manage pre-proficient agent sessions.
Allows the agent to build, save, list, and load warm session templates
that pre-seed new sessions with successful tool-call patterns.
"""
import json
import logging
from typing import Optional
from tools.registry import registry
logger = logging.getLogger(__name__)
def warm_session(
action: str,
name: str = None,
description: str = "",
min_messages: int = 20,
max_sessions: int = 20,
source_filter: str = None,
tags: list = None,
) -> str:
"""Manage warm session templates for pre-proficient agent sessions.
Actions:
build — mine existing sessions and create a template
list — show saved templates
load — return a template's conversation_history for injection
delete — remove a template
"""
from agent.warm_session import (
build_from_session_db,
load_template,
list_templates,
build_warm_conversation,
save_template,
TEMPLATES_DIR,
)
if action == "list":
templates = list_templates()
return json.dumps({
"success": True,
"templates": templates,
"count": len(templates),
})
if action == "build":
if not name:
return json.dumps({"success": False, "error": "name is required for 'build'."})
try:
from hermes_state import SessionDB
db = SessionDB()
except Exception as e:
return json.dumps({"success": False, "error": f"Cannot open session DB: {e}"})
template = build_from_session_db(
db,
name=name,
description=description,
min_messages=min_messages,
max_sessions=max_sessions,
source_filter=source_filter,
tags=tags or [],
)
return json.dumps({
"success": True,
"name": template.name,
"example_count": len(template.examples),
"description": template.description,
})
if action == "load":
if not name:
return json.dumps({"success": False, "error": "name is required for 'load'."})
template = load_template(name)
if not template:
return json.dumps({"success": False, "error": f"Template '{name}' not found."})
conversation = build_warm_conversation(template)
return json.dumps({
"success": True,
"name": template.name,
"message_count": len(conversation),
"conversation_preview": [
{"role": m["role"], "content_preview": str(m.get("content", ""))[:100]}
for m in conversation[:6]
],
})
if action == "delete":
if not name:
return json.dumps({"success": False, "error": "name is required for 'delete'."})
path = TEMPLATES_DIR / f"{name}.json"
if not path.exists():
return json.dumps({"success": False, "error": f"Template '{name}' not found."})
path.unlink()
return json.dumps({"success": True, "message": f"Template '{name}' deleted."})
return json.dumps({
"success": False,
"error": f"Unknown action '{action}'. Use: build, list, load, delete",
})


WARM_SESSION_SCHEMA = {
    "name": "warm_session",
    "description": (
        "Manage warm session templates for pre-proficient agent sessions. "
        "Marathon sessions have lower error rates than mid-length ones because "
        "agents accumulate successful patterns. Warm templates capture those "
        "patterns and pre-seed new sessions with experience.\n\n"
        "Actions:\n"
        " build — mine existing sessions for successful tool-call patterns, save as template\n"
        " list — show saved templates\n"
        " load — retrieve a template's conversation history for session injection\n"
        " delete — remove a template"
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": ["build", "list", "load", "delete"],
                "description": "The action to perform.",
            },
            "name": {
                "type": "string",
                "description": "Template name. Required for build/load/delete.",
            },
            "description": {
                "type": "string",
                "description": "Description for the template. Used with 'build'.",
            },
            "min_messages": {
                "type": "integer",
                "description": "Minimum message count to consider a session experienced (default: 20).",
            },
            "max_sessions": {
                "type": "integer",
                "description": "Maximum sessions to scan when building (default: 20).",
            },
            "source_filter": {
                "type": "string",
                "description": "Filter sessions by source (cli, telegram, discord, etc.).",
            },
            "tags": {
                "type": "array",
                "items": {"type": "string"},
                "description": "Tags for organizing templates.",
            },
        },
        "required": ["action"],
    },
}


registry.register(
    name="warm_session",
    toolset="skills",
    schema=WARM_SESSION_SCHEMA,
    handler=lambda args, **kw: warm_session(
        action=args.get("action", ""),
        name=args.get("name"),
        description=args.get("description", ""),
        min_messages=args.get("min_messages", 20),
        max_sessions=args.get("max_sessions", 20),
        source_filter=args.get("source_filter"),
        tags=args.get("tags"),
    ),
    emoji="🔥",
)
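
Since the handler wraps a plain function that returns JSON strings, it can be exercised without going through the registry. A short sketch; the template name is illustrative and the output shapes are taken from the branches above:

# Sketch: calling the tool function directly, bypassing the registry.
import json

listing = json.loads(warm_session(action="list"))
# -> {"success": True, "templates": [...], "count": <n>}

loaded = json.loads(warm_session(action="load", name="general"))
# -> {"success": False, "error": "Template 'general' not found."} until one is built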