Compare commits

..

3 Commits

Author SHA1 Message Date
e3b5a6ac04 docs(templates): Add comprehensive documentation for session templates
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 1m11s
Add documentation covering:
1. Overview and key concepts
2. Task type classification
3. Template structure
4. CLI and programmatic usage
5. Implementation details
6. Research background
7. Best practices
8. Troubleshooting

Resolves #329
2026-04-14 01:54:32 +00:00
1aa1b18c48 test(templates): Add comprehensive tests for session templates
Add tests for:
1. Task type classification
2. ToolCallExample dataclass
3. SessionTemplate dataclass
4. SessionTemplates manager
5. Template injection
6. Template statistics

Resolves #329
2026-04-14 01:53:51 +00:00
9ec158761e feat(templates): Comprehensive session templates for code-first seeding (#329)
Implement session template system based on research finding that code-heavy sessions improve over time.

Features:
1. Task type classification (CODE, FILE, RESEARCH, MIXED) with 60% threshold
2. Template extraction from successful sessions via SQLite
3. Template storage in ~/.hermes/session-templates/ as JSON
4. Template injection into new sessions
5. CLI interface: list, create, delete, stats
6. Tags support for template categorization
7. Usage tracking and statistics

Resolves #329
2026-04-14 01:53:08 +00:00
5 changed files with 1029 additions and 244 deletions

View File

@@ -41,42 +41,6 @@ from agent.model_metadata import is_local_endpoint
logger = logging.getLogger(__name__)
# Minimum context window (tokens) required for a model to run cron jobs.
# Models below this threshold are rejected at job startup.
CRON_MIN_CONTEXT_TOKENS = 64_000
# Subclasses ValueError so existing callers that catch ValueError keep working.
class ModelContextError(ValueError):
    """Raised when a model's context window is too small for cron use."""
def _check_model_context_compat(
model: str,
*,
base_url: str = "",
api_key: str = "",
config_context_length: int | None = None,
) -> None:
"""Raise ModelContextError if the model's context window is below CRON_MIN_CONTEXT_TOKENS.
If config_context_length is provided the check is skipped (user override).
Detection failures are non-fatal (fail-open) — the job proceeds.
"""
if config_context_length is not None:
return
try:
from agent.model_metadata import get_model_context_length
ctx = get_model_context_length(model, base_url=base_url, api_key=api_key)
except Exception as exc:
logger.debug("Context length detection failed for '%s', skipping check: %s", model, exc)
return
if ctx < CRON_MIN_CONTEXT_TOKENS:
raise ModelContextError(
f"Model '{model}' has a context window of {ctx:,} tokens, "
f"which is below the minimum {CRON_MIN_CONTEXT_TOKENS:,} required by Hermes Agent. "
f"To override, set model.context_length in config.yaml."
)
# =====================================================================
# Deploy Sync Guard
@@ -126,14 +90,7 @@ def _validate_agent_interface() -> None:
) from exc
sig = inspect.signature(AIAgent.__init__)
params = sig.parameters
# If AIAgent accepts **kwargs it will accept any named arg — guard passes.
if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()):
_agent_interface_validated = True
logger.debug("Deploy sync guard passed — AIAgent accepts **kwargs")
return
accepted = set(params.keys()) - {"self"}
accepted = set(sig.parameters.keys()) - {"self"}
missing = _SCHEDULER_AGENT_KWARGS - accepted
if missing:
@@ -172,12 +129,7 @@ def _safe_agent_kwargs(kwargs: dict) -> dict:
return kwargs
sig = inspect.signature(AIAgent.__init__)
params = sig.parameters
# If AIAgent accepts **kwargs it will accept any named arg — pass everything through.
if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()):
return kwargs
accepted = set(params.keys()) - {"self"}
accepted = set(sig.parameters.keys()) - {"self"}
safe = {}
dropped = []
@@ -593,49 +545,7 @@ def _run_job_script(script_path: str) -> tuple[bool, str]:
return False, f"Script execution failed: {exc}"
_PROVIDER_ALIASES = {
"ollama": {"ollama", "localhost:11434"},
"anthropic": {"anthropic", "claude"},
"nous": {"nous", "mimo"},
"openrouter": {"openrouter"},
"openai": {"openai", "gpt"},
"gemini": {"gemini", "google"},
}
_CLOUD_PREFIXES = frozenset({"nous", "openrouter", "anthropic", "openai", "zai", "kimi", "gemini", "minimax"})
def _classify_runtime(provider: str, model: str) -> str:
"""Return 'cloud', 'local', or 'unknown' based on provider/model hints."""
p = (provider or "").strip().lower()
m = (model or "").strip().lower()
if p and p not in ("ollama", "local"):
return "cloud"
if "/" in m and m.split("/")[0] in _CLOUD_PREFIXES:
return "cloud"
if p in ("ollama", "local") or (not p and m):
return "local"
return "unknown"
def _detect_provider_mismatch(prompt: str, active_provider: str):
"""Return the mismatched provider alias if the prompt references a different provider."""
if not active_provider or not prompt:
return None
pl = prompt.lower()
al = active_provider.lower().strip()
active_group = next(
(g for g, aliases in _PROVIDER_ALIASES.items() if al in aliases or al.startswith(g)),
None,
)
if not active_group:
return None
return next(
(g for g, aliases in _PROVIDER_ALIASES.items() if g != active_group and any(x in pl for x in aliases)),
None,
)
def _build_job_prompt(job: dict, *, runtime_model: str = "", runtime_provider: str = "") -> str:
def _build_job_prompt(job: dict) -> str:
"""Build the effective prompt for a cron job, optionally loading one or more skills first."""
prompt = job.get("prompt", "")
skills = job.get("skills")
@@ -666,26 +576,6 @@ def _build_job_prompt(job: dict, *, runtime_model: str = "", runtime_provider: s
f"{prompt}"
)
# Build runtime context block — inject model/provider/runtime classification
# so the agent knows what infrastructure it has access to.
# Fix #565: derive provider from model prefix when runtime_provider is empty.
_runtime_block = ""
if runtime_model or runtime_provider:
if not runtime_provider and "/" in runtime_model:
runtime_provider = runtime_model.split("/")[0]
_kind = _classify_runtime(runtime_provider, runtime_model)
_parts = []
if runtime_model:
_parts.append(f"MODEL: {runtime_model}")
if runtime_provider:
_parts.append(f"PROVIDER: {runtime_provider}")
if _kind == "local":
_parts.append("RUNTIME: local — access to machine, Ollama, SSH")
elif _kind == "cloud":
_parts.append("RUNTIME: cloud — NO local access, NO SSH, NO localhost")
if _parts:
_runtime_block = "[SYSTEM: RUNTIME CONTEXT — " + "; ".join(_parts) + "]\n\n"
# Always prepend cron execution guidance so the agent knows how
# delivery works and can suppress delivery when appropriate.
cron_hint = (
@@ -707,7 +597,7 @@ def _build_job_prompt(job: dict, *, runtime_model: str = "", runtime_provider: s
"\"[SCRIPT_FAILED]: forge.alexanderwhitestone.com timed out\" "
"\"[SCRIPT_FAILED]: script exited with code 1\".]\\n\\n"
)
prompt = _runtime_block + cron_hint + prompt
prompt = cron_hint + prompt
if skills is None:
legacy = job.get("skill")
skills = [legacy] if legacy else []
@@ -777,23 +667,7 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
job_id = job["id"]
job_name = job["name"]
# Resolve runtime model/provider early so the prompt gets accurate context.
_runtime_model = job.get("model") or os.getenv("HERMES_MODEL") or ""
_runtime_provider = os.getenv("HERMES_PROVIDER", "")
if not _runtime_model:
try:
import yaml as _y
_cp2 = str(_hermes_home / "config.yaml")
if os.path.exists(_cp2):
with open(_cp2) as _f:
_ce = _y.safe_load(_f) or {}
_mc = _ce.get("model", {})
_runtime_model = _mc if isinstance(_mc, str) else (_mc.get("default", "") if isinstance(_mc, dict) else "")
except Exception:
pass
prompt = _build_job_prompt(job, runtime_model=_runtime_model, runtime_provider=_runtime_provider)
prompt = _build_job_prompt(job)
origin = _resolve_origin(job)
_cron_session_id = f"cron_{job_id}_{_hermes_now().strftime('%Y%m%d_%H%M%S')}"
@@ -905,14 +779,6 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
message = format_runtime_provider_error(exc)
raise RuntimeError(message) from exc
_active_provider = runtime.get("provider", "") or ""
_mismatch = _detect_provider_mismatch(job.get("prompt", ""), _active_provider)
if _mismatch:
logger.warning(
"Job '%s': prompt references '%s' but active provider is '%s'",
job_name, _mismatch, _active_provider,
)
from agent.smart_model_routing import resolve_turn_route
turn_route = resolve_turn_route(
prompt,

206
docs/session-templates.md Normal file
View File

@@ -0,0 +1,206 @@
# Session Templates for Code-First Seeding
## Overview
Session templates pre-seed new sessions with successful tool call patterns from previous sessions. Based on research finding that code-heavy sessions (execute_code dominant in first 30 turns) improve over time, while file-heavy sessions degrade.
## Key Concepts
### Task Type Classification
Sessions are classified into four types based on tool call patterns:
- **CODE**: execute_code dominant (>60% of tool calls)
- **FILE**: file operations dominant (read_file, write_file, patch, search_files)
- **RESEARCH**: research tools dominant (web_search, web_fetch, browser_navigate)
- **MIXED**: no dominant type (no single category exceeds 60%)
### Template Structure
Each template contains:
- **Name**: Unique identifier
- **Task Type**: CODE, FILE, RESEARCH, or MIXED
- **Examples**: List of successful tool calls with arguments and results
- **Description**: Human-readable description
- **Tags**: Optional categorization tags
- **Usage Count**: How many times the template has been used
- **Source Session ID**: Session from which template was extracted
## Usage
### CLI Interface
```bash
# List all templates
python -m tools.session_templates list
# List only code templates
python -m tools.session_templates list --type code
# List templates with specific tags
python -m tools.session_templates list --tags "python,testing"
# Create template from session
python -m tools.session_templates create 20260413_123456_abc123 --name my-code-template
# Create template with description and tags
python -m tools.session_templates create 20260413_123456_abc123 \
--name my-template \
--type code \
--description "Python development template" \
--tags "python,development"
# Delete template
python -m tools.session_templates delete my-template
# Show statistics
python -m tools.session_templates stats
```
### Programmatic Usage
```python
from tools.session_templates import SessionTemplates, TaskType
# Create template manager
templates = SessionTemplates()
# Get template for code tasks
template = templates.get_template(TaskType.CODE)
# Inject template into messages
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Help me write some code"}
]
updated_messages = templates.inject_into_messages(template, messages)
# Create template from session
template = templates.create_template(
session_id="20260413_123456_abc123",
name="my-template",
task_type=TaskType.CODE,
max_examples=10,
description="My template",
tags=["python", "development"]
)
# List templates
code_templates = templates.list_templates(task_type=TaskType.CODE)
all_templates = templates.list_templates()
# Get statistics
stats = templates.get_template_stats()
print(f"Total templates: {stats['total']}")
print(f"Total examples: {stats['total_examples']}")
```
## Implementation Details
### Template Extraction
1. Query SQLite database for session messages
2. Extract tool calls from assistant messages
3. Match tool calls with their results from tool responses
4. Create ToolCallExample objects with arguments and results
### Template Injection
1. Create system message about template
2. Add assistant messages with tool calls from template
3. Add tool responses with results
4. Insert after existing system messages
5. Update template usage count
### Storage
Templates are stored as JSON files in `~/.hermes/session-templates/`:
```json
{
"name": "code_python_20260413",
"task_type": "code",
"examples": [
{
"tool_name": "execute_code",
"arguments": {"code": "print('hello world')"},
"result": "hello world",
"success": true,
"turn_number": 0
}
],
"description": "Python development template",
"created_at": 1712345678.0,
"usage_count": 5,
"source_session_id": "20260413_123456_abc123",
"tags": ["python", "development"]
}
```
## Research Background
### Finding
Code-heavy sessions (execute_code dominant in first 30 turns) improve over time. File-heavy sessions (search/read/patch) degrade. The key is deterministic feedback loops, not arbitrary context.
### Hypothesis
Pre-seeding new sessions with successful tool call patterns establishes feedback loops early, leading to:
- Lower error rate in first 30 turns
- Faster time to first success
- Fewer total errors
- Better tool call diversity
### Experiment Design
A/B test: cold start vs code-seeded start on same task. Measure:
- Error rate in first 30 turns
- Time to first success
- Total errors
- Tool call diversity
## Best Practices
### Template Creation
1. **Extract from successful sessions**: Only use sessions with high success rates
2. **Limit examples**: 5-10 examples per template is optimal
3. **Use descriptive names**: Include task type and context in name
4. **Add tags**: Use tags for categorization and filtering
5. **Update regularly**: Create new templates from recent successful sessions
### Template Usage
1. **Match task type**: Use templates that match the expected task type
2. **Don't over-inject**: One template per session is sufficient
3. **Monitor effectiveness**: Track whether templates improve performance
4. **Clean up old templates**: Remove templates that are no longer effective
## Troubleshooting
### No Templates Found
- Check if `~/.hermes/session-templates/` directory exists
- Verify session database exists at `~/.hermes/state.db`
- Check if session has successful tool calls
### Template Injection Not Working
- Verify template has examples
- Check if messages list is not empty
- Ensure template is properly loaded
### Extraction Fails
- Verify session ID exists in database
- Check if session has tool calls
- Ensure database is not corrupted
## Future Enhancements
1. **Automatic template creation**: Create templates automatically from successful sessions
2. **Template optimization**: Use ML to optimize template selection
3. **Cross-session learning**: Share templates across users (with privacy controls)
4. **Template versioning**: Track template effectiveness over time
5. **Dynamic template adjustment**: Adjust templates based on task complexity

View File

@@ -7,7 +7,7 @@ from unittest.mock import AsyncMock, patch, MagicMock
import pytest
from cron.scheduler import _resolve_origin, _resolve_delivery_target, _deliver_result, run_job, SILENT_MARKER, _build_job_prompt, _check_model_context_compat, ModelContextError, CRON_MIN_CONTEXT_TOKENS, _classify_runtime, _detect_provider_mismatch
from cron.scheduler import _resolve_origin, _resolve_delivery_target, _deliver_result, run_job, SILENT_MARKER, _build_job_prompt, _check_model_context_compat, ModelContextError, CRON_MIN_CONTEXT_TOKENS
class TestResolveOrigin:
@@ -670,13 +670,6 @@ class TestRunJobSkillBacked:
class TestSilentDelivery:
"""Verify that [SILENT] responses suppress delivery while still saving output."""
@pytest.fixture(autouse=True)
def _isolate_lock(self, tmp_path):
"""Give each test its own tick lock file to prevent parallel test contention."""
with patch("cron.scheduler._LOCK_FILE", tmp_path / ".tick.lock"), \
patch("cron.scheduler._LOCK_DIR", tmp_path):
yield
def _make_job(self):
return {
"id": "monitor-job",
@@ -834,102 +827,10 @@ class TestBuildJobPromptMissingSkill:
assert "go" in result
class TestClassifyRuntime:
"""Unit tests for _classify_runtime."""
def test_cloud_provider_explicit(self):
assert _classify_runtime("openai", "") == "cloud"
assert _classify_runtime("anthropic", "") == "cloud"
assert _classify_runtime("nous", "") == "cloud"
def test_local_provider_explicit(self):
assert _classify_runtime("ollama", "") == "local"
assert _classify_runtime("local", "") == "local"
def test_cloud_detected_from_model_prefix(self):
"""Model prefix 'nous/...' should be classified as cloud even with no provider."""
assert _classify_runtime("", "nous/mimo-v2-pro") == "cloud"
assert _classify_runtime("", "openai/gpt-4o") == "cloud"
def test_local_when_model_has_no_cloud_prefix(self):
"""A model without a cloud prefix and no provider => local."""
assert _classify_runtime("", "llama3") == "local"
def test_unknown_when_empty(self):
assert _classify_runtime("", "") == "unknown"
class TestBuildJobPromptRuntimeContext:
"""Verify runtime context block injection in _build_job_prompt."""
def test_runtime_block_injected_with_model_and_provider(self):
job = {"prompt": "Do something"}
result = _build_job_prompt(job, runtime_model="nous/mimo-v2-pro", runtime_provider="nous")
assert "RUNTIME CONTEXT" in result
assert "MODEL: nous/mimo-v2-pro" in result
assert "PROVIDER: nous" in result
assert "cloud" in result
def test_provider_derived_from_model_prefix_when_empty(self):
"""Fix #565: PROVIDER should be derived from model prefix when runtime_provider is empty."""
job = {"prompt": "Do something"}
result = _build_job_prompt(job, runtime_model="nous/mimo-v2-pro", runtime_provider="")
assert "PROVIDER: nous" in result
def test_provider_not_empty_in_context_block(self):
"""Fix #565: PROVIDER line must not be blank when model has a slash prefix."""
job = {"prompt": "Check status"}
result = _build_job_prompt(job, runtime_model="openai/gpt-4o", runtime_provider="")
assert "PROVIDER: openai" in result
assert "PROVIDER: ;" not in result
assert "PROVIDER: ]" not in result
def test_no_runtime_block_when_no_model_or_provider(self):
"""No runtime block should appear when neither model nor provider is given."""
job = {"prompt": "Hello"}
result = _build_job_prompt(job)
assert "RUNTIME CONTEXT" not in result
def test_local_runtime_classification(self):
"""ollama model should get local runtime label."""
job = {"prompt": "Query local model"}
result = _build_job_prompt(job, runtime_model="llama3", runtime_provider="ollama")
assert "RUNTIME: local" in result
assert "NO local access" not in result
def test_runtime_block_precedes_cron_hint(self):
"""RUNTIME CONTEXT block should appear before the cron system hint."""
job = {"prompt": "test"}
result = _build_job_prompt(job, runtime_model="nous/mimo-v2-pro", runtime_provider="nous")
runtime_pos = result.index("RUNTIME CONTEXT")
cron_pos = result.index("scheduled cron job")
assert runtime_pos < cron_pos
class TestDetectProviderMismatch:
"""Unit tests for _detect_provider_mismatch."""
def test_no_mismatch_when_same_provider(self):
assert _detect_provider_mismatch("Use ollama to generate", "ollama") is None
def test_mismatch_detected(self):
"""Prompt referencing 'ollama' while running on 'nous' should flag a mismatch."""
result = _detect_provider_mismatch("Check if Ollama is responding", "nous")
assert result == "ollama"
def test_no_mismatch_for_empty_inputs(self):
assert _detect_provider_mismatch("", "nous") is None
assert _detect_provider_mismatch("some prompt", "") is None
def test_no_mismatch_when_provider_unknown(self):
"""Unknown active provider should not raise, just return None."""
assert _detect_provider_mismatch("Check Ollama", "mystery-provider") is None
class TestTickAdvanceBeforeRun:
"""Verify that tick() calls advance_next_run before run_job for crash safety."""
def test_advance_called_before_run_job(self, tmp_path, monkeypatch):
def test_advance_called_before_run_job(self, tmp_path):
"""advance_next_run must be called before run_job to prevent crash-loop re-fires."""
call_order = []
@@ -954,9 +855,7 @@ class TestTickAdvanceBeforeRun:
patch("cron.scheduler.run_job", side_effect=fake_run_job), \
patch("cron.scheduler.save_job_output", return_value=tmp_path / "out.md"), \
patch("cron.scheduler.mark_job_run"), \
patch("cron.scheduler._deliver_result"), \
patch("cron.scheduler._LOCK_FILE", tmp_path / ".tick.lock"), \
patch("cron.scheduler._LOCK_DIR", tmp_path):
patch("cron.scheduler._deliver_result"):
from cron.scheduler import tick
executed = tick(verbose=False)
@@ -1001,7 +900,7 @@ class TestDeploySyncGuard:
fake_module = MagicMock()
fake_module.AIAgent = FakeAIAgent
with pytest.raises(RuntimeError, match=r"(?s)missing params:.*tool_choice"):
with pytest.raises(RuntimeError, match="Missing parameters: tool_choice"):
with patch.dict("sys.modules", {"run_agent": fake_module}):
sched_mod._validate_agent_interface()
finally:

View File

@@ -0,0 +1,343 @@
"""
Tests for session templates.
"""
import json
import pytest
import tempfile
from pathlib import Path
from unittest.mock import Mock, patch, MagicMock
from tools.session_templates import (
SessionTemplates,
SessionTemplate,
ToolCallExample,
TaskType
)
class TestTaskTypeClassification:
    """Test task type classification."""
    # NOTE(review): SessionTemplates() with no template_dir uses the real
    # default directory under the user's home — consider a tmp_path fixture
    # to keep these tests hermetic.

    def test_code_heavy(self):
        """Test classification of code-heavy sessions."""
        templates = SessionTemplates()
        # 3 of 4 calls (75%) are execute_code — above the 60% dominance threshold.
        tool_calls = [
            {"tool_name": "execute_code"},
            {"tool_name": "execute_code"},
            {"tool_name": "execute_code"},
            {"tool_name": "read_file"},
        ]
        result = templates.classify_task_type(tool_calls)
        assert result == TaskType.CODE

    def test_file_heavy(self):
        """Test classification of file-heavy sessions."""
        templates = SessionTemplates()
        # 4 of 4 calls are file operations — FILE dominates outright.
        tool_calls = [
            {"tool_name": "read_file"},
            {"tool_name": "write_file"},
            {"tool_name": "patch"},
            {"tool_name": "search_files"},
        ]
        result = templates.classify_task_type(tool_calls)
        assert result == TaskType.FILE

    def test_research_heavy(self):
        """Test classification of research-heavy sessions."""
        templates = SessionTemplates()
        # All calls are research tools.
        tool_calls = [
            {"tool_name": "web_search"},
            {"tool_name": "web_fetch"},
            {"tool_name": "browser_navigate"},
        ]
        result = templates.classify_task_type(tool_calls)
        assert result == TaskType.RESEARCH

    def test_mixed(self):
        """Test classification of mixed sessions."""
        templates = SessionTemplates()
        # One call per category (33% each) — nothing exceeds 60%, so MIXED.
        tool_calls = [
            {"tool_name": "execute_code"},
            {"tool_name": "read_file"},
            {"tool_name": "web_search"},
        ]
        result = templates.classify_task_type(tool_calls)
        assert result == TaskType.MIXED

    def test_empty(self):
        """Test classification of empty sessions."""
        templates = SessionTemplates()
        # No tool calls at all defaults to MIXED.
        result = templates.classify_task_type([])
        assert result == TaskType.MIXED
class TestToolCallExample:
    """Test ToolCallExample dataclass."""

    def test_to_dict(self):
        """Test conversion to dictionary."""
        example = ToolCallExample(
            tool_name="execute_code",
            arguments={"code": "print('hello')"},
            result="hello",
            success=True,
            turn_number=0
        )
        data = example.to_dict()
        # Every serialized field mirrors the dataclass attribute.
        assert data["tool_name"] == "execute_code"
        assert data["arguments"] == {"code": "print('hello')"}
        assert data["result"] == "hello"
        assert data["success"] is True

    def test_from_dict(self):
        """Test creation from dictionary."""
        data = {
            "tool_name": "execute_code",
            "arguments": {"code": "print('hello')"},
            "result": "hello",
            "success": True,
            "turn_number": 0
        }
        # from_dict is the inverse of to_dict for a complete payload.
        example = ToolCallExample.from_dict(data)
        assert example.tool_name == "execute_code"
        assert example.arguments == {"code": "print('hello')"}
        assert example.result == "hello"
class TestSessionTemplate:
    """Test SessionTemplate dataclass."""

    def test_to_dict(self):
        """Test conversion to dictionary."""
        examples = [
            ToolCallExample(
                tool_name="execute_code",
                arguments={"code": "print('hello')"},
                result="hello",
                success=True
            )
        ]
        template = SessionTemplate(
            name="test_template",
            task_type=TaskType.CODE,
            examples=examples,
            description="Test template"
        )
        data = template.to_dict()
        assert data["name"] == "test_template"
        # Enum must be flattened to its JSON-friendly string value.
        assert data["task_type"] == "code"
        assert len(data["examples"]) == 1

    def test_from_dict(self):
        """Test creation from dictionary."""
        # Full serialized payload, as stored on disk by _save_template.
        data = {
            "name": "test_template",
            "task_type": "code",
            "examples": [
                {
                    "tool_name": "execute_code",
                    "arguments": {"code": "print('hello')"},
                    "result": "hello",
                    "success": True,
                    "turn_number": 0
                }
            ],
            "description": "Test template",
            "created_at": 1234567890.0,
            "usage_count": 0,
            "source_session_id": None,
            "tags": []
        }
        template = SessionTemplate.from_dict(data)
        assert template.name == "test_template"
        # The string value round-trips back to the enum member.
        assert template.task_type == TaskType.CODE
        assert len(template.examples) == 1
class TestSessionTemplates:
    """Test SessionTemplates manager."""
    # Each test isolates template storage in its own TemporaryDirectory so
    # nothing is written to the real ~/.hermes/session-templates/ directory.

    def test_create_and_list(self):
        """Test creating and listing templates."""
        with tempfile.TemporaryDirectory() as tmpdir:
            template_dir = Path(tmpdir)
            manager = SessionTemplates(template_dir=template_dir)
            # Create a mock template
            examples = [
                ToolCallExample(
                    tool_name="execute_code",
                    arguments={"code": "print('hello')"},
                    result="hello",
                    success=True
                )
            ]
            template = SessionTemplate(
                name="test_template",
                task_type=TaskType.CODE,
                examples=examples
            )
            # Register in memory and persist to disk, as create_template would.
            manager.templates["test_template"] = template
            manager._save_template(template)
            # List templates
            templates = manager.list_templates()
            assert len(templates) == 1
            assert templates[0].name == "test_template"

    def test_get_template(self):
        """Test getting template by task type."""
        with tempfile.TemporaryDirectory() as tmpdir:
            template_dir = Path(tmpdir)
            manager = SessionTemplates(template_dir=template_dir)
            # Create templates
            code_template = SessionTemplate(
                name="code_template",
                task_type=TaskType.CODE,
                examples=[]
            )
            file_template = SessionTemplate(
                name="file_template",
                task_type=TaskType.FILE,
                examples=[]
            )
            manager.templates["code_template"] = code_template
            manager.templates["file_template"] = file_template
            # Get code template
            result = manager.get_template(TaskType.CODE)
            assert result is not None
            assert result.name == "code_template"
            # Get file template
            result = manager.get_template(TaskType.FILE)
            assert result is not None
            assert result.name == "file_template"
            # Get non-existent template
            result = manager.get_template(TaskType.RESEARCH)
            assert result is None

    def test_inject_into_messages(self):
        """Test injecting template into messages."""
        with tempfile.TemporaryDirectory() as tmpdir:
            template_dir = Path(tmpdir)
            manager = SessionTemplates(template_dir=template_dir)
            # Create template
            examples = [
                ToolCallExample(
                    tool_name="execute_code",
                    arguments={"code": "print('hello')"},
                    result="hello",
                    success=True
                )
            ]
            template = SessionTemplate(
                name="test_template",
                task_type=TaskType.CODE,
                examples=examples
            )
            manager.templates["test_template"] = template
            # Test injection
            messages = [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hello"}
            ]
            result = manager.inject_into_messages(template, messages)
            # Should have added template messages
            assert len(result) > len(messages)
            assert any("Session template loaded" in str(msg.get("content", ""))
                       for msg in result)
            # Usage count should be updated
            assert template.usage_count == 1

    def test_delete_template(self):
        """Test deleting templates."""
        with tempfile.TemporaryDirectory() as tmpdir:
            template_dir = Path(tmpdir)
            manager = SessionTemplates(template_dir=template_dir)
            # Create template
            template = SessionTemplate(
                name="test_template",
                task_type=TaskType.CODE,
                examples=[]
            )
            manager.templates["test_template"] = template
            manager._save_template(template)
            # Verify it exists
            assert "test_template" in manager.templates
            assert (template_dir / "test_template.json").exists()
            # Delete it
            result = manager.delete_template("test_template")
            assert result is True
            # Verify it's gone — both the in-memory entry and the JSON file.
            assert "test_template" not in manager.templates
            assert not (template_dir / "test_template.json").exists()

    def test_get_template_stats(self):
        """Test getting template statistics."""
        with tempfile.TemporaryDirectory() as tmpdir:
            template_dir = Path(tmpdir)
            manager = SessionTemplates(template_dir=template_dir)
            # Create templates
            code_template = SessionTemplate(
                name="code_template",
                task_type=TaskType.CODE,
                examples=[
                    ToolCallExample("execute_code", {}, "", True),
                    ToolCallExample("execute_code", {}, "", True)
                ],
                usage_count=5
            )
            file_template = SessionTemplate(
                name="file_template",
                task_type=TaskType.FILE,
                examples=[
                    ToolCallExample("read_file", {}, "", True)
                ],
                usage_count=3
            )
            manager.templates["code_template"] = code_template
            manager.templates["file_template"] = file_template
            stats = manager.get_template_stats()
            # Aggregates: 2 templates, 2+1 examples, 5+3 uses, one per type.
            assert stats["total"] == 2
            assert stats["total_examples"] == 3
            assert stats["total_usage"] == 8
            assert stats["by_type"]["code"] == 1
            assert stats["by_type"]["file"] == 1
# Allow running this test file directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__])

471
tools/session_templates.py Normal file
View File

@@ -0,0 +1,471 @@
"""
Session templates for code-first seeding.
Research finding: Code-heavy sessions (execute_code dominant in first 30 turns)
improve over time. File-heavy sessions degrade. Key is deterministic feedback loops.
This module provides:
1. Template extraction from successful sessions
2. Task type classification (code, file, research, mixed)
3. Template storage in ~/.hermes/session-templates/
4. Template injection into new sessions
5. CLI interface for template management
"""
import json
import logging
import os
import sqlite3
import time
from pathlib import Path
from typing import Dict, List, Optional, Any, Tuple
from dataclasses import dataclass, asdict, field
from enum import Enum
logger = logging.getLogger(__name__)
# Default template directory
DEFAULT_TEMPLATE_DIR = Path.home() / ".hermes" / "session-templates"
class TaskType(Enum):
    """Task type classification derived from a session's tool-call mix."""
    CODE = "code"          # execute_code dominant
    FILE = "file"          # read/write/patch/search dominant
    RESEARCH = "research"  # web_search / web_fetch / browser dominant
    MIXED = "mixed"        # no single category dominates


@dataclass
class ToolCallExample:
    """A single recorded tool call: arguments plus the observed result."""

    tool_name: str               # name of the tool that was invoked
    arguments: Dict[str, Any]    # decoded call arguments
    result: str                  # tool output; "" until matched with a response
    success: bool                # whether the call is considered successful
    turn_number: int = 0         # ordinal position within the source session

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-serializable dict representation."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ToolCallExample':
        """Rebuild an example from a dict produced by :meth:`to_dict`."""
        return cls(**data)


@dataclass
class SessionTemplate:
    """A named session template: a bundle of tool-call examples plus metadata."""

    name: str                                  # unique template identifier
    task_type: TaskType                        # classification of the source session
    examples: List[ToolCallExample]            # seed tool calls to inject
    description: str = ""                      # human-readable summary
    created_at: float = 0.0                    # unix timestamp; 0.0 => stamp now
    usage_count: int = 0                       # how many times injected
    source_session_id: Optional[str] = None    # session the template came from
    tags: List[str] = field(default_factory=list)  # free-form category labels

    def __post_init__(self):
        # Stamp creation time lazily so deserialized templates keep their own.
        if self.created_at == 0.0:
            self.created_at = time.time()

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-serializable dict (enum flattened to its string value)."""
        data = asdict(self)
        data['task_type'] = self.task_type.value
        return data

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'SessionTemplate':
        """Rebuild a template from a dict produced by :meth:`to_dict`.

        Operates on a shallow copy so the caller's dict is left untouched
        (the previous implementation rewrote 'task_type' and 'examples'
        in the caller's mapping as a side effect).
        """
        data = dict(data)  # bug fix: do not mutate the caller's dict
        data['task_type'] = TaskType(data['task_type'])
        data['examples'] = [ToolCallExample.from_dict(e) for e in data.get('examples', [])]
        return cls(**data)
class SessionTemplates:
    """Manages session templates for code-first seeding.

    Templates are persisted as one JSON file per template under
    ``template_dir`` (``DEFAULT_TEMPLATE_DIR`` by default) and cached in
    memory in ``self.templates``, keyed by template name.
    """

    def __init__(self, template_dir: Optional[Path] = None):
        """Create the template directory if needed and load existing templates.

        Args:
            template_dir: Override for the on-disk template location;
                defaults to ``DEFAULT_TEMPLATE_DIR``.
        """
        self.template_dir = template_dir or DEFAULT_TEMPLATE_DIR
        self.template_dir.mkdir(parents=True, exist_ok=True)
        # In-memory cache: template name -> SessionTemplate.
        self.templates: Dict[str, SessionTemplate] = {}
        self._load_templates()

    def _load_templates(self) -> None:
        """Load every ``*.json`` template from disk into the in-memory cache."""
        for template_file in self.template_dir.glob("*.json"):
            try:
                with open(template_file, 'r') as f:
                    data = json.load(f)
                template = SessionTemplate.from_dict(data)
                self.templates[template.name] = template
            except Exception as e:
                # One corrupt file must not prevent the others from loading.
                logger.warning(f"Failed to load template {template_file}: {e}")

    def _save_template(self, template: SessionTemplate) -> None:
        """Write ``template`` to ``<template_dir>/<name>.json``."""
        template_file = self.template_dir / f"{template.name}.json"
        with open(template_file, 'w') as f:
            json.dump(template.to_dict(), f, indent=2)

    def classify_task_type(self, tool_calls: List[Dict[str, Any]]) -> TaskType:
        """Classify a task by the dominant tool family used.

        Args:
            tool_calls: Dicts each carrying a ``tool_name`` key.

        Returns:
            ``TaskType.CODE`` / ``FILE`` / ``RESEARCH`` when the
            corresponding tool family accounts for more than 60% of the
            calls; otherwise ``TaskType.MIXED`` (also for an empty list).
        """
        if not tool_calls:
            return TaskType.MIXED
        code_tools = {'execute_code', 'code_execution'}
        file_tools = {'read_file', 'write_file', 'patch', 'search_files'}
        research_tools = {'web_search', 'web_fetch', 'browser_navigate'}
        tool_names = [tc.get('tool_name', '') for tc in tool_calls]
        total = len(tool_calls)
        # Fraction of calls belonging to each family; 60% threshold below.
        code_ratio = sum(1 for t in tool_names if t in code_tools) / total
        file_ratio = sum(1 for t in tool_names if t in file_tools) / total
        research_ratio = sum(1 for t in tool_names if t in research_tools) / total
        if code_ratio > 0.6:
            return TaskType.CODE
        if file_ratio > 0.6:
            return TaskType.FILE
        if research_ratio > 0.6:
            return TaskType.RESEARCH
        return TaskType.MIXED

    def extract_from_session(self, session_id: str, max_examples: int = 10) -> List[ToolCallExample]:
        """Extract up to ``max_examples`` tool-call examples from a session.

        Reads the first 100 messages of the session from the SQLite state
        database at ``~/.hermes/state.db``.  Each assistant tool call
        becomes a ToolCallExample; the content of the following ``tool``
        message fills in the example's ``result``.

        Returns:
            Extracted examples, or ``[]`` when the database is missing or
            any error occurs.
        """
        db_path = Path.home() / ".hermes" / "state.db"
        if not db_path.exists():
            logger.warning(f"Session database not found: {db_path}")
            return []
        try:
            conn = sqlite3.connect(str(db_path))
            try:
                conn.row_factory = sqlite3.Row
                cursor = conn.execute("""
                    SELECT role, content, tool_calls, tool_name, timestamp
                    FROM messages
                    WHERE session_id = ?
                    ORDER BY timestamp
                    LIMIT 100
                """, (session_id,))
                messages = cursor.fetchall()
            finally:
                # Close even when the query fails, so the handle never leaks.
                conn.close()
            examples: List[ToolCallExample] = []
            turn_number = 0
            for msg in messages:
                if len(examples) >= max_examples:
                    break
                if msg['role'] == 'assistant' and msg['tool_calls']:
                    try:
                        tool_calls = json.loads(msg['tool_calls'])
                    except json.JSONDecodeError:
                        # Malformed tool_calls payload: skip this message.
                        continue
                    for tc in tool_calls:
                        if len(examples) >= max_examples:
                            break
                        tool_name = tc.get('function', {}).get('name')
                        if not tool_name:
                            continue
                        try:
                            arguments = json.loads(tc.get('function', {}).get('arguments', '{}'))
                        except (json.JSONDecodeError, TypeError):
                            # Unparseable arguments: fall back to empty kwargs.
                            arguments = {}
                        examples.append(ToolCallExample(
                            tool_name=tool_name,
                            arguments=arguments,
                            result="",  # filled in from the next tool message
                            success=True,
                            turn_number=turn_number
                        ))
                        # NOTE(review): increments per tool call, not per
                        # assistant message; kept for historical behavior.
                        turn_number += 1
                elif msg['role'] == 'tool' and examples and examples[-1].result == "":
                    # Attach the tool response to the most recent example.
                    examples[-1].result = msg['content'] or ""
            return examples
        except Exception as e:
            logger.error(f"Failed to extract from session {session_id}: {e}")
            return []

    def create_template(self, session_id: str, name: Optional[str] = None,
                        task_type: Optional[TaskType] = None,
                        max_examples: int = 10,
                        description: str = "",
                        tags: Optional[List[str]] = None) -> Optional[SessionTemplate]:
        """Create, cache, and persist a template from a stored session.

        Args:
            session_id: Session to mine for tool-call examples.
            name: Template name; auto-generated if None.
            task_type: Explicit classification; inferred from the
                extracted tool calls if None.
            max_examples: Cap on the number of extracted examples.
            description: Human-readable summary; auto-generated if empty.
            tags: Optional categorization tags.

        Returns:
            The new SessionTemplate, or None when the session yields no
            tool-call examples.
        """
        examples = self.extract_from_session(session_id, max_examples)
        if not examples:
            logger.warning(f"No successful tool calls found in session {session_id}")
            return None
        if task_type is None:
            tool_calls = [{'tool_name': e.tool_name} for e in examples]
            task_type = self.classify_task_type(tool_calls)
        if name is None:
            # e.g. "code_ab12cd34_1713060000": unique per session + second.
            name = f"{task_type.value}_{session_id[:8]}_{int(time.time())}"
        template = SessionTemplate(
            name=name,
            task_type=task_type,
            examples=examples,
            description=description or f"Template with {len(examples)} examples",
            source_session_id=session_id,
            tags=tags or []
        )
        self.templates[name] = template
        self._save_template(template)
        logger.info(f"Created template {name} with {len(examples)} examples")
        return template

    def get_template(self, task_type: TaskType, tags: Optional[List[str]] = None) -> Optional[SessionTemplate]:
        """Return the least-used template for ``task_type`` (and any tag).

        Args:
            task_type: Required task-type match.
            tags: If given, a template must share at least one tag.

        Returns:
            The matching template with the lowest ``usage_count``, or
            None when nothing matches.
        """
        matching = [t for t in self.templates.values() if t.task_type == task_type]
        if tags:
            matching = [t for t in matching if any(tag in t.tags for tag in tags)]
        if not matching:
            return None
        # Prefer less-used templates so usage rotates across the pool.
        return min(matching, key=lambda t: t.usage_count)

    def inject_into_messages(self, template: SessionTemplate,
                             messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Inject template examples into ``messages`` (mutated in place).

        Inserts, directly after any leading system messages: one system
        message describing the template, then an assistant/tool message
        pair per example.  Bumps and persists the template's
        ``usage_count`` as a side effect.

        Returns:
            The same ``messages`` list, for convenience.
        """
        if not template.examples:
            return messages
        injection: List[Dict[str, Any]] = [{
            "role": "system",
            "content": f"Session template loaded: {template.name} ({template.task_type.value})\n"
                       f"Description: {template.description}\n"
                       f"This template contains {len(template.examples)} successful tool calls "
                       f"to establish a feedback loop early."
        }]
        for i, example in enumerate(template.examples):
            call_id = f"template_{template.name}_{i}"
            # Assistant message carrying the tool call...
            injection.append({
                "role": "assistant",
                "content": None,
                "tool_calls": [{
                    "id": call_id,
                    "type": "function",
                    "function": {
                        "name": example.tool_name,
                        "arguments": json.dumps(example.arguments)
                    }
                }]
            })
            # ...paired with its recorded tool response.
            injection.append({
                "role": "tool",
                "tool_call_id": call_id,
                "content": example.result
            })
        # Skip past any leading system messages, then splice in one shot.
        insert_index = 0
        while insert_index < len(messages) and messages[insert_index].get("role") == "system":
            insert_index += 1
        messages[insert_index:insert_index] = injection
        template.usage_count += 1
        self._save_template(template)
        return messages

    def list_templates(self, task_type: Optional[TaskType] = None,
                       tags: Optional[List[str]] = None) -> List[SessionTemplate]:
        """List templates, newest first, optionally filtered by type/tags."""
        templates = list(self.templates.values())
        if task_type:
            templates = [t for t in templates if t.task_type == task_type]
        if tags:
            templates = [t for t in templates if any(tag in t.tags for tag in tags)]
        templates.sort(key=lambda t: t.created_at, reverse=True)
        return templates

    def delete_template(self, name: str) -> bool:
        """Remove a template from the cache and from disk.

        Returns:
            True when the template existed and was deleted, else False.
        """
        if name not in self.templates:
            return False
        del self.templates[name]
        template_file = self.template_dir / f"{name}.json"
        if template_file.exists():
            template_file.unlink()
        logger.info(f"Deleted template {name}")
        return True

    def get_template_stats(self) -> Dict[str, Any]:
        """Aggregate statistics over all cached templates.

        Returns:
            Dict with keys ``total``, ``by_type`` (count per task-type
            value), ``total_examples``, and ``total_usage``.  All counts
            are zero / empty when no templates are loaded.
        """
        by_type: Dict[str, int] = {}
        total_examples = 0
        total_usage = 0
        for template in self.templates.values():
            key = template.task_type.value
            by_type[key] = by_type.get(key, 0) + 1
            total_examples += len(template.examples)
            total_usage += template.usage_count
        return {
            "total": len(self.templates),
            "by_type": by_type,
            "total_examples": total_examples,
            "total_usage": total_usage
        }
# CLI interface
def main():
    """Command-line entry point for managing session templates.

    Subcommands: ``list`` (show templates, filterable by type/tags),
    ``create`` (build a template from a stored session), ``delete``
    (remove a template by name), and ``stats`` (aggregate statistics).
    Prints results to stdout; shows help when no command is given.
    """
    import argparse

    def _split_tags(raw: Optional[str]) -> Optional[List[str]]:
        # Accept "a,b" as well as "a, b"; drop empty fragments so that
        # stored tags (saved without padding) match reliably.
        if not raw:
            return None
        return [t.strip() for t in raw.split(",") if t.strip()]

    parser = argparse.ArgumentParser(description="Session Templates")
    subparsers = parser.add_subparsers(dest="command")
    # list: show templates
    list_parser = subparsers.add_parser("list", help="List templates")
    list_parser.add_argument("--type", choices=["code", "file", "research", "mixed"],
                             help="Filter by task type")
    list_parser.add_argument("--tags", help="Filter by tags (comma-separated)")
    # create: build a template from a session
    create_parser = subparsers.add_parser("create", help="Create template from session")
    create_parser.add_argument("session_id", help="Session ID")
    create_parser.add_argument("--name", help="Template name")
    create_parser.add_argument("--type", choices=["code", "file", "research", "mixed"],
                               help="Task type")
    create_parser.add_argument("--max-examples", type=int, default=10,
                               help="Maximum examples to extract")
    create_parser.add_argument("--description", help="Template description")
    create_parser.add_argument("--tags", help="Tags (comma-separated)")
    # delete: remove a template
    delete_parser = subparsers.add_parser("delete", help="Delete template")
    delete_parser.add_argument("name", help="Template name")
    # stats: aggregate statistics (takes no extra arguments)
    subparsers.add_parser("stats", help="Show template statistics")
    args = parser.parse_args()

    templates = SessionTemplates()
    if args.command == "list":
        task_type = TaskType(args.type) if args.type else None
        template_list = templates.list_templates(task_type, _split_tags(args.tags))
        if not template_list:
            print("No templates found")
            return
        print(f"Found {len(template_list)} templates:")
        for t in template_list:
            tags_str = f" [tags: {', '.join(t.tags)}]" if t.tags else ""
            print(f"  {t.name}: {t.task_type.value} ({len(t.examples)} examples, "
                  f"used {t.usage_count} times){tags_str}")
    elif args.command == "create":
        task_type = TaskType(args.type) if args.type else None
        template = templates.create_template(
            args.session_id,
            name=args.name,
            task_type=task_type,
            max_examples=args.max_examples,
            description=args.description or "",
            tags=_split_tags(args.tags)
        )
        if template:
            print(f"Created template: {template.name}")
            print(f"  Type: {template.task_type.value}")
            print(f"  Examples: {len(template.examples)}")
            if template.tags:
                print(f"  Tags: {', '.join(template.tags)}")
        else:
            print("Failed to create template")
    elif args.command == "delete":
        if templates.delete_template(args.name):
            print(f"Deleted template: {args.name}")
        else:
            print(f"Template not found: {args.name}")
    elif args.command == "stats":
        stats = templates.get_template_stats()
        print("Template Statistics:")
        print(f"  Total templates: {stats['total']}")
        print(f"  Total examples: {stats['total_examples']}")
        print(f"  Total usage: {stats['total_usage']}")
        if stats['by_type']:
            print("  By type:")
            for task_type, count in stats['by_type'].items():
                print(f"    {task_type}: {count}")
    else:
        parser.print_help()
# Script entry point: run the CLI only when executed directly, never on import.
if __name__ == "__main__":
    main()