Compare commits
1 commit
feat/91-se ... feat/sessi
| Author | SHA1 | Date |
|---|---|---|
|  | 160dfcf419 |  |
scripts/session_metadata.py (276 lines, Normal file)
@@ -0,0 +1,276 @@
#!/usr/bin/env python3
"""
session_metadata.py - Extract structured metadata from Hermes session transcripts.
Works alongside session_reader.py to provide higher-level session analysis.
"""

import json
import re
import sys
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any

# Import from session_reader (the canonical reader)
from session_reader import read_session


@dataclass
class SessionSummary:
    """Structured summary of a Hermes session transcript."""
    session_id: str
    model: str
    repo: str
    outcome: str
    message_count: int
    tool_calls: int
    duration_estimate: str
    key_actions: List[str]
    errors_encountered: List[str]
    start_time: Optional[str] = None
    end_time: Optional[str] = None
    total_tokens_estimate: int = 0
    user_messages: int = 0
    assistant_messages: int = 0
    tool_outputs: int = 0

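# Illustrative example (hypothetical values): asdict() on a populated summary
# yields a plain dict ready for json.dumps, e.g.:
#   {"session_id": "session_abc", "model": "hermes-3", "repo": "the-nexus",
#    "outcome": "success", "message_count": 42, "tool_calls": 7,
#    "duration_estimate": "18m", "key_actions": ["write_file"], ...}
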
def extract_session_metadata(file_path: str) -> SessionSummary:
    """
    Extract structured metadata from a Hermes session JSONL transcript.
    Uses session_reader.read_session() for file reading.
    """
    session_id = Path(file_path).stem
    messages = []
    model = "unknown"
    repo = "unknown"
    tool_calls_count = 0
    key_actions = []
    errors = []
    start_time = None
    end_time = None
    total_tokens = 0

    # Common repo patterns to look for
    repo_patterns = [
        r"(?:the-nexus|compounding-intelligence|timmy-config|hermes-agent)",
        r"(?:forge\.alexanderwhitestone\.com/([^/]+/[^/\s]+))",
        r"(?:github\.com/([^/]+/[^/\s]+))",
        r"(?:Timmy_Foundation/([^/\s]+))",
    ]

    try:
        # Use the canonical reader from session_reader.py
        messages = read_session(file_path)
    except FileNotFoundError:
        return SessionSummary(
            session_id=session_id,
            model="unknown",
            repo="unknown",
            outcome="failure",
            message_count=0,
            tool_calls=0,
            duration_estimate="0m",
            key_actions=[],
            errors_encountered=[f"File not found: {file_path}"]
        )

    # Process messages for metadata
    for entry in messages:
        # Extract model from assistant messages
        if entry.get("role") == "assistant" and entry.get("model"):
            model = entry["model"]

        # Extract timestamps (first seen = start, last seen = end)
        if entry.get("timestamp"):
            ts = entry["timestamp"]
            if start_time is None:
                start_time = ts
            end_time = ts

        # Count tool calls and record each distinct tool name
        if entry.get("tool_calls"):
            tool_calls_count += len(entry["tool_calls"])
            for tc in entry["tool_calls"]:
                if tc.get("function", {}).get("name"):
                    action = tc["function"]["name"]
                    if action not in key_actions:
                        key_actions.append(action)

        # Estimate tokens from content length (whitespace word count)
        content = entry.get("content", "")
        if isinstance(content, str):
            total_tokens += len(content.split())
        elif isinstance(content, list):
            for item in content:
                if isinstance(item, dict) and "text" in item:
                    total_tokens += len(item["text"].split())

        # Look for repo mentions in content
        if entry.get("content"):
            content_str = str(entry["content"])
            for pattern in repo_patterns:
                match = re.search(pattern, content_str, re.IGNORECASE)
                if match:
                    repo = match.group(1) if match.groups() else match.group(0)
                    break

        # Look for error messages (truncate long ones to 200 chars)
        if entry.get("role") == "tool" and entry.get("is_error"):
            error_msg = entry.get("content", "Unknown error")
            if isinstance(error_msg, str):
                errors.append(error_msg[:200])

    # Count message types
    user_messages = sum(1 for m in messages if m.get("role") == "user")
    assistant_messages = sum(1 for m in messages if m.get("role") == "assistant")
    tool_outputs = sum(1 for m in messages if m.get("role") == "tool")

    # Calculate duration estimate
    duration_estimate = "unknown"
    if isinstance(start_time, str) and isinstance(end_time, str):
        start_dt = None
        end_dt = None

        # Handle various timestamp formats; parse start and end independently
        # so one failed format does not skip the other.
        for fmt in ["%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d %H:%M:%S"]:
            if start_dt is None:
                try:
                    start_dt = datetime.strptime(start_time, fmt)
                except ValueError:
                    pass
            if end_dt is None:
                try:
                    end_dt = datetime.strptime(end_time, fmt)
                except ValueError:
                    pass

        if start_dt and end_dt:
            duration = end_dt - start_dt
            minutes = duration.total_seconds() / 60
            duration_estimate = f"{minutes:.0f}m"

    # Classify outcome
    outcome = "unknown"
    if errors:
        # Check if any errors are fatal
        fatal_errors = any("405" in e or "permission" in e.lower() or "authentication" in e.lower()
                           for e in errors)
        outcome = "failure" if fatal_errors else "partial"
    elif messages:
        # Check last message for success indicators
        last_msg = messages[-1]
        if last_msg.get("role") == "assistant":
            content = last_msg.get("content", "")
            if isinstance(content, str):
                success_indicators = ["done", "completed", "success", "merged", "pushed"]
                if any(indicator in content.lower() for indicator in success_indicators):
                    outcome = "success"

    # Limit key actions to the first 10 (already deduplicated on collection)
    unique_actions = key_actions[:10]

    # Deduplicate errors (keep unique, limit to 5)
    unique_errors = []
    for error in errors:
        if error not in unique_errors:
            unique_errors.append(error)
            if len(unique_errors) >= 5:
                break

    return SessionSummary(
        session_id=session_id,
        model=model,
        repo=repo,
        outcome=outcome,
        message_count=len(messages),
        tool_calls=tool_calls_count,
        duration_estimate=duration_estimate,
        key_actions=unique_actions,
        errors_encountered=unique_errors,
        start_time=start_time,
        end_time=end_time,
        total_tokens_estimate=total_tokens,
        user_messages=user_messages,
        assistant_messages=assistant_messages,
        tool_outputs=tool_outputs
    )

def process_session_directory(directory_path: str, output_file: Optional[str] = None) -> List[SessionSummary]:
    """
    Process all JSONL files in a directory.
    """
    directory = Path(directory_path)
    if not directory.exists():
        print(f"Error: Directory {directory_path} does not exist", file=sys.stderr)
        return []

    jsonl_files = list(directory.glob("*.jsonl"))
    if not jsonl_files:
        print(f"Warning: No JSONL files found in {directory_path}", file=sys.stderr)
        return []

    summaries = []
    for jsonl_file in sorted(jsonl_files):
        print(f"Processing {jsonl_file.name}...", file=sys.stderr)
        summary = extract_session_metadata(str(jsonl_file))
        summaries.append(summary)

    if output_file:
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump([asdict(s) for s in summaries], f, indent=2)
        print(f"Wrote {len(summaries)} summaries to {output_file}", file=sys.stderr)

    return summaries

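# Example (hypothetical paths): summarize every transcript in a sessions
# directory and persist the result as JSON:
#   process_session_directory("/home/user/.hermes/sessions", output_file="summaries.json")
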
def main():
    """CLI entry point."""
    import argparse

    parser = argparse.ArgumentParser(description="Extract metadata from Hermes session JSONL transcripts")
    parser.add_argument("path", help="Path to JSONL file or directory of session files")
    parser.add_argument("-o", "--output", help="Output JSON file (default: stdout)")
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")

    args = parser.parse_args()

    path = Path(args.path)

    if path.is_file():
        summary = extract_session_metadata(str(path))
        if args.output:
            with open(args.output, 'w') as f:
                json.dump(asdict(summary), f, indent=2)
            print(f"Wrote summary to {args.output}", file=sys.stderr)
        else:
            print(json.dumps(asdict(summary), indent=2))

    elif path.is_dir():
        summaries = process_session_directory(str(path), args.output)
        if not args.output:
            print(json.dumps([asdict(s) for s in summaries], indent=2))

    else:
        print(f"Error: {args.path} is not a file or directory", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
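For reference, a minimal sketch of using the new module programmatically, assuming scripts/ is on sys.path; the transcript path is hypothetical. The CLI equivalent, per the argparse definition above, is python3 scripts/session_metadata.py <path> -o summaries.json.

from dataclasses import asdict
import json

from session_metadata import extract_session_metadata

summary = extract_session_metadata("session_abc.jsonl")  # hypothetical transcript path
print(json.dumps(asdict(summary), indent=2))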
scripts/session_pair_harvester.py (234 lines, deleted)
@@ -1,234 +0,0 @@
#!/usr/bin/env python3
"""
Session Transcript → Training Pair Harvester

Scans Hermes session JSONL files for Q&A patterns and extracts
terse→rich training pairs. Outputs JSONL matching the timmy-config
training pairs spec.

Usage:
    python3 scripts/session_pair_harvester.py ~/.hermes/sessions/
    python3 scripts/session_pair_harvester.py session.jsonl --output pairs.jsonl
    python3 scripts/session_pair_harvester.py --dir ~/.hermes/sessions/ --min-ratio 2.0

Output format:
    {"terse": "user short prompt", "rich": "ai detailed response", "source": "session_id", "model": "..."}
"""

import argparse
import hashlib
import json
import sys
from pathlib import Path
from typing import Optional

def compute_hash(text: str) -> str:
    """Content hash for deduplication."""
    return hashlib.sha256(text.encode()).hexdigest()[:16]

def extract_pairs_from_session(session_data: dict, min_ratio: float = 1.5,
                               min_response_words: int = 20) -> list:
    """Extract terse→rich pairs from a single session object."""
    pairs = []
    conversations = session_data.get("conversations", [])
    session_id = session_data.get("id", "unknown")
    model = session_data.get("model", "unknown")

    seen_hashes = set()

    for i, msg in enumerate(conversations):
        # Look for assistant/gpt responses
        if msg.get("from") not in ("gpt", "assistant"):
            continue

        response_text = msg.get("value", "")
        if not response_text or len(response_text.split()) < min_response_words:
            continue

        # Find the preceding human message
        prompt_text = ""
        for j in range(i - 1, -1, -1):
            if conversations[j].get("from") == "human":
                prompt_text = conversations[j].get("value", "")
                break

        if not prompt_text:
            continue

        # Filter: skip tool results and system messages embedded as human turns
        if prompt_text.startswith("{") and "output" in prompt_text[:100]:
            continue  # likely a tool result
        if prompt_text.startswith("# SOUL.md") or prompt_text.startswith("You are"):
            continue  # system prompt leak

        # Quality filters
        prompt_words = len(prompt_text.split())
        response_words = len(response_text.split())

        # Must have a meaningful length ratio
        if prompt_words == 0 or response_words == 0:
            continue
        ratio = response_words / prompt_words
        if ratio < min_ratio:
            continue

        # Skip responses that are mostly code (many fences, little prose)
        code_blocks = response_text.count("```")
        if code_blocks >= 4 and len(response_text.replace("```", "").strip()) < 50:
            continue

        # Skip responses with tool call artifacts
        if "tool_call" in response_text[:100] or "function_call" in response_text[:100]:
            continue

        # Deduplicate by content hash
        content_hash = compute_hash(prompt_text + response_text[:200])
        if content_hash in seen_hashes:
            continue
        seen_hashes.add(content_hash)

        # Response is kept verbatim; no markdown cleanup is applied here
        clean_response = response_text

        pairs.append({
            "terse": prompt_text.strip(),
            "rich": clean_response.strip(),
            "source": session_id,
            "model": model,
            "prompt_words": prompt_words,
            "response_words": response_words,
            "ratio": round(ratio, 2),
        })

    return pairs

def extract_from_jsonl_file(filepath: str, **kwargs) -> list:
    """Extract pairs from a session JSONL file."""
    pairs = []
    path = Path(filepath)

    if not path.exists():
        print(f"Warning: {filepath} not found", file=sys.stderr)
        return pairs

    content = path.read_text()
    lines = content.strip().split("\n")

    for line in lines:
        line = line.strip()
        if not line:
            continue
        try:
            session = json.loads(line)
        except json.JSONDecodeError:
            continue

        session_pairs = extract_pairs_from_session(session, **kwargs)
        pairs.extend(session_pairs)

    return pairs

def deduplicate_pairs(pairs: list) -> list:
    """Remove duplicate pairs across files."""
    seen = set()
    unique = []
    for pair in pairs:
        key = compute_hash(pair["terse"] + pair["rich"][:200])
        if key not in seen:
            seen.add(key)
            unique.append(pair)
    return unique

def main():
    parser = argparse.ArgumentParser(description="Harvest training pairs from session transcripts")
    parser.add_argument("input", nargs="?", help="Session JSONL file or directory")
    parser.add_argument("--dir", "-d", help="Directory to scan for session files")
    parser.add_argument("--output", "-o", default="harvested_pairs.jsonl", help="Output file")
    parser.add_argument("--min-ratio", type=float, default=1.5, help="Min response/prompt word ratio")
    parser.add_argument("--min-words", type=int, default=20, help="Min response word count")
    parser.add_argument("--dry-run", action="store_true", help="Print stats without writing")
    args = parser.parse_args()

    all_pairs = []
    files_scanned = 0

    scan_dir = args.dir or args.input
    if not scan_dir:
        parser.print_help()
        sys.exit(1)

    scan_path = Path(scan_dir)
    if scan_path.is_dir():
        jsonl_files = sorted(scan_path.rglob("*.jsonl"))
        print(f"Scanning {len(jsonl_files)} files in {scan_dir}...", file=sys.stderr)
        for fpath in jsonl_files:
            pairs = extract_from_jsonl_file(
                str(fpath),
                min_ratio=args.min_ratio,
                min_response_words=args.min_words
            )
            all_pairs.extend(pairs)
            files_scanned += 1
    else:
        pairs = extract_from_jsonl_file(
            str(scan_path),
            min_ratio=args.min_ratio,
            min_response_words=args.min_words
        )
        all_pairs.extend(pairs)
        files_scanned = 1

    # Deduplicate
    unique_pairs = deduplicate_pairs(all_pairs)

    # Stats
    if unique_pairs:
        avg_prompt = sum(p["prompt_words"] for p in unique_pairs) / len(unique_pairs)
        avg_response = sum(p["response_words"] for p in unique_pairs) / len(unique_pairs)
        avg_ratio = sum(p["ratio"] for p in unique_pairs) / len(unique_pairs)
    else:
        avg_prompt = avg_response = avg_ratio = 0

    stats = {
        "files_scanned": files_scanned,
        "raw_pairs": len(all_pairs),
        "unique_pairs": len(unique_pairs),
        "duplicates_removed": len(all_pairs) - len(unique_pairs),
        "avg_prompt_words": round(avg_prompt, 1),
        "avg_response_words": round(avg_response, 1),
        "avg_ratio": round(avg_ratio, 2),
    }

    print(json.dumps(stats, indent=2), file=sys.stderr)

    if args.dry_run:
        # Print sample pairs
        for pair in unique_pairs[:3]:
            print(f"\n--- Source: {pair['source']} (ratio: {pair['ratio']}) ---", file=sys.stderr)
            print(f"TERSE: {pair['terse'][:100]}...", file=sys.stderr)
            print(f"RICH: {pair['rich'][:150]}...", file=sys.stderr)
        return

    # Write output
    output_path = Path(args.output)
    with open(output_path, "w") as f:
        for pair in unique_pairs:
            # Strip internal fields for output
            output = {
                "terse": pair["terse"],
                "rich": pair["rich"],
                "source": pair["source"],
                "model": pair["model"],
            }
            f.write(json.dumps(output) + "\n")

    print(f"\nWrote {len(unique_pairs)} pairs to {output_path}", file=sys.stderr)


if __name__ == "__main__":
    main()
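For reference, a minimal sketch of consuming the removed harvester's output; field names come from the Output format documented above, and the filename is the script's default --output value.

import json
from pathlib import Path

# Each line carries terse/rich/source/model, per the harvester's output format.
pairs = [json.loads(line)
         for line in Path("harvested_pairs.jsonl").read_text().splitlines()
         if line.strip()]
if pairs:
    longest = max(pairs, key=lambda p: len(p["rich"].split()))
    print(longest["source"], len(longest["rich"].split()), "response words")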
@@ -1,90 +0,0 @@
#!/usr/bin/env python3
"""Tests for session_pair_harvester."""

import json
import sys
import os
import tempfile

sys.path.insert(0, os.path.dirname(__file__))
from session_pair_harvester import extract_pairs_from_session, deduplicate_pairs, compute_hash

def test_basic_extraction():
    session = {
        "id": "test_001",
        "model": "test-model",
        "conversations": [
            {"from": "system", "value": "You are helpful."},
            {"from": "human", "value": "What is Python?"},
            {"from": "gpt", "value": "Python is a high-level programming language known for its readability and versatility. It supports multiple paradigms including procedural, object-oriented, and functional programming. Python is widely used in web development, data science, machine learning, and automation."},
        ]
    }
    pairs = extract_pairs_from_session(session, min_ratio=1.5, min_response_words=10)
    assert len(pairs) == 1
    assert pairs[0]["terse"] == "What is Python?"
    assert "programming language" in pairs[0]["rich"]
    assert pairs[0]["source"] == "test_001"
    print("PASS: test_basic_extraction")


def test_filters_short_responses():
    session = {
        "id": "test_002",
        "model": "test",
        "conversations": [
            {"from": "human", "value": "Hi"},
            {"from": "gpt", "value": "Hello!"},
        ]
    }
    pairs = extract_pairs_from_session(session, min_ratio=1.5, min_response_words=20)
    assert len(pairs) == 0
    print("PASS: test_filters_short_responses")


def test_skips_tool_results():
    session = {
        "id": "test_003",
        "model": "test",
        "conversations": [
            {"from": "human", "value": '{"output": "file content", "exit_code": 0}'},
            {"from": "gpt", "value": "The file was read successfully. Now let me analyze the content and provide a detailed summary of what was found in the file system."},
        ]
    }
    pairs = extract_pairs_from_session(session, min_ratio=1.5, min_response_words=10)
    assert len(pairs) == 0
    print("PASS: test_skips_tool_results")


def test_deduplication():
    pairs = [
        {"terse": "What is X?", "rich": "X is Y.", "source": "s1", "model": "m"},
        {"terse": "What is X?", "rich": "X is Y.", "source": "s2", "model": "m"},
        {"terse": "What is Z?", "rich": "Z is W.", "source": "s1", "model": "m"},
    ]
    unique = deduplicate_pairs(pairs)
    assert len(unique) == 2
    print("PASS: test_deduplication")


def test_ratio_filter():
    session = {
        "id": "test_005",
        "model": "test",
        "conversations": [
            {"from": "human", "value": "Explain quantum computing in detail with examples and applications"},
            {"from": "gpt", "value": "OK."},
        ]
    }
    pairs = extract_pairs_from_session(session, min_ratio=1.5, min_response_words=10)
    assert len(pairs) == 0  # response too short relative to prompt
    print("PASS: test_ratio_filter")


if __name__ == "__main__":
    test_basic_extraction()
    test_filters_short_responses()
    test_skips_tool_results()
    test_deduplication()
    test_ratio_filter()
    print("\nAll tests passed.")
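The deleted tests use a plain __main__ runner, so a hypothetical invocation (the test file's exact path is not shown in this compare view) would be:

    python3 scripts/test_session_pair_harvester.py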