diff --git a/scripts/session_pair_harvester.py b/scripts/session_pair_harvester.py
index 82ce640..5d0607d 100644
--- a/scripts/session_pair_harvester.py
+++ b/scripts/session_pair_harvester.py
@@ -22,114 +22,95 @@
 import sys
 from pathlib import Path
 from typing import Optional
+
+from session_reader import extract_conversation, read_session


 def compute_hash(text: str) -> str:
     """Content hash for deduplication."""
     return hashlib.sha256(text.encode()).hexdigest()[:16]


-def extract_pairs_from_session(session_data: dict, min_ratio: float = 1.5,
-                               min_response_words: int = 20) -> list:
-    """Extract terse→rich pairs from a single session object."""
+def extract_pairs_from_conversation(conversation: list, session_id: str, model: str,
+                                    min_ratio: float = 1.5,
+                                    min_response_words: int = 20) -> list:
+    """Extract terse→rich pairs from a normalized conversation."""
     pairs = []
-    conversations = session_data.get("conversations", [])
-    session_id = session_data.get("id", "unknown")
-    model = session_data.get("model", "unknown")
     seen_hashes = set()

-    for i, msg in enumerate(conversations):
-        # Look for assistant/gpt responses
-        if msg.get("from") not in ("gpt", "assistant"):
+    for i, msg in enumerate(conversation):
+        # Look for assistant responses
+        if msg.get('role') != 'assistant':
             continue
-        response_text = msg.get("value", "")
+        response_text = msg.get('content', '')
         if not response_text or len(response_text.split()) < min_response_words:
             continue

-        # Find the preceding human message
+        # Find the preceding user message
         prompt_text = ""
         for j in range(i - 1, -1, -1):
-            if conversations[j].get("from") == "human":
-                prompt_text = conversations[j].get("value", "")
+            if conversation[j].get('role') == 'user':
+                prompt_text = conversation[j].get('content', '')
                 break
         if not prompt_text:
             continue

-        # Filter: skip tool results, system messages embedded as human
-        if prompt_text.startswith("{") and "output" in prompt_text[:100]:
+        # Filter: skip tool results and system prompts embedded as user turns
+        if prompt_text.startswith('{') and 'output' in prompt_text[:100]:
             continue  # likely a tool result
-        if prompt_text.startswith("# SOUL.md") or prompt_text.startswith("You are"):
+        if prompt_text.startswith('# SOUL.md') or prompt_text.startswith('You are'):
             continue  # system prompt leak

         # Quality filters
         prompt_words = len(prompt_text.split())
         response_words = len(response_text.split())
-        # Must have meaningful length ratio
         if prompt_words == 0 or response_words == 0:
             continue
         ratio = response_words / prompt_words
         if ratio < min_ratio:
             continue

         # Skip responses that are mostly code
-        code_blocks = response_text.count("```")
-        if code_blocks >= 4 and len(response_text.replace("```", "").strip()) < 50:
+        code_blocks = response_text.count('```')
+        if code_blocks >= 4 and len(response_text.replace('```', '').strip()) < 50:
             continue
         # Skip responses with tool call artifacts
-        if "tool_call" in response_text[:100] or "function_call" in response_text[:100]:
+        if 'tool_call' in response_text[:100] or 'function_call' in response_text[:100]:
             continue

         # Deduplicate by content hash
         content_hash = compute_hash(prompt_text + response_text[:200])
         if content_hash in seen_hashes:
             continue
         seen_hashes.add(content_hash)

-        # Clean up response: remove markdown headers if too many
         clean_response = response_text

         pairs.append({
-            "terse": prompt_text.strip(),
-            "rich": clean_response.strip(),
-            "source": session_id,
-            "model": model,
-            "prompt_words": prompt_words,
-            "response_words": response_words,
-            "ratio": round(ratio, 2),
+            'terse': prompt_text.strip(),
+            'rich': clean_response.strip(),
+            'source': session_id,
+            'model': model,
+            'prompt_words': prompt_words,
+            'response_words': response_words,
+            'ratio': round(ratio, 2),
         })

     return pairs
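+
+# Shape of one harvested record, for reference (an illustrative sketch; the
+# 'terse'/'rich' values here are hypothetical, not from a real session):
+#   {'terse': 'why is the retry test flaky?',
+#    'rich': 'The retry test is flaky because ... (a long, detailed answer)',
+#    'source': 'session_abc123', 'model': 'unknown',
+#    'prompt_words': 6, 'response_words': 132, 'ratio': 22.0}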


-def extract_from_jsonl_file(filepath: str, **kwargs) -> list:
-    """Extract pairs from a session JSONL file."""
-    pairs = []
-    path = Path(filepath)
-    if not path.exists():
-        print(f"Warning: {filepath} not found", file=sys.stderr)
-        return pairs
-
-    content = path.read_text()
-    lines = content.strip().split("\n")
-
-    for line in lines:
-        line = line.strip()
-        if not line:
-            continue
-        try:
-            session = json.loads(line)
-        except json.JSONDecodeError:
-            continue
-
-        session_pairs = extract_pairs_from_session(session, **kwargs)
-        pairs.extend(session_pairs)
-
-    return pairs
+def extract_from_jsonl_file(path: str, **kwargs) -> list:
+    """Read a session file and extract training pairs from its normalized conversation."""
+    session_messages = read_session(path)
+    if not session_messages:
+        return []
+    conversation = extract_conversation(session_messages)
+    # Derive session_id and model from the first message that carries metadata
+    first_msg = next((m for m in session_messages if m.get('role') or m.get('from')), {})
+    session_id = first_msg.get('meta_session_id', Path(path).name)
+    model = first_msg.get('model', 'unknown')
+    return extract_pairs_from_conversation(conversation, session_id, model, **kwargs)
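+
+# Usage sketch (the sessions/ directory is illustrative):
+#   all_pairs = []
+#   for f in sorted(Path('sessions').glob('*.jsonl')):
+#       all_pairs.extend(extract_from_jsonl_file(str(f), min_ratio=2.0))
+#   unique = deduplicate_pairs(all_pairs)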


 def deduplicate_pairs(pairs: list) -> list:
diff --git a/tests/test_session_pair_harvester.py b/tests/test_session_pair_harvester.py
new file mode 100644
index 0000000..43bacd2
--- /dev/null
+++ b/tests/test_session_pair_harvester.py
@@ -0,0 +1,118 @@
+"""
+Tests for session_pair_harvester — training pair extraction from sessions.
+"""
+
+import sys
+import unittest
+from pathlib import Path
+
+# Make scripts/ importable before importing the module under test
+sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
+
+from session_pair_harvester import (
+    extract_pairs_from_conversation,
+    extract_from_jsonl_file,
+    deduplicate_pairs,
+    compute_hash,
+)
+
+
+class TestSessionPairHarvester(unittest.TestCase):
+    def test_compute_hash_consistent(self):
+        h1 = compute_hash("hello world")
+        h2 = compute_hash("hello world")
+        self.assertEqual(h1, h2)
+        self.assertEqual(len(h1), 16)
+
+    def test_extract_simple_qa_pair(self):
+        """A simple user→assistant exchange produces one pair."""
+        conversation = [
+            {"role": "user", "content": "What is the capital of France?"},
+            {"role": "assistant", "content": "The capital of France is Paris. It is a major European city renowned for its art, fashion, gastronomy, cultural heritage, and historical significance. The city attracts millions of tourists annually."},
+        ]
+        pairs = extract_pairs_from_conversation(conversation, "test_session", "test-model")
+        self.assertEqual(len(pairs), 1)
+        self.assertEqual(pairs[0]["terse"], "What is the capital of France?")
+        self.assertIn("Paris", pairs[0]["rich"])
+        self.assertEqual(pairs[0]["source"], "test_session")
+
+    def test_min_ratio_filter(self):
+        """Responses not sufficiently longer than the prompt are filtered out."""
+        conversation = [
+            {"role": "user", "content": "Is this the right approach for the migration?"},
+            {"role": "assistant", "content": "Yes, that approach is fine."},
+        ]
+        # The five-word response clears min_response_words=3 but fails the
+        # default min_ratio of 1.5, so the ratio check is what rejects it.
+        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=3)
+        self.assertEqual(len(pairs), 0)
+
+    def test_min_words_filter(self):
+        """Assistant responses below the minimum word count are skipped."""
+        conversation = [
+            {"role": "user", "content": "Explain the project architecture in detail"},
+            {"role": "assistant", "content": "OK."},
+        ]
+        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=5)
+        self.assertEqual(len(pairs), 0)
+
+    def test_skip_non_assistant_messages(self):
+        """System and tool messages are ignored."""
+        conversation = [
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "Hello"},
+            {"role": "assistant", "content": "Hi there! How can I help you today?"},
+        ]
+        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=3)
+        self.assertEqual(len(pairs), 1)
+        self.assertEqual(pairs[0]["terse"], "Hello")
+
+    def test_multiple_pairs_from_one_session(self):
+        """A conversation with several Q&A turns yields multiple pairs."""
+        conversation = [
+            {"role": "user", "content": "First question?"},
+            {"role": "assistant", "content": "Here is a detailed and comprehensive answer that thoroughly explores multiple aspects of the subject. It provides background context and practical implications for the reader."},
+            {"role": "user", "content": "Second?"},
+            {"role": "assistant", "content": "Another comprehensive response with detailed examples. This includes practical code blocks and thorough explanations to ensure deep understanding of the topic at hand."},
+        ]
+        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_ratio=1.0)
+        self.assertEqual(len(pairs), 2)
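+
+    def test_skip_tool_result_prompts(self):
+        """Prompts that look like JSON tool results are filtered out.
+
+        Exercises the startswith('{') / 'output' heuristic; the payload
+        below is an illustrative sketch, not a captured tool result.
+        """
+        conversation = [
+            {"role": "user", "content": '{"output": "command finished with exit code 0"}'},
+            {"role": "assistant", "content": "The command completed successfully and every check passed without any warnings or errors being reported."},
+        ]
+        pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=5)
+        self.assertEqual(len(pairs), 0)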
+
+    def test_deduplication_removes_duplicates(self):
+        """Identical pairs across sessions are deduplicated."""
+        pairs = [
+            {"terse": "q1", "rich": "a1", "source": "s1", "model": "m"},
+            {"terse": "q1", "rich": "a1", "source": "s2", "model": "m"},
+            {"terse": "q2", "rich": "a2", "source": "s1", "model": "m"},
+        ]
+        unique = deduplicate_pairs(pairs)
+        self.assertEqual(len(unique), 2)
+        sources = {p["source"] for p in unique}
+        # The q2 pair comes from s1, so s1 survives no matter which copy
+        # of the duplicated q1 pair is kept
+        self.assertIn("s1", sources)
+
+    def test_integration_with_test_sessions(self):
+        """Harvester finds pairs in real test session files."""
+        repo_root = Path(__file__).parent.parent
+        test_sessions_dir = repo_root / "test_sessions"
+        if not test_sessions_dir.exists():
+            self.skipTest("test_sessions not found")
+
+        pairs = []
+        for jsonl_file in sorted(test_sessions_dir.glob("*.jsonl")):
+            pairs.extend(extract_from_jsonl_file(str(jsonl_file)))
+
+        self.assertGreater(len(pairs), 0, "Should extract at least one pair from test_sessions")
+        for p in pairs:
+            self.assertIn("terse", p)
+            self.assertIn("rich", p)
+            self.assertIn("source", p)
+            self.assertIn("model", p)
+            # Verify content exists
+            self.assertGreater(len(p["terse"]), 0)
+            self.assertGreater(len(p["rich"]), 0)
+
+
+if __name__ == "__main__":
+    unittest.main()
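+
+# Run directly from the repo root (the sys.path insert above makes the
+# scripts/ modules importable):
+#   python tests/test_session_pair_harvester.py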