Compare commits

..

2 Commits

Author SHA1 Message Date
2a4e73aa03 Merge pull request 'fix: session_pair_harvester uses role/content format (#91)' (#240) from step35/91-feat-session-transcript-trai into main
Some checks failed
Test / pytest (push) Failing after 31s
2026-05-04 00:23:19 +00:00
Alex Payne
b1a728f5f4 feat: fix session_pair_harvester to use role/content format (#91)
Some checks failed
Test / pytest (pull_request) Failing after 8s
- Harvester used old message fields (from/value) but Hermes sessions use role/content
- Import session_reader to normalize conversations properly
- Update extract function to operate on normalized role/content messages
- Change predecessor lookup from "human"/"gpt" to "user"/"assistant"
- Add comprehensive smoke tests (8 tests, all pass)
- Verify extraction from test_sessions: 11 pairs, avg ratio 8.13
2026-04-26 00:19:56 -04:00
4 changed files with 155 additions and 330 deletions

View File

@@ -1,95 +0,0 @@
# Architecture: STEP35-compounding-intelligence-99
**Generated by:** `scripts/architecture_doc_generator.py`
## Entry Points
- `scripts/architecture_doc_generator.py`
- `scripts/refactoring_opportunity_finder.py`
- `scripts/automation_opportunity_finder.py`
- `scripts/bootstrapper.py`
- `scripts/dead_code_detector.py`
- `scripts/dedup.py`
- `scripts/dependency_graph.py`
- `scripts/freshness.py`
- `scripts/gitea_issue_parser.py`
- `scripts/harvester.py`
- `scripts/improvement_proposals.py`
- `scripts/knowledge_staleness_check.py`
- `scripts/perf_bottleneck_finder.py`
- `scripts/pr_complexity_scorer.py`
- `scripts/priority_rebalancer.py`
- `quality_gate.py`
- `scripts/sampler.py`
- `scripts/session_metadata.py`
- `scripts/session_pair_harvester.py`
- `scripts/session_reader.py`
- `scripts/test_automation_opportunity_finder.py`
- `scripts/test_bootstrapper.py`
- `scripts/test_diff_analyzer.py`
- `tests/test_freshness.py`
- `scripts/test_gitea_issue_parser.py`
- `scripts/test_harvest_prompt.py`
- `scripts/test_harvest_prompt_comprehensive.py`
- `scripts/test_harvester_pipeline.py`
- `scripts/test_improvement_proposals.py`
- `tests/test_knowledge_gap_identifier.py`
- `scripts/test_knowledge_staleness.py`
- `tests/test_quality_gate.py`
- `scripts/test_refactoring_opportunity_finder.py`
- `scripts/test_session_pair_harvester.py`
- `scripts/validate_knowledge.py`
## Module Dependencies
| Module | Imports |
|--------|---------|
| `quality_gate` | `quality_gate` |
| `scripts.harvester` | `scripts.session_reader` |
| `scripts.session_metadata` | `scripts.session_reader` |
| `scripts.test_bootstrapper` | `scripts.bootstrapper` |
| `scripts.test_harvester_pipeline` | `scripts.harvester, scripts.session_reader` |
| `scripts.test_pr_complexity_scorer` | `scripts.pr_complexity_scorer` |
| `scripts.test_priority_rebalancer` | `scripts.priority_rebalancer` |
| `scripts.test_session_pair_harvester` | `scripts.session_pair_harvester` |
| `tests.test_dedup` | `scripts.dedup` |
| `tests.test_knowledge_gap_identifier` | `scripts.knowledge_gap_identifier` |
| `tests.test_perf_bottleneck_finder` | `scripts.perf_bottleneck_finder` |
| `tests.test_quality_gate` | `quality_gate` |
## ASCII Diagram
```
*quality_gate*
└─> quality_gate
*scripts.bootstrapper*
*scripts.dedup*
*scripts.harvester*
└─> scripts.session_reader
[scripts.knowledge_gap_identifier]
*scripts.perf_bottleneck_finder*
*scripts.pr_complexity_scorer*
*scripts.priority_rebalancer*
*scripts.session_metadata*
└─> scripts.session_reader
*scripts.session_pair_harvester*
*scripts.session_reader*
*scripts.test_bootstrapper*
└─> scripts.bootstrapper
*scripts.test_harvester_pipeline*
└─> scripts.harvester
└─> scripts.session_reader
[scripts.test_pr_complexity_scorer]
└─> scripts.pr_complexity_scorer
[scripts.test_priority_rebalancer]
└─> scripts.priority_rebalancer
*scripts.test_session_pair_harvester*
└─> scripts.session_pair_harvester
[tests.test_dedup]
└─> scripts.dedup
*tests.test_knowledge_gap_identifier*
└─> scripts.knowledge_gap_identifier
[tests.test_perf_bottleneck_finder]
└─> scripts.perf_bottleneck_finder
*tests.test_quality_gate*
└─> quality_gate
```
_Generated automatically. Keep this file in sync with code changes by re-running the generator._

View File

@@ -1,179 +0,0 @@
#!/usr/bin/env python3
"""
Architecture Doc Generator — 4.4
Analyzes codebase structure and generates an architecture overview:
- Maps module dependencies (Python imports within the repo)
- Identifies entry points (main guards, CLI scripts)
- Generates ASCII diagram of module relationships
- Produces one ARCHITECTURE.md per repo
Usage:
python3 scripts/architecture_doc_generator.py [repo_root]
If no repo_root given, uses current directory.
Outputs ARCHITECTURE.md to the repo root.
"""
import argparse
import re
import sys
from collections import defaultdict
from pathlib import Path
def scan_python_files(root: Path):
"""Find all .py files under root, excluding tests/ and .git/."""
py_files = []
for path in root.rglob("*.py"):
parts = path.parts
if any(p.startswith('.') for p in parts if p != '.'):
continue
if 'test' in parts:
continue
if any(x in parts for x in ('venv', 'node_modules', '__pycache__', 'dist', 'build')):
continue
py_files.append(path)
return sorted(py_files)
def module_id(path: Path, root: Path) -> str:
"""Return a readable module identifier."""
rel = path.relative_to(root)
if rel.parent == Path('.'):
return path.stem
return str(rel.with_suffix('')).replace('/', '.')
def extract_imports(path: Path) -> list[str]:
"""Extract top-level import names from a Python file."""
try:
text = path.read_text(errors='ignore')
except Exception:
return []
imports = set()
# import X or import X.Y.Z
for m in re.finditer(r'^\s*import\s+([a-zA-Z0-9_.]+)', text, re.MULTILINE):
imports.add(m.group(1).split('.')[0])
# from X import Y (handles absolute and relative: from .X import Y)
for m in re.finditer(r'^\s*from\s+(\.+)?([a-zA-Z0-9_.]+)\s+import', text, re.MULTILINE):
imports.add(m.group(2).split('.')[0])
return sorted(imports)
def build_dependency_graph(py_files: list[Path], root: Path) -> dict[str, set[str]]:
"""Build adjacency: local_module -> set(local_modules it imports)."""
graph = defaultdict(set)
# Collect all local module identifiers
local_ids = set()
for p in py_files:
local_ids.add(module_id(p, root))
for path in py_files:
src_mod = module_id(path, root)
for imp in extract_imports(path):
# Match import to a local module by stem or by full dotted prefix
target = None
# Exact match
if imp in local_ids:
target = imp
else:
# Find module whose stem equals imp, or whose dotted name ends with .imp
for mid in local_ids:
if mid.split('.')[-1] == imp or mid == imp:
target = mid
break
if target:
graph[src_mod].add(target)
return {k: sorted(v) for k, v in graph.items()}
def find_entry_points(py_files: list[Path]) -> list[Path]:
"""Files with if __name__ == '__main__' guard or executable scripts."""
entries = []
for path in py_files:
try:
text = path.read_text(errors='ignore')
except Exception:
continue
if 'if __name__' in text and '__main__' in text:
entries.append(path)
return sorted(entries, key=lambda p: (not (p.stat().st_mode & 0o111), p.name))
def ascii_diagram(graph: dict[str, list[str]], entries: list[Path], root: Path) -> str:
"""Generate a simple ASCII box-and-arrow diagram."""
lines = []
entry_names = {module_id(p, root) for p in entries}
# All nodes
nodes = sorted(set(graph.keys()) | set().union(*graph.values()))
for node in nodes:
is_entry = node in entry_names
label = f"*{node}*" if is_entry else f"[{node}]"
lines.append(label)
for dep in graph.get(node, []):
lines.append(f" └─> {dep}")
return '\n'.join(lines)
def generate_markdown(root: Path, graph: dict, entries: list[Path], diagram: str) -> str:
root_name = root.name
md = []
md.append(f"# Architecture: {root_name}")
md.append("")
md.append("**Generated by:** `scripts/architecture_doc_generator.py`")
md.append("")
md.append("## Entry Points")
if entries:
for p in entries:
rel = p.relative_to(root)
md.append(f"- `{rel}`")
else:
md.append("_No entry points detected._")
md.append("")
md.append("## Module Dependencies")
if graph:
md.append("| Module | Imports |")
md.append("|--------|---------|")
for mod in sorted(graph.keys()):
deps = ', '.join(sorted(graph[mod])) if graph[mod] else '_none_'
md.append(f"| `{mod}` | `{deps}` |")
else:
md.append("_No dependencies detected._")
md.append("")
md.append("## ASCII Diagram")
md.append("```")
md.append(diagram)
md.append("```")
md.append("")
md.append("_Generated automatically. Keep this file in sync with code changes by re-running the generator._")
return '\n'.join(md)
def main():
parser = argparse.ArgumentParser(description="Generate architecture documentation")
parser.add_argument("repo_root", nargs="?", default=".", help="Repository root (default: current directory)")
args = parser.parse_args()
root = Path(args.repo_root).resolve()
py_files = scan_python_files(root)
if not py_files:
print("No Python files found — nothing to do.", file=sys.stderr)
sys.exit(1)
graph = build_dependency_graph(py_files, root)
entries = find_entry_points(py_files)
diagram = ascii_diagram(graph, entries, root)
markdown = generate_markdown(root, graph, entries, diagram)
out_path = root / "ARCHITECTURE.md"
out_path.write_text(markdown, encoding='utf-8')
print(f"Written: {out_path}")
print(f" Modules scanned: {len(py_files)}")
print(f" Entry points: {len(entries)}")
print(f" Dependency edges: {sum(len(v) for v in graph.values())}")
if __name__ == "__main__":
main()

View File

@@ -22,114 +22,95 @@ import sys
from pathlib import Path
from typing import Optional
from session_reader import extract_conversation, read_session
def compute_hash(text: str) -> str:
"""Content hash for deduplication."""
return hashlib.sha256(text.encode()).hexdigest()[:16]
def extract_pairs_from_session(session_data: dict, min_ratio: float = 1.5,
def extract_pairs_from_conversation(conversation: list, session_id: str, model: str,
min_ratio: float = 1.5,
min_response_words: int = 20) -> list:
"""Extract terse→rich pairs from a single session object."""
"""Extract terse→rich pairs from a normalized conversation."""
pairs = []
conversations = session_data.get("conversations", [])
session_id = session_data.get("id", "unknown")
model = session_data.get("model", "unknown")
seen_hashes = set()
for i, msg in enumerate(conversations):
# Look for assistant/gpt responses
if msg.get("from") not in ("gpt", "assistant"):
for i, msg in enumerate(conversation):
# Look for assistant responses
if msg.get('role') != 'assistant':
continue
response_text = msg.get("value", "")
response_text = msg.get('content', '')
if not response_text or len(response_text.split()) < min_response_words:
continue
# Find the preceding human message
# Find the preceding user message
prompt_text = ""
for j in range(i - 1, -1, -1):
if conversations[j].get("from") == "human":
prompt_text = conversations[j].get("value", "")
if conversation[j].get('role') == 'user':
prompt_text = conversation[j].get('content', '')
break
if not prompt_text:
continue
# Filter: skip tool results, system messages embedded as human
if prompt_text.startswith("{") and "output" in prompt_text[:100]:
continue # likely a tool result
if prompt_text.startswith("# SOUL.md") or prompt_text.startswith("You are"):
continue # system prompt leak
if prompt_text.startswith('{') and 'output' in prompt_text[:100]:
continue
if prompt_text.startswith('# SOUL.md') or prompt_text.startswith('You are'):
continue
# Quality filters
prompt_words = len(prompt_text.split())
response_words = len(response_text.split())
# Must have meaningful length ratio
if prompt_words == 0 or response_words == 0:
continue
ratio = response_words / prompt_words
if ratio < min_ratio:
continue
# Skip responses that are mostly code
code_blocks = response_text.count("```")
if code_blocks >= 4 and len(response_text.replace("```", "").strip()) < 50:
code_blocks = response_text.count('```')
if code_blocks >= 4 and len(response_text.replace('```', '').strip()) < 50:
continue
# Skip responses with tool call artifacts
if "tool_call" in response_text[:100] or "function_call" in response_text[:100]:
if 'tool_call' in response_text[:100] or 'function_call' in response_text[:100]:
continue
# Deduplicate by content hash
content_hash = compute_hash(prompt_text + response_text[:200])
if content_hash in seen_hashes:
continue
seen_hashes.add(content_hash)
# Clean up response: remove markdown headers if too many
clean_response = response_text
pairs.append({
"terse": prompt_text.strip(),
"rich": clean_response.strip(),
"source": session_id,
"model": model,
"prompt_words": prompt_words,
"response_words": response_words,
"ratio": round(ratio, 2),
'terse': prompt_text.strip(),
'rich': clean_response.strip(),
'source': session_id,
'model': model,
'prompt_words': prompt_words,
'response_words': response_words,
'ratio': round(ratio, 2),
})
return pairs
def extract_from_jsonl_file(filepath: str, **kwargs) -> list:
"""Extract pairs from a session JSONL file."""
pairs = []
path = Path(filepath)
if not path.exists():
print(f"Warning: {filepath} not found", file=sys.stderr)
return pairs
content = path.read_text()
lines = content.strip().split("\n")
for line in lines:
line = line.strip()
if not line:
continue
try:
session = json.loads(line)
except json.JSONDecodeError:
continue
session_pairs = extract_pairs_from_session(session, **kwargs)
pairs.extend(session_pairs)
return pairs
def extract_from_jsonl_file(path: str, **kwargs) -> list:
"""Read a session file and extract training pairs using normalized conversation."""
session_messages = read_session(path)
if not session_messages:
return []
conversation = extract_conversation(session_messages)
# Derive session_id and model from first real message metadata
first_msg = next((m for m in session_messages if m.get('role') or m.get('from')), {})
session_id = first_msg.get('meta_session_id', Path(path).name)
model = first_msg.get('model', 'unknown')
return extract_pairs_from_conversation(conversation, session_id, model, **kwargs)
def deduplicate_pairs(pairs: list) -> list:

View File

@@ -0,0 +1,118 @@
"""
Tests for session_pair_harvester — training pair extraction from sessions.
"""
import json
import tempfile
import unittest
from pathlib import Path
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from session_pair_harvester import (
extract_pairs_from_conversation,
extract_from_jsonl_file,
deduplicate_pairs,
compute_hash,
)
class TestSessionPairHarvester(unittest.TestCase):
def test_compute_hash_consistent(self):
h1 = compute_hash("hello world")
h2 = compute_hash("hello world")
self.assertEqual(h1, h2)
self.assertEqual(len(h1), 16)
def test_extract_simple_qa_pair(self):
"""A simple user→assistant exchange produces one pair."""
conversation = [
{"role": "user", "content": "What is the capital of France?"},
{"role": "assistant", "content": "The capital of France is Paris. It is a major European city renowned for its art, fashion, gastronomy, cultural heritage, and historical significance. The city attracts millions of tourists annually."},
]
pairs = extract_pairs_from_conversation(conversation, "test_session", "test-model")
self.assertEqual(len(pairs), 1)
self.assertEqual(pairs[0]["terse"], "What is the capital of France?")
self.assertIn("Paris", pairs[0]["rich"])
self.assertEqual(pairs[0]["source"], "test_session")
def test_min_ratio_filter(self):
"""Very short responses are filtered out."""
conversation = [
{"role": "user", "content": "Yes"},
{"role": "assistant", "content": "No."},
]
# Default min_ratio = 1.5, min_words = 20 for response
pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=3)
self.assertEqual(len(pairs), 0)
def test_min_words_filter(self):
"""Assistant responses below min word count are skipped."""
conversation = [
{"role": "user", "content": "Explain the project architecture in detail"},
{"role": "assistant", "content": "OK."},
]
pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=5)
self.assertEqual(len(pairs), 0)
def test_skip_non_assistant_messages(self):
"""System and tool messages are ignored."""
conversation = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there! How can I help you today?"},
]
pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=3)
self.assertEqual(len(pairs), 1)
self.assertEqual(pairs[0]["terse"], "Hello")
def test_multiple_pairs_from_one_session(self):
"""A conversation with several Q&A turns yields multiple pairs."""
conversation = [
{"role": "user", "content": "First question?"},
{"role": "assistant", "content": "Here is a detailed and comprehensive answer that thoroughly explores multiple aspects of the subject. It provides background context and practical implications for the reader."},
{"role": "user", "content": "Second?"},
{"role": "assistant", "content": "Another comprehensive response with detailed examples. This includes practical code blocks and thorough explanations to ensure deep understanding of the topic at hand."},
]
pairs = extract_pairs_from_conversation(conversation, "s", "m", min_ratio=1.0)
self.assertEqual(len(pairs), 2)
def test_deduplication_removes_duplicates(self):
"""Identical pairs across sessions are deduplicated."""
pairs = [
{"terse": "q1", "rich": "a1", "source": "s1", "model": "m"},
{"terse": "q1", "rich": "a1", "source": "s2", "model": "m"},
{"terse": "q2", "rich": "a2", "source": "s1", "model": "m"},
]
unique = deduplicate_pairs(pairs)
self.assertEqual(len(unique), 2)
sources = {p["source"] for p in unique}
# First unique pair can be from either s1 or s2
self.assertIn("s1", sources)
def test_integration_with_test_sessions(self):
"""Harvester finds pairs in real test session files."""
repo_root = Path(__file__).parent.parent
test_sessions_dir = repo_root / "test_sessions"
if not test_sessions_dir.exists():
self.skipTest("test_sessions not found")
pairs = []
for jsonl_file in sorted(test_sessions_dir.glob("*.jsonl")):
pairs.extend(extract_from_jsonl_file(str(jsonl_file)))
self.assertGreater(len(pairs), 0, "Should extract at least one pair from test_sessions")
for p in pairs:
self.assertIn("terse", p)
self.assertIn("rich", p)
self.assertIn("source", p)
self.assertIn("model", p)
# Verify content exists
self.assertGreater(len(p["terse"]), 0)
self.assertGreater(len(p["rich"]), 0)
if __name__ == "__main__":
unittest.main()