Compare commits


1 Commit

Author: Alex Payne
Commit: b1a728f5f4
Date: 2026-04-26 00:19:56 -04:00
Checks: Test / pytest (pull_request) failing after 8s (some checks failed)

feat: fix session_pair_harvester to use role/content format (#91)

- Harvester used old message fields (from/value) but Hermes sessions use role/content
- Import session_reader to normalize conversations properly
- Update extract function to operate on normalized role/content messages
- Change predecessor lookup from "human"/"gpt" to "user"/"assistant"
- Add comprehensive smoke tests (8 tests, all pass)
- Verify extraction from test_sessions: 11 pairs, avg ratio 8.13
4 changed files with 155 additions and 367 deletions
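The schema mismatch behind the fix: legacy ShareGPT-style sessions store each message as {"from": ..., "value": ...} with speakers named "human"/"gpt", while Hermes sessions use {"role": ..., "content": ...} with "user"/"assistant". A minimal sketch of that normalization, for orientation only; the real logic lives in session_reader, which this diff imports but does not show:

# Sketch of role/content normalization. ROLE_MAP and normalize_message are
# illustrative names; the actual implementation is session_reader's, not shown here.
ROLE_MAP = {"human": "user", "gpt": "assistant", "system": "system"}

def normalize_message(msg: dict) -> dict:
    if "role" in msg:  # already Hermes-style role/content
        return {"role": msg["role"], "content": msg.get("content", "")}
    legacy = msg.get("from", "")
    return {"role": ROLE_MAP.get(legacy, legacy), "content": msg.get("value", "")}

# normalize_message({"from": "human", "value": "hi"})
# -> {"role": "user", "content": "hi"}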

View File: scripts/graph_visualizer.py (deleted)

@@ -1,206 +0,0 @@
#!/usr/bin/env python3
"""
graph_visualizer.py — Generate visual graph representations of the knowledge graph.
Reads knowledge/index.json and renders the fact relationship graph.
Supports ASCII terminal output and DOT export for Graphviz.
Usage:
python3 scripts/graph_visualizer.py # ASCII, all nodes
python3 scripts/graph_visualizer.py --format dot # DOT output
python3 scripts/graph_visualizer.py --seed root --max-depth 2
python3 scripts/graph_visualizer.py --filter-domain hermes-agent
python3 scripts/graph_visualizer.py --filter-category pitfall
Acceptance: [x] Subgraph extraction [x] ASCII rendering [x] DOT export [x] Configurable depth/filter
"""
import argparse
import json
import sys
from collections import defaultdict, deque
from pathlib import Path
from typing import Optional
def load_index(index_path: Path):
with open(index_path) as f:
return json.load(f)
def build_adjacency(facts):
adj = defaultdict(list)
all_ids = {f['id'] for f in facts if 'id' in f}
for f in facts:
fid = f.get('id')
if not fid:
continue
for rel in f.get('related', []):
if rel in all_ids:
adj[fid].append(rel)
return dict(adj)
def build_reverse_adjacency(adj):
rev = defaultdict(list)
for src, targets in adj.items():
for tgt in targets:
rev[tgt].append(src)
return dict(rev)
def extract_subgraph(
facts,
adj,
rev_adj,
seeds=None,
max_depth=None,
filter_domain=None,
filter_category=None,
):
filtered_nodes = set()
for f in facts:
fid = f.get('id')
if not fid:
continue
if filter_domain and f.get('domain') != filter_domain:
continue
if filter_category and f.get('category') != filter_category:
continue
filtered_nodes.add(fid)
    if seeds is None:
        # No seeds: the (possibly filtered) node set is the whole subgraph.
        # An empty set here means the filters matched nothing, and main() reports that.
        return filtered_nodes
valid_seeds = [s for s in seeds if s in filtered_nodes]
if not valid_seeds:
return set()
visited = set()
queue = deque([(s, 0) for s in valid_seeds])
while queue:
node, depth = queue.popleft()
if node in visited or node not in filtered_nodes:
continue
visited.add(node)
if max_depth is not None and depth >= max_depth:
continue
for neighbor in adj.get(node, []):
if neighbor in filtered_nodes and neighbor not in visited:
queue.append((neighbor, depth + 1))
for neighbor in rev_adj.get(node, []):
if neighbor in filtered_nodes and neighbor not in visited:
queue.append((neighbor, depth + 1))
return visited
def build_fact_map(facts):
return {f['id']: f for f in facts if 'id' in f and 'fact' in f}
def render_ascii(subgraph_ids, adj, fact_map):
    lines = []
    visited = set()
    queue = deque()
    # Count inbound edges within the subgraph to identify root nodes
    inbound = defaultdict(int)
for src in subgraph_ids:
for tgt in adj.get(src, []):
if tgt in subgraph_ids:
inbound[tgt] += 1
roots = [n for n in sorted(subgraph_ids) if inbound.get(n, 0) == 0]
if not roots:
roots = sorted(subgraph_ids)
for root in roots:
queue.append((root, 0, None))
while queue:
node, depth, parent_label = queue.popleft()
if node in visited:
continue
visited.add(node)
fact = fact_map.get(node, {})
label = fact.get('fact', str(node))[:80]
category = fact.get('category', 'fact')
domain = fact.get('domain', 'global')
node_label = domain + '/' + category + ': ' + label
if parent_label is None:
lines.append(f"{' ' * depth}┌─ {node_label}")
else:
lines.append(f"{' ' * depth}├─ {node_label}")
        children = [c for c in adj.get(node, []) if c in subgraph_ids]
        for child in children:
            queue.append((child, depth + 1, node))
if len(visited) < len(subgraph_ids):
lines.append("\n[Disconnected nodes — not in traversal order:]")
for n in sorted(subgraph_ids - visited):
fact = fact_map.get(n, {})
label = fact.get('fact', n)[:60]
lines.append(f" {n}{label}")
return "\n".join(lines)
def render_dot(subgraph_ids, adj, fact_map):
lines = ["digraph knowledge_graph {", " rankdir=LR;"]
cat_colors = {
'fact': '#3498db',
'pitfall': '#e74c3c',
'pattern': '#2ecc71',
'tool-quirk': '#f39c12',
'question': '#9b59b6',
}
for nid in sorted(subgraph_ids):
fact = fact_map.get(nid, {})
category = fact.get('category', 'fact')
domain = fact.get('domain', 'global')
label = fact.get('fact', nid).replace('"', '\\"')[:80]
fillcolor = cat_colors.get(category, '#666666')
lines.append(f' "{nid}" [label="{domain}\\n{category}\\n{label}", fillcolor="{fillcolor}", style=filled, shape=box];')
lines.append("")
for src in sorted(subgraph_ids):
for tgt in adj.get(src, []):
if tgt in subgraph_ids:
lines.append(f' "{src}" -> "{tgt}";')
lines.append("}")
return "\n".join(lines)
def main():
parser = argparse.ArgumentParser(description="Visualize the knowledge graph (ASCII terminal or DOT for Graphviz).")
parser.add_argument("--index", type=Path, default=Path(__file__).parent.parent / "knowledge" / "index.json",
help="Path to knowledge/index.json")
parser.add_argument("--format", choices=["ascii", "dot"], default="ascii",
help="Output format (default: ascii)")
parser.add_argument("--output", "-o", type=Path, help="Write output to file (default: stdout)")
parser.add_argument("--seed", help="Starting fact ID (comma-sep). Omit to render full graph.")
parser.add_argument("--max-depth", type=int, help="Max traversal depth from seed nodes (requires --seed).")
parser.add_argument("--filter-domain", help="Only include facts from this domain.")
parser.add_argument("--filter-category", help="Only include facts of this category.")
args = parser.parse_args()
index = load_index(args.index)
facts = index.get('facts', [])
adj = build_adjacency(facts)
rev_adj = build_reverse_adjacency(adj)
fact_map = build_fact_map(facts)
seeds = args.seed.split(',') if args.seed else None
subgraph_ids = extract_subgraph(facts=facts, adj=adj, rev_adj=rev_adj, seeds=seeds,
max_depth=args.max_depth,
filter_domain=args.filter_domain,
filter_category=args.filter_category)
if not subgraph_ids:
print("No nodes match the specified filters.", file=sys.stderr)
sys.exit(1)
if args.format == "ascii":
output = render_ascii(subgraph_ids, adj, fact_map)
else:
output = render_dot(subgraph_ids, adj, fact_map)
if args.output:
args.output.write_text(output)
print(f"Written: {args.output}", file=sys.stderr)
else:
print(output)
if __name__ == "__main__":
main()
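A side note on the DOT mode of the file deleted above: its output was designed to be piped into Graphviz. A hypothetical driver, assuming Graphviz's dot binary is on PATH and the script is still present at scripts/graph_visualizer.py:

import subprocess

# Run the visualizer in DOT mode and render the result to PNG with Graphviz.
dot_source = subprocess.run(
    ["python3", "scripts/graph_visualizer.py", "--format", "dot"],
    capture_output=True, text=True, check=True,
).stdout
subprocess.run(
    ["dot", "-Tpng", "-o", "knowledge_graph.png"],
    input=dot_source, text=True, check=True,
)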

View File: scripts/session_pair_harvester.py

@@ -22,114 +22,95 @@ import sys
 from pathlib import Path
 from typing import Optional
+from session_reader import extract_conversation, read_session
 def compute_hash(text: str) -> str:
     """Content hash for deduplication."""
     return hashlib.sha256(text.encode()).hexdigest()[:16]
-def extract_pairs_from_session(session_data: dict, min_ratio: float = 1.5,
+def extract_pairs_from_conversation(conversation: list, session_id: str, model: str,
+                                    min_ratio: float = 1.5,
                                     min_response_words: int = 20) -> list:
-    """Extract terse→rich pairs from a single session object."""
+    """Extract terse→rich pairs from a normalized conversation."""
     pairs = []
-    conversations = session_data.get("conversations", [])
-    session_id = session_data.get("id", "unknown")
-    model = session_data.get("model", "unknown")
     seen_hashes = set()
-    for i, msg in enumerate(conversations):
-        # Look for assistant/gpt responses
-        if msg.get("from") not in ("gpt", "assistant"):
+    for i, msg in enumerate(conversation):
+        # Look for assistant responses
+        if msg.get('role') != 'assistant':
             continue
-        response_text = msg.get("value", "")
+        response_text = msg.get('content', '')
         if not response_text or len(response_text.split()) < min_response_words:
             continue
-        # Find the preceding human message
+        # Find the preceding user message
         prompt_text = ""
         for j in range(i - 1, -1, -1):
-            if conversations[j].get("from") == "human":
-                prompt_text = conversations[j].get("value", "")
+            if conversation[j].get('role') == 'user':
+                prompt_text = conversation[j].get('content', '')
                 break
         if not prompt_text:
            continue
         # Filter: skip tool results, system messages embedded as human
-        if prompt_text.startswith("{") and "output" in prompt_text[:100]:
-            continue  # likely a tool result
-        if prompt_text.startswith("# SOUL.md") or prompt_text.startswith("You are"):
-            continue  # system prompt leak
+        if prompt_text.startswith('{') and 'output' in prompt_text[:100]:
+            continue
+        if prompt_text.startswith('# SOUL.md') or prompt_text.startswith('You are'):
+            continue
         # Quality filters
         prompt_words = len(prompt_text.split())
         response_words = len(response_text.split())
         # Must have meaningful length ratio
         if prompt_words == 0 or response_words == 0:
             continue
         ratio = response_words / prompt_words
         if ratio < min_ratio:
             continue
         # Skip responses that are mostly code
-        code_blocks = response_text.count("```")
-        if code_blocks >= 4 and len(response_text.replace("```", "").strip()) < 50:
+        code_blocks = response_text.count('```')
+        if code_blocks >= 4 and len(response_text.replace('```', '').strip()) < 50:
             continue
         # Skip responses with tool call artifacts
-        if "tool_call" in response_text[:100] or "function_call" in response_text[:100]:
+        if 'tool_call' in response_text[:100] or 'function_call' in response_text[:100]:
             continue
         # Deduplicate by content hash
         content_hash = compute_hash(prompt_text + response_text[:200])
         if content_hash in seen_hashes:
             continue
         seen_hashes.add(content_hash)
         # Clean up response: remove markdown headers if too many
         clean_response = response_text
         pairs.append({
-            "terse": prompt_text.strip(),
-            "rich": clean_response.strip(),
-            "source": session_id,
-            "model": model,
-            "prompt_words": prompt_words,
-            "response_words": response_words,
-            "ratio": round(ratio, 2),
+            'terse': prompt_text.strip(),
+            'rich': clean_response.strip(),
+            'source': session_id,
+            'model': model,
+            'prompt_words': prompt_words,
+            'response_words': response_words,
+            'ratio': round(ratio, 2),
         })
     return pairs
-def extract_from_jsonl_file(filepath: str, **kwargs) -> list:
-    """Extract pairs from a session JSONL file."""
-    pairs = []
-    path = Path(filepath)
-    if not path.exists():
-        print(f"Warning: {filepath} not found", file=sys.stderr)
-        return pairs
-    content = path.read_text()
-    lines = content.strip().split("\n")
-    for line in lines:
-        line = line.strip()
-        if not line:
-            continue
-        try:
-            session = json.loads(line)
-        except json.JSONDecodeError:
-            continue
-        session_pairs = extract_pairs_from_session(session, **kwargs)
-        pairs.extend(session_pairs)
-    return pairs
+def extract_from_jsonl_file(path: str, **kwargs) -> list:
+    """Read a session file and extract training pairs using normalized conversation."""
+    session_messages = read_session(path)
+    if not session_messages:
+        return []
+    conversation = extract_conversation(session_messages)
+    # Derive session_id and model from first real message metadata
+    first_msg = next((m for m in session_messages if m.get('role') or m.get('from')), {})
+    session_id = first_msg.get('meta_session_id', Path(path).name)
+    model = first_msg.get('model', 'unknown')
+    return extract_pairs_from_conversation(conversation, session_id, model, **kwargs)
 def deduplicate_pairs(pairs: list) -> list:
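The rewritten extract_from_jsonl_file delegates file parsing to two session_reader helpers that are outside this diff. Inferred from the call sites, their contract looks roughly like the sketch below (hedged stand-ins assuming one JSON message object per JSONL line; not the real session_reader code):

import json
from pathlib import Path

def read_session(path: str) -> list:
    """Stand-in: parse a JSONL session file into raw message dicts."""
    messages = []
    for line in Path(path).read_text().splitlines():
        line = line.strip()
        if not line:
            continue
        try:
            messages.append(json.loads(line))
        except json.JSONDecodeError:
            continue  # tolerate malformed lines
    return messages

def extract_conversation(messages: list) -> list:
    """Stand-in: keep only role/content chat turns, dropping other records."""
    return [{"role": m["role"], "content": m.get("content", "")}
            for m in messages if m.get("role")]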

View File: scripts/test_graph_visualizer.py (deleted)

@@ -1,105 +0,0 @@
#!/usr/bin/env python3
"""
Tests for graph_visualizer.py — smoke test + subgraph logic.
Run: python3 scripts/test_graph_visualizer.py
"""
import json, sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).resolve().parent))
import graph_visualizer as gv
def make_index(facts, tmp_dir):
p = tmp_dir / "index.json"
p.write_text(json.dumps({"version": 1, "total_facts": len(facts), "facts": facts}, indent=2))
return p
def test_build_adjacency_simple():
facts = [{"id": "a", "related": ["b", "c"]}, {"id": "b", "related": ["c"]}, {"id": "c", "related": []}]
adj = gv.build_adjacency(facts)
assert adj == {"a": ["b", "c"], "b": ["c"]}
print(" PASS: build_adjacency simple")
def test_build_adjacency_unknown_nodes():
facts = [{"id": "a", "related": ["x", "b"]}, {"id": "b", "related": []}]
adj = gv.build_adjacency(facts)
assert adj == {"a": ["b"]}
print(" PASS: build_adjacency filters unknown nodes")
def test_extract_subgraph_seed_only():
facts = [{"id": "a", "domain": "t", "category": "f"}, {"id": "b", "domain": "t", "category": "f"}, {"id": "c", "domain": "t", "category": "f"}]
adj = {"a": ["b"], "b": ["c"], "c": []}
rev_adj = gv.build_reverse_adjacency(adj)
sub = gv.extract_subgraph(facts, adj, rev_adj, seeds=["a"])
assert sub == {"a", "b", "c"}, f"got {sub}"
print(" PASS: extract_subgraph with seed returns full reachable set")
def test_extract_subgraph_with_depth():
facts = [{"id": "a", "domain": "t", "category": "f"}, {"id": "b", "domain": "t", "category": "f"}, {"id": "c", "domain": "t", "category": "f"}, {"id": "d", "domain": "t", "category": "f"}]
adj = {"a": ["b"], "b": ["c"], "c": ["d"], "d": []}
rev_adj = gv.build_reverse_adjacency(adj)
sub = gv.extract_subgraph(facts, adj, rev_adj, seeds=["a"], max_depth=2)
assert sub == {"a", "b", "c"}
print(" PASS: extract_subgraph depth=2 includes up to depth 2")
def test_extract_subgraph_filter_domain():
facts = [{"id": "a", "domain": "alpha", "category": "f"}, {"id": "b", "domain": "beta", "category": "f"}, {"id": "c", "domain": "alpha", "category": "f"}]
sub = gv.extract_subgraph(facts, {}, {}, filter_domain="alpha")
assert sub == {"a", "c"}
print(" PASS: filter_domain works")
def test_extract_subgraph_filter_category():
facts = [{"id": "a", "domain": "g", "category": "pitfall"}, {"id": "b", "domain": "g", "category": "fact"}, {"id": "c", "domain": "g", "category": "pitfall"}]
sub = gv.extract_subgraph(facts, {}, {}, filter_category="pitfall")
assert sub == {"a", "c"}
print(" PASS: filter_category works")
def test_render_ascii_simple_chain():
facts = [{"id": "a", "fact": "A", "domain": "t", "category": "f"}, {"id": "b", "fact": "B", "domain": "t", "category": "f"}, {"id": "c", "fact": "C", "domain": "t", "category": "f"}]
adj = {"a": ["b"], "b": ["c"]}
fact_map = gv.build_fact_map(facts)
out = gv.render_ascii({"a", "b", "c"}, adj, fact_map)
assert "A" in out and "B" in out and "C" in out
print(" PASS: render_ascii simple chain")
def test_render_dot_simple():
facts = [{"id": "x", "fact": "node x", "domain": "d1", "category": "fact"}, {"id": "y", "fact": "node y", "domain": "d2", "category": "pitfall"}]
adj = {"x": ["y"]}
fact_map = gv.build_fact_map(facts)
out = gv.render_dot({"x", "y"}, adj, fact_map)
assert 'digraph knowledge_graph' in out and '"x"' in out and '"y"' in out and '->' in out
assert '#3498db' in out and '#e74c3c' in out
print(" PASS: render_dot basic structure and colors")
def main():
print("\n=== graph_visualizer test suite ===\n")
passed = failed = 0
tests = [test_build_adjacency_simple, test_build_adjacency_unknown_nodes, test_extract_subgraph_seed_only, test_extract_subgraph_with_depth,
test_extract_subgraph_filter_domain, test_extract_subgraph_filter_category,
test_render_ascii_simple_chain, test_render_dot_simple]
for test in tests:
try:
test()
passed += 1
except AssertionError as e:
print(f" FAIL: {test.__name__}{e}")
failed += 1
except Exception as e:
print(f" ERROR: {test.__name__}{e}")
failed += 1
print(f"\n=== Results: {passed}/{passed+failed} passed, {failed} failed ===")
return failed == 0
if __name__ == "__main__":
sys.exit(0 if main() else 1)

View File: tests/test_session_pair_harvester.py (new file; path inferred from its sys.path setup)

@@ -0,0 +1,118 @@
"""
Tests for session_pair_harvester — training pair extraction from sessions.
"""
import sys
import unittest
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from session_pair_harvester import (
extract_pairs_from_conversation,
extract_from_jsonl_file,
deduplicate_pairs,
compute_hash,
)
class TestSessionPairHarvester(unittest.TestCase):
def test_compute_hash_consistent(self):
h1 = compute_hash("hello world")
h2 = compute_hash("hello world")
self.assertEqual(h1, h2)
self.assertEqual(len(h1), 16)
def test_extract_simple_qa_pair(self):
"""A simple user→assistant exchange produces one pair."""
conversation = [
{"role": "user", "content": "What is the capital of France?"},
{"role": "assistant", "content": "The capital of France is Paris. It is a major European city renowned for its art, fashion, gastronomy, cultural heritage, and historical significance. The city attracts millions of tourists annually."},
]
pairs = extract_pairs_from_conversation(conversation, "test_session", "test-model")
self.assertEqual(len(pairs), 1)
self.assertEqual(pairs[0]["terse"], "What is the capital of France?")
self.assertIn("Paris", pairs[0]["rich"])
self.assertEqual(pairs[0]["source"], "test_session")
def test_min_ratio_filter(self):
"""Very short responses are filtered out."""
conversation = [
{"role": "user", "content": "Yes"},
{"role": "assistant", "content": "No."},
]
# Default min_ratio = 1.5, min_words = 20 for response
pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=3)
self.assertEqual(len(pairs), 0)
def test_min_words_filter(self):
"""Assistant responses below min word count are skipped."""
conversation = [
{"role": "user", "content": "Explain the project architecture in detail"},
{"role": "assistant", "content": "OK."},
]
pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=5)
self.assertEqual(len(pairs), 0)
def test_skip_non_assistant_messages(self):
"""System and tool messages are ignored."""
conversation = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there! How can I help you today?"},
]
pairs = extract_pairs_from_conversation(conversation, "s", "m", min_response_words=3)
self.assertEqual(len(pairs), 1)
self.assertEqual(pairs[0]["terse"], "Hello")
def test_multiple_pairs_from_one_session(self):
"""A conversation with several Q&A turns yields multiple pairs."""
conversation = [
{"role": "user", "content": "First question?"},
{"role": "assistant", "content": "Here is a detailed and comprehensive answer that thoroughly explores multiple aspects of the subject. It provides background context and practical implications for the reader."},
{"role": "user", "content": "Second?"},
{"role": "assistant", "content": "Another comprehensive response with detailed examples. This includes practical code blocks and thorough explanations to ensure deep understanding of the topic at hand."},
]
pairs = extract_pairs_from_conversation(conversation, "s", "m", min_ratio=1.0)
self.assertEqual(len(pairs), 2)
def test_deduplication_removes_duplicates(self):
"""Identical pairs across sessions are deduplicated."""
pairs = [
{"terse": "q1", "rich": "a1", "source": "s1", "model": "m"},
{"terse": "q1", "rich": "a1", "source": "s2", "model": "m"},
{"terse": "q2", "rich": "a2", "source": "s1", "model": "m"},
]
unique = deduplicate_pairs(pairs)
self.assertEqual(len(unique), 2)
sources = {p["source"] for p in unique}
# First unique pair can be from either s1 or s2
self.assertIn("s1", sources)
def test_integration_with_test_sessions(self):
"""Harvester finds pairs in real test session files."""
repo_root = Path(__file__).parent.parent
test_sessions_dir = repo_root / "test_sessions"
if not test_sessions_dir.exists():
self.skipTest("test_sessions not found")
pairs = []
for jsonl_file in sorted(test_sessions_dir.glob("*.jsonl")):
pairs.extend(extract_from_jsonl_file(str(jsonl_file)))
self.assertGreater(len(pairs), 0, "Should extract at least one pair from test_sessions")
for p in pairs:
self.assertIn("terse", p)
self.assertIn("rich", p)
self.assertIn("source", p)
self.assertIn("model", p)
# Verify content exists
self.assertGreater(len(p["terse"]), 0)
self.assertGreater(len(p["rich"]), 0)
if __name__ == "__main__":
unittest.main()
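For reference, the verification numbers in the commit message (11 pairs, avg ratio 8.13) can be reproduced with a short driver along these lines; paths assume it runs from the repo root, mirroring the integration test above:

import sys
from pathlib import Path

sys.path.insert(0, "scripts")  # assumes execution from the repo root
from session_pair_harvester import extract_from_jsonl_file

pairs = []
for jsonl_file in sorted(Path("test_sessions").glob("*.jsonl")):
    pairs.extend(extract_from_jsonl_file(str(jsonl_file)))

ratios = [p["ratio"] for p in pairs]
avg = sum(ratios) / len(ratios) if ratios else 0.0
print(f"{len(pairs)} pairs, avg ratio {avg:.2f}")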