Compare commits: feat/91-se...feat/92-kn
1 commit: 20a59d0cb7
scripts/knowledge_staleness_check.py (new file, 221 lines)
@@ -0,0 +1,221 @@
#!/usr/bin/env python3
"""
Knowledge Store Staleness Detector

Checks knowledge entries against their source files to detect staleness.
An entry is stale when its source file has been modified since extraction.

Usage:
    python3 scripts/knowledge_staleness_check.py                # defaults to --index knowledge/index.json
    python3 scripts/knowledge_staleness_check.py --repo /path/to/repo --index knowledge/index.json
    python3 scripts/knowledge_staleness_check.py --index knowledge/index.json --fix

Expected index.json format:
    {
      "version": 1,
      "facts": [
        {
          "fact": "...",
          "category": "fact|pitfall|pattern|tool-quirk",
          "repo": "repo-name",
          "confidence": 0.8,
          "source_file": "path/to/file.py",
          "source_hash": "abcdef...",
          "extracted_at": "2026-04-13T20:00:00Z"
        }
      ]
    }

    source_hash is the first 16 hex characters of the source file's SHA-256
    digest, as computed by compute_file_hash() below (no "sha256:" prefix).
"""

import argparse
import hashlib
import json
import sys
from pathlib import Path
from typing import Optional


def compute_file_hash(filepath: str) -> Optional[str]:
    """Compute SHA-256 hash of a file. Returns None if file not found."""
    path = Path(filepath)
    if not path.exists():
        return None
    content = path.read_bytes()
    return hashlib.sha256(content).hexdigest()[:16]

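
# Note (illustrative): only the first 16 hex characters (64 bits) of the
# digest are stored, e.g.
#   hashlib.sha256(b"hello").hexdigest()[:16] == "2cf24dba5fb0a30e"
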
def check_staleness(index_path: str, repo_root: Optional[str] = None) -> dict:
    """Check all entries in the knowledge index for staleness."""
    index = Path(index_path)
    if not index.exists():
        return {"error": f"Index not found: {index_path}"}

    data = json.loads(index.read_text())
    facts = data.get("facts", [])

    if not facts:
        return {
            "total": 0,
            "stale": 0,
            "fresh": 0,
            "no_source": 0,
            "missing_files": 0,
            "stale_entries": [],
        }

    # Determine repo root
    if repo_root:
        root = Path(repo_root)
    else:
        root = index.parent.parent  # knowledge/index.json -> repo root

    results = {
        "total": len(facts),
        "stale": 0,
        "fresh": 0,
        "no_source": 0,
        "missing_files": 0,
        "stale_entries": [],
    }

    for i, entry in enumerate(facts):
        source_file = entry.get("source_file")
        stored_hash = entry.get("source_hash")

        if not source_file:
            results["no_source"] += 1
            continue

        if not stored_hash:
            # Entry has a source file but no hash; consider it stale
            results["stale"] += 1
            results["stale_entries"].append({
                "index": i,
                "fact": entry.get("fact", "")[:100],
                "source_file": source_file,
                "reason": "no_hash",
            })
            continue

        # Compute current hash
        full_path = root / source_file
        current_hash = compute_file_hash(str(full_path))

        if current_hash is None:
            results["missing_files"] += 1
            results["stale_entries"].append({
                "index": i,
                "fact": entry.get("fact", "")[:100],
                "source_file": source_file,
                "reason": "file_missing",
            })
        elif current_hash != stored_hash:
            results["stale"] += 1
            results["stale_entries"].append({
                "index": i,
                "fact": entry.get("fact", "")[:100],
                "source_file": source_file,
                "stored_hash": stored_hash,
                "current_hash": current_hash,
                "reason": "hash_mismatch",
            })
        else:
            results["fresh"] += 1

    return results

def add_hashes_to_index(index_path: str, repo_root: Optional[str] = None) -> dict:
    """Add source hashes to entries that are missing them."""
    index = Path(index_path)
    data = json.loads(index.read_text())
    facts = data.get("facts", [])

    if repo_root:
        root = Path(repo_root)
    else:
        root = index.parent.parent

    updated = 0
    skipped = 0

    for entry in facts:
        source_file = entry.get("source_file")
        if not source_file or entry.get("source_hash"):
            skipped += 1
            continue

        full_path = root / source_file
        file_hash = compute_file_hash(str(full_path))
        if file_hash:
            entry["source_hash"] = file_hash
            updated += 1

    if updated > 0:
        index.write_text(json.dumps(data, indent=2) + "\n")

    return {"updated": updated, "skipped": skipped, "total": len(facts)}

def report_staleness(results: dict) -> str:
    """Format staleness check results as a report."""
    lines = []
    lines.append("=" * 50)
    lines.append("KNOWLEDGE STORE STALENESS REPORT")
    lines.append("=" * 50)
    lines.append(f"Total entries: {results['total']}")
    lines.append(f"Fresh: {results['fresh']}")
    lines.append(f"Stale: {results['stale']}")
    lines.append(f"No source: {results['no_source']}")
    lines.append(f"Missing files: {results['missing_files']}")
    lines.append("")

    if results["stale_entries"]:
        lines.append("STALE ENTRIES:")
        lines.append("-" * 50)
        for entry in results["stale_entries"]:
            lines.append(f"  [{entry['reason']}] {entry['source_file']}")
            lines.append(f"    {entry['fact']}")
            if entry.get("stored_hash") and entry.get("current_hash"):
                lines.append(f"    stored:  {entry['stored_hash']}")
                lines.append(f"    current: {entry['current_hash']}")
        lines.append("")

    if results["total"] > 0:
        staleness_pct = results["stale"] / results["total"] * 100
        lines.append(f"Staleness rate: {staleness_pct:.1f}%")
    else:
        lines.append("No entries to check.")

    return "\n".join(lines)

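
# For reference, what report_staleness() renders for an empty store
# (derived directly from the format strings above):
#
#   ==================================================
#   KNOWLEDGE STORE STALENESS REPORT
#   ==================================================
#   Total entries: 0
#   Fresh: 0
#   Stale: 0
#   No source: 0
#   Missing files: 0
#
#   No entries to check.
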
def main():
    parser = argparse.ArgumentParser(description="Check knowledge store for stale entries")
    parser.add_argument("--index", default="knowledge/index.json", help="Path to index.json")
    parser.add_argument("--repo", help="Repository root (default: auto-detect from index path)")
    parser.add_argument("--fix", action="store_true", help="Add missing hashes to index")
    parser.add_argument("--json", action="store_true", help="Output JSON instead of report")
    args = parser.parse_args()

    if args.fix:
        result = add_hashes_to_index(args.index, args.repo)
        if args.json:
            print(json.dumps(result, indent=2))
        else:
            print(f"Updated {result['updated']} entries with source hashes.")
            print(f"Skipped {result['skipped']} (already had hashes or no source file).")
    else:
        results = check_staleness(args.index, args.repo)
        if "error" in results:
            print(f"Error: {results['error']}", file=sys.stderr)
            sys.exit(1)

        if args.json:
            print(json.dumps(results, indent=2))
        else:
            print(report_staleness(results))


if __name__ == "__main__":
    main()
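
A minimal end-to-end sketch of the fix-then-check workflow. The index entry and
paths below are hypothetical; it assumes the commands run from a repo root that
contains the script and a README.md.

    import json
    import subprocess
    from pathlib import Path

    # Hypothetical single-entry index with no source_hash yet.
    Path("knowledge").mkdir(exist_ok=True)
    Path("knowledge/index.json").write_text(json.dumps({
        "version": 1,
        "facts": [{"fact": "Example fact", "category": "fact", "repo": "demo",
                   "confidence": 0.8, "source_file": "README.md"}],
    }, indent=2) + "\n")

    # --fix stamps the missing source_hash; the plain run then reports it fresh.
    # Editing README.md afterwards makes the same run report a hash_mismatch.
    subprocess.run(["python3", "scripts/knowledge_staleness_check.py", "--fix"], check=True)
    subprocess.run(["python3", "scripts/knowledge_staleness_check.py"], check=True)
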
scripts/session_pair_harvester.py (deleted, 234 lines)
@@ -1,234 +0,0 @@
#!/usr/bin/env python3
"""
Session Transcript → Training Pair Harvester

Scans Hermes session JSONL files for Q&A patterns and extracts
terse→rich training pairs. Outputs JSONL matching the timmy-config
training pairs spec.

Usage:
    python3 scripts/session_pair_harvester.py ~/.hermes/sessions/
    python3 scripts/session_pair_harvester.py session.jsonl --output pairs.jsonl
    python3 scripts/session_pair_harvester.py --dir ~/.hermes/sessions/ --min-ratio 2.0

Output format:
    {"terse": "user short prompt", "rich": "ai detailed response", "source": "session_id", "model": "..."}
"""

import argparse
import hashlib
import json
import sys
from pathlib import Path


def compute_hash(text: str) -> str:
    """Content hash for deduplication."""
    return hashlib.sha256(text.encode()).hexdigest()[:16]

def extract_pairs_from_session(session_data: dict, min_ratio: float = 1.5,
                               min_response_words: int = 20) -> list:
    """Extract terse→rich pairs from a single session object."""
    pairs = []
    conversations = session_data.get("conversations", [])
    session_id = session_data.get("id", "unknown")
    model = session_data.get("model", "unknown")

    seen_hashes = set()

    for i, msg in enumerate(conversations):
        # Look for assistant/gpt responses
        if msg.get("from") not in ("gpt", "assistant"):
            continue

        response_text = msg.get("value", "")
        if not response_text or len(response_text.split()) < min_response_words:
            continue

        # Find the preceding human message
        prompt_text = ""
        for j in range(i - 1, -1, -1):
            if conversations[j].get("from") == "human":
                prompt_text = conversations[j].get("value", "")
                break

        if not prompt_text:
            continue

        # Filter: skip tool results and system messages embedded as human turns
        if prompt_text.startswith("{") and "output" in prompt_text[:100]:
            continue  # likely a tool result
        if prompt_text.startswith("# SOUL.md") or prompt_text.startswith("You are"):
            continue  # system prompt leak

        # Quality filters
        prompt_words = len(prompt_text.split())
        response_words = len(response_text.split())

        # Must have a meaningful response/prompt length ratio
        if prompt_words == 0 or response_words == 0:
            continue
        ratio = response_words / prompt_words
        if ratio < min_ratio:
            continue

        # Skip responses that are mostly code
        code_blocks = response_text.count("```")
        if code_blocks >= 4 and len(response_text.replace("```", "").strip()) < 50:
            continue

        # Skip responses with tool call artifacts
        if "tool_call" in response_text[:100] or "function_call" in response_text[:100]:
            continue

        # Deduplicate by content hash
        content_hash = compute_hash(prompt_text + response_text[:200])
        if content_hash in seen_hashes:
            continue
        seen_hashes.add(content_hash)

        # Response text is kept as-is; no markdown cleanup is applied
        clean_response = response_text

        pairs.append({
            "terse": prompt_text.strip(),
            "rich": clean_response.strip(),
            "source": session_id,
            "model": model,
            "prompt_words": prompt_words,
            "response_words": response_words,
            "ratio": round(ratio, 2),
        })

    return pairs

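
# Illustrative example (hypothetical values): a single human→gpt exchange whose
# reply clears min_response_words and min_ratio yields one pair, e.g.
#   extract_pairs_from_session({
#       "id": "s1", "model": "m",
#       "conversations": [
#           {"from": "human", "value": "What is Python?"},
#           {"from": "gpt", "value": "<a detailed 20+ word answer>"},
#       ],
#   })
#   -> [{"terse": "What is Python?", "rich": "<answer>", "source": "s1", ...}]
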

def extract_from_jsonl_file(filepath: str, **kwargs) -> list:
    """Extract pairs from a session JSONL file."""
    pairs = []
    path = Path(filepath)

    if not path.exists():
        print(f"Warning: {filepath} not found", file=sys.stderr)
        return pairs

    content = path.read_text()
    lines = content.strip().split("\n")

    for line in lines:
        line = line.strip()
        if not line:
            continue
        try:
            session = json.loads(line)
        except json.JSONDecodeError:
            continue

        session_pairs = extract_pairs_from_session(session, **kwargs)
        pairs.extend(session_pairs)

    return pairs


def deduplicate_pairs(pairs: list) -> list:
    """Remove duplicate pairs across files."""
    seen = set()
    unique = []
    for pair in pairs:
        key = compute_hash(pair["terse"] + pair["rich"][:200])
        if key not in seen:
            seen.add(key)
            unique.append(pair)
    return unique

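
# Note: the dedup key hashes the prompt plus only the first 200 characters of
# the response, so two long responses that share a prompt and a 200-char prefix
# collapse to one pair even if they diverge later.
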
def main():
    parser = argparse.ArgumentParser(description="Harvest training pairs from session transcripts")
    parser.add_argument("input", nargs="?", help="Session JSONL file or directory")
    parser.add_argument("--dir", "-d", help="Directory to scan for session files")
    parser.add_argument("--output", "-o", default="harvested_pairs.jsonl", help="Output file")
    parser.add_argument("--min-ratio", type=float, default=1.5, help="Min response/prompt word ratio")
    parser.add_argument("--min-words", type=int, default=20, help="Min response word count")
    parser.add_argument("--dry-run", action="store_true", help="Print stats without writing")
    args = parser.parse_args()

    all_pairs = []
    files_scanned = 0

    scan_dir = args.dir or args.input
    if not scan_dir:
        parser.print_help()
        sys.exit(1)

    scan_path = Path(scan_dir)
    if scan_path.is_dir():
        jsonl_files = sorted(scan_path.rglob("*.jsonl"))
        print(f"Scanning {len(jsonl_files)} files in {scan_dir}...", file=sys.stderr)
        for fpath in jsonl_files:
            pairs = extract_from_jsonl_file(
                str(fpath),
                min_ratio=args.min_ratio,
                min_response_words=args.min_words,
            )
            all_pairs.extend(pairs)
            files_scanned += 1
    else:
        pairs = extract_from_jsonl_file(
            str(scan_path),
            min_ratio=args.min_ratio,
            min_response_words=args.min_words,
        )
        all_pairs.extend(pairs)
        files_scanned = 1

    # Deduplicate
    unique_pairs = deduplicate_pairs(all_pairs)

    # Stats
    if unique_pairs:
        avg_prompt = sum(p["prompt_words"] for p in unique_pairs) / len(unique_pairs)
        avg_response = sum(p["response_words"] for p in unique_pairs) / len(unique_pairs)
        avg_ratio = sum(p["ratio"] for p in unique_pairs) / len(unique_pairs)
    else:
        avg_prompt = avg_response = avg_ratio = 0

    stats = {
        "files_scanned": files_scanned,
        "raw_pairs": len(all_pairs),
        "unique_pairs": len(unique_pairs),
        "duplicates_removed": len(all_pairs) - len(unique_pairs),
        "avg_prompt_words": round(avg_prompt, 1),
        "avg_response_words": round(avg_response, 1),
        "avg_ratio": round(avg_ratio, 2),
    }

    print(json.dumps(stats, indent=2), file=sys.stderr)

    if args.dry_run:
        # Print sample pairs
        for pair in unique_pairs[:3]:
            print(f"\n--- Source: {pair['source']} (ratio: {pair['ratio']}) ---", file=sys.stderr)
            print(f"TERSE: {pair['terse'][:100]}...", file=sys.stderr)
            print(f"RICH: {pair['rich'][:150]}...", file=sys.stderr)
        return

    # Write output
    output_path = Path(args.output)
    with open(output_path, "w") as f:
        for pair in unique_pairs:
            # Strip internal fields for output
            output = {
                "terse": pair["terse"],
                "rich": pair["rich"],
                "source": pair["source"],
                "model": pair["model"],
            }
            f.write(json.dumps(output) + "\n")

    print(f"\nWrote {len(unique_pairs)} pairs to {output_path}", file=sys.stderr)


if __name__ == "__main__":
    main()
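
A minimal usage sketch for the harvester (hypothetical file names; one session
object per JSONL line, as extract_from_jsonl_file expects):

    import json
    from pathlib import Path

    # Hypothetical synthetic session whose reply clears the default filters
    # (20+ response words, response/prompt word ratio >= 1.5).
    session = {
        "id": "demo_001",
        "model": "demo-model",
        "conversations": [
            {"from": "human", "value": "What is staleness?"},
            {"from": "gpt", "value": " ".join(["word"] * 30)},
        ],
    }
    Path("demo_session.jsonl").write_text(json.dumps(session) + "\n")

    # Then: python3 scripts/session_pair_harvester.py demo_session.jsonl --output pairs.jsonl
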
Tests for session_pair_harvester (deleted file, 90 lines)
@@ -1,90 +0,0 @@
#!/usr/bin/env python3
"""Tests for session_pair_harvester."""

import sys
import os

sys.path.insert(0, os.path.dirname(__file__))
from session_pair_harvester import extract_pairs_from_session, deduplicate_pairs


def test_basic_extraction():
    session = {
        "id": "test_001",
        "model": "test-model",
        "conversations": [
            {"from": "system", "value": "You are helpful."},
            {"from": "human", "value": "What is Python?"},
            {"from": "gpt", "value": "Python is a high-level programming language known for its readability and versatility. It supports multiple paradigms including procedural, object-oriented, and functional programming. Python is widely used in web development, data science, machine learning, and automation."},
        ]
    }
    pairs = extract_pairs_from_session(session, min_ratio=1.5, min_response_words=10)
    assert len(pairs) == 1
    assert pairs[0]["terse"] == "What is Python?"
    assert "programming language" in pairs[0]["rich"]
    assert pairs[0]["source"] == "test_001"
    print("PASS: test_basic_extraction")


def test_filters_short_responses():
    session = {
        "id": "test_002",
        "model": "test",
        "conversations": [
            {"from": "human", "value": "Hi"},
            {"from": "gpt", "value": "Hello!"},
        ]
    }
    pairs = extract_pairs_from_session(session, min_ratio=1.5, min_response_words=20)
    assert len(pairs) == 0
    print("PASS: test_filters_short_responses")


def test_skips_tool_results():
    session = {
        "id": "test_003",
        "model": "test",
        "conversations": [
            {"from": "human", "value": '{"output": "file content", "exit_code": 0}'},
            {"from": "gpt", "value": "The file was read successfully. Now let me analyze the content and provide a detailed summary of what was found in the file system."},
        ]
    }
    pairs = extract_pairs_from_session(session, min_ratio=1.5, min_response_words=10)
    assert len(pairs) == 0
    print("PASS: test_skips_tool_results")


def test_deduplication():
    pairs = [
        {"terse": "What is X?", "rich": "X is Y.", "source": "s1", "model": "m"},
        {"terse": "What is X?", "rich": "X is Y.", "source": "s2", "model": "m"},
        {"terse": "What is Z?", "rich": "Z is W.", "source": "s1", "model": "m"},
    ]
    unique = deduplicate_pairs(pairs)
    assert len(unique) == 2
    print("PASS: test_deduplication")


def test_ratio_filter():
    session = {
        "id": "test_005",
        "model": "test",
        "conversations": [
            {"from": "human", "value": "Explain quantum computing in detail with examples and applications"},
            {"from": "gpt", "value": "OK."},
        ]
    }
    pairs = extract_pairs_from_session(session, min_ratio=1.5, min_response_words=10)
    assert len(pairs) == 0  # response too short relative to prompt
    print("PASS: test_ratio_filter")


if __name__ == "__main__":
    test_basic_extraction()
    test_filters_short_responses()
    test_skips_tool_results()
    test_deduplication()
    test_ratio_filter()
    print("\nAll tests passed.")