Compare commits
2 Commits
feat/sessi...feat/179-s
| Author | SHA1 | Date |
|---|---|---|
| | 81c02f6709 | |
| | c2c3c6a3b9 | |
scripts/knowledge_staleness_check.py (new file, 131 lines)
@@ -0,0 +1,131 @@
#!/usr/bin/env python3
"""
Knowledge Store Staleness Detector — Detect stale knowledge entries by comparing source file hashes.

Usage:
    python3 scripts/knowledge_staleness_check.py --index knowledge/index.json
    python3 scripts/knowledge_staleness_check.py --index knowledge/index.json --json
    python3 scripts/knowledge_staleness_check.py --index knowledge/index.json --fix
"""

import argparse
import hashlib
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Any, Optional


def compute_file_hash(filepath: str) -> Optional[str]:
    """Compute SHA-256 hash of a file. Returns None if file doesn't exist."""
    try:
        with open(filepath, "rb") as f:
            return "sha256:" + hashlib.sha256(f.read()).hexdigest()
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        return None


def check_staleness(index_path: str, repo_root: str = ".") -> List[Dict[str, Any]]:
    """Check all entries in the knowledge index for staleness.

    Returns a list of entries with staleness info:
    - status: "fresh" | "stale" | "missing_source" | "no_hash" | "no_source"
    - current_hash: computed hash (if the source exists)
    - stored_hash: hash from the index
    """
    with open(index_path) as f:
        data = json.load(f)

    facts = data.get("facts", [])
    results = []

    for entry in facts:
        source_file = entry.get("source_file")
        stored_hash = entry.get("source_hash")

        if not source_file:
            results.append({**entry, "status": "no_source", "current_hash": None})
            continue

        full_path = os.path.join(repo_root, source_file)
        current_hash = compute_file_hash(full_path)

        if current_hash is None:
            results.append({**entry, "status": "missing_source", "current_hash": None})
        elif not stored_hash:
            results.append({**entry, "status": "no_hash", "current_hash": current_hash})
        elif current_hash != stored_hash:
            results.append({**entry, "status": "stale", "current_hash": current_hash})
        else:
            results.append({**entry, "status": "fresh", "current_hash": current_hash})

    return results


def fix_hashes(index_path: str, repo_root: str = ".") -> int:
    """Add hashes to entries missing them. Returns the count of fixed entries."""
    with open(index_path) as f:
        data = json.load(f)

    fixed = 0
    for entry in data.get("facts", []):
        if entry.get("source_hash"):
            continue
        source_file = entry.get("source_file")
        if not source_file:
            continue
        full_path = os.path.join(repo_root, source_file)
        h = compute_file_hash(full_path)
        if h:
            entry["source_hash"] = h
            fixed += 1

    with open(index_path, "w") as f:
        json.dump(data, f, indent=2)

    return fixed


def main():
    parser = argparse.ArgumentParser(description="Check knowledge store staleness")
    parser.add_argument("--index", required=True, help="Path to knowledge/index.json")
    parser.add_argument("--repo", default=".", help="Repo root for source file resolution")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    parser.add_argument("--fix", action="store_true", help="Add hashes to entries missing them")

    args = parser.parse_args()

    if args.fix:
        fixed = fix_hashes(args.index, args.repo)
        print(f"Fixed {fixed} entries with missing hashes.")
        return

    results = check_staleness(args.index, args.repo)

    if args.json:
        print(json.dumps(results, indent=2))
    else:
        stale = [r for r in results if r["status"] != "fresh"]
        fresh = [r for r in results if r["status"] == "fresh"]

        print("Knowledge Store Staleness Check")
        print(f"  Total entries: {len(results)}")
        print(f"  Fresh: {len(fresh)}")
        print(f"  Stale/Issues: {len(stale)}")
        print()

        if stale:
            print("Issues found:")
            for r in stale:
                status = r["status"]
                fact = r.get("fact", "?")[:60]
                source = r.get("source_file", "?")
                print(f"  [{status}] {source}: {fact}")
        else:
            print("All entries are fresh!")


if __name__ == "__main__":
    main()
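For reference, the index format the checker consumes can be inferred from the tests further down: a top-level `"facts"` array whose entries carry a `fact` string, a `source_file` path relative to the repo root, and a `"sha256:"`-prefixed `source_hash`. A minimal sketch of producing such an entry (the fact text is illustrative, not taken from the actual knowledge store, and the script assumes it is run from the repo root):

```python
import hashlib
import json

# Illustrative entry: hash a real file from this diff; the fact text is made up.
source_file = "scripts/knowledge_staleness_check.py"
with open(source_file, "rb") as f:
    digest = "sha256:" + hashlib.sha256(f.read()).hexdigest()

index = {
    "facts": [
        {
            "fact": "Staleness is detected by comparing stored and current source hashes.",
            "source_file": source_file,
            "source_hash": digest,
        }
    ]
}
print(json.dumps(index, indent=2))  # would normally be written to knowledge/index.json
```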
scripts/session_metadata.py (deleted, 276 lines)
@@ -1,276 +0,0 @@
#!/usr/bin/env python3
"""
session_metadata.py - Extract structured metadata from Hermes session transcripts.
Works alongside session_reader.py to provide higher-level session analysis.
"""

import json
import re
import sys
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any

# Import from session_reader (the canonical reader)
from session_reader import read_session


@dataclass
class SessionSummary:
    """Structured summary of a Hermes session transcript."""
    session_id: str
    model: str
    repo: str
    outcome: str
    message_count: int
    tool_calls: int
    duration_estimate: str
    key_actions: List[str]
    errors_encountered: List[str]
    start_time: Optional[str] = None
    end_time: Optional[str] = None
    total_tokens_estimate: int = 0
    user_messages: int = 0
    assistant_messages: int = 0
    tool_outputs: int = 0


def extract_session_metadata(file_path: str) -> SessionSummary:
    """
    Extract structured metadata from a Hermes session JSONL transcript.
    Uses session_reader.read_session() for file reading.
    """
    session_id = Path(file_path).stem
    messages = []
    model = "unknown"
    repo = "unknown"
    tool_calls_count = 0
    key_actions = []
    errors = []
    start_time = None
    end_time = None
    total_tokens = 0

    # Common repo patterns to look for
    repo_patterns = [
        r"(?:the-nexus|compounding-intelligence|timmy-config|hermes-agent)",
        r"(?:forge\.alexanderwhitestone\.com/([^/]+/[^/\s]+))",
        r"(?:github\.com/([^/]+/[^/\s]+))",
        r"(?:Timmy_Foundation/([^/\s]+))",
    ]

    try:
        # Use the canonical reader from session_reader.py
        messages = read_session(file_path)
    except FileNotFoundError:
        return SessionSummary(
            session_id=session_id,
            model="unknown",
            repo="unknown",
            outcome="failure",
            message_count=0,
            tool_calls=0,
            duration_estimate="0m",
            key_actions=[],
            errors_encountered=[f"File not found: {file_path}"]
        )

    # Process messages for metadata
    for entry in messages:
        # Extract model from assistant messages
        if entry.get("role") == "assistant" and entry.get("model"):
            model = entry["model"]

        # Extract timestamps
        if entry.get("timestamp"):
            ts = entry["timestamp"]
            if start_time is None:
                start_time = ts
            end_time = ts

        # Count tool calls
        if entry.get("tool_calls"):
            tool_calls_count += len(entry["tool_calls"])
            for tc in entry["tool_calls"]:
                if tc.get("function", {}).get("name"):
                    action = f"{tc['function']['name']}"
                    if action not in key_actions:
                        key_actions.append(action)

        # Estimate tokens from content length
        content = entry.get("content", "")
        if isinstance(content, str):
            total_tokens += len(content.split())
        elif isinstance(content, list):
            for item in content:
                if isinstance(item, dict) and "text" in item:
                    total_tokens += len(item["text"].split())

        # Look for repo mentions in content
        if entry.get("content"):
            content_str = str(entry["content"])
            for pattern in repo_patterns:
                match = re.search(pattern, content_str, re.IGNORECASE)
                if match:
                    if match.groups():
                        repo = match.group(1)
                    else:
                        repo = match.group(0)
                    break

        # Look for error messages
        if entry.get("role") == "tool" and entry.get("is_error"):
            error_msg = entry.get("content", "Unknown error")
            if isinstance(error_msg, str) and len(error_msg) < 200:
                errors.append(error_msg[:200])

    # Count message types
    user_messages = sum(1 for m in messages if m.get("role") == "user")
    assistant_messages = sum(1 for m in messages if m.get("role") == "assistant")
    tool_outputs = sum(1 for m in messages if m.get("role") == "tool")

    # Calculate duration estimate
    duration_estimate = "unknown"
    if start_time and end_time:
        try:
            # Try to parse timestamps
            start_dt = None
            end_dt = None

            # Handle various timestamp formats
            for fmt in ["%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d %H:%M:%S"]:
                try:
                    if start_dt is None:
                        start_dt = datetime.strptime(start_time, fmt)
                    if end_dt is None:
                        end_dt = datetime.strptime(end_time, fmt)
                except ValueError:
                    continue

            if start_dt and end_dt:
                duration = end_dt - start_dt
                minutes = duration.total_seconds() / 60
                duration_estimate = f"{minutes:.0f}m"
        except Exception:
            pass

    # Classify outcome
    outcome = "unknown"
    if errors:
        # Check if any errors are fatal
        fatal_errors = any("405" in e or "permission" in e.lower() or "authentication" in e.lower()
                           for e in errors)
        if fatal_errors:
            outcome = "failure"
        else:
            outcome = "partial"
    elif messages:
        # Check last message for success indicators
        last_msg = messages[-1]
        if last_msg.get("role") == "assistant":
            content = last_msg.get("content", "")
            if isinstance(content, str):
                success_indicators = ["done", "completed", "success", "merged", "pushed"]
                if any(indicator in content.lower() for indicator in success_indicators):
                    outcome = "success"
                else:
                    outcome = "unknown"

    # Deduplicate key actions (keep unique, limit to 10)
    unique_actions = []
    for action in key_actions:
        if action not in unique_actions:
            unique_actions.append(action)
        if len(unique_actions) >= 10:
            break

    # Deduplicate errors (keep unique, limit to 5)
    unique_errors = []
    for error in errors:
        if error not in unique_errors:
            unique_errors.append(error)
        if len(unique_errors) >= 5:
            break

    return SessionSummary(
        session_id=session_id,
        model=model,
        repo=repo,
        outcome=outcome,
        message_count=len(messages),
        tool_calls=tool_calls_count,
        duration_estimate=duration_estimate,
        key_actions=unique_actions,
        errors_encountered=unique_errors,
        start_time=start_time,
        end_time=end_time,
        total_tokens_estimate=total_tokens,
        user_messages=user_messages,
        assistant_messages=assistant_messages,
        tool_outputs=tool_outputs
    )


def process_session_directory(directory_path: str, output_file: Optional[str] = None) -> List[SessionSummary]:
    """
    Process all JSONL files in a directory.
    """
    directory = Path(directory_path)
    if not directory.exists():
        print(f"Error: Directory {directory_path} does not exist", file=sys.stderr)
        return []

    jsonl_files = list(directory.glob("*.jsonl"))
    if not jsonl_files:
        print(f"Warning: No JSONL files found in {directory_path}", file=sys.stderr)
        return []

    summaries = []
    for jsonl_file in sorted(jsonl_files):
        print(f"Processing {jsonl_file.name}...", file=sys.stderr)
        summary = extract_session_metadata(str(jsonl_file))
        summaries.append(summary)

    if output_file:
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump([asdict(s) for s in summaries], f, indent=2)
        print(f"Wrote {len(summaries)} summaries to {output_file}", file=sys.stderr)

    return summaries


def main():
    """CLI entry point."""
    import argparse

    parser = argparse.ArgumentParser(description="Extract metadata from Hermes session JSONL transcripts")
    parser.add_argument("path", help="Path to JSONL file or directory of session files")
    parser.add_argument("-o", "--output", help="Output JSON file (default: stdout)")
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")

    args = parser.parse_args()

    path = Path(args.path)

    if path.is_file():
        summary = extract_session_metadata(str(path))
        if args.output:
            with open(args.output, 'w') as f:
                json.dump(asdict(summary), f, indent=2)
            print(f"Wrote summary to {args.output}", file=sys.stderr)
        else:
            print(json.dumps(asdict(summary), indent=2))

    elif path.is_dir():
        summaries = process_session_directory(str(path), args.output)
        if not args.output:
            print(json.dumps([asdict(s) for s in summaries], indent=2))

    else:
        print(f"Error: {args.path} is not a file or directory", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
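Before its removal, the module could be driven either via its CLI (`python3 scripts/session_metadata.py <path>`) or programmatically. A minimal sketch of the latter, kept here for reference, assuming `scripts/` is on `sys.path` and using a placeholder transcript path:

```python
import json
from dataclasses import asdict

from session_metadata import extract_session_metadata  # module removed in this commit

# "sessions/example.jsonl" is a hypothetical path, not a real transcript.
summary = extract_session_metadata("sessions/example.jsonl")
print(json.dumps(asdict(summary), indent=2))
```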
scripts/test_knowledge_staleness.py (new file, 129 lines)
@@ -0,0 +1,129 @@
#!/usr/bin/env python3
"""Tests for scripts/knowledge_staleness_check.py — 8 tests."""

import json
import os
import sys
import tempfile

sys.path.insert(0, os.path.dirname(__file__) or ".")
import importlib.util
spec = importlib.util.spec_from_file_location("ks", os.path.join(os.path.dirname(__file__) or ".", "knowledge_staleness_check.py"))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
check_staleness = mod.check_staleness
fix_hashes = mod.fix_hashes
compute_file_hash = mod.compute_file_hash


def test_fresh_entry():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("print('hello')")
        h = compute_file_hash(src)
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "hello", "source_file": "source.py", "source_hash": h}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "fresh"
    print("PASS: test_fresh_entry")


def test_stale_entry():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("original content")
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "old", "source_file": "source.py", "source_hash": "sha256:wrong"}]}, f)
        # Now change the source
        with open(src, "w") as f:
            f.write("modified content")
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "stale"
    print("PASS: test_stale_entry")


def test_missing_source():
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "gone", "source_file": "nonexistent.py", "source_hash": "sha256:abc"}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "missing_source"
    print("PASS: test_missing_source")


def test_no_hash():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("content")
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "no hash", "source_file": "source.py"}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "no_hash"
        assert results[0]["current_hash"].startswith("sha256:")
    print("PASS: test_no_hash")


def test_no_source_field():
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "orphan"}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "no_source"
    print("PASS: test_no_source_field")


def test_fix_hashes():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("content for hashing")
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "needs hash", "source_file": "source.py"}]}, f)
        fixed = fix_hashes(idx, tmpdir)
        assert fixed == 1
        # Verify hash was added
        with open(idx) as f:
            data = json.load(f)
        assert data["facts"][0]["source_hash"].startswith("sha256:")
    print("PASS: test_fix_hashes")


def test_empty_index():
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": []}, f)
        results = check_staleness(idx, tmpdir)
        assert results == []
    print("PASS: test_empty_index")


def test_compute_hash_nonexistent():
    h = compute_file_hash("/nonexistent/path/file.py")
    assert h is None
    print("PASS: test_compute_hash_nonexistent")


def run_all():
    test_fresh_entry()
    test_stale_entry()
    test_missing_source()
    test_no_hash()
    test_no_source_field()
    test_fix_hashes()
    test_empty_index()
    test_compute_hash_nonexistent()
    print("\nAll 8 tests passed!")


if __name__ == "__main__":
    run_all()
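The tests are plain asserts with a `run_all()` entry point, so they run directly with `python3 scripts/test_knowledge_staleness.py`. The checker is equally usable as a library; a small sketch, assuming `scripts/` is on `sys.path` and using the index path from the script's docstring:

```python
from knowledge_staleness_check import check_staleness

# Flag every entry whose source file changed, vanished, or was never hashed.
results = check_staleness("knowledge/index.json", repo_root=".")
for r in results:
    if r["status"] != "fresh":
        print(r["status"], r.get("source_file"))
```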