Compare commits: burn/172-1 ... feat/sessi (1 commit)

Commit: 160dfcf419
@@ -1,275 +0,0 @@
"""
Knowledge Gap Identifier — Pipeline 10.7

Cross-references code, docs, and tests to find gaps:

- Undocumented functions/classes
- Untested code paths
- Documented but missing implementations
- Test files without corresponding source

Produces a gap report with severity and suggestions.
"""

from __future__ import annotations

import ast
import os
import re
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, Set


class GapSeverity(Enum):
    INFO = "info"
    WARNING = "warning"
    ERROR = "error"


class GapType(Enum):
    UNDOCUMENTED = "undocumented"
    UNTESTED = "untested"
    MISSING_IMPLEMENTATION = "missing_implementation"
    ORPHAN_TEST = "orphan_test"
    STALE_DOC = "stale_doc"


@dataclass
class Gap:
    """A single knowledge gap."""
    gap_type: GapType
    severity: GapSeverity
    file: str
    line: Optional[int]
    name: str
    description: str
    suggestion: str


@dataclass
class GapReport:
    """Full gap analysis report."""
    repo_path: str
    gaps: List[Gap] = field(default_factory=list)
    stats: Dict[str, int] = field(default_factory=dict)

    def summary(self) -> str:
        lines = [f"Gap Report for {self.repo_path}", "=" * 40]
        by_type: Dict[str, List[Gap]] = {}
        for g in self.gaps:
            by_type.setdefault(g.gap_type.value, []).append(g)

        for gtype, items in sorted(by_type.items()):
            lines.append(f"\n{gtype.upper()} ({len(items)}):")
            for g in items:
                loc = f"{g.file}:{g.line}" if g.line else g.file
                lines.append(f"  [{g.severity.value}] {g.name} @ {loc}")
                lines.append(f"    {g.description}")

        lines.append(f"\nTotal gaps: {len(self.gaps)}")
        self.stats = {k: len(v) for k, v in by_type.items()}
        return "\n".join(lines)

    def to_dict(self) -> dict:
        # Key stats by the enum's string value so the result is
        # JSON-serializable (GapType members are not valid JSON keys).
        stats: Dict[str, int] = {}
        for g in self.gaps:
            stats[g.gap_type.value] = stats.get(g.gap_type.value, 0) + 1
        return {
            "repo_path": self.repo_path,
            "total_gaps": len(self.gaps),
            "stats": stats,
            "gaps": [
                {
                    "type": g.gap_type.value,
                    "severity": g.severity.value,
                    "file": g.file,
                    "line": g.line,
                    "name": g.name,
                    "description": g.description,
                    "suggestion": g.suggestion,
                }
                for g in self.gaps
            ],
        }


def _collect_python_files(root: Path) -> List[Path]:
    """Collect .py files, excluding venv/node_modules/.git."""
    skip = {".git", "venv", "env", ".venv", "node_modules", "__pycache__", ".tox", ".mypy_cache"}
    files = []
    for dirpath, dirnames, filenames in os.walk(root):
        dirnames[:] = [d for d in dirnames if d not in skip]
        for f in filenames:
            if f.endswith(".py"):
                files.append(Path(dirpath) / f)
    return files


def _extract_python_symbols(filepath: Path) -> Set[str]:
    """Extract top-level function and class names from a Python file."""
    symbols: Set[str] = set()
    try:
        source = filepath.read_text(encoding="utf-8", errors="replace")
        tree = ast.parse(source, filename=str(filepath))
    except (SyntaxError, UnicodeDecodeError):
        return symbols

    for node in ast.iter_child_nodes(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
            symbols.add(node.name)
    return symbols


def _extract_doc_symbols(filepath: Path) -> Set[str]:
    """Extract function/class names mentioned in markdown docs."""
    symbols: Set[str] = set()
    try:
        text = filepath.read_text(encoding="utf-8", errors="replace")
    except (UnicodeDecodeError, OSError):
        return symbols

    # Match backtick-quoted identifiers: `ClassName`, `func_name`, `func()`
    for m in re.finditer(r"`([A-Za-z_]\w+)(?:\(\))?`", text):
        symbols.add(m.group(1))
    # Match ## ClassName or ### func_name headings
    for m in re.finditer(r"^#{1,4}\s+(\w+)", text, re.MULTILINE):
        symbols.add(m.group(1))
    return symbols


def _collect_test_files(root: Path) -> Dict[str, Path]:
    """Map test module names to their file paths."""
    test_map: Dict[str, Path] = {}
    for dirpath, dirnames, filenames in os.walk(root):
        dirnames[:] = [d for d in dirnames if d not in {".git", "venv", "node_modules"}]
        for f in filenames:
            if f.startswith("test_") and f.endswith(".py"):
                # test_foo.py -> foo
                module_name = f[5:-3]
                test_map[module_name] = Path(dirpath) / f
    return test_map


class KnowledgeGapIdentifier:
    """Analyzes a repo for knowledge gaps between code, docs, and tests."""

    def analyze(self, repo_path: str) -> GapReport:
        root = Path(repo_path).resolve()
        report = GapReport(repo_path=str(root))

        if not root.is_dir():
            report.gaps.append(Gap(
                gap_type=GapType.UNDOCUMENTED,
                severity=GapSeverity.ERROR,
                file=str(root),
                line=None,
                name="repo",
                description="Path is not a directory",
                suggestion="Provide a valid repo directory",
            ))
            return report

        # Collect artifacts
        py_files = _collect_python_files(root)
        doc_files = list(root.glob("docs/**/*.md")) + list(root.glob("*.md"))
        test_map = _collect_test_files(root / "tests") if (root / "tests").is_dir() else {}

        # Extract symbols from each source file
        source_symbols: Dict[str, Set[str]] = {}  # relative_path -> symbols
        all_source_symbols: Set[str] = set()

        for pf in py_files:
            rel = str(pf.relative_to(root))
            # Skip test files and setup/config
            if "/tests/" in rel or rel.startswith("tests/") or rel.startswith("test_"):
                continue
            if pf.name in ("setup.py", "conftest.py", "conf.py"):
                continue

            syms = _extract_python_symbols(pf)
            if syms:
                source_symbols[rel] = syms
                all_source_symbols.update(syms)

        # Extract documented symbols
        doc_symbols: Set[str] = set()
        for df in doc_files:
            doc_symbols.update(_extract_doc_symbols(df))

        # Modules that have a tests/test_<module>.py counterpart
        tested_modules: Set[str] = set(test_map.keys())

        # --- Find gaps ---

        # 1. Undocumented: source symbols not in any doc
        for rel_path, syms in source_symbols.items():
            for sym in sorted(syms):
                if sym.startswith("_") and not sym.startswith("__"):
                    continue  # Skip private
                if sym not in doc_symbols:
                    report.gaps.append(Gap(
                        gap_type=GapType.UNDOCUMENTED,
                        severity=GapSeverity.WARNING,
                        file=rel_path,
                        line=None,
                        name=sym,
                        description=f"{sym} defined in {rel_path} but not referenced in any docs",
                        suggestion=f"Add documentation for {sym} in a .md file",
                    ))

        # 2. Untested: source modules without a corresponding test file
        for rel_path in source_symbols:
            module_name = Path(rel_path).stem
            if module_name not in tested_modules and module_name not in ("__init__", "main", "config"):
                report.gaps.append(Gap(
                    gap_type=GapType.UNTESTED,
                    severity=GapSeverity.ERROR,
                    file=rel_path,
                    line=None,
                    name=module_name,
                    description=f"No test file found for {rel_path}",
                    suggestion=f"Create tests/test_{module_name}.py",
                ))

        # 3. Missing implementation: doc references symbol not in any source
        referenced_but_missing = doc_symbols - all_source_symbols
        for sym in sorted(referenced_but_missing):
            # Filter out common non-code terms
            if sym.lower() in {"todo", "fixme", "note", "example", "usage", "api",
                               "install", "setup", "config", "license", "contributing",
                               "changelog", "readme", "python", "bash", "json", "yaml",
                               "http", "url", "cli", "gui", "ui", "rest"}:
                continue
            if len(sym) < 3:
                continue
            report.gaps.append(Gap(
                gap_type=GapType.MISSING_IMPLEMENTATION,
                severity=GapSeverity.INFO,
                file="(docs)",
                line=None,
                name=sym,
                description=f"{sym} referenced in docs but not found in source code",
                suggestion=f"Verify if {sym} should be implemented or update docs",
            ))

        # 4. Orphan tests: test files whose name matches no source module
        source_module_names = {Path(f).stem for f in source_symbols}
        for test_mod, test_path in test_map.items():
            if test_mod in source_module_names:
                continue
            # Check if any source file partially matches
            matches_source = any(
                test_mod in Path(f).stem
                or test_mod.replace("_", "-") in f
                or test_mod.replace("_", "") in Path(f).stem
                for f in source_symbols
            )
            if not matches_source:
                rel = str(test_path.relative_to(root))
                report.gaps.append(Gap(
                    gap_type=GapType.ORPHAN_TEST,
                    severity=GapSeverity.WARNING,
                    file=rel,
                    line=None,
                    name=test_mod,
                    description=f"Test file {rel} exists but no matching source module found",
                    suggestion="Verify whether the source was renamed or removed",
                ))

        return report
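
Usage sketch (not part of the diff): the module is assumed to live at scripts/knowledge_gap_identifier.py, since the tests later in this diff import it from there; the repo path is hypothetical.

import json
from knowledge_gap_identifier import KnowledgeGapIdentifier

identifier = KnowledgeGapIdentifier()
report = identifier.analyze(".")  # hypothetical: analyze the current checkout
print(report.summary())           # grouped, human-readable report
print(json.dumps(report.to_dict(), indent=2))  # JSON-serializable form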
@@ -1,131 +0,0 @@
#!/usr/bin/env python3
"""
Knowledge Store Staleness Detector — Detect stale knowledge entries by comparing source file hashes.

Usage:
    python3 scripts/knowledge_staleness_check.py --index knowledge/index.json
    python3 scripts/knowledge_staleness_check.py --index knowledge/index.json --json
    python3 scripts/knowledge_staleness_check.py --index knowledge/index.json --fix
"""

import argparse
import hashlib
import json
import os
from typing import Dict, List, Any, Optional


def compute_file_hash(filepath: str) -> Optional[str]:
    """Compute SHA-256 hash of a file. Returns None if file doesn't exist."""
    try:
        with open(filepath, "rb") as f:
            return "sha256:" + hashlib.sha256(f.read()).hexdigest()
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        return None


def check_staleness(index_path: str, repo_root: str = ".") -> List[Dict[str, Any]]:
    """Check all entries in knowledge index for staleness.

    Returns list of entries with staleness info:
    - status: "fresh" | "stale" | "missing_source" | "no_hash" | "no_source"
    - current_hash: computed hash (if source exists)
    - stored_hash: hash from index
    """
    with open(index_path) as f:
        data = json.load(f)

    facts = data.get("facts", [])
    results = []

    for entry in facts:
        source_file = entry.get("source_file")
        stored_hash = entry.get("source_hash")

        if not source_file:
            results.append({**entry, "status": "no_source", "current_hash": None})
            continue

        full_path = os.path.join(repo_root, source_file)
        current_hash = compute_file_hash(full_path)

        if current_hash is None:
            results.append({**entry, "status": "missing_source", "current_hash": None})
        elif not stored_hash:
            results.append({**entry, "status": "no_hash", "current_hash": current_hash})
        elif current_hash != stored_hash:
            results.append({**entry, "status": "stale", "current_hash": current_hash})
        else:
            results.append({**entry, "status": "fresh", "current_hash": current_hash})

    return results


def fix_hashes(index_path: str, repo_root: str = ".") -> int:
    """Add hashes to entries missing them. Returns count of fixed entries."""
    with open(index_path) as f:
        data = json.load(f)

    fixed = 0
    for entry in data.get("facts", []):
        if entry.get("source_hash"):
            continue
        source_file = entry.get("source_file")
        if not source_file:
            continue
        full_path = os.path.join(repo_root, source_file)
        h = compute_file_hash(full_path)
        if h:
            entry["source_hash"] = h
            fixed += 1

    with open(index_path, "w") as f:
        json.dump(data, f, indent=2)

    return fixed


def main():
    parser = argparse.ArgumentParser(description="Check knowledge store staleness")
    parser.add_argument("--index", required=True, help="Path to knowledge/index.json")
    parser.add_argument("--repo", default=".", help="Repo root for source file resolution")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    parser.add_argument("--fix", action="store_true", help="Add hashes to entries missing them")

    args = parser.parse_args()

    if args.fix:
        fixed = fix_hashes(args.index, args.repo)
        print(f"Fixed {fixed} entries with missing hashes.")
        return

    results = check_staleness(args.index, args.repo)

    if args.json:
        print(json.dumps(results, indent=2))
    else:
        stale = [r for r in results if r["status"] != "fresh"]
        fresh = [r for r in results if r["status"] == "fresh"]

        print("Knowledge Store Staleness Check")
        print(f"  Total entries: {len(results)}")
        print(f"  Fresh: {len(fresh)}")
        print(f"  Stale/Issues: {len(stale)}")
        print()

        if stale:
            print("Issues found:")
            for r in stale:
                status = r["status"]
                fact = r.get("fact", "?")[:60]
                source = r.get("source_file", "?")
                print(f"  [{status}] {source}: {fact}")
        else:
            print("All entries are fresh!")


if __name__ == "__main__":
    main()
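
For reference, a minimal index the checker above accepts. The field names (facts, fact, source_file, source_hash) come from the reader code; the fact text and paths are made up.

import json

index = {
    "facts": [
        {
            "fact": "example fact (hypothetical)",
            "source_file": "scripts/session_reader.py",  # resolved relative to --repo
            # "source_hash" omitted: check_staleness reports "no_hash";
            # running with --fix fills in "sha256:<hex digest>".
        }
    ]
}
with open("knowledge/index.json", "w") as f:
    json.dump(index, f, indent=2)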
scripts/session_metadata.py — new file, 276 lines
@@ -0,0 +1,276 @@
#!/usr/bin/env python3
"""
session_metadata.py - Extract structured metadata from Hermes session transcripts.
Works alongside session_reader.py to provide higher-level session analysis.
"""

import json
import re
import sys
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
from typing import List, Optional

# Import from session_reader (the canonical reader)
from session_reader import read_session


@dataclass
class SessionSummary:
    """Structured summary of a Hermes session transcript."""
    session_id: str
    model: str
    repo: str
    outcome: str
    message_count: int
    tool_calls: int
    duration_estimate: str
    key_actions: List[str]
    errors_encountered: List[str]
    start_time: Optional[str] = None
    end_time: Optional[str] = None
    total_tokens_estimate: int = 0
    user_messages: int = 0
    assistant_messages: int = 0
    tool_outputs: int = 0


def _parse_timestamp(value: str) -> Optional[datetime]:
    """Parse a timestamp in any of the formats sessions have used."""
    for fmt in ("%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d %H:%M:%S"):
        try:
            return datetime.strptime(value, fmt)
        except (ValueError, TypeError):
            continue
    return None


def extract_session_metadata(file_path: str) -> SessionSummary:
    """
    Extract structured metadata from a Hermes session JSONL transcript.
    Uses session_reader.read_session() for file reading.
    """
    session_id = Path(file_path).stem
    model = "unknown"
    repo = "unknown"
    tool_calls_count = 0
    key_actions = []
    errors = []
    start_time = None
    end_time = None
    total_tokens = 0

    # Common repo patterns to look for
    repo_patterns = [
        r"(?:the-nexus|compounding-intelligence|timmy-config|hermes-agent)",
        r"(?:forge\.alexanderwhitestone\.com/([^/]+/[^/\s]+))",
        r"(?:github\.com/([^/]+/[^/\s]+))",
        r"(?:Timmy_Foundation/([^/\s]+))",
    ]

    try:
        # Use the canonical reader from session_reader.py
        messages = read_session(file_path)
    except FileNotFoundError:
        return SessionSummary(
            session_id=session_id,
            model="unknown",
            repo="unknown",
            outcome="failure",
            message_count=0,
            tool_calls=0,
            duration_estimate="0m",
            key_actions=[],
            errors_encountered=[f"File not found: {file_path}"]
        )

    # Process messages for metadata
    for entry in messages:
        # Extract model from assistant messages
        if entry.get("role") == "assistant" and entry.get("model"):
            model = entry["model"]

        # Extract timestamps
        if entry.get("timestamp"):
            ts = entry["timestamp"]
            if start_time is None:
                start_time = ts
            end_time = ts

        # Count tool calls
        if entry.get("tool_calls"):
            tool_calls_count += len(entry["tool_calls"])
            for tc in entry["tool_calls"]:
                if tc.get("function", {}).get("name"):
                    action = tc["function"]["name"]
                    if action not in key_actions:
                        key_actions.append(action)

        # Estimate tokens from content length
        content = entry.get("content", "")
        if isinstance(content, str):
            total_tokens += len(content.split())
        elif isinstance(content, list):
            for item in content:
                if isinstance(item, dict) and "text" in item:
                    total_tokens += len(item["text"].split())

        # Look for repo mentions in content
        if entry.get("content"):
            content_str = str(entry["content"])
            for pattern in repo_patterns:
                match = re.search(pattern, content_str, re.IGNORECASE)
                if match:
                    repo = match.group(1) if match.groups() else match.group(0)
                    break

        # Look for error messages (truncate long ones rather than dropping them)
        if entry.get("role") == "tool" and entry.get("is_error"):
            error_msg = entry.get("content", "Unknown error")
            if isinstance(error_msg, str):
                errors.append(error_msg[:200])

    # Count message types
    user_messages = sum(1 for m in messages if m.get("role") == "user")
    assistant_messages = sum(1 for m in messages if m.get("role") == "assistant")
    tool_outputs = sum(1 for m in messages if m.get("role") == "tool")

    # Calculate duration estimate
    duration_estimate = "unknown"
    if start_time and end_time:
        start_dt = _parse_timestamp(start_time)
        end_dt = _parse_timestamp(end_time)
        if start_dt and end_dt:
            minutes = (end_dt - start_dt).total_seconds() / 60
            duration_estimate = f"{minutes:.0f}m"

    # Classify outcome
    outcome = "unknown"
    if errors:
        # Check if any errors are fatal
        fatal_errors = any("405" in e or "permission" in e.lower() or "authentication" in e.lower()
                           for e in errors)
        outcome = "failure" if fatal_errors else "partial"
    elif messages:
        # Check last message for success indicators
        last_msg = messages[-1]
        if last_msg.get("role") == "assistant":
            content = last_msg.get("content", "")
            if isinstance(content, str):
                success_indicators = ["done", "completed", "success", "merged", "pushed"]
                if any(indicator in content.lower() for indicator in success_indicators):
                    outcome = "success"

    # key_actions is already unique; deduplicate errors and cap both lists
    # to keep summaries short.
    unique_actions = key_actions[:10]
    unique_errors = list(dict.fromkeys(errors))[:5]

    return SessionSummary(
        session_id=session_id,
        model=model,
        repo=repo,
        outcome=outcome,
        message_count=len(messages),
        tool_calls=tool_calls_count,
        duration_estimate=duration_estimate,
        key_actions=unique_actions,
        errors_encountered=unique_errors,
        start_time=start_time,
        end_time=end_time,
        total_tokens_estimate=total_tokens,
        user_messages=user_messages,
        assistant_messages=assistant_messages,
        tool_outputs=tool_outputs
    )


def process_session_directory(directory_path: str, output_file: Optional[str] = None) -> List[SessionSummary]:
    """Process all JSONL files in a directory."""
    directory = Path(directory_path)
    if not directory.exists():
        print(f"Error: Directory {directory_path} does not exist", file=sys.stderr)
        return []

    jsonl_files = list(directory.glob("*.jsonl"))
    if not jsonl_files:
        print(f"Warning: No JSONL files found in {directory_path}", file=sys.stderr)
        return []

    summaries = []
    for jsonl_file in sorted(jsonl_files):
        print(f"Processing {jsonl_file.name}...", file=sys.stderr)
        summaries.append(extract_session_metadata(str(jsonl_file)))

    if output_file:
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump([asdict(s) for s in summaries], f, indent=2)
        print(f"Wrote {len(summaries)} summaries to {output_file}", file=sys.stderr)

    return summaries


def main():
    """CLI entry point."""
    import argparse

    parser = argparse.ArgumentParser(description="Extract metadata from Hermes session JSONL transcripts")
    parser.add_argument("path", help="Path to JSONL file or directory of session files")
    parser.add_argument("-o", "--output", help="Output JSON file (default: stdout)")
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")

    args = parser.parse_args()

    path = Path(args.path)

    if path.is_file():
        summary = extract_session_metadata(str(path))
        if args.output:
            with open(args.output, 'w') as f:
                json.dump(asdict(summary), f, indent=2)
            print(f"Wrote summary to {args.output}", file=sys.stderr)
        else:
            print(json.dumps(asdict(summary), indent=2))

    elif path.is_dir():
        summaries = process_session_directory(str(path), args.output)
        if not args.output:
            print(json.dumps([asdict(s) for s in summaries], indent=2))

    else:
        print(f"Error: {args.path} is not a file or directory", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
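
A sketch of programmatic use, assuming a hypothetical transcript path; the entry shape in the comment is inferred from the fields the extractor reads (role, model, timestamp, content, tool_calls, is_error).

import json
from dataclasses import asdict

from session_metadata import extract_session_metadata

# Each JSONL line is expected to be an object roughly like:
# {"role": "assistant", "model": "...", "timestamp": "2026-02-03T12:00:00Z",
#  "content": "...", "tool_calls": [{"function": {"name": "git_push"}}]}
summary = extract_session_metadata("sessions/example-session.jsonl")  # hypothetical path
print(json.dumps(asdict(summary), indent=2))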
@@ -1,129 +0,0 @@
#!/usr/bin/env python3
"""Tests for scripts/knowledge_staleness_check.py — 8 tests."""

import json
import os
import sys
import tempfile

sys.path.insert(0, os.path.dirname(__file__) or ".")
import importlib.util

spec = importlib.util.spec_from_file_location(
    "ks", os.path.join(os.path.dirname(__file__) or ".", "knowledge_staleness_check.py"))
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
check_staleness = mod.check_staleness
fix_hashes = mod.fix_hashes
compute_file_hash = mod.compute_file_hash


def test_fresh_entry():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("print('hello')")
        h = compute_file_hash(src)
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "hello", "source_file": "source.py", "source_hash": h}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "fresh"
        print("PASS: test_fresh_entry")


def test_stale_entry():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("original content")
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "old", "source_file": "source.py", "source_hash": "sha256:wrong"}]}, f)
        # Now change the source
        with open(src, "w") as f:
            f.write("modified content")
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "stale"
        print("PASS: test_stale_entry")


def test_missing_source():
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "gone", "source_file": "nonexistent.py", "source_hash": "sha256:abc"}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "missing_source"
        print("PASS: test_missing_source")


def test_no_hash():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("content")
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "no hash", "source_file": "source.py"}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "no_hash"
        assert results[0]["current_hash"].startswith("sha256:")
        print("PASS: test_no_hash")


def test_no_source_field():
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "orphan"}]}, f)
        results = check_staleness(idx, tmpdir)
        assert results[0]["status"] == "no_source"
        print("PASS: test_no_source_field")


def test_fix_hashes():
    with tempfile.TemporaryDirectory() as tmpdir:
        src = os.path.join(tmpdir, "source.py")
        with open(src, "w") as f:
            f.write("content for hashing")
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": [{"fact": "needs hash", "source_file": "source.py"}]}, f)
        fixed = fix_hashes(idx, tmpdir)
        assert fixed == 1
        # Verify hash was added
        with open(idx) as f:
            data = json.load(f)
        assert data["facts"][0]["source_hash"].startswith("sha256:")
        print("PASS: test_fix_hashes")


def test_empty_index():
    with tempfile.TemporaryDirectory() as tmpdir:
        idx = os.path.join(tmpdir, "index.json")
        with open(idx, "w") as f:
            json.dump({"facts": []}, f)
        results = check_staleness(idx, tmpdir)
        assert results == []
        print("PASS: test_empty_index")


def test_compute_hash_nonexistent():
    h = compute_file_hash("/nonexistent/path/file.py")
    assert h is None
    print("PASS: test_compute_hash_nonexistent")


def run_all():
    test_fresh_entry()
    test_stale_entry()
    test_missing_source()
    test_no_hash()
    test_no_source_field()
    test_fix_hashes()
    test_empty_index()
    test_compute_hash_nonexistent()
    print("\nAll 8 tests passed!")


if __name__ == "__main__":
    run_all()
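
These tests use plain asserts and need no test runner. Assuming the file is saved next to knowledge_staleness_check.py (a hypothetical name, since the loader resolves the module from its own directory), they run directly:

python3 scripts/test_knowledge_staleness_check.py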
@@ -1,141 +0,0 @@
"""Tests for knowledge_gap_identifier module."""

import sys
import os
import tempfile
from pathlib import Path

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'scripts'))

from knowledge_gap_identifier import KnowledgeGapIdentifier, GapType, GapSeverity


def _make_repo(tmpdir, structure):
    """Create a test repo from a dict of {path: content}."""
    for rel_path, content in structure.items():
        p = Path(tmpdir) / rel_path
        p.parent.mkdir(parents=True, exist_ok=True)
        p.write_text(content)


def test_undocumented_symbol():
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
            "README.md": "# Calculator\n",
        })
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        undocumented = [g for g in report.gaps if g.gap_type == GapType.UNDOCUMENTED]
        assert any(g.name == "add" for g in undocumented), "add should be undocumented"


def test_documented_symbol_no_gap():
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
            "README.md": "# Calculator\nUse `add()` to add numbers.\n",
        })
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        undocumented = [g for g in report.gaps
                        if g.gap_type == GapType.UNDOCUMENTED and g.name == "add"]
        assert len(undocumented) == 0, "add is documented, should not be flagged"


def test_untested_module():
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
            "src/helper.py": "def format(x):\n    return str(x)\n",
            "tests/test_calculator.py": "from src.calculator import add\nassert add(1,2) == 3\n",
        })
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        untested = [g for g in report.gaps if g.gap_type == GapType.UNTESTED]
        assert any("helper" in g.name for g in untested), "helper should be untested"


def test_tested_module_no_gap():
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/calculator.py": "def add(a, b):\n    return a + b\n",
            "tests/test_calculator.py": "def test_add():\n    assert True\n",
        })
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        untested = [g for g in report.gaps
                    if g.gap_type == GapType.UNTESTED and "calculator" in g.name]
        assert len(untested) == 0, "calculator has tests, should not be flagged"


def test_missing_implementation():
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "def run():\n    pass\n",
            "docs/api.md": "# API\nUse `NonExistentClass` to do things.\n",
        })
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        missing = [g for g in report.gaps if g.gap_type == GapType.MISSING_IMPLEMENTATION]
        assert any(g.name == "NonExistentClass" for g in missing)


def test_private_symbols_skipped():
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "def _internal():\n    pass\ndef public():\n    pass\n",
            "README.md": "# App\n",
        })
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        undocumented_names = [g.name for g in report.gaps if g.gap_type == GapType.UNDOCUMENTED]
        assert "_internal" not in undocumented_names, "Private symbols should be skipped"
        assert "public" in undocumented_names


def test_empty_repo():
    with tempfile.TemporaryDirectory() as tmpdir:
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        assert len(report.gaps) == 0


def test_invalid_path():
    report = KnowledgeGapIdentifier().analyze("/nonexistent/path/xyz")
    assert len(report.gaps) == 1
    assert report.gaps[0].severity == GapSeverity.ERROR


def test_report_summary():
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "class MyService:\n    def handle(self):\n        pass\n",
            "README.md": "# App\n",
        })
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        summary = report.summary()
        assert "UNDOCUMENTED" in summary
        assert "MyService" in summary


def test_report_to_dict():
    with tempfile.TemporaryDirectory() as tmpdir:
        _make_repo(tmpdir, {
            "src/app.py": "def hello():\n    pass\n",
            "README.md": "# App\n",
        })
        report = KnowledgeGapIdentifier().analyze(tmpdir)
        d = report.to_dict()
        assert "total_gaps" in d
        assert "gaps" in d
        assert isinstance(d["gaps"], list)
        assert d["total_gaps"] > 0


if __name__ == "__main__":
    test_undocumented_symbol()
    test_documented_symbol_no_gap()
    test_untested_module()
    test_tested_module_no_gap()
    test_missing_implementation()
    test_private_symbols_skipped()
    test_empty_repo()
    test_invalid_path()
    test_report_summary()
    test_report_to_dict()
    print("All 10 tests passed.")