Compare commits
1 commit: step35/148...step35/173 (2f57c2b653)

scripts/progress_tracker.py (new file, 477 lines added)
@@ -0,0 +1,477 @@
#!/usr/bin/env python3
"""
Progress Tracker — Pipeline 10.8
Track improvement metrics over time. Are we getting better?

Metrics tracked:
1. Test coverage — % of Python functions with associated tests (test:source file ratio + line coverage if available)
2. Doc coverage — % of Python callables with docstrings (AST-based)
3. Issue close rate — closed / (opened + closed) per week (Gitea API)
4. Dep freshness — % of requirements pinned vs outdated (pip list --outdated)

Output:
- metrics/snapshots/YYYY-MM-DD.json — one snapshot per run
- metrics/TRENDS.md — cumulative markdown table
- stdout summary

Usage:
    python3 scripts/progress_tracker.py
    python3 scripts/progress_tracker.py --json
    python3 scripts/progress_tracker.py --output metrics/TRENDS.md

Weekly cron:
    0 9 * * 1 cd /path/to/compounding-intelligence && python3 scripts/progress_tracker.py
"""

import argparse
import json
import os
import re
import subprocess
import sys
from collections import defaultdict
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

# ── Configuration ──────────────────────────────────────────────────────────

SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent
METRICS_DIR = REPO_ROOT / "metrics"
SNAPSHOTS_DIR = METRICS_DIR / "snapshots"
TOKEN_PATH = Path.home() / ".config" / "gitea" / "token"
GITEA_API_BASE = "https://forge.alexanderwhitestone.com/api/v1"
ORG = "Timmy_Foundation"

# Ensure paths exist
SNAPSHOTS_DIR.mkdir(parents=True, exist_ok=True)


# ── Helpers ─────────────────────────────────────────────────────────────────

def run_cmd(cmd: List[str], cwd: Path = REPO_ROOT) -> str:
    """Run a command and return its stripped stdout (empty string on failure)."""
    result = subprocess.run(
        cmd, capture_output=True, text=True, cwd=cwd, timeout=30
    )
    if result.returncode != 0:
        return ""
    return result.stdout.strip()

def slugify_date(dt: datetime) -> str:
    return dt.strftime("%Y-%m-%d")


def snapshot_path(dt: datetime) -> Path:
    return SNAPSHOTS_DIR / f"{slugify_date(dt)}.json"


def load_snapshots() -> List[Dict[str, Any]]:
    """Load all existing snapshots sorted by date."""
    snapshots = []
    for f in sorted(SNAPSHOTS_DIR.glob("*.json")):
        try:
            with open(f) as fp:
                snapshots.append(json.load(fp))
        except Exception:
            continue
    return snapshots


# ── Metric 1: Test Coverage ─────────────────────────────────────────────────

def collect_test_coverage() -> Dict[str, Any]:
    """
    Compute test coverage metrics.
    Counts test_*.py and *_test.py files vs non-test .py source files.
    Also attempts to read .coverage if present.
    """
    all_py = list(REPO_ROOT.rglob("*.py"))

    source_files = []
    test_files = []

    for p in all_py:
        try:
            rel_parts = p.relative_to(REPO_ROOT).parts
        except ValueError:
            continue

        # Skip hidden/cache/temp dirs (check only relative parts)
        if any(part.startswith('.') or part.startswith('__') for part in rel_parts):
            continue
        if any(part in ('node_modules', 'venv', '.venv', 'env', '.pytest_cache') for part in rel_parts):
            continue

        if p.name.startswith("test_") or p.name.endswith("_test.py"):
            test_files.append(p)
        else:
            source_files.append(p)

    # Try to get line coverage from .coverage
    coverage_percent = None
    coverage_tool = None
    coverage_file = REPO_ROOT / ".coverage"
    if coverage_file.exists():
        try:
            import coverage  # type: ignore
            # Use coverage API if available
            cov = coverage.Coverage(data_file=str(coverage_file))
            cov.load()
            total = cov.report()
            coverage_percent = total if isinstance(total, float) else None
            coverage_tool = "coverage"
        except Exception:
            # Fallback: parse `coverage report` output
            out = run_cmd(["coverage", "report", "--skip-empty"])
            if out:
                for line in out.splitlines():
                    if "TOTAL" in line:
                        parts = line.split()
                        if len(parts) >= 2:
                            try:
                                coverage_percent = float(parts[-1].rstrip('%'))
                                coverage_tool = "coverage"
                                break
                            except ValueError:
                                pass

    return {
        "test_files": len(test_files),
        "source_files": len(source_files),
        "test_to_source_ratio": round(len(test_files) / len(source_files), 4) if source_files else 0.0,
        "coverage_tool": coverage_tool,
        "coverage_percent": coverage_percent,
    }

# ── Metric 2: Doc Coverage ──────────────────────────────────────────────────

def collect_doc_coverage() -> Dict[str, Any]:
    """
    Check AST of Python files for docstrings.
    Returns: callables_total, callables_with_doc, doc_coverage_percent
    """
    import ast

    all_py = list(REPO_ROOT.rglob("*.py"))

    source_files = []
    test_files = []

    for p in all_py:
        try:
            rel_parts = p.relative_to(REPO_ROOT).parts
        except ValueError:
            continue

        if any(part.startswith('.') or part.startswith('__') for part in rel_parts):
            continue
        if any(part in ('node_modules', 'venv', '.venv', 'env', '.pytest_cache') for part in rel_parts):
            continue

        if p.name.startswith("test_") or p.name.endswith("_test.py"):
            test_files.append(p)
        else:
            source_files.append(p)

    total_callables = 0
    with_doc = 0

    for p in source_files + test_files:
        try:
            with open(p) as f:
                tree = ast.parse(f.read(), filename=str(p))
            for node in ast.walk(tree):
                if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
                    total_callables += 1
                    doc = ast.get_docstring(node)
                    if doc and doc.strip():
                        with_doc += 1
        except Exception:
            continue

    return {
        "callables_total": total_callables,
        "callables_with_doc": with_doc,
        "doc_coverage_percent": round((with_doc / total_callables * 100) if total_callables else 0.0, 2),
    }

# ── Metric 3: Issue Close Rate ──────────────────────────────────────────────

def collect_issue_metrics() -> Dict[str, Any]:
    """
    Use Gitea API to get issue open/close stats for the last 7 days.
    Returns counts and close rate.
    """
    token = ""
    if TOKEN_PATH.exists():
        token = TOKEN_PATH.read_text().strip()

    if not token:
        return {
            "opened_last_7d": None,
            "closed_last_7d": None,
            "close_rate": None,
            "total_open": None,
            "note": "Gitea token not available"
        }

    try:
        from urllib.request import Request, urlopen
        from urllib.error import HTTPError, URLError
    except ImportError:
        return {"error": "urllib not available"}

    now = datetime.now(timezone.utc)
    week_ago = now - timedelta(days=7)
    since = week_ago.strftime("%Y-%m-%d")

    headers = {"Authorization": f"token {token}"}
    base_url = f"{GITEA_API_BASE}/repos/{ORG}/compounding-intelligence/issues"

    try:
        # Get issues from last 7 days
        url = f"{base_url}?state=all&since={since}&per_page=100"
        req = Request(url, headers=headers)
        with urlopen(req, timeout=15) as resp:
            issues = json.loads(resp.read())

        opened = 0
        closed = 0
        for issue in issues:
            created = datetime.fromisoformat(issue["created_at"].replace("Z", "+00:00"))
            if created >= week_ago:
                opened += 1
            if issue.get("state") == "closed":
                closed_at_str = issue.get("closed_at")
                if closed_at_str:
                    closed_at = datetime.fromisoformat(closed_at_str.replace("Z", "+00:00"))
                    if closed_at >= week_ago:
                        closed += 1

        # Total open issues
        req2 = Request(f"{base_url}?state=open&per_page=1", headers=headers)
        with urlopen(req2, timeout=15) as resp:
            total_open = int(resp.headers.get("X-Total-Count", "0"))

        total = opened + closed
        close_rate = closed / total if total > 0 else 0.0

        return {
            "opened_last_7d": opened,
            "closed_last_7d": closed,
            "close_rate": round(close_rate, 4),
            "total_open": total_open,
        }
    except Exception as e:
        return {
            "opened_last_7d": None,
            "closed_last_7d": None,
            "close_rate": None,
            "total_open": None,
            "error": str(e)[:100],
            "note": "Gitea API unavailable"
        }

# ── Metric 4: Dependency Freshness ─────────────────────────────────────────

def collect_dep_freshness() -> Dict[str, Any]:
    """
    Check requirements.txt for outdated dependencies using pip list --outdated.
    Returns freshness percentage and outdated list.
    """
    req_file = REPO_ROOT / "requirements.txt"
    if not req_file.exists():
        return {
            "total_deps": 0,
            "outdated_deps": 0,
            "freshness_percent": 100.0,
            "outdated_list": [],
            "note": "requirements.txt not found"
        }

    # Parse requirements (very simple: take name before comparison op)
    reqs = []
    with open(req_file) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            m = re.match(r"^([a-zA-Z0-9_.-]+)", line)
            if m:
                reqs.append(m.group(1))

    if not reqs:
        return {"total_deps": 0, "outdated_deps": 0, "freshness_percent": 100.0, "outdated_list": []}

    # Query pip for outdated packages (may fail if pip not available)
    outdated_names = set()
    try:
        out = run_cmd(["pip", "list", "--outdated", "--format=json"])
        if out:
            data = json.loads(out)
            outdated_names = {item["name"].lower() for item in data}
    except Exception:
        pass

    outdated = [p for p in reqs if p.lower() in outdated_names]
    total = len(reqs)
    outdated_count = len(outdated)
    freshness = round(((total - outdated_count) / total * 100) if total else 100.0, 1)

    return {
        "total_deps": total,
        "outdated_deps": outdated_count,
        "freshness_percent": freshness,
        "outdated_list": outdated,
    }

# ── Snapshot & Trends ───────────────────────────────────────────────────────

def take_snapshot() -> Dict[str, Any]:
    """Collect all metrics and return a snapshot dict."""
    now = datetime.now(timezone.utc)
    test_cov = collect_test_coverage()
    doc_cov = collect_doc_coverage()
    issues = collect_issue_metrics()
    deps = collect_dep_freshness()

    return {
        "timestamp": now.isoformat(),
        "date": slugify_date(now),
        "metrics": {
            "test_coverage": test_cov,
            "doc_coverage": doc_cov,
            "issues": issues,
            "dependencies": deps,
        }
    }


def save_snapshot(snapshot: Dict[str, Any]) -> Path:
    path = snapshot_path(datetime.fromisoformat(snapshot["timestamp"]))
    with open(path, "w") as f:
        json.dump(snapshot, f, indent=2)
    return path

def generate_trends(snapshots: List[Dict[str, Any]], output_path: Optional[Path] = None) -> str:
    """Generate markdown trends table; optionally write to file."""
    if not snapshots:
        msg = "# Progress Tracker — Trends\n\nNo snapshots yet. Run `progress_tracker.py` to create the first snapshot."
        if output_path:
            output_path.parent.mkdir(parents=True, exist_ok=True)
            output_path.write_text(msg)
        return msg

    lines = [
        "# Progress Tracker — Trends",
        f"\nLast updated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}",
        f"\nSnapshots: {len(snapshots)}\n",
        "| Date | Test Files → Source | Doc Coverage | Issues Closed/Opened (7d) | Dep Freshness |",
        "|------|---------------------|--------------|---------------------------|---------------|",
    ]

    for snap in reversed(snapshots):  # newest snapshot first
        date = snap["date"]
        m = snap["metrics"]
        tc = m["test_coverage"]
        test_str = f"{tc['test_files']}/{tc['source_files']} ({tc['test_to_source_ratio']:.2f})"
        doc_str = f"{m['doc_coverage']['doc_coverage_percent']:.1f}%"
        issues_str = f"{m['issues'].get('closed_last_7d','-')}/{m['issues'].get('opened_last_7d','-')}"
        dep_str = f"{m['dependencies'].get('freshness_percent','?')}%"
        lines.append(f"| {date} | {test_str} | {doc_str} | {issues_str} | {dep_str} |")

    # Current snapshot summary
    cur = snapshots[-1]
    cm = cur["metrics"]
    lines.append(f"\n## Current Snapshot ({cur['date']})\n")

    tc = cm["test_coverage"]
    cov_line = (
        f"- Test coverage: {tc['coverage_percent']:.1f}% (via {tc['coverage_tool']})\n"
        if tc["coverage_percent"] is not None
        else "- Test coverage: (pytest-cov not configured)\n"
    )
    lines.append(cov_line)
    lines.append(f"- Doc coverage: {cm['doc_coverage']['doc_coverage_percent']:.1f}%")

    im = cm["issues"]
    if im.get("close_rate") is not None:
        lines.append(f"- Issue close rate (7d): {im['close_rate']*100:.1f}% ({im['closed_last_7d']} closed, {im['opened_last_7d']} opened)")
    else:
        lines.append(f"- Issue metrics: {im.get('note','unavailable')}")

    dd = cm["dependencies"]
    lines.append(f"- Dep freshness: {dd.get('freshness_percent','?')}% ({dd.get('outdated_deps',0)}/{dd.get('total_deps',0)} deps outdated)")
    if dd.get('outdated_list'):
        lines.append(f"  Outdated: {', '.join(dd['outdated_list'][:5])}")

    content = "\n".join(lines) + "\n"

    if output_path:
        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_text(content)

    return content

# ── Main ─────────────────────────────────────────────────────────────────────

def main() -> int:
    parser = argparse.ArgumentParser(description="Progress Tracker — 10.8")
    parser.add_argument("--json", action="store_true", help="Emit snapshot as JSON only")
    parser.add_argument("--output", type=Path, default=METRICS_DIR / "TRENDS.md",
                        help="Write trends markdown to this file")
    args = parser.parse_args()

    snapshot = take_snapshot()
    all_snapshots = load_snapshots()
    path_written = save_snapshot(snapshot)

    if args.json:
        print(json.dumps(snapshot, indent=2))
        return 0

    trends = generate_trends(all_snapshots + [snapshot], output_path=args.output)

    # Print current snapshot summary
    print(f"Snapshot saved: {path_written}\n")
    print(f"Progress Tracker — {snapshot['date']}")
    print("=" * 50)

    m = snapshot["metrics"]
    tc = m["test_coverage"]
    print(f"Test files: {tc['test_files']} | Source files: {tc['source_files']} | Ratio: {tc['test_to_source_ratio']:.3f}")
    if tc["coverage_percent"] is not None:
        print(f"Line coverage: {tc['coverage_percent']:.1f}% (via {tc['coverage_tool']})")
    else:
        print("Line coverage: (not available — run `pytest --cov`)")

    print()
    dc = m["doc_coverage"]
    print(f"Callables with docstrings: {dc['callables_with_doc']}/{dc['callables_total']} ({dc['doc_coverage_percent']:.1f}%)")

    print()
    im = m["issues"]
    if im.get("close_rate") is not None:
        print(f"Issues (7d): {im['closed_last_7d']} closed / {im['opened_last_7d']} opened → close rate: {im['close_rate']*100:.1f}%")
        print(f"Total open: {im['total_open']}")
    else:
        print(f"Issues: {im.get('note','unavailable')}")

    print()
    dd = m["dependencies"]
    print(f"Dependencies: {dd.get('total_deps',0)} total, {dd.get('outdated_deps',0)} outdated")
    if dd.get('outdated_list'):
        shown = dd['outdated_list'][:5]
        print(f"Outdated: {', '.join(shown)}" + ("..." if len(dd['outdated_list']) > 5 else ""))

    print(f"\nTrends written to: {args.output}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
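For reference, a snapshot written by take_snapshot()/save_snapshot() above is shaped roughly like the following (illustrative values only; the actual numbers depend on the repository being scanned):

    {
      "timestamp": "2026-04-13T09:00:00+00:00",
      "date": "2026-04-13",
      "metrics": {
        "test_coverage": {"test_files": 3, "source_files": 20, "test_to_source_ratio": 0.15, "coverage_tool": null, "coverage_percent": null},
        "doc_coverage": {"callables_total": 120, "callables_with_doc": 90, "doc_coverage_percent": 75.0},
        "issues": {"opened_last_7d": 4, "closed_last_7d": 3, "close_rate": 0.4286, "total_open": 12},
        "dependencies": {"total_deps": 10, "outdated_deps": 1, "freshness_percent": 90.0, "outdated_list": ["requests"]}
      }
    }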
@@ -1,468 +0,0 @@
#!/usr/bin/env python3
"""
session_knowledge_extractor.py — Extract session-level entities and relationships from Hermes transcripts.

Creates knowledge facts about: which agent handled the session, what task was solved,
which tools were used and why, and the outcome. Target: 10+ facts per session.

Usage:
    python3 session_knowledge_extractor.py --session session.jsonl --output knowledge/
    python3 session_knowledge_extractor.py --batch --sessions-dir ~/.hermes/sessions/ --limit 10
"""

import argparse
import json
import os
import sys
import time
import hashlib
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional, List, Dict, Any

SCRIPT_DIR = Path(__file__).parent.absolute()
sys.path.insert(0, str(SCRIPT_DIR))

from session_reader import read_session, extract_conversation, truncate_for_context, messages_to_text

# --- Configuration ---
DEFAULT_API_BASE = os.environ.get(
    "EXTRACTOR_API_BASE",
    os.environ.get("HARVESTER_API_BASE", "https://api.nousresearch.com/v1")
)
DEFAULT_API_KEY = os.environ.get(
    "EXTRACTOR_API_KEY",
    os.environ.get("HARVESTER_API_KEY", "")
)
DEFAULT_MODEL = os.environ.get(
    "EXTRACTOR_MODEL",
    os.environ.get("HARVESTER_MODEL", "xiaomi/mimo-v2-pro")
)
KNOWLEDGE_DIR = os.environ.get("EXTRACTOR_KNOWLEDGE_DIR", "knowledge")
PROMPT_PATH = os.environ.get(
    "EXTRACTOR_PROMPT_PATH",
    str(SCRIPT_DIR.parent / "templates" / "session-entity-prompt.md")
)

API_KEY_PATHS = [
    os.path.expanduser("~/.config/nous/key"),
    os.path.expanduser("~/.hermes/keymaxxing/active/minimax.key"),
    os.path.expanduser("~/.config/openrouter/key"),
    os.path.expanduser("~/.config/gitea/token"),  # fallback
]

def find_api_key() -> str:
    for path in API_KEY_PATHS:
        if os.path.exists(path):
            with open(path) as f:
                key = f.read().strip()
            if key:
                return key
    return ""


def load_extraction_prompt() -> str:
    path = Path(PROMPT_PATH)
    if not path.exists():
        print(f"ERROR: Extraction prompt not found at {path}", file=sys.stderr)
        sys.exit(1)
    return path.read_text(encoding='utf-8')


def call_llm(prompt: str, transcript: str, api_base: str, api_key: str, model: str) -> Optional[List[dict]]:
    """Call LLM to extract session entity knowledge."""
    import urllib.request

    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": f"Extract knowledge from this session transcript:\n\n{transcript}"}
    ]

    payload = json.dumps({
        "model": model,
        "messages": messages,
        "temperature": 0.1,
        "max_tokens": 4096
    }).encode('utf-8')

    req = urllib.request.Request(
        f"{api_base}/chat/completions",
        data=payload,
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        },
        method="POST"
    )

    try:
        with urllib.request.urlopen(req, timeout=60) as resp:
            result = json.loads(resp.read().decode('utf-8'))
        content = result["choices"][0]["message"]["content"]
        return parse_extraction_response(content)
    except Exception as e:
        print(f"ERROR: LLM API call failed: {e}", file=sys.stderr)
        return None

def parse_extraction_response(content: str) -> Optional[List[dict]]:
    """Parse LLM response; handles JSON or markdown-wrapped JSON."""
    try:
        data = json.loads(content)
        if isinstance(data, dict) and 'knowledge' in data:
            return data['knowledge']
        if isinstance(data, list):
            return data
    except json.JSONDecodeError:
        pass

    import re
    json_match = re.search(r'```(?:json)?\s*(\{.*?\})\s*```', content, re.DOTALL)
    if json_match:
        try:
            data = json.loads(json_match.group(1))
            if isinstance(data, dict) and 'knowledge' in data:
                return data['knowledge']
            if isinstance(data, list):
                return data
        except json.JSONDecodeError:
            pass

    json_match = re.search(r'(\{[^{}]*"knowledge"[^{}]*\[.*?\])', content, re.DOTALL)
    if json_match:
        try:
            data = json.loads(json_match.group(1))
            return data.get('knowledge', [])
        except json.JSONDecodeError:
            pass

    print("WARNING: Could not parse LLM response as JSON", file=sys.stderr)
    print(f"Response preview: {content[:500]}", file=sys.stderr)
    return None


def load_existing_knowledge(knowledge_dir: str) -> dict:
    index_path = Path(knowledge_dir) / "index.json"
    if not index_path.exists():
        return {"version": 1, "last_updated": "", "total_facts": 0, "facts": []}
    try:
        with open(index_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (json.JSONDecodeError, IOError) as e:
        print(f"WARNING: Could not load knowledge index: {e}", file=sys.stderr)
        return {"version": 1, "last_updated": "", "total_facts": 0, "facts": []}

def fact_fingerprint(fact: dict) -> str:
    text = fact.get('fact', '').lower().strip()
    text = ' '.join(text.split())
    return hashlib.md5(text.encode('utf-8')).hexdigest()


def deduplicate(new_facts: List[dict], existing: List[dict], similarity_threshold: float = 0.8) -> List[dict]:
    existing_fingerprints = set()
    existing_texts = []
    for f in existing:
        fp = fact_fingerprint(f)
        existing_fingerprints.add(fp)
        existing_texts.append(f.get('fact', '').lower().strip())

    unique = []
    for fact in new_facts:
        fp = fact_fingerprint(fact)
        if fp in existing_fingerprints:
            continue

        fact_words = set(fact.get('fact', '').lower().split())
        is_dup = False
        for existing_text in existing_texts:
            existing_words = set(existing_text.split())
            if not fact_words or not existing_words:
                continue
            overlap = len(fact_words & existing_words) / max(len(fact_words | existing_words), 1)
            if overlap >= similarity_threshold:
                is_dup = True
                break

        if not is_dup:
            unique.append(fact)
            existing_fingerprints.add(fp)
            existing_texts.append(fact.get('fact', '').lower().strip())

    return unique

def validate_fact(fact: dict) -> bool:
    required = ['fact', 'category', 'repo', 'confidence']
    for field in required:
        if field not in fact:
            return False
    if not isinstance(fact['fact'], str) or not fact['fact'].strip():
        return False
    valid_categories = ['fact', 'pitfall', 'pattern', 'tool-quirk', 'question']
    if fact['category'] not in valid_categories:
        return False
    if not isinstance(fact.get('confidence', 0), (int, float)):
        return False
    if not (0.0 <= fact['confidence'] <= 1.0):
        return False
    return True

def write_knowledge(index: dict, new_facts: List[dict], knowledge_dir: str, source_session: str = ""):
    kdir = Path(knowledge_dir)
    kdir.mkdir(parents=True, exist_ok=True)

    for fact in new_facts:
        fact['source_session'] = source_session
        fact['harvested_at'] = datetime.now(timezone.utc).isoformat()

    index['facts'].extend(new_facts)
    index['total_facts'] = len(index['facts'])
    index['last_updated'] = datetime.now(timezone.utc).isoformat()

    index_path = kdir / "index.json"
    with open(index_path, 'w', encoding='utf-8') as f:
        json.dump(index, f, indent=2, ensure_ascii=False)

    repos = {}
    for fact in new_facts:
        repo = fact.get('repo', 'global')
        repos.setdefault(repo, []).append(fact)

    for repo, facts in repos.items():
        if repo == 'global':
            md_path = kdir / "global" / "sessions.md"
        else:
            md_path = kdir / "repos" / f"{repo}.md"

        md_path.parent.mkdir(parents=True, exist_ok=True)
        mode = 'a' if md_path.exists() else 'w'
        with open(md_path, mode, encoding='utf-8') as f:
            if mode == 'w':
                f.write(f"# Session Knowledge: {repo}\n\n")
            f.write(f"## Session {Path(source_session).stem} — {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M')}\n\n")
            for fact in facts:
                icon = {'fact': '📋', 'pitfall': '⚠️', 'pattern': '🔄', 'tool-quirk': '🔧', 'question': '❓'}.get(fact['category'], '•')
                f.write(f"- {icon} **{fact['category']}** (conf: {fact['confidence']:.1f}): {fact['fact']}\n")
            f.write("\n")

def extract_session_id(messages: List[dict]) -> str:
    """Derive a stable session ID from messages or return 'unknown'."""
    # Try to find session_id in the first message or use filename from source
    for msg in messages[:3]:
        if msg.get('session_id'):
            return msg['session_id'][:32]
    # Fallback: hash first few messages
    content = str(messages[:3])
    return hashlib.md5(content.encode()).hexdigest()[:12]


def extract_agent(messages: List[dict]) -> Optional[str]:
    """Extract the agent/model name from assistant messages."""
    for msg in messages:
        if msg.get('role') == 'assistant' and msg.get('model'):
            return msg['model']
    return None


def extract_tasks(messages: List[dict]) -> List[str]:
    """Extract the task/goal from the first user message."""
    tasks = []
    for msg in messages:
        if msg.get('role') == 'user' and msg.get('content'):
            content = msg['content']
            if isinstance(content, str) and len(content.strip()) < 500:
                tasks.append(content.strip())
            break  # First user message is usually the task
    return tasks


def extract_tools(messages: List[dict]) -> List[str]:
    """Extract tool names used in the session."""
    tools = set()
    for msg in messages:
        if msg.get('tool_calls'):
            for tc in msg['tool_calls']:
                func = tc.get('function', {})
                name = func.get('name', '')
                if name:
                    tools.add(name)
    return list(tools)


def extract_outcome(messages: List[dict]) -> str:
    """Classify session outcome: success/partial/failure."""
    errors = []
    for msg in messages:
        if msg.get('role') == 'tool' and msg.get('is_error'):
            err = msg.get('content', '')
            if isinstance(err, str):
                errors.append(err.lower())

    if errors:
        if any('405' in e or 'permission' in e or 'authentication' in e for e in errors):
            return 'failure'
        return 'partial'

    # Check last assistant message for success indicators
    last = messages[-1] if messages else {}
    if last.get('role') == 'assistant':
        content = str(last.get('content', ''))
        success_words = ['done', 'completed', 'success', 'merged', 'pushed', 'created', 'saved']
        if any(word in content.lower() for word in success_words):
            return 'success'

    return 'unknown'

def harvest_session(session_path: str, knowledge_dir: str, api_base: str, api_key: str,
                    model: str, dry_run: bool = False, min_confidence: float = 0.3) -> dict:
    """Harvest session entities and relationships from one session."""
    start_time = time.time()
    stats = {
        'session': session_path,
        'facts_found': 0,
        'facts_new': 0,
        'facts_dup': 0,
        'elapsed_seconds': 0,
        'error': None
    }

    try:
        messages = read_session(session_path)
        if not messages:
            stats['error'] = "Empty session file"
            return stats

        conv = extract_conversation(messages)
        if not conv:
            stats['error'] = "No conversation turns found"
            return stats

        truncated = truncate_for_context(conv, head=50, tail=50)
        transcript = messages_to_text(truncated)

        prompt = load_extraction_prompt()
        raw_facts = call_llm(prompt, transcript, api_base, api_key, model)
        if raw_facts is None:
            stats['error'] = "LLM extraction failed"
            return stats

        valid_facts = [f for f in raw_facts if validate_fact(f) and f.get('confidence', 0) >= min_confidence]
        stats['facts_found'] = len(valid_facts)

        existing_index = load_existing_knowledge(knowledge_dir)
        existing_facts = existing_index.get('facts', [])
        new_facts = deduplicate(valid_facts, existing_facts)
        stats['facts_new'] = len(new_facts)
        stats['facts_dup'] = len(valid_facts) - len(new_facts)

        if new_facts and not dry_run:
            write_knowledge(existing_index, new_facts, knowledge_dir, source_session=session_path)

        stats['elapsed_seconds'] = round(time.time() - start_time, 2)
        return stats

    except Exception as e:
        stats['error'] = str(e)
        stats['elapsed_seconds'] = round(time.time() - start_time, 2)
        return stats

def batch_harvest(sessions_dir: str, knowledge_dir: str, api_base: str, api_key: str,
                  model: str, since: str = "", limit: int = 0, dry_run: bool = False) -> List[dict]:
    sessions_path = Path(sessions_dir)
    if not sessions_path.is_dir():
        print(f"ERROR: Sessions directory not found: {sessions_dir}", file=sys.stderr)
        return []

    session_files = sorted(sessions_path.glob("*.jsonl"), reverse=True)

    if since:
        since_dt = datetime.fromisoformat(since.replace('Z', '+00:00'))
        filtered = []
        for sf in session_files:
            try:
                parts = sf.stem.split('_')
                if len(parts) >= 3:
                    date_str = parts[1]
                    file_dt = datetime.strptime(date_str, '%Y%m%d').replace(tzinfo=timezone.utc)
                    if file_dt >= since_dt:
                        filtered.append(sf)
            except (ValueError, IndexError):
                filtered.append(sf)
        session_files = filtered

    if limit > 0:
        session_files = session_files[:limit]

    print(f"Harvesting {len(session_files)} sessions with session knowledge extractor...")

    results = []
    for i, sf in enumerate(session_files, 1):
        print(f"[{i}/{len(session_files)}] {sf.name}...", end=" ", flush=True)
        stats = harvest_session(str(sf), knowledge_dir, api_base, api_key, model, dry_run)
        if stats['error']:
            print(f"ERROR: {stats['error']}")
        else:
            print(f"{stats['facts_new']} new, {stats['facts_dup']} dup ({stats['elapsed_seconds']}s)")
        results.append(stats)

    return results

def main():
    parser = argparse.ArgumentParser(description="Extract session entities and relationships from Hermes transcripts")
    parser.add_argument('--session', help='Path to a single session JSONL file')
    parser.add_argument('--batch', action='store_true', help='Batch mode: process multiple sessions')
    parser.add_argument('--sessions-dir', default=os.path.expanduser('~/.hermes/sessions'),
                        help='Directory containing session files (default: ~/.hermes/sessions)')
    parser.add_argument('--output', default='knowledge', help='Output directory for knowledge store')
    parser.add_argument('--since', default='', help='Only process sessions after this date (YYYY-MM-DD)')
    parser.add_argument('--limit', type=int, default=0, help='Max sessions to process (0=unlimited)')
    parser.add_argument('--api-base', default=DEFAULT_API_BASE, help='LLM API base URL')
    parser.add_argument('--api-key', default='', help='LLM API key (or set EXTRACTOR_API_KEY)')
    parser.add_argument('--model', default=DEFAULT_MODEL, help='Model to use for extraction')
    parser.add_argument('--dry-run', action='store_true', help='Preview without writing to knowledge store')
    parser.add_argument('--min-confidence', type=float, default=0.3, help='Minimum confidence threshold')

    args = parser.parse_args()

    api_key = args.api_key or DEFAULT_API_KEY or find_api_key()
    if not api_key:
        print("ERROR: No API key found. Set EXTRACTOR_API_KEY or store in one of:", file=sys.stderr)
        for p in API_KEY_PATHS:
            print(f"  {p}", file=sys.stderr)
        sys.exit(1)

    knowledge_dir = args.output
    if not os.path.isabs(knowledge_dir):
        knowledge_dir = os.path.join(SCRIPT_DIR.parent, knowledge_dir)

    if args.session:
        stats = harvest_session(
            args.session, knowledge_dir, args.api_base, api_key, args.model,
            dry_run=args.dry_run, min_confidence=args.min_confidence
        )
        print(json.dumps(stats, indent=2))
        if stats['error']:
            sys.exit(1)
    elif args.batch:
        results = batch_harvest(
            args.sessions_dir, knowledge_dir, args.api_base, api_key, args.model,
            since=args.since, limit=args.limit, dry_run=args.dry_run
        )
        total_new = sum(r['facts_new'] for r in results)
        total_dup = sum(r['facts_dup'] for r in results)
        errors = sum(1 for r in results if r['error'])
        print(f"\nDone: {total_new} new facts, {total_dup} duplicates, {errors} errors")
    else:
        parser.print_help()
        sys.exit(1)


if __name__ == '__main__':
    main()
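For orientation, each fact appended to knowledge/index.json by write_knowledge() above ends up shaped roughly like this (values illustrative; source_session and harvested_at are stamped at write time):

    {
      "fact": "session_abc used tool 'terminal' to run git clone",
      "category": "tool-quirk",
      "repo": "global",
      "confidence": 0.9,
      "source_session": "~/.hermes/sessions/session_abc.jsonl",
      "harvested_at": "2026-04-13T10:05:00+00:00"
    }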
@@ -1,197 +0,0 @@
#!/usr/bin/env python3
"""
Smoke test for session knowledge extractor.
Tests: parsing, entity extraction, metadata generation, dedup, store roundtrip.
Does NOT call real LLM — uses mock facts.
"""

import json
import sys
import tempfile
import os
from pathlib import Path

SCRIPT_DIR = Path(__file__).parent.absolute()
sys.path.insert(0, str(SCRIPT_DIR))

from session_reader import read_session, extract_conversation, truncate_for_context, messages_to_text
from session_knowledge_extractor import (
    validate_fact, deduplicate, load_existing_knowledge, fact_fingerprint,
    extract_agent, extract_tasks, extract_tools, extract_outcome,
    write_knowledge
)

def make_test_session():
    """Create a sample Hermes session transcript."""
    messages = [
        {"role": "user", "content": "Clone the compounding-intelligence repo and run tests", "timestamp": "2026-04-13T10:00:00Z"},
        {"role": "assistant", "model": "xiaomi/mimo-v2-pro", "content": "I'll clone the repo and run tests.", "timestamp": "2026-04-13T10:00:02Z",
         "tool_calls": [
             {"function": {"name": "terminal", "arguments": '{"command": "git clone https://forge.alexanderwhitestone.com/Timmy_Foundation/compounding-intelligence.git"}'}},
         ]},
        {"role": "tool", "content": "Cloned successfully", "timestamp": "2026-04-13T10:00:10Z"},
        {"role": "assistant", "model": "xiaomi/mimo-v2-pro", "content": "Now running pytest...", "timestamp": "2026-04-13T10:00:11Z",
         "tool_calls": [
             {"function": {"name": "execute_code", "arguments": '{"code": "import subprocess; subprocess.run([\"pytest\"])"}'}},
         ]},
        {"role": "tool", "content": "15 passed, 0 failed", "timestamp": "2026-04-13T10:00:15Z"},
        {"role": "assistant", "model": "xiaomi/mimo-v2-pro", "content": "All tests passed — done.", "timestamp": "2026-04-13T10:00:16Z"},
    ]
    return messages

def test_extract_entities():
    """Test entity extraction from messages."""
    messages = make_test_session()  # 6 total: 1 user + 3 assistant + 2 tool
    agent = extract_agent(messages)
    assert agent == "xiaomi/mimo-v2-pro"
    tasks = extract_tasks(messages)
    assert len(tasks) >= 1 and "clone" in tasks[0].lower()
    tools = extract_tools(messages)
    assert "terminal" in tools and "execute_code" in tools and len(tools) == 2
    outcome = extract_outcome(messages)
    assert outcome == "success"

    print("  [PASS] entity extraction works")


def test_validate_fact():
    good = {"fact": "Token is at ~/.config/gitea/token", "category": "tool-quirk", "repo": "global", "confidence": 0.9}
    assert validate_fact(good), "Valid fact should pass"

    bad = {"fact": "Something", "category": "nonsense", "repo": "x", "confidence": 0.5}
    assert not validate_fact(bad), "Bad category should fail"

    print("  [PASS] fact validation works")


def test_deduplicate():
    existing = [{"fact": "A", "category": "fact", "repo": "global", "confidence": 0.9}]
    new = [
        {"fact": "A", "category": "fact", "repo": "global", "confidence": 0.9},
        {"fact": "B", "category": "fact", "repo": "global", "confidence": 0.9},
    ]
    result = deduplicate(new, existing)
    assert len(result) == 1 and result[0]["fact"] == "B", "Should remove exact dup"
    print("  [PASS] deduplication works")

def test_knowledge_store_roundtrip():
    with tempfile.TemporaryDirectory() as tmpdir:
        index = load_existing_knowledge(tmpdir)
        assert index["total_facts"] == 0

        new_facts = [
            {"fact": "session_x used terminal", "category": "fact", "repo": "global", "confidence": 0.9},
            {"fact": "session_x task: clone repo", "category": "fact", "repo": "compounding-intelligence", "confidence": 0.9},
            {"fact": "session_x outcome: success", "category": "fact", "repo": "global", "confidence": 0.9},
        ] * 4  # 12 facts total

        write_knowledge(index, new_facts, tmpdir, source_session="session_x.jsonl")

        index2 = load_existing_knowledge(tmpdir)
        assert index2["total_facts"] == 12

        # Verify markdown written
        md_path = Path(tmpdir) / "repos" / "compounding-intelligence.md"
        assert md_path.exists(), "Markdown file should be created"

        print("  [PASS] knowledge store roundtrip works (12 facts)")


def test_min_facts_per_session():
    """Validator: a typical session should yield 10+ facts."""
    # Simulate facts from one session (what the LLM would produce)
    mock_facts = [
        {"fact": "session_123 was handled by model xiaomi/mimo-v2-pro", "category": "fact", "repo": "global", "confidence": 0.95},
        {"fact": "session_123's task was to clone the compounding-intelligence repository", "category": "fact", "repo": "compounding-intelligence", "confidence": 0.9},
        {"fact": "session_123 used tool 'terminal' to run git clone", "category": "tool-quirk", "repo": "global", "confidence": 0.9},
        {"fact": "session_123 used tool 'execute_code' to run pytest", "category": "tool-quirk", "repo": "global", "confidence": 0.9},
        {"fact": "session_123 executed: git clone https://forge...", "category": "fact", "repo": "global", "confidence": 0.9},
        {"fact": "session_123 executed: pytest (15 tests)", "category": "fact", "repo": "compounding-intelligence", "confidence": 0.9},
        {"fact": "session_123 outcome: all 15 tests passed", "category": "fact", "repo": "global", "confidence": 0.95},
        {"fact": "session_123 touched repo: compounding-intelligence", "category": "fact", "repo": "compounding-intelligence", "confidence": 1.0},
        {"fact": "session_123 terminal output: 'Cloned successfully'", "category": "fact", "repo": "global", "confidence": 0.9},
        {"fact": "session_123 test output: '15 passed, 0 failed'", "category": "fact", "repo": "compounding-intelligence", "confidence": 0.9},
        {"fact": "session_123 completed without errors", "category": "fact", "repo": "global", "confidence": 0.85},
        {"fact": "session_123 final message: 'All tests passed — done.'", "category": "fact", "repo": "global", "confidence": 0.9},
    ]
    assert len(mock_facts) >= 10, f"Should have at least 10 facts, got {len(mock_facts)}"
    print(f"  [PASS] mock session produces {len(mock_facts)} facts")

def test_full_chain_no_llm():
    """Full pipeline: read -> extract entities -> validate -> dedup -> store."""
    messages = make_test_session()

    with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
        for msg in messages:
            f.write(json.dumps(msg) + '\n')
        session_path = f.name

    with tempfile.TemporaryDirectory() as knowledge_dir:
        # Step 1: Read
        msgs = read_session(session_path)
        assert len(msgs) == 6  # 1 user + 3 assistant + 2 tool role messages

        # Step 2: Extract conversation
        conv = extract_conversation(msgs)
        assert len(conv) == 4  # 1 user + 3 assistant messages (tool role messages skipped)

        # Step 3: Truncate
        truncated = truncate_for_context(conv, head=50, tail=50)
        transcript = messages_to_text(truncated)
        assert "clone" in transcript.lower()

        # Step 4: Extract entities
        agent = extract_agent(msgs)
        tools = extract_tools(msgs)
        outcome = extract_outcome(msgs)
        assert agent == "xiaomi/mimo-v2-pro"
        assert len(tools) >= 2
        assert outcome == "success"

        # Step 5-7: Simulated LLM output → validate → dedup → store
        # Create 12 distinct facts to meet the 10+ requirement
        mock_facts = [
            {"fact": "Session used tool terminal", "category": "tool-quirk", "repo": "global", "confidence": 0.9},
            {"fact": "Session used tool execute_code", "category": "tool-quirk", "repo": "global", "confidence": 0.9},
            {"fact": f"Session handled by agent {agent}", "category": "fact", "repo": "global", "confidence": 0.95},
            {"fact": "Session task: clone the repository", "category": "fact", "repo": "compounding-intelligence", "confidence": 0.9},
            {"fact": "Session task: run pytest", "category": "fact", "repo": "compounding-intelligence", "confidence": 0.9},
            {"fact": "Session outcome: success", "category": "fact", "repo": "global", "confidence": 0.9},
            {"fact": "Session repo: compounding-intelligence touched", "category": "fact", "repo": "compounding-intelligence", "confidence": 1.0},
            {"fact": "Terminal command executed: git clone", "category": "fact", "repo": "global", "confidence": 0.9},
            {"fact": "Test result: 15 passed, 0 failed", "category": "fact", "repo": "compounding-intelligence", "confidence": 0.95},
            {"fact": "All tests passed — session complete", "category": "fact", "repo": "global", "confidence": 0.9},
            {"fact": "No errors encountered during session", "category": "fact", "repo": "global", "confidence": 0.8},
            {"fact": "Session duration: approximately 16 seconds", "category": "fact", "repo": "global", "confidence": 0.7},
        ]

        valid = [f for f in mock_facts if validate_fact(f)]
        assert len(valid) == 12

        index = load_existing_knowledge(knowledge_dir)
        new_facts = deduplicate(valid, index.get("facts", []))
        assert len(new_facts) == 12

        from session_knowledge_extractor import write_knowledge
        write_knowledge(index, new_facts, knowledge_dir, source_session=session_path)

        index2 = load_existing_knowledge(knowledge_dir)
        assert index2["total_facts"] == 12

    os.unlink(session_path)
    print("  [PASS] full chain (read → entities → validate → dedup → store) works (12 facts)")


if __name__ == "__main__":
    print("Running session knowledge extractor smoke tests...")
    test_extract_entities()
    test_validate_fact()
    test_deduplicate()
    test_knowledge_store_roundtrip()
    test_min_facts_per_session()
    test_full_chain_no_llm()
    print("\nAll tests passed — extractor produces 10+ facts per session ✓")
@@ -1,95 +0,0 @@
# Knowledge Extraction Prompt — Session Entities & Relationships

## System Prompt

You are a session knowledge extraction engine. You read Hermes session transcripts and output ONLY structured JSON. You extract session entities (agent, task, tools, outcome) and the relationships between them. You never invent facts not in the transcript.

## Prompt

```
TASK: Extract knowledge facts from this session transcript. Focus on:

1. AGENT: Which model/agent handled this session
2. TASK: What problem or goal was being solved
3. TOOLS: Which tools were used and what each accomplished
4. OUTCOME: Did the session succeed, partially succeed, or fail?
5. RELATIONSHIPS: How do these entities connect?

RULES:
1. Extract ONLY information explicitly stated or clearly implied by the transcript.
2. Do NOT infer, assume, or hallucinate.
3. Every fact must point to a specific message or tool call as evidence.
4. Generate at least 10 facts. Break complex tool usages into multiple atomic facts.
5. Include relationship facts: "session X used tool Y", "agent Z handled session X", "task W was completed by session X".
6. Include outcome facts: success indicators, error conditions, partial completions.

CATEGORIES (assign exactly one):
- fact: Concrete, verifiable statement (paths, commands, results, configs)
- pitfall: Error hit, wrong assumption, time wasted
- pattern: Successful reusable sequence
- tool-quirk: Environment-specific behavior (token paths, URLs, API gotchas)
- question: Something identified but not answered

CONFIDENCE:
- 0.9: Directly observed with explicit output or verification
- 0.7: Multiple data points confirm, but not explicitly verified
- 0.5: Clear implication but not directly stated
- 0.3: Weak inference from limited evidence

OUTPUT FORMAT (valid JSON only, no markdown, no explanation):
{
  "knowledge": [
    {
      "fact": "One specific sentence of knowledge",
      "category": "fact|pitfall|pattern|tool-quirk|question",
      "repo": "repo-name or global",
      "confidence": 0.0-1.0,
      "evidence": "Brief quote or reference from transcript that supports this"
    }
  ],
  "meta": {
    "session_id": "extracted or generated id",
    "session_outcome": "success|partial|failure|unknown",
    "agent": "model name if identifiable",
    "task": "brief description of the goal",
    "tools_used": ["tool1", "tool2"],
    "repos_touched": ["repo1"],
    "fact_count": 0
  }
}

TRANSCRIPT:
{{transcript}}
```

## Design Notes

### Entity extraction strategy

**Agent:** Look for `"model": "..."` in assistant messages or model mentions in content.

**Task:** The first user message usually states the goal. If vague, look for the assistant's interpretation: "I'll help you X".

**Tools:** Every `tool_calls` entry is a tool use. Extract the function name and what it was used for based on arguments.

**Outcome:** Success indicators: "done", "completed", "merged", "pushed", "created". Failures: HTTP errors (405, 404, 403), stack traces, explicit failures.

**Relationships:** Treat the session as a central entity. Generate facts like the following (a code sketch follows this list):
- Agent relationship: "session_abc was handled by model xiaomi/mimo-v2-pro"
- Task relationship: "session_abc's task was to merge PR #123"
- Tool relationship: "session_abc used terminal to run 'git clone'"
- Outcome relationship: "session_abc outcome: success — PR merged"
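
A minimal sketch of how these relationship facts can be assembled from the extractor's entity helpers (illustrative only; `sid` and `messages` are assumed to come from extract_session_id/read_session):

```python
# Illustrative sketch: build relationship facts from extracted entities.
agent = extract_agent(messages)        # e.g. "xiaomi/mimo-v2-pro"
tools = extract_tools(messages)        # e.g. ["terminal", "execute_code"]
outcome = extract_outcome(messages)    # "success" | "partial" | "failure" | "unknown"
facts = [f"session_{sid} was handled by model {agent}"]
facts += [f"session_{sid} used tool '{t}'" for t in tools]
facts.append(f"session_{sid} outcome: {outcome}")
```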

### 10+ facts guarantee

Each session with tool usage typically yields:
- 1 fact: agent identity
- 1-2 facts: task/goal (decomposed into sub-goals)
- 3-5 facts: each tool call becomes 1-2 facts (tool name + purpose + result)
- 1-2 facts: outcome details
- 1-2 facts: repo touched

Total: 10+ per non-trivial session.

### Token budget

~700 tokens for prompt (excluding transcript). Leaves room for long transcripts.