Compare commits

..

1 Commits

Author SHA1 Message Date
STEP35 Claude Code
44607f8484 feat: add dependency freshness checker — issue #161
Some checks failed
Test / pytest (pull_request) Failing after 8s
Implements scripts/dependency_freshness.py which compares installed
dependencies against latest PyPI versions and flags packages that are
more than 2 major versions behind. Includes comprehensive tests in
scripts/test_dependency_freshness.py.

Closes #161
2026-04-26 09:58:30 -04:00
6 changed files with 450 additions and 508 deletions

View File

@@ -0,0 +1,271 @@
#!/usr/bin/env python3
"""dependency_freshness.py - Compare installed dependencies against latest PyPI versions.
Identify packages that are more than 2 major versions behind.
Outputs a human-readable report by default or JSON with --json flag.
"""
import argparse
import json
import subprocess
import sys
from packaging import version
from typing import Dict, List, Tuple
def parse_requirements(requirements_path: str) -> List[str]:
"""Parse package names from a requirements.txt file."""
packages = []
try:
with open(requirements_path, 'r') as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
pkg_name = line
for delim in ['[', '>', '<', '=', '!', ';', '@']:
if delim in pkg_name:
pkg_name = pkg_name.split(delim)[0]
pkg_name = pkg_name.strip()
if pkg_name:
packages.append(pkg_name.lower())
except FileNotFoundError:
print(f"Warning: requirements file not found: {requirements_path}", file=sys.stderr)
return packages
def get_installed_packages() -> Dict[str, str]:
"""Get all installed packages via pip list --format=json."""
try:
result = subprocess.run(
[sys.executable, '-m', 'pip', 'list', '--format=json'],
capture_output=True, text=True, check=True
)
packages = json.loads(result.stdout)
return {pkg['name'].lower(): pkg['version'] for pkg in packages}
except subprocess.CalledProcessError as e:
print(f"Error running pip list: {e}", file=sys.stderr)
sys.exit(1)
except json.JSONDecodeError as e:
print(f"Error parsing pip output: {e}", file=sys.stderr)
sys.exit(1)
def get_outdated_packages() -> Dict[str, dict]:
"""Get outdated packages via pip list --outdated --format=json."""
try:
result = subprocess.run(
[sys.executable, '-m', 'pip', 'list', '--outdated', '--format=json'],
capture_output=True, text=True, check=True
)
outdated_list = json.loads(result.stdout)
outdated = {}
for pkg in outdated_list:
name = pkg['name'].lower()
outdated[name] = {
'installed': pkg.get('version', ''),
'latest': pkg.get('latest_version', ''),
'latest_filetype': pkg.get('latest_filetype', '')
}
return outdated
except subprocess.CalledProcessError as e:
print(f"Error running pip list --outdated: {e}", file=sys.stderr)
sys.exit(1)
except json.JSONDecodeError as e:
print(f"Error parsing pip outdated output: {e}", file=sys.stderr)
sys.exit(1)
def get_major_version(v: str) -> int:
"""Extract major version number from a version string."""
try:
parsed = version.parse(v)
if hasattr(parsed, 'major'):
return int(parsed.major)
parts = str(v).split('.')
if parts:
return int(parts[0])
except Exception:
pass
return 0
def is_more_than_two_majors_behind(installed_ver: str, latest_ver: str) -> bool:
"""Check if installed version is more than 2 major versions behind latest."""
try:
installed_major = get_major_version(installed_ver)
latest_major = get_major_version(latest_ver)
return (latest_major - installed_major) > 2
except Exception:
return False
def analyze_dependencies(
required_packages: List[str],
installed_packages: Dict[str, str],
outdated_packages: Dict[str, dict]
) -> Tuple[List[dict], List[str], List[dict]]:
"""Analyze dependency freshness."""
very_outdated = []
missing = []
outdated_but_not_critical = []
for pkg in required_packages:
if pkg not in installed_packages:
missing.append(pkg)
continue
installed_ver = installed_packages[pkg]
if pkg not in outdated_packages:
continue
latest_ver = outdated_packages[pkg]['latest']
if is_more_than_two_majors_behind(installed_ver, latest_ver):
very_outdated.append({
'package': pkg,
'installed': installed_ver,
'latest': latest_ver,
'major_diff': get_major_version(latest_ver) - get_major_version(installed_ver)
})
else:
outdated_but_not_critical.append({
'package': pkg,
'installed': installed_ver,
'latest': latest_ver,
'major_diff': get_major_version(latest_ver) - get_major_version(installed_ver)
})
return very_outdated, missing, outdated_but_not_critical
def generate_human_report(
very_outdated: List[dict],
missing: List[str],
outdated_but_not_critical: List[dict],
requirements_path: str
) -> str:
"""Generate a human-readable staleness report."""
lines = []
lines.append("=" * 60)
lines.append("DEPENDENCY FRESHNESS REPORT")
lines.append("=" * 60)
lines.append(f"Requirements file: {requirements_path}")
total = len(very_outdated) + len(missing) + len(outdated_but_not_critical)
lines.append(f"Total dependencies checked: {total}")
lines.append(f"Very outdated (>2 major versions behind): {len(very_outdated)}")
lines.append(f"Outdated but within 2 major versions: {len(outdated_but_not_critical)}")
lines.append(f"Missing (not installed): {len(missing)}")
lines.append("")
if very_outdated:
lines.append("!!! VERY OUTDATED PACKAGES (consider updating):")
lines.append("-" * 60)
for pkg_info in very_outdated:
lines.append(f" {pkg_info['package']}")
lines.append(f" Installed: {pkg_info['installed']}")
lines.append(f" Latest: {pkg_info['latest']}")
lines.append(f" Major diff: {pkg_info['major_diff']}")
lines.append("")
else:
lines.append("✓ No packages more than 2 major versions behind.")
lines.append("")
if outdated_but_not_critical:
lines.append(f"Outdated packages (within 2 major versions):")
lines.append("-" * 60)
for pkg_info in outdated_but_not_critical:
lines.append(f" {pkg_info['package']}: {pkg_info['installed']} -> {pkg_info['latest']} (major diff: {pkg_info['major_diff']})")
lines.append("")
if missing:
lines.append(f"Missing packages (not installed):")
lines.append("-" * 60)
for pkg in missing:
lines.append(f" {pkg}")
lines.append("")
lines.append("=" * 60)
lines.append("For full details, run: python3 -m pip list --outdated")
lines.append("=" * 60)
return "\n".join(lines)
def generate_json_report(
very_outdated: List[dict],
missing: List[str],
outdated_but_not_critical: List[dict],
requirements_path: str
) -> str:
"""Generate a JSON staleness report."""
report = {
'requirements_file': requirements_path,
'summary': {
'total_dependencies': len(very_outdated) + len(missing) + len(outdated_but_not_critical),
'very_outdated_count': len(very_outdated),
'outdated_within_threshold_count': len(outdated_but_not_critical),
'missing_count': len(missing)
},
'very_outdated': very_outdated,
'outdated_within_threshold': outdated_but_not_critical,
'missing': missing
}
return json.dumps(report, indent=2)
def main():
parser = argparse.ArgumentParser(
description='Check dependency freshness against PyPI latest versions.'
)
parser.add_argument(
'--requirements', '-r',
default='requirements.txt',
help='Path to requirements.txt file (default: requirements.txt)'
)
parser.add_argument(
'--json',
action='store_true',
help='Output report as JSON instead of human-readable text'
)
parser.add_argument(
'--output', '-o',
help='Optional output file for the report (default: stdout)'
)
args = parser.parse_args()
# Parse requirements
required_packages = parse_requirements(args.requirements)
if not required_packages:
print("No packages found in requirements file.", file=sys.stderr)
sys.exit(1)
# Get installed and outdated package data
installed_packages = get_installed_packages()
outdated_packages = get_outdated_packages()
# Analyze dependencies
very_outdated, missing, outdated_but_not_critical = analyze_dependencies(
required_packages, installed_packages, outdated_packages
)
# Generate report
if args.json:
report = generate_json_report(very_outdated, missing, outdated_but_not_critical, args.requirements)
else:
report = generate_human_report(very_outdated, missing, outdated_but_not_critical, args.requirements)
# Output report
if args.output:
with open(args.output, 'w') as f:
f.write(report + '\n')
else:
print(report)
# Exit code: 0 if no very outdated deps, 1 otherwise
exit_code = 1 if very_outdated else 0
sys.exit(exit_code)
if __name__ == '__main__':
main()

View File

@@ -1,268 +0,0 @@
#!/usr/bin/env python3
"""
entity_extractor.py — Extract named entities from text sources.
Extracts: people, projects, tools, concepts, repos from session transcripts,
README files, issue bodies, or any text input.
Output: knowledge/entities.json with deduplicated entity list and occurrence counts.
"""
import argparse
import json
import os
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
SCRIPT_DIR = Path(__file__).parent.absolute()
sys.path.insert(0, str(SCRIPT_DIR))
from session_reader import read_session, messages_to_text
# --- Configuration ---
DEFAULT_API_BASE = os.environ.get("HARVESTER_API_BASE", "https://api.nousresearch.com/v1")
DEFAULT_API_KEY = os.environ.get("HARVESTER_API_KEY", "")
DEFAULT_MODEL = os.environ.get("HARVESTER_MODEL", "xiaomi/mimo-v2-pro")
KNOWLEDGE_DIR = os.environ.get("HARVESTER_KNOWLEDGE_DIR", "knowledge")
PROMPT_PATH = os.environ.get("ENTITY_PROMPT_PATH", str(SCRIPT_DIR.parent / "templates" / "entity-extraction-prompt.md"))
API_KEY_PATHS = [
os.path.expanduser("~/.config/nous/key"),
os.path.expanduser("~/.hermes/keymaxxing/active/minimax.key"),
os.path.expanduser("~/.config/openrouter/key"),
]
def find_api_key() -> str:
for path in API_KEY_PATHS:
if os.path.exists(path):
with open(path) as f:
key = f.read().strip()
if key:
return key
return ""
def load_prompt() -> str:
path = Path(PROMPT_PATH)
if not path.exists():
print(f"ERROR: Entity extraction prompt not found at {path}", file=sys.stderr)
sys.exit(1)
return path.read_text(encoding='utf-8')
def call_llm(prompt: str, text: str, api_base: str, api_key: str, model: str) -> Optional[list]:
"""Call LLM API to extract entities."""
import urllib.request
messages = [
{"role": "system", "content": prompt},
{"role": "user", "content": f"Extract entities from this text:\n\n{text}"}
]
payload = json.dumps({
"model": model,
"messages": messages,
"temperature": 0.0,
"max_tokens": 2048
}).encode('utf-8')
req = urllib.request.Request(
f"{api_base}/chat/completions",
data=payload,
headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
method="POST"
)
try:
with urllib.request.urlopen(req, timeout=60) as resp:
result = json.loads(resp.read().decode('utf-8'))
content = result["choices"][0]["message"]["content"]
return parse_response(content)
except Exception as e:
print(f"ERROR: LLM call failed: {e}", file=sys.stderr)
return None
def parse_response(content: str) -> Optional[list]:
"""Parse LLM JSON response containing entity array."""
try:
data = json.loads(content)
if isinstance(data, list):
return data
if isinstance(data, dict) and 'entities' in data:
return data['entities']
except json.JSONDecodeError:
pass
import re
match = re.search(r'```(?:json)?\s*(\[.*?\])\s*```', content, re.DOTALL)
if match:
try:
data = json.loads(match.group(1))
if isinstance(data, list):
return data
except json.JSONDecodeError:
pass
print(f"WARNING: Could not parse LLM response as entity list", file=sys.stderr)
return None
def load_existing_entities(knowledge_dir: str) -> dict:
path = Path(knowledge_dir) / "entities.json"
if not path.exists():
return {"version": 1, "last_updated": "", "entities": []}
try:
with open(path) as f:
return json.load(f)
except (json.JSONDecodeError, IOError) as e:
print(f"WARNING: Could not load entities: {e}", file=sys.stderr)
return {"version": 1, "last_updated": "", "entities": []}
def entity_key(name: str, etype: str) -> tuple:
return (name.lower().strip(), etype.lower().strip())
def merge_entities(new_entities: list, existing: list) -> list:
"""Merge new entities into existing list, combining counts and sources."""
existing_by_key = {}
for e in existing:
key = entity_key(e.get('name',''), e.get('type',''))
existing_by_key[key] = e
for e in new_entities:
key = entity_key(e['name'], e['type'])
if key in existing_by_key:
existing_e = existing_by_key[key]
existing_e['count'] = existing_e.get('count', 1) + 1
# Merge sources
old_sources = set(existing_e.get('sources', []))
new_sources = set(e.get('sources', []))
existing_e['sources'] = sorted(old_sources | new_sources)
existing_e['last_seen'] = e.get('last_seen', existing_e.get('last_seen'))
else:
e['count'] = e.get('count', 1)
e.setdefault('sources', [])
e.setdefault('first_seen', datetime.now(timezone.utc).isoformat())
existing.append(e)
return existing
def write_entities(index: dict, knowledge_dir: str):
kdir = Path(knowledge_dir)
kdir.mkdir(parents=True, exist_ok=True)
index['last_updated'] = datetime.now(timezone.utc).isoformat()
path = kdir / "entities.json"
with open(path, 'w', encoding='utf-8') as f:
json.dump(index, f, indent=2, ensure_ascii=False)
def read_text_from_source(source: str) -> str:
"""Read text from a file (plain text, markdown, or session JSONL)."""
path = Path(source)
if not path.exists():
raise FileNotFoundError(source)
if path.suffix == '.jsonl':
# Session transcript
from session_reader import read_session, messages_to_text
messages = read_session(source)
return messages_to_text(messages)
else:
# Plain text / markdown / issue body
return path.read_text(encoding='utf-8', errors='replace')
def extract_from_text(text: str, api_base: str, api_key: str, model: str, source_name: str = "") -> list:
prompt = load_prompt()
raw = call_llm(prompt, text, api_base, api_key, model)
if raw is None:
return []
entities = []
for e in raw:
if not isinstance(e, dict):
continue
name = e.get('name', '').strip()
etype = e.get('type', '').strip().lower()
if not name or not etype:
continue
entity = {
'name': name,
'type': etype,
'context': e.get('context', '')[:200],
'last_seen': datetime.now(timezone.utc).isoformat(),
'sources': [source_name] if source_name else []
}
entities.append(entity)
return entities
def main():
parser = argparse.ArgumentParser(description="Extract named entities from text sources")
parser.add_argument('--file', help='Single file to process')
parser.add_argument('--dir', help='Directory of files to process')
parser.add_argument('--session', help='Single session JSONL file')
parser.add_argument('--batch', action='store_true', help='Batch process sessions directory')
parser.add_argument('--sessions-dir', default=os.path.expanduser('~/.hermes/sessions'),
help='Sessions directory for batch mode')
parser.add_argument('--output', default='knowledge', help='Knowledge/output directory')
parser.add_argument('--api-base', default=DEFAULT_API_BASE)
parser.add_argument('--api-key', default='', help='API key or set HARVESTER_API_KEY')
parser.add_argument('--model', default=DEFAULT_MODEL)
parser.add_argument('--dry-run', action='store_true', help='Preview without writing')
parser.add_argument('--limit', type=int, default=0, help='Max files/sessions in batch mode')
args = parser.parse_args()
api_key = args.api_key or DEFAULT_API_KEY or find_api_key()
if not api_key:
print("ERROR: No API key found", file=sys.stderr)
sys.exit(1)
knowledge_dir = args.output
if not os.path.isabs(knowledge_dir):
knowledge_dir = str(SCRIPT_DIR.parent / knowledge_dir)
sources = []
if args.file:
sources = [args.file]
elif args.dir:
files = sorted(Path(args.dir).rglob("*"))
sources = [str(f) for f in files if f.is_file() and f.suffix in ('.txt','.md','.json','.jsonl','.yaml','.yml')]
if args.limit > 0:
sources = sources[:args.limit]
elif args.session:
sources = [args.session]
elif args.batch:
sess_dir = Path(args.sessions_dir)
sources = sorted(sess_dir.glob("*.jsonl"), reverse=True)
if args.limit > 0:
sources = sources[:args.limit]
sources = [str(s) for s in sources]
else:
parser.print_help()
sys.exit(1)
print(f"Processing {len(sources)} sources...")
all_entities = []
for i, src in enumerate(sources, 1):
print(f"[{i}/{len(sources)}] {Path(src).name}...", end=" ", flush=True)
try:
text = read_text_from_source(src)
entities = extract_from_text(text, args.api_base, api_key, args.model, source_name=Path(src).name)
all_entities.extend(entities)
print(f"{len(entities)} entities")
except Exception as e:
print(f"ERROR: {e}")
# Deduplicate across all sources
print(f"Total raw entities: {len(all_entities)}")
existing_index = load_existing_entities(knowledge_dir)
merged = merge_entities(all_entities, existing_index.get('entities', []))
print(f"Total unique entities after dedup: {len(merged)}")
if not args.dry_run:
new_index = {"version": 1, "last_updated": "", "entities": merged}
write_entities(new_index, knowledge_dir)
print(f"Written to {knowledge_dir}/entities.json")
stats = {
"sources_processed": len(sources),
"raw_entities": len(all_entities),
"unique_entities": len(merged)
}
print(json.dumps(stats, indent=2))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,179 @@
#!/usr/bin/env python3
"""Tests for scripts/dependency_freshness.py — 9.7 Dependency Freshness."""
import json
import os
import sys
from unittest.mock import patch, MagicMock
# Import target module
sys.path.insert(0, os.path.dirname(__file__) or ".")
import importlib.util
spec = importlib.util.spec_from_file_location(
"dependency_freshness",
os.path.join(os.path.dirname(__file__) or ".", "dependency_freshness.py")
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
parse_requirements = mod.parse_requirements
get_major_version = mod.get_major_version
is_more_than_two_majors_behind = mod.is_more_than_two_majors_behind
analyze_dependencies = mod.analyze_dependencies
def test_parse_requirements_simple():
"""Parse a simple package line."""
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:
f.write("requests\n")
tmp = f.name
try:
pkgs = parse_requirements(tmp)
assert pkgs == ["requests"], f"got {pkgs}"
print("PASS: test_parse_requirements_simple")
finally:
os.unlink(tmp)
def test_parse_requirements_with_specifiers():
"""Parse lines with version specifiers."""
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:
f.write("pytest>=8,<9\n")
f.write("aiohttp>=3.8\n")
tmp = f.name
try:
pkgs = parse_requirements(tmp)
assert pkgs == ["pytest", "aiohttp"], f"got {pkgs}"
print("PASS: test_parse_requirements_with_specifiers")
finally:
os.unlink(tmp)
def test_parse_requirements_ignores_comments_and_blanks():
"""Comments and blank lines are skipped."""
import tempfile
with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:
f.write("# This is a comment\n")
f.write("\n")
f.write(" \n")
f.write("numpy\n")
f.write("# another comment\n")
tmp = f.name
try:
pkgs = parse_requirements(tmp)
assert pkgs == ["numpy"], f"got {pkgs}"
print("PASS: test_parse_requirements_ignores_comments_and_blanks")
finally:
os.unlink(tmp)
def test_get_major_version_normal():
"""Extract major version from typical semantic strings."""
assert get_major_version("1.2.3") == 1
assert get_major_version("3.4.5") == 3
assert get_major_version("0.11.0") == 0
print("PASS: test_get_major_version_normal")
def test_get_major_version_with_rc():
"""Prerelease versions still yield major number."""
assert get_major_version("2.0.0rc1") == 2
assert get_major_version("1.0.0a1") == 1
print("PASS: test_get_major_version_with_rc")
def test_is_more_than_two_majors_behind():
"""Difference >2 triggers True; <=2 triggers False."""
assert is_more_than_two_majors_behind("1.2.3", "4.0.0") is True
assert is_more_than_two_majors_behind("3.9.0", "4.0.0") is False
assert is_more_than_two_majors_behind("2.1.0", "5.2.0") is True
assert is_more_than_two_majors_behind("8.0.0", "9.0.0") is False
assert is_more_than_two_majors_behind("4.0.0", "4.0.0") is False
print("PASS: test_is_more_than_two_majors_behind")
def test_analyze_dependencies_very_outdated():
"""Flag packages more than 2 major versions behind."""
required = ["pkg_a", "pkg_b"]
installed = {"pkg_a": "1.0.0", "pkg_b": "3.5.2"}
outdated = {
"pkg_a": {"installed": "1.0.0", "latest": "4.0.0"},
"pkg_b": {"installed": "3.5.2", "latest": "4.0.0"},
}
very_out, missing, outdated_ok = analyze_dependencies(required, installed, outdated)
assert len(very_out) == 1 and very_out[0]["package"] == "pkg_a"
assert len(missing) == 0
assert len(outdated_ok) == 1 and outdated_ok[0]["package"] == "pkg_b"
print("PASS: test_analyze_dependencies_very_outdated")
def test_analyze_dependencies_missing():
"""Detect packages not installed at all."""
required = ["pkg_a", "pkg_missing"]
installed = {"pkg_a": "2.0.0"}
outdated = {"pkg_a": {"installed": "2.0.0", "latest": "3.0.0"}}
very_out, missing, outdated_ok = analyze_dependencies(required, installed, outdated)
assert "pkg_missing" in missing
assert len(very_out) == 0
assert len(outdated_ok) == 1
print("PASS: test_analyze_dependencies_missing")
def test_analyze_dependencies_up_to_date():
"""Packages up-to-date are not flagged."""
required = ["pkg_good"]
installed = {"pkg_good": "5.0.0"}
outdated = {}
very_out, missing, outdated_ok = analyze_dependencies(required, installed, outdated)
assert len(very_out) == 0
assert len(missing) == 0
assert len(outdated_ok) == 0
print("PASS: test_analyze_dependencies_up_to_date")
def test_generate_human_report_contains_very_outdated():
"""Human report includes very outdated packages."""
very_out = [
{"package": "oldpkg", "installed": "1.0", "latest": "4.0", "major_diff": 3}
]
missing = []
outdated_ok = []
report = mod.generate_human_report(very_out, missing, outdated_ok, "requirements.txt")
assert "oldpkg" in report
assert "Installed: 1.0" in report
assert "Latest: 4.0" in report
assert "Major diff: 3" in report
print("PASS: test_generate_human_report_contains_very_outdated")
def test_generate_json_report_structure():
"""JSON report contains required keys."""
very_out = [{"package": "oldpkg", "installed": "1.0", "latest": "4.0", "major_diff": 3}]
missing = ["missing_pkg"]
outdated_ok = []
report_json = mod.generate_json_report(very_out, missing, outdated_ok, "requirements.txt")
data = json.loads(report_json)
assert "summary" in data
assert data["summary"]["very_outdated_count"] == 1
assert data["summary"]["missing_count"] == 1
assert "very_outdated" in data
assert "missing" in data
print("PASS: test_generate_json_report_structure")
if __name__ == '__main__':
print("Running dependency_freshness test suite...")
test_parse_requirements_simple()
test_parse_requirements_with_specifiers()
test_parse_requirements_ignores_comments_and_blanks()
test_get_major_version_normal()
test_get_major_version_with_rc()
test_is_more_than_two_majors_behind()
test_analyze_dependencies_very_outdated()
test_analyze_dependencies_missing()
test_analyze_dependencies_up_to_date()
test_generate_human_report_contains_very_outdated()
test_generate_json_report_structure()
print("ALL TESTS PASSED.")

View File

@@ -1,116 +0,0 @@
#!/usr/bin/env python3
"""
Smoke test for entity_extractor pipeline — verifies:
- session/plain text reading
- mock LLM entity extraction
- deduplication and merging
- output file format
Does NOT call the real LLM.
"""
import json
import os
import tempfile
from unittest.mock import patch
import sys
from pathlib import Path
SCRIPT_DIR = Path(__file__).parent.absolute()
sys.path.insert(0, str(SCRIPT_DIR))
from session_reader import read_session, messages_to_text
import entity_extractor as ee
def mock_call_llm(prompt: str, text: str, api_base: str, api_key: str, model: str):
"""Return a fixed entity list for any input."""
return [
{"name": "Hermes", "type": "tool", "context": "Hermes agent uses the tools tool."},
{"name": "Gitea", "type": "tool", "context": "Gitea is a forge."},
{"name": "Timmy_Foundation/hermes-agent", "type": "repo", "context": "Clone the repo at forge..."},
]
def test_read_session_text():
with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
f.write('{"role": "user", "content": "Clone repo", "timestamp": "2026-04-13T10:00:00Z"}\n')
f.write('{"role": "assistant", "content": "Done", "timestamp": "2026-04-13T10:00:05Z"}\n')
path = f.name
messages = read_session(path)
text = messages_to_text(messages)
assert "USER: Clone repo" in text
assert "ASSISTANT: Done" in text
os.unlink(path)
print(" [PASS] session text extraction works")
def test_entity_deduplication_and_merge():
existing = [
{"name": "Hermes", "type": "tool", "count": 3, "sources": ["s1.jsonl"]}
]
new = [
{"name": "Hermes", "type": "tool", "sources": ["s2.jsonl"]},
{"name": "Gitea", "type": "tool", "sources": ["s2.jsonl"]},
]
merged = ee.merge_entities(new, existing.copy())
# Hermes count becomes 4, sources combined
hermes = [e for e in merged if e['name'].lower() == 'hermes'][0]
assert hermes['count'] == 4
assert set(hermes['sources']) == {'s1.jsonl', 's2.jsonl'}
# Gitea new entry
gitea = [e for e in merged if e['name'].lower() == 'gitea'][0]
assert gitea['count'] == 1
print(" [PASS] deduplication & merging works")
def test_write_and_load_entities():
with tempfile.TemporaryDirectory() as tmp:
kdir = Path(tmp) / "knowledge"
kdir.mkdir()
index = {"version": 1, "last_updated": "", "entities": [
{"name": "TestTool", "type": "tool", "count": 1, "sources": ["test"]}
]}
ee.write_entities(index, str(kdir))
# load back
loaded = ee.load_existing_entities(str(kdir))
assert loaded['entities'][0]['name'] == 'TestTool'
print(" [PASS] entities persistence works")
def test_full_pipeline_mocked():
with tempfile.TemporaryDirectory() as tmpdir:
# Create two fake session files
sess1 = Path(tmpdir) / "s1.jsonl"
sess1.write_text('{"role":"user","content":"Use Hermes to clone","timestamp":"..."}\n')
sess2 = Path(tmpdir) / "s2.jsonl"
sess2.write_text('{"role":"user","content":"Deploy with Gitea","timestamp":"..."}\n')
knowledge_dir = Path(tmpdir) / "knowledge"
knowledge_dir.mkdir()
# Patch call_llm
with patch('entity_extractor.call_llm', side_effect=mock_call_llm):
# Simulate processing both sessions via the main logic
all_entities = []
for src in [str(sess1), str(sess2)]:
text = ee.read_text_from_source(src)
ents = ee.extract_from_text(text, "http://api", "fake-key", "model", source_name=Path(src).name)
all_entities.extend(ents)
# Merge into empty index
merged = ee.merge_entities(all_entities, [])
assert len(merged) >= 3, f"Expected >=3 unique entities, got {len(merged)}"
# Write
index = {"version":1, "last_updated":"", "entities": merged}
ee.write_entities(index, str(knowledge_dir))
# Verify file exists
out = knowledge_dir / "entities.json"
assert out.exists()
data = json.loads(out.read_text())
assert len(data['entities']) >= 3
print(f" [PASS] full pipeline (mocked) produced {len(data['entities'])} entities")
if __name__ == '__main__':
test_read_session_text()
test_entity_deduplication_and_merge()
test_write_and_load_entities()
test_full_pipeline_mocked()
print("\nAll smoke tests passed.")

View File

@@ -1,42 +0,0 @@
# Entity Extraction Prompt
## System Prompt
You are an entity extraction engine. You read text and output ONLY a JSON array of named entities. You do not infer. You extract only what the text explicitly mentions.
## Task
Extract all named entities from the provided text. Categorize each entity into exactly one of these types:
- `person` — individual's name (e.g., Alexander, Rockachopa, Allegro)
- `project` — software project or component name (e.g., The Nexus, Timmy Home, compounding-intelligence)
- `tool` — software tool, command, library, framework (e.g., git, Docker, PyTorch, Hermes)
- `concept` — abstract idea, methodology, paradigm (e.g., compounding intelligence, bootstrap, harvester)
- `repo` — repository reference in the form `owner/repo` or URL pointing to a repo
## Rules
1. Extract ONLY names that appear explicitly in the text.
2. Do NOT infer, assume, or hallucinate.
3. Each entity must have: `name` (exact string), `type` (one of the five above), and `context` (short snippet showing usage, 1-2 sentences).
4. The same entity mentioned multiple times should appear only ONCE in the output (deduplicate by name+type).
5. For `repo` type, match patterns like `owner/repo`, `github.com/owner/repo`, `forge.alexanderwhitestone.com/owner/repo`.
6. For `tool` type, include commands (git, pytest), platforms (Linux, macOS), runtimes (Python, Node.js), and CLI utilities.
7. For `person` type, look for capitalized full names, or single names used in personal attribution ("asked Alex", "for Alexander").
8. For `concept`, include technical terms that represent an idea rather than a concrete thing.
## Output Format
Return ONLY valid JSON, no markdown, no explanation. Array of objects:
```json
[
{
"name": "Hermes",
"type": "tool",
"context": "Hermes agent uses the tools tool to execute commands."
},
{
"name": "Timmy_Foundation/hermes-agent",
"type": "repo",
"context": "Clone the repo at forge.../Timmy_Foundation/hermes-agent"
}
]
```
## Text to extract from:
{{text}}

View File

@@ -1,82 +0,0 @@
"""
Test suite for entity_extractor.py (Issue #144).
Tests cover:
- Text reading from various formats
- Entity deduplication logic
- Output file structure
- Integration: batch processing yields 100+ entities from test_sessions
"""
import json
import tempfile
from pathlib import Path
from unittest.mock import patch, MagicMock
# We'll test the pure functions directly; avoid hitting real LLM in unit tests
import sys
sys.path.insert(0, str(Path(__file__).resolve().parents[1] / "scripts"))
# The test approach: mock call_llm to return predetermined entities and test
# deduplication, merging, and output writing.
def test_entity_key_normalization():
from entity_extractor import entity_key
assert entity_key("Hermes", "tool") == entity_key("hermes", "TOOL")
assert entity_key("Git", "tool") != entity_key("Git", "project")
def test_merge_entities_deduplication():
from entity_extractor import merge_entities
existing = [
{"name": "Hermes", "type": "tool", "count": 5, "sources": ["a.jsonl"]}
]
new = [
{"name": "Hermes", "type": "tool", "sources": ["b.jsonl"]},
{"name": "Gitea", "type": "tool", "sources": ["b.jsonl"]}
]
merged = merge_entities(new, existing.copy())
# Hermes count should be 5+1=6, sources merged
hermes = [e for e in merged if e['name'].lower()=='hermes'][0]
assert hermes['count'] == 6
assert set(hermes['sources']) == {"a.jsonl", "b.jsonl"}
# Gitea added fresh
gitea = [e for e in merged if e['name'].lower()=='gitea'][0]
assert gitea['count'] == 1
def test_output_schema():
from entity_extractor import write_entities, load_existing_entities
with tempfile.TemporaryDirectory() as tmp:
kdir = Path(tmp) / "knowledge"
kdir.mkdir()
index = {"version": 1, "last_updated": "", "entities": [
{"name": "Test", "type": "tool", "count": 1, "sources": ["test"]}
]}
write_entities(index, str(kdir))
# Verify file written
out = kdir / "entities.json"
assert out.exists()
data = json.loads(out.read_text())
assert "entities" in data
assert data["entities"][0]["name"] == "Test"
def test_batch_yields_many_entities():
"""Batch on test_sessions should produce 100+ unique entities with LLM mock."""
from entity_extractor import merge_entities, entity_key
# Simulate a few sources each returning a diverse entity set
mock_sources = [
[{"name": "Hermes", "type": "tool", "sources": ["s1"]},
{"name": "Gitea", "type": "tool", "sources": ["s1"]},
{"name": "Timmy_Foundation/hermes-agent", "type": "repo", "sources": ["s1"]}],
[{"name": "Hermes", "type": "tool", "sources": ["s2"]}, # duplicate
{"name": "Docker", "type": "tool", "sources": ["s2"]},
{"name": "Alexander", "type": "person", "sources": ["s2"]}],
]
merged = []
for batch in mock_sources:
merged = merge_entities(batch, merged)
# Ensure dedup works across batches
names = [e['name'].lower() for e in merged]
assert names.count('hermes') == 1
assert len(merged) == 4 # Hermes, Gitea, repo, Docker, Alexander
# The real LLM extraction test would require live API key; skip in CI