Compare commits: step35/144...step35/173 (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 2f57c2b653 |  |
scripts/entity_extractor.py (deleted file)
@@ -1,268 +0,0 @@
#!/usr/bin/env python3
"""
entity_extractor.py — Extract named entities from text sources.

Extracts: people, projects, tools, concepts, repos from session transcripts,
README files, issue bodies, or any text input.

Output: knowledge/entities.json with deduplicated entity list and occurrence counts.
"""

import argparse
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

SCRIPT_DIR = Path(__file__).parent.absolute()
sys.path.insert(0, str(SCRIPT_DIR))

from session_reader import read_session, messages_to_text

# --- Configuration ---
DEFAULT_API_BASE = os.environ.get("HARVESTER_API_BASE", "https://api.nousresearch.com/v1")
DEFAULT_API_KEY = os.environ.get("HARVESTER_API_KEY", "")
DEFAULT_MODEL = os.environ.get("HARVESTER_MODEL", "xiaomi/mimo-v2-pro")
KNOWLEDGE_DIR = os.environ.get("HARVESTER_KNOWLEDGE_DIR", "knowledge")
PROMPT_PATH = os.environ.get("ENTITY_PROMPT_PATH", str(SCRIPT_DIR.parent / "templates" / "entity-extraction-prompt.md"))

API_KEY_PATHS = [
    os.path.expanduser("~/.config/nous/key"),
    os.path.expanduser("~/.hermes/keymaxxing/active/minimax.key"),
    os.path.expanduser("~/.config/openrouter/key"),
]


def find_api_key() -> str:
    for path in API_KEY_PATHS:
        if os.path.exists(path):
            with open(path) as f:
                key = f.read().strip()
            if key:
                return key
    return ""


def load_prompt() -> str:
    path = Path(PROMPT_PATH)
    if not path.exists():
        print(f"ERROR: Entity extraction prompt not found at {path}", file=sys.stderr)
        sys.exit(1)
    return path.read_text(encoding='utf-8')


def call_llm(prompt: str, text: str, api_base: str, api_key: str, model: str) -> Optional[list]:
    """Call LLM API to extract entities."""
    import urllib.request

    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": f"Extract entities from this text:\n\n{text}"}
    ]

    payload = json.dumps({
        "model": model,
        "messages": messages,
        "temperature": 0.0,
        "max_tokens": 2048
    }).encode('utf-8')

    req = urllib.request.Request(
        f"{api_base}/chat/completions",
        data=payload,
        headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
        method="POST"
    )

    try:
        with urllib.request.urlopen(req, timeout=60) as resp:
            result = json.loads(resp.read().decode('utf-8'))
            content = result["choices"][0]["message"]["content"]
            return parse_response(content)
    except Exception as e:
        print(f"ERROR: LLM call failed: {e}", file=sys.stderr)
        return None


def parse_response(content: str) -> Optional[list]:
    """Parse LLM JSON response containing entity array."""
    try:
        data = json.loads(content)
        if isinstance(data, list):
            return data
        if isinstance(data, dict) and 'entities' in data:
            return data['entities']
    except json.JSONDecodeError:
        pass
    # Fallback: the model may have wrapped the array in a fenced code block
    import re
    match = re.search(r'```(?:json)?\s*(\[.*?\])\s*```', content, re.DOTALL)
    if match:
        try:
            data = json.loads(match.group(1))
            if isinstance(data, list):
                return data
        except json.JSONDecodeError:
            pass
    print("WARNING: Could not parse LLM response as entity list", file=sys.stderr)
    return None


def load_existing_entities(knowledge_dir: str) -> dict:
    path = Path(knowledge_dir) / "entities.json"
    if not path.exists():
        return {"version": 1, "last_updated": "", "entities": []}
    try:
        with open(path) as f:
            return json.load(f)
    except (json.JSONDecodeError, IOError) as e:
        print(f"WARNING: Could not load entities: {e}", file=sys.stderr)
        return {"version": 1, "last_updated": "", "entities": []}


def entity_key(name: str, etype: str) -> tuple:
    return (name.lower().strip(), etype.lower().strip())


def merge_entities(new_entities: list, existing: list) -> list:
    """Merge new entities into existing list, combining counts and sources."""
    existing_by_key = {}
    for e in existing:
        key = entity_key(e.get('name', ''), e.get('type', ''))
        existing_by_key[key] = e

    for e in new_entities:
        key = entity_key(e['name'], e['type'])
        if key in existing_by_key:
            existing_e = existing_by_key[key]
            existing_e['count'] = existing_e.get('count', 1) + 1
            # Merge sources
            old_sources = set(existing_e.get('sources', []))
            new_sources = set(e.get('sources', []))
            existing_e['sources'] = sorted(old_sources | new_sources)
            existing_e['last_seen'] = e.get('last_seen', existing_e.get('last_seen'))
        else:
            e['count'] = e.get('count', 1)
            e.setdefault('sources', [])
            e.setdefault('first_seen', datetime.now(timezone.utc).isoformat())
            existing.append(e)
            existing_by_key[key] = e  # so later duplicates within this batch merge too

    return existing


def write_entities(index: dict, knowledge_dir: str):
    kdir = Path(knowledge_dir)
    kdir.mkdir(parents=True, exist_ok=True)
    index['last_updated'] = datetime.now(timezone.utc).isoformat()
    path = kdir / "entities.json"
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(index, f, indent=2, ensure_ascii=False)


def read_text_from_source(source: str) -> str:
    """Read text from a file (plain text, markdown, or session JSONL)."""
    path = Path(source)
    if not path.exists():
        raise FileNotFoundError(source)
    if path.suffix == '.jsonl':
        # Session transcript
        messages = read_session(source)
        return messages_to_text(messages)
    else:
        # Plain text / markdown / issue body
        return path.read_text(encoding='utf-8', errors='replace')


def extract_from_text(text: str, api_base: str, api_key: str, model: str, source_name: str = "") -> list:
    prompt = load_prompt()
    raw = call_llm(prompt, text, api_base, api_key, model)
    if raw is None:
        return []
    entities = []
    for e in raw:
        if not isinstance(e, dict):
            continue
        name = e.get('name', '').strip()
        etype = e.get('type', '').strip().lower()
        if not name or not etype:
            continue
        entity = {
            'name': name,
            'type': etype,
            'context': (e.get('context') or '')[:200],
            'last_seen': datetime.now(timezone.utc).isoformat(),
            'sources': [source_name] if source_name else []
        }
        entities.append(entity)
    return entities


def main():
    parser = argparse.ArgumentParser(description="Extract named entities from text sources")
    parser.add_argument('--file', help='Single file to process')
    parser.add_argument('--dir', help='Directory of files to process')
    parser.add_argument('--session', help='Single session JSONL file')
    parser.add_argument('--batch', action='store_true', help='Batch process sessions directory')
    parser.add_argument('--sessions-dir', default=os.path.expanduser('~/.hermes/sessions'),
                        help='Sessions directory for batch mode')
    parser.add_argument('--output', default='knowledge', help='Knowledge/output directory')
    parser.add_argument('--api-base', default=DEFAULT_API_BASE)
    parser.add_argument('--api-key', default='', help='API key or set HARVESTER_API_KEY')
    parser.add_argument('--model', default=DEFAULT_MODEL)
    parser.add_argument('--dry-run', action='store_true', help='Preview without writing')
    parser.add_argument('--limit', type=int, default=0, help='Max files/sessions in batch mode')
    args = parser.parse_args()

    api_key = args.api_key or DEFAULT_API_KEY or find_api_key()
    if not api_key:
        print("ERROR: No API key found", file=sys.stderr)
        sys.exit(1)

    knowledge_dir = args.output
    if not os.path.isabs(knowledge_dir):
        knowledge_dir = str(SCRIPT_DIR.parent / knowledge_dir)

    sources = []
    if args.file:
        sources = [args.file]
    elif args.dir:
        files = sorted(Path(args.dir).rglob("*"))
        sources = [str(f) for f in files if f.is_file() and f.suffix in ('.txt', '.md', '.json', '.jsonl', '.yaml', '.yml')]
        if args.limit > 0:
            sources = sources[:args.limit]
    elif args.session:
        sources = [args.session]
    elif args.batch:
        sess_dir = Path(args.sessions_dir)
        sources = sorted(sess_dir.glob("*.jsonl"), reverse=True)
        if args.limit > 0:
            sources = sources[:args.limit]
        sources = [str(s) for s in sources]
    else:
        parser.print_help()
        sys.exit(1)

    print(f"Processing {len(sources)} sources...")
    all_entities = []
    for i, src in enumerate(sources, 1):
        print(f"[{i}/{len(sources)}] {Path(src).name}...", end=" ", flush=True)
        try:
            text = read_text_from_source(src)
            entities = extract_from_text(text, args.api_base, api_key, args.model, source_name=Path(src).name)
            all_entities.extend(entities)
            print(f"→ {len(entities)} entities")
        except Exception as e:
            print(f"ERROR: {e}")

    # Deduplicate across all sources
    print(f"Total raw entities: {len(all_entities)}")
    existing_index = load_existing_entities(knowledge_dir)
    merged = merge_entities(all_entities, existing_index.get('entities', []))
    print(f"Total unique entities after dedup: {len(merged)}")

    if not args.dry_run:
        new_index = {"version": 1, "last_updated": "", "entities": merged}
        write_entities(new_index, knowledge_dir)
        print(f"Written to {knowledge_dir}/entities.json")

    stats = {
        "sources_processed": len(sources),
        "raw_entities": len(all_entities),
        "unique_entities": len(merged)
    }
    print(json.dumps(stats, indent=2))


if __name__ == '__main__':
    main()
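For orientation, a minimal sketch of the two response shapes `parse_response` tolerates: a bare JSON array, or an array wrapped in a fenced code block. The sample strings are invented, and the sketch assumes `scripts/` is on `sys.path`:

```python
# Hypothetical LLM responses; parse_response should accept both shapes.
from entity_extractor import parse_response

bare = '[{"name": "git", "type": "tool", "context": "git clone"}]'
fenced = "```json\n" + bare + "\n```"  # model wrapped the array in a code fence

assert parse_response(bare) == parse_response(fenced)
assert parse_response('{"entities": []}') == []  # a dict with an "entities" key also works
```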
scripts/progress_tracker.py (new file, 477 lines)
@@ -0,0 +1,477 @@
#!/usr/bin/env python3
"""
Progress Tracker — Pipeline 10.8
Track improvement metrics over time. Are we getting better?

Metrics tracked:
1. Test coverage — % of Python functions with associated tests (test:source file ratio + line coverage if available)
2. Doc coverage — % of Python callables with docstrings (AST-based)
3. Issue close rate — closed / (opened + closed) per week (Gitea API)
4. Dep freshness — % of requirements pinned vs outdated (pip list --outdated)

Output:
- metrics/snapshots/YYYY-MM-DD.json — one snapshot per run
- metrics/TRENDS.md — cumulative markdown table
- stdout summary

Usage:
    python3 scripts/progress_tracker.py
    python3 scripts/progress_tracker.py --json
    python3 scripts/progress_tracker.py --output metrics/TRENDS.md

Weekly cron:
    0 9 * * 1 cd /path/to/compounding-intelligence && python3 scripts/progress_tracker.py
"""

import argparse
import ast
import json
import re
import subprocess
import sys
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from urllib.request import Request, urlopen

# ── Configuration ──────────────────────────────────────────────────────────

SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent
METRICS_DIR = REPO_ROOT / "metrics"
SNAPSHOTS_DIR = METRICS_DIR / "snapshots"
TOKEN_PATH = Path.home() / ".config" / "gitea" / "token"
GITEA_API_BASE = "https://forge.alexanderwhitestone.com/api/v1"
ORG = "Timmy_Foundation"

SKIP_DIR_NAMES = ('node_modules', 'venv', '.venv', 'env', '.pytest_cache')

# Ensure paths exist
SNAPSHOTS_DIR.mkdir(parents=True, exist_ok=True)


# ── Helpers ─────────────────────────────────────────────────────────────────

def run_cmd(cmd: List[str], cwd: Path = REPO_ROOT) -> str:
    """Run a command and return stdout, or "" if it fails, times out, or is missing."""
    try:
        result = subprocess.run(
            cmd, capture_output=True, text=True, cwd=cwd, timeout=30
        )
    except (OSError, subprocess.SubprocessError):
        return ""
    if result.returncode != 0:
        return ""
    return result.stdout.strip()


def slugify_date(dt: datetime) -> str:
    return dt.strftime("%Y-%m-%d")


def snapshot_path(dt: datetime) -> Path:
    return SNAPSHOTS_DIR / f"{slugify_date(dt)}.json"


def load_snapshots() -> List[Dict[str, Any]]:
    """Load all existing snapshots sorted by date."""
    snapshots = []
    for f in sorted(SNAPSHOTS_DIR.glob("*.json")):
        try:
            with open(f) as fp:
                snapshots.append(json.load(fp))
        except Exception:
            continue
    return snapshots


def classify_python_files() -> Tuple[List[Path], List[Path]]:
    """Split repo .py files into (source_files, test_files), skipping hidden/vendor dirs."""
    source_files: List[Path] = []
    test_files: List[Path] = []
    for p in REPO_ROOT.rglob("*.py"):
        try:
            rel_parts = p.relative_to(REPO_ROOT).parts
        except ValueError:
            continue

        # Skip hidden/cache/temp dirs (check only relative parts)
        if any(part.startswith('.') or part.startswith('__') for part in rel_parts):
            continue
        if any(part in SKIP_DIR_NAMES for part in rel_parts):
            continue

        if p.name.startswith("test_") or p.name.endswith("_test.py"):
            test_files.append(p)
        else:
            source_files.append(p)
    return source_files, test_files


# ── Metric 1: Test Coverage ─────────────────────────────────────────────────

def collect_test_coverage() -> Dict[str, Any]:
    """
    Compute test coverage metrics.
    Counts test_*.py and *_test.py files vs non-test .py source files.
    Also attempts to read .coverage if present.
    """
    source_files, test_files = classify_python_files()

    # Try to get line coverage from .coverage
    coverage_percent = None
    coverage_tool = None
    coverage_file = REPO_ROOT / ".coverage"
    if coverage_file.exists():
        try:
            import coverage  # type: ignore
            # Use the coverage API if the package is importable
            cov = coverage.Coverage(data_file=str(coverage_file))
            cov.load()
            total = cov.report()
            coverage_percent = total if isinstance(total, float) else None
            coverage_tool = "coverage"
        except Exception:
            # Fallback: parse `coverage report` CLI output
            out = run_cmd(["coverage", "report", "--skip-empty"])
            for line in out.splitlines():
                if "TOTAL" in line:
                    parts = line.split()
                    if len(parts) >= 2:
                        try:
                            coverage_percent = float(parts[-1].rstrip('%'))
                            coverage_tool = "coverage"
                            break
                        except ValueError:
                            pass

    return {
        "test_files": len(test_files),
        "source_files": len(source_files),
        "test_to_source_ratio": round(len(test_files) / len(source_files), 4) if source_files else 0.0,
        "coverage_tool": coverage_tool,
        "coverage_percent": coverage_percent,
    }


# ── Metric 2: Doc Coverage ──────────────────────────────────────────────────

def collect_doc_coverage() -> Dict[str, Any]:
    """
    Check the AST of Python files for docstrings.
    Returns: callables_total, callables_with_doc, doc_coverage_percent
    """
    source_files, test_files = classify_python_files()

    total_callables = 0
    with_doc = 0

    for p in source_files + test_files:
        try:
            with open(p) as f:
                tree = ast.parse(f.read(), filename=str(p))
            for node in ast.walk(tree):
                if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
                    total_callables += 1
                    doc = ast.get_docstring(node)
                    if doc and doc.strip():
                        with_doc += 1
        except Exception:
            continue

    return {
        "callables_total": total_callables,
        "callables_with_doc": with_doc,
        "doc_coverage_percent": round((with_doc / total_callables * 100) if total_callables else 0.0, 2),
    }


# ── Metric 3: Issue Close Rate ──────────────────────────────────────────────

def collect_issue_metrics() -> Dict[str, Any]:
    """
    Use the Gitea API to get issue open/close stats for the last 7 days.
    Returns counts and close rate.
    """
    token = ""
    if TOKEN_PATH.exists():
        token = TOKEN_PATH.read_text().strip()

    if not token:
        return {
            "opened_last_7d": None,
            "closed_last_7d": None,
            "close_rate": None,
            "total_open": None,
            "note": "Gitea token not available"
        }

    now = datetime.now(timezone.utc)
    week_ago = now - timedelta(days=7)
    since = week_ago.strftime("%Y-%m-%d")

    headers = {"Authorization": f"token {token}"}
    base_url = f"{GITEA_API_BASE}/repos/{ORG}/compounding-intelligence/issues"

    try:
        # Issues updated in the last 7 days
        url = f"{base_url}?state=all&since={since}&per_page=100"
        req = Request(url, headers=headers)
        with urlopen(req, timeout=15) as resp:
            issues = json.loads(resp.read())

        opened = 0
        closed = 0
        for issue in issues:
            created = datetime.fromisoformat(issue["created_at"].replace("Z", "+00:00"))
            if created >= week_ago:
                opened += 1
            if issue.get("state") == "closed":
                closed_at_str = issue.get("closed_at")
                if closed_at_str:
                    closed_at = datetime.fromisoformat(closed_at_str.replace("Z", "+00:00"))
                    if closed_at >= week_ago:
                        closed += 1

        # Total open issues (count comes back in the X-Total-Count header)
        req2 = Request(f"{base_url}?state=open&per_page=1", headers=headers)
        with urlopen(req2, timeout=15) as resp:
            total_open = int(resp.headers.get("X-Total-Count", "0"))

        total = opened + closed
        close_rate = closed / total if total > 0 else 0.0

        return {
            "opened_last_7d": opened,
            "closed_last_7d": closed,
            "close_rate": round(close_rate, 4),
            "total_open": total_open,
        }
    except Exception as e:
        return {
            "opened_last_7d": None,
            "closed_last_7d": None,
            "close_rate": None,
            "total_open": None,
            "error": str(e)[:100],
            "note": "Gitea API unavailable"
        }


# ── Metric 4: Dependency Freshness ─────────────────────────────────────────

def collect_dep_freshness() -> Dict[str, Any]:
    """
    Check requirements.txt for outdated dependencies using pip list --outdated.
    Returns freshness percentage and outdated list.
    """
    req_file = REPO_ROOT / "requirements.txt"
    if not req_file.exists():
        return {
            "total_deps": 0,
            "outdated_deps": 0,
            "freshness_percent": 100.0,
            "outdated_list": [],
            "note": "requirements.txt not found"
        }

    # Parse requirements (very simple: take the name before any comparison op)
    reqs = []
    with open(req_file) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            m = re.match(r"^([a-zA-Z0-9_.-]+)", line)
            if m:
                reqs.append(m.group(1))

    if not reqs:
        return {"total_deps": 0, "outdated_deps": 0, "freshness_percent": 100.0, "outdated_list": []}

    # Query pip for outdated packages (may fail if pip is not available)
    outdated_names = set()
    try:
        out = run_cmd(["pip", "list", "--outdated", "--format=json"])
        if out:
            data = json.loads(out)
            outdated_names = {item["name"].lower() for item in data}
    except Exception:
        pass

    outdated = [p for p in reqs if p.lower() in outdated_names]
    total = len(reqs)
    outdated_count = len(outdated)
    freshness = round(((total - outdated_count) / total * 100) if total else 100.0, 1)

    return {
        "total_deps": total,
        "outdated_deps": outdated_count,
        "freshness_percent": freshness,
        "outdated_list": outdated,
    }


# ── Snapshot & Trends ───────────────────────────────────────────────────────

def take_snapshot() -> Dict[str, Any]:
    """Collect all metrics and return a snapshot dict."""
    now = datetime.now(timezone.utc)
    return {
        "timestamp": now.isoformat(),
        "date": slugify_date(now),
        "metrics": {
            "test_coverage": collect_test_coverage(),
            "doc_coverage": collect_doc_coverage(),
            "issues": collect_issue_metrics(),
            "dependencies": collect_dep_freshness(),
        }
    }


def save_snapshot(snapshot: Dict[str, Any]) -> Path:
    path = snapshot_path(datetime.fromisoformat(snapshot["timestamp"]))
    with open(path, "w") as f:
        json.dump(snapshot, f, indent=2)
    return path


def generate_trends(snapshots: List[Dict[str, Any]], output_path: Optional[Path] = None) -> str:
    """Generate a markdown trends table; optionally write it to a file."""
    if not snapshots:
        msg = "# Progress Tracker — Trends\n\nNo snapshots yet. Run `progress_tracker.py` to create the first snapshot."
        if output_path:
            output_path.parent.mkdir(parents=True, exist_ok=True)
            output_path.write_text(msg)
        return msg

    lines = [
        "# Progress Tracker — Trends",
        f"\nLast updated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}",
        f"\nSnapshots: {len(snapshots)}\n",
        "| Date | Test Files → Source | Doc Coverage | Issues Closed/Opened (7d) | Dep Freshness |",
        "|------|---------------------|--------------|---------------------------|---------------|",
    ]

    for snap in reversed(snapshots):  # newest first
        date = snap["date"]
        m = snap["metrics"]
        tc = m["test_coverage"]
        test_str = f"{tc['test_files']}/{tc['source_files']} ({tc['test_to_source_ratio']:.2f})"
        doc_str = f"{m['doc_coverage']['doc_coverage_percent']:.1f}%"
        closed_7d = m['issues'].get('closed_last_7d')
        opened_7d = m['issues'].get('opened_last_7d')
        issues_str = f"{'-' if closed_7d is None else closed_7d}/{'-' if opened_7d is None else opened_7d}"
        dep_str = f"{m['dependencies'].get('freshness_percent', '?')}%"
        lines.append(f"| {date} | {test_str} | {doc_str} | {issues_str} | {dep_str} |")

    # Current snapshot summary
    cur = snapshots[-1]
    cm = cur["metrics"]
    lines.append(f"\n## Current Snapshot ({cur['date']})\n")

    tc = cm["test_coverage"]
    if tc["coverage_percent"] is not None:
        lines.append(f"- Test coverage: {tc['coverage_percent']:.1f}% (via {tc['coverage_tool']})\n")
    else:
        lines.append("- Test coverage: (pytest-cov not configured)\n")
    lines.append(f"- Doc coverage: {cm['doc_coverage']['doc_coverage_percent']:.1f}%")

    im = cm["issues"]
    if im.get("close_rate") is not None:
        lines.append(f"- Issue close rate (7d): {im['close_rate']*100:.1f}% ({im['closed_last_7d']} closed, {im['opened_last_7d']} opened)")
    else:
        lines.append(f"- Issue metrics: {im.get('note', 'unavailable')}")

    dd = cm["dependencies"]
    lines.append(f"- Dep freshness: {dd.get('freshness_percent', '?')}% ({dd.get('outdated_deps', 0)}/{dd.get('total_deps', 0)} deps outdated)")
    if dd.get('outdated_list'):
        lines.append(f"  Outdated: {', '.join(dd['outdated_list'][:5])}")

    content = "\n".join(lines) + "\n"

    if output_path:
        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_text(content)

    return content


# ── Main ─────────────────────────────────────────────────────────────────────

def main() -> int:
    parser = argparse.ArgumentParser(description="Progress Tracker — Pipeline 10.8")
    parser.add_argument("--json", action="store_true", help="Emit snapshot as JSON only")
    parser.add_argument("--output", type=Path, default=METRICS_DIR / "TRENDS.md",
                        help="Write trends markdown to this file")
    args = parser.parse_args()

    snapshot = take_snapshot()
    path_written = save_snapshot(snapshot)
    # Reload after saving so a same-day re-run does not duplicate today's row
    all_snapshots = load_snapshots()

    if args.json:
        print(json.dumps(snapshot, indent=2))
        return 0

    generate_trends(all_snapshots, output_path=args.output)

    # Print current snapshot summary
    print(f"Snapshot saved: {path_written}\n")
    print(f"Progress Tracker — {snapshot['date']}")
    print("=" * 50)

    m = snapshot["metrics"]
    tc = m["test_coverage"]
    print(f"Test files: {tc['test_files']} | Source files: {tc['source_files']} | Ratio: {tc['test_to_source_ratio']:.3f}")
    if tc["coverage_percent"] is not None:
        print(f"Line coverage: {tc['coverage_percent']:.1f}% (via {tc['coverage_tool']})")
    else:
        print("Line coverage: (not available — run `pytest --cov`)")

    print()
    dc = m["doc_coverage"]
    print(f"Callables with docstrings: {dc['callables_with_doc']}/{dc['callables_total']} ({dc['doc_coverage_percent']:.1f}%)")

    print()
    im = m["issues"]
    if im.get("close_rate") is not None:
        print(f"Issues (7d): {im['closed_last_7d']} closed / {im['opened_last_7d']} opened → close rate: {im['close_rate']*100:.1f}%")
        print(f"Total open: {im['total_open']}")
    else:
        print(f"Issues: {im.get('note', 'unavailable')}")

    print()
    dd = m["dependencies"]
    print(f"Dependencies: {dd.get('total_deps', 0)} total, {dd.get('outdated_deps', 0)} outdated")
    if dd.get('outdated_list'):
        shown = dd['outdated_list'][:5]
        print(f"Outdated: {', '.join(shown)}" + ("..." if len(dd['outdated_list']) > 5 else ""))

    print(f"\nTrends written to: {args.output}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
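For reference, a hedged sketch of the snapshot document `take_snapshot()` writes to `metrics/snapshots/YYYY-MM-DD.json`. The field names mirror the four collectors above; the values are invented but internally consistent (close rate 5/(3+5) = 0.625; freshness (6-1)/6 ≈ 83.3%):

```python
# Illustrative snapshot shape (all values invented), matching take_snapshot() above.
example_snapshot = {
    "timestamp": "2026-04-13T09:00:00+00:00",
    "date": "2026-04-13",
    "metrics": {
        "test_coverage": {
            "test_files": 4, "source_files": 12, "test_to_source_ratio": 0.3333,
            "coverage_tool": None, "coverage_percent": None,  # no .coverage file present
        },
        "doc_coverage": {
            "callables_total": 80, "callables_with_doc": 52, "doc_coverage_percent": 65.0,
        },
        "issues": {
            "opened_last_7d": 3, "closed_last_7d": 5, "close_rate": 0.625, "total_open": 11,
        },
        "dependencies": {
            "total_deps": 6, "outdated_deps": 1, "freshness_percent": 83.3,
            "outdated_list": ["requests"],  # hypothetical package name
        },
    },
}
```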
(deleted file — smoke test for entity_extractor)
@@ -1,116 +0,0 @@
#!/usr/bin/env python3
"""
Smoke test for the entity_extractor pipeline — verifies:
- session/plain text reading
- mock LLM entity extraction
- deduplication and merging
- output file format

Does NOT call the real LLM.
"""

import json
import os
import sys
import tempfile
from pathlib import Path
from unittest.mock import patch

SCRIPT_DIR = Path(__file__).parent.absolute()
sys.path.insert(0, str(SCRIPT_DIR))

from session_reader import read_session, messages_to_text
import entity_extractor as ee


def mock_call_llm(prompt: str, text: str, api_base: str, api_key: str, model: str):
    """Return a fixed entity list for any input."""
    return [
        {"name": "Hermes", "type": "tool", "context": "Hermes agent uses the tools tool."},
        {"name": "Gitea", "type": "tool", "context": "Gitea is a forge."},
        {"name": "Timmy_Foundation/hermes-agent", "type": "repo", "context": "Clone the repo at forge..."},
    ]


def test_read_session_text():
    with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
        f.write('{"role": "user", "content": "Clone repo", "timestamp": "2026-04-13T10:00:00Z"}\n')
        f.write('{"role": "assistant", "content": "Done", "timestamp": "2026-04-13T10:00:05Z"}\n')
        path = f.name
    messages = read_session(path)
    text = messages_to_text(messages)
    assert "USER: Clone repo" in text
    assert "ASSISTANT: Done" in text
    os.unlink(path)
    print(" [PASS] session text extraction works")


def test_entity_deduplication_and_merge():
    existing = [
        {"name": "Hermes", "type": "tool", "count": 3, "sources": ["s1.jsonl"]}
    ]
    new = [
        {"name": "Hermes", "type": "tool", "sources": ["s2.jsonl"]},
        {"name": "Gitea", "type": "tool", "sources": ["s2.jsonl"]},
    ]
    merged = ee.merge_entities(new, existing.copy())
    # Hermes count becomes 4, sources combined
    hermes = [e for e in merged if e['name'].lower() == 'hermes'][0]
    assert hermes['count'] == 4
    assert set(hermes['sources']) == {'s1.jsonl', 's2.jsonl'}
    # Gitea is a new entry
    gitea = [e for e in merged if e['name'].lower() == 'gitea'][0]
    assert gitea['count'] == 1
    print(" [PASS] deduplication & merging works")


def test_write_and_load_entities():
    with tempfile.TemporaryDirectory() as tmp:
        kdir = Path(tmp) / "knowledge"
        kdir.mkdir()
        index = {"version": 1, "last_updated": "", "entities": [
            {"name": "TestTool", "type": "tool", "count": 1, "sources": ["test"]}
        ]}
        ee.write_entities(index, str(kdir))
        # Load it back
        loaded = ee.load_existing_entities(str(kdir))
        assert loaded['entities'][0]['name'] == 'TestTool'
    print(" [PASS] entities persistence works")


def test_full_pipeline_mocked():
    with tempfile.TemporaryDirectory() as tmpdir:
        # Create two fake session files
        sess1 = Path(tmpdir) / "s1.jsonl"
        sess1.write_text('{"role":"user","content":"Use Hermes to clone","timestamp":"..."}\n')
        sess2 = Path(tmpdir) / "s2.jsonl"
        sess2.write_text('{"role":"user","content":"Deploy with Gitea","timestamp":"..."}\n')

        knowledge_dir = Path(tmpdir) / "knowledge"
        knowledge_dir.mkdir()

        # Patch call_llm so no real API is hit
        with patch('entity_extractor.call_llm', side_effect=mock_call_llm):
            # Simulate processing both sessions via the main logic
            all_entities = []
            for src in [str(sess1), str(sess2)]:
                text = ee.read_text_from_source(src)
                ents = ee.extract_from_text(text, "http://api", "fake-key", "model", source_name=Path(src).name)
                all_entities.extend(ents)

        # Merge into an empty index
        merged = ee.merge_entities(all_entities, [])
        assert len(merged) >= 3, f"Expected >=3 unique entities, got {len(merged)}"

        # Write
        index = {"version": 1, "last_updated": "", "entities": merged}
        ee.write_entities(index, str(knowledge_dir))

        # Verify the output file exists and has the merged entities
        out = knowledge_dir / "entities.json"
        assert out.exists()
        data = json.loads(out.read_text())
        assert len(data['entities']) >= 3
        print(f" [PASS] full pipeline (mocked) produced {len(data['entities'])} entities")


if __name__ == '__main__':
    test_read_session_text()
    test_entity_deduplication_and_merge()
    test_write_and_load_entities()
    test_full_pipeline_mocked()
    print("\nAll smoke tests passed.")
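To make the merge arithmetic concrete, a small sketch (again assuming `scripts/` is importable) showing that each repeated occurrence of the same name+type key bumps `count` by one while sources accumulate:

```python
from entity_extractor import merge_entities

merged = []
for i in range(5):  # five sources, each mentioning git once (source names made up)
    merged = merge_entities([{"name": "git", "type": "tool", "sources": [f"s{i}"]}], merged)

assert len(merged) == 1                # deduplicated to a single entity
assert merged[0]["count"] == 5         # one increment per duplicate occurrence
assert merged[0]["sources"] == ["s0", "s1", "s2", "s3", "s4"]
```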
templates/entity-extraction-prompt.md (deleted file)
@@ -1,42 +0,0 @@
# Entity Extraction Prompt

## System Prompt
You are an entity extraction engine. You read text and output ONLY a JSON array of named entities. You do not infer. You extract only what the text explicitly mentions.

## Task
Extract all named entities from the provided text. Categorize each entity into exactly one of these types:
- `person` — an individual's name (e.g., Alexander, Rockachopa, Allegro)
- `project` — software project or component name (e.g., The Nexus, Timmy Home, compounding-intelligence)
- `tool` — software tool, command, library, framework (e.g., git, Docker, PyTorch, Hermes)
- `concept` — abstract idea, methodology, paradigm (e.g., compounding intelligence, bootstrap, harvester)
- `repo` — repository reference in the form `owner/repo` or a URL pointing to a repo

## Rules
1. Extract ONLY names that appear explicitly in the text.
2. Do NOT infer, assume, or hallucinate.
3. Each entity must have: `name` (exact string), `type` (one of the five above), and `context` (a short snippet showing usage, 1-2 sentences).
4. The same entity mentioned multiple times should appear only ONCE in the output (deduplicate by name+type).
5. For `repo` type, match patterns like `owner/repo`, `github.com/owner/repo`, `forge.alexanderwhitestone.com/owner/repo`.
6. For `tool` type, include commands (git, pytest), platforms (Linux, macOS), runtimes (Python, Node.js), and CLI utilities.
7. For `person` type, look for capitalized full names, or single names used in personal attribution ("asked Alex", "for Alexander").
8. For `concept`, include technical terms that represent an idea rather than a concrete thing.

## Output Format
Return ONLY valid JSON, no markdown, no explanation. Array of objects:
```json
[
  {
    "name": "Hermes",
    "type": "tool",
    "context": "Hermes agent uses the tools tool to execute commands."
  },
  {
    "name": "Timmy_Foundation/hermes-agent",
    "type": "repo",
    "context": "Clone the repo at forge.../Timmy_Foundation/hermes-agent"
  }
]
```

## Text to extract from:
{{text}}
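Note how `entity_extractor.call_llm` wires this template in: the file is sent verbatim as the system message, and the input text goes into the user turn rather than being substituted into the `{{text}}` placeholder. A minimal sketch (the sample text is made up):

```python
# How the extractor above feeds this template to the chat endpoint.
from entity_extractor import load_prompt

text = "Clone Timmy_Foundation/hermes-agent with git."  # made-up input
messages = [
    {"role": "system", "content": load_prompt()},  # this template, verbatim
    {"role": "user", "content": f"Extract entities from this text:\n\n{text}"},
]
```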
(deleted file — test suite for entity_extractor, Issue #144)
@@ -1,82 +0,0 @@
"""
Test suite for entity_extractor.py (Issue #144).

Tests cover:
- Text reading from various formats
- Entity deduplication logic
- Output file structure
- Integration: batch processing yields 100+ entities from test_sessions
"""

import json
import sys
import tempfile
from pathlib import Path

# We test the pure functions directly; avoid hitting the real LLM in unit tests
sys.path.insert(0, str(Path(__file__).resolve().parents[1] / "scripts"))

# The test approach: mock call_llm to return predetermined entities and test
# deduplication, merging, and output writing.


def test_entity_key_normalization():
    from entity_extractor import entity_key
    assert entity_key("Hermes", "tool") == entity_key("hermes", "TOOL")
    assert entity_key("Git", "tool") != entity_key("Git", "project")


def test_merge_entities_deduplication():
    from entity_extractor import merge_entities
    existing = [
        {"name": "Hermes", "type": "tool", "count": 5, "sources": ["a.jsonl"]}
    ]
    new = [
        {"name": "Hermes", "type": "tool", "sources": ["b.jsonl"]},
        {"name": "Gitea", "type": "tool", "sources": ["b.jsonl"]}
    ]
    merged = merge_entities(new, existing.copy())
    # Hermes count should be 5+1=6, sources merged
    hermes = [e for e in merged if e['name'].lower() == 'hermes'][0]
    assert hermes['count'] == 6
    assert set(hermes['sources']) == {"a.jsonl", "b.jsonl"}
    # Gitea added fresh
    gitea = [e for e in merged if e['name'].lower() == 'gitea'][0]
    assert gitea['count'] == 1


def test_output_schema():
    from entity_extractor import write_entities
    with tempfile.TemporaryDirectory() as tmp:
        kdir = Path(tmp) / "knowledge"
        kdir.mkdir()
        index = {"version": 1, "last_updated": "", "entities": [
            {"name": "Test", "type": "tool", "count": 1, "sources": ["test"]}
        ]}
        write_entities(index, str(kdir))
        # Verify the file was written
        out = kdir / "entities.json"
        assert out.exists()
        data = json.loads(out.read_text())
        assert "entities" in data
        assert data["entities"][0]["name"] == "Test"


def test_batch_yields_many_entities():
    """Batch on test_sessions should produce 100+ unique entities with the LLM mocked."""
    from entity_extractor import merge_entities
    # Simulate a few sources, each returning a diverse entity set
    mock_sources = [
        [{"name": "Hermes", "type": "tool", "sources": ["s1"]},
         {"name": "Gitea", "type": "tool", "sources": ["s1"]},
         {"name": "Timmy_Foundation/hermes-agent", "type": "repo", "sources": ["s1"]}],
        [{"name": "Hermes", "type": "tool", "sources": ["s2"]},  # duplicate
         {"name": "Docker", "type": "tool", "sources": ["s2"]},
         {"name": "Alexander", "type": "person", "sources": ["s2"]}],
    ]
    merged = []
    for batch in mock_sources:
        merged = merge_entities(batch, merged)
    # Ensure dedup works across batches
    names = [e['name'].lower() for e in merged]
    assert names.count('hermes') == 1
    assert len(merged) == 5  # Hermes, Gitea, the repo, Docker, Alexander

# The real LLM extraction test would require a live API key; skip in CI
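For completeness, the on-disk shape these tests exercise — a hedged sketch of `knowledge/entities.json` as written by `write_entities` (all values invented; keys mirror `extract_from_text` and `merge_entities`):

```python
# Illustrative entities.json content after a merged run.
example_index = {
    "version": 1,
    "last_updated": "2026-04-13T10:05:00+00:00",  # set by write_entities on save
    "entities": [
        {
            "name": "Hermes",
            "type": "tool",
            "context": "Hermes agent uses the tools tool.",
            "count": 2,                               # seen in two sources
            "sources": ["s1.jsonl", "s2.jsonl"],
            "first_seen": "2026-04-13T10:00:00+00:00",
            "last_seen": "2026-04-13T10:04:00+00:00",
        },
    ],
}
```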