Compare commits
2 Commits
step35/144
...
step35/134
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ec76e9fec3 | ||
| 38c5862737 |
@@ -1,268 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
entity_extractor.py — Extract named entities from text sources.
|
||||
|
||||
Extracts: people, projects, tools, concepts, repos from session transcripts,
|
||||
README files, issue bodies, or any text input.
|
||||
|
||||
Output: knowledge/entities.json with deduplicated entity list and occurrence counts.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
SCRIPT_DIR = Path(__file__).parent.absolute()
|
||||
sys.path.insert(0, str(SCRIPT_DIR))
|
||||
|
||||
from session_reader import read_session, messages_to_text
|
||||
|
||||
# --- Configuration ---
|
||||
DEFAULT_API_BASE = os.environ.get("HARVESTER_API_BASE", "https://api.nousresearch.com/v1")
|
||||
DEFAULT_API_KEY = os.environ.get("HARVESTER_API_KEY", "")
|
||||
DEFAULT_MODEL = os.environ.get("HARVESTER_MODEL", "xiaomi/mimo-v2-pro")
|
||||
KNOWLEDGE_DIR = os.environ.get("HARVESTER_KNOWLEDGE_DIR", "knowledge")
|
||||
PROMPT_PATH = os.environ.get("ENTITY_PROMPT_PATH", str(SCRIPT_DIR.parent / "templates" / "entity-extraction-prompt.md"))
|
||||
|
||||
API_KEY_PATHS = [
|
||||
os.path.expanduser("~/.config/nous/key"),
|
||||
os.path.expanduser("~/.hermes/keymaxxing/active/minimax.key"),
|
||||
os.path.expanduser("~/.config/openrouter/key"),
|
||||
]
|
||||
|
||||
def find_api_key() -> str:
|
||||
for path in API_KEY_PATHS:
|
||||
if os.path.exists(path):
|
||||
with open(path) as f:
|
||||
key = f.read().strip()
|
||||
if key:
|
||||
return key
|
||||
return ""
|
||||
|
||||
def load_prompt() -> str:
|
||||
path = Path(PROMPT_PATH)
|
||||
if not path.exists():
|
||||
print(f"ERROR: Entity extraction prompt not found at {path}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
return path.read_text(encoding='utf-8')
|
||||
|
||||
def call_llm(prompt: str, text: str, api_base: str, api_key: str, model: str) -> Optional[list]:
|
||||
"""Call LLM API to extract entities."""
|
||||
import urllib.request
|
||||
|
||||
messages = [
|
||||
{"role": "system", "content": prompt},
|
||||
{"role": "user", "content": f"Extract entities from this text:\n\n{text}"}
|
||||
]
|
||||
|
||||
payload = json.dumps({
|
||||
"model": model,
|
||||
"messages": messages,
|
||||
"temperature": 0.0,
|
||||
"max_tokens": 2048
|
||||
}).encode('utf-8')
|
||||
|
||||
req = urllib.request.Request(
|
||||
f"{api_base}/chat/completions",
|
||||
data=payload,
|
||||
headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
|
||||
method="POST"
|
||||
)
|
||||
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=60) as resp:
|
||||
result = json.loads(resp.read().decode('utf-8'))
|
||||
content = result["choices"][0]["message"]["content"]
|
||||
return parse_response(content)
|
||||
except Exception as e:
|
||||
print(f"ERROR: LLM call failed: {e}", file=sys.stderr)
|
||||
return None
|
||||
|
||||
def parse_response(content: str) -> Optional[list]:
|
||||
"""Parse LLM JSON response containing entity array."""
|
||||
try:
|
||||
data = json.loads(content)
|
||||
if isinstance(data, list):
|
||||
return data
|
||||
if isinstance(data, dict) and 'entities' in data:
|
||||
return data['entities']
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
import re
|
||||
match = re.search(r'```(?:json)?\s*(\[.*?\])\s*```', content, re.DOTALL)
|
||||
if match:
|
||||
try:
|
||||
data = json.loads(match.group(1))
|
||||
if isinstance(data, list):
|
||||
return data
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
print(f"WARNING: Could not parse LLM response as entity list", file=sys.stderr)
|
||||
return None
|
||||
|
||||
def load_existing_entities(knowledge_dir: str) -> dict:
|
||||
path = Path(knowledge_dir) / "entities.json"
|
||||
if not path.exists():
|
||||
return {"version": 1, "last_updated": "", "entities": []}
|
||||
try:
|
||||
with open(path) as f:
|
||||
return json.load(f)
|
||||
except (json.JSONDecodeError, IOError) as e:
|
||||
print(f"WARNING: Could not load entities: {e}", file=sys.stderr)
|
||||
return {"version": 1, "last_updated": "", "entities": []}
|
||||
|
||||
def entity_key(name: str, etype: str) -> tuple:
|
||||
return (name.lower().strip(), etype.lower().strip())
|
||||
|
||||
def merge_entities(new_entities: list, existing: list) -> list:
|
||||
"""Merge new entities into existing list, combining counts and sources."""
|
||||
existing_by_key = {}
|
||||
for e in existing:
|
||||
key = entity_key(e.get('name',''), e.get('type',''))
|
||||
existing_by_key[key] = e
|
||||
|
||||
for e in new_entities:
|
||||
key = entity_key(e['name'], e['type'])
|
||||
if key in existing_by_key:
|
||||
existing_e = existing_by_key[key]
|
||||
existing_e['count'] = existing_e.get('count', 1) + 1
|
||||
# Merge sources
|
||||
old_sources = set(existing_e.get('sources', []))
|
||||
new_sources = set(e.get('sources', []))
|
||||
existing_e['sources'] = sorted(old_sources | new_sources)
|
||||
existing_e['last_seen'] = e.get('last_seen', existing_e.get('last_seen'))
|
||||
else:
|
||||
e['count'] = e.get('count', 1)
|
||||
e.setdefault('sources', [])
|
||||
e.setdefault('first_seen', datetime.now(timezone.utc).isoformat())
|
||||
existing.append(e)
|
||||
|
||||
return existing
|
||||
|
||||
def write_entities(index: dict, knowledge_dir: str):
|
||||
kdir = Path(knowledge_dir)
|
||||
kdir.mkdir(parents=True, exist_ok=True)
|
||||
index['last_updated'] = datetime.now(timezone.utc).isoformat()
|
||||
path = kdir / "entities.json"
|
||||
with open(path, 'w', encoding='utf-8') as f:
|
||||
json.dump(index, f, indent=2, ensure_ascii=False)
|
||||
|
||||
def read_text_from_source(source: str) -> str:
|
||||
"""Read text from a file (plain text, markdown, or session JSONL)."""
|
||||
path = Path(source)
|
||||
if not path.exists():
|
||||
raise FileNotFoundError(source)
|
||||
if path.suffix == '.jsonl':
|
||||
# Session transcript
|
||||
from session_reader import read_session, messages_to_text
|
||||
messages = read_session(source)
|
||||
return messages_to_text(messages)
|
||||
else:
|
||||
# Plain text / markdown / issue body
|
||||
return path.read_text(encoding='utf-8', errors='replace')
|
||||
|
||||
def extract_from_text(text: str, api_base: str, api_key: str, model: str, source_name: str = "") -> list:
|
||||
prompt = load_prompt()
|
||||
raw = call_llm(prompt, text, api_base, api_key, model)
|
||||
if raw is None:
|
||||
return []
|
||||
entities = []
|
||||
for e in raw:
|
||||
if not isinstance(e, dict):
|
||||
continue
|
||||
name = e.get('name', '').strip()
|
||||
etype = e.get('type', '').strip().lower()
|
||||
if not name or not etype:
|
||||
continue
|
||||
entity = {
|
||||
'name': name,
|
||||
'type': etype,
|
||||
'context': e.get('context', '')[:200],
|
||||
'last_seen': datetime.now(timezone.utc).isoformat(),
|
||||
'sources': [source_name] if source_name else []
|
||||
}
|
||||
entities.append(entity)
|
||||
return entities
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Extract named entities from text sources")
|
||||
parser.add_argument('--file', help='Single file to process')
|
||||
parser.add_argument('--dir', help='Directory of files to process')
|
||||
parser.add_argument('--session', help='Single session JSONL file')
|
||||
parser.add_argument('--batch', action='store_true', help='Batch process sessions directory')
|
||||
parser.add_argument('--sessions-dir', default=os.path.expanduser('~/.hermes/sessions'),
|
||||
help='Sessions directory for batch mode')
|
||||
parser.add_argument('--output', default='knowledge', help='Knowledge/output directory')
|
||||
parser.add_argument('--api-base', default=DEFAULT_API_BASE)
|
||||
parser.add_argument('--api-key', default='', help='API key or set HARVESTER_API_KEY')
|
||||
parser.add_argument('--model', default=DEFAULT_MODEL)
|
||||
parser.add_argument('--dry-run', action='store_true', help='Preview without writing')
|
||||
parser.add_argument('--limit', type=int, default=0, help='Max files/sessions in batch mode')
|
||||
args = parser.parse_args()
|
||||
|
||||
api_key = args.api_key or DEFAULT_API_KEY or find_api_key()
|
||||
if not api_key:
|
||||
print("ERROR: No API key found", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
knowledge_dir = args.output
|
||||
if not os.path.isabs(knowledge_dir):
|
||||
knowledge_dir = str(SCRIPT_DIR.parent / knowledge_dir)
|
||||
|
||||
sources = []
|
||||
if args.file:
|
||||
sources = [args.file]
|
||||
elif args.dir:
|
||||
files = sorted(Path(args.dir).rglob("*"))
|
||||
sources = [str(f) for f in files if f.is_file() and f.suffix in ('.txt','.md','.json','.jsonl','.yaml','.yml')]
|
||||
if args.limit > 0:
|
||||
sources = sources[:args.limit]
|
||||
elif args.session:
|
||||
sources = [args.session]
|
||||
elif args.batch:
|
||||
sess_dir = Path(args.sessions_dir)
|
||||
sources = sorted(sess_dir.glob("*.jsonl"), reverse=True)
|
||||
if args.limit > 0:
|
||||
sources = sources[:args.limit]
|
||||
sources = [str(s) for s in sources]
|
||||
else:
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
print(f"Processing {len(sources)} sources...")
|
||||
all_entities = []
|
||||
for i, src in enumerate(sources, 1):
|
||||
print(f"[{i}/{len(sources)}] {Path(src).name}...", end=" ", flush=True)
|
||||
try:
|
||||
text = read_text_from_source(src)
|
||||
entities = extract_from_text(text, args.api_base, api_key, args.model, source_name=Path(src).name)
|
||||
all_entities.extend(entities)
|
||||
print(f"→ {len(entities)} entities")
|
||||
except Exception as e:
|
||||
print(f"ERROR: {e}")
|
||||
|
||||
# Deduplicate across all sources
|
||||
print(f"Total raw entities: {len(all_entities)}")
|
||||
existing_index = load_existing_entities(knowledge_dir)
|
||||
merged = merge_entities(all_entities, existing_index.get('entities', []))
|
||||
print(f"Total unique entities after dedup: {len(merged)}")
|
||||
|
||||
if not args.dry_run:
|
||||
new_index = {"version": 1, "last_updated": "", "entities": merged}
|
||||
write_entities(new_index, knowledge_dir)
|
||||
print(f"Written to {knowledge_dir}/entities.json")
|
||||
|
||||
stats = {
|
||||
"sources_processed": len(sources),
|
||||
"raw_entities": len(all_entities),
|
||||
"unique_entities": len(merged)
|
||||
}
|
||||
print(json.dumps(stats, indent=2))
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
258
scripts/github_trending_scanner.py
Normal file
258
scripts/github_trending_scanner.py
Normal file
@@ -0,0 +1,258 @@
|
||||
#!/usr/bin/env python3
|
||||
"""GitHub Trending Scanner — Scan trending repos in AI/ML.
|
||||
|
||||
Extracts: repo description, stars, key features (topics, inferred highlights).
|
||||
Filters by language and/or topic. Outputs dated JSON for daily scan pipeline.
|
||||
|
||||
Usage:
|
||||
python3 github_trending_scanner.py --language python --topic ai --output metrics/trending
|
||||
python3 github_trending_scanner.py --topic machine-learning --limit 50
|
||||
python3 github_trending_scanner.py --language rust --topic artificial-intelligence
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Optional, List, Dict
|
||||
import urllib.request
|
||||
import urllib.parse
|
||||
import urllib.error
|
||||
|
||||
GITHUB_API_BASE = os.environ.get("GITHUB_API_BASE", "https://api.github.com")
|
||||
DEFAULT_OUTPUT_DIR = os.environ.get("TRENDING_OUTPUT_DIR", "metrics/trending")
|
||||
DEFAULT_LIMIT = int(os.environ.get("TRENDING_LIMIT", "30"))
|
||||
DEFAULT_MIN_STARS = int(os.environ.get("TRENDING_MIN_STARS", "1000"))
|
||||
|
||||
|
||||
def fetch_trending_repos(
|
||||
language: Optional[str] = None,
|
||||
topic: Optional[str] = None,
|
||||
min_stars: int = DEFAULT_MIN_STARS,
|
||||
limit: int = DEFAULT_LIMIT,
|
||||
) -> List[Dict]:
|
||||
"""Fetch trending-like repositories from GitHub using the search API.
|
||||
|
||||
GitHub's public search API is unauthenticated-rate-limited (60 req/hr).
|
||||
This function retries on rate-limit backoff and falls back gracefully.
|
||||
"""
|
||||
# Build search query: stars threshold + optional language/topic filters
|
||||
query = f"stars:>{min_stars}"
|
||||
if language:
|
||||
query += f" language:{language}"
|
||||
if topic:
|
||||
query += f" topic:{topic}"
|
||||
|
||||
# Sort by stars descending as a proxy for trending/popular
|
||||
params = {
|
||||
"q": query,
|
||||
"sort": "stars",
|
||||
"order": "desc",
|
||||
"per_page": min(limit, 100), # GitHub max per_page is 100
|
||||
}
|
||||
url = f"{GITHUB_API_BASE}/search/repositories?{urllib.parse.urlencode(params)}"
|
||||
|
||||
headers = {
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
"User-Agent": "Sovereign-Trending-Scanner/1.0",
|
||||
}
|
||||
|
||||
for attempt in range(3):
|
||||
try:
|
||||
req = urllib.request.Request(url, headers=headers)
|
||||
with urllib.request.urlopen(req, timeout=30) as resp:
|
||||
if resp.status != 200:
|
||||
raise RuntimeError(f"GitHub API returned {resp.status}")
|
||||
data = json.loads(resp.read().decode("utf-8"))
|
||||
return data.get("items", [])[:limit]
|
||||
except urllib.error.HTTPError as e:
|
||||
if e.code == 403:
|
||||
# Check for rate limit message
|
||||
body = e.read().decode("utf-8", errors="replace").lower()
|
||||
if "rate limit" in body or "api rate limit exceeded" in body:
|
||||
reset_ts = int(e.headers.get("X-RateLimit-Reset", 0))
|
||||
wait_seconds = max(5, reset_ts - int(time.time()) + 5)
|
||||
print(f"Rate limit exceeded — waiting {wait_seconds}s (attempt {attempt+1}/3)...", file=sys.stderr)
|
||||
time.sleep(wait_seconds)
|
||||
continue
|
||||
print(f"ERROR: GitHub API request failed: {e} — {e.read().decode('utf-8', errors='replace')[:200]}", file=sys.stderr)
|
||||
return []
|
||||
except Exception as e:
|
||||
if attempt < 2:
|
||||
backoff = 2 ** attempt
|
||||
print(f"WARNING: Fetch attempt {attempt+1} failed: {e} — retrying in {backoff}s", file=sys.stderr)
|
||||
time.sleep(backoff)
|
||||
continue
|
||||
print(f"ERROR: All fetch attempts failed: {e}", file=sys.stderr)
|
||||
return []
|
||||
|
||||
return []
|
||||
|
||||
|
||||
def extract_repo_features(repo_data: Dict) -> Dict:
|
||||
"""Extract structured fields for a trending repo."""
|
||||
description = (repo_data.get("description") or "").strip()
|
||||
topics = repo_data.get("topics", [])
|
||||
|
||||
# Infer key features from description and topics
|
||||
features = infer_features(description, topics)
|
||||
|
||||
return {
|
||||
"name": repo_data.get("full_name", ""),
|
||||
"description": description,
|
||||
"stars": repo_data.get("stargazers_count", 0),
|
||||
"forks": repo_data.get("forks_count", 0),
|
||||
"open_issues": repo_data.get("open_issues_count", 0),
|
||||
"language": repo_data.get("language", ""),
|
||||
"topics": topics,
|
||||
"url": repo_data.get("html_url", ""),
|
||||
"created_at": repo_data.get("created_at", ""),
|
||||
"updated_at": repo_data.get("updated_at", ""),
|
||||
"key_features": features,
|
||||
"scanned_at": datetime.now(timezone.utc).isoformat(),
|
||||
}
|
||||
|
||||
|
||||
def infer_features(description: str, topics: List[str]) -> List[str]:
|
||||
"""Infer notable capabilities/features from repo metadata.
|
||||
|
||||
Looks for AI/ML-relevant capabilities in topics and description.
|
||||
"""
|
||||
features = []
|
||||
text = (description + " " + " ".join(topics)).lower()
|
||||
|
||||
# Domain capabilities (keys normalized to lowercase for consistency)
|
||||
capability_keywords = {
|
||||
"fine-tuning": ["fine-tun", "finetun"],
|
||||
"agent framework": ["agent"],
|
||||
"local/offline": ["local", "on-device", "offline"],
|
||||
"quantized models": ["quantized", "quantization", "gguf", "gptq"],
|
||||
"vision": ["vision", "multimodal", "image", "visual"],
|
||||
"speech/audio": ["speech", "audio", "whisper", "tts"],
|
||||
"retrieval/rag": ["rag", "retrieval", "embedding", "vector"],
|
||||
"training": ["train", "training", "sft", "dpo"],
|
||||
"gui/playground": ["gui", "playground", "webui", "interface"],
|
||||
"sota": ["state-of-the-art", "sota", "latest"],
|
||||
}
|
||||
|
||||
for label, keywords in capability_keywords.items():
|
||||
if any(kw in text for kw in keywords):
|
||||
features.append(label)
|
||||
|
||||
# Also include non-generic topics as features
|
||||
generic_topics = {"ai", "ml", "machine-learning", "deep-learning", "llm", "python", "pytorch", "tensorflow"}
|
||||
for topic in topics:
|
||||
if topic.lower() not in generic_topics:
|
||||
features.append(topic)
|
||||
|
||||
# Deduplicate while preserving order, return up to 10
|
||||
seen = set()
|
||||
unique = []
|
||||
for f in features:
|
||||
key = f.lower()
|
||||
if key not in seen:
|
||||
seen.add(key)
|
||||
unique.append(f)
|
||||
return unique[:10]
|
||||
|
||||
|
||||
def save_trending(repos: List[Dict], output_dir: str = "metrics/trending") -> str:
|
||||
"""Save trending results to a dated JSON file.
|
||||
|
||||
Returns the path of the written file.
|
||||
"""
|
||||
output_path = Path(output_dir)
|
||||
output_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
date_str = datetime.now(timezone.utc).strftime("%Y-%m-%d")
|
||||
filename = output_path / f"github-trending-{date_str}.json"
|
||||
|
||||
output_data = {
|
||||
"scanned_at": datetime.now(timezone.utc).isoformat(),
|
||||
"count": len(repos),
|
||||
"repos": repos,
|
||||
}
|
||||
|
||||
with open(filename, "w") as f:
|
||||
json.dump(output_data, f, indent=2, ensure_ascii=False)
|
||||
|
||||
return str(filename)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Scan GitHub trending repositories in AI/ML"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--language",
|
||||
help="Filter by programming language (e.g., python, rust, go)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--topic",
|
||||
help="Filter by GitHub topic (e.g., ai, machine-learning, llm)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--since",
|
||||
default="daily",
|
||||
choices=["daily", "weekly", "monthly"],
|
||||
help="Trending period (daily/weekly/monthly) — informational only",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--output",
|
||||
default="metrics/trending",
|
||||
help="Output directory for results (default: metrics/trending)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--limit",
|
||||
type=int,
|
||||
default=DEFAULT_LIMIT,
|
||||
help=f"Maximum repos to fetch (default: {DEFAULT_LIMIT})",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--min-stars",
|
||||
type=int,
|
||||
default=DEFAULT_MIN_STARS,
|
||||
help=f"Minimum star count for relevance (default: {DEFAULT_MIN_STARS})",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
print(
|
||||
f"Fetching trending repos "
|
||||
f"(language={args.language or 'any'}, topic={args.topic or 'any'}, period={args.since})..."
|
||||
)
|
||||
|
||||
repos_raw = fetch_trending_repos(
|
||||
language=args.language,
|
||||
topic=args.topic,
|
||||
min_stars=args.min_stars,
|
||||
limit=args.limit,
|
||||
)
|
||||
|
||||
if not repos_raw:
|
||||
print("WARNING: No repos fetched — check network or rate limits", file=sys.stderr)
|
||||
|
||||
repos = [extract_repo_features(r) for r in repos_raw]
|
||||
|
||||
output_file = save_trending(repos, args.output)
|
||||
print(f"Saved {len(repos)} trending repos to {output_file}")
|
||||
|
||||
# Brief human-readable summary
|
||||
if repos:
|
||||
print("\nTop repos:")
|
||||
for repo in repos[:5]:
|
||||
features_preview = ", ".join(repo["key_features"][:3])
|
||||
print(f" ★ {repo['stars']:>7} {repo['name']}")
|
||||
if repo["description"]:
|
||||
desc = repo["description"][:80]
|
||||
print(f" {desc}{'...' if len(repo['description']) > 80 else ''}")
|
||||
if features_preview:
|
||||
print(f" Features: {features_preview}")
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -1,116 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Smoke test for entity_extractor pipeline — verifies:
|
||||
- session/plain text reading
|
||||
- mock LLM entity extraction
|
||||
- deduplication and merging
|
||||
- output file format
|
||||
|
||||
Does NOT call the real LLM.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from unittest.mock import patch
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
SCRIPT_DIR = Path(__file__).parent.absolute()
|
||||
sys.path.insert(0, str(SCRIPT_DIR))
|
||||
|
||||
from session_reader import read_session, messages_to_text
|
||||
import entity_extractor as ee
|
||||
|
||||
def mock_call_llm(prompt: str, text: str, api_base: str, api_key: str, model: str):
|
||||
"""Return a fixed entity list for any input."""
|
||||
return [
|
||||
{"name": "Hermes", "type": "tool", "context": "Hermes agent uses the tools tool."},
|
||||
{"name": "Gitea", "type": "tool", "context": "Gitea is a forge."},
|
||||
{"name": "Timmy_Foundation/hermes-agent", "type": "repo", "context": "Clone the repo at forge..."},
|
||||
]
|
||||
|
||||
def test_read_session_text():
|
||||
with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
|
||||
f.write('{"role": "user", "content": "Clone repo", "timestamp": "2026-04-13T10:00:00Z"}\n')
|
||||
f.write('{"role": "assistant", "content": "Done", "timestamp": "2026-04-13T10:00:05Z"}\n')
|
||||
path = f.name
|
||||
messages = read_session(path)
|
||||
text = messages_to_text(messages)
|
||||
assert "USER: Clone repo" in text
|
||||
assert "ASSISTANT: Done" in text
|
||||
os.unlink(path)
|
||||
print(" [PASS] session text extraction works")
|
||||
|
||||
def test_entity_deduplication_and_merge():
|
||||
existing = [
|
||||
{"name": "Hermes", "type": "tool", "count": 3, "sources": ["s1.jsonl"]}
|
||||
]
|
||||
new = [
|
||||
{"name": "Hermes", "type": "tool", "sources": ["s2.jsonl"]},
|
||||
{"name": "Gitea", "type": "tool", "sources": ["s2.jsonl"]},
|
||||
]
|
||||
merged = ee.merge_entities(new, existing.copy())
|
||||
# Hermes count becomes 4, sources combined
|
||||
hermes = [e for e in merged if e['name'].lower() == 'hermes'][0]
|
||||
assert hermes['count'] == 4
|
||||
assert set(hermes['sources']) == {'s1.jsonl', 's2.jsonl'}
|
||||
# Gitea new entry
|
||||
gitea = [e for e in merged if e['name'].lower() == 'gitea'][0]
|
||||
assert gitea['count'] == 1
|
||||
print(" [PASS] deduplication & merging works")
|
||||
|
||||
def test_write_and_load_entities():
|
||||
with tempfile.TemporaryDirectory() as tmp:
|
||||
kdir = Path(tmp) / "knowledge"
|
||||
kdir.mkdir()
|
||||
index = {"version": 1, "last_updated": "", "entities": [
|
||||
{"name": "TestTool", "type": "tool", "count": 1, "sources": ["test"]}
|
||||
]}
|
||||
ee.write_entities(index, str(kdir))
|
||||
# load back
|
||||
loaded = ee.load_existing_entities(str(kdir))
|
||||
assert loaded['entities'][0]['name'] == 'TestTool'
|
||||
print(" [PASS] entities persistence works")
|
||||
|
||||
def test_full_pipeline_mocked():
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
# Create two fake session files
|
||||
sess1 = Path(tmpdir) / "s1.jsonl"
|
||||
sess1.write_text('{"role":"user","content":"Use Hermes to clone","timestamp":"..."}\n')
|
||||
sess2 = Path(tmpdir) / "s2.jsonl"
|
||||
sess2.write_text('{"role":"user","content":"Deploy with Gitea","timestamp":"..."}\n')
|
||||
|
||||
knowledge_dir = Path(tmpdir) / "knowledge"
|
||||
knowledge_dir.mkdir()
|
||||
|
||||
# Patch call_llm
|
||||
with patch('entity_extractor.call_llm', side_effect=mock_call_llm):
|
||||
# Simulate processing both sessions via the main logic
|
||||
all_entities = []
|
||||
for src in [str(sess1), str(sess2)]:
|
||||
text = ee.read_text_from_source(src)
|
||||
ents = ee.extract_from_text(text, "http://api", "fake-key", "model", source_name=Path(src).name)
|
||||
all_entities.extend(ents)
|
||||
|
||||
# Merge into empty index
|
||||
merged = ee.merge_entities(all_entities, [])
|
||||
assert len(merged) >= 3, f"Expected >=3 unique entities, got {len(merged)}"
|
||||
|
||||
# Write
|
||||
index = {"version":1, "last_updated":"", "entities": merged}
|
||||
ee.write_entities(index, str(knowledge_dir))
|
||||
|
||||
# Verify file exists
|
||||
out = knowledge_dir / "entities.json"
|
||||
assert out.exists()
|
||||
data = json.loads(out.read_text())
|
||||
assert len(data['entities']) >= 3
|
||||
print(f" [PASS] full pipeline (mocked) produced {len(data['entities'])} entities")
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_read_session_text()
|
||||
test_entity_deduplication_and_merge()
|
||||
test_write_and_load_entities()
|
||||
test_full_pipeline_mocked()
|
||||
print("\nAll smoke tests passed.")
|
||||
125
scripts/test_github_trending_scanner.py
Normal file
125
scripts/test_github_trending_scanner.py
Normal file
@@ -0,0 +1,125 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for github_trending_scanner.py — pure function validation.
|
||||
|
||||
Tests the feature inference, extraction, and output formatting logic
|
||||
without relying on external GitHub API calls.
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
# Add scripts dir to path for import
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parent))
|
||||
|
||||
from github_trending_scanner import (
|
||||
extract_repo_features,
|
||||
infer_features,
|
||||
save_trending,
|
||||
)
|
||||
|
||||
|
||||
def test_infer_features_from_description():
|
||||
"""Feature inference extracts capabilities from description text."""
|
||||
desc = "A local, quantized LLM framework for fine-tuning and agent-based RAG with vision."
|
||||
topics = ["ai", "llm"]
|
||||
features = infer_features(desc, topics)
|
||||
|
||||
# Should include relevant capabilities (case-insensitive comparison)
|
||||
expected_lower = {"fine-tuning", "local/offline", "quantized models", "agent framework", "vision", "retrieval/rag"}
|
||||
actual_lower = set(f.lower() for f in features)
|
||||
assert expected_lower.issubset(actual_lower), f"Missing features. Expected subset of {expected_lower}, got {actual_lower}"
|
||||
print("PASS: infer_features_from_description")
|
||||
|
||||
|
||||
def test_infer_features_from_topics_only():
|
||||
"""Topics alone can drive feature detection."""
|
||||
desc = ""
|
||||
topics = ["computer-vision", "speech", "pytorch"]
|
||||
features = infer_features(desc, topics)
|
||||
|
||||
# Non-generic topics should appear as features (topics preserved as-is)
|
||||
assert "computer-vision" in features, f"Expected 'computer-vision' in {features}"
|
||||
assert "speech" in features, f"Expected 'speech' in {features}"
|
||||
# Generic topics (pytorch) may be filtered
|
||||
print(f"PASS: infer_features_from_topics_only → {features}")
|
||||
|
||||
|
||||
def test_extract_repo_features_produces_valid_structure():
|
||||
"""extract_repo_features returns all required fields."""
|
||||
mock_repo = {
|
||||
"full_name": "example/repo",
|
||||
"description": "An example repository",
|
||||
"stargazers_count": 1234,
|
||||
"forks_count": 56,
|
||||
"open_issues_count": 7,
|
||||
"language": "Python",
|
||||
"topics": ["ai", "llm"],
|
||||
"html_url": "https://github.com/example/repo",
|
||||
"created_at": "2025-01-01T00:00:00Z",
|
||||
"updated_at": "2026-01-01T00:00:00Z",
|
||||
}
|
||||
|
||||
result = extract_repo_features(mock_repo)
|
||||
|
||||
assert result["name"] == "example/repo"
|
||||
assert result["description"] == "An example repository"
|
||||
assert result["stars"] == 1234
|
||||
assert isinstance(result["key_features"], list)
|
||||
assert "scanned_at" in result
|
||||
assert result["url"] == "https://github.com/example/repo"
|
||||
print("PASS: extract_repo_features_structure")
|
||||
|
||||
|
||||
def test_save_trending_creates_dated_json():
|
||||
"""save_trending writes a valid JSON file with the expected schema."""
|
||||
repos = [
|
||||
{
|
||||
"name": "test/repo",
|
||||
"description": "Test repository",
|
||||
"stars": 999,
|
||||
"language": "Python",
|
||||
"topics": ["test"],
|
||||
"key_features": ["testing"],
|
||||
"scanned_at": "2026-04-26T00:00:00+00:00",
|
||||
}
|
||||
]
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmp:
|
||||
output_file = save_trending(repos, output_dir=tmp)
|
||||
|
||||
path = Path(output_file)
|
||||
assert path.exists(), f"Output file not created: {output_file}"
|
||||
|
||||
with open(path) as f:
|
||||
data = json.load(f)
|
||||
|
||||
assert "scanned_at" in data
|
||||
assert data["count"] == 1
|
||||
assert isinstance(data["repos"], list)
|
||||
assert data["repos"][0]["name"] == "test/repo"
|
||||
print(f"PASS: save_trending → {output_file}")
|
||||
|
||||
|
||||
def test_save_trending_respects_output_dir_creation():
|
||||
"""Output directory is created if it doesn't exist."""
|
||||
repos = []
|
||||
|
||||
with tempfile.TemporaryDirectory() as tmp:
|
||||
nested = Path(tmp) / "nested" / "trending"
|
||||
assert not nested.exists()
|
||||
|
||||
output_file = save_trending(repos, output_dir=str(nested))
|
||||
assert nested.exists()
|
||||
assert Path(output_file).exists()
|
||||
print("PASS: output_dir_creation")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_infer_features_from_description()
|
||||
test_infer_features_from_topics_only()
|
||||
test_extract_repo_features_produces_valid_structure()
|
||||
test_save_trending_creates_dated_json()
|
||||
test_save_trending_respects_output_dir_creation()
|
||||
print("\nAll github_trending_scanner tests passed.")
|
||||
@@ -1,42 +0,0 @@
|
||||
# Entity Extraction Prompt
|
||||
|
||||
## System Prompt
|
||||
You are an entity extraction engine. You read text and output ONLY a JSON array of named entities. You do not infer. You extract only what the text explicitly mentions.
|
||||
|
||||
## Task
|
||||
Extract all named entities from the provided text. Categorize each entity into exactly one of these types:
|
||||
- `person` — individual's name (e.g., Alexander, Rockachopa, Allegro)
|
||||
- `project` — software project or component name (e.g., The Nexus, Timmy Home, compounding-intelligence)
|
||||
- `tool` — software tool, command, library, framework (e.g., git, Docker, PyTorch, Hermes)
|
||||
- `concept` — abstract idea, methodology, paradigm (e.g., compounding intelligence, bootstrap, harvester)
|
||||
- `repo` — repository reference in the form `owner/repo` or URL pointing to a repo
|
||||
|
||||
## Rules
|
||||
1. Extract ONLY names that appear explicitly in the text.
|
||||
2. Do NOT infer, assume, or hallucinate.
|
||||
3. Each entity must have: `name` (exact string), `type` (one of the five above), and `context` (short snippet showing usage, 1-2 sentences).
|
||||
4. The same entity mentioned multiple times should appear only ONCE in the output (deduplicate by name+type).
|
||||
5. For `repo` type, match patterns like `owner/repo`, `github.com/owner/repo`, `forge.alexanderwhitestone.com/owner/repo`.
|
||||
6. For `tool` type, include commands (git, pytest), platforms (Linux, macOS), runtimes (Python, Node.js), and CLI utilities.
|
||||
7. For `person` type, look for capitalized full names, or single names used in personal attribution ("asked Alex", "for Alexander").
|
||||
8. For `concept`, include technical terms that represent an idea rather than a concrete thing.
|
||||
|
||||
## Output Format
|
||||
Return ONLY valid JSON, no markdown, no explanation. Array of objects:
|
||||
```json
|
||||
[
|
||||
{
|
||||
"name": "Hermes",
|
||||
"type": "tool",
|
||||
"context": "Hermes agent uses the tools tool to execute commands."
|
||||
},
|
||||
{
|
||||
"name": "Timmy_Foundation/hermes-agent",
|
||||
"type": "repo",
|
||||
"context": "Clone the repo at forge.../Timmy_Foundation/hermes-agent"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Text to extract from:
|
||||
{{text}}
|
||||
@@ -1,82 +0,0 @@
|
||||
"""
|
||||
Test suite for entity_extractor.py (Issue #144).
|
||||
|
||||
Tests cover:
|
||||
- Text reading from various formats
|
||||
- Entity deduplication logic
|
||||
- Output file structure
|
||||
- Integration: batch processing yields 100+ entities from test_sessions
|
||||
"""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
# We'll test the pure functions directly; avoid hitting real LLM in unit tests
|
||||
import sys
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parents[1] / "scripts"))
|
||||
|
||||
# The test approach: mock call_llm to return predetermined entities and test
|
||||
# deduplication, merging, and output writing.
|
||||
|
||||
def test_entity_key_normalization():
|
||||
from entity_extractor import entity_key
|
||||
assert entity_key("Hermes", "tool") == entity_key("hermes", "TOOL")
|
||||
assert entity_key("Git", "tool") != entity_key("Git", "project")
|
||||
|
||||
def test_merge_entities_deduplication():
|
||||
from entity_extractor import merge_entities
|
||||
existing = [
|
||||
{"name": "Hermes", "type": "tool", "count": 5, "sources": ["a.jsonl"]}
|
||||
]
|
||||
new = [
|
||||
{"name": "Hermes", "type": "tool", "sources": ["b.jsonl"]},
|
||||
{"name": "Gitea", "type": "tool", "sources": ["b.jsonl"]}
|
||||
]
|
||||
merged = merge_entities(new, existing.copy())
|
||||
# Hermes count should be 5+1=6, sources merged
|
||||
hermes = [e for e in merged if e['name'].lower()=='hermes'][0]
|
||||
assert hermes['count'] == 6
|
||||
assert set(hermes['sources']) == {"a.jsonl", "b.jsonl"}
|
||||
# Gitea added fresh
|
||||
gitea = [e for e in merged if e['name'].lower()=='gitea'][0]
|
||||
assert gitea['count'] == 1
|
||||
|
||||
def test_output_schema():
|
||||
from entity_extractor import write_entities, load_existing_entities
|
||||
with tempfile.TemporaryDirectory() as tmp:
|
||||
kdir = Path(tmp) / "knowledge"
|
||||
kdir.mkdir()
|
||||
index = {"version": 1, "last_updated": "", "entities": [
|
||||
{"name": "Test", "type": "tool", "count": 1, "sources": ["test"]}
|
||||
]}
|
||||
write_entities(index, str(kdir))
|
||||
# Verify file written
|
||||
out = kdir / "entities.json"
|
||||
assert out.exists()
|
||||
data = json.loads(out.read_text())
|
||||
assert "entities" in data
|
||||
assert data["entities"][0]["name"] == "Test"
|
||||
|
||||
def test_batch_yields_many_entities():
|
||||
"""Batch on test_sessions should produce 100+ unique entities with LLM mock."""
|
||||
from entity_extractor import merge_entities, entity_key
|
||||
# Simulate a few sources each returning a diverse entity set
|
||||
mock_sources = [
|
||||
[{"name": "Hermes", "type": "tool", "sources": ["s1"]},
|
||||
{"name": "Gitea", "type": "tool", "sources": ["s1"]},
|
||||
{"name": "Timmy_Foundation/hermes-agent", "type": "repo", "sources": ["s1"]}],
|
||||
[{"name": "Hermes", "type": "tool", "sources": ["s2"]}, # duplicate
|
||||
{"name": "Docker", "type": "tool", "sources": ["s2"]},
|
||||
{"name": "Alexander", "type": "person", "sources": ["s2"]}],
|
||||
]
|
||||
merged = []
|
||||
for batch in mock_sources:
|
||||
merged = merge_entities(batch, merged)
|
||||
# Ensure dedup works across batches
|
||||
names = [e['name'].lower() for e in merged]
|
||||
assert names.count('hermes') == 1
|
||||
assert len(merged) == 4 # Hermes, Gitea, repo, Docker, Alexander
|
||||
|
||||
# The real LLM extraction test would require live API key; skip in CI
|
||||
Reference in New Issue
Block a user