Compare commits

2 Commits: step35/144...step35/171

| Author | SHA1 | Date |
|---|---|---|
| | 8374ec937e | |
| | 4b5a675355 | |
@@ -1,268 +0,0 @@
#!/usr/bin/env python3
"""
entity_extractor.py — Extract named entities from text sources.

Extracts people, projects, tools, concepts, and repos from session transcripts,
README files, issue bodies, or any text input.

Output: knowledge/entities.json with a deduplicated entity list and occurrence counts.
"""

import argparse
import json
import os
import re
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

SCRIPT_DIR = Path(__file__).parent.absolute()
sys.path.insert(0, str(SCRIPT_DIR))

from session_reader import read_session, messages_to_text

# --- Configuration ---
DEFAULT_API_BASE = os.environ.get("HARVESTER_API_BASE", "https://api.nousresearch.com/v1")
DEFAULT_API_KEY = os.environ.get("HARVESTER_API_KEY", "")
DEFAULT_MODEL = os.environ.get("HARVESTER_MODEL", "xiaomi/mimo-v2-pro")
KNOWLEDGE_DIR = os.environ.get("HARVESTER_KNOWLEDGE_DIR", "knowledge")
PROMPT_PATH = os.environ.get("ENTITY_PROMPT_PATH", str(SCRIPT_DIR.parent / "templates" / "entity-extraction-prompt.md"))

API_KEY_PATHS = [
    os.path.expanduser("~/.config/nous/key"),
    os.path.expanduser("~/.hermes/keymaxxing/active/minimax.key"),
    os.path.expanduser("~/.config/openrouter/key"),
]


def find_api_key() -> str:
    for path in API_KEY_PATHS:
        if os.path.exists(path):
            with open(path) as f:
                key = f.read().strip()
                if key:
                    return key
    return ""


def load_prompt() -> str:
    path = Path(PROMPT_PATH)
    if not path.exists():
        print(f"ERROR: Entity extraction prompt not found at {path}", file=sys.stderr)
        sys.exit(1)
    return path.read_text(encoding='utf-8')


def call_llm(prompt: str, text: str, api_base: str, api_key: str, model: str) -> Optional[list]:
    """Call the LLM API to extract entities."""
    import urllib.request

    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": f"Extract entities from this text:\n\n{text}"}
    ]

    payload = json.dumps({
        "model": model,
        "messages": messages,
        "temperature": 0.0,
        "max_tokens": 2048
    }).encode('utf-8')

    req = urllib.request.Request(
        f"{api_base}/chat/completions",
        data=payload,
        headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
        method="POST"
    )

    try:
        with urllib.request.urlopen(req, timeout=60) as resp:
            result = json.loads(resp.read().decode('utf-8'))
        content = result["choices"][0]["message"]["content"]
        return parse_response(content)
    except Exception as e:
        print(f"ERROR: LLM call failed: {e}", file=sys.stderr)
        return None


def parse_response(content: str) -> Optional[list]:
    """Parse the LLM's JSON response containing an entity array."""
    try:
        data = json.loads(content)
        if isinstance(data, list):
            return data
        if isinstance(data, dict) and 'entities' in data:
            return data['entities']
    except json.JSONDecodeError:
        pass
    # Fall back to a fenced JSON block embedded in prose
    match = re.search(r'```(?:json)?\s*(\[.*?\])\s*```', content, re.DOTALL)
    if match:
        try:
            data = json.loads(match.group(1))
            if isinstance(data, list):
                return data
        except json.JSONDecodeError:
            pass
    print("WARNING: Could not parse LLM response as entity list", file=sys.stderr)
    return None


def load_existing_entities(knowledge_dir: str) -> dict:
    path = Path(knowledge_dir) / "entities.json"
    if not path.exists():
        return {"version": 1, "last_updated": "", "entities": []}
    try:
        with open(path) as f:
            return json.load(f)
    except (json.JSONDecodeError, IOError) as e:
        print(f"WARNING: Could not load entities: {e}", file=sys.stderr)
        return {"version": 1, "last_updated": "", "entities": []}


def entity_key(name: str, etype: str) -> tuple:
    return (name.lower().strip(), etype.lower().strip())


def merge_entities(new_entities: list, existing: list) -> list:
    """Merge new entities into the existing list, combining counts and sources."""
    existing_by_key = {}
    for e in existing:
        key = entity_key(e.get('name', ''), e.get('type', ''))
        existing_by_key[key] = e

    for e in new_entities:
        key = entity_key(e['name'], e['type'])
        if key in existing_by_key:
            existing_e = existing_by_key[key]
            existing_e['count'] = existing_e.get('count', 1) + 1
            # Merge sources
            old_sources = set(existing_e.get('sources', []))
            new_sources = set(e.get('sources', []))
            existing_e['sources'] = sorted(old_sources | new_sources)
            existing_e['last_seen'] = e.get('last_seen', existing_e.get('last_seen'))
        else:
            e['count'] = e.get('count', 1)
            e.setdefault('sources', [])
            e.setdefault('first_seen', datetime.now(timezone.utc).isoformat())
            existing.append(e)
            # Register the new entity so later duplicates in the same batch
            # merge into it instead of being appended again
            existing_by_key[key] = e

    return existing


def write_entities(index: dict, knowledge_dir: str):
    kdir = Path(knowledge_dir)
    kdir.mkdir(parents=True, exist_ok=True)
    index['last_updated'] = datetime.now(timezone.utc).isoformat()
    path = kdir / "entities.json"
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(index, f, indent=2, ensure_ascii=False)


def read_text_from_source(source: str) -> str:
    """Read text from a file (plain text, markdown, or session JSONL)."""
    path = Path(source)
    if not path.exists():
        raise FileNotFoundError(source)
    if path.suffix == '.jsonl':
        # Session transcript
        messages = read_session(source)
        return messages_to_text(messages)
    else:
        # Plain text / markdown / issue body
        return path.read_text(encoding='utf-8', errors='replace')


def extract_from_text(text: str, api_base: str, api_key: str, model: str, source_name: str = "") -> list:
    prompt = load_prompt()
    raw = call_llm(prompt, text, api_base, api_key, model)
    if raw is None:
        return []
    entities = []
    for e in raw:
        if not isinstance(e, dict):
            continue
        name = e.get('name', '').strip()
        etype = e.get('type', '').strip().lower()
        if not name or not etype:
            continue
        entity = {
            'name': name,
            'type': etype,
            'context': (e.get('context') or '')[:200],
            'last_seen': datetime.now(timezone.utc).isoformat(),
            'sources': [source_name] if source_name else []
        }
        entities.append(entity)
    return entities


def main():
    parser = argparse.ArgumentParser(description="Extract named entities from text sources")
    parser.add_argument('--file', help='Single file to process')
    parser.add_argument('--dir', help='Directory of files to process')
    parser.add_argument('--session', help='Single session JSONL file')
    parser.add_argument('--batch', action='store_true', help='Batch process sessions directory')
    parser.add_argument('--sessions-dir', default=os.path.expanduser('~/.hermes/sessions'),
                        help='Sessions directory for batch mode')
    parser.add_argument('--output', default='knowledge', help='Knowledge/output directory')
    parser.add_argument('--api-base', default=DEFAULT_API_BASE)
    parser.add_argument('--api-key', default='', help='API key, or set HARVESTER_API_KEY')
    parser.add_argument('--model', default=DEFAULT_MODEL)
    parser.add_argument('--dry-run', action='store_true', help='Preview without writing')
    parser.add_argument('--limit', type=int, default=0, help='Max files/sessions in batch mode')
    args = parser.parse_args()

    api_key = args.api_key or DEFAULT_API_KEY or find_api_key()
    if not api_key:
        print("ERROR: No API key found", file=sys.stderr)
        sys.exit(1)

    knowledge_dir = args.output
    if not os.path.isabs(knowledge_dir):
        knowledge_dir = str(SCRIPT_DIR.parent / knowledge_dir)

    sources = []
    if args.file:
        sources = [args.file]
    elif args.dir:
        files = sorted(Path(args.dir).rglob("*"))
        sources = [str(f) for f in files if f.is_file() and f.suffix in ('.txt', '.md', '.json', '.jsonl', '.yaml', '.yml')]
        if args.limit > 0:
            sources = sources[:args.limit]
    elif args.session:
        sources = [args.session]
    elif args.batch:
        sess_dir = Path(args.sessions_dir)
        sources = sorted(sess_dir.glob("*.jsonl"), reverse=True)
        if args.limit > 0:
            sources = sources[:args.limit]
        sources = [str(s) for s in sources]
    else:
        parser.print_help()
        sys.exit(1)

    print(f"Processing {len(sources)} sources...")
    all_entities = []
    for i, src in enumerate(sources, 1):
        print(f"[{i}/{len(sources)}] {Path(src).name}...", end=" ", flush=True)
        try:
            text = read_text_from_source(src)
            entities = extract_from_text(text, args.api_base, api_key, args.model, source_name=Path(src).name)
            all_entities.extend(entities)
            print(f"→ {len(entities)} entities")
        except Exception as e:
            print(f"ERROR: {e}")

    # Deduplicate across all sources
    print(f"Total raw entities: {len(all_entities)}")
    existing_index = load_existing_entities(knowledge_dir)
    merged = merge_entities(all_entities, existing_index.get('entities', []))
    print(f"Total unique entities after dedup: {len(merged)}")

    if not args.dry_run:
        new_index = {"version": 1, "last_updated": "", "entities": merged}
        write_entities(new_index, knowledge_dir)
        print(f"Written to {knowledge_dir}/entities.json")

    stats = {
        "sources_processed": len(sources),
        "raw_entities": len(all_entities),
        "unique_entities": len(merged)
    }
    print(json.dumps(stats, indent=2))


if __name__ == '__main__':
    main()
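For reference, a sketch of the knowledge/entities.json shape that write_entities produces. The values are illustrative, not from a real run:

```python
# Illustrative contents of knowledge/entities.json after merging two sources
# (timestamps and counts are made up for the example):
example_index = {
    "version": 1,
    "last_updated": "2026-04-13T10:05:00+00:00",
    "entities": [
        {
            "name": "Hermes",
            "type": "tool",
            "context": "Hermes agent uses the tools tool.",
            "count": 2,                          # seen in two sources
            "sources": ["s1.jsonl", "s2.jsonl"], # sorted union of sources
            "first_seen": "2026-04-13T10:00:00+00:00",
            "last_seen": "2026-04-13T10:05:00+00:00",
        }
    ],
}
```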
@@ -70,37 +70,38 @@ class PerfReport:
 # ── Test Analysis ──────────────────────────────────────────────────

 def find_slow_tests_pytest(repo_path: str) -> List[Bottleneck]:
-    """Run pytest --durations and parse slow tests."""
+    """Run pytest with --durations and parse slow test output."""
     bottlenecks = []

-    # Try to run pytest with durations
     try:
+        # Run pytest to get slowest tests; maxfail=1 avoids hanging on failures
         result = subprocess.run(
-            ["python3", "-m", "pytest", "--co", "-q", "--durations=0"],
-            cwd=repo_path, capture_output=True, text=True, timeout=30
+            ["python3", "-m", "pytest", "-q",
+             f"--durations={PYTEST_DURATIONS_COUNT}", "--tb=no", "--maxfail=1"],
+            cwd=repo_path, capture_output=True, text=True, timeout=60
         )
-        # If tests exist, try to get durations from last run
-        durations_file = os.path.join(repo_path, ".pytest_cache", "v", "cache", "durations")
-        if os.path.exists(durations_file):
-            with open(durations_file) as f:
-                for line in f:
-                    parts = line.strip().split()
-                    if len(parts) >= 2:
-                        try:
-                            duration = float(parts[0])
-                            test_name = " ".join(parts[1:])
-                            if duration > SLOW_TEST_THRESHOLD_S:
-                                severity = "critical" if duration > 10 else "warning"
-                                bottlenecks.append(Bottleneck(
-                                    category="test",
-                                    name=test_name,
-                                    duration_s=duration,
-                                    severity=severity,
-                                    recommendation=f"Test takes {duration:.1f}s. Consider mocking slow I/O, using fixtures, or marking with @pytest.mark.slow."
-                                ))
-                        except ValueError:
-                            continue
-    except (subprocess.TimeoutExpired, FileNotFoundError):
+        # Parse durations from stdout.
+        # Lines look like: "  3.45s call test_file.py::test_name"
+        for line in result.stdout.splitlines():
+            line = line.strip()
+            m = re.match(r'^(\d+\.?\d*)s\s+(call|setup|teardown)\s+(.+)$', line)
+            if not m:
+                continue
+            try:
+                duration = float(m.group(1))
+                test_name = m.group(3).strip()
+                if duration > SLOW_TEST_THRESHOLD_S:
+                    severity = "critical" if duration > 10 else "warning"
+                    bottlenecks.append(Bottleneck(
+                        category="test",
+                        name=test_name,
+                        duration_s=duration,
+                        severity=severity,
+                        recommendation=f"Test takes {duration:.1f}s. Consider mocking slow I/O, using fixtures, or marking with @pytest.mark.slow."
+                    ))
+            except ValueError:
+                continue
+    except (subprocess.TimeoutExpired, FileNotFoundError, PermissionError):
         pass

     return bottlenecks
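The new stdout-parsing branch hinges on a single regex. A self-contained sanity check against a representative --durations line (the sample line is an assumed format, not captured output):

```python
import re

# Representative pytest --durations output line (assumed, per the comment above):
line = "3.45s call tests/test_io.py::test_big_download"
m = re.match(r'^(\d+\.?\d*)s\s+(call|setup|teardown)\s+(.+)$', line)
assert m is not None
assert float(m.group(1)) == 3.45  # duration in seconds
assert m.group(2) == "call"       # phase: call, setup, or teardown
assert m.group(3) == "tests/test_io.py::test_big_download"
```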
scripts/pr_complexity_scorer.py (new file, 351 lines)
@@ -0,0 +1,351 @@
#!/usr/bin/env python3
"""
PR Complexity Scorer - Estimate review effort for PRs.
"""

import argparse
import json
import os
import re
import sys
from dataclasses import dataclass, asdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
import urllib.request
import urllib.error

GITEA_BASE = "https://forge.alexanderwhitestone.com/api/v1"

DEPENDENCY_FILES = {
    "requirements.txt", "pyproject.toml", "setup.py", "setup.cfg",
    "Pipfile", "poetry.lock", "package.json", "yarn.lock", "Gemfile",
    "go.mod", "Cargo.toml", "pom.xml", "build.gradle"
}

TEST_PATTERNS = [
    r"tests?/.*\.py$", r".*_test\.py$", r"test_.*\.py$",
    r"spec/.*\.rb$", r".*_spec\.rb$",
    r"__tests__/", r".*\.test\.(js|ts|jsx|tsx)$"
]

WEIGHT_FILES = 0.25
WEIGHT_LINES = 0.25
WEIGHT_DEPS = 0.30
WEIGHT_TEST_COV = 0.20

SMALL_FILES = 5
MEDIUM_FILES = 20
LARGE_FILES = 50

SMALL_LINES = 100
MEDIUM_LINES = 500
LARGE_LINES = 2000

# Estimated review minutes for each 1-10 complexity score
TIME_PER_POINT = {1: 5, 2: 10, 3: 15, 4: 20, 5: 25, 6: 30, 7: 45, 8: 60, 9: 90, 10: 120}


@dataclass
class PRComplexity:
    pr_number: int
    title: str
    files_changed: int
    additions: int
    deletions: int
    has_dependency_changes: bool
    test_coverage_delta: Optional[int]
    score: int
    estimated_minutes: int
    reasons: List[str]

    def to_dict(self) -> dict:
        return asdict(self)


class GiteaClient:
    def __init__(self, token: str):
        self.token = token
        self.base_url = GITEA_BASE.rstrip("/")

    def _request(self, path: str, params: Optional[Dict] = None) -> Any:
        url = f"{self.base_url}{path}"
        if params:
            qs = "&".join(f"{k}={v}" for k, v in params.items() if v is not None)
            url += f"?{qs}"

        req = urllib.request.Request(url)
        req.add_header("Authorization", f"token {self.token}")
        req.add_header("Content-Type", "application/json")

        try:
            with urllib.request.urlopen(req, timeout=30) as resp:
                return json.loads(resp.read().decode())
        except urllib.error.HTTPError as e:
            print(f"API error {e.code}: {e.read().decode()[:200]}", file=sys.stderr)
            return None
        except urllib.error.URLError as e:
            print(f"Network error: {e}", file=sys.stderr)
            return None

    def get_open_prs(self, org: str, repo: str) -> List[Dict]:
        prs = []
        page = 1
        while True:
            batch = self._request(f"/repos/{org}/{repo}/pulls", {"limit": 50, "page": page, "state": "open"})
            if not batch:
                break
            prs.extend(batch)
            if len(batch) < 50:
                break
            page += 1
        return prs

    def get_pr_files(self, org: str, repo: str, pr_number: int) -> List[Dict]:
        files = []
        page = 1
        while True:
            batch = self._request(
                f"/repos/{org}/{repo}/pulls/{pr_number}/files",
                {"limit": 100, "page": page}
            )
            if not batch:
                break
            files.extend(batch)
            if len(batch) < 100:
                break
            page += 1
        return files

    def post_comment(self, org: str, repo: str, pr_number: int, body: str) -> bool:
        data = json.dumps({"body": body}).encode("utf-8")
        req = urllib.request.Request(
            f"{self.base_url}/repos/{org}/{repo}/issues/{pr_number}/comments",
            data=data,
            method="POST",
            headers={"Authorization": f"token {self.token}", "Content-Type": "application/json"}
        )
        try:
            with urllib.request.urlopen(req, timeout=30) as resp:
                return resp.status in (200, 201)
        except urllib.error.HTTPError:
            return False


def is_dependency_file(filename: str) -> bool:
    return any(filename.endswith(dep) for dep in DEPENDENCY_FILES)


def is_test_file(filename: str) -> bool:
    return any(re.search(pattern, filename) for pattern in TEST_PATTERNS)


def score_pr(
    files_changed: int,
    additions: int,
    deletions: int,
    has_dependency_changes: bool,
    test_coverage_delta: Optional[int] = None
) -> tuple[int, int, List[str]]:
    reasons = []

    # Files changed
    if files_changed <= SMALL_FILES:
        fscore = 1.0
        reasons.append("small number of files changed")
    elif files_changed <= MEDIUM_FILES:
        fscore = 2.0
        reasons.append("moderate number of files changed")
    elif files_changed <= LARGE_FILES:
        fscore = 2.5
        reasons.append("large number of files changed")
    else:
        fscore = 3.0
        reasons.append("very large PR spanning many files")

    # Lines changed
    total_lines = additions + deletions
    if total_lines <= SMALL_LINES:
        lscore = 1.0
        reasons.append("small change size")
    elif total_lines <= MEDIUM_LINES:
        lscore = 2.0
        reasons.append("moderate change size")
    elif total_lines <= LARGE_LINES:
        lscore = 3.0
        reasons.append("large change size")
    else:
        lscore = 4.0
        reasons.append("very large change")

    # Dependency changes
    if has_dependency_changes:
        dscore = 2.5
        reasons.append("dependency changes (architectural impact)")
    else:
        dscore = 0.0

    # Test coverage delta
    tscore = 0.0
    if test_coverage_delta is not None:
        if test_coverage_delta > 0:
            reasons.append(f"test additions (+{test_coverage_delta} test files)")
            tscore = -min(2.0, test_coverage_delta / 2.0)
        elif test_coverage_delta < 0:
            reasons.append(f"test removals ({abs(test_coverage_delta)} test files)")
            tscore = min(2.0, abs(test_coverage_delta) * 0.5)
    else:
        reasons.append("test coverage change not assessed")

    # Weighted sum, scaled by 3 to use the full 1-10 range
    bonus = (fscore * WEIGHT_FILES) + (lscore * WEIGHT_LINES) + (dscore * WEIGHT_DEPS) + (tscore * WEIGHT_TEST_COV)
    scaled_bonus = bonus * 3.0
    score = 1.0 + scaled_bonus

    final_score = max(1, min(10, int(round(score))))
    est_minutes = TIME_PER_POINT.get(final_score, 30)

    return final_score, est_minutes, reasons


def analyze_pr(client: GiteaClient, org: str, repo: str, pr_data: Dict) -> PRComplexity:
    pr_num = pr_data["number"]
    title = pr_data.get("title", "")
    files = client.get_pr_files(org, repo, pr_num)

    additions = sum(f.get("additions", 0) for f in files)
    deletions = sum(f.get("deletions", 0) for f in files)
    filenames = [f.get("filename", "") for f in files]

    has_deps = any(is_dependency_file(f) for f in filenames)

    test_added = sum(1 for f in files if f.get("status") == "added" and is_test_file(f.get("filename", "")))
    test_removed = sum(1 for f in files if f.get("status") == "removed" and is_test_file(f.get("filename", "")))
    test_delta = test_added - test_removed if (test_added or test_removed) else None

    score, est_min, reasons = score_pr(
        files_changed=len(files),
        additions=additions,
        deletions=deletions,
        has_dependency_changes=has_deps,
        test_coverage_delta=test_delta
    )

    return PRComplexity(
        pr_number=pr_num,
        title=title,
        files_changed=len(files),
        additions=additions,
        deletions=deletions,
        has_dependency_changes=has_deps,
        test_coverage_delta=test_delta,
        score=score,
        estimated_minutes=est_min,
        reasons=reasons
    )


def build_comment(complexity: PRComplexity) -> str:
    change_desc = f"{complexity.files_changed} files, +{complexity.additions}/-{complexity.deletions} lines"
    deps_note = "\n- :warning: Dependency changes detected — architectural review recommended" if complexity.has_dependency_changes else ""
    test_note = ""
    if complexity.test_coverage_delta is not None:
        if complexity.test_coverage_delta > 0:
            test_note = f"\n- :+1: {complexity.test_coverage_delta} test file(s) added"
        elif complexity.test_coverage_delta < 0:
            test_note = f"\n- :warning: {abs(complexity.test_coverage_delta)} test file(s) removed"

    comment = "## 📊 PR Complexity Analysis\n\n"
    comment += f"**PR #{complexity.pr_number}: {complexity.title}**\n\n"
    comment += "| Metric | Value |\n|--------|-------|\n"
    comment += f"| Changes | {change_desc} |\n"
    comment += f"| Complexity Score | **{complexity.score}/10** |\n"
    comment += f"| Estimated Review Time | ~{complexity.estimated_minutes} minutes |\n\n"
    comment += "### Scoring rationale:"
    for r in complexity.reasons:
        comment += f"\n- {r}"
    if deps_note:
        comment += deps_note
    if test_note:
        comment += test_note
    comment += "\n\n---\n"
    comment += "*Generated by PR Complexity Scorer — [issue #135](https://forge.alexanderwhitestone.com/Timmy_Foundation/compounding-intelligence/issues/135)*"
    return comment


def main():
    parser = argparse.ArgumentParser(description="PR Complexity Scorer")
    parser.add_argument("--org", default="Timmy_Foundation")
    parser.add_argument("--repo", default="compounding-intelligence")
    parser.add_argument("--token", default=os.environ.get("GITEA_TOKEN") or os.path.expanduser("~/.config/gitea/token"))
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--apply", action="store_true")
    parser.add_argument("--output", default="metrics/pr_complexity.json")
    args = parser.parse_args()

    # --token may be a literal token or a path to a token file
    if os.path.exists(args.token):
        with open(args.token) as f:
            token = f.read().strip()
    else:
        token = args.token

    if not token:
        print("ERROR: No Gitea token provided", file=sys.stderr)
        sys.exit(1)

    client = GiteaClient(token)

    print(f"Fetching open PRs for {args.org}/{args.repo}...")
    prs = client.get_open_prs(args.org, args.repo)
    if not prs:
        print("No open PRs found.")
        sys.exit(0)

    print(f"Found {len(prs)} open PR(s). Analyzing...")

    results = []
    Path(args.output).parent.mkdir(parents=True, exist_ok=True)

    for pr in prs:
        pr_num = pr["number"]
        title = pr.get("title", "")
        print(f"  Analyzing PR #{pr_num}: {title[:60]}")

        try:
            complexity = analyze_pr(client, args.org, args.repo, pr)
            results.append(complexity.to_dict())

            comment = build_comment(complexity)

            if args.dry_run:
                print(f"    → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min [DRY-RUN]")
            elif args.apply:
                success = client.post_comment(args.org, args.repo, pr_num, comment)
                status = "[commented]" if success else "[FAILED]"
                print(f"    → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min {status}")
            else:
                print(f"    → Score: {complexity.score}/10, Est: {complexity.estimated_minutes}min [no action]")

        except Exception as e:
            print(f"  ERROR analyzing PR #{pr_num}: {e}", file=sys.stderr)

    with open(args.output, "w") as f:
        json.dump({
            "org": args.org,
            "repo": args.repo,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "pr_count": len(results),
            "results": results
        }, f, indent=2)

    if results:
        scores = [r["score"] for r in results]
        print(f"\nResults saved to {args.output}")
        print(f"Summary: {len(results)} PRs, scores range {min(scores)}-{max(scores)}")
    else:
        print("\nNo results to save.")


if __name__ == "__main__":
    main()
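To make the scoring weights concrete, here is a worked example against score_pr under the constants above. This is a sketch; it assumes scripts/ is on sys.path so the module imports by file name:

```python
from pr_complexity_scorer import score_pr

# 15 files, +400/-100 lines, no dependency or test-file changes:
#   files:  5 < 15 <= MEDIUM_FILES (20)        -> fscore = 2.0
#   lines:  100 < 500 <= MEDIUM_LINES (500)    -> lscore = 2.0
#   deps:   none                               -> dscore = 0.0
#   tests:  delta is None                      -> tscore = 0.0
# bonus = 2.0*0.25 + 2.0*0.25 = 1.0; scaled = 3.0; score = round(1.0 + 3.0) = 4
score, minutes, reasons = score_pr(
    files_changed=15, additions=400, deletions=100,
    has_dependency_changes=False, test_coverage_delta=None
)
assert score == 4
assert minutes == 20  # TIME_PER_POINT[4]
```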
@@ -1,116 +0,0 @@
#!/usr/bin/env python3
"""
Smoke test for the entity_extractor pipeline — verifies:
- session/plain text reading
- mock LLM entity extraction
- deduplication and merging
- output file format

Does NOT call the real LLM.
"""

import json
import os
import sys
import tempfile
from pathlib import Path
from unittest.mock import patch

SCRIPT_DIR = Path(__file__).parent.absolute()
sys.path.insert(0, str(SCRIPT_DIR))

from session_reader import read_session, messages_to_text
import entity_extractor as ee


def mock_call_llm(prompt: str, text: str, api_base: str, api_key: str, model: str):
    """Return a fixed entity list for any input."""
    return [
        {"name": "Hermes", "type": "tool", "context": "Hermes agent uses the tools tool."},
        {"name": "Gitea", "type": "tool", "context": "Gitea is a forge."},
        {"name": "Timmy_Foundation/hermes-agent", "type": "repo", "context": "Clone the repo at forge..."},
    ]


def test_read_session_text():
    with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
        f.write('{"role": "user", "content": "Clone repo", "timestamp": "2026-04-13T10:00:00Z"}\n')
        f.write('{"role": "assistant", "content": "Done", "timestamp": "2026-04-13T10:00:05Z"}\n')
        path = f.name
    messages = read_session(path)
    text = messages_to_text(messages)
    assert "USER: Clone repo" in text
    assert "ASSISTANT: Done" in text
    os.unlink(path)
    print("  [PASS] session text extraction works")


def test_entity_deduplication_and_merge():
    existing = [
        {"name": "Hermes", "type": "tool", "count": 3, "sources": ["s1.jsonl"]}
    ]
    new = [
        {"name": "Hermes", "type": "tool", "sources": ["s2.jsonl"]},
        {"name": "Gitea", "type": "tool", "sources": ["s2.jsonl"]},
    ]
    merged = ee.merge_entities(new, existing.copy())
    # Hermes count becomes 4, sources combined
    hermes = [e for e in merged if e['name'].lower() == 'hermes'][0]
    assert hermes['count'] == 4
    assert set(hermes['sources']) == {'s1.jsonl', 's2.jsonl'}
    # Gitea is a new entry
    gitea = [e for e in merged if e['name'].lower() == 'gitea'][0]
    assert gitea['count'] == 1
    print("  [PASS] deduplication & merging works")


def test_write_and_load_entities():
    with tempfile.TemporaryDirectory() as tmp:
        kdir = Path(tmp) / "knowledge"
        kdir.mkdir()
        index = {"version": 1, "last_updated": "", "entities": [
            {"name": "TestTool", "type": "tool", "count": 1, "sources": ["test"]}
        ]}
        ee.write_entities(index, str(kdir))
        # Load back
        loaded = ee.load_existing_entities(str(kdir))
        assert loaded['entities'][0]['name'] == 'TestTool'
    print("  [PASS] entities persistence works")


def test_full_pipeline_mocked():
    with tempfile.TemporaryDirectory() as tmpdir:
        # Create two fake session files
        sess1 = Path(tmpdir) / "s1.jsonl"
        sess1.write_text('{"role":"user","content":"Use Hermes to clone","timestamp":"..."}\n')
        sess2 = Path(tmpdir) / "s2.jsonl"
        sess2.write_text('{"role":"user","content":"Deploy with Gitea","timestamp":"..."}\n')

        knowledge_dir = Path(tmpdir) / "knowledge"
        knowledge_dir.mkdir()

        # Patch call_llm so no real API is hit
        with patch('entity_extractor.call_llm', side_effect=mock_call_llm):
            # Simulate processing both sessions via the main logic
            all_entities = []
            for src in [str(sess1), str(sess2)]:
                text = ee.read_text_from_source(src)
                ents = ee.extract_from_text(text, "http://api", "fake-key", "model", source_name=Path(src).name)
                all_entities.extend(ents)

        # Merge into an empty index
        merged = ee.merge_entities(all_entities, [])
        assert len(merged) >= 3, f"Expected >=3 unique entities, got {len(merged)}"

        # Write
        index = {"version": 1, "last_updated": "", "entities": merged}
        ee.write_entities(index, str(knowledge_dir))

        # Verify the output file
        out = knowledge_dir / "entities.json"
        assert out.exists()
        data = json.loads(out.read_text())
        assert len(data['entities']) >= 3
        print(f"  [PASS] full pipeline (mocked) produced {len(data['entities'])} entities")


if __name__ == '__main__':
    test_read_session_text()
    test_entity_deduplication_and_merge()
    test_write_and_load_entities()
    test_full_pipeline_mocked()
    print("\nAll smoke tests passed.")
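session_reader is imported throughout this diff but is not one of the files shown. A minimal sketch, assuming an interface consistent with how the smoke test above uses it (an assumption, not the shipped module):

```python
import json

def read_session(path: str) -> list:
    """Read a JSONL session transcript into a list of message dicts."""
    messages = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:  # skip blank lines between records
                messages.append(json.loads(line))
    return messages

def messages_to_text(messages: list) -> str:
    """Flatten messages to 'ROLE: content' lines, matching the smoke test's asserts."""
    return "\n".join(
        f"{m.get('role', '').upper()}: {m.get('content', '')}" for m in messages
    )
```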
scripts/test_pr_complexity_scorer.py (new file, 171 lines)
@@ -0,0 +1,171 @@
#!/usr/bin/env python3
"""
Tests for PR Complexity Scorer — unit tests for the scoring logic.
"""

import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent))

from pr_complexity_scorer import (
    score_pr,
    is_dependency_file,
    is_test_file,
    TIME_PER_POINT,
    SMALL_FILES,
    MEDIUM_FILES,
    LARGE_FILES,
    SMALL_LINES,
    MEDIUM_LINES,
    LARGE_LINES,
)

PASS = 0
FAIL = 0


def test(name):
    """Decorator that runs the test immediately and tallies the result."""
    def decorator(fn):
        global PASS, FAIL
        try:
            fn()
            PASS += 1
            print(f"  [PASS] {name}")
        except AssertionError as e:
            FAIL += 1
            print(f"  [FAIL] {name}: {e}")
        except Exception as e:
            FAIL += 1
            print(f"  [FAIL] {name}: Unexpected error: {e}")
    return decorator


def assert_eq(a, b, msg=""):
    if a != b:
        raise AssertionError(f"{msg} expected {b!r}, got {a!r}")


def assert_true(v, msg=""):
    if not v:
        raise AssertionError(msg or "Expected True")


def assert_false(v, msg=""):
    if v:
        raise AssertionError(msg or "Expected False")


print("=== PR Complexity Scorer Tests ===\n")

print("-- File Classification --")

@test("dependency file detection — requirements.txt")
def _():
    assert_true(is_dependency_file("requirements.txt"))
    assert_true(is_dependency_file("src/requirements.txt"))
    assert_false(is_dependency_file("requirements_test.txt"))

@test("dependency file detection — pyproject.toml")
def _():
    assert_true(is_dependency_file("pyproject.toml"))
    assert_false(is_dependency_file("myproject.py"))

@test("test file detection — pytest style")
def _():
    assert_true(is_test_file("tests/test_api.py"))
    assert_true(is_test_file("test_module.py"))
    assert_true(is_test_file("src/module_test.py"))

@test("test file detection — other frameworks")
def _():
    assert_true(is_test_file("spec/feature_spec.rb"))
    assert_true(is_test_file("__tests__/component.test.js"))
    assert_false(is_test_file("testfixtures/helper.py"))


print("\n-- Scoring Logic --")

@test("small PR gets low score (1-3)")
def _():
    score, minutes, _ = score_pr(
        files_changed=3,
        additions=50,
        deletions=10,
        has_dependency_changes=False,
        test_coverage_delta=None
    )
    assert_true(1 <= score <= 3, f"Score should be low, got {score}")
    assert_true(minutes < 20)

@test("medium PR gets medium score (4-6)")
def _():
    score, minutes, _ = score_pr(
        files_changed=15,
        additions=400,
        deletions=100,
        has_dependency_changes=False,
        test_coverage_delta=None
    )
    assert_true(4 <= score <= 6, f"Score should be medium, got {score}")
    assert_true(20 <= minutes <= 45)

@test("large PR gets high score (7-9)")
def _():
    score, minutes, _ = score_pr(
        files_changed=60,
        additions=3000,
        deletions=1500,
        has_dependency_changes=True,
        test_coverage_delta=None
    )
    assert_true(7 <= score <= 9, f"Score should be high, got {score}")
    assert_true(minutes >= 45)

@test("dependency changes boost score")
def _():
    base_score, _, _ = score_pr(
        files_changed=10, additions=200, deletions=50,
        has_dependency_changes=False, test_coverage_delta=None
    )
    dep_score, _, _ = score_pr(
        files_changed=10, additions=200, deletions=50,
        has_dependency_changes=True, test_coverage_delta=None
    )
    assert_true(dep_score > base_score, f"Deps: {base_score} -> {dep_score}")

@test("adding tests lowers complexity")
def _():
    base_score, _, _ = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=None
    )
    better_score, _, _ = score_pr(
        files_changed=8, additions=180, deletions=20,
        has_dependency_changes=False, test_coverage_delta=3
    )
    assert_true(better_score < base_score, f"Tests: {base_score} -> {better_score}")

@test("removing tests increases complexity")
def _():
    base_score, _, _ = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=None
    )
    worse_score, _, _ = score_pr(
        files_changed=8, additions=150, deletions=20,
        has_dependency_changes=False, test_coverage_delta=-2
    )
    assert_true(worse_score > base_score, f"Remove tests: {base_score} -> {worse_score}")

@test("score bounded 1-10")
def _():
    for files, adds, dels in [(1, 10, 5), (100, 10000, 5000)]:
        score, _, _ = score_pr(files, adds, dels, False, None)
        assert_true(1 <= score <= 10, f"Score {score} out of range")

@test("estimated minutes exist for all scores")
def _():
    for s in range(1, 11):
        assert_true(s in TIME_PER_POINT, f"Missing time for score {s}")


if __name__ == "__main__":
    print(f"\n=== Results: {PASS} passed, {FAIL} failed ===")
    sys.exit(0 if FAIL == 0 else 1)
@@ -1,42 +0,0 @@
# Entity Extraction Prompt

## System Prompt
You are an entity extraction engine. You read text and output ONLY a JSON array of named entities. You do not infer. You extract only what the text explicitly mentions.

## Task
Extract all named entities from the provided text. Categorize each entity into exactly one of these types:
- `person` — an individual's name (e.g., Alexander, Rockachopa, Allegro)
- `project` — a software project or component name (e.g., The Nexus, Timmy Home, compounding-intelligence)
- `tool` — a software tool, command, library, or framework (e.g., git, Docker, PyTorch, Hermes)
- `concept` — an abstract idea, methodology, or paradigm (e.g., compounding intelligence, bootstrap, harvester)
- `repo` — a repository reference in the form `owner/repo`, or a URL pointing to a repo

## Rules
1. Extract ONLY names that appear explicitly in the text.
2. Do NOT infer, assume, or hallucinate.
3. Each entity must have: `name` (exact string), `type` (one of the five above), and `context` (a short snippet showing usage, 1-2 sentences).
4. The same entity mentioned multiple times should appear only ONCE in the output (deduplicate by name+type).
5. For `repo` type, match patterns like `owner/repo`, `github.com/owner/repo`, `forge.alexanderwhitestone.com/owner/repo`.
6. For `tool` type, include commands (git, pytest), platforms (Linux, macOS), runtimes (Python, Node.js), and CLI utilities.
7. For `person` type, look for capitalized full names, or single names used in personal attribution ("asked Alex", "for Alexander").
8. For `concept` type, include technical terms that represent an idea rather than a concrete thing.

## Output Format
Return ONLY valid JSON, no markdown, no explanation. An array of objects:
```json
[
  {
    "name": "Hermes",
    "type": "tool",
    "context": "Hermes agent uses the tools tool to execute commands."
  },
  {
    "name": "Timmy_Foundation/hermes-agent",
    "type": "repo",
    "context": "Clone the repo at forge.../Timmy_Foundation/hermes-agent"
  }
]
```

## Text to extract from:
{{text}}
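Despite the "no markdown" rule, parse_response in the removed entity_extractor.py also tolerates a model that wraps this array in a fence. A quick sketch of that fallback path (assumes entity_extractor is importable and its prompt file is present):

```python
from entity_extractor import parse_response

# A fenced response; the regex fallback in parse_response should unwrap it:
fenced = '```json\n[{"name": "git", "type": "tool", "context": "run git clone"}]\n```'
entities = parse_response(fenced)
assert entities is not None
assert entities[0]["name"] == "git"
```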
@@ -1,82 +0,0 @@
"""
Test suite for entity_extractor.py (Issue #144).

Tests cover:
- Text reading from various formats
- Entity deduplication logic
- Output file structure
- Integration: batch processing yields 100+ entities from test_sessions
"""

import json
import sys
import tempfile
from pathlib import Path

# Test the pure functions directly; avoid hitting the real LLM in unit tests.
sys.path.insert(0, str(Path(__file__).resolve().parents[1] / "scripts"))

# Approach: feed predetermined entities into the merge/dedup/output code paths
# rather than calling the LLM.


def test_entity_key_normalization():
    from entity_extractor import entity_key
    assert entity_key("Hermes", "tool") == entity_key("hermes", "TOOL")
    assert entity_key("Git", "tool") != entity_key("Git", "project")


def test_merge_entities_deduplication():
    from entity_extractor import merge_entities
    existing = [
        {"name": "Hermes", "type": "tool", "count": 5, "sources": ["a.jsonl"]}
    ]
    new = [
        {"name": "Hermes", "type": "tool", "sources": ["b.jsonl"]},
        {"name": "Gitea", "type": "tool", "sources": ["b.jsonl"]}
    ]
    merged = merge_entities(new, existing.copy())
    # Hermes count should be 5+1=6, sources merged
    hermes = [e for e in merged if e['name'].lower() == 'hermes'][0]
    assert hermes['count'] == 6
    assert set(hermes['sources']) == {"a.jsonl", "b.jsonl"}
    # Gitea added fresh
    gitea = [e for e in merged if e['name'].lower() == 'gitea'][0]
    assert gitea['count'] == 1


def test_output_schema():
    from entity_extractor import write_entities, load_existing_entities
    with tempfile.TemporaryDirectory() as tmp:
        kdir = Path(tmp) / "knowledge"
        kdir.mkdir()
        index = {"version": 1, "last_updated": "", "entities": [
            {"name": "Test", "type": "tool", "count": 1, "sources": ["test"]}
        ]}
        write_entities(index, str(kdir))
        # Verify the file was written
        out = kdir / "entities.json"
        assert out.exists()
        data = json.loads(out.read_text())
        assert "entities" in data
        assert data["entities"][0]["name"] == "Test"


def test_batch_yields_many_entities():
    """Merging across batches should dedupe; stands in for the 100+-entity batch run, which needs a live LLM."""
    from entity_extractor import merge_entities
    # Simulate a few sources each returning a diverse entity set
    mock_sources = [
        [{"name": "Hermes", "type": "tool", "sources": ["s1"]},
         {"name": "Gitea", "type": "tool", "sources": ["s1"]},
         {"name": "Timmy_Foundation/hermes-agent", "type": "repo", "sources": ["s1"]}],
        [{"name": "Hermes", "type": "tool", "sources": ["s2"]},  # duplicate
         {"name": "Docker", "type": "tool", "sources": ["s2"]},
         {"name": "Alexander", "type": "person", "sources": ["s2"]}],
    ]
    merged = []
    for batch in mock_sources:
        merged = merge_entities(batch, merged)
    # Ensure dedup works across batches
    names = [e['name'].lower() for e in merged]
    assert names.count('hermes') == 1
    assert len(merged) == 5  # Hermes, Gitea, repo, Docker, Alexander

# The real LLM extraction test would require a live API key; skip in CI.