Compare commits
step35/140 ... step35/150: 1 commit (11a4666363)
knowledge/global/citations.yaml (deleted)
@@ -1,16 +0,0 @@
# Key Papers to Track
# Configuration for citation_tracker.py
# Each paper needs a Semantic Scholar ID (s2_id) and title

papers:
  - s2_id: "CorpusId:215715652"
    title: "Attention Is All You Need"
    notes: "Foundational transformer paper by Vaswani et al. (2017)"

  - s2_id: "CorpusId:643390714"
    title: "Language Models are Few-Shot Learners"
    notes: "GPT-3 paper by Brown et al. (2020)"

  - s2_id: "arXiv:2303.18247"
    title: "Sovereign Intelligence: Local-First AI Agents"
    notes: "Timmy architecture paper (placeholder - update when published)"
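For reference, a minimal sketch of how an entry in this schema can be sanity-checked before the tracker consumes it (validate_papers is an illustrative helper, not part of the repo; it assumes the config path used by the loader below):

import yaml

def validate_papers(path: str) -> list[dict]:
    """Check that every entry carries the required s2_id and title keys."""
    with open(path) as f:
        data = yaml.safe_load(f) or {}
    papers = data.get("papers", [])
    for i, entry in enumerate(papers):
        for key in ("s2_id", "title"):
            if key not in entry:
                raise ValueError(f"papers[{i}] is missing required key '{key}'")
    return papers

papers = validate_papers("knowledge/global/citations.yaml")
print(f"{len(papers)} valid paper entries")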
scripts/citation_tracker.py (deleted)
@@ -1,235 +0,0 @@
#!/usr/bin/env python3
"""
Citation Tracker — Monitor citations of key papers.
Tracks citation counts, identifies citing papers, extracts citation context, and generates monthly reports.

Issue: #140 (7.8)
Categories: fact, pattern
"""

import argparse
import json
import sys
import urllib.request
import urllib.error
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional

SCRIPT_DIR = Path(__file__).parent.absolute()
KNOWLEDGE_DIR = SCRIPT_DIR.parent / "knowledge"
METRICS_DIR = SCRIPT_DIR.parent / "metrics"
INDEX_PATH = KNOWLEDGE_DIR / "index.json"

# Semantic Scholar API (free, no key required for basic lookups)
S2_API_BASE = "https://api.semanticscholar.org/graph/v1"
def fetch_paper(s2_id: str) -> Optional[Dict]:
    """Fetch paper metadata from Semantic Scholar."""
    url = f"{S2_API_BASE}/paper/{s2_id}?fields=title,year,citationCount,externalIds,publicationVenue,publicationTypes"
    try:
        with urllib.request.urlopen(url, timeout=10) as resp:
            return json.loads(resp.read())
    except (urllib.error.HTTPError, urllib.error.URLError) as e:
        print(f"Warning: Failed to fetch {s2_id}: {e}", file=sys.stderr)
        return None


def fetch_citations(s2_id: str, limit: int = 50) -> List[Dict]:
    """Fetch recent citing papers from Semantic Scholar."""
    url = f"{S2_API_BASE}/paper/{s2_id}/citations?fields=title,year,authors,publicationVenue,publicationTypes&limit={limit}"
    try:
        with urllib.request.urlopen(url, timeout=15) as resp:
            data = json.loads(resp.read())
            return [c["citingPaper"] for c in data.get("data", [])]
    except (urllib.error.HTTPError, urllib.error.URLError) as e:
        print(f"Warning: Failed to fetch citations for {s2_id}: {e}", file=sys.stderr)
        return []
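# The Semantic Scholar API can rate-limit unauthenticated clients, and the
# fetchers above give up after a single HTTPError. A minimal retry sketch
# (illustrative; fetch_json_with_retry is a hypothetical helper, assuming
# HTTP 429 signals rate limiting):

import time

def fetch_json_with_retry(url: str, attempts: int = 3, timeout: int = 10) -> Optional[Dict]:
    """GET a JSON resource, retrying on HTTP 429 with linear backoff."""
    for attempt in range(1, attempts + 1):
        try:
            with urllib.request.urlopen(url, timeout=timeout) as resp:
                return json.loads(resp.read())
        except urllib.error.HTTPError as e:
            if e.code == 429 and attempt < attempts:
                time.sleep(2 * attempt)  # back off, then retry
                continue
            print(f"Warning: request failed ({e})", file=sys.stderr)
            return None
        except urllib.error.URLError as e:
            print(f"Warning: request failed ({e})", file=sys.stderr)
            return None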
def load_key_papers() -> List[Dict]:
    """Load key papers list from citations.yaml."""
    config_path = KNOWLEDGE_DIR / "global" / "citations.yaml"
    if not config_path.exists():
        print(f"Error: {config_path} not found. Create it with key papers list.", file=sys.stderr)
        sys.exit(1)

    import yaml
    with open(config_path) as f:
        data = yaml.safe_load(f)

    papers = []
    for entry in data.get("papers", []):
        papers.append({
            "id": entry["s2_id"],
            "title": entry.get("title", "Unknown"),
            "notes": entry.get("notes", "")
        })
    return papers
def load_index() -> Dict:
    """Load or initialize knowledge index."""
    if INDEX_PATH.exists():
        with open(INDEX_PATH) as f:
            return json.load(f)
    return {"version": 1, "last_updated": "", "total_facts": 0, "facts": []}


def save_index(index: Dict) -> None:
    """Save knowledge index."""
    KNOWLEDGE_DIR.mkdir(parents=True, exist_ok=True)
    with open(INDEX_PATH, "w") as f:
        json.dump(index, f, indent=2)
def add_citation_fact(index: Dict, fact: str, repo: str, confidence: float,
                      tags: List[str], source_count: int = 1) -> None:
    """Add a new citation fact to the index.

    Note: repo is accepted for call-site symmetry but not currently stored
    on the entry.
    """
    # Determine the next sequence number for citation facts in this domain
    domain = "global"
    category = "fact"
    prefix = f"{domain}:{category}:"
    seq_nums = []
    for entry in index["facts"]:
        if entry["id"].startswith(prefix):
            try:
                seq_nums.append(int(entry["id"].split(":")[-1]))
            except ValueError:
                continue
    next_seq = max(seq_nums, default=0) + 1
    new_id = f"{domain}:{category}:{next_seq:03d}"

    today = datetime.now(timezone.utc).strftime("%Y-%m-%d")
    fact_entry = {
        "id": new_id,
        "fact": fact,
        "category": category,
        "domain": domain,
        "confidence": confidence,
        "tags": tags,
        "source_count": source_count,
        "first_seen": today,
        "last_confirmed": today
    }
    index["facts"].append(fact_entry)
    index["total_facts"] = len(index["facts"])
    index["last_updated"] = datetime.now(timezone.utc).isoformat()
def update_citation_data() -> None:
    """Update citation counts and facts for all key papers."""
    papers = load_key_papers()
    index = load_index()
    updated = 0

    for paper in papers:
        s2_id = paper["id"]
        title = paper["title"]

        # Fetch current paper data
        data = fetch_paper(s2_id)
        if not data:
            continue

        citation_count = data.get("citationCount", 0)
        external_ids = data.get("externalIds", {})
        arxiv_id = external_ids.get("ArXiv") if external_ids else None

        # Add citation count fact (high confidence - directly from API)
        count_fact = f"Paper '{title}' (S2:{s2_id}) has {citation_count} citations as of {datetime.now(timezone.utc).strftime('%Y-%m-%d')}"
        if arxiv_id:
            count_fact += f" [arXiv:{arxiv_id}]"

        add_citation_fact(
            index=index,
            fact=count_fact,
            repo="compounding-intelligence",
            confidence=0.95,
            tags=["citation", "tracking", "paper", s2_id],
            source_count=1
        )
        updated += 1

        # Fetch recent citations (context extraction - limited batch)
        citations = fetch_citations(s2_id, limit=20)
        for citation in citations:
            citing_title = citation.get("title", "Unknown")
            citing_year = citation.get("year", "Unknown year")
            authors = citation.get("authors", [])
            author_names = [a.get("name", "") for a in authors[:3]]
            if len(authors) > 3:
                author_names.append("et al.")

            cite_fact = f"Paper '{citing_title}' ({', '.join(author_names)}, {citing_year}) cites '{title}'"
            add_citation_fact(
                index=index,
                fact=cite_fact,
                repo="compounding-intelligence",
                confidence=0.8,
                tags=["citation", "citing-paper", s2_id],
                source_count=1
            )

        print(f"Updated: {title} — {citation_count} citations, {len(citations)} recent")

    save_index(index)
    print(f"\nUpdated {updated} papers. Total facts in index: {index['total_facts']}")
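# Each --update run appends fresh facts, so re-running on the same day creates
# exact duplicate count facts (the fact text embeds the date). A minimal dedup
# sketch (illustrative; fact_already_recorded is a hypothetical helper that
# treats the fact string as a stable key):

def fact_already_recorded(index: Dict, fact_text: str) -> bool:
    """True if an identical fact string is already in the index."""
    return any(entry["fact"] == fact_text for entry in index["facts"])

# ...which could guard the call inside update_citation_data:
#     if fact_already_recorded(index, count_fact):
#         continue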
def generate_monthly_report(month: Optional[str] = None) -> str:
    """Generate a monthly citation report."""
    target_month = month or datetime.now(timezone.utc).strftime("%Y-%m")
    year, mon = map(int, target_month.split("-"))

    index = load_index()
    monthly_facts = []

    for fact in index["facts"]:
        last_confirmed = fact.get("last_confirmed", "")
        if last_confirmed.startswith(f"{year}-{mon:02d}"):
            monthly_facts.append(fact)

    # Build report
    lines = []
    lines.append(f"# Citation Tracker Monthly Report — {target_month}")
    lines.append("")
    lines.append(f"Generated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M UTC')}")
    lines.append(f"Total citation facts this month: {len(monthly_facts)}")
    lines.append("")

    # Group facts for the report. This keys on the full fact text, so each
    # distinct fact gets its own section; parsing the paper title out of the
    # text would allow true per-paper grouping.
    from collections import defaultdict
    by_paper = defaultdict(list)
    for fact in monthly_facts:
        by_paper[fact["fact"]].append(fact)

    for paper_title, facts in by_paper.items():
        lines.append(f"## {paper_title}")
        for f in facts:
            lines.append(f"- {f['fact']} (confidence: {f['confidence']})")
        lines.append("")

    report = "\n".join(lines)

    # Save report
    METRICS_DIR.mkdir(parents=True, exist_ok=True)
    report_path = METRICS_DIR / f"citation_report_{target_month}.md"
    with open(report_path, "w") as f:
        f.write(report)

    print(f"Monthly report saved to: {report_path}")
    return report
def main() -> None:
    parser = argparse.ArgumentParser(description="Citation Tracker — Monitor key paper citations")
    parser.add_argument("--update", action="store_true", help="Fetch latest citation data")
    parser.add_argument("--report", action="store_true", help="Generate monthly report")
    parser.add_argument("--month", type=str, help="Month for report (YYYY-MM), defaults to current")
    args = parser.parse_args()

    if args.update:
        update_citation_data()
    elif args.report:
        generate_monthly_report(args.month)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
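With the argparse wiring above, typical invocations look like this (illustrative; the update pass needs network access to Semantic Scholar and PyYAML installed, and the month value is just an example):

    python3 scripts/citation_tracker.py --update
    python3 scripts/citation_tracker.py --report --month 2026-04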
scripts/graph_query.py (new executable file, 170 lines)
@@ -0,0 +1,170 @@
#!/usr/bin/env python3
"""
Graph Query Engine — traverse the knowledge graph.

Usage:
    python3 scripts/graph_query.py neighbors <fact_id> [--knowledge-dir knowledge/]
    python3 scripts/graph_query.py path <from_id> <to_id> [--max-hops 10]
    python3 scripts/graph_query.py subgraph <fact_id> [--depth 2]
    python3 scripts/graph_query.py stats  # Graph statistics

Outputs JSON to stdout.
"""

import argparse
import json
import sys
import time
from pathlib import Path
from collections import defaultdict, deque

# --- Graph building ---

def load_index(knowledge_dir: Path) -> dict:
    index_path = knowledge_dir / "index.json"
    if not index_path.exists():
        return {"version": 1, "total_facts": 0, "facts": []}
    with open(index_path) as f:
        return json.load(f)


def build_adjacency(facts: list[dict]) -> tuple[dict, dict]:
    """Build an undirected adjacency list from fact 'related' fields.

    Returns (adjacency, id_to_fact).
    """
    adj = defaultdict(set)
    id_to_fact = {}
    for fact in facts:
        fid = fact.get("id")
        if not fid:
            continue
        id_to_fact[fid] = fact
        for related_id in fact.get("related", []):
            adj[fid].add(related_id)
            adj[related_id].add(fid)  # undirected
    return dict(adj), id_to_fact
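# A toy demonstration of the shape build_adjacency returns (illustrative):
#
#   facts = [{"id": "a", "fact": "A", "related": ["b"]},
#            {"id": "b", "fact": "B", "related": []}]
#   adj, id_to_fact = build_adjacency(facts)
#   # adj == {"a": {"b"}, "b": {"a"}}  (the reverse edge is added automatically)
#   # id_to_fact["b"]["fact"] == "B"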
# --- Queries ---

def query_neighbors(fact_id: str, adj: dict, id_to_fact: dict) -> dict:
    """Return directly connected facts."""
    # Only count neighbors we can resolve, so "count" matches the list
    known = [nid for nid in adj.get(fact_id, set()) if nid in id_to_fact]
    return {
        "query": "neighbors",
        "fact_id": fact_id,
        "neighbors": [
            {"id": nid, "fact": id_to_fact[nid].get("fact", ""), "category": id_to_fact[nid].get("category", "")}
            for nid in known
        ],
        "count": len(known),
    }


def query_path(from_id: str, to_id: str, adj: dict, max_hops: int = 10) -> dict:
    """Find the shortest path between two facts using BFS."""
    if from_id not in adj or to_id not in adj:
        return {"query": "path", "from": from_id, "to": to_id, "path": None, "error": "Fact not found in graph"}

    if from_id == to_id:
        return {"query": "path", "from": from_id, "to": to_id, "path": [from_id], "length": 0}

    queue = deque([(from_id, [from_id])])
    visited = {from_id}

    while queue:
        current, path = queue.popleft()
        if len(path) > max_hops:
            continue
        for neighbor in adj.get(current, []):
            if neighbor == to_id:
                return {"query": "path", "from": from_id, "to": to_id, "path": path + [to_id], "length": len(path)}
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append((neighbor, path + [neighbor]))

    return {"query": "path", "from": from_id, "to": to_id, "path": None, "error": f"No path found within {max_hops} hops"}
def query_subgraph(fact_id: str, adj: dict, id_to_fact: dict, depth: int = 2) -> dict:
    """Extract the connected subgraph within N hops."""
    if fact_id not in adj:
        return {"query": "subgraph", "fact_id": fact_id, "nodes": [], "edges": [], "error": "Fact not found"}

    # BFS to collect every node within `depth` hops
    visited = set()
    queue = deque([(fact_id, 0)])
    subgraph_nodes = set()

    while queue:
        node, d = queue.popleft()
        if node in visited or d > depth:
            continue
        visited.add(node)
        subgraph_nodes.add(node)
        for neighbor in adj.get(node, []):
            if neighbor not in visited:
                queue.append((neighbor, d + 1))

    # Collect each undirected edge once, only between nodes inside the subgraph
    seen_edges = set()
    subgraph_edges = []
    for node in sorted(subgraph_nodes):
        for neighbor in adj.get(node, []):
            edge = frozenset((node, neighbor))
            if neighbor in subgraph_nodes and edge not in seen_edges:
                seen_edges.add(edge)
                subgraph_edges.append({"source": node, "target": neighbor})

    return {
        "query": "subgraph",
        "fact_id": fact_id,
        "depth": depth,
        "nodes": [
            {"id": nid, "fact": id_to_fact.get(nid, {}).get("fact", ""), "category": id_to_fact.get(nid, {}).get("category", "")}
            for nid in sorted(subgraph_nodes)
        ],
        "edges": subgraph_edges,
        "node_count": len(subgraph_nodes),
        "edge_count": len(subgraph_edges),
    }


def query_stats(adj: dict, id_to_fact: dict) -> dict:
    """Graph statistics."""
    return {
        "statistics": {
            "total_facts": len(id_to_fact),
            "total_edges": sum(len(neighbors) for neighbors in adj.values()) // 2,
            "connected_components": 0,  # TODO: compute if needed (see sketch below)
            "average_degree": sum(len(neighbors) for neighbors in adj.values()) / len(adj) if adj else 0,
        }
    }
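# A minimal sketch for the connected_components TODO above (illustrative;
# count_components is a hypothetical helper doing an iterative flood-fill
# over the adjacency map, one component per unvisited seed node):

def count_components(adj: dict) -> int:
    """Number of connected components in the undirected graph."""
    visited = set()
    components = 0
    for start in adj:
        if start in visited:
            continue
        components += 1
        stack = [start]
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            stack.extend(n for n in adj.get(node, []) if n not in visited)
    return components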
# --- CLI ---

def main():
    parser = argparse.ArgumentParser(description="Graph query engine for knowledge store")
    parser.add_argument("command", choices=["neighbors", "path", "subgraph", "stats"])
    parser.add_argument("from_id", nargs="?", help="Starting fact ID")
    parser.add_argument("to_id", nargs="?", help="Target fact ID (for path query)")
    parser.add_argument("--knowledge-dir", default="knowledge", help="Knowledge directory")
    parser.add_argument("--depth", type=int, default=2, help="Depth for subgraph query")
    parser.add_argument("--max-hops", type=int, default=10, help="Max hops for path query")
    args = parser.parse_args()

    start = time.time()
    knowledge_dir = Path(args.knowledge_dir)
    index = load_index(knowledge_dir)
    facts = index.get("facts", [])
    adj, id_to_fact = build_adjacency(facts)

    result = None
    if args.command == "neighbors":
        if not args.from_id:
            print("ERROR: neighbors requires <fact_id>", file=sys.stderr)
            sys.exit(1)
        result = query_neighbors(args.from_id, adj, id_to_fact)
    elif args.command == "path":
        if not args.from_id or not args.to_id:
            print("ERROR: path requires <from_id> <to_id>", file=sys.stderr)
            sys.exit(1)
        result = query_path(args.from_id, args.to_id, adj, max_hops=args.max_hops)
    elif args.command == "subgraph":
        if not args.from_id:
            print("ERROR: subgraph requires <fact_id>", file=sys.stderr)
            sys.exit(1)
        result = query_subgraph(args.from_id, adj, id_to_fact, depth=args.depth)
    elif args.command == "stats":
        result = query_stats(adj, id_to_fact)

    result["elapsed_ms"] = round((time.time() - start) * 1000, 2)
    print(json.dumps(result, indent=2))


if __name__ == "__main__":
    main()
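For reference, a query against the default knowledge/ directory looks like this (the fact ID is illustrative); the output is a JSON object carrying the query type, the result fields shown in the functions above, and an elapsed_ms timing:

    python3 scripts/graph_query.py neighbors global:fact:001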
(deleted scratch test script)
@@ -1,31 +0,0 @@
#!/usr/bin/env python3
import sys
sys.path.insert(0, "/Users/apayne/burn-clone/STEP35-compounding-intelligence-140/scripts")
import yaml
from pathlib import Path

KNOWLEDGE_DIR = Path("/Users/apayne/burn-clone/STEP35-compounding-intelligence-140/knowledge")
config_path = KNOWLEDGE_DIR / "global" / "citations.yaml"

with open(config_path) as f:
    data = yaml.safe_load(f)

papers = data.get("papers", [])
print(f"Loaded {len(papers)} key papers:")
for p in papers:
    print(f"  - {p['s2_id']}: {p['title']}")

# Test that citation_tracker module loads
import importlib.util
spec = importlib.util.spec_from_file_location("citation_tracker",
    "/Users/apayne/burn-clone/STEP35-compounding-intelligence-140/scripts/citation_tracker.py")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print("Module loaded successfully")

# Test fetch functions (with mock/real API)
result = mod.fetch_paper("CorpusId:215715652")  # Attention Is All You Need
if result:
    print(f"Paper fetched: {result.get('title')} — {result.get('citationCount')} citations")
else:
    print("Paper fetch failed (may be network issue)")
scripts/test_graph_query.py (new executable file, 165 lines)
@@ -0,0 +1,165 @@
#!/usr/bin/env python3
"""Tests for scripts/graph_query.py — Graph Query Engine."""

import json
import sys
import tempfile
from pathlib import Path

sys.path.insert(0, str(Path(__file__).resolve().parent))

from graph_query import build_adjacency, query_neighbors, query_path, query_subgraph, query_stats


def make_index(facts: list[dict], tmp_dir: Path) -> Path:
    index = {
        "version": 1,
        "last_updated": "2026-04-13T20:00:00Z",
        "total_facts": len(facts),
        "facts": facts,
    }
    path = tmp_dir / "index.json"
    with open(path, "w") as f:
        json.dump(index, f)
    return path
def test_neighbors():
    """Neighbor query returns directly connected facts."""
    facts = [
        {"id": "a", "fact": "A", "category": "fact", "related": ["b", "c"]},
        {"id": "b", "fact": "B", "category": "fact", "related": ["a"]},
        {"id": "c", "fact": "C", "category": "fact", "related": ["a"]},
        {"id": "d", "fact": "D", "category": "fact", "related": []},
    ]
    adj, id_to_fact = build_adjacency(facts)
    result = query_neighbors("a", adj, id_to_fact)
    neighbor_ids = {n["id"] for n in result["neighbors"]}
    assert neighbor_ids == {"b", "c"}, f"Expected b,c got {neighbor_ids}"
    assert result["count"] == 2
    print("PASS: neighbors")


def test_path_found():
    """Path query finds the shortest path."""
    facts = [
        {"id": "a", "fact": "A", "related": ["b"]},
        {"id": "b", "fact": "B", "related": ["a", "c"]},
        {"id": "c", "fact": "C", "related": ["b", "d"]},
        {"id": "d", "fact": "D", "related": ["c"]},
    ]
    adj, _ = build_adjacency(facts)
    result = query_path("a", "d", adj)
    assert result["path"] == ["a", "b", "c", "d"], f"Got path {result['path']}"
    assert result["length"] == 3
    print("PASS: path_found")


def test_path_not_found():
    """Path query returns an error when no path exists."""
    facts = [
        {"id": "a", "fact": "A", "related": ["b"]},
        {"id": "b", "fact": "B", "related": ["a"]},
        {"id": "c", "fact": "C", "related": ["d"]},
        {"id": "d", "fact": "D", "related": ["c"]},
    ]
    adj, _ = build_adjacency(facts)
    result = query_path("a", "c", adj, max_hops=5)
    assert result["path"] is None
    assert "error" in result
    print("PASS: path_not_found")
def test_subgraph_extraction():
    """Subgraph extraction returns nodes within depth."""
    facts = [
        {"id": "a", "fact": "A", "related": ["b", "c"]},
        {"id": "b", "fact": "B", "related": ["a", "d"]},
        {"id": "c", "fact": "C", "related": ["a"]},
        {"id": "d", "fact": "D", "related": ["b", "e"]},
        {"id": "e", "fact": "E", "related": ["d"]},
    ]
    adj, id_to_fact = build_adjacency(facts)
    result = query_subgraph("a", adj, id_to_fact, depth=1)
    node_ids = {n["id"] for n in result["nodes"]}
    assert node_ids == {"a", "b", "c"}, f"Got {node_ids}"
    assert result["node_count"] == 3
    print("PASS: subgraph_depth1")


def test_subgraph_depth2():
    """A depth-2 subgraph includes nodes two hops out, but not three."""
    facts = [
        {"id": "a", "fact": "A", "related": ["b"]},
        {"id": "b", "fact": "B", "related": ["a", "c"]},
        {"id": "c", "fact": "C", "related": ["b", "d"]},
        {"id": "d", "fact": "D", "related": ["c"]},
    ]
    adj, id_to_fact = build_adjacency(facts)
    result = query_subgraph("a", adj, id_to_fact, depth=2)
    node_ids = {n["id"] for n in result["nodes"]}
    assert node_ids == {"a", "b", "c"}, f"Got {node_ids}"
    print("PASS: subgraph_depth2")


def test_stats():
    """Statistics query returns graph metrics."""
    facts = [
        {"id": "a", "fact": "A", "related": ["b"]},
        {"id": "b", "fact": "B", "related": ["a", "c"]},
        {"id": "c", "fact": "C", "related": ["b"]},
    ]
    adj, id_to_fact = build_adjacency(facts)
    result = query_stats(adj, id_to_fact)
    assert result["statistics"]["total_facts"] == 3
    assert result["statistics"]["total_edges"] == 2  # each undirected edge is double-counted, then halved
    assert result["statistics"]["average_degree"] > 0
    print("PASS: stats")
def test_cli_integration():
    """CLI produces valid JSON with correct query types."""
    import subprocess as sp
    with tempfile.TemporaryDirectory() as tmp:
        tmp_dir = Path(tmp)
        facts = [
            {"id": "x", "fact": "X", "related": ["y"]},
            {"id": "y", "fact": "Y", "related": ["x", "z"]},
            {"id": "z", "fact": "Z", "related": ["y"]},
        ]
        index_path = make_index(facts, tmp_dir)
        knowledge_dir = index_path.parent
        script_path = Path(__file__).resolve().parent / "graph_query.py"

        result = sp.run(
            [sys.executable, str(script_path), "neighbors", "x", "--knowledge-dir", str(knowledge_dir)],
            capture_output=True, text=True, cwd=str(tmp_dir)
        )
        assert result.returncode == 0, f"neighbors failed: {result.stderr}"
        out = json.loads(result.stdout)
        assert out["query"] == "neighbors"
        assert out["fact_id"] == "x"
        assert out["count"] == 1

        result = sp.run(
            [sys.executable, str(script_path), "path", "x", "z", "--knowledge-dir", str(knowledge_dir)],
            capture_output=True, text=True, cwd=str(tmp_dir)
        )
        assert result.returncode == 0, f"path failed: {result.stderr}"
        out = json.loads(result.stdout)
        assert out["path"] == ["x", "y", "z"]

    print("PASS: cli_integration")


if __name__ == "__main__":
    test_neighbors()
    test_path_found()
    test_path_not_found()
    test_subgraph_extraction()
    test_subgraph_depth2()
    test_stats()
    test_cli_integration()
    print("\nAll graph_query tests passed!")
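The suite uses plain asserts with no test-framework dependency, so running the file directly is enough; the sys.path insert at the top resolves graph_query.py relative to the test file, so the working directory does not matter:

    python3 scripts/test_graph_query.py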