Compare commits
1 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 07eb8604f5 |  |
@@ -38,6 +38,7 @@ dependencies = [
[project.optional-dependencies]
modal = ["modal>=1.0.0,<2"]
rag = ["lightrag-hku>=1.4.0,<2", "aiohttp>=3.9.0,<4"]
daytona = ["daytona>=0.148.0,<1"]
dev = ["debugpy>=1.8.0,<2", "pytest>=9.0.2,<10", "pytest-asyncio>=1.3.0,<2", "pytest-xdist>=3.0,<4", "mcp>=1.2.0,<2"]
messaging = ["python-telegram-bot[webhooks]>=22.6,<23", "discord.py[voice]>=2.7.1,<3", "aiohttp>=3.13.3,<4", "slack-bolt>=1.18.0,<2", "slack-sdk>=3.27.0,<4"]
@@ -284,44 +284,7 @@ The gap can be reduced from 81 points to ~25-45 points with proper interventions

---

## 6. Implementation Recommendations

Based on the root-cause analysis above, the following concrete steps are recommended for the Hermes agent memory pipeline (see issue #659 for the parent epic and #876 for this research report):

### 6.1 Chunk-Overlap Retrieval

**Problem:** Relevant information is frequently split across chunk boundaries. Retrieval finds one chunk but the answer spans two.

**Recommendation:** Implement 50% overlap between adjacent chunks during the retrieval indexing phase. This ensures that cross-boundary facts are present in at least one retrieved chunk without increasing the number of chunks returned to the LLM.
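
A minimal sketch of what such a chunker could look like (illustrative only; the token list and `chunk_size` default stand in for whatever tokenizer and sizes the pipeline actually uses):

```python
def chunk_with_overlap(tokens: list[str], chunk_size: int = 400) -> list[list[str]]:
    """Split a token sequence into chunks where each chunk shares 50% of its
    tokens with the previous one, so a fact straddling a boundary always
    appears whole in at least one chunk."""
    step = chunk_size // 2  # 50% overlap
    chunks = []
    for start in range(0, max(len(tokens) - step, 1), step):
        chunks.append(tokens[start:start + chunk_size])
    return chunks
```

For reference, the LightRAG configuration added in this PR uses `chunk_token_size=1200` with `chunk_overlap_token_size=100` (roughly 8% overlap); adopting this recommendation would mean raising that overlap substantially, at the cost of a larger index.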

### 6.2 Retrieval Confidence Scoring

**Problem:** The model generates plausible-sounding but wrong answers because retrieved context provides false confidence.

**Recommendation:** Add a confidence score to each retrieved chunk (e.g., cosine-similarity threshold + source-reliability weight). Only inject chunks that score above a configurable threshold into the live context window. Chunks below the threshold are silently dropped, and the behavior is logged for evaluation.
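
A hedged sketch of the gating step (the 0.75 threshold, the `source_weight` field, and the chunk dict layout are illustrative assumptions, not decided values):

```python
import logging

import numpy as np

logger = logging.getLogger(__name__)


def filter_chunks(query_vec: np.ndarray, chunks: list[dict], threshold: float = 0.75) -> list[dict]:
    """Keep only chunks whose confidence clears the threshold.

    Each chunk dict is assumed to carry an 'embedding' (np.ndarray) and a
    'source_weight' in [0, 1] (e.g. 1.0 for curated skills, lower for scraped docs).
    """
    kept = []
    for chunk in chunks:
        emb = chunk["embedding"]
        cosine = float(np.dot(query_vec, emb) /
                       (np.linalg.norm(query_vec) * np.linalg.norm(emb) + 1e-9))
        chunk["confidence"] = cosine * chunk["source_weight"]
        if chunk["confidence"] >= threshold:
            kept.append(chunk)
        else:
            # Dropped chunks are logged for offline evaluation, never surfaced.
            logger.debug("dropping chunk %s (confidence=%.2f)",
                         chunk.get("id"), chunk["confidence"])
    return kept
```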

### 6.3 Chain-of-Thought Over Retrieved Context

**Problem:** The model retrieves correctly but fails to chain multi-hop reasoning across chunks.

**Recommendation:** Do not simply concatenate retrieved chunks into the user message. Instead, prepend a structured reasoning prompt that forces the model to:

1. Quote the specific chunk that supports each step.
2. Flag when two chunks must be combined to reach a conclusion.
3. Stop and emit "I don't know" if no chunk supports a required inference step.
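
One possible shape for that wrapper (the template wording is a suggestion, not a finalized prompt):

```python
def build_reasoning_prompt(question: str, chunks: list[str]) -> str:
    """Wrap retrieved chunks in a structured reasoning scaffold instead of
    concatenating them directly into the user message."""
    numbered = "\n\n".join(f"[chunk {i + 1}]\n{text}" for i, text in enumerate(chunks))
    return (
        "Answer using ONLY the chunks below.\n"
        "For every reasoning step, quote the chunk number that supports it.\n"
        "If a conclusion requires combining two chunks, name both and explain why.\n"
        "If no chunk supports a required step, answer exactly: I don't know.\n\n"
        f"{numbered}\n\nQuestion: {question}"
    )
```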

### 6.4 "I Don't Know" Fallback

**Problem:** Confidence miscalibration leads to hallucinated answers that sound authoritative.

**Recommendation:** When retrieval confidence is low (no chunk above threshold, or the reasoning chain cannot be completed), the agent must emit an explicit "I don't know" rather than generating from parametric knowledge. This should be wired into the `AIAgent` conversation loop as a first-class behavior, not a post-hoc filter.
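
A rough sketch of how this could sit in the conversation loop (the `retrieve_with_confidence`, `log_event`, and `generate` hooks are hypothetical names for whatever the `AIAgent` actually exposes):

```python
LOW_CONFIDENCE_REPLY = "I don't know."


def answer_with_fallback(agent, question: str, threshold: float = 0.75) -> str:
    """First-class fallback: never drop back to parametric guessing."""
    chunks = agent.retrieve_with_confidence(question)  # hypothetical retrieval hook
    usable = [c for c in chunks if c["confidence"] >= threshold]
    if not usable:
        agent.log_event("retrieval_low_confidence", question=question)  # hypothetical logger
        return LOW_CONFIDENCE_REPLY
    prompt = build_reasoning_prompt(question, [c["text"] for c in usable])
    # The reasoning scaffold (6.3) may itself conclude "I don't know"; pass that through.
    return agent.generate(prompt)  # hypothetical LLM call
```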

### 6.5 Architecture Impact

Our existing holographic memory (HRR) may partially address context-window dilution (root cause #1) by binding related chunks together, but it does not solve reasoning-chain breaks (root cause #3). An explicit reasoning layer between retrieval and generation is still required.
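
For readers unfamiliar with HRR, the "binding" referred to here is circular convolution of two vectors, approximately invertible by circular correlation. A generic illustration, not the Hermes implementation:

```python
import numpy as np


def hrr_bind(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Bind two equal-length vectors via circular convolution (FFT trick)."""
    return np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))


def hrr_unbind(trace: np.ndarray, cue: np.ndarray) -> np.ndarray:
    """Approximately recover the partner of `cue` from a bound trace."""
    return np.real(np.fft.ifft(np.fft.fft(trace) * np.conj(np.fft.fft(cue))))


# Two related chunk vectors bound into one trace; either can be cued back out.
# Nothing here performs the multi-hop inference of root cause #3.
rng = np.random.default_rng(0)
dim = 1024
chunk_a = rng.standard_normal(dim) / np.sqrt(dim)
chunk_b = rng.standard_normal(dim) / np.sqrt(dim)
trace = hrr_bind(chunk_a, chunk_b)
recovered = hrr_unbind(trace, chunk_a)  # noisy approximation of chunk_b
```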

---

## 7. Limitations of This Research

1. **MemPalace/Engram team analysis not found** - The specific analysis that produced the 17% figure was not located through academic search. It may come from internal reports, blog posts, or presentations not indexed on arXiv.
176 tests/tools/test_lightrag_tool.py Normal file
@@ -0,0 +1,176 @@
"""Tests for tools/lightrag_tool.py"""

import json
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

# LightRAG may not be installed in all test environments
pytest.importorskip("lightrag", reason="lightrag-hku not installed")

from tools.lightrag_tool import (
    check_lightrag_requirements,
    lightrag_index,
    lightrag_query,
    _collect_markdown_files,
    _read_text_safe,
    LIGHTRAG_DIR,
)


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

def _parse_result(result: str) -> dict:
    """Parse JSON tool result, falling back to error string detection."""
    try:
        return json.loads(result)
    except json.JSONDecodeError:
        return {"_error": result}


# ---------------------------------------------------------------------------
# Unit tests
# ---------------------------------------------------------------------------

class TestCollectMarkdownFiles:
    def test_collects_md_files(self, tmp_path):
        (tmp_path / "a.md").write_text("# A")
        (tmp_path / "b.md").write_text("# B")
        (tmp_path / "skip.txt").write_text("text")
        found = _collect_markdown_files(tmp_path)
        assert len(found) == 2
        assert all(p.suffix == ".md" for p in found)

    def test_skips_hidden_dirs(self, tmp_path):
        (tmp_path / ".git").mkdir()
        (tmp_path / ".git" / "readme.md").write_text("# git")
        (tmp_path / "visible.md").write_text("# visible")
        found = _collect_markdown_files(tmp_path)
        names = [p.name for p in found]
        assert "visible.md" in names
        assert "readme.md" not in names

    def test_returns_empty_for_missing_dir(self):
        assert _collect_markdown_files(Path("/nonexistent")) == []


class TestReadTextSafe:
    def test_reads_small_file(self, tmp_path):
        p = tmp_path / "test.md"
        p.write_text("hello world")
        assert _read_text_safe(p) == "hello world"

    def test_truncates_large_file(self, tmp_path):
        p = tmp_path / "big.md"
        p.write_text("x" * 1_000_000)
        text = _read_text_safe(p, limit=500_000)
        assert len(text) == 500_000

    def test_reads_binary_without_crashing(self, tmp_path):
        p = tmp_path / "binary.md"
        p.write_bytes(b"\x00\x01\x02")
        result = _read_text_safe(p)
        # Should not crash; control chars 0x00-0x7F are valid UTF-8
        assert isinstance(result, str)


class TestCheckRequirements:
    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    def test_ok_when_ollama_up(self, mock_ollama):
        assert check_lightrag_requirements() is True

    @patch("tools.lightrag_tool._ollama_available", return_value=False)
    def test_false_when_ollama_down(self, mock_ollama):
        assert check_lightrag_requirements() is False

    @patch.dict(sys.modules, {"lightrag": None}, clear=False)
    def test_false_when_lightrag_missing(self):
        with patch("tools.lightrag_tool._ollama_available", return_value=True):
            # sys.modules["lightrag"] is patched to None above, which makes
            # `import lightrag` inside check_lightrag_requirements() raise ImportError
            assert check_lightrag_requirements() is False


class TestLightragIndex:
    @patch("tools.lightrag_tool._ollama_available", return_value=False)
    def test_error_when_ollama_down(self, mock_ollama):
        result = lightrag_index()
        assert "Ollama is not running" in result

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    @patch("tools.lightrag_tool._has_ollama_model", return_value=False)
    def test_error_when_model_missing(self, mock_model, mock_ollama):
        result = lightrag_index()
        assert "not found in Ollama" in result

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    @patch("tools.lightrag_tool._has_ollama_model", return_value=True)
    @patch("tools.lightrag_tool._get_lightrag")
    @patch("tools.lightrag_tool._collect_markdown_files", return_value=[])
    def test_warning_when_no_files(self, mock_collect, mock_get_rag, mock_model, mock_ollama):
        result = lightrag_index()
        data = _parse_result(result)
        assert data.get("status") == "warning"
        assert "No markdown files found" in data.get("message", "")

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    @patch("tools.lightrag_tool._has_ollama_model", return_value=True)
    @patch("tools.lightrag_tool._get_lightrag")
    @patch("tools.lightrag_tool._collect_markdown_files")
    @patch("tools.lightrag_tool._read_text_safe", return_value="# Skill doc\nContent.")
    @patch("asyncio.run")
    def test_indexes_files(self, mock_asyncio, mock_read, mock_collect, mock_get_rag, mock_model, mock_ollama):
        mock_collect.return_value = [Path("/fake/skills/git.md"), Path("/fake/skills/docker.md")]
        mock_rag = MagicMock()
        mock_get_rag.return_value = mock_rag

        result = lightrag_index()
        data = _parse_result(result)
        assert data.get("status") == "ok"
        assert data.get("indexed_files") == 2
        assert data.get("errors") == 0


class TestLightragQuery:
    @patch("tools.lightrag_tool._ollama_available", return_value=False)
    def test_error_when_ollama_down(self, mock_ollama):
        result = lightrag_query("test", mode="hybrid")
        assert "Ollama is not running" in result

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    @patch("tools.lightrag_tool.LIGHTRAG_DIR")
    def test_empty_index_message(self, mock_dir, mock_ollama):
        mock_dir.exists.return_value = True
        mock_dir.iterdir.return_value = iter([])
        result = lightrag_query("test", mode="hybrid")
        data = _parse_result(result)
        assert data.get("status") == "empty"

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    @patch("tools.lightrag_tool.LIGHTRAG_DIR")
    @patch("tools.lightrag_tool._get_lightrag")
    @patch("asyncio.run", return_value="Use git clone for repos.")
    def test_query_returns_answer(self, mock_asyncio, mock_get_rag, mock_dir, mock_ollama):
        mock_dir.exists.return_value = True
        mock_dir.iterdir.return_value = iter([Path("dummy")])
        mock_rag = MagicMock()
        mock_get_rag.return_value = mock_rag

        result = lightrag_query("How do I clone a repo?", mode="hybrid")
        data = _parse_result(result)
        assert data.get("status") == "ok"
        assert data.get("mode") == "hybrid"
        assert "clone" in data.get("answer", "").lower()

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    def test_rejects_invalid_mode(self, mock_ollama):
        result = lightrag_query("test", mode="invalid")
        assert "mode must be one of" in result

    def test_rejects_empty_query(self):
        result = lightrag_query("", mode="hybrid")
        assert "Query cannot be empty" in result
405 tools/lightrag_tool.py Normal file
@@ -0,0 +1,405 @@
#!/usr/bin/env python3
"""
LightRAG Tool — Graph-based knowledge retrieval for skills and docs.

Indexes markdown files under ~/.hermes/skills/ (and optional extra dirs)
into a LightRAG knowledge graph stored at ~/.hermes/lightrag/.

Requires:
    - lightrag-hku (pip install lightrag-hku)
    - Ollama running locally with an embedding model (default: nomic-embed-text)
    - Ollama running locally with a chat model (default: qwen2.5:7b)

Usage:
    lightrag_query("How do I dispatch the burn fleet?", mode="hybrid")
    lightrag_index()  # re-index skill files
"""

import asyncio
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional

import numpy as np

from hermes_constants import get_hermes_home
from tools.registry import registry, tool_error

logger = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------

DEFAULT_EMBED_MODEL = os.environ.get("LIGHTRAG_EMBED_MODEL", "nomic-embed-text")
DEFAULT_LLM_MODEL = os.environ.get("LIGHTRAG_LLM_MODEL", "qwen2.5:7b")
DEFAULT_OLLAMA_HOST = os.environ.get("LIGHTRAG_OLLAMA_HOST", "http://localhost:11434")

LIGHTRAG_DIR = get_hermes_home() / "lightrag"
SKILLS_DIR = get_hermes_home() / "skills"

# ---------------------------------------------------------------------------
# Ollama helpers
# ---------------------------------------------------------------------------


def _ollama_available() -> bool:
    """Check if Ollama server is reachable."""
    try:
        import urllib.request
        req = urllib.request.Request(f"{DEFAULT_OLLAMA_HOST}/api/tags")
        with urllib.request.urlopen(req, timeout=3) as resp:
            return resp.status == 200
    except Exception:
        return False


def _has_ollama_model(model_name: str) -> bool:
    """Check if a specific model is pulled in Ollama."""
    try:
        import urllib.request
        req = urllib.request.Request(f"{DEFAULT_OLLAMA_HOST}/api/tags")
        with urllib.request.urlopen(req, timeout=3) as resp:
            data = json.loads(resp.read())
            models = [m["name"] for m in data.get("models", [])]
            return any(model_name in m for m in models)
    except Exception:
        return False


async def _ollama_embedding(texts: list, **kwargs) -> np.ndarray:
    """Call Ollama embeddings API."""
    import aiohttp

    payload = {
        "model": DEFAULT_EMBED_MODEL,
        "input": texts,
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{DEFAULT_OLLAMA_HOST}/api/embed",
            json=payload,
            timeout=aiohttp.ClientTimeout(total=60),
        ) as resp:
            resp.raise_for_status()
            data = await resp.json()
            embeddings = data.get("embeddings", [])
            if not embeddings:
                raise RuntimeError("Ollama returned empty embeddings")
            return np.array(embeddings, dtype=np.float32)


async def _ollama_complete(
    prompt, system_prompt=None, history_messages=None, **kwargs
) -> str:
    """Call Ollama chat API for LLM completion."""
    import aiohttp

    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    if history_messages:
        for msg in history_messages:
            role = "user" if msg.get("role") == "user" else "assistant"
            messages.append({"role": role, "content": msg.get("content", "")})
    messages.append({"role": "user", "content": prompt})

    payload = {
        "model": DEFAULT_LLM_MODEL,
        "messages": messages,
        "stream": False,
        "options": {"temperature": 0.3, "num_predict": 2048},
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{DEFAULT_OLLAMA_HOST}/api/chat",
            json=payload,
            timeout=aiohttp.ClientTimeout(total=120),
        ) as resp:
            resp.raise_for_status()
            data = await resp.json()
            return data.get("message", {}).get("content", "")


# ---------------------------------------------------------------------------
# LightRAG setup
# ---------------------------------------------------------------------------

_lightrag_instance: Optional[object] = None


def _get_lightrag() -> object:
    """Lazy-initialize LightRAG with Ollama backends."""
    global _lightrag_instance
    if _lightrag_instance is not None:
        return _lightrag_instance

    try:
        from lightrag import LightRAG, QueryParam
        from lightrag.utils import EmbeddingFunc
    except ImportError as e:
        raise RuntimeError(
            "lightrag is not installed. Run: pip install lightrag-hku"
        ) from e

    LIGHTRAG_DIR.mkdir(parents=True, exist_ok=True)

    # Wrap Ollama embedding for LightRAG
    embed_func = EmbeddingFunc(
        embedding_dim=768,  # nomic-embed-text dimension
        func=_ollama_embedding,
        max_token_size=8192,
        model_name=DEFAULT_EMBED_MODEL,
    )

    _lightrag_instance = LightRAG(
        working_dir=str(LIGHTRAG_DIR),
        embedding_func=embed_func,
        llm_model_func=_ollama_complete,
        llm_model_name=DEFAULT_LLM_MODEL,
        chunk_token_size=1200,
        chunk_overlap_token_size=100,
    )
    return _lightrag_instance


# ---------------------------------------------------------------------------
# Indexing
# ---------------------------------------------------------------------------

def _collect_markdown_files(root: Path) -> List[Path]:
    """Collect all .md files under root, excluding hidden dirs (e.g. .git) and node_modules."""
    files = []
    if not root.exists():
        return files
    for path in root.rglob("*.md"):
        # Check components *relative to root*: the root itself may live under a
        # hidden directory (e.g. ~/.hermes/skills/), which must not exclude everything.
        rel_parts = path.relative_to(root).parts
        if any(part.startswith(".") or part == "node_modules" for part in rel_parts):
            continue
        files.append(path)
    return sorted(files)


def _read_text_safe(path: Path, limit: int = 500_000) -> str:
    """Read file text with size limit."""
    try:
        stat = path.stat()
        if stat.st_size > limit:
            return path.read_text(encoding="utf-8", errors="ignore")[:limit]
        return path.read_text(encoding="utf-8", errors="ignore")
    except Exception as e:
        logger.warning("Failed to read %s: %s", path, e)
        return ""


def lightrag_index(directories: Optional[List[str]] = None) -> str:
    """Index markdown files into LightRAG knowledge graph.

    Args:
        directories: Extra directories to index (in addition to ~/.hermes/skills/).
    """
    if not _ollama_available():
        return tool_error(
            "Ollama is not running. Start it with: ollama serve"
        )

    if not _has_ollama_model(DEFAULT_EMBED_MODEL):
        return tool_error(
            f"Embedding model '{DEFAULT_EMBED_MODEL}' not found in Ollama. "
            f"Pull it with: ollama pull {DEFAULT_EMBED_MODEL}"
        )

    if not _has_ollama_model(DEFAULT_LLM_MODEL):
        return tool_error(
            f"LLM model '{DEFAULT_LLM_MODEL}' not found in Ollama. "
            f"Pull it with: ollama pull {DEFAULT_LLM_MODEL}"
        )

    rag = _get_lightrag()
    dirs = [SKILLS_DIR]
    if directories:
        for d in directories:
            p = Path(d).expanduser()
            if p.exists():
                dirs.append(p)

    all_files = []
    for d in dirs:
        all_files.extend(_collect_markdown_files(d))

    if not all_files:
        return json.dumps({
            "status": "warning",
            "message": "No markdown files found to index.",
            "directories": [str(d) for d in dirs],
        })

    # Read and insert files
    inserted = 0
    errors = 0
    for path in all_files:
        text = _read_text_safe(path)
        if not text.strip():
            continue
        try:
            # LightRAG insert is async; bridge it into this synchronous tool call
            asyncio.run(rag.ainsert(text))
            inserted += 1
        except Exception as e:
            logger.warning("Failed to index %s: %s", path, e)
            errors += 1

    return json.dumps({
        "status": "ok",
        "indexed_files": inserted,
        "errors": errors,
        "total_files": len(all_files),
        "storage_dir": str(LIGHTRAG_DIR),
    })


# ---------------------------------------------------------------------------
# Query
# ---------------------------------------------------------------------------


def lightrag_query(query: str, mode: str = "hybrid") -> str:
    """Query the LightRAG knowledge graph.

    Args:
        query: The question or search query.
        mode: Search mode — "local" (nearby entities), "global" (graph-wide),
            or "hybrid" (both).
    """
    if not query or not query.strip():
        return tool_error("Query cannot be empty.")

    if mode not in {"local", "global", "hybrid"}:
        return tool_error("mode must be one of: local, global, hybrid")

    if not _ollama_available():
        return tool_error(
            "Ollama is not running. Start it with: ollama serve"
        )

    # Check whether any data has been indexed before paying the cost of
    # initializing the LightRAG instance
    if not LIGHTRAG_DIR.exists() or not any(LIGHTRAG_DIR.iterdir()):
        return json.dumps({
            "status": "empty",
            "message": "LightRAG index is empty. Run lightrag_index() first.",
        })

    rag = _get_lightrag()

    try:
        from lightrag import QueryParam
        param = QueryParam(mode=mode)
        result = asyncio.run(rag.aquery(query, param=param))
        return json.dumps({
            "status": "ok",
            "mode": mode,
            "query": query,
            "answer": result,
        })
    except Exception as e:
        logger.exception("LightRAG query failed")
        return tool_error(f"Query failed: {e}")


# ---------------------------------------------------------------------------
# Tool schemas
# ---------------------------------------------------------------------------

LIGHTRAG_QUERY_SCHEMA = {
    "name": "lightrag_query",
    "description": (
        "Graph-based knowledge retrieval over indexed skills and documentation.\n\n"
        "Use this when the user asks about: conventions, workflows, tool usage, "
        "project-specific practices, or anything that might be documented in skills.\n\n"
        "Modes:\n"
        "- local: fast, searches nearby entities in the graph\n"
        "- global: thorough, reasons across the entire knowledge graph\n"
        "- hybrid: balanced, combines local and global (recommended)\n\n"
        "If the index is empty, the tool will report that and you should "
        "call lightrag_index() to populate it."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The question or search query.",
            },
            "mode": {
                "type": "string",
                "enum": ["local", "global", "hybrid"],
                "description": "Search mode. hybrid is recommended.",
            },
        },
        "required": ["query"],
    },
}

LIGHTRAG_INDEX_SCHEMA = {
    "name": "lightrag_index",
    "description": (
        "(Re-)build the LightRAG knowledge graph from skill files and docs.\n\n"
        "By default indexes ~/.hermes/skills/. Pass extra directories if needed.\n"
        "This is a one-time or occasional operation; queries work against the "
        "existing index until you re-index."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "directories": {
                "type": "array",
                "items": {"type": "string"},
                "description": "Optional extra directories to index (in addition to ~/.hermes/skills/).",
            },
        },
    },
}


# ---------------------------------------------------------------------------
# Availability check
# ---------------------------------------------------------------------------


def check_lightrag_requirements() -> bool:
    """Return True if LightRAG and Ollama appear to be available."""
    try:
        import lightrag  # noqa: F401
    except ImportError:
        return False
    return _ollama_available()


# ---------------------------------------------------------------------------
# Registry
# ---------------------------------------------------------------------------

registry.register(
    name="lightrag_query",
    toolset="rag",
    schema=LIGHTRAG_QUERY_SCHEMA,
    handler=lambda args, **kw: lightrag_query(
        query=args.get("query", ""),
        mode=args.get("mode", "hybrid"),
    ),
    check_fn=check_lightrag_requirements,
    emoji="🔎",
)

registry.register(
    name="lightrag_index",
    toolset="rag",
    schema=LIGHTRAG_INDEX_SCHEMA,
    handler=lambda args, **kw: lightrag_index(
        directories=args.get("directories"),
    ),
    check_fn=check_lightrag_requirements,
    emoji="📚",
)
@@ -167,6 +167,12 @@ TOOLSETS = {
        "tools": ["memory"],
        "includes": []
    },
    "rag": {
        "description": "Graph-based knowledge retrieval over indexed skills and docs (LightRAG)",
        "tools": ["lightrag_query", "lightrag_index"],
        "includes": []
    },
    "session_search": {
        "description": "Search and recall past conversations with summarization",