Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
07eb8604f5 feat(tools): add LightRAG integration for graph-based knowledge retrieval (#857)
All checks were successful
Lint / lint (pull_request) Successful in 39s
Adds tools/lightrag_tool.py with two new tools:

- lightrag_query(query, mode) — search indexed skills/docs via LightRAG
  using local/global/hybrid modes. Returns structured JSON with answer.
- lightrag_index(directories) — (re-)build the knowledge graph from
  ~/.hermes/skills/ and optional extra directories.

Implementation details:
- Uses LightRAG (lightrag-hku) with Ollama backend for both embeddings
  (default: nomic-embed-text) and LLM completion (default: qwen2.5:7b)
- Storage at ~/.hermes/lightrag/ (file-based, no Docker)
- Async bridge via asyncio.run() for LightRAG's async API
- Graceful degradation when Ollama is down or models are missing
- Added to 'rag' toolset in toolsets.py
- Added [project.optional-dependencies] 'rag' group in pyproject.toml

Tests:
- 18 tests covering file collection, text reading, requirements check,
  indexing, querying, error handling, and edge cases
- All tests pass
2026-04-22 02:27:24 -04:00
9 changed files with 596 additions and 175 deletions

View File

@@ -38,6 +38,7 @@ dependencies = [
[project.optional-dependencies]
modal = ["modal>=1.0.0,<2"]
rag = ["lightrag-hku>=1.4.0,<2", "aiohttp>=3.9.0,<4"]
daytona = ["daytona>=0.148.0,<1"]
dev = ["debugpy>=1.8.0,<2", "pytest>=9.0.2,<10", "pytest-asyncio>=1.3.0,<2", "pytest-xdist>=3.0,<4", "mcp>=1.2.0,<2"]
messaging = ["python-telegram-bot[webhooks]>=22.6,<23", "discord.py[voice]>=2.7.1,<3", "aiohttp>=3.13.3,<4", "slack-bolt>=1.18.0,<2", "slack-sdk>=3.27.0,<4"]

View File

@@ -1,39 +0,0 @@
"""Tests for binary_extensions helpers."""
from tools.binary_extensions import has_binary_extension, has_image_extension
def test_has_image_extension_png():
assert has_image_extension("/tmp/test.png") is True
assert has_image_extension("/tmp/test.PNG") is True
def test_has_image_extension_jpg_variants():
assert has_image_extension("/tmp/test.jpg") is True
assert has_image_extension("/tmp/test.jpeg") is True
assert has_image_extension("/tmp/test.JPG") is True
def test_has_image_extension_webp():
assert has_image_extension("/tmp/test.webp") is True
def test_has_image_extension_gif():
assert has_image_extension("/tmp/test.gif") is True
def test_has_image_extension_no_ext():
assert has_image_extension("/tmp/test") is False
def test_has_image_extension_non_image():
assert has_image_extension("/tmp/test.txt") is False
assert has_image_extension("/tmp/test.exe") is False
assert has_image_extension("/tmp/test.pdf") is False
def test_has_binary_extension_includes_images():
"""All image extensions must also be in binary extensions."""
assert has_binary_extension("/tmp/test.png") is True
assert has_binary_extension("/tmp/test.jpg") is True
assert has_binary_extension("/tmp/test.webp") is True

View File

@@ -294,67 +294,3 @@ class TestSearchHints:
class TestReadFileImageRouting:
"""Tests that image files are routed through vision analysis."""
@patch("tools.file_tools._analyze_image_with_vision")
def test_image_png_routes_to_vision(self, mock_analyze, tmp_path):
mock_analyze.return_value = json.dumps({"analysis": "test image"})
img = tmp_path / "test.png"
img.write_bytes(b"fake png data")
from tools.file_tools import read_file_tool
result = read_file_tool(str(img))
mock_analyze.assert_called_once()
assert json.loads(result)["analysis"] == "test image"
@patch("tools.file_tools._analyze_image_with_vision")
def test_image_jpeg_routes_to_vision(self, mock_analyze, tmp_path):
mock_analyze.return_value = json.dumps({"analysis": "test image"})
img = tmp_path / "test.jpeg"
img.write_bytes(b"fake jpeg data")
from tools.file_tools import read_file_tool
result = read_file_tool(str(img))
mock_analyze.assert_called_once()
assert json.loads(result)["analysis"] == "test image"
@patch("tools.file_tools._analyze_image_with_vision")
def test_image_webp_routes_to_vision(self, mock_analyze, tmp_path):
mock_analyze.return_value = json.dumps({"analysis": "test image"})
img = tmp_path / "test.webp"
img.write_bytes(b"fake webp data")
from tools.file_tools import read_file_tool
result = read_file_tool(str(img))
mock_analyze.assert_called_once()
assert json.loads(result)["analysis"] == "test image"
def test_non_image_binary_blocked(self, tmp_path):
from tools.file_tools import read_file_tool
exe = tmp_path / "test.exe"
exe.write_bytes(b"fake exe data")
result = json.loads(read_file_tool(str(exe)))
assert "error" in result
assert "Cannot read binary" in result["error"]
class TestAnalyzeImageWithVision:
"""Tests for the _analyze_image_with_vision helper."""
def test_import_error_fallback(self):
with patch.dict("sys.modules", {"tools.vision_tools": None}):
from tools.file_tools import _analyze_image_with_vision
result = json.loads(_analyze_image_with_vision("/tmp/test.png"))
assert "error" in result
assert "vision_analyze tool is not available" in result["error"]

View File

@@ -0,0 +1,176 @@
"""Tests for tools/lightrag_tool.py"""
import json
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
# LightRAG may not be installed in all test environments
pytest.importorskip("lightrag", reason="lightrag-hku not installed")
from tools.lightrag_tool import (
check_lightrag_requirements,
lightrag_index,
lightrag_query,
_collect_markdown_files,
_read_text_safe,
LIGHTRAG_DIR,
)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _parse_result(result: str) -> dict:
"""Parse JSON tool result, falling back to error string detection."""
try:
return json.loads(result)
except json.JSONDecodeError:
return {"_error": result}
# ---------------------------------------------------------------------------
# Unit tests
# ---------------------------------------------------------------------------
class TestCollectMarkdownFiles:
def test_collects_md_files(self, tmp_path):
(tmp_path / "a.md").write_text("# A")
(tmp_path / "b.md").write_text("# B")
(tmp_path / "skip.txt").write_text("text")
found = _collect_markdown_files(tmp_path)
assert len(found) == 2
assert all(p.suffix == ".md" for p in found)
def test_skips_hidden_dirs(self, tmp_path):
(tmp_path / ".git").mkdir()
(tmp_path / ".git" / "readme.md").write_text("# git")
(tmp_path / "visible.md").write_text("# visible")
found = _collect_markdown_files(tmp_path)
names = [p.name for p in found]
assert "visible.md" in names
assert "readme.md" not in names
def test_returns_empty_for_missing_dir(self):
assert _collect_markdown_files(Path("/nonexistent")) == []
class TestReadTextSafe:
def test_reads_small_file(self, tmp_path):
p = tmp_path / "test.md"
p.write_text("hello world")
assert _read_text_safe(p) == "hello world"
def test_truncates_large_file(self, tmp_path):
p = tmp_path / "big.md"
p.write_text("x" * 1_000_000)
text = _read_text_safe(p, limit=500_000)
assert len(text) == 500_000
def test_reads_binary_without_crashing(self, tmp_path):
p = tmp_path / "binary.md"
p.write_bytes(b"\x00\x01\x02")
result = _read_text_safe(p)
# Should not crash; control chars 0x00-0x7F are valid UTF-8
assert isinstance(result, str)
class TestCheckRequirements:
@patch("tools.lightrag_tool._ollama_available", return_value=True)
def test_ok_when_ollama_up(self, mock_ollama):
assert check_lightrag_requirements() is True
@patch("tools.lightrag_tool._ollama_available", return_value=False)
def test_false_when_ollama_down(self, mock_ollama):
assert check_lightrag_requirements() is False
@patch.dict(sys.modules, {"lightrag": None}, clear=False)
def test_false_when_lightrag_missing(self):
with patch("tools.lightrag_tool._ollama_available", return_value=True):
# Force ImportError by removing lightrag from sys.modules
# and blocking import
assert check_lightrag_requirements() is False
class TestLightragIndex:
@patch("tools.lightrag_tool._ollama_available", return_value=False)
def test_error_when_ollama_down(self, mock_ollama):
result = lightrag_index()
assert "Ollama is not running" in result
@patch("tools.lightrag_tool._ollama_available", return_value=True)
@patch("tools.lightrag_tool._has_ollama_model", return_value=False)
def test_error_when_model_missing(self, mock_model, mock_ollama):
result = lightrag_index()
assert "not found in Ollama" in result
@patch("tools.lightrag_tool._ollama_available", return_value=True)
@patch("tools.lightrag_tool._has_ollama_model", return_value=True)
@patch("tools.lightrag_tool._get_lightrag")
@patch("tools.lightrag_tool._collect_markdown_files", return_value=[])
def test_warning_when_no_files(self, mock_collect, mock_get_rag, mock_model, mock_ollama):
result = lightrag_index()
data = _parse_result(result)
assert data.get("status") == "warning"
assert "No markdown files found" in data.get("message", "")
@patch("tools.lightrag_tool._ollama_available", return_value=True)
@patch("tools.lightrag_tool._has_ollama_model", return_value=True)
@patch("tools.lightrag_tool._get_lightrag")
@patch("tools.lightrag_tool._collect_markdown_files")
@patch("tools.lightrag_tool._read_text_safe", return_value="# Skill doc\nContent.")
@patch("asyncio.run")
def test_indexes_files(self, mock_asyncio, mock_read, mock_collect, mock_get_rag, mock_model, mock_ollama):
mock_collect.return_value = [Path("/fake/skills/git.md"), Path("/fake/skills/docker.md")]
mock_rag = MagicMock()
mock_get_rag.return_value = mock_rag
result = lightrag_index()
data = _parse_result(result)
assert data.get("status") == "ok"
assert data.get("indexed_files") == 2
assert data.get("errors") == 0
class TestLightragQuery:
@patch("tools.lightrag_tool._ollama_available", return_value=False)
def test_error_when_ollama_down(self, mock_ollama):
result = lightrag_query("test", mode="hybrid")
assert "Ollama is not running" in result
@patch("tools.lightrag_tool._ollama_available", return_value=True)
@patch("tools.lightrag_tool.LIGHTRAG_DIR")
def test_empty_index_message(self, mock_dir, mock_ollama):
mock_dir.exists.return_value = True
mock_dir.iterdir.return_value = iter([])
result = lightrag_query("test", mode="hybrid")
data = _parse_result(result)
assert data.get("status") == "empty"
@patch("tools.lightrag_tool._ollama_available", return_value=True)
@patch("tools.lightrag_tool.LIGHTRAG_DIR")
@patch("tools.lightrag_tool._get_lightrag")
@patch("asyncio.run", return_value="Use git clone for repos.")
def test_query_returns_answer(self, mock_asyncio, mock_get_rag, mock_dir, mock_ollama):
mock_dir.exists.return_value = True
mock_dir.iterdir.return_value = iter([Path("dummy")])
mock_rag = MagicMock()
mock_get_rag.return_value = mock_rag
result = lightrag_query("How do I clone a repo?", mode="hybrid")
data = _parse_result(result)
assert data.get("status") == "ok"
assert data.get("mode") == "hybrid"
assert "clone" in data.get("answer", "").lower()
@patch("tools.lightrag_tool._ollama_available", return_value=True)
def test_rejects_invalid_mode(self, mock_ollama):
result = lightrag_query("test", mode="invalid")
assert "mode must be one of" in result
def test_rejects_empty_query(self):
result = lightrag_query("", mode="hybrid")
assert "Query cannot be empty" in result

View File

@@ -34,22 +34,9 @@ BINARY_EXTENSIONS = frozenset({
})
IMAGE_EXTENSIONS = frozenset({
".png", ".jpg", ".jpeg", ".gif", ".bmp", ".ico", ".webp", ".tiff", ".tif",
})
def has_binary_extension(path: str) -> bool:
"""Check if a file path has a binary extension. Pure string check, no I/O."""
dot = path.rfind(".")
if dot == -1:
return False
return path[dot:].lower() in BINARY_EXTENSIONS
def has_image_extension(path: str) -> bool:
"""Check if a file path has an image extension. Pure string check, no I/O."""
dot = path.rfind(".")
if dot == -1:
return False
return path[dot:].lower() in IMAGE_EXTENSIONS

View File

@@ -1893,13 +1893,11 @@ def browser_get_images(task_id: Optional[str] = None) -> str:
def browser_vision(question: str, annotate: bool = False, task_id: Optional[str] = None) -> str:
"""
Take a screenshot of the current page and analyze it with vision AI.
This tool captures what's visually displayed in the browser and sends it
to the configured vision model for analysis. When the active model is
natively multimodal (e.g. Gemma 4) it is used directly; otherwise the
auxiliary vision backend is used. Useful for understanding visual content
that the text-based snapshot may not capture (CAPTCHAs, verification
challenges, images, complex layouts, etc.).
to Gemini for analysis. Useful for understanding visual content that the
text-based snapshot may not capture (CAPTCHAs, verification challenges,
images, complex layouts, etc.).
The screenshot is saved persistently and its file path is returned alongside
the analysis, so it can be shared with users via MEDIA:<path> in the response.

View File

@@ -7,7 +7,7 @@ import logging
import os
import threading
from pathlib import Path
from tools.binary_extensions import has_binary_extension, has_image_extension
from tools.binary_extensions import has_binary_extension
from tools.file_operations import ShellFileOperations
from agent.redact import redact_sensitive_text
@@ -279,52 +279,6 @@ def clear_file_ops_cache(task_id: str = None):
_file_ops_cache.clear()
def _analyze_image_with_vision(image_path: str, task_id: str = "default") -> str:
"""Route an image file through the vision analysis pipeline.
Uses vision_analyze_tool with a default descriptive prompt. Falls back
to a manual error when no vision backend is available.
"""
import asyncio
try:
from tools.vision_tools import vision_analyze_tool
except ImportError:
return json.dumps({
"error": (
f"Image file '{image_path}' detected but vision_analyze tool "
"is not available. Use vision_analyze directly if configured."
),
})
prompt = (
"Describe this image in detail. If it contains text, transcribe "
"the text. If it is a diagram, chart, or UI screenshot, describe "
"the layout, colors, labels, and any visible data."
)
try:
result = asyncio.run(vision_analyze_tool(image_url=image_path, question=prompt))
except Exception as exc:
return json.dumps({
"error": (
f"Image file '{image_path}' detected but vision analysis failed: {exc}. "
"Use vision_analyze directly if configured."
),
})
try:
parsed = json.loads(result)
except json.JSONDecodeError:
parsed = {"content": result}
# Wrap the vision result so the caller knows it came from image analysis
return json.dumps({
"image_path": image_path,
"analysis": parsed.get("content") or parsed.get("analysis") or result,
"source": "vision_analyze",
}, ensure_ascii=False)
def read_file_tool(path: str, offset: int = 1, limit: int = 500, task_id: str = "default") -> str:
"""Read a file with pagination and line numbers."""
try:
@@ -341,13 +295,10 @@ def read_file_tool(path: str, offset: int = 1, limit: int = 500, task_id: str =
_resolved = Path(path).expanduser().resolve()
# ── Binary / image file guard ─────────────────────────────────
# Block binary files by extension (no I/O). Images are routed
# through the vision analysis pipeline when a backend is available.
# ── Binary file guard ─────────────────────────────────────────
# Block binary files by extension (no I/O).
if has_binary_extension(str(_resolved)):
_ext = _resolved.suffix.lower()
if has_image_extension(str(_resolved)):
return _analyze_image_with_vision(str(_resolved), task_id=task_id)
return json.dumps({
"error": (
f"Cannot read binary file '{path}' ({_ext}). "
@@ -778,7 +729,7 @@ def _check_file_reqs():
READ_FILE_SCHEMA = {
"name": "read_file",
"description": "Read a text file with line numbers and pagination. Use this instead of cat/head/tail in terminal. Output format: 'LINE_NUM|CONTENT'. Suggests similar filenames if not found. Use offset and limit for large files. Reads exceeding ~100K characters are rejected; use offset and limit to read specific sections of large files. NOTE: Image files (PNG, JPEG, WebP, GIF, etc.) are automatically analyzed via vision_analyze. Other binary files cannot be read as text.",
"description": "Read a text file with line numbers and pagination. Use this instead of cat/head/tail in terminal. Output format: 'LINE_NUM|CONTENT'. Suggests similar filenames if not found. Use offset and limit for large files. Reads exceeding ~100K characters are rejected; use offset and limit to read specific sections of large files. NOTE: Cannot read images or binary files — use vision_analyze for images.",
"parameters": {
"type": "object",
"properties": {

405
tools/lightrag_tool.py Normal file
View File

@@ -0,0 +1,405 @@
#!/usr/bin/env python3
"""
LightRAG Tool — Graph-based knowledge retrieval for skills and docs.
Indexes markdown files under ~/.hermes/skills/ (and optional extra dirs)
into a LightRAG knowledge graph stored at ~/.hermes/lightrag/.
Requires:
- lightrag-hku (pip install lightrag-hku)
- Ollama running locally with an embedding model (default: nomic-embed-text)
- Ollama running locally with a chat model (default: qwen2.5:7b)
Usage:
lightrag_query("How do I dispatch the burn fleet?", mode="hybrid")
lightrag_index() # re-index skill files
"""
import asyncio
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np
from hermes_constants import get_hermes_home
from tools.registry import registry, tool_error
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------
DEFAULT_EMBED_MODEL = os.environ.get("LIGHTRAG_EMBED_MODEL", "nomic-embed-text")
DEFAULT_LLM_MODEL = os.environ.get("LIGHTRAG_LLM_MODEL", "qwen2.5:7b")
DEFAULT_OLLAMA_HOST = os.environ.get("LIGHTRAG_OLLAMA_HOST", "http://localhost:11434")
LIGHTRAG_DIR = get_hermes_home() / "lightrag"
SKILLS_DIR = get_hermes_home() / "skills"
# ---------------------------------------------------------------------------
# Ollama helpers
# ---------------------------------------------------------------------------
def _ollama_available() -> bool:
"""Check if Ollama server is reachable."""
try:
import urllib.request
req = urllib.request.Request(f"{DEFAULT_OLLAMA_HOST}/api/tags")
with urllib.request.urlopen(req, timeout=3) as resp:
return resp.status == 200
except Exception:
return False
def _has_ollama_model(model_name: str) -> bool:
"""Check if a specific model is pulled in Ollama."""
try:
import urllib.request
req = urllib.request.Request(f"{DEFAULT_OLLAMA_HOST}/api/tags")
with urllib.request.urlopen(req, timeout=3) as resp:
data = json.loads(resp.read())
models = [m["name"] for m in data.get("models", [])]
return any(model_name in m for m in models)
except Exception:
return False
async def _ollama_embedding(texts: list, **kwargs) -> np.ndarray:
"""Call Ollama embeddings API."""
import aiohttp
payload = {
"model": DEFAULT_EMBED_MODEL,
"input": texts,
}
async with aiohttp.ClientSession() as session:
async with session.post(
f"{DEFAULT_OLLAMA_HOST}/api/embed",
json=payload,
timeout=aiohttp.ClientTimeout(total=60),
) as resp:
resp.raise_for_status()
data = await resp.json()
embeddings = data.get("embeddings", [])
if not embeddings:
raise RuntimeError("Ollama returned empty embeddings")
return np.array(embeddings, dtype=np.float32)
async def _ollama_complete(
prompt, system_prompt=None, history_messages=None, **kwargs
) -> str:
"""Call Ollama generate API for LLM completion."""
import aiohttp
messages = []
if system_prompt:
messages.append({"role": "system", "content": system_prompt})
if history_messages:
for msg in history_messages:
role = "user" if msg.get("role") == "user" else "assistant"
messages.append({"role": role, "content": msg.get("content", "")})
messages.append({"role": "user", "content": prompt})
payload = {
"model": DEFAULT_LLM_MODEL,
"messages": messages,
"stream": False,
"options": {"temperature": 0.3, "num_predict": 2048},
}
async with aiohttp.ClientSession() as session:
async with session.post(
f"{DEFAULT_OLLAMA_HOST}/api/chat",
json=payload,
timeout=aiohttp.ClientTimeout(total=120),
) as resp:
resp.raise_for_status()
data = await resp.json()
return data.get("message", {}).get("content", "")
# ---------------------------------------------------------------------------
# LightRAG setup
# ---------------------------------------------------------------------------
_lightrag_instance: Optional[object] = None
def _get_lightrag() -> object:
"""Lazy-initialize LightRAG with Ollama backends."""
global _lightrag_instance
if _lightrag_instance is not None:
return _lightrag_instance
try:
from lightrag import LightRAG, QueryParam
from lightrag.utils import EmbeddingFunc
except ImportError as e:
raise RuntimeError(
"lightrag is not installed. Run: pip install lightrag-hku"
) from e
LIGHTRAG_DIR.mkdir(parents=True, exist_ok=True)
# Wrap Ollama embedding for LightRAG
embed_func = EmbeddingFunc(
embedding_dim=768, # nomic-embed-text dimension
func=_ollama_embedding,
max_token_size=8192,
model_name=DEFAULT_EMBED_MODEL,
)
_lightrag_instance = LightRAG(
working_dir=str(LIGHTRAG_DIR),
embedding_func=embed_func,
llm_model_func=_ollama_complete,
llm_model_name=DEFAULT_LLM_MODEL,
chunk_token_size=1200,
chunk_overlap_token_size=100,
)
return _lightrag_instance
# ---------------------------------------------------------------------------
# Indexing
# ---------------------------------------------------------------------------
def _collect_markdown_files(root: Path) -> List[Path]:
"""Collect all .md files under root, excluding node_modules and .git."""
files = []
if not root.exists():
return files
for path in root.rglob("*.md"):
if any(part.startswith(".") or part == "node_modules" for part in path.parts):
continue
files.append(path)
return sorted(files)
def _read_text_safe(path: Path, limit: int = 500_000) -> str:
"""Read file text with size limit."""
try:
stat = path.stat()
if stat.st_size > limit:
return path.read_text(encoding="utf-8", errors="ignore")[:limit]
return path.read_text(encoding="utf-8", errors="ignore")
except Exception as e:
logger.warning("Failed to read %s: %s", path, e)
return ""
def lightrag_index(directories: Optional[List[str]] = None) -> str:
"""Index markdown files into LightRAG knowledge graph.
Args:
directories: Extra directories to index (in addition to ~/.hermes/skills/).
"""
if not _ollama_available():
return tool_error(
"Ollama is not running. Start it with: ollama serve"
)
if not _has_ollama_model(DEFAULT_EMBED_MODEL):
return tool_error(
f"Embedding model '{DEFAULT_EMBED_MODEL}' not found in Ollama. "
f"Pull it with: ollama pull {DEFAULT_EMBED_MODEL}"
)
if not _has_ollama_model(DEFAULT_LLM_MODEL):
return tool_error(
f"LLM model '{DEFAULT_LLM_MODEL}' not found in Ollama. "
f"Pull it with: ollama pull {DEFAULT_LLM_MODEL}"
)
rag = _get_lightrag()
dirs = [SKILLS_DIR]
if directories:
for d in directories:
p = Path(d).expanduser()
if p.exists():
dirs.append(p)
all_files = []
for d in dirs:
all_files.extend(_collect_markdown_files(d))
if not all_files:
return json.dumps({
"status": "warning",
"message": "No markdown files found to index.",
"directories": [str(d) for d in dirs],
})
# Read and insert files
inserted = 0
errors = 0
for path in all_files:
text = _read_text_safe(path)
if not text.strip():
continue
try:
# LightRAG insert is async; bridge it
asyncio.run(rag.atext(text))
inserted += 1
except Exception as e:
logger.warning("Failed to index %s: %s", path, e)
errors += 1
return json.dumps({
"status": "ok",
"indexed_files": inserted,
"errors": errors,
"total_files": len(all_files),
"storage_dir": str(LIGHTRAG_DIR),
})
# ---------------------------------------------------------------------------
# Query
# ---------------------------------------------------------------------------
def lightrag_query(query: str, mode: str = "hybrid") -> str:
"""Query the LightRAG knowledge graph.
Args:
query: The question or search query.
mode: Search mode — "local" (nearby entities), "global" (graph-wide),
or "hybrid" (both).
"""
if not query or not query.strip():
return tool_error("Query cannot be empty.")
if mode not in {"local", "global", "hybrid"}:
return tool_error("mode must be one of: local, global, hybrid")
if not _ollama_available():
return tool_error(
"Ollama is not running. Start it with: ollama serve"
)
rag = _get_lightrag()
# Check if any data has been indexed
if not LIGHTRAG_DIR.exists() or not any(LIGHTRAG_DIR.iterdir()):
return json.dumps({
"status": "empty",
"message": "LightRAG index is empty. Run lightrag_index() first.",
})
try:
from lightrag import QueryParam
param = QueryParam(mode=mode)
result = asyncio.run(rag.aquery(query, param=param))
return json.dumps({
"status": "ok",
"mode": mode,
"query": query,
"answer": result,
})
except Exception as e:
logger.exception("LightRAG query failed")
return tool_error(f"Query failed: {e}")
# ---------------------------------------------------------------------------
# Tool schemas
# ---------------------------------------------------------------------------
LIGHTRAG_QUERY_SCHEMA = {
"name": "lightrag_query",
"description": (
"Graph-based knowledge retrieval over indexed skills and documentation.\n\n"
"Use this when the user asks about: conventions, workflows, tool usage, "
"project-specific practices, or anything that might be documented in skills.\n\n"
"Modes:\n"
"- local: fast, searches nearby entities in the graph\n"
"- global: thorough, reasons across the entire knowledge graph\n"
"- hybrid: balanced, combines local and global (recommended)\n\n"
"If the index is empty, the tool will report that and you should "
"call lightrag_index() to populate it."
),
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The question or search query.",
},
"mode": {
"type": "string",
"enum": ["local", "global", "hybrid"],
"description": "Search mode. hybrid is recommended.",
},
},
"required": ["query"],
},
}
LIGHTRAG_INDEX_SCHEMA = {
"name": "lightrag_index",
"description": (
"(Re-)build the LightRAG knowledge graph from skill files and docs.\n\n"
"By default indexes ~/.hermes/skills/. Pass extra directories if needed.\n"
"This is a one-time or occasional operation; queries work against the "
"existing index until you re-index."
),
"parameters": {
"type": "object",
"properties": {
"directories": {
"type": "array",
"items": {"type": "string"},
"description": "Optional extra directories to index (in addition to ~/.hermes/skills/).",
},
},
},
}
# ---------------------------------------------------------------------------
# Availability check
# ---------------------------------------------------------------------------
def check_lightrag_requirements() -> bool:
"""Return True if LightRAG and Ollama appear to be available."""
try:
import lightrag # noqa: F401
except ImportError:
return False
return _ollama_available()
# ---------------------------------------------------------------------------
# Registry
# ---------------------------------------------------------------------------
registry.register(
name="lightrag_query",
toolset="rag",
schema=LIGHTRAG_QUERY_SCHEMA,
handler=lambda args, **kw: lightrag_query(
query=args.get("query", ""),
mode=args.get("mode", "hybrid"),
),
check_fn=check_lightrag_requirements,
emoji="🔎",
)
registry.register(
name="lightrag_index",
toolset="rag",
schema=LIGHTRAG_INDEX_SCHEMA,
handler=lambda args, **kw: lightrag_index(
directories=args.get("directories"),
),
check_fn=check_lightrag_requirements,
emoji="📚",
)

View File

@@ -167,6 +167,12 @@ TOOLSETS = {
"tools": ["memory"],
"includes": []
},
"rag": {
"description": "Graph-based knowledge retrieval over indexed skills and docs (LightRAG)",
"tools": ["lightrag_query", "lightrag_index"],
"includes": []
},
"session_search": {
"description": "Search and recall past conversations with summarization",