Compare commits
1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | 07eb8604f5 |  |
@@ -38,6 +38,7 @@ dependencies = [
 
 [project.optional-dependencies]
 modal = ["modal>=1.0.0,<2"]
+rag = ["lightrag-hku>=1.4.0,<2", "aiohttp>=3.9.0,<4"]
 daytona = ["daytona>=0.148.0,<1"]
 dev = ["debugpy>=1.8.0,<2", "pytest>=9.0.2,<10", "pytest-asyncio>=1.3.0,<2", "pytest-xdist>=3.0,<4", "mcp>=1.2.0,<2"]
 messaging = ["python-telegram-bot[webhooks]>=22.6,<23", "discord.py[voice]>=2.7.1,<3", "aiohttp>=3.13.3,<4", "slack-bolt>=1.18.0,<2", "slack-sdk>=3.27.0,<4"]
@@ -1302,9 +1302,9 @@ class TestConcurrentToolExecution:
         mock_con.assert_not_called()
 
     def test_malformed_json_args_forces_sequential(self, agent):
-        """Non-dict tool arguments (e.g. JSON array) should fall back to sequential."""
+        """Unparseable tool arguments should fall back to sequential."""
         tc1 = _mock_tool_call(name="web_search", arguments='{}', call_id="c1")
-        tc2 = _mock_tool_call(name="web_search", arguments='[1, 2, 3]', call_id="c2")
+        tc2 = _mock_tool_call(name="web_search", arguments="NOT JSON {{{", call_id="c2")
         mock_msg = _mock_assistant_msg(content="", tool_calls=[tc1, tc2])
         messages = []
         with patch.object(agent, "_execute_tool_calls_sequential") as mock_seq:
@@ -1384,9 +1384,10 @@ class TestConcurrentToolExecution:
        mock_msg = _mock_assistant_msg(content="", tool_calls=[tc1, tc2])
        messages = []

        call_count = [0]

        def fake_handle(name, args, task_id, **kwargs):
            # Deterministic failure based on tool_call_id to avoid race conditions
            if kwargs.get("tool_call_id") == "c1":
                call_count[0] += 1
                if call_count[0] == 1:
                    raise RuntimeError("boom")
            return "success"
@@ -416,219 +416,3 @@ class TestEdgeCases:
        """Verify max workers constant exists and is reasonable."""
        from run_agent import _MAX_TOOL_WORKERS
        assert 1 <= _MAX_TOOL_WORKERS <= 32


# ── Integration Tests: AIAgent Concurrent Execution ───────────────────────────

class TestAIAgentConcurrentExecution:
    """Exercise _execute_tool_calls_concurrent through an AIAgent instance."""

    @pytest.fixture
    def agent(self):
        """Minimal AIAgent with mocked OpenAI client and tool loading."""
        from types import SimpleNamespace
        from unittest.mock import patch
        from run_agent import AIAgent

        def _make_tool_defs(*names):
            return [
                {
                    "type": "function",
                    "function": {
                        "name": n,
                        "description": f"{n} tool",
                        "parameters": {"type": "object", "properties": {}},
                    },
                }
                for n in names
            ]

        with (
            patch("run_agent.get_tool_definitions", return_value=_make_tool_defs("web_search", "read_file")),
            patch("run_agent.check_toolset_requirements", return_value={}),
            patch("run_agent.OpenAI"),
        ):
            a = AIAgent(
                api_key="test-key-1234567890",
                quiet_mode=True,
                skip_context_files=True,
                skip_memory=True,
            )
            a.client = MagicMock()
            return a

    def _mock_assistant_msg(self, tool_calls=None):
        from types import SimpleNamespace
        return SimpleNamespace(content="", tool_calls=tool_calls)

    def _mock_tool_call(self, name, arguments, call_id):
        from types import SimpleNamespace
        return SimpleNamespace(
            id=call_id,
            type="function",
            function=SimpleNamespace(name=name, arguments=json.dumps(arguments)),
        )

    def test_two_tool_batch_executes_concurrently(self, agent):
        """2-tool parallel batch: all execute, results ordered, 100% pass."""
        tc1 = self._mock_tool_call("read_file", {"path": "a.txt"}, "c1")
        tc2 = self._mock_tool_call("read_file", {"path": "b.txt"}, "c2")
        mock_msg = self._mock_assistant_msg(tool_calls=[tc1, tc2])
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"file": args.get("path", ""), "content": f"content_of_{args.get('path', '')}"})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")

        assert len(messages) == 2
        assert messages[0]["tool_call_id"] == "c1"
        assert messages[1]["tool_call_id"] == "c2"
        assert "a.txt" in messages[0]["content"]
        assert "b.txt" in messages[1]["content"]

    def test_three_tool_batch_executes_concurrently(self, agent):
        """3-tool parallel batch: all execute, results ordered, 100% pass."""
        tcs = [
            self._mock_tool_call("web_search", {"query": f"q{i}"}, f"c{i}")
            for i in range(3)
        ]
        mock_msg = self._mock_assistant_msg(tool_calls=tcs)
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"query": args.get("query", ""), "results": [f"result_{args.get('query', '')}"]})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")

        assert len(messages) == 3
        for i, tc in enumerate(tcs):
            assert messages[i]["tool_call_id"] == tc.id
            assert f"q{i}" in messages[i]["content"]

    def test_four_tool_batch_executes_concurrently(self, agent):
        """4-tool parallel batch: all execute, results ordered, 100% pass."""
        tcs = [
            self._mock_tool_call("read_file", {"path": f"file{i}.txt"}, f"c{i}")
            for i in range(4)
        ]
        mock_msg = self._mock_assistant_msg(tool_calls=tcs)
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"path": args.get("path", ""), "size": 100})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")

        assert len(messages) == 4
        for i, tc in enumerate(tcs):
            assert messages[i]["tool_call_id"] == tc.id
            assert f"file{i}.txt" in messages[i]["content"]

    def test_mixed_read_and_search_batch(self, agent):
        """read_file + search_files: safe parallel, different scopes."""
        tc1 = self._mock_tool_call("read_file", {"path": "config.yaml"}, "c1")
        tc2 = self._mock_tool_call("web_search", {"query": "provider"}, "c2")
        mock_msg = self._mock_assistant_msg(tool_calls=[tc1, tc2])
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"tool": name, "args": args})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")

        assert len(messages) == 2
        assert messages[0]["tool_call_id"] == "c1"
        assert messages[1]["tool_call_id"] == "c2"
        assert "config.yaml" in messages[0]["content"]
        assert "provider" in messages[1]["content"]

    def test_concurrent_pass_rate_report(self, agent):
        """Simulate 2/3/4-tool batches and report pass rate."""
        batch_sizes = [2, 3, 4]
        pass_rates = {}

        for size in batch_sizes:
            tcs = [
                self._mock_tool_call("web_search", {"query": f"q{i}"}, f"c{i}")
                for i in range(size)
            ]
            mock_msg = self._mock_assistant_msg(tool_calls=tcs)
            messages = []

            def fake_handle(name, args, task_id, **kwargs):
                return json.dumps({"ok": True, "query": args.get("query", "")})

            with patch("run_agent.handle_function_call", side_effect=fake_handle):
                agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")

            passed = sum(1 for m in messages if "ok" in m.get("content", ""))
            pass_rates[size] = passed / size if size > 0 else 0.0

        for size, rate in pass_rates.items():
            assert rate == 1.0, f"Expected 100% pass rate for {size}-tool batch, got {rate:.0%}"

    def test_gemma4_style_two_read_files(self, agent):
        """Gemma 4 may issue two reads simultaneously — verify both returned."""
        tc1 = self._mock_tool_call("read_file", {"path": "src/main.py"}, "c1")
        tc2 = self._mock_tool_call("read_file", {"path": "src/utils.py"}, "c2")
        mock_msg = self._mock_assistant_msg(tool_calls=[tc1, tc2])
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"content": f"# {args['path']}\nprint('hello')"})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")

        assert len(messages) == 2
        assert "main.py" in messages[0]["content"]
        assert "utils.py" in messages[1]["content"]

    def test_gemma4_style_three_reads(self, agent):
        """Gemma 4 may issue 3 reads for different files — all returned."""
        tcs = [
            self._mock_tool_call("read_file", {"path": f"mod{i}.py"}, f"c{i}")
            for i in range(3)
        ]
        mock_msg = self._mock_assistant_msg(tool_calls=tcs)
        messages = []

        def fake_handle(name, args, task_id, **kwargs):
            return json.dumps({"content": f"# {args['path']}"})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")

        assert len(messages) == 3
        for i in range(3):
            assert f"mod{i}.py" in messages[i]["content"]

    def test_mixed_safe_and_write_tools_parallel(self, agent):
        """Mix of read (safe) and write (path-scoped) on different paths — parallel."""
        tc1 = self._mock_tool_call("read_file", {"path": "input.txt"}, "c1")
        tc2 = self._mock_tool_call("write_file", {"path": "output.txt", "content": "x"}, "c2")
        tc3 = self._mock_tool_call("read_file", {"path": "config.txt"}, "c3")
        mock_msg = self._mock_assistant_msg(tool_calls=[tc1, tc2, tc3])
        messages = []

        call_order = []

        def fake_handle(name, args, task_id, **kwargs):
            call_order.append(name)
            return json.dumps({"tool": name, "path": args.get("path", "")})

        with patch("run_agent.handle_function_call", side_effect=fake_handle):
            agent._execute_tool_calls_concurrent(mock_msg, messages, "task-1")

        assert len(messages) == 3
        # Results ordered by tool call ID, not completion order
        assert messages[0]["tool_call_id"] == "c1"
        assert messages[1]["tool_call_id"] == "c2"
        assert messages[2]["tool_call_id"] == "c3"
        # All three should have executed
        assert len(call_order) == 3
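The implementation of `_execute_tool_calls_concurrent` lives in `run_agent.py` and is not shown in this diff. As a rough sketch of the behaviour the tests above assert (per-call JSON argument parsing, a worker pool bounded by `_MAX_TOOL_WORKERS`, results appended in tool-call order, and a sequential fallback when arguments are not a JSON object), something along these lines would satisfy them; the names and structure below are illustrative, not the actual code:

```python
# Minimal sketch, not the run_agent.py implementation. It only mirrors what the
# tests check: argument parsing per call, a bounded thread pool, and results
# appended in the original tool-call order.
import json
from concurrent.futures import ThreadPoolExecutor

_MAX_TOOL_WORKERS = 8  # assumed value; the tests only require 1 <= n <= 32


def execute_tool_calls_concurrent(msg, messages, task_id, handle_function_call):
    parsed = []
    for tc in msg.tool_calls:
        try:
            args = json.loads(tc.function.arguments)
        except (json.JSONDecodeError, TypeError):
            args = None
        if not isinstance(args, dict):
            return False  # caller would fall back to the sequential path
        parsed.append((tc, args))

    if not parsed:
        return True

    def run_one(item):
        tc, args = item
        try:
            return handle_function_call(tc.function.name, args, task_id, tool_call_id=tc.id)
        except Exception as exc:  # one failing tool must not sink the whole batch
            return json.dumps({"error": str(exc)})

    with ThreadPoolExecutor(max_workers=min(_MAX_TOOL_WORKERS, len(parsed))) as pool:
        results = list(pool.map(run_one, parsed))  # map preserves submission order

    for (tc, _), result in zip(parsed, results):
        messages.append({"role": "tool", "tool_call_id": tc.id, "content": result})
    return True
```

Because `pool.map` returns results in submission order, the tests can assert that `messages[i]["tool_call_id"]` matches the i-th tool call regardless of which call finishes first.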
tests/tools/test_lightrag_tool.py (Normal file, 176 lines)
@@ -0,0 +1,176 @@
"""Tests for tools/lightrag_tool.py"""

import json
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

# LightRAG may not be installed in all test environments
pytest.importorskip("lightrag", reason="lightrag-hku not installed")

from tools.lightrag_tool import (
    check_lightrag_requirements,
    lightrag_index,
    lightrag_query,
    _collect_markdown_files,
    _read_text_safe,
    LIGHTRAG_DIR,
)


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

def _parse_result(result: str) -> dict:
    """Parse JSON tool result, falling back to error string detection."""
    try:
        return json.loads(result)
    except json.JSONDecodeError:
        return {"_error": result}


# ---------------------------------------------------------------------------
# Unit tests
# ---------------------------------------------------------------------------

class TestCollectMarkdownFiles:
    def test_collects_md_files(self, tmp_path):
        (tmp_path / "a.md").write_text("# A")
        (tmp_path / "b.md").write_text("# B")
        (tmp_path / "skip.txt").write_text("text")
        found = _collect_markdown_files(tmp_path)
        assert len(found) == 2
        assert all(p.suffix == ".md" for p in found)

    def test_skips_hidden_dirs(self, tmp_path):
        (tmp_path / ".git").mkdir()
        (tmp_path / ".git" / "readme.md").write_text("# git")
        (tmp_path / "visible.md").write_text("# visible")
        found = _collect_markdown_files(tmp_path)
        names = [p.name for p in found]
        assert "visible.md" in names
        assert "readme.md" not in names

    def test_returns_empty_for_missing_dir(self):
        assert _collect_markdown_files(Path("/nonexistent")) == []


class TestReadTextSafe:
    def test_reads_small_file(self, tmp_path):
        p = tmp_path / "test.md"
        p.write_text("hello world")
        assert _read_text_safe(p) == "hello world"

    def test_truncates_large_file(self, tmp_path):
        p = tmp_path / "big.md"
        p.write_text("x" * 1_000_000)
        text = _read_text_safe(p, limit=500_000)
        assert len(text) == 500_000

    def test_reads_binary_without_crashing(self, tmp_path):
        p = tmp_path / "binary.md"
        p.write_bytes(b"\x00\x01\x02")
        result = _read_text_safe(p)
        # Should not crash; control chars 0x00-0x7F are valid UTF-8
        assert isinstance(result, str)


class TestCheckRequirements:
    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    def test_ok_when_ollama_up(self, mock_ollama):
        assert check_lightrag_requirements() is True

    @patch("tools.lightrag_tool._ollama_available", return_value=False)
    def test_false_when_ollama_down(self, mock_ollama):
        assert check_lightrag_requirements() is False

    @patch.dict(sys.modules, {"lightrag": None}, clear=False)
    def test_false_when_lightrag_missing(self):
        with patch("tools.lightrag_tool._ollama_available", return_value=True):
            # Force ImportError by removing lightrag from sys.modules
            # and blocking import
            assert check_lightrag_requirements() is False


class TestLightragIndex:
    @patch("tools.lightrag_tool._ollama_available", return_value=False)
    def test_error_when_ollama_down(self, mock_ollama):
        result = lightrag_index()
        assert "Ollama is not running" in result

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    @patch("tools.lightrag_tool._has_ollama_model", return_value=False)
    def test_error_when_model_missing(self, mock_model, mock_ollama):
        result = lightrag_index()
        assert "not found in Ollama" in result

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    @patch("tools.lightrag_tool._has_ollama_model", return_value=True)
    @patch("tools.lightrag_tool._get_lightrag")
    @patch("tools.lightrag_tool._collect_markdown_files", return_value=[])
    def test_warning_when_no_files(self, mock_collect, mock_get_rag, mock_model, mock_ollama):
        result = lightrag_index()
        data = _parse_result(result)
        assert data.get("status") == "warning"
        assert "No markdown files found" in data.get("message", "")

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    @patch("tools.lightrag_tool._has_ollama_model", return_value=True)
    @patch("tools.lightrag_tool._get_lightrag")
    @patch("tools.lightrag_tool._collect_markdown_files")
    @patch("tools.lightrag_tool._read_text_safe", return_value="# Skill doc\nContent.")
    @patch("asyncio.run")
    def test_indexes_files(self, mock_asyncio, mock_read, mock_collect, mock_get_rag, mock_model, mock_ollama):
        mock_collect.return_value = [Path("/fake/skills/git.md"), Path("/fake/skills/docker.md")]
        mock_rag = MagicMock()
        mock_get_rag.return_value = mock_rag

        result = lightrag_index()
        data = _parse_result(result)
        assert data.get("status") == "ok"
        assert data.get("indexed_files") == 2
        assert data.get("errors") == 0


class TestLightragQuery:
    @patch("tools.lightrag_tool._ollama_available", return_value=False)
    def test_error_when_ollama_down(self, mock_ollama):
        result = lightrag_query("test", mode="hybrid")
        assert "Ollama is not running" in result

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    @patch("tools.lightrag_tool.LIGHTRAG_DIR")
    def test_empty_index_message(self, mock_dir, mock_ollama):
        mock_dir.exists.return_value = True
        mock_dir.iterdir.return_value = iter([])
        result = lightrag_query("test", mode="hybrid")
        data = _parse_result(result)
        assert data.get("status") == "empty"

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    @patch("tools.lightrag_tool.LIGHTRAG_DIR")
    @patch("tools.lightrag_tool._get_lightrag")
    @patch("asyncio.run", return_value="Use git clone for repos.")
    def test_query_returns_answer(self, mock_asyncio, mock_get_rag, mock_dir, mock_ollama):
        mock_dir.exists.return_value = True
        mock_dir.iterdir.return_value = iter([Path("dummy")])
        mock_rag = MagicMock()
        mock_get_rag.return_value = mock_rag

        result = lightrag_query("How do I clone a repo?", mode="hybrid")
        data = _parse_result(result)
        assert data.get("status") == "ok"
        assert data.get("mode") == "hybrid"
        assert "clone" in data.get("answer", "").lower()

    @patch("tools.lightrag_tool._ollama_available", return_value=True)
    def test_rejects_invalid_mode(self, mock_ollama):
        result = lightrag_query("test", mode="invalid")
        assert "mode must be one of" in result

    def test_rejects_empty_query(self):
        result = lightrag_query("", mode="hybrid")
        assert "Query cannot be empty" in result
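`tool_error` is imported from `tools.registry` in the module under test, but its body is not part of this diff. The tests above only rely on it returning a plain, non-JSON string containing the message, which is why `_parse_result` wraps unparseable results as `{"_error": result}`. A hypothetical stand-in consistent with that contract:

```python
# Hypothetical stand-in for tools.registry.tool_error (not shown in this diff);
# the tests only require that the returned value is a plain string containing
# the error message, so substring assertions like
# 'assert "Ollama is not running" in result' work on it.
def tool_error(message: str) -> str:
    return f"Error: {message}"
```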
tools/lightrag_tool.py (Normal file, 405 lines)
@@ -0,0 +1,405 @@
#!/usr/bin/env python3
"""
LightRAG Tool — Graph-based knowledge retrieval for skills and docs.

Indexes markdown files under ~/.hermes/skills/ (and optional extra dirs)
into a LightRAG knowledge graph stored at ~/.hermes/lightrag/.

Requires:
    - lightrag-hku (pip install lightrag-hku)
    - Ollama running locally with an embedding model (default: nomic-embed-text)
    - Ollama running locally with a chat model (default: qwen2.5:7b)

Usage:
    lightrag_query("How do I dispatch the burn fleet?", mode="hybrid")
    lightrag_index()  # re-index skill files
"""

import asyncio
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional

import numpy as np

from hermes_constants import get_hermes_home
from tools.registry import registry, tool_error

logger = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------

DEFAULT_EMBED_MODEL = os.environ.get("LIGHTRAG_EMBED_MODEL", "nomic-embed-text")
DEFAULT_LLM_MODEL = os.environ.get("LIGHTRAG_LLM_MODEL", "qwen2.5:7b")
DEFAULT_OLLAMA_HOST = os.environ.get("LIGHTRAG_OLLAMA_HOST", "http://localhost:11434")

LIGHTRAG_DIR = get_hermes_home() / "lightrag"
SKILLS_DIR = get_hermes_home() / "skills"

# ---------------------------------------------------------------------------
# Ollama helpers
# ---------------------------------------------------------------------------


def _ollama_available() -> bool:
    """Check if Ollama server is reachable."""
    try:
        import urllib.request
        req = urllib.request.Request(f"{DEFAULT_OLLAMA_HOST}/api/tags")
        with urllib.request.urlopen(req, timeout=3) as resp:
            return resp.status == 200
    except Exception:
        return False


def _has_ollama_model(model_name: str) -> bool:
    """Check if a specific model is pulled in Ollama."""
    try:
        import urllib.request
        req = urllib.request.Request(f"{DEFAULT_OLLAMA_HOST}/api/tags")
        with urllib.request.urlopen(req, timeout=3) as resp:
            data = json.loads(resp.read())
            models = [m["name"] for m in data.get("models", [])]
            return any(model_name in m for m in models)
    except Exception:
        return False


async def _ollama_embedding(texts: list, **kwargs) -> np.ndarray:
    """Call Ollama embeddings API."""
    import aiohttp

    payload = {
        "model": DEFAULT_EMBED_MODEL,
        "input": texts,
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{DEFAULT_OLLAMA_HOST}/api/embed",
            json=payload,
            timeout=aiohttp.ClientTimeout(total=60),
        ) as resp:
            resp.raise_for_status()
            data = await resp.json()
            embeddings = data.get("embeddings", [])
            if not embeddings:
                raise RuntimeError("Ollama returned empty embeddings")
            return np.array(embeddings, dtype=np.float32)


async def _ollama_complete(
    prompt, system_prompt=None, history_messages=None, **kwargs
) -> str:
    """Call Ollama generate API for LLM completion."""
    import aiohttp

    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    if history_messages:
        for msg in history_messages:
            role = "user" if msg.get("role") == "user" else "assistant"
            messages.append({"role": role, "content": msg.get("content", "")})
    messages.append({"role": "user", "content": prompt})

    payload = {
        "model": DEFAULT_LLM_MODEL,
        "messages": messages,
        "stream": False,
        "options": {"temperature": 0.3, "num_predict": 2048},
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{DEFAULT_OLLAMA_HOST}/api/chat",
            json=payload,
            timeout=aiohttp.ClientTimeout(total=120),
        ) as resp:
            resp.raise_for_status()
            data = await resp.json()
            return data.get("message", {}).get("content", "")


# ---------------------------------------------------------------------------
# LightRAG setup
# ---------------------------------------------------------------------------

_lightrag_instance: Optional[object] = None


def _get_lightrag() -> object:
    """Lazy-initialize LightRAG with Ollama backends."""
    global _lightrag_instance
    if _lightrag_instance is not None:
        return _lightrag_instance

    try:
        from lightrag import LightRAG, QueryParam
        from lightrag.utils import EmbeddingFunc
    except ImportError as e:
        raise RuntimeError(
            "lightrag is not installed. Run: pip install lightrag-hku"
        ) from e

    LIGHTRAG_DIR.mkdir(parents=True, exist_ok=True)

    # Wrap Ollama embedding for LightRAG
    embed_func = EmbeddingFunc(
        embedding_dim=768,  # nomic-embed-text dimension
        func=_ollama_embedding,
        max_token_size=8192,
        model_name=DEFAULT_EMBED_MODEL,
    )

    _lightrag_instance = LightRAG(
        working_dir=str(LIGHTRAG_DIR),
        embedding_func=embed_func,
        llm_model_func=_ollama_complete,
        llm_model_name=DEFAULT_LLM_MODEL,
        chunk_token_size=1200,
        chunk_overlap_token_size=100,
    )
    return _lightrag_instance


# ---------------------------------------------------------------------------
# Indexing
# ---------------------------------------------------------------------------


def _collect_markdown_files(root: Path) -> List[Path]:
    """Collect all .md files under root, excluding node_modules and .git."""
    files = []
    if not root.exists():
        return files
    for path in root.rglob("*.md"):
        if any(part.startswith(".") or part == "node_modules" for part in path.parts):
            continue
        files.append(path)
    return sorted(files)


def _read_text_safe(path: Path, limit: int = 500_000) -> str:
    """Read file text with size limit."""
    try:
        stat = path.stat()
        if stat.st_size > limit:
            return path.read_text(encoding="utf-8", errors="ignore")[:limit]
        return path.read_text(encoding="utf-8", errors="ignore")
    except Exception as e:
        logger.warning("Failed to read %s: %s", path, e)
        return ""
def lightrag_index(directories: Optional[List[str]] = None) -> str:
    """Index markdown files into LightRAG knowledge graph.

    Args:
        directories: Extra directories to index (in addition to ~/.hermes/skills/).
    """
    if not _ollama_available():
        return tool_error(
            "Ollama is not running. Start it with: ollama serve"
        )

    if not _has_ollama_model(DEFAULT_EMBED_MODEL):
        return tool_error(
            f"Embedding model '{DEFAULT_EMBED_MODEL}' not found in Ollama. "
            f"Pull it with: ollama pull {DEFAULT_EMBED_MODEL}"
        )

    if not _has_ollama_model(DEFAULT_LLM_MODEL):
        return tool_error(
            f"LLM model '{DEFAULT_LLM_MODEL}' not found in Ollama. "
            f"Pull it with: ollama pull {DEFAULT_LLM_MODEL}"
        )

    rag = _get_lightrag()
    dirs = [SKILLS_DIR]
    if directories:
        for d in directories:
            p = Path(d).expanduser()
            if p.exists():
                dirs.append(p)

    all_files = []
    for d in dirs:
        all_files.extend(_collect_markdown_files(d))

    if not all_files:
        return json.dumps({
            "status": "warning",
            "message": "No markdown files found to index.",
            "directories": [str(d) for d in dirs],
        })

    # Read and insert files
    inserted = 0
    errors = 0
    for path in all_files:
        text = _read_text_safe(path)
        if not text.strip():
            continue
        try:
            # LightRAG's insert is async (LightRAG.ainsert); bridge it
            asyncio.run(rag.ainsert(text))
            inserted += 1
        except Exception as e:
            logger.warning("Failed to index %s: %s", path, e)
            errors += 1

    return json.dumps({
        "status": "ok",
        "indexed_files": inserted,
        "errors": errors,
        "total_files": len(all_files),
        "storage_dir": str(LIGHTRAG_DIR),
    })
# ---------------------------------------------------------------------------
# Query
# ---------------------------------------------------------------------------


def lightrag_query(query: str, mode: str = "hybrid") -> str:
    """Query the LightRAG knowledge graph.

    Args:
        query: The question or search query.
        mode: Search mode — "local" (nearby entities), "global" (graph-wide),
            or "hybrid" (both).
    """
    if not query or not query.strip():
        return tool_error("Query cannot be empty.")

    if mode not in {"local", "global", "hybrid"}:
        return tool_error("mode must be one of: local, global, hybrid")

    if not _ollama_available():
        return tool_error(
            "Ollama is not running. Start it with: ollama serve"
        )

    rag = _get_lightrag()

    # Check if any data has been indexed
    if not LIGHTRAG_DIR.exists() or not any(LIGHTRAG_DIR.iterdir()):
        return json.dumps({
            "status": "empty",
            "message": "LightRAG index is empty. Run lightrag_index() first.",
        })

    try:
        from lightrag import QueryParam
        param = QueryParam(mode=mode)
        result = asyncio.run(rag.aquery(query, param=param))
        return json.dumps({
            "status": "ok",
            "mode": mode,
            "query": query,
            "answer": result,
        })
    except Exception as e:
        logger.exception("LightRAG query failed")
        return tool_error(f"Query failed: {e}")


# ---------------------------------------------------------------------------
# Tool schemas
# ---------------------------------------------------------------------------

LIGHTRAG_QUERY_SCHEMA = {
    "name": "lightrag_query",
    "description": (
        "Graph-based knowledge retrieval over indexed skills and documentation.\n\n"
        "Use this when the user asks about: conventions, workflows, tool usage, "
        "project-specific practices, or anything that might be documented in skills.\n\n"
        "Modes:\n"
        "- local: fast, searches nearby entities in the graph\n"
        "- global: thorough, reasons across the entire knowledge graph\n"
        "- hybrid: balanced, combines local and global (recommended)\n\n"
        "If the index is empty, the tool will report that and you should "
        "call lightrag_index() to populate it."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "The question or search query.",
            },
            "mode": {
                "type": "string",
                "enum": ["local", "global", "hybrid"],
                "description": "Search mode. hybrid is recommended.",
            },
        },
        "required": ["query"],
    },
}

LIGHTRAG_INDEX_SCHEMA = {
    "name": "lightrag_index",
    "description": (
        "(Re-)build the LightRAG knowledge graph from skill files and docs.\n\n"
        "By default indexes ~/.hermes/skills/. Pass extra directories if needed.\n"
        "This is a one-time or occasional operation; queries work against the "
        "existing index until you re-index."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "directories": {
                "type": "array",
                "items": {"type": "string"},
                "description": "Optional extra directories to index (in addition to ~/.hermes/skills/).",
            },
        },
    },
}


# ---------------------------------------------------------------------------
# Availability check
# ---------------------------------------------------------------------------


def check_lightrag_requirements() -> bool:
    """Return True if LightRAG and Ollama appear to be available."""
    try:
        import lightrag  # noqa: F401
    except ImportError:
        return False
    return _ollama_available()


# ---------------------------------------------------------------------------
# Registry
# ---------------------------------------------------------------------------

registry.register(
    name="lightrag_query",
    toolset="rag",
    schema=LIGHTRAG_QUERY_SCHEMA,
    handler=lambda args, **kw: lightrag_query(
        query=args.get("query", ""),
        mode=args.get("mode", "hybrid"),
    ),
    check_fn=check_lightrag_requirements,
    emoji="🔎",
)

registry.register(
    name="lightrag_index",
    toolset="rag",
    schema=LIGHTRAG_INDEX_SCHEMA,
    handler=lambda args, **kw: lightrag_index(
        directories=args.get("directories"),
    ),
    check_fn=check_lightrag_requirements,
    emoji="📚",
)
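To show how the two registered tools compose in practice (illustrative only, assuming Ollama is running with the default `nomic-embed-text` and `qwen2.5:7b` models pulled), a caller could index the skills directory once and then query it:

```python
# Illustrative usage sketch, assuming Ollama is up and the default models are
# pulled. Both functions return JSON strings on success, so results are parsed
# before use; error paths return plain strings instead.
import json

from tools.lightrag_tool import lightrag_index, lightrag_query

report = json.loads(lightrag_index())  # builds the graph under ~/.hermes/lightrag/
print(f"indexed {report.get('indexed_files')} files, {report.get('errors')} errors")

raw = lightrag_query("How do I dispatch the burn fleet?", mode="hybrid")
data = json.loads(raw)
if data.get("status") == "ok":
    print(data["answer"])
elif data.get("status") == "empty":
    print("Index is empty; run lightrag_index() first.")
```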
@@ -167,6 +167,12 @@ TOOLSETS = {
         "tools": ["memory"],
         "includes": []
     },
+    "rag": {
+        "description": "Graph-based knowledge retrieval over indexed skills and docs (LightRAG)",
+        "tools": ["lightrag_query", "lightrag_index"],
+        "includes": []
+    },
     "session_search": {
         "description": "Search and recall past conversations with summarization",