Compare commits
2 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
eb16a6671e | ||
|
|
18998b60c3 |
@@ -50,78 +50,6 @@ def sanitize_context(text: str) -> str:
|
||||
return _FENCE_TAG_RE.sub('', text)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Prefetch filtering helpers
# ---------------------------------------------------------------------------

# Meta-instruction debris that memory providers sometimes echo back.
# These are prompts/instructions, not user-generated content. Each pattern
# tolerates an optional list marker ("-"/"*") and blockquote marker (">")
# before the instruction keyword.
_META_INSTRUCTION_PATTERNS = [
    re.compile(r"^\s*[\-\*]?\s*>?\s*Focus on:\s*", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Note:\s*", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*System\s+(note|prompt|instruction):", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*You are\s+", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Please\s+(provide|respond|answer|write)", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Do not\s+", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Always\s+", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Consider\s+(the following|these|this)\b", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Here\s+(is|are)\s+(some|the|a few)\b", re.IGNORECASE),
]

# Leading list/blockquote markers stripped before measuring content length.
# Hoisted to module level so the pattern is compiled once, not per call.
_LIST_MARKER_RE = re.compile(r"^[\-\*•>\s]+")

# Minimum characters (after stripping list markers) for a line to count as
# meaningful memory content.
_MIN_CONTENT_LENGTH = 15


def _is_meta_instruction_line(line: str) -> bool:
    """Return True if the line looks like a prompt/template instruction, not memory content."""
    return any(pat.search(line) for pat in _META_INSTRUCTION_PATTERNS)


def _is_low_signal_line(line: str) -> bool:
    """Return True for very short or content-free lines.

    A line is low-signal when it is empty, is a bare list/quote marker, or
    has fewer than ``_MIN_CONTENT_LENGTH`` characters once leading list and
    blockquote markers are stripped.
    """
    stripped = line.strip()
    # Empty or just punctuation/list marker
    if not stripped or stripped in {"-", "*", ">", "•", "—", "--"}:
        return True
    # Too short to be meaningful after stripping markers
    cleaned = _LIST_MARKER_RE.sub("", stripped)
    return len(cleaned) < _MIN_CONTENT_LENGTH


def _filter_prefetch_lines(text: str) -> str:
    """Filter and deduplicate prefetch result lines.

    Removes:
    - exact duplicate lines (compared after stripping surrounding whitespace)
    - meta-instruction debris (prompts, templates)
    - very short / content-free lines (this includes blank separator lines)

    Returns the surviving lines joined by single newlines; survivors keep
    their original order and indentation.
    """
    if not text or not text.strip():
        return ""

    seen: set[str] = set()
    kept: list[str] = []
    for line in text.splitlines(keepends=False):
        stripped = line.strip()
        # Deduplicate exact lines
        if stripped in seen:
            continue
        # Skip meta-instructions
        if _is_meta_instruction_line(line):
            continue
        # Skip low-signal lines
        if _is_low_signal_line(line):
            continue
        seen.add(stripped)
        kept.append(line)

    return "\n".join(kept)
|
||||
|
||||
|
||||
def build_memory_context_block(raw_context: str) -> str:
|
||||
"""Wrap prefetched memory in a fenced block with system note.
|
||||
|
||||
@@ -252,14 +180,7 @@ class MemoryManager:
|
||||
"Memory provider '%s' prefetch failed (non-fatal): %s",
|
||||
provider.name, e,
|
||||
)
|
||||
raw = "\n\n".join(parts)
|
||||
if not raw:
|
||||
return ""
|
||||
# Apply line-level filtering: dedupe, strip meta-instructions,
|
||||
# remove very short fragments. This prevents noisy providers
|
||||
# (e.g. MemPalace transcript recall) from bloating context.
|
||||
filtered = _filter_prefetch_lines(raw)
|
||||
return filtered
|
||||
return "\n\n".join(parts)
|
||||
|
||||
def queue_prefetch_all(self, query: str, *, session_id: str = "") -> None:
|
||||
"""Queue background prefetch on all providers for the next turn."""
|
||||
|
||||
68
docs/ragflow-integration.md
Normal file
68
docs/ragflow-integration.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# RAGFlow integration
|
||||
|
||||
This repo-side slice adds:
|
||||
|
||||
- `tools/ragflow_tool.py`
|
||||
- `ragflow_ingest(document_url, dataset)`
|
||||
- `ragflow_query(query, dataset, limit=5)`
|
||||
- `scripts/ragflow_bootstrap.py`
|
||||
- fetches the upstream RAGFlow Docker bundle
|
||||
- runs `docker compose --profile cpu up -d` or `gpu`
|
||||
|
||||
## Deployment
|
||||
|
||||
Bootstrap the upstream CPU stack locally:
|
||||
|
||||
```bash
|
||||
python3 scripts/ragflow_bootstrap.py --profile cpu
|
||||
```
|
||||
|
||||
Dry-run only:
|
||||
|
||||
```bash
|
||||
python3 scripts/ragflow_bootstrap.py --profile cpu --dry-run
|
||||
```
|
||||
|
||||
Fetch files without launching Docker:
|
||||
|
||||
```bash
|
||||
python3 scripts/ragflow_bootstrap.py --no-up
|
||||
```
|
||||
|
||||
Default bundle target:
|
||||
|
||||
- `~/.hermes/services/ragflow`
|
||||
|
||||
## Runtime configuration
|
||||
|
||||
Optional environment variables:
|
||||
|
||||
- `RAGFLOW_API_URL` — defaults to `http://localhost:9380`
|
||||
- `RAGFLOW_API_KEY` — Bearer token for authenticated RAGFlow APIs
|
||||
|
||||
## Supported document types
|
||||
|
||||
RAGFlow ingest accepts:
|
||||
|
||||
- PDF: `.pdf`
|
||||
- Word: `.doc`, `.docx`
|
||||
- Presentations: `.ppt`, `.pptx`
|
||||
- Images via OCR: `.png`, `.jpg`, `.jpeg`, `.webp`, `.bmp`, `.tif`, `.tiff`, `.gif`
|
||||
- Text and codebase documents: `.txt`, `.md`, `.rst`, `.html`, `.htm`, `.json`, `.yaml`, `.yml`, `.toml`, `.ini`, `.py`, `.js`, `.ts`, `.tsx`, `.jsx`, `.java`, `.go`, `.rs`, `.c`, `.cc`, `.cpp`, `.h`, `.hpp`, `.rb`, `.php`, `.sql`, `.sh`
- Tables: `.csv`, `.tsv`
|
||||
|
||||
## Example tool usage
|
||||
|
||||
```json
|
||||
{"document_url":"https://arxiv.org/pdf/1706.03762.pdf","dataset":"research-papers"}
|
||||
```
|
||||
|
||||
```json
|
||||
{"query":"What does the paper say about attention heads?","dataset":"research-papers","limit":5}
|
||||
```
|
||||
|
||||
## Use cases
|
||||
|
||||
- research papers
|
||||
- technical documentation
|
||||
- OCR-heavy image workflows
|
||||
- ingested codebases and architecture docs
|
||||
79
scripts/ragflow_bootstrap.py
Normal file
79
scripts/ragflow_bootstrap.py
Normal file
@@ -0,0 +1,79 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Bootstrap an upstream RAGFlow Docker bundle for Hermes.
|
||||
|
||||
This script fetches the upstream RAGFlow docker bundle into a local directory
|
||||
so operators can run `docker compose --profile cpu up -d` (or `gpu`) without
|
||||
manually assembling the required files.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import urllib.request
|
||||
from pathlib import Path
|
||||
|
||||
UPSTREAM_BASE = "https://raw.githubusercontent.com/infiniflow/ragflow/main/docker"

# Bundle files fetched verbatim from the upstream RAGFlow repository.
UPSTREAM_FILES = {
    "docker-compose.yml": f"{UPSTREAM_BASE}/docker-compose.yml",
    "docker-compose-base.yml": f"{UPSTREAM_BASE}/docker-compose-base.yml",
    ".env": f"{UPSTREAM_BASE}/.env",
    "service_conf.yaml.template": f"{UPSTREAM_BASE}/service_conf.yaml.template",
    "entrypoint.sh": f"{UPSTREAM_BASE}/entrypoint.sh",
}


def materialize_bundle(target_dir: str | Path, overwrite: bool = False) -> list[Path]:
    """Download the upstream RAGFlow docker bundle into *target_dir*.

    Existing files are left untouched unless *overwrite* is True. Returns
    the paths of all bundle files, freshly downloaded or pre-existing.
    """
    destination = Path(target_dir).expanduser()
    destination.mkdir(parents=True, exist_ok=True)
    collected: list[Path] = []
    for filename, source_url in UPSTREAM_FILES.items():
        bundle_file = destination / filename
        if overwrite or not bundle_file.exists():
            with urllib.request.urlopen(source_url, timeout=60) as response:
                bundle_file.write_bytes(response.read())
            if filename == "entrypoint.sh":
                # Upstream ships this as an executable script.
                bundle_file.chmod(0o755)
        collected.append(bundle_file)
    return collected
|
||||
|
||||
|
||||
def build_compose_command(target_dir: str | Path, profile: str = "cpu") -> list[str]:
    """Return the docker compose invocation for *profile*.

    *target_dir* is accepted for interface symmetry with ``run_compose``;
    the working directory is applied by the caller via ``cwd``, not a flag.
    """
    return ["docker", "compose", "--profile", profile, "up", "-d"]


def run_compose(target_dir: str | Path, profile: str = "cpu", dry_run: bool = False) -> dict:
    """Run (or, with *dry_run*, merely describe) docker compose in *target_dir*."""
    bundle_dir = Path(target_dir).expanduser()
    command = build_compose_command(bundle_dir, profile=profile)
    executed = not dry_run
    if executed:
        # check=True: surface a non-zero compose exit as CalledProcessError.
        subprocess.run(command, cwd=bundle_dir, check=True)
    return {"target_dir": str(bundle_dir), "command": command, "executed": executed}
|
||||
|
||||
|
||||
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: fetch the bundle, then optionally launch docker compose."""
    parser = argparse.ArgumentParser(description="Fetch and launch the upstream RAGFlow Docker bundle")
    parser.add_argument("--target-dir", default=str(Path.home() / ".hermes" / "services" / "ragflow"))
    parser.add_argument("--profile", choices=["cpu", "gpu"], default="cpu")
    parser.add_argument("--overwrite", action="store_true")
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--no-up", action="store_true", help="Only fetch bundle files; do not run docker compose")
    args = parser.parse_args(argv)

    written = materialize_bundle(args.target_dir, overwrite=args.overwrite)
    print(f"Fetched {len(written)} RAGFlow docker files into {Path(args.target_dir).expanduser()}")
    if args.no_up:
        # Fetch-only mode: stop before touching docker.
        return 0

    outcome = run_compose(args.target_dir, profile=args.profile, dry_run=args.dry_run)
    print("Command:", " ".join(outcome["command"]))
    if outcome["executed"]:
        print("RAGFlow docker stack launch requested.")
    else:
        print("Dry run only; docker compose not executed.")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
|
||||
@@ -198,14 +198,14 @@ class TestMemoryManager:
|
||||
def test_prefetch_skips_empty(self):
|
||||
mgr = MemoryManager()
|
||||
p1 = FakeMemoryProvider("builtin")
|
||||
p1._prefetch_result = "This provider has meaningful memories with enough length"
|
||||
p1._prefetch_result = "Has memories"
|
||||
p2 = FakeMemoryProvider("external")
|
||||
p2._prefetch_result = ""
|
||||
mgr.add_provider(p1)
|
||||
mgr.add_provider(p2)
|
||||
|
||||
result = mgr.prefetch_all("query")
|
||||
assert result == "This provider has meaningful memories with enough length"
|
||||
assert result == "Has memories"
|
||||
|
||||
def test_queue_prefetch_all(self):
|
||||
mgr = MemoryManager()
|
||||
@@ -695,92 +695,3 @@ class TestMemoryContextFencing:
|
||||
fence_end = combined.index("</memory-context>")
|
||||
assert "Alice" in combined[fence_start:fence_end]
|
||||
assert combined.index("weather") < fence_start
|
||||
|
||||
|
||||
class TestPrefetchFiltering:
    """Tests for _filter_prefetch_lines and related helpers."""

    def test_deduplicates_exact_lines(self):
        """Exact duplicate lines collapse to a single occurrence."""
        from agent.memory_manager import _filter_prefetch_lines
        raw = "- This is line one with enough characters\n- This is line two with enough characters\n- This is line one with enough characters\n- This is line three with enough characters"
        result = _filter_prefetch_lines(raw)
        lines = [l for l in result.splitlines() if l.strip()]
        # Four input lines with one duplicate -> three survivors.
        assert len(lines) == 3
        assert "- This is line one with enough characters" in result
        assert "- This is line two with enough characters" in result
        assert "- This is line three with enough characters" in result

    def test_removes_meta_instruction_debris(self):
        """Prompt/template echoes are dropped; real content and headings stay."""
        from agent.memory_manager import _filter_prefetch_lines
        raw = (
            "## Fleet Memories\n"
            "- > Focus on: was a non-trivial approach used\n"
            "- > Focus on: was a non-trivial approach used\n"
            "- Actual memory content about fleet ops\n"
            "- Note: this is just a note\n"
        )
        result = _filter_prefetch_lines(raw)
        assert "Focus on" not in result
        assert "Note:" not in result
        assert "Actual memory content about fleet ops" in result
        assert "Fleet Memories" in result

    def test_removes_low_signal_short_lines(self):
        """Empty list markers and very short fragments are filtered out."""
        from agent.memory_manager import _filter_prefetch_lines
        raw = (
            "- \n"
            "- x\n"
            "- This is a meaningful memory entry with enough length\n"
        )
        result = _filter_prefetch_lines(raw)
        assert "- x" not in result
        assert "meaningful memory entry" in result

    def test_preserves_structured_facts(self):
        """Structured fact lines (headings, key: value facts) survive filtering."""
        from agent.memory_manager import _filter_prefetch_lines
        raw = (
            "## Local Facts (Hologram)\n"
            "- ALEXANDER: Prefers Gitea for reports and deliverables.\n"
            "- Telegram home channel is Timmy Time.\n"
        )
        result = _filter_prefetch_lines(raw)
        assert "ALEXANDER" in result
        assert "Gitea" in result
        assert "Telegram" in result

    def test_is_meta_instruction_line(self):
        """Meta patterns match with or without list/blockquote markers."""
        from agent.memory_manager import _is_meta_instruction_line
        assert _is_meta_instruction_line("- > Focus on: something") is True
        assert _is_meta_instruction_line("- Focus on: something") is True
        assert _is_meta_instruction_line("* Focus on: something") is True
        assert _is_meta_instruction_line("- Actual user memory content") is False
        assert _is_meta_instruction_line("ALEXANDER: Prefers Gitea") is False

    def test_is_low_signal_line(self):
        """Bare markers and sub-threshold lines are low-signal; long ones are not."""
        from agent.memory_manager import _is_low_signal_line
        assert _is_low_signal_line("- ") is True
        assert _is_low_signal_line("*") is True
        assert _is_low_signal_line("- x") is True
        assert _is_low_signal_line("- Short line") is True
        assert _is_low_signal_line("- This is a long meaningful memory entry") is False

    def test_prefetch_all_applies_filtering(self):
        """MemoryManager.prefetch_all runs provider output through the filter."""
        from agent.memory_manager import MemoryManager
        mgr = MemoryManager()
        fake = FakeMemoryProvider(name="test")
        fake._prefetch_result = (
            "- > Focus on: was a non-trivial approach\n"
            "- > Focus on: was a non-trivial approach\n"
            "- Real memory fact\n"
        )
        mgr.add_provider(fake)
        result = mgr.prefetch_all("query")
        assert "Focus on" not in result
        assert "Real memory fact" in result
        # Duplicate meta lines removed; the single real fact appears once.
        assert result.count("Real memory fact") == 1

    def test_empty_prefetch_returns_empty(self):
        """Empty or whitespace-only input yields an empty string."""
        from agent.memory_manager import _filter_prefetch_lines
        assert _filter_prefetch_lines("") == ""
        assert _filter_prefetch_lines("  ") == ""
        assert _filter_prefetch_lines("\n\n") == ""
|
||||
|
||||
43
tests/test_ragflow_bootstrap.py
Normal file
43
tests/test_ragflow_bootstrap.py
Normal file
@@ -0,0 +1,43 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib.util
|
||||
import io
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
|
||||
# Repository root, resolved relative to this test file.
ROOT = Path(__file__).resolve().parent.parent
# The bootstrap script under test; it is loaded by path, not as a package import.
SCRIPT_PATH = ROOT / "scripts" / "ragflow_bootstrap.py"


def _load_module():
    """Load scripts/ragflow_bootstrap.py as a fresh, isolated module instance."""
    spec = importlib.util.spec_from_file_location("ragflow_bootstrap", SCRIPT_PATH)
    module = importlib.util.module_from_spec(spec)
    assert spec.loader is not None
    spec.loader.exec_module(module)
    return module
|
||||
|
||||
|
||||
def test_materialize_bundle_downloads_required_upstream_artifacts(tmp_path):
    """materialize_bundle fetches every upstream bundle file into the target dir."""
    module = _load_module()

    def fake_urlopen(url, timeout=0):
        # Serve a tiny synthetic payload named after the requested file.
        name = url.rsplit("/", 1)[-1]
        return io.BytesIO(f"# fetched {name}\n".encode())

    with patch.object(module.urllib.request, "urlopen", side_effect=fake_urlopen):
        written = module.materialize_bundle(tmp_path)

    for expected_name in ("docker-compose.yml", "docker-compose-base.yml", ".env"):
        assert (tmp_path / expected_name).exists()
    assert any(written_path.name == "entrypoint.sh" for written_path in written)
|
||||
|
||||
|
||||
def test_build_compose_command_respects_profile_and_directory(tmp_path):
    """The compose command embeds the requested profile and ends with 'up -d'."""
    module = _load_module()

    command = module.build_compose_command(tmp_path, profile="gpu")

    prefix, suffix = command[:4], command[-2:]
    assert prefix == ["docker", "compose", "--profile", "gpu"]
    assert suffix == ["up", "-d"]
|
||||
122
tests/tools/test_ragflow_tool.py
Normal file
122
tests/tools/test_ragflow_tool.py
Normal file
@@ -0,0 +1,122 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
from tools.registry import registry
|
||||
|
||||
|
||||
class _Response:
    """Minimal stand-in for a requests.Response carrying a JSON payload."""

    def __init__(self, payload: dict, status_code: int = 200):
        self._payload = payload
        self.status_code = status_code
        self.text = json.dumps(payload)

    def json(self):
        """Return the decoded payload unchanged."""
        return self._payload

    def raise_for_status(self):
        """Mimic requests: raise on 4xx/5xx status codes, no-op otherwise."""
        if self.status_code < 400:
            return
        raise RuntimeError(f"HTTP {self.status_code}")
|
||||
|
||||
|
||||
def _reload_module():
    """Re-import tools.ragflow_tool from scratch so registration side effects rerun."""
    for tool_name in ("ragflow_ingest", "ragflow_query"):
        registry.deregister(tool_name)
    sys.modules.pop("tools.ragflow_tool", None)
    fresh = importlib.import_module("tools.ragflow_tool")
    return importlib.reload(fresh)
|
||||
|
||||
|
||||
def test_ragflow_tools_register_and_support_document_formats():
    """Importing the module registers both tools and exposes the format map."""
    module = _reload_module()

    for tool_name in ("ragflow_ingest", "ragflow_query"):
        assert registry.get_entry(tool_name) is not None
    for extension in (".pdf", ".docx", ".png", ".md"):
        assert extension in module.SUPPORTED_EXTENSIONS
||||
|
||||
|
||||
def test_ragflow_ingest_creates_dataset_uploads_and_starts_parse(tmp_path):
    """End-to-end ingest: dataset lookup, creation, upload, and parse trigger."""
    module = _reload_module()
    document = tmp_path / "paper.pdf"
    document.write_bytes(b"%PDF-1.7\n")
    # Records (method, url, params, json) for every fake HTTP call made.
    calls: list[tuple[str, str, dict | None, dict | None]] = []

    def fake_request(method, url, *, headers=None, params=None, json=None, files=None, timeout=None):
        # NOTE: the `json` parameter shadows the json module inside this fake only;
        # the module-level json is still used by the assertions below.
        calls.append((method, url, params, json))
        if method == "GET" and url.endswith("/api/v1/datasets"):
            # Dataset lookup: pretend nothing exists yet, forcing creation.
            return _Response({"code": 0, "data": []})
        if method == "POST" and url.endswith("/api/v1/datasets"):
            # Dataset creation: a PDF ingest should select the "paper" chunker.
            assert json["name"] == "research-papers"
            assert json["chunk_method"] == "paper"
            return _Response({"code": 0, "data": {"id": "ds-1", "name": "research-papers"}})
        if method == "POST" and url.endswith("/api/v1/datasets/ds-1/documents"):
            # Multipart upload of the document itself.
            assert files and files[0][0] == "file"
            return _Response({"code": 0, "data": [{"id": "doc-1", "name": "paper.pdf"}]})
        if method == "POST" and url.endswith("/api/v1/datasets/ds-1/chunks"):
            # Parse trigger for the freshly uploaded document.
            assert json == {"document_ids": ["doc-1"]}
            return _Response({"code": 0})
        raise AssertionError(f"Unexpected request: {method} {url}")

    with patch("tools.ragflow_tool.requests.request", side_effect=fake_request):
        result = json.loads(module.ragflow_ingest_tool(str(document), dataset="research-papers"))

    assert result["dataset_id"] == "ds-1"
    assert result["document_ids"] == ["doc-1"]
    assert result["parse_started"] is True
    assert result["chunk_method"] == "paper"
    # The very first call must be the existence check, not a blind create.
    assert calls[0][0] == "GET"
|
||||
|
||||
def test_ragflow_query_retrieves_chunks_for_named_dataset():
    """Query flow: resolve the dataset by name, then hit the retrieval endpoint."""
    module = _reload_module()

    def fake_request(method, url, *, headers=None, params=None, json=None, files=None, timeout=None):
        if method == "GET" and url.endswith("/api/v1/datasets"):
            # "tech-docs" is not id-shaped, so the lookup must go by name.
            assert params == {"name": "tech-docs"}
            return _Response({"code": 0, "data": [{"id": "ds-9", "name": "tech-docs"}]})
        if method == "POST" and url.endswith("/api/v1/retrieval"):
            # The retrieval payload carries the question, dataset id, and limit.
            assert json["question"] == "How does parsing work?"
            assert json["dataset_ids"] == ["ds-9"]
            assert json["page_size"] == 2
            return _Response(
                {
                    "code": 0,
                    "data": {
                        "chunks": [
                            {
                                "content": "Parsing starts by uploading documents.",
                                "document_id": "doc-9",
                                "document_keyword": "guide.md",
                                "similarity": 0.98,
                            }
                        ],
                        "total": 1,
                    },
                }
            )
        raise AssertionError(f"Unexpected request: {method} {url}")

    with patch("tools.ragflow_tool.requests.request", side_effect=fake_request):
        result = json.loads(module.ragflow_query_tool("How does parsing work?", "tech-docs", limit=2))

    assert result["dataset_id"] == "ds-9"
    assert result["total"] == 1
    assert result["chunks"][0]["content"] == "Parsing starts by uploading documents."
|
||||
|
||||
|
||||
def test_ragflow_ingest_rejects_unsupported_document_types(tmp_path):
    """Files with unknown extensions are rejected before any network call."""
    module = _reload_module()
    unsupported = tmp_path / "binary.exe"
    unsupported.write_bytes(b"MZ")

    result = json.loads(module.ragflow_ingest_tool(str(unsupported), dataset="ignored"))

    assert "error" in result
    assert "Unsupported document type" in result["error"]
|
||||
344
tools/ragflow_tool.py
Normal file
344
tools/ragflow_tool.py
Normal file
@@ -0,0 +1,344 @@
|
||||
#!/usr/bin/env python3
|
||||
"""RAGFlow tool integration for document understanding.
|
||||
|
||||
Provides two tools:
|
||||
- ragflow_ingest(document_url, dataset): upload and parse a document into RAGFlow
|
||||
- ragflow_query(query, dataset): retrieve relevant chunks from a dataset
|
||||
|
||||
Default deployment target is a local RAGFlow server on http://localhost:9380.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import mimetypes
|
||||
import os
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import requests
|
||||
|
||||
from tools.registry import registry, tool_error, tool_result
|
||||
|
||||
# Tool schema advertised to the model for ragflow_ingest.
RAGFLOW_INGEST_SCHEMA = {
    "name": "ragflow_ingest",
    "description": (
        "Upload a document into a RAGFlow dataset, creating the dataset if needed, "
        "then trigger parsing so Hermes can query the content later. Supports PDF, "
        "Word, images via OCR, plus text and code documents."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "document_url": {
                "type": "string",
                "description": "HTTP(S) URL, file:// URL, or local filesystem path to the document.",
            },
            "dataset": {
                "type": "string",
                "description": "Dataset name or id to ingest into. Created automatically when absent.",
            },
        },
        "required": ["document_url", "dataset"],
    },
}

# Tool schema advertised to the model for ragflow_query.
RAGFLOW_QUERY_SCHEMA = {
    "name": "ragflow_query",
    "description": (
        "Query a RAGFlow dataset for relevant chunks. Useful for research papers, "
        "technical docs, OCR-processed images, and ingested codebase documents."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Question or search query to run against RAGFlow.",
            },
            "dataset": {
                "type": "string",
                "description": "Dataset name or id to search.",
            },
            "limit": {
                "type": "integer",
                "description": "Maximum number of chunks to return.",
                "default": 5,
                "minimum": 1,
                "maximum": 25,
            },
        },
        "required": ["query", "dataset"],
    },
}
|
||||
|
||||
# Maps a lowercase file extension to the RAGFlow chunk_method used when a
# dataset is auto-created for that document type.
SUPPORTED_EXTENSIONS = {
    # Papers / word-processor documents
    ".pdf": "paper",
    ".doc": "paper",
    ".docx": "paper",
    # Slide decks
    ".ppt": "presentation",
    ".pptx": "presentation",
    # Images (parsed via OCR)
    ".png": "picture",
    ".jpg": "picture",
    ".jpeg": "picture",
    ".webp": "picture",
    ".bmp": "picture",
    ".tif": "picture",
    ".tiff": "picture",
    ".gif": "picture",
    # Plain text and markup
    ".txt": "naive",
    ".md": "naive",
    ".rst": "naive",
    ".html": "naive",
    ".htm": "naive",
    # Tabular data
    ".csv": "table",
    ".tsv": "table",
    # Config formats
    ".json": "naive",
    ".yaml": "naive",
    ".yml": "naive",
    ".toml": "naive",
    ".ini": "naive",
    # Source code (chunked as plain text)
    ".py": "naive",
    ".js": "naive",
    ".ts": "naive",
    ".tsx": "naive",
    ".jsx": "naive",
    ".java": "naive",
    ".go": "naive",
    ".rs": "naive",
    ".c": "naive",
    ".cc": "naive",
    ".cpp": "naive",
    ".h": "naive",
    ".hpp": "naive",
    ".rb": "naive",
    ".php": "naive",
    ".sql": "naive",
    ".sh": "naive",
}
|
||||
|
||||
|
||||
def _ragflow_base_url() -> str:
    """Base URL of the RAGFlow server (RAGFLOW_API_URL), without a trailing slash."""
    return os.getenv("RAGFLOW_API_URL", "http://localhost:9380").rstrip("/")


def _ragflow_headers(json_body: bool = True) -> dict[str, str]:
    """HTTP headers for RAGFlow calls: optional bearer auth plus JSON content type."""
    api_key = os.getenv("RAGFLOW_API_KEY", "").strip()
    headers: dict[str, str] = {"Authorization": f"Bearer {api_key}"} if api_key else {}
    if json_body:
        headers["Content-Type"] = "application/json"
    return headers


def _ragflow_check_requirements() -> bool:
    """Registry availability hook; the tools are always offered."""
    return True
|
||||
|
||||
|
||||
def _request_json(method: str, path: str, *, params=None, json_payload=None, files=None) -> dict[str, Any]:
    """Issue a RAGFlow API request and return the decoded JSON envelope.

    Raises on transport-level failures (via raise_for_status) and raises
    RuntimeError when the envelope's ``code`` field is non-zero.
    """
    response = requests.request(
        method,
        f"{_ragflow_base_url()}{path}",
        # File uploads must let requests set the multipart Content-Type itself.
        headers=_ragflow_headers(json_body=files is None),
        params=params,
        json=json_payload,
        files=files,
        timeout=120,
    )
    response.raise_for_status()
    payload = response.json()
    # RAGFlow wraps results as {"code": ..., "data": ...}; code 0 means success.
    if payload.get("code", 0) != 0:
        message = payload.get("message") or payload.get("error") or "RAGFlow request failed"
        raise RuntimeError(message)
    return payload
|
||||
|
||||
|
||||
def _is_probable_dataset_id(dataset: str) -> bool:
    """Heuristic: long, dash-separated alphanumeric strings look like RAGFlow ids."""
    compact = dataset.replace("-", "")
    if len(compact) < 16:
        return False
    return compact.isalnum()
|
||||
|
||||
|
||||
def _resolve_dataset(dataset: str) -> tuple[str, str] | None:
    """Look up a dataset by id or name; return (id, name) or None when absent."""
    dataset = dataset.strip()
    if not dataset:
        return None
    if _is_probable_dataset_id(dataset):
        params = {"id": dataset}
    else:
        params = {"name": dataset}
    payload = _request_json("GET", "/api/v1/datasets", params=params)
    matches = payload.get("data") or []
    if matches:
        first = matches[0]
        return first["id"], first.get("name", dataset)
    return None
|
||||
|
||||
|
||||
def _ensure_dataset(dataset: str, chunk_method: str) -> tuple[str, str]:
    """Return (id, name) for *dataset*, creating it with *chunk_method* if missing."""
    existing = _resolve_dataset(dataset)
    if existing is not None:
        return existing
    created = _request_json(
        "POST",
        "/api/v1/datasets",
        json_payload={"name": dataset, "chunk_method": chunk_method},
    )
    data = created.get("data") or {}
    return data["id"], data.get("name", dataset)
|
||||
|
||||
|
||||
def _prepare_document(document_url: str) -> tuple[Path, bool]:
    """Resolve *document_url* to a local file path.

    Returns (path, is_temporary): HTTP(S) sources are downloaded into a temp
    file the caller must delete; file:// URLs and plain paths are used in place.
    """
    parsed = urlparse(document_url)
    if parsed.scheme == "file":
        return Path(parsed.path), False
    if parsed.scheme not in {"http", "https"}:
        # Plain filesystem path, possibly with a leading "~".
        return Path(document_url).expanduser(), False
    response = requests.get(document_url, timeout=120)
    response.raise_for_status()
    suffix = Path(parsed.path).suffix or ".bin"
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(response.content)
        tmp.flush()
    return Path(tmp.name), True
|
||||
|
||||
|
||||
def _detect_chunk_method(path: Path) -> str:
    """Map *path*'s extension to a RAGFlow chunk_method, or raise ValueError."""
    extension = path.suffix.lower()
    try:
        return SUPPORTED_EXTENSIONS[extension]
    except KeyError:
        supported = ", ".join(sorted(SUPPORTED_EXTENSIONS))
        raise ValueError(f"Unsupported document type '{extension or path.name}'. Supported document types: {supported}") from None
|
||||
|
||||
|
||||
def _upload_document(dataset_id: str, path: Path) -> list[str]:
    """Upload *path* into the dataset and return the ids of the new documents."""
    content_type = mimetypes.guess_type(path.name)[0] or "application/octet-stream"
    with path.open("rb") as handle:
        payload = _request_json(
            "POST",
            f"/api/v1/datasets/{dataset_id}/documents",
            files=[("file", (path.name, handle, content_type))],
        )
    uploaded = payload.get("data") or []
    document_ids = [doc["id"] for doc in uploaded if doc.get("id")]
    if not document_ids:
        raise RuntimeError("RAGFlow upload did not return any document ids")
    return document_ids
|
||||
|
||||
|
||||
def ragflow_ingest_tool(document_url: str, dataset: str) -> str:
    """Ingest a document into RAGFlow and start parsing it.

    Resolves *document_url* (HTTP(S), file://, or local path) to a local file,
    ensures *dataset* exists (creating it with a chunk method inferred from the
    file extension), uploads the file, and triggers chunk parsing. Returns a
    JSON string: a success payload or an {"error": ...} payload.
    """
    local_path = None
    should_cleanup = False  # True only when we downloaded into a temp file
    try:
        local_path, should_cleanup = _prepare_document(document_url)
        if not local_path.exists():
            return tool_error(f"Document not found: {document_url}")
        # Raises ValueError for unsupported extensions (reported below).
        chunk_method = _detect_chunk_method(local_path)
        dataset_id, dataset_name = _ensure_dataset(dataset, chunk_method)
        document_ids = _upload_document(dataset_id, local_path)
        # Kick off parsing of the uploaded document(s).
        _request_json(
            "POST",
            f"/api/v1/datasets/{dataset_id}/chunks",
            json_payload={"document_ids": document_ids},
        )
        return tool_result(
            success=True,
            dataset_id=dataset_id,
            dataset_name=dataset_name,
            document_ids=document_ids,
            parse_started=True,
            chunk_method=chunk_method,
            source=document_url,
            filename=local_path.name,
        )
    except ValueError as exc:
        # Unsupported document type — report the message verbatim.
        return tool_error(str(exc))
    except Exception as exc:
        # Network/API failures are reported as tool errors, never raised.
        return tool_error(f"RAGFlow ingest failed: {exc}")
    finally:
        # Best-effort removal of any temp file created for a downloaded URL.
        if should_cleanup and local_path is not None:
            try:
                local_path.unlink(missing_ok=True)
            except Exception:
                pass
|
||||
|
||||
|
||||
def _normalize_chunks(chunks: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Project RAGFlow chunk records onto the stable fields Hermes exposes."""
    return [
        {
            "content": chunk.get("content", ""),
            "document_id": chunk.get("document_id", ""),
            "document_name": chunk.get("document_keyword", ""),
            "similarity": chunk.get("similarity"),
            "highlight": chunk.get("highlight", ""),
        }
        for chunk in chunks
    ]
|
||||
|
||||
|
||||
def ragflow_query_tool(query: str, dataset: str, limit: int = 5) -> str:
    """Retrieve up to *limit* relevant chunks for *query* from *dataset*.

    The dataset may be given by name or id. Returns a JSON string containing
    the normalized chunks, or an {"error": ...} payload on any failure.
    """
    try:
        resolved = _resolve_dataset(dataset)
        if not resolved:
            return tool_error(f"RAGFlow dataset not found: {dataset}")
        dataset_id, dataset_name = resolved
        payload = _request_json(
            "POST",
            "/api/v1/retrieval",
            json_payload={
                "question": query,
                "dataset_ids": [dataset_id],
                # Clamp to the accepted page-size range [1, 25].
                "page_size": max(1, min(int(limit), 25)),
                "highlight": True,
                "keyword": True,
            },
        )
        data = payload.get("data") or {}
        chunks = data.get("chunks") or []
        return tool_result(
            success=True,
            dataset_id=dataset_id,
            dataset_name=dataset_name,
            total=data.get("total", len(chunks)),
            chunks=_normalize_chunks(chunks),
        )
    except Exception as exc:
        # All failures (lookup, HTTP, envelope errors) become tool errors.
        return tool_error(f"RAGFlow query failed: {exc}")
|
||||
|
||||
|
||||
def _handle_ragflow_ingest(args, **_kwargs):
    """Registry adapter: unpack tool-call args for ragflow_ingest_tool."""
    document_url = args.get("document_url", "")
    dataset = args.get("dataset", "")
    return ragflow_ingest_tool(document_url=document_url, dataset=dataset)


def _handle_ragflow_query(args, **_kwargs):
    """Registry adapter: unpack tool-call args for ragflow_query_tool."""
    query = args.get("query", "")
    dataset = args.get("dataset", "")
    limit = args.get("limit", 5)
    return ragflow_query_tool(query=query, dataset=dataset, limit=limit)
|
||||
|
||||
|
||||
# Register both tools under the "web" toolset.
# NOTE(review): requires_env lists RAGFLOW_API_URL and RAGFLOW_API_KEY, yet the
# code treats both as optional (the URL defaults to http://localhost:9380 and
# the key may be empty) — confirm how the registry interprets requires_env.
registry.register(
    name="ragflow_ingest",
    toolset="web",
    schema=RAGFLOW_INGEST_SCHEMA,
    handler=_handle_ragflow_ingest,
    check_fn=_ragflow_check_requirements,
    requires_env=["RAGFLOW_API_URL", "RAGFLOW_API_KEY"],
    emoji="📚",
)

registry.register(
    name="ragflow_query",
    toolset="web",
    schema=RAGFLOW_QUERY_SCHEMA,
    handler=_handle_ragflow_query,
    check_fn=_ragflow_check_requirements,
    requires_env=["RAGFLOW_API_URL", "RAGFLOW_API_KEY"],
    emoji="🧠",
)
|
||||
Reference in New Issue
Block a user