Compare commits

1 Commits

| Author | SHA1 | Date |
|---|---|---|
| | a1d536826e | |

@@ -1,68 +0,0 @@
# RAGFlow integration

This repo-side slice adds:

- `tools/ragflow_tool.py`
  - `ragflow_ingest(document_url, dataset)`
  - `ragflow_query(query, dataset, limit=5)`
- `scripts/ragflow_bootstrap.py`
  - fetches the upstream RAGFlow Docker bundle
  - runs `docker compose --profile cpu up -d` or `gpu`

## Deployment

Bootstrap the upstream CPU stack locally:

```bash
python3 scripts/ragflow_bootstrap.py --profile cpu
```

Dry-run only:

```bash
python3 scripts/ragflow_bootstrap.py --profile cpu --dry-run
```

Fetch files without launching Docker:

```bash
python3 scripts/ragflow_bootstrap.py --no-up
```

Default bundle target:

- `~/.hermes/services/ragflow`

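The target directory and compose profile can be overridden via the script's flags. A quick illustration (the directory below is only a placeholder; any writable path works):

```bash
# Fetch the bundle into a custom directory and launch with the GPU profile.
python3 scripts/ragflow_bootstrap.py --target-dir /opt/ragflow --profile gpu
```
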
## Runtime configuration

Optional environment variables:

- `RAGFLOW_API_URL` — defaults to `http://localhost:9380`
- `RAGFLOW_API_KEY` — Bearer token for authenticated RAGFlow APIs

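For example, pointing the tools at a non-default RAGFlow instance might look like the following sketch (host and key values are placeholders):

```bash
export RAGFLOW_API_URL="http://ragflow.internal:9380"  # placeholder host
export RAGFLOW_API_KEY="<your-api-key>"                # only needed when the server enforces auth
```
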
## Supported document types

RAGFlow ingest accepts:

- PDF: `.pdf`
- Word: `.doc`, `.docx`
- Presentations: `.ppt`, `.pptx`
- Images via OCR: `.png`, `.jpg`, `.jpeg`, `.webp`, `.bmp`, `.tif`, `.tiff`, `.gif`
- Text and codebase documents: `.txt`, `.md`, `.rst`, `.html`, `.json`, `.yaml`, `.yml`, `.toml`, `.ini`, `.py`, `.js`, `.ts`, `.tsx`, `.jsx`, `.java`, `.go`, `.rs`, `.c`, `.cpp`, `.h`, `.hpp`, `.rb`, `.php`, `.sql`, `.sh`

## Example tool usage

```json
{"document_url":"https://arxiv.org/pdf/1706.03762.pdf","dataset":"research-papers"}
```

```json
{"query":"What does the paper say about attention heads?","dataset":"research-papers","limit":5}
```

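For debugging outside the tool layer, roughly the same retrieval request that `ragflow_query` issues can be sent directly. This is a sketch based on the tool's own payload, with `ds-1` standing in for a real dataset id:

```bash
# Sketch: mirrors the POST /api/v1/retrieval call made by ragflow_query.
curl -s -X POST "${RAGFLOW_API_URL:-http://localhost:9380}/api/v1/retrieval" \
  -H "Authorization: Bearer ${RAGFLOW_API_KEY}" \
  -H "Content-Type: application/json" \
  -d '{"question": "What does the paper say about attention heads?", "dataset_ids": ["ds-1"], "page_size": 5}'
```
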
## Use cases

- research papers
- technical documentation
- OCR-heavy image workflows
- ingested codebases and architecture docs

@@ -1,79 +0,0 @@
#!/usr/bin/env python3
"""Bootstrap an upstream RAGFlow Docker bundle for Hermes.

This script fetches the upstream RAGFlow docker bundle into a local directory
so operators can run `docker compose --profile cpu up -d` (or `gpu`) without
manually assembling the required files.
"""

from __future__ import annotations

import argparse
import subprocess
import urllib.request
from pathlib import Path

UPSTREAM_BASE = "https://raw.githubusercontent.com/infiniflow/ragflow/main/docker"
UPSTREAM_FILES = {
    "docker-compose.yml": f"{UPSTREAM_BASE}/docker-compose.yml",
    "docker-compose-base.yml": f"{UPSTREAM_BASE}/docker-compose-base.yml",
    ".env": f"{UPSTREAM_BASE}/.env",
    "service_conf.yaml.template": f"{UPSTREAM_BASE}/service_conf.yaml.template",
    "entrypoint.sh": f"{UPSTREAM_BASE}/entrypoint.sh",
}


def materialize_bundle(target_dir: str | Path, overwrite: bool = False) -> list[Path]:
    target = Path(target_dir).expanduser()
    target.mkdir(parents=True, exist_ok=True)
    written: list[Path] = []
    for name, url in UPSTREAM_FILES.items():
        dest = target / name
        if dest.exists() and not overwrite:
            written.append(dest)
            continue
        with urllib.request.urlopen(url, timeout=60) as response:
            dest.write_bytes(response.read())
        if name == "entrypoint.sh":
            dest.chmod(0o755)
        written.append(dest)
    return written


def build_compose_command(target_dir: str | Path, profile: str = "cpu") -> list[str]:
    return ["docker", "compose", "--profile", profile, "up", "-d"]


def run_compose(target_dir: str | Path, profile: str = "cpu", dry_run: bool = False) -> dict:
    target = Path(target_dir).expanduser()
    command = build_compose_command(target, profile=profile)
    if dry_run:
        return {"target_dir": str(target), "command": command, "executed": False}
    subprocess.run(command, cwd=target, check=True)
    return {"target_dir": str(target), "command": command, "executed": True}


def main(argv: list[str] | None = None) -> int:
    parser = argparse.ArgumentParser(description="Fetch and launch the upstream RAGFlow Docker bundle")
    parser.add_argument("--target-dir", default=str(Path.home() / ".hermes" / "services" / "ragflow"))
    parser.add_argument("--profile", choices=["cpu", "gpu"], default="cpu")
    parser.add_argument("--overwrite", action="store_true")
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--no-up", action="store_true", help="Only fetch bundle files; do not run docker compose")
    args = parser.parse_args(argv)

    written = materialize_bundle(args.target_dir, overwrite=args.overwrite)
    print(f"Fetched {len(written)} RAGFlow docker files into {Path(args.target_dir).expanduser()}")
    if args.no_up:
        return 0
    result = run_compose(args.target_dir, profile=args.profile, dry_run=args.dry_run)
    print("Command:", " ".join(result["command"]))
    if result["executed"]:
        print("RAGFlow docker stack launch requested.")
    else:
        print("Dry run only; docker compose not executed.")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
@@ -1,43 +0,0 @@
from __future__ import annotations

import importlib.util
import io
from pathlib import Path
from unittest.mock import patch


ROOT = Path(__file__).resolve().parent.parent
SCRIPT_PATH = ROOT / "scripts" / "ragflow_bootstrap.py"


def _load_module():
    spec = importlib.util.spec_from_file_location("ragflow_bootstrap", SCRIPT_PATH)
    module = importlib.util.module_from_spec(spec)
    assert spec.loader is not None
    spec.loader.exec_module(module)
    return module


def test_materialize_bundle_downloads_required_upstream_artifacts(tmp_path):
    module = _load_module()

    def fake_urlopen(url, timeout=0):
        name = url.rsplit("/", 1)[-1]
        return io.BytesIO(f"# fetched {name}\n".encode())

    with patch.object(module.urllib.request, "urlopen", side_effect=fake_urlopen):
        written = module.materialize_bundle(tmp_path)

    assert (tmp_path / "docker-compose.yml").exists()
    assert (tmp_path / "docker-compose-base.yml").exists()
    assert (tmp_path / ".env").exists()
    assert any(path.name == "entrypoint.sh" for path in written)


def test_build_compose_command_respects_profile_and_directory(tmp_path):
    module = _load_module()

    command = module.build_compose_command(tmp_path, profile="gpu")

    assert command[:4] == ["docker", "compose", "--profile", "gpu"]
    assert command[-2:] == ["up", "-d"]
@@ -1,122 +0,0 @@
from __future__ import annotations

import importlib
import json
import sys
from pathlib import Path
from unittest.mock import patch

from tools.registry import registry


class _Response:
    def __init__(self, payload: dict, status_code: int = 200):
        self._payload = payload
        self.status_code = status_code
        self.text = json.dumps(payload)

    def json(self):
        return self._payload

    def raise_for_status(self):
        if self.status_code >= 400:
            raise RuntimeError(f"HTTP {self.status_code}")


def _reload_module():
    registry.deregister("ragflow_ingest")
    registry.deregister("ragflow_query")
    sys.modules.pop("tools.ragflow_tool", None)
    module = importlib.import_module("tools.ragflow_tool")
    return importlib.reload(module)


def test_ragflow_tools_register_and_support_document_formats():
    module = _reload_module()

    assert registry.get_entry("ragflow_ingest") is not None
    assert registry.get_entry("ragflow_query") is not None
    assert ".pdf" in module.SUPPORTED_EXTENSIONS
    assert ".docx" in module.SUPPORTED_EXTENSIONS
    assert ".png" in module.SUPPORTED_EXTENSIONS
    assert ".md" in module.SUPPORTED_EXTENSIONS


def test_ragflow_ingest_creates_dataset_uploads_and_starts_parse(tmp_path):
    module = _reload_module()
    document = tmp_path / "paper.pdf"
    document.write_bytes(b"%PDF-1.7\n")
    calls: list[tuple[str, str, dict | None, dict | None]] = []

    def fake_request(method, url, *, headers=None, params=None, json=None, files=None, timeout=None):
        calls.append((method, url, params, json))
        if method == "GET" and url.endswith("/api/v1/datasets"):
            return _Response({"code": 0, "data": []})
        if method == "POST" and url.endswith("/api/v1/datasets"):
            assert json["name"] == "research-papers"
            assert json["chunk_method"] == "paper"
            return _Response({"code": 0, "data": {"id": "ds-1", "name": "research-papers"}})
        if method == "POST" and url.endswith("/api/v1/datasets/ds-1/documents"):
            assert files and files[0][0] == "file"
            return _Response({"code": 0, "data": [{"id": "doc-1", "name": "paper.pdf"}]})
        if method == "POST" and url.endswith("/api/v1/datasets/ds-1/chunks"):
            assert json == {"document_ids": ["doc-1"]}
            return _Response({"code": 0})
        raise AssertionError(f"Unexpected request: {method} {url}")

    with patch("tools.ragflow_tool.requests.request", side_effect=fake_request):
        result = json.loads(module.ragflow_ingest_tool(str(document), dataset="research-papers"))

    assert result["dataset_id"] == "ds-1"
    assert result["document_ids"] == ["doc-1"]
    assert result["parse_started"] is True
    assert result["chunk_method"] == "paper"
    assert calls[0][0] == "GET"


def test_ragflow_query_retrieves_chunks_for_named_dataset():
    module = _reload_module()

    def fake_request(method, url, *, headers=None, params=None, json=None, files=None, timeout=None):
        if method == "GET" and url.endswith("/api/v1/datasets"):
            assert params == {"name": "tech-docs"}
            return _Response({"code": 0, "data": [{"id": "ds-9", "name": "tech-docs"}]})
        if method == "POST" and url.endswith("/api/v1/retrieval"):
            assert json["question"] == "How does parsing work?"
            assert json["dataset_ids"] == ["ds-9"]
            assert json["page_size"] == 2
            return _Response(
                {
                    "code": 0,
                    "data": {
                        "chunks": [
                            {
                                "content": "Parsing starts by uploading documents.",
                                "document_id": "doc-9",
                                "document_keyword": "guide.md",
                                "similarity": 0.98,
                            }
                        ],
                        "total": 1,
                    },
                }
            )
        raise AssertionError(f"Unexpected request: {method} {url}")

    with patch("tools.ragflow_tool.requests.request", side_effect=fake_request):
        result = json.loads(module.ragflow_query_tool("How does parsing work?", "tech-docs", limit=2))

    assert result["dataset_id"] == "ds-9"
    assert result["total"] == 1
    assert result["chunks"][0]["content"] == "Parsing starts by uploading documents."


def test_ragflow_ingest_rejects_unsupported_document_types(tmp_path):
    module = _reload_module()
    document = tmp_path / "binary.exe"
    document.write_bytes(b"MZ")

    result = json.loads(module.ragflow_ingest_tool(str(document), dataset="ignored"))

    assert "error" in result
    assert "Unsupported document type" in result["error"]
@@ -308,12 +308,12 @@ word word
        content = """\
---
name: test-skill
description: A test skill.
description: A test skill with enough content to pass the minimum length validation check of one hundred characters.
---

# Test

word word
word word word word word word word word word word
"""
        with _skill_dir(tmp_path):
            _create_skill("my-skill", content)
@@ -484,3 +484,185 @@ class TestSkillManageDispatcher:
        raw = skill_manage(action="create", name="test-skill", content=VALID_SKILL_CONTENT)
        result = json.loads(raw)
        assert result["success"] is True


class TestPokaYokeValidation:
    """Tests for poka-yoke auto-revert functionality (#837)."""

    def test_short_skill_md_reverts(self, tmp_path):
        """SKILL.md shorter than 100 chars should be reverted."""
        short_content = """---
name: test-skill
description: Test
---

Short
"""
        with _skill_dir(tmp_path):
            _create_skill("my-skill", VALID_SKILL_CONTENT)
            result = _edit_skill("my-skill", short_content)

            assert result["success"] is False
            assert "too short" in result["error"].lower()

            # Verify the original file is preserved
            skill_md = tmp_path / "my-skill" / "SKILL.md"
            content = skill_md.read_text()
            assert "test-skill" in content  # Original content preserved

    def test_truncated_skill_reverts(self, tmp_path):
        """Truncated YAML frontmatter should be reverted."""
        truncated = """---
name: test-skill
description: Test skill with enough content to pass minimum length validation check.
---

# Test

This is a longer body section with plenty of text to ensure the content exceeds the minimum one hundred character requirement for SKILL.md files.
"""
        # Chop it off to simulate truncation
        truncated = truncated[:80]

        with _skill_dir(tmp_path):
            _create_skill("my-skill", VALID_SKILL_CONTENT)
            result = _edit_skill("my-skill", truncated)

            assert result["success"] is False

    def test_linked_files_validation(self, tmp_path):
        """Missing linked_files should cause revert."""
        content_with_links = """---
name: test-skill
description: Test skill with enough content to pass minimum length validation check.
linked_files:
  - references/nonexistent.md
---

# Test

This is a longer body section with plenty of text to ensure the content exceeds the minimum one hundred character requirement for SKILL.md files.
"""
        with _skill_dir(tmp_path):
            _create_skill("my-skill", VALID_SKILL_CONTENT)
            result = _edit_skill("my-skill", content_with_links)

            assert result["success"] is False
            assert "linked files missing" in result["error"].lower()

    def test_valid_linked_files_pass(self, tmp_path):
        """Existing linked_files should pass validation."""
        content_with_links = """---
name: test-skill
description: Test skill with enough content to pass minimum length validation check.
linked_files:
  - references/exists.md
---

# Test

This is a longer body section with plenty of text to ensure the content exceeds the minimum one hundred character requirement for SKILL.md files.
"""
        with _skill_dir(tmp_path):
            _create_skill("my-skill", VALID_SKILL_CONTENT)
            # Create the linked file
            ref_dir = tmp_path / "my-skill" / "references"
            ref_dir.mkdir(parents=True, exist_ok=True)
            (ref_dir / "exists.md").write_text("# Reference")

            result = _edit_skill("my-skill", content_with_links)

            assert result["success"] is True


class TestHistoryRegistry:
    """Tests for history registry functionality (#837)."""

    def test_history_saved_on_edit(self, tmp_path):
        """Editing a skill should save the original to history."""
        with _skill_dir(tmp_path):
            _create_skill("my-skill", VALID_SKILL_CONTENT)

            # Make an edit
            new_content = """---
name: test-skill
description: Updated description that is longer than one hundred characters to pass validation.
---

# Updated Test

This body has more content to ensure it passes the minimum length check of one hundred characters.
"""
            result = _edit_skill("my-skill", new_content)
            assert result["success"] is True

            # Check history was saved
            history_dir = tmp_path / ".history" / "my-skill"
            assert history_dir.exists()
            history_files = list(history_dir.glob("*.md"))
            assert len(history_files) == 1

    def test_history_pruned_to_three(self, tmp_path):
        """Only last 3 history versions should be kept."""
        from tools.skill_manager_tool import _save_to_history

        with _skill_dir(tmp_path):
            _create_skill("my-skill", VALID_SKILL_CONTENT)

            # Save 5 versions to history
            for i in range(5):
                content = f"""---
name: test-skill
description: Version {i} that is long enough to pass minimum length validation check of one hundred characters.
---

# Version {i}

This is the body content for version {i} that ensures we meet the minimum length requirement.
"""
                _save_to_history("my-skill", content, timestamp=1000 + i)

            # Check only 3 history files remain
            history_dir = tmp_path / ".history" / "my-skill"
            history_files = sorted(history_dir.glob("*.md"))
            assert len(history_files) == 3
            # Should be the last 3 (timestamps 1002, 1003, 1004)
            assert "1002" in str(history_files[0])

    def test_revert_to_history(self, tmp_path):
        """Should be able to revert to a history version."""
        from tools.skill_manager_tool import _revert_to_history, _get_history_versions

        with _skill_dir(tmp_path):
            _create_skill("my-skill", VALID_SKILL_CONTENT)
            skill_md = tmp_path / "my-skill" / "SKILL.md"

            # Save original to history
            original = skill_md.read_text()
            from tools.skill_manager_tool import _save_to_history
            _save_to_history("my-skill", original)

            # Edit the skill
            new_content = """---
name: test-skill
description: Updated description that is longer than one hundred characters to pass validation.
---

# Updated

This body has more content to ensure it passes the minimum length check of one hundred characters.
"""
            _edit_skill("my-skill", new_content)

            # Verify edit was applied
            assert "Updated" in skill_md.read_text()

            # Revert to history
            error = _revert_to_history("my-skill", skill_md, version=0)
            assert error is None

            # Verify revert worked
            content = skill_md.read_text()
            assert "test-skill" in content
            assert "A test skill" in content
@@ -1,344 +0,0 @@
#!/usr/bin/env python3
"""RAGFlow tool integration for document understanding.

Provides two tools:
- ragflow_ingest(document_url, dataset): upload and parse a document into RAGFlow
- ragflow_query(query, dataset): retrieve relevant chunks from a dataset

Default deployment target is a local RAGFlow server on http://localhost:9380.
"""

from __future__ import annotations

import json
import mimetypes
import os
import tempfile
from pathlib import Path
from typing import Any
from urllib.parse import urlparse

import requests

from tools.registry import registry, tool_error, tool_result

RAGFLOW_INGEST_SCHEMA = {
    "name": "ragflow_ingest",
    "description": (
        "Upload a document into a RAGFlow dataset, creating the dataset if needed, "
        "then trigger parsing so Hermes can query the content later. Supports PDF, "
        "Word, images via OCR, plus text and code documents."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "document_url": {
                "type": "string",
                "description": "HTTP(S) URL, file:// URL, or local filesystem path to the document.",
            },
            "dataset": {
                "type": "string",
                "description": "Dataset name or id to ingest into. Created automatically when absent.",
            },
        },
        "required": ["document_url", "dataset"],
    },
}

RAGFLOW_QUERY_SCHEMA = {
    "name": "ragflow_query",
    "description": (
        "Query a RAGFlow dataset for relevant chunks. Useful for research papers, "
        "technical docs, OCR-processed images, and ingested codebase documents."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Question or search query to run against RAGFlow.",
            },
            "dataset": {
                "type": "string",
                "description": "Dataset name or id to search.",
            },
            "limit": {
                "type": "integer",
                "description": "Maximum number of chunks to return.",
                "default": 5,
                "minimum": 1,
                "maximum": 25,
            },
        },
        "required": ["query", "dataset"],
    },
}

SUPPORTED_EXTENSIONS = {
    ".pdf": "paper",
    ".doc": "paper",
    ".docx": "paper",
    ".ppt": "presentation",
    ".pptx": "presentation",
    ".png": "picture",
    ".jpg": "picture",
    ".jpeg": "picture",
    ".webp": "picture",
    ".bmp": "picture",
    ".tif": "picture",
    ".tiff": "picture",
    ".gif": "picture",
    ".txt": "naive",
    ".md": "naive",
    ".rst": "naive",
    ".html": "naive",
    ".htm": "naive",
    ".csv": "table",
    ".tsv": "table",
    ".json": "naive",
    ".yaml": "naive",
    ".yml": "naive",
    ".toml": "naive",
    ".ini": "naive",
    ".py": "naive",
    ".js": "naive",
    ".ts": "naive",
    ".tsx": "naive",
    ".jsx": "naive",
    ".java": "naive",
    ".go": "naive",
    ".rs": "naive",
    ".c": "naive",
    ".cc": "naive",
    ".cpp": "naive",
    ".h": "naive",
    ".hpp": "naive",
    ".rb": "naive",
    ".php": "naive",
    ".sql": "naive",
    ".sh": "naive",
}


def _ragflow_base_url() -> str:
    return os.getenv("RAGFLOW_API_URL", "http://localhost:9380").rstrip("/")


def _ragflow_headers(json_body: bool = True) -> dict[str, str]:
    headers: dict[str, str] = {}
    api_key = os.getenv("RAGFLOW_API_KEY", "").strip()
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"
    if json_body:
        headers["Content-Type"] = "application/json"
    return headers


def _ragflow_check_requirements() -> bool:
    return True


def _request_json(method: str, path: str, *, params=None, json_payload=None, files=None) -> dict[str, Any]:
    response = requests.request(
        method,
        f"{_ragflow_base_url()}{path}",
        headers=_ragflow_headers(json_body=files is None),
        params=params,
        json=json_payload,
        files=files,
        timeout=120,
    )
    response.raise_for_status()
    payload = response.json()
    if payload.get("code", 0) != 0:
        message = payload.get("message") or payload.get("error") or "RAGFlow request failed"
        raise RuntimeError(message)
    return payload


def _is_probable_dataset_id(dataset: str) -> bool:
    compact = dataset.replace("-", "")
    return len(compact) >= 16 and all(ch.isalnum() for ch in compact)


def _resolve_dataset(dataset: str) -> tuple[str, str] | None:
    dataset = dataset.strip()
    if not dataset:
        return None
    params = {"id": dataset} if _is_probable_dataset_id(dataset) else {"name": dataset}
    payload = _request_json("GET", "/api/v1/datasets", params=params)
    data = payload.get("data") or []
    if not data:
        return None
    match = data[0]
    return match["id"], match.get("name", dataset)


def _ensure_dataset(dataset: str, chunk_method: str) -> tuple[str, str]:
    resolved = _resolve_dataset(dataset)
    if resolved:
        return resolved
    payload = _request_json(
        "POST",
        "/api/v1/datasets",
        json_payload={"name": dataset, "chunk_method": chunk_method},
    )
    data = payload.get("data") or {}
    return data["id"], data.get("name", dataset)


def _prepare_document(document_url: str) -> tuple[Path, bool]:
    parsed = urlparse(document_url)
    if parsed.scheme in {"http", "https"}:
        response = requests.get(document_url, timeout=120)
        response.raise_for_status()
        suffix = Path(parsed.path).suffix or ".bin"
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
        tmp.write(response.content)
        tmp.flush()
        tmp.close()
        return Path(tmp.name), True
    if parsed.scheme == "file":
        return Path(parsed.path), False
    return Path(document_url).expanduser(), False


def _detect_chunk_method(path: Path) -> str:
    extension = path.suffix.lower()
    if extension not in SUPPORTED_EXTENSIONS:
        supported = ", ".join(sorted(SUPPORTED_EXTENSIONS))
        raise ValueError(f"Unsupported document type '{extension or path.name}'. Supported document types: {supported}")
    return SUPPORTED_EXTENSIONS[extension]


def _upload_document(dataset_id: str, path: Path) -> list[str]:
    mime = mimetypes.guess_type(path.name)[0] or "application/octet-stream"
    with path.open("rb") as handle:
        payload = _request_json(
            "POST",
            f"/api/v1/datasets/{dataset_id}/documents",
            files=[("file", (path.name, handle, mime))],
        )
    documents = payload.get("data") or []
    ids = [item["id"] for item in documents if item.get("id")]
    if not ids:
        raise RuntimeError("RAGFlow upload did not return any document ids")
    return ids


def ragflow_ingest_tool(document_url: str, dataset: str) -> str:
    local_path = None
    should_cleanup = False
    try:
        local_path, should_cleanup = _prepare_document(document_url)
        if not local_path.exists():
            return tool_error(f"Document not found: {document_url}")
        chunk_method = _detect_chunk_method(local_path)
        dataset_id, dataset_name = _ensure_dataset(dataset, chunk_method)
        document_ids = _upload_document(dataset_id, local_path)
        _request_json(
            "POST",
            f"/api/v1/datasets/{dataset_id}/chunks",
            json_payload={"document_ids": document_ids},
        )
        return tool_result(
            success=True,
            dataset_id=dataset_id,
            dataset_name=dataset_name,
            document_ids=document_ids,
            parse_started=True,
            chunk_method=chunk_method,
            source=document_url,
            filename=local_path.name,
        )
    except ValueError as exc:
        return tool_error(str(exc))
    except Exception as exc:
        return tool_error(f"RAGFlow ingest failed: {exc}")
    finally:
        if should_cleanup and local_path is not None:
            try:
                local_path.unlink(missing_ok=True)
            except Exception:
                pass


def _normalize_chunks(chunks: list[dict[str, Any]]) -> list[dict[str, Any]]:
    normalized = []
    for chunk in chunks:
        normalized.append(
            {
                "content": chunk.get("content", ""),
                "document_id": chunk.get("document_id", ""),
                "document_name": chunk.get("document_keyword", ""),
                "similarity": chunk.get("similarity"),
                "highlight": chunk.get("highlight", ""),
            }
        )
    return normalized


def ragflow_query_tool(query: str, dataset: str, limit: int = 5) -> str:
    try:
        resolved = _resolve_dataset(dataset)
        if not resolved:
            return tool_error(f"RAGFlow dataset not found: {dataset}")
        dataset_id, dataset_name = resolved
        payload = _request_json(
            "POST",
            "/api/v1/retrieval",
            json_payload={
                "question": query,
                "dataset_ids": [dataset_id],
                "page_size": max(1, min(int(limit), 25)),
                "highlight": True,
                "keyword": True,
            },
        )
        data = payload.get("data") or {}
        chunks = data.get("chunks") or []
        return tool_result(
            success=True,
            dataset_id=dataset_id,
            dataset_name=dataset_name,
            total=data.get("total", len(chunks)),
            chunks=_normalize_chunks(chunks),
        )
    except Exception as exc:
        return tool_error(f"RAGFlow query failed: {exc}")


def _handle_ragflow_ingest(args, **_kwargs):
    return ragflow_ingest_tool(
        document_url=args.get("document_url", ""),
        dataset=args.get("dataset", ""),
    )


def _handle_ragflow_query(args, **_kwargs):
    return ragflow_query_tool(
        query=args.get("query", ""),
        dataset=args.get("dataset", ""),
        limit=args.get("limit", 5),
    )


registry.register(
    name="ragflow_ingest",
    toolset="web",
    schema=RAGFLOW_INGEST_SCHEMA,
    handler=_handle_ragflow_ingest,
    check_fn=_ragflow_check_requirements,
    requires_env=["RAGFLOW_API_URL", "RAGFLOW_API_KEY"],
    emoji="📚",
)

registry.register(
    name="ragflow_query",
    toolset="web",
    schema=RAGFLOW_QUERY_SCHEMA,
    handler=_handle_ragflow_query,
    check_fn=_ragflow_check_requirements,
    requires_env=["RAGFLOW_API_URL", "RAGFLOW_API_KEY"],
    emoji="🧠",
)
@@ -322,12 +322,112 @@ def _cleanup_old_backups(file_path: Path, max_backups: int = MAX_BACKUPS_PER_FIL
            break


# History registry for rollback (#837)
MAX_HISTORY_VERSIONS = 3


def _history_dir_for_skill(skill_name: str) -> Path:
    """Return the history directory path for a skill."""
    return SKILLS_DIR / ".history" / skill_name


def _save_to_history(skill_name: str, content: str, timestamp: Optional[int] = None) -> Optional[Path]:
    """Save a version of the skill to the history registry.

    History is stored in ~/.hermes/skills/.history/<skill-name>/<timestamp>.md
    Keeps the last MAX_HISTORY_VERSIONS versions.

    Returns the path to the saved history file, or None if not saved.
    """
    if timestamp is None:
        timestamp = int(time.time())

    history_dir = _history_dir_for_skill(skill_name)
    history_dir.mkdir(parents=True, exist_ok=True)

    history_file = history_dir / f"{timestamp}.md"
    _atomic_write_text(history_file, content)

    # Clean up old history versions
    _cleanup_history(skill_name)

    return history_file


def _cleanup_history(skill_name: str, max_versions: int = MAX_HISTORY_VERSIONS) -> None:
    """Prune old history versions, keeping only the most recent max_versions."""
    history_dir = _history_dir_for_skill(skill_name)
    if not history_dir.exists():
        return

    try:
        # Get all history files sorted by modification time (oldest first)
        history_files = sorted(
            [f for f in history_dir.iterdir() if f.suffix == '.md' and f.is_file()],
            key=lambda p: p.stat().st_mtime,
        )
    except OSError:
        return

    # Remove oldest files if we have more than max_versions
    while len(history_files) > max_versions:
        try:
            history_files.pop(0).unlink()
        except OSError:
            break


def _get_history_versions(skill_name: str) -> List[Path]:
    """Get list of history versions for a skill, newest first."""
    history_dir = _history_dir_for_skill(skill_name)
    if not history_dir.exists():
        return []

    try:
        return sorted(
            [f for f in history_dir.iterdir() if f.suffix == '.md' and f.is_file()],
            key=lambda p: p.stat().st_mtime,
            reverse=True,
        )
    except OSError:
        return []


def _revert_to_history(skill_name: str, skill_md_path: Path, version: int = 0) -> Optional[str]:
    """Revert a skill to a previous history version.

    Args:
        skill_name: Name of the skill
        skill_md_path: Path to the current SKILL.md
        version: Which history version to revert to (0 = most recent, 1 = second most recent, etc.)

    Returns:
        Error message if revert failed, None if successful
    """
    history_versions = _get_history_versions(skill_name)
    if not history_versions:
        return "No history versions available to revert to."

    if version >= len(history_versions):
        return f"History version {version} not found (only {len(history_versions)} versions available)."

    target_version = history_versions[version]

    try:
        content = target_version.read_text(encoding="utf-8")
        _atomic_write_text(skill_md_path, content)
        return None
    except Exception as exc:
        return f"Failed to revert to history version: {exc}"


def _validate_written_file(file_path: Path, is_skill_md: bool = False) -> Optional[str]:
    """Re-read a file from disk and validate it after writing.

    Catches filesystem-level issues (truncation, encoding errors, empty
    writes) that pre-write validation cannot detect. For SKILL.md files
    the frontmatter is also re-validated.
    the frontmatter is also re-validated and linked_files are verified.

    Returns an error message, or *None* if the file looks healthy.
    """
@@ -341,11 +441,69 @@ def _validate_written_file(file_path: Path, is_skill_md: bool = False) -> Option
    if len(content) == 0:
        return "File is empty after write (possible truncation)."

    # Minimum content length check for SKILL.md only (#837)
    if is_skill_md and len(content) < 100:
        return f"SKILL.md is too short after write ({len(content)} chars, minimum 100)."

    if is_skill_md:
        err = _validate_frontmatter(content)
        if err:
            return f"Post-write validation failed: {err}"

        # Verify linked_files exist (#837)
        err = _validate_linked_files(content, file_path.parent)
        if err:
            return f"Post-write validation failed: {err}"

    return None


def _validate_linked_files(content: str, skill_dir: Path) -> Optional[str]:
    """Validate that all files referenced in linked_files exist.

    Parses the SKILL.md frontmatter and checks that any linked_files
    entries point to files that actually exist in the skill directory.

    Returns an error message, or *None* if all linked files exist.
    """
    if not content.startswith("---"):
        return None

    end_match = re.search(r'\n---\s*\n', content[3:])
    if not end_match:
        return None

    yaml_content = content[3:end_match.start() + 3]
    try:
        parsed = yaml.safe_load(yaml_content)
    except yaml.YAMLError:
        return None

    if not isinstance(parsed, dict):
        return None

    linked_files = parsed.get("linked_files", [])
    if not linked_files:
        return None

    missing = []
    for lf in linked_files:
        if isinstance(lf, dict):
            file_ref = lf.get("file") or lf.get("path", "")
        elif isinstance(lf, str):
            file_ref = lf
        else:
            continue

        if file_ref:
            # Resolve relative to skill directory
            target = skill_dir / file_ref
            if not target.exists():
                missing.append(file_ref)

    if missing:
        return f"Linked files missing: {', '.join(missing)}"

    return None
@@ -483,6 +641,13 @@ def _edit_skill(name: str, content: str) -> Dict[str, Any]:
    skill_md = existing["path"] / "SKILL.md"

    # Save original to history before modification (#837)
    try:
        original_content = skill_md.read_text(encoding="utf-8")
        _save_to_history(name, original_content)
    except (OSError, UnicodeDecodeError):
        pass  # If we can't read original, proceed without history

    # --- Transactional write-validate-commit-or-rollback ---
    backup_path = _backup_skill_file(skill_md)
    _atomic_write_text(skill_md, content)
@@ -598,6 +763,14 @@ def _patch_skill(
    is_skill_md = not file_path

    # Save original to history when patching SKILL.md (#837)
    if is_skill_md:
        try:
            original_content = target.read_text(encoding="utf-8")
            _save_to_history(name, original_content)
        except (OSError, UnicodeDecodeError):
            pass

    # --- Transactional write-validate-commit-or-rollback ---
    backup_path = _backup_skill_file(target)
    _atomic_write_text(target, new_content)