Compare commits
2 Commits
| Author | SHA1 | Date |
|---|---|---|
| | eb16a6671e | |
| | 18998b60c3 | |
agent/hallucination_metrics.py (deleted)
@@ -1,281 +0,0 @@
"""
Hallucination Metrics — Persistent logging and alerting for tool hallucinations.

Logs tool hallucination events to a JSONL file and provides aggregated statistics.
Integrates with the poka-yoke validation system.

Usage:
    from agent.hallucination_metrics import log_hallucination_event, get_hallucination_stats

    log_hallucination_event("invalid_tool", "unknown_tool", "suggested_correct_name")
    stats = get_hallucination_stats()
"""

import json
import logging
import os
import time
from collections import defaultdict
from datetime import datetime, timezone
from pathlib import Path
from threading import Lock
from typing import Any, Dict, List, Optional, Tuple

from hermes_constants import get_hermes_home

logger = logging.getLogger(__name__)

# Constants
METRICS_FILE_NAME = "hallucination_metrics.jsonl"
ALERT_THRESHOLD = 10  # Alert after this many consecutive failures for a tool
SESSION_WINDOW_HOURS = 24  # Consider events within this window as "session"

# In-memory cache for fast lookups
_cache: Dict[str, Any] = {"events": [], "last_flush": 0, "session_counts": defaultdict(int)}
_cache_lock = Lock()


def _get_metrics_path() -> Path:
    """Return the path to the hallucination metrics file."""
    return get_hermes_home() / "metrics" / METRICS_FILE_NAME


def _ensure_metrics_dir():
    """Ensure the metrics directory exists."""
    metrics_dir = _get_metrics_path().parent
    metrics_dir.mkdir(parents=True, exist_ok=True)


def log_hallucination_event(
    tool_name: str,
    error_type: str = "unknown_tool",
    suggested_name: Optional[str] = None,
    validation_messages: Optional[List[str]] = None,
    session_id: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Log a hallucination event to the metrics file.

    Args:
        tool_name: The hallucinated tool name
        error_type: Type of error (unknown_tool, invalid_params, etc.)
        suggested_name: Suggested correction if available
        validation_messages: List of validation error messages
        session_id: Optional session identifier for grouping

    Returns:
        The logged event dict with additional metadata
    """
    event = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "tool_name": tool_name,
        "error_type": error_type,
        "suggested_name": suggested_name,
        "validation_messages": validation_messages or [],
        "session_id": session_id,
        "unix_timestamp": time.time(),
    }

    # Write to file
    _ensure_metrics_dir()
    metrics_path = _get_metrics_path()

    try:
        with open(metrics_path, "a", encoding="utf-8") as f:
            f.write(json.dumps(event, ensure_ascii=False) + "\n")
    except Exception as e:
        logger.warning(f"Failed to write hallucination event: {e}")

    # Update in-memory cache
    with _cache_lock:
        _cache["events"].append(event)
        _cache["session_counts"][tool_name] += 1
        session_count = _cache["session_counts"][tool_name]

    # Check alert threshold
    if session_count >= ALERT_THRESHOLD:
        logger.warning(
            f"HALLUCINATION ALERT: Tool '{tool_name}' has failed {session_count} times "
            f"in this session (threshold: {ALERT_THRESHOLD}). "
            f"This may indicate a persistent hallucination pattern."
        )

    return event


def _load_events_from_file() -> List[Dict[str, Any]]:
    """Load all events from the metrics file."""
    metrics_path = _get_metrics_path()
    if not metrics_path.exists():
        return []

    events = []
    try:
        with open(metrics_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                    try:
                        events.append(json.loads(line))
                    except json.JSONDecodeError:
                        continue
    except Exception as e:
        logger.warning(f"Failed to load hallucination events: {e}")

    return events


def get_hallucination_stats(
    hours: Optional[int] = None,
    tool_name: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Get aggregated hallucination statistics.

    Args:
        hours: Only consider events from the last N hours (None = all time)
        tool_name: Filter to specific tool name (None = all tools)

    Returns:
        Dict with aggregated statistics
    """
    events = _load_events_from_file()

    # Filter by time window
    if hours is not None:
        cutoff = time.time() - (hours * 3600)
        events = [e for e in events if e.get("unix_timestamp", 0) >= cutoff]

    # Filter by tool name
    if tool_name is not None:
        events = [e for e in events if e.get("tool_name") == tool_name]

    # Aggregate by tool
    tool_counts: Dict[str, Dict[str, Any]] = defaultdict(
        lambda: {"count": 0, "suggested_names": [], "error_types": defaultdict(int)}
    )

    for event in events:
        name = event.get("tool_name", "unknown")
        tool_counts[name]["count"] += 1
        if event.get("suggested_name"):
            tool_counts[name]["suggested_names"].append(event["suggested_name"])
        if event.get("error_type"):
            tool_counts[name]["error_types"][event["error_type"]] += 1

    # Find most common suggestions per tool
    for name, data in tool_counts.items():
        suggestions = data["suggested_names"]
        if suggestions:
            from collections import Counter
            most_common = Counter(suggestions).most_common(1)[0]
            data["most_common_suggestion"] = most_common[0]
            data["suggestion_count"] = most_common[1]
        del data["suggested_names"]  # Remove raw list from output

    # Calculate time-based stats
    if events:
        first_event = min(e.get("unix_timestamp", 0) for e in events)
        last_event = max(e.get("unix_timestamp", 0) for e in events)
        time_span_hours = (last_event - first_event) / 3600 if first_event != last_event else 0
    else:
        time_span_hours = 0

    # Error type breakdown
    all_error_types: Dict[str, int] = defaultdict(int)
    for event in events:
        et = event.get("error_type", "unknown")
        all_error_types[et] += 1

    return {
        "total_events": len(events),
        "unique_tools": len(tool_counts),
        "time_span_hours": round(time_span_hours, 1),
        "top_hallucinated_tools": sorted(
            [{"tool": k, **v} for k, v in tool_counts.items()],
            key=lambda x: -x["count"]
        )[:20],
        "error_type_breakdown": dict(all_error_types),
        "alert_threshold": ALERT_THRESHOLD,
        "session_window_hours": SESSION_WINDOW_HOURS,
    }


def get_most_hallucinated_tools(n: int = 10) -> List[Tuple[str, int]]:
    """Get the top N most frequently hallucinated tool names."""
    stats = get_hallucination_stats()
    tools = stats.get("top_hallucinated_tools", [])
    return [(t["tool"], t["count"]) for t in tools[:n]]


def clear_metrics(older_than_hours: Optional[int] = None) -> int:
    """
    Clear hallucination metrics.

    Args:
        older_than_hours: Only clear events older than this many hours (None = clear all)

    Returns:
        Number of events removed
    """
    metrics_path = _get_metrics_path()
    if not metrics_path.exists():
        return 0

    if older_than_hours is None:
        # Clear all
        count = len(_load_events_from_file())
        metrics_path.unlink(missing_ok=True)
        with _cache_lock:
            _cache["events"].clear()
            _cache["session_counts"].clear()
        return count

    # Clear only old events
    cutoff = time.time() - (older_than_hours * 3600)
    events = _load_events_from_file()
    keep = [e for e in events if e.get("unix_timestamp", 0) >= cutoff]
    removed = len(events) - len(keep)

    # Rewrite file
    _ensure_metrics_dir()
    with open(metrics_path, "w", encoding="utf-8") as f:
        for event in keep:
            f.write(json.dumps(event, ensure_ascii=False) + "\n")

    return removed


def format_stats_for_display(stats: Dict[str, Any]) -> str:
    """Format statistics as a human-readable string."""
    lines = [
        "=== Hallucination Metrics ===",
        "",
        f"Total events: {stats['total_events']}",
        f"Unique tools hallucinated: {stats['unique_tools']}",
        f"Time span: {stats['time_span_hours']:.1f} hours",
        "",
        "Top Hallucinated Tools:",
        "-" * 40,
    ]

    for tool in stats.get("top_hallucinated_tools", [])[:10]:
        lines.append(f"  {tool['tool']:<30} {tool['count']:>5} events")
        if "most_common_suggestion" in tool:
            lines.append(f"    → Suggested: {tool['most_common_suggestion']} ({tool['suggestion_count']}x)")

    if stats.get("error_type_breakdown"):
        lines.extend([
            "",
            "Error Types:",
            "-" * 40,
        ])
        for et, count in sorted(stats["error_type_breakdown"].items(), key=lambda x: -x[1]):
            lines.append(f"  {et:<30} {count:>5}")

    lines.extend([
        "",
        f"Alert threshold: {stats['alert_threshold']} failures per session",
        f"Session window: {stats['session_window_hours']} hours",
    ])

    return "\n".join(lines)
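
For orientation, every line that log_hallucination_event() appends to hallucination_metrics.jsonl is a standalone JSON object with the fields built above. A minimal sketch of inspecting the raw file outside this module, assuming the Hermes home resolves to ~/.hermes (the actual location comes from hermes_constants.get_hermes_home()):

```python
import json
from pathlib import Path

# Assumed location; the module itself derives this from get_hermes_home().
metrics_file = Path.home() / ".hermes" / "metrics" / "hallucination_metrics.jsonl"

if metrics_file.exists():
    for line in metrics_file.read_text(encoding="utf-8").splitlines():
        event = json.loads(line)
        # Fields written by log_hallucination_event: timestamp, tool_name, error_type,
        # suggested_name, validation_messages, session_id, unix_timestamp.
        print(event["timestamp"], event["tool_name"], event["error_type"])
```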
docs/ragflow-integration.md (new file, 68 lines)
@@ -0,0 +1,68 @@
# RAGFlow integration

This repo-side slice adds:

- `tools/ragflow_tool.py`
  - `ragflow_ingest(document_url, dataset)`
  - `ragflow_query(query, dataset, limit=5)`
- `scripts/ragflow_bootstrap.py`
  - fetches the upstream RAGFlow Docker bundle
  - runs `docker compose --profile cpu up -d` or `gpu`

## Deployment

Bootstrap the upstream CPU stack locally:

```bash
python3 scripts/ragflow_bootstrap.py --profile cpu
```

Dry-run only:

```bash
python3 scripts/ragflow_bootstrap.py --profile cpu --dry-run
```

Fetch files without launching Docker:

```bash
python3 scripts/ragflow_bootstrap.py --no-up
```

Default bundle target:

- `~/.hermes/services/ragflow`

## Runtime configuration

Optional environment variables:

- `RAGFLOW_API_URL` — defaults to `http://localhost:9380`
- `RAGFLOW_API_KEY` — Bearer token for authenticated RAGFlow APIs
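
A quick way to confirm both variables are picked up is to list datasets against the same endpoint the tools use; a minimal sketch (the endpoint path mirrors `tools/ragflow_tool.py`, error handling is illustrative):

```python
import os
import requests

# Same defaults the RAGFlow tools assume.
base_url = os.getenv("RAGFLOW_API_URL", "http://localhost:9380").rstrip("/")
headers = {}
if os.getenv("RAGFLOW_API_KEY"):
    headers["Authorization"] = f"Bearer {os.environ['RAGFLOW_API_KEY']}"

# List datasets, the endpoint ragflow_ingest/ragflow_query resolve dataset names against.
response = requests.get(f"{base_url}/api/v1/datasets", headers=headers, timeout=30)
response.raise_for_status()
print(response.json())
```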

## Supported document types

RAGFlow ingest accepts:

- PDF: `.pdf`
- Word: `.doc`, `.docx`
- Presentations: `.ppt`, `.pptx`
- Images via OCR: `.png`, `.jpg`, `.jpeg`, `.webp`, `.bmp`, `.tif`, `.tiff`, `.gif`
- Text and codebase documents: `.txt`, `.md`, `.rst`, `.html`, `.json`, `.yaml`, `.yml`, `.toml`, `.ini`, `.py`, `.js`, `.ts`, `.tsx`, `.jsx`, `.java`, `.go`, `.rs`, `.c`, `.cpp`, `.h`, `.hpp`, `.rb`, `.php`, `.sql`, `.sh`

## Example tool usage

```json
{"document_url":"https://arxiv.org/pdf/1706.03762.pdf","dataset":"research-papers"}
```

```json
{"query":"What does the paper say about attention heads?","dataset":"research-papers","limit":5}
```

## Use cases

- research papers
- technical documentation
- OCR-heavy image workflows
- ingested codebases and architecture docs
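
The JSON payloads above are what the agent passes to the registered tools; the underlying Python entry points in `tools/ragflow_tool.py` can also be called directly. A minimal sketch (both functions return JSON strings produced by `tool_result`/`tool_error`):

```python
import json

from tools.ragflow_tool import ragflow_ingest_tool, ragflow_query_tool

# Ingest a remote PDF into a dataset (created automatically when missing).
ingest = json.loads(ragflow_ingest_tool(
    "https://arxiv.org/pdf/1706.03762.pdf", dataset="research-papers"
))
print(ingest.get("dataset_id"), ingest.get("document_ids"))

# Retrieve up to five chunks for a question against the same dataset.
answer = json.loads(ragflow_query_tool(
    "What does the paper say about attention heads?", "research-papers", limit=5
))
for chunk in answer.get("chunks", []):
    print(chunk["document_name"], chunk["similarity"])
```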
@@ -18,7 +18,6 @@ Usage:
    hermes cron list                 # List cron jobs
    hermes cron status               # Check if cron scheduler is running
    hermes doctor                    # Check configuration and dependencies
    hermes hallucination-stats       # Show tool hallucination statistics
    hermes honcho setup              # Configure Honcho AI memory integration
    hermes honcho status             # Show Honcho config and connection status
    hermes honcho sessions           # List directory → session name mappings
@@ -2805,17 +2804,6 @@ def cmd_doctor(args):
    run_doctor(args)


def cmd_hallucination_stats(args):
    """Show tool hallucination statistics."""
    from agent.hallucination_metrics import get_hallucination_stats, format_stats_for_display, clear_metrics
    if getattr(args, 'clear', False):
        removed = clear_metrics(older_than_hours=getattr(args, 'older_than', None))
        print(f"Cleared {removed} hallucination events.")
        return
    stats = get_hallucination_stats(hours=getattr(args, 'hours', None))
    print(format_stats_for_display(stats))


def cmd_dump(args):
    """Dump setup summary for support/debugging."""
    from hermes_cli.dump import run_dump
@@ -5053,33 +5041,6 @@ For more help on a command:
    )
    doctor_parser.set_defaults(func=cmd_doctor)

    # =========================================================================
    # hallucination-stats command
    # =========================================================================
    hallucination_parser = subparsers.add_parser(
        "hallucination-stats",
        help="Show tool hallucination statistics",
        description="View aggregated tool hallucination metrics from poka-yoke validation"
    )
    hallucination_parser.add_argument(
        "--hours",
        type=int,
        default=None,
        help="Only show events from the last N hours"
    )
    hallucination_parser.add_argument(
        "--clear",
        action="store_true",
        help="Clear all hallucination metrics"
    )
    hallucination_parser.add_argument(
        "--older-than",
        type=int,
        default=None,
        help="When clearing, only remove events older than N hours"
    )
    hallucination_parser.set_defaults(func=cmd_hallucination_stats)

    # =========================================================================
    # dump command
    # =========================================================================
scripts/ragflow_bootstrap.py (new file, 79 lines)
@@ -0,0 +1,79 @@
#!/usr/bin/env python3
"""Bootstrap an upstream RAGFlow Docker bundle for Hermes.

This script fetches the upstream RAGFlow docker bundle into a local directory
so operators can run `docker compose --profile cpu up -d` (or `gpu`) without
manually assembling the required files.
"""

from __future__ import annotations

import argparse
import subprocess
import urllib.request
from pathlib import Path

UPSTREAM_BASE = "https://raw.githubusercontent.com/infiniflow/ragflow/main/docker"
UPSTREAM_FILES = {
    "docker-compose.yml": f"{UPSTREAM_BASE}/docker-compose.yml",
    "docker-compose-base.yml": f"{UPSTREAM_BASE}/docker-compose-base.yml",
    ".env": f"{UPSTREAM_BASE}/.env",
    "service_conf.yaml.template": f"{UPSTREAM_BASE}/service_conf.yaml.template",
    "entrypoint.sh": f"{UPSTREAM_BASE}/entrypoint.sh",
}


def materialize_bundle(target_dir: str | Path, overwrite: bool = False) -> list[Path]:
    target = Path(target_dir).expanduser()
    target.mkdir(parents=True, exist_ok=True)
    written: list[Path] = []
    for name, url in UPSTREAM_FILES.items():
        dest = target / name
        if dest.exists() and not overwrite:
            written.append(dest)
            continue
        with urllib.request.urlopen(url, timeout=60) as response:
            dest.write_bytes(response.read())
        if name == "entrypoint.sh":
            dest.chmod(0o755)
        written.append(dest)
    return written


def build_compose_command(target_dir: str | Path, profile: str = "cpu") -> list[str]:
    return ["docker", "compose", "--profile", profile, "up", "-d"]


def run_compose(target_dir: str | Path, profile: str = "cpu", dry_run: bool = False) -> dict:
    target = Path(target_dir).expanduser()
    command = build_compose_command(target, profile=profile)
    if dry_run:
        return {"target_dir": str(target), "command": command, "executed": False}
    subprocess.run(command, cwd=target, check=True)
    return {"target_dir": str(target), "command": command, "executed": True}


def main(argv: list[str] | None = None) -> int:
    parser = argparse.ArgumentParser(description="Fetch and launch the upstream RAGFlow Docker bundle")
    parser.add_argument("--target-dir", default=str(Path.home() / ".hermes" / "services" / "ragflow"))
    parser.add_argument("--profile", choices=["cpu", "gpu"], default="cpu")
    parser.add_argument("--overwrite", action="store_true")
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--no-up", action="store_true", help="Only fetch bundle files; do not run docker compose")
    args = parser.parse_args(argv)

    written = materialize_bundle(args.target_dir, overwrite=args.overwrite)
    print(f"Fetched {len(written)} RAGFlow docker files into {Path(args.target_dir).expanduser()}")
    if args.no_up:
        return 0
    result = run_compose(args.target_dir, profile=args.profile, dry_run=args.dry_run)
    print("Command:", " ".join(result["command"]))
    if result["executed"]:
        print("RAGFlow docker stack launch requested.")
    else:
        print("Dry run only; docker compose not executed.")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
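
The script can also be reused as a module, which is how its tests load it; a minimal sketch that fetches the bundle and prints the compose command without launching Docker (roughly equivalent to `--no-up` plus a `--dry-run`):

```python
import importlib.util
from pathlib import Path

# Load scripts/ragflow_bootstrap.py as a module, the same way the tests do.
spec = importlib.util.spec_from_file_location("ragflow_bootstrap", Path("scripts/ragflow_bootstrap.py"))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)

# Fetch (or reuse) the bundle files, then show the compose command without running it.
bundle_dir = "~/.hermes/services/ragflow"
written = module.materialize_bundle(bundle_dir)
print(f"{len(written)} bundle files in place")
print(" ".join(module.build_compose_command(bundle_dir, profile="cpu")))
```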
@@ -1,171 +0,0 @@
"""Tests for agent/hallucination_metrics.py — #853."""

import json
import time
from pathlib import Path

import pytest

from agent.hallucination_metrics import (
    log_hallucination_event,
    get_hallucination_stats,
    get_most_hallucinated_tools,
    clear_metrics,
    format_stats_for_display,
    _get_metrics_path,
)


@pytest.fixture(autouse=True)
def isolated_metrics(monkeypatch, tmp_path):
    """Redirect metrics to a temp file for every test."""
    metrics_dir = tmp_path / "test_hermes_home" / "metrics"
    metrics_dir.mkdir(parents=True)
    metrics_file = metrics_dir / "hallucination_metrics.jsonl"

    # Patch the get_hermes_home function to return our temp path
    def mock_get_hermes_home():
        return tmp_path / "test_hermes_home"

    monkeypatch.setattr(
        "agent.hallucination_metrics.get_hermes_home",
        mock_get_hermes_home,
    )

    # Also clear cache
    from agent.hallucination_metrics import _cache, _cache_lock
    with _cache_lock:
        _cache["events"].clear()
        _cache["session_counts"].clear()
    yield
    clear_metrics()


class TestLogEvent:
    def test_log_event_returns_dict(self):
        event = log_hallucination_event("fake_tool", "unknown_tool", "real_tool")
        assert event["tool_name"] == "fake_tool"
        assert event["error_type"] == "unknown_tool"
        assert event["suggested_name"] == "real_tool"
        assert "timestamp" in event
        assert "unix_timestamp" in event

    def test_log_event_persists_to_file(self):
        log_hallucination_event("tool_a", "unknown_tool")
        log_hallucination_event("tool_b", "invalid_params")

        path = _get_metrics_path()
        assert path.exists()
        lines = path.read_text().strip().splitlines()
        assert len(lines) == 2

        data = [json.loads(line) for line in lines]
        assert data[0]["tool_name"] == "tool_a"
        assert data[1]["tool_name"] == "tool_b"


class TestGetStats:
    def test_empty_stats(self):
        stats = get_hallucination_stats()
        assert stats["total_events"] == 0
        assert stats["unique_tools"] == 0

    def test_stats_by_tool(self):
        log_hallucination_event("tool_x", "unknown_tool", "tool_y")
        log_hallucination_event("tool_x", "unknown_tool", "tool_y")
        log_hallucination_event("tool_z", "invalid_params")

        stats = get_hallucination_stats()
        assert stats["total_events"] == 3
        assert stats["unique_tools"] == 2

        top = stats["top_hallucinated_tools"]
        assert len(top) == 2
        assert top[0]["tool"] == "tool_x"
        assert top[0]["count"] == 2
        assert top[1]["tool"] == "tool_z"
        assert top[1]["count"] == 1

    def test_stats_hours_filter(self):
        # Log old event by faking timestamp
        old_event = {
            "timestamp": "2026-01-01T00:00:00+00:00",
            "tool_name": "old_tool",
            "error_type": "unknown_tool",
            "unix_timestamp": time.time() - 48 * 3600,
        }
        path = _get_metrics_path()
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, "w") as f:
            f.write(json.dumps(old_event) + "\n")

        log_hallucination_event("new_tool", "unknown_tool")

        stats = get_hallucination_stats(hours=24)
        assert stats["total_events"] == 1
        assert stats["top_hallucinated_tools"][0]["tool"] == "new_tool"

    def test_error_type_breakdown(self):
        log_hallucination_event("t1", "unknown_tool")
        log_hallucination_event("t2", "invalid_params")
        log_hallucination_event("t3", "unknown_tool")

        stats = get_hallucination_stats()
        breakdown = stats["error_type_breakdown"]
        assert breakdown["unknown_tool"] == 2
        assert breakdown["invalid_params"] == 1


class TestGetMostHallucinated:
    def test_top_tools(self):
        for _ in range(5):
            log_hallucination_event("common_tool", "unknown_tool")
        for _ in range(2):
            log_hallucination_event("rare_tool", "unknown_tool")

        tools = get_most_hallucinated_tools(n=2)
        assert tools[0] == ("common_tool", 5)
        assert tools[1] == ("rare_tool", 2)


class TestClearMetrics:
    def test_clear_all(self):
        log_hallucination_event("t1", "unknown_tool")
        removed = clear_metrics()
        assert removed == 1
        assert _get_metrics_path().exists() is False

    def test_clear_older_than(self):
        path = _get_metrics_path()
        path.parent.mkdir(parents=True, exist_ok=True)

        old = {"tool_name": "old", "unix_timestamp": time.time() - 48 * 3600}
        new = {"tool_name": "new", "unix_timestamp": time.time()}
        with open(path, "w") as f:
            f.write(json.dumps(old) + "\n")
            f.write(json.dumps(new) + "\n")

        removed = clear_metrics(older_than_hours=24)
        assert removed == 1

        remaining = get_hallucination_stats()
        assert remaining["total_events"] == 1


class TestFormatDisplay:
    def test_format_includes_headers(self):
        log_hallucination_event("bad_tool", "unknown_tool", "good_tool")
        stats = get_hallucination_stats()
        text = format_stats_for_display(stats)
        assert "Hallucination Metrics" in text
        assert "bad_tool" in text
        assert "Total events: 1" in text


class TestAlertThreshold:
    def test_alert_after_threshold(self, monkeypatch, caplog):
        monkeypatch.setattr("agent.hallucination_metrics.ALERT_THRESHOLD", 3)
        for i in range(4):
            log_hallucination_event("persistent_tool", "unknown_tool")
        assert "HALLUCINATION ALERT" in caplog.text
        assert "persistent_tool" in caplog.text
tests/test_ragflow_bootstrap.py (new file, 43 lines)
@@ -0,0 +1,43 @@
from __future__ import annotations

import importlib.util
import io
from pathlib import Path
from unittest.mock import patch


ROOT = Path(__file__).resolve().parent.parent
SCRIPT_PATH = ROOT / "scripts" / "ragflow_bootstrap.py"


def _load_module():
    spec = importlib.util.spec_from_file_location("ragflow_bootstrap", SCRIPT_PATH)
    module = importlib.util.module_from_spec(spec)
    assert spec.loader is not None
    spec.loader.exec_module(module)
    return module


def test_materialize_bundle_downloads_required_upstream_artifacts(tmp_path):
    module = _load_module()

    def fake_urlopen(url, timeout=0):
        name = url.rsplit("/", 1)[-1]
        return io.BytesIO(f"# fetched {name}\n".encode())

    with patch.object(module.urllib.request, "urlopen", side_effect=fake_urlopen):
        written = module.materialize_bundle(tmp_path)

    assert (tmp_path / "docker-compose.yml").exists()
    assert (tmp_path / "docker-compose-base.yml").exists()
    assert (tmp_path / ".env").exists()
    assert any(path.name == "entrypoint.sh" for path in written)


def test_build_compose_command_respects_profile_and_directory(tmp_path):
    module = _load_module()

    command = module.build_compose_command(tmp_path, profile="gpu")

    assert command[:4] == ["docker", "compose", "--profile", "gpu"]
    assert command[-2:] == ["up", "-d"]
tests/tools/test_ragflow_tool.py (new file, 122 lines)
@@ -0,0 +1,122 @@
from __future__ import annotations

import importlib
import json
import sys
from pathlib import Path
from unittest.mock import patch

from tools.registry import registry


class _Response:
    def __init__(self, payload: dict, status_code: int = 200):
        self._payload = payload
        self.status_code = status_code
        self.text = json.dumps(payload)

    def json(self):
        return self._payload

    def raise_for_status(self):
        if self.status_code >= 400:
            raise RuntimeError(f"HTTP {self.status_code}")


def _reload_module():
    registry.deregister("ragflow_ingest")
    registry.deregister("ragflow_query")
    sys.modules.pop("tools.ragflow_tool", None)
    module = importlib.import_module("tools.ragflow_tool")
    return importlib.reload(module)


def test_ragflow_tools_register_and_support_document_formats():
    module = _reload_module()

    assert registry.get_entry("ragflow_ingest") is not None
    assert registry.get_entry("ragflow_query") is not None
    assert ".pdf" in module.SUPPORTED_EXTENSIONS
    assert ".docx" in module.SUPPORTED_EXTENSIONS
    assert ".png" in module.SUPPORTED_EXTENSIONS
    assert ".md" in module.SUPPORTED_EXTENSIONS


def test_ragflow_ingest_creates_dataset_uploads_and_starts_parse(tmp_path):
    module = _reload_module()
    document = tmp_path / "paper.pdf"
    document.write_bytes(b"%PDF-1.7\n")
    calls: list[tuple[str, str, dict | None, dict | None]] = []

    def fake_request(method, url, *, headers=None, params=None, json=None, files=None, timeout=None):
        calls.append((method, url, params, json))
        if method == "GET" and url.endswith("/api/v1/datasets"):
            return _Response({"code": 0, "data": []})
        if method == "POST" and url.endswith("/api/v1/datasets"):
            assert json["name"] == "research-papers"
            assert json["chunk_method"] == "paper"
            return _Response({"code": 0, "data": {"id": "ds-1", "name": "research-papers"}})
        if method == "POST" and url.endswith("/api/v1/datasets/ds-1/documents"):
            assert files and files[0][0] == "file"
            return _Response({"code": 0, "data": [{"id": "doc-1", "name": "paper.pdf"}]})
        if method == "POST" and url.endswith("/api/v1/datasets/ds-1/chunks"):
            assert json == {"document_ids": ["doc-1"]}
            return _Response({"code": 0})
        raise AssertionError(f"Unexpected request: {method} {url}")

    with patch("tools.ragflow_tool.requests.request", side_effect=fake_request):
        result = json.loads(module.ragflow_ingest_tool(str(document), dataset="research-papers"))

    assert result["dataset_id"] == "ds-1"
    assert result["document_ids"] == ["doc-1"]
    assert result["parse_started"] is True
    assert result["chunk_method"] == "paper"
    assert calls[0][0] == "GET"


def test_ragflow_query_retrieves_chunks_for_named_dataset():
    module = _reload_module()

    def fake_request(method, url, *, headers=None, params=None, json=None, files=None, timeout=None):
        if method == "GET" and url.endswith("/api/v1/datasets"):
            assert params == {"name": "tech-docs"}
            return _Response({"code": 0, "data": [{"id": "ds-9", "name": "tech-docs"}]})
        if method == "POST" and url.endswith("/api/v1/retrieval"):
            assert json["question"] == "How does parsing work?"
            assert json["dataset_ids"] == ["ds-9"]
            assert json["page_size"] == 2
            return _Response(
                {
                    "code": 0,
                    "data": {
                        "chunks": [
                            {
                                "content": "Parsing starts by uploading documents.",
                                "document_id": "doc-9",
                                "document_keyword": "guide.md",
                                "similarity": 0.98,
                            }
                        ],
                        "total": 1,
                    },
                }
            )
        raise AssertionError(f"Unexpected request: {method} {url}")

    with patch("tools.ragflow_tool.requests.request", side_effect=fake_request):
        result = json.loads(module.ragflow_query_tool("How does parsing work?", "tech-docs", limit=2))

    assert result["dataset_id"] == "ds-9"
    assert result["total"] == 1
    assert result["chunks"][0]["content"] == "Parsing starts by uploading documents."


def test_ragflow_ingest_rejects_unsupported_document_types(tmp_path):
    module = _reload_module()
    document = tmp_path / "binary.exe"
    document.write_bytes(b"MZ")

    result = json.loads(module.ragflow_ingest_tool(str(document), dataset="ignored"))

    assert "error" in result
    assert "Unsupported document type" in result["error"]
tools/ragflow_tool.py (new file, 344 lines)
@@ -0,0 +1,344 @@
#!/usr/bin/env python3
"""RAGFlow tool integration for document understanding.

Provides two tools:
- ragflow_ingest(document_url, dataset): upload and parse a document into RAGFlow
- ragflow_query(query, dataset): retrieve relevant chunks from a dataset

Default deployment target is a local RAGFlow server on http://localhost:9380.
"""

from __future__ import annotations

import json
import mimetypes
import os
import tempfile
from pathlib import Path
from typing import Any
from urllib.parse import urlparse

import requests

from tools.registry import registry, tool_error, tool_result

RAGFLOW_INGEST_SCHEMA = {
    "name": "ragflow_ingest",
    "description": (
        "Upload a document into a RAGFlow dataset, creating the dataset if needed, "
        "then trigger parsing so Hermes can query the content later. Supports PDF, "
        "Word, images via OCR, plus text and code documents."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "document_url": {
                "type": "string",
                "description": "HTTP(S) URL, file:// URL, or local filesystem path to the document.",
            },
            "dataset": {
                "type": "string",
                "description": "Dataset name or id to ingest into. Created automatically when absent.",
            },
        },
        "required": ["document_url", "dataset"],
    },
}

RAGFLOW_QUERY_SCHEMA = {
    "name": "ragflow_query",
    "description": (
        "Query a RAGFlow dataset for relevant chunks. Useful for research papers, "
        "technical docs, OCR-processed images, and ingested codebase documents."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Question or search query to run against RAGFlow.",
            },
            "dataset": {
                "type": "string",
                "description": "Dataset name or id to search.",
            },
            "limit": {
                "type": "integer",
                "description": "Maximum number of chunks to return.",
                "default": 5,
                "minimum": 1,
                "maximum": 25,
            },
        },
        "required": ["query", "dataset"],
    },
}

SUPPORTED_EXTENSIONS = {
    ".pdf": "paper",
    ".doc": "paper",
    ".docx": "paper",
    ".ppt": "presentation",
    ".pptx": "presentation",
    ".png": "picture",
    ".jpg": "picture",
    ".jpeg": "picture",
    ".webp": "picture",
    ".bmp": "picture",
    ".tif": "picture",
    ".tiff": "picture",
    ".gif": "picture",
    ".txt": "naive",
    ".md": "naive",
    ".rst": "naive",
    ".html": "naive",
    ".htm": "naive",
    ".csv": "table",
    ".tsv": "table",
    ".json": "naive",
    ".yaml": "naive",
    ".yml": "naive",
    ".toml": "naive",
    ".ini": "naive",
    ".py": "naive",
    ".js": "naive",
    ".ts": "naive",
    ".tsx": "naive",
    ".jsx": "naive",
    ".java": "naive",
    ".go": "naive",
    ".rs": "naive",
    ".c": "naive",
    ".cc": "naive",
    ".cpp": "naive",
    ".h": "naive",
    ".hpp": "naive",
    ".rb": "naive",
    ".php": "naive",
    ".sql": "naive",
    ".sh": "naive",
}


def _ragflow_base_url() -> str:
    return os.getenv("RAGFLOW_API_URL", "http://localhost:9380").rstrip("/")


def _ragflow_headers(json_body: bool = True) -> dict[str, str]:
    headers: dict[str, str] = {}
    api_key = os.getenv("RAGFLOW_API_KEY", "").strip()
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"
    if json_body:
        headers["Content-Type"] = "application/json"
    return headers


def _ragflow_check_requirements() -> bool:
    return True


def _request_json(method: str, path: str, *, params=None, json_payload=None, files=None) -> dict[str, Any]:
    response = requests.request(
        method,
        f"{_ragflow_base_url()}{path}",
        headers=_ragflow_headers(json_body=files is None),
        params=params,
        json=json_payload,
        files=files,
        timeout=120,
    )
    response.raise_for_status()
    payload = response.json()
    if payload.get("code", 0) != 0:
        message = payload.get("message") or payload.get("error") or "RAGFlow request failed"
        raise RuntimeError(message)
    return payload


def _is_probable_dataset_id(dataset: str) -> bool:
    compact = dataset.replace("-", "")
    return len(compact) >= 16 and all(ch.isalnum() for ch in compact)


def _resolve_dataset(dataset: str) -> tuple[str, str] | None:
    dataset = dataset.strip()
    if not dataset:
        return None
    params = {"id": dataset} if _is_probable_dataset_id(dataset) else {"name": dataset}
    payload = _request_json("GET", "/api/v1/datasets", params=params)
    data = payload.get("data") or []
    if not data:
        return None
    match = data[0]
    return match["id"], match.get("name", dataset)


def _ensure_dataset(dataset: str, chunk_method: str) -> tuple[str, str]:
    resolved = _resolve_dataset(dataset)
    if resolved:
        return resolved
    payload = _request_json(
        "POST",
        "/api/v1/datasets",
        json_payload={"name": dataset, "chunk_method": chunk_method},
    )
    data = payload.get("data") or {}
    return data["id"], data.get("name", dataset)


def _prepare_document(document_url: str) -> tuple[Path, bool]:
    parsed = urlparse(document_url)
    if parsed.scheme in {"http", "https"}:
        response = requests.get(document_url, timeout=120)
        response.raise_for_status()
        suffix = Path(parsed.path).suffix or ".bin"
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
        tmp.write(response.content)
        tmp.flush()
        tmp.close()
        return Path(tmp.name), True
    if parsed.scheme == "file":
        return Path(parsed.path), False
    return Path(document_url).expanduser(), False


def _detect_chunk_method(path: Path) -> str:
    extension = path.suffix.lower()
    if extension not in SUPPORTED_EXTENSIONS:
        supported = ", ".join(sorted(SUPPORTED_EXTENSIONS))
        raise ValueError(f"Unsupported document type '{extension or path.name}'. Supported document types: {supported}")
    return SUPPORTED_EXTENSIONS[extension]


def _upload_document(dataset_id: str, path: Path) -> list[str]:
    mime = mimetypes.guess_type(path.name)[0] or "application/octet-stream"
    with path.open("rb") as handle:
        payload = _request_json(
            "POST",
            f"/api/v1/datasets/{dataset_id}/documents",
            files=[("file", (path.name, handle, mime))],
        )
    documents = payload.get("data") or []
    ids = [item["id"] for item in documents if item.get("id")]
    if not ids:
        raise RuntimeError("RAGFlow upload did not return any document ids")
    return ids


def ragflow_ingest_tool(document_url: str, dataset: str) -> str:
    local_path = None
    should_cleanup = False
    try:
        local_path, should_cleanup = _prepare_document(document_url)
        if not local_path.exists():
            return tool_error(f"Document not found: {document_url}")
        chunk_method = _detect_chunk_method(local_path)
        dataset_id, dataset_name = _ensure_dataset(dataset, chunk_method)
        document_ids = _upload_document(dataset_id, local_path)
        _request_json(
            "POST",
            f"/api/v1/datasets/{dataset_id}/chunks",
            json_payload={"document_ids": document_ids},
        )
        return tool_result(
            success=True,
            dataset_id=dataset_id,
            dataset_name=dataset_name,
            document_ids=document_ids,
            parse_started=True,
            chunk_method=chunk_method,
            source=document_url,
            filename=local_path.name,
        )
    except ValueError as exc:
        return tool_error(str(exc))
    except Exception as exc:
        return tool_error(f"RAGFlow ingest failed: {exc}")
    finally:
        if should_cleanup and local_path is not None:
            try:
                local_path.unlink(missing_ok=True)
            except Exception:
                pass


def _normalize_chunks(chunks: list[dict[str, Any]]) -> list[dict[str, Any]]:
    normalized = []
    for chunk in chunks:
        normalized.append(
            {
                "content": chunk.get("content", ""),
                "document_id": chunk.get("document_id", ""),
                "document_name": chunk.get("document_keyword", ""),
                "similarity": chunk.get("similarity"),
                "highlight": chunk.get("highlight", ""),
            }
        )
    return normalized


def ragflow_query_tool(query: str, dataset: str, limit: int = 5) -> str:
    try:
        resolved = _resolve_dataset(dataset)
        if not resolved:
            return tool_error(f"RAGFlow dataset not found: {dataset}")
        dataset_id, dataset_name = resolved
        payload = _request_json(
            "POST",
            "/api/v1/retrieval",
            json_payload={
                "question": query,
                "dataset_ids": [dataset_id],
                "page_size": max(1, min(int(limit), 25)),
                "highlight": True,
                "keyword": True,
            },
        )
        data = payload.get("data") or {}
        chunks = data.get("chunks") or []
        return tool_result(
            success=True,
            dataset_id=dataset_id,
            dataset_name=dataset_name,
            total=data.get("total", len(chunks)),
            chunks=_normalize_chunks(chunks),
        )
    except Exception as exc:
        return tool_error(f"RAGFlow query failed: {exc}")


def _handle_ragflow_ingest(args, **_kwargs):
    return ragflow_ingest_tool(
        document_url=args.get("document_url", ""),
        dataset=args.get("dataset", ""),
    )


def _handle_ragflow_query(args, **_kwargs):
    return ragflow_query_tool(
        query=args.get("query", ""),
        dataset=args.get("dataset", ""),
        limit=args.get("limit", 5),
    )


registry.register(
    name="ragflow_ingest",
    toolset="web",
    schema=RAGFLOW_INGEST_SCHEMA,
    handler=_handle_ragflow_ingest,
    check_fn=_ragflow_check_requirements,
    requires_env=["RAGFLOW_API_URL", "RAGFLOW_API_KEY"],
    emoji="📚",
)

registry.register(
    name="ragflow_query",
    toolset="web",
    schema=RAGFLOW_QUERY_SCHEMA,
    handler=_handle_ragflow_query,
    check_fn=_ragflow_check_requirements,
    requires_env=["RAGFLOW_API_URL", "RAGFLOW_API_KEY"],
    emoji="🧠",
)
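
Once registered, both tools are driven by a plain args dict that mirrors the JSON schemas above; a minimal sketch using the handler functions directly (how the registry routes a live tool call is outside this diff):

```python
import json

from tools.ragflow_tool import _handle_ragflow_ingest, _handle_ragflow_query

# Args dicts follow RAGFLOW_INGEST_SCHEMA / RAGFLOW_QUERY_SCHEMA.
ingest_result = json.loads(_handle_ragflow_ingest(
    {"document_url": "./docs/ragflow-integration.md", "dataset": "tech-docs"}
))
query_result = json.loads(_handle_ragflow_query(
    {"query": "How does parsing work?", "dataset": "tech-docs", "limit": 3}
))
print(ingest_result.get("parse_started"), query_result.get("total"))
```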
@@ -204,17 +204,6 @@ class ToolCallValidator:
        self.consecutive_failures[tool_name] = self.consecutive_failures.get(tool_name, 0) + 1
        count = self.consecutive_failures[tool_name]

        # Log to persistent metrics
        try:
            from agent.hallucination_metrics import log_hallucination_event
            log_hallucination_event(
                tool_name=tool_name,
                error_type="unknown_tool",
                suggested_name=None,
            )
        except Exception:
            pass  # Best-effort metrics logging

        if count >= self.failure_threshold:
            logger.warning(
                f"Poka-yoke circuit breaker triggered for '{tool_name}': "