Compare commits


1 Commit

Author: Alexander Whitestone
SHA1: a61761d321
Message: fix(memory): filter noisy prefetch results before injection (#982)
All checks were successful
Lint / lint (pull_request) Successful in 28s
Add line-level filtering to MemoryManager.prefetch_all() that removes:
- exact duplicate lines
- meta-instruction debris (prompts, templates like 'Focus on:')
- very short / content-free fragments (< 15 chars after markers)

This prevents noisy memory providers (e.g. MemPalace transcript recall)
from bloating the <memory-context> block with repeated instructions and
low-signal snippets.

Changes:
- agent/memory_manager.py: add _filter_prefetch_lines(),
  _is_meta_instruction_line(), _is_low_signal_line() helpers;
  wire filtering into prefetch_all()
- tests/agent/test_memory_provider.py: add TestPrefetchFiltering with
  8 tests covering dedup, meta-instruction removal, low-signal filtering,
  structured-fact preservation, and integration with prefetch_all()

Closes #982
Date: 2026-04-22 03:06:50 -04:00
6 changed files with 171 additions and 505 deletions
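
A quick illustration of the behavior this commit describes, with hypothetical noisy input (`_filter_prefetch_lines` is the helper added to agent/memory_manager.py in the diff below):

from agent.memory_manager import _filter_prefetch_lines

# Hypothetical prefetch text: a duplicated meta-instruction line, a
# content-free fragment, and one real memory line.
raw = (
    "- > Focus on: was a non-trivial approach used\n"
    "- > Focus on: was a non-trivial approach used\n"
    "- x\n"
    "- Actual memory content about fleet ops\n"
)

# The meta-instruction lines and the short fragment are dropped;
# only the real memory line survives.
print(_filter_prefetch_lines(raw))
# -> "- Actual memory content about fleet ops"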

View File

@@ -1,281 +0,0 @@
"""
Hallucination Metrics — Persistent logging and alerting for tool hallucinations.
Logs tool hallucination events to a JSONL file and provides aggregated statistics.
Integrates with the poka-yoke validation system.
Usage:
from agent.hallucination_metrics import log_hallucination_event, get_hallucination_stats
log_hallucination_event("invalid_tool", "unknown_tool", "suggested_correct_name")
stats = get_hallucination_stats()
"""
import json
import logging
import os
import time
from collections import defaultdict
from datetime import datetime, timezone
from pathlib import Path
from threading import Lock
from typing import Any, Dict, List, Optional, Tuple
from hermes_constants import get_hermes_home
logger = logging.getLogger(__name__)
# Constants
METRICS_FILE_NAME = "hallucination_metrics.jsonl"
ALERT_THRESHOLD = 10 # Alert after this many consecutive failures for a tool
SESSION_WINDOW_HOURS = 24 # Consider events within this window as "session"
# In-memory cache for fast lookups
_cache: Dict[str, Any] = {"events": [], "last_flush": 0, "session_counts": defaultdict(int)}
_cache_lock = Lock()
def _get_metrics_path() -> Path:
"""Return the path to the hallucination metrics file."""
return get_hermes_home() / "metrics" / METRICS_FILE_NAME
def _ensure_metrics_dir():
"""Ensure the metrics directory exists."""
metrics_dir = _get_metrics_path().parent
metrics_dir.mkdir(parents=True, exist_ok=True)
def log_hallucination_event(
tool_name: str,
error_type: str = "unknown_tool",
suggested_name: Optional[str] = None,
validation_messages: Optional[List[str]] = None,
session_id: Optional[str] = None,
) -> Dict[str, Any]:
"""
Log a hallucination event to the metrics file.
Args:
tool_name: The hallucinated tool name
error_type: Type of error (unknown_tool, invalid_params, etc.)
suggested_name: Suggested correction if available
validation_messages: List of validation error messages
session_id: Optional session identifier for grouping
Returns:
The logged event dict with additional metadata
"""
event = {
"timestamp": datetime.now(timezone.utc).isoformat(),
"tool_name": tool_name,
"error_type": error_type,
"suggested_name": suggested_name,
"validation_messages": validation_messages or [],
"session_id": session_id,
"unix_timestamp": time.time(),
}
# Write to file
_ensure_metrics_dir()
metrics_path = _get_metrics_path()
try:
with open(metrics_path, "a", encoding="utf-8") as f:
f.write(json.dumps(event, ensure_ascii=False) + "\n")
except Exception as e:
logger.warning(f"Failed to write hallucination event: {e}")
# Update in-memory cache
with _cache_lock:
_cache["events"].append(event)
_cache["session_counts"][tool_name] += 1
session_count = _cache["session_counts"][tool_name]
# Check alert threshold
if session_count >= ALERT_THRESHOLD:
logger.warning(
f"HALLUCINATION ALERT: Tool '{tool_name}' has failed {session_count} times "
f"in this session (threshold: {ALERT_THRESHOLD}). "
f"This may indicate a persistent hallucination pattern."
)
return event
def _load_events_from_file() -> List[Dict[str, Any]]:
"""Load all events from the metrics file."""
metrics_path = _get_metrics_path()
if not metrics_path.exists():
return []
events = []
try:
with open(metrics_path, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if line:
try:
events.append(json.loads(line))
except json.JSONDecodeError:
continue
except Exception as e:
logger.warning(f"Failed to load hallucination events: {e}")
return events
def get_hallucination_stats(
hours: Optional[int] = None,
tool_name: Optional[str] = None,
) -> Dict[str, Any]:
"""
Get aggregated hallucination statistics.
Args:
hours: Only consider events from the last N hours (None = all time)
tool_name: Filter to specific tool name (None = all tools)
Returns:
Dict with aggregated statistics
"""
events = _load_events_from_file()
# Filter by time window
if hours is not None:
cutoff = time.time() - (hours * 3600)
events = [e for e in events if e.get("unix_timestamp", 0) >= cutoff]
# Filter by tool name
if tool_name is not None:
events = [e for e in events if e.get("tool_name") == tool_name]
# Aggregate by tool
tool_counts: Dict[str, Dict[str, Any]] = defaultdict(
lambda: {"count": 0, "suggested_names": [], "error_types": defaultdict(int)}
)
for event in events:
name = event.get("tool_name", "unknown")
tool_counts[name]["count"] += 1
if event.get("suggested_name"):
tool_counts[name]["suggested_names"].append(event["suggested_name"])
if event.get("error_type"):
tool_counts[name]["error_types"][event["error_type"]] += 1
# Find most common suggestions per tool
for name, data in tool_counts.items():
suggestions = data["suggested_names"]
if suggestions:
from collections import Counter
most_common = Counter(suggestions).most_common(1)[0]
data["most_common_suggestion"] = most_common[0]
data["suggestion_count"] = most_common[1]
del data["suggested_names"] # Remove raw list from output
# Calculate time-based stats
if events:
first_event = min(e.get("unix_timestamp", 0) for e in events)
last_event = max(e.get("unix_timestamp", 0) for e in events)
time_span_hours = (last_event - first_event) / 3600 if first_event != last_event else 0
else:
time_span_hours = 0
# Error type breakdown
all_error_types: Dict[str, int] = defaultdict(int)
for event in events:
et = event.get("error_type", "unknown")
all_error_types[et] += 1
return {
"total_events": len(events),
"unique_tools": len(tool_counts),
"time_span_hours": round(time_span_hours, 1),
"top_hallucinated_tools": sorted(
[{"tool": k, **v} for k, v in tool_counts.items()],
key=lambda x: -x["count"]
)[:20],
"error_type_breakdown": dict(all_error_types),
"alert_threshold": ALERT_THRESHOLD,
"session_window_hours": SESSION_WINDOW_HOURS,
}
def get_most_hallucinated_tools(n: int = 10) -> List[Tuple[str, int]]:
"""Get the top N most frequently hallucinated tool names."""
stats = get_hallucination_stats()
tools = stats.get("top_hallucinated_tools", [])
return [(t["tool"], t["count"]) for t in tools[:n]]
def clear_metrics(older_than_hours: Optional[int] = None) -> int:
"""
Clear hallucination metrics.
Args:
older_than_hours: Only clear events older than this many hours (None = clear all)
Returns:
Number of events removed
"""
metrics_path = _get_metrics_path()
if not metrics_path.exists():
return 0
if older_than_hours is None:
# Clear all
count = len(_load_events_from_file())
metrics_path.unlink(missing_ok=True)
with _cache_lock:
_cache["events"].clear()
_cache["session_counts"].clear()
return count
# Clear only old events
cutoff = time.time() - (older_than_hours * 3600)
events = _load_events_from_file()
keep = [e for e in events if e.get("unix_timestamp", 0) >= cutoff]
removed = len(events) - len(keep)
# Rewrite file
_ensure_metrics_dir()
with open(metrics_path, "w", encoding="utf-8") as f:
for event in keep:
f.write(json.dumps(event, ensure_ascii=False) + "\n")
return removed
def format_stats_for_display(stats: Dict[str, Any]) -> str:
"""Format statistics as a human-readable string."""
lines = [
"=== Hallucination Metrics ===",
"",
f"Total events: {stats['total_events']}",
f"Unique tools hallucinated: {stats['unique_tools']}",
f"Time span: {stats['time_span_hours']:.1f} hours",
"",
"Top Hallucinated Tools:",
"-" * 40,
]
for tool in stats.get("top_hallucinated_tools", [])[:10]:
lines.append(f" {tool['tool']:<30} {tool['count']:>5} events")
if "most_common_suggestion" in tool:
lines.append(f" → Suggested: {tool['most_common_suggestion']} ({tool['suggestion_count']}x)")
if stats.get("error_type_breakdown"):
lines.extend([
"",
"Error Types:",
"-" * 40,
])
for et, count in sorted(stats["error_type_breakdown"].items(), key=lambda x: -x[1]):
lines.append(f" {et:<30} {count:>5}")
lines.extend([
"",
f"Alert threshold: {stats['alert_threshold']} failures per session",
f"Session window: {stats['session_window_hours']} hours",
])
return "\n".join(lines)

View File

@@ -50,6 +50,78 @@ def sanitize_context(text: str) -> str:
    return _FENCE_TAG_RE.sub('', text)


# ---------------------------------------------------------------------------
# Prefetch filtering helpers
# ---------------------------------------------------------------------------

# Meta-instruction debris that memory providers sometimes echo back.
# These are prompts/instructions, not user-generated content.
_META_INSTRUCTION_PATTERNS = [
    re.compile(r"^\s*[\-\*]?\s*>?\s*Focus on:\s*", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Note:\s*", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*System\s+(note|prompt|instruction):", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*You are\s+", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Please\s+(provide|respond|answer|write)", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Do not\s+", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Always\s+", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Consider\s+(the following|these|this)\b", re.IGNORECASE),
    re.compile(r"^\s*[\-\*]?\s*>?\s*Here\s+(is|are)\s+(some|the|a few)\b", re.IGNORECASE),
]


def _is_meta_instruction_line(line: str) -> bool:
    """Return True if the line looks like a prompt/template instruction, not memory content."""
    for pat in _META_INSTRUCTION_PATTERNS:
        if pat.search(line):
            return True
    return False


def _is_low_signal_line(line: str) -> bool:
    """Return True for very short or content-free lines."""
    stripped = line.strip()
    # Empty or just punctuation/list marker
    if not stripped or stripped in {"-", "*", ">", "•", "–", "--"}:
        return True
    # Too short to be meaningful (< 15 chars after stripping markers)
    cleaned = re.sub(r"^[\-\*•>\s]+", "", stripped)
    if len(cleaned) < 15:
        return True
    return False


def _filter_prefetch_lines(text: str) -> str:
    """Filter and deduplicate prefetch result lines.

    Removes:
    - exact duplicate lines
    - meta-instruction debris (prompts, templates)
    - very short / content-free lines

    Returns cleaned text, preserving original line order.
    """
    if not text or not text.strip():
        return ""
    seen: set = set()
    kept: list = []
    for line in text.splitlines(keepends=False):
        stripped = line.strip()
        # Deduplicate exact lines
        if stripped in seen:
            continue
        # Skip meta-instructions
        if _is_meta_instruction_line(line):
            continue
        # Skip low-signal lines
        if _is_low_signal_line(line):
            continue
        seen.add(stripped)
        kept.append(line)
    return "\n".join(kept)


def build_memory_context_block(raw_context: str) -> str:
    """Wrap prefetched memory in a fenced block with system note.
@@ -180,7 +252,14 @@ class MemoryManager:
                    "Memory provider '%s' prefetch failed (non-fatal): %s",
                    provider.name, e,
                )
        return "\n\n".join(parts)
        raw = "\n\n".join(parts)
        if not raw:
            return ""
        # Apply line-level filtering: dedupe, strip meta-instructions,
        # remove very short fragments. This prevents noisy providers
        # (e.g. MemPalace transcript recall) from bloating context.
        filtered = _filter_prefetch_lines(raw)
        return filtered

    def queue_prefetch_all(self, query: str, *, session_id: str = "") -> None:
        """Queue background prefetch on all providers for the next turn."""

View File

@@ -18,7 +18,6 @@ Usage:
hermes cron list # List cron jobs
hermes cron status # Check if cron scheduler is running
hermes doctor # Check configuration and dependencies
hermes hallucination-stats # Show tool hallucination statistics
hermes honcho setup # Configure Honcho AI memory integration
hermes honcho status # Show Honcho config and connection status
hermes honcho sessions # List directory → session name mappings
@@ -2805,17 +2804,6 @@ def cmd_doctor(args):
    run_doctor(args)


def cmd_hallucination_stats(args):
    """Show tool hallucination statistics."""
    from agent.hallucination_metrics import get_hallucination_stats, format_stats_for_display, clear_metrics
    if getattr(args, 'clear', False):
        removed = clear_metrics(older_than_hours=getattr(args, 'older_than', None))
        print(f"Cleared {removed} hallucination events.")
        return
    stats = get_hallucination_stats(hours=getattr(args, 'hours', None))
    print(format_stats_for_display(stats))


def cmd_dump(args):
    """Dump setup summary for support/debugging."""
    from hermes_cli.dump import run_dump
@@ -5053,33 +5041,6 @@ For more help on a command:
    )
    doctor_parser.set_defaults(func=cmd_doctor)

    # =========================================================================
    # hallucination-stats command
    # =========================================================================
    hallucination_parser = subparsers.add_parser(
        "hallucination-stats",
        help="Show tool hallucination statistics",
        description="View aggregated tool hallucination metrics from poka-yoke validation"
    )
    hallucination_parser.add_argument(
        "--hours",
        type=int,
        default=None,
        help="Only show events from the last N hours"
    )
    hallucination_parser.add_argument(
        "--clear",
        action="store_true",
        help="Clear all hallucination metrics"
    )
    hallucination_parser.add_argument(
        "--older-than",
        type=int,
        default=None,
        help="When clearing, only remove events older than N hours"
    )
    hallucination_parser.set_defaults(func=cmd_hallucination_stats)

    # =========================================================================
    # dump command
    # =========================================================================

View File

@@ -198,14 +198,14 @@ class TestMemoryManager:
    def test_prefetch_skips_empty(self):
        mgr = MemoryManager()
        p1 = FakeMemoryProvider("builtin")
        p1._prefetch_result = "Has memories"
        p1._prefetch_result = "This provider has meaningful memories with enough length"
        p2 = FakeMemoryProvider("external")
        p2._prefetch_result = ""
        mgr.add_provider(p1)
        mgr.add_provider(p2)
        result = mgr.prefetch_all("query")
        assert result == "Has memories"
        assert result == "This provider has meaningful memories with enough length"

    def test_queue_prefetch_all(self):
        mgr = MemoryManager()
@@ -695,3 +695,92 @@ class TestMemoryContextFencing:
        fence_end = combined.index("</memory-context>")
        assert "Alice" in combined[fence_start:fence_end]
        assert combined.index("weather") < fence_start


class TestPrefetchFiltering:
    """Tests for _filter_prefetch_lines and related helpers."""

    def test_deduplicates_exact_lines(self):
        from agent.memory_manager import _filter_prefetch_lines
        raw = "- This is line one with enough characters\n- This is line two with enough characters\n- This is line one with enough characters\n- This is line three with enough characters"
        result = _filter_prefetch_lines(raw)
        lines = [l for l in result.splitlines() if l.strip()]
        assert len(lines) == 3
        assert "- This is line one with enough characters" in result
        assert "- This is line two with enough characters" in result
        assert "- This is line three with enough characters" in result

    def test_removes_meta_instruction_debris(self):
        from agent.memory_manager import _filter_prefetch_lines
        raw = (
            "## Fleet Memories\n"
            "- > Focus on: was a non-trivial approach used\n"
            "- > Focus on: was a non-trivial approach used\n"
            "- Actual memory content about fleet ops\n"
            "- Note: this is just a note\n"
        )
        result = _filter_prefetch_lines(raw)
        assert "Focus on" not in result
        assert "Note:" not in result
        assert "Actual memory content about fleet ops" in result
        assert "Fleet Memories" in result

    def test_removes_low_signal_short_lines(self):
        from agent.memory_manager import _filter_prefetch_lines
        raw = (
            "- \n"
            "- x\n"
            "- This is a meaningful memory entry with enough length\n"
        )
        result = _filter_prefetch_lines(raw)
        assert "- x" not in result
        assert "meaningful memory entry" in result

    def test_preserves_structured_facts(self):
        from agent.memory_manager import _filter_prefetch_lines
        raw = (
            "## Local Facts (Hologram)\n"
            "- ALEXANDER: Prefers Gitea for reports and deliverables.\n"
            "- Telegram home channel is Timmy Time.\n"
        )
        result = _filter_prefetch_lines(raw)
        assert "ALEXANDER" in result
        assert "Gitea" in result
        assert "Telegram" in result

    def test_is_meta_instruction_line(self):
        from agent.memory_manager import _is_meta_instruction_line
        assert _is_meta_instruction_line("- > Focus on: something") is True
        assert _is_meta_instruction_line("- Focus on: something") is True
        assert _is_meta_instruction_line("* Focus on: something") is True
        assert _is_meta_instruction_line("- Actual user memory content") is False
        assert _is_meta_instruction_line("ALEXANDER: Prefers Gitea") is False

    def test_is_low_signal_line(self):
        from agent.memory_manager import _is_low_signal_line
        assert _is_low_signal_line("- ") is True
        assert _is_low_signal_line("*") is True
        assert _is_low_signal_line("- x") is True
        assert _is_low_signal_line("- Short line") is True
        assert _is_low_signal_line("- This is a long meaningful memory entry") is False

    def test_prefetch_all_applies_filtering(self):
        from agent.memory_manager import MemoryManager
        mgr = MemoryManager()
        fake = FakeMemoryProvider(name="test")
        fake._prefetch_result = (
            "- > Focus on: was a non-trivial approach\n"
            "- > Focus on: was a non-trivial approach\n"
            "- Real memory fact\n"
        )
        mgr.add_provider(fake)
        result = mgr.prefetch_all("query")
        assert "Focus on" not in result
        assert "Real memory fact" in result
        assert result.count("Real memory fact") == 1

    def test_empty_prefetch_returns_empty(self):
        from agent.memory_manager import _filter_prefetch_lines
        assert _filter_prefetch_lines("") == ""
        assert _filter_prefetch_lines(" ") == ""
        assert _filter_prefetch_lines("\n\n") == ""
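
The new suite can be exercised on its own (assuming a standard pytest layout at the repo root):

pytest tests/agent/test_memory_provider.py::TestPrefetchFiltering -v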

View File

@@ -1,171 +0,0 @@
"""Tests for agent/hallucination_metrics.py — #853."""
import json
import time
from pathlib import Path

import pytest

from agent.hallucination_metrics import (
    log_hallucination_event,
    get_hallucination_stats,
    get_most_hallucinated_tools,
    clear_metrics,
    format_stats_for_display,
    _get_metrics_path,
)


@pytest.fixture(autouse=True)
def isolated_metrics(monkeypatch, tmp_path):
    """Redirect metrics to a temp file for every test."""
    metrics_dir = tmp_path / "test_hermes_home" / "metrics"
    metrics_dir.mkdir(parents=True)
    metrics_file = metrics_dir / "hallucination_metrics.jsonl"

    # Patch the get_hermes_home function to return our temp path
    def mock_get_hermes_home():
        return tmp_path / "test_hermes_home"

    monkeypatch.setattr(
        "agent.hallucination_metrics.get_hermes_home",
        mock_get_hermes_home,
    )

    # Also clear cache
    from agent.hallucination_metrics import _cache, _cache_lock
    with _cache_lock:
        _cache["events"].clear()
        _cache["session_counts"].clear()

    yield
    clear_metrics()


class TestLogEvent:
    def test_log_event_returns_dict(self):
        event = log_hallucination_event("fake_tool", "unknown_tool", "real_tool")
        assert event["tool_name"] == "fake_tool"
        assert event["error_type"] == "unknown_tool"
        assert event["suggested_name"] == "real_tool"
        assert "timestamp" in event
        assert "unix_timestamp" in event

    def test_log_event_persists_to_file(self):
        log_hallucination_event("tool_a", "unknown_tool")
        log_hallucination_event("tool_b", "invalid_params")
        path = _get_metrics_path()
        assert path.exists()
        lines = path.read_text().strip().splitlines()
        assert len(lines) == 2
        data = [json.loads(line) for line in lines]
        assert data[0]["tool_name"] == "tool_a"
        assert data[1]["tool_name"] == "tool_b"


class TestGetStats:
    def test_empty_stats(self):
        stats = get_hallucination_stats()
        assert stats["total_events"] == 0
        assert stats["unique_tools"] == 0

    def test_stats_by_tool(self):
        log_hallucination_event("tool_x", "unknown_tool", "tool_y")
        log_hallucination_event("tool_x", "unknown_tool", "tool_y")
        log_hallucination_event("tool_z", "invalid_params")
        stats = get_hallucination_stats()
        assert stats["total_events"] == 3
        assert stats["unique_tools"] == 2
        top = stats["top_hallucinated_tools"]
        assert len(top) == 2
        assert top[0]["tool"] == "tool_x"
        assert top[0]["count"] == 2
        assert top[1]["tool"] == "tool_z"
        assert top[1]["count"] == 1

    def test_stats_hours_filter(self):
        # Log old event by faking timestamp
        old_event = {
            "timestamp": "2026-01-01T00:00:00+00:00",
            "tool_name": "old_tool",
            "error_type": "unknown_tool",
            "unix_timestamp": time.time() - 48 * 3600,
        }
        path = _get_metrics_path()
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, "w") as f:
            f.write(json.dumps(old_event) + "\n")
        log_hallucination_event("new_tool", "unknown_tool")
        stats = get_hallucination_stats(hours=24)
        assert stats["total_events"] == 1
        assert stats["top_hallucinated_tools"][0]["tool"] == "new_tool"

    def test_error_type_breakdown(self):
        log_hallucination_event("t1", "unknown_tool")
        log_hallucination_event("t2", "invalid_params")
        log_hallucination_event("t3", "unknown_tool")
        stats = get_hallucination_stats()
        breakdown = stats["error_type_breakdown"]
        assert breakdown["unknown_tool"] == 2
        assert breakdown["invalid_params"] == 1


class TestGetMostHallucinated:
    def test_top_tools(self):
        for _ in range(5):
            log_hallucination_event("common_tool", "unknown_tool")
        for _ in range(2):
            log_hallucination_event("rare_tool", "unknown_tool")
        tools = get_most_hallucinated_tools(n=2)
        assert tools[0] == ("common_tool", 5)
        assert tools[1] == ("rare_tool", 2)


class TestClearMetrics:
    def test_clear_all(self):
        log_hallucination_event("t1", "unknown_tool")
        removed = clear_metrics()
        assert removed == 1
        assert _get_metrics_path().exists() is False

    def test_clear_older_than(self):
        path = _get_metrics_path()
        path.parent.mkdir(parents=True, exist_ok=True)
        old = {"tool_name": "old", "unix_timestamp": time.time() - 48 * 3600}
        new = {"tool_name": "new", "unix_timestamp": time.time()}
        with open(path, "w") as f:
            f.write(json.dumps(old) + "\n")
            f.write(json.dumps(new) + "\n")
        removed = clear_metrics(older_than_hours=24)
        assert removed == 1
        remaining = get_hallucination_stats()
        assert remaining["total_events"] == 1


class TestFormatDisplay:
    def test_format_includes_headers(self):
        log_hallucination_event("bad_tool", "unknown_tool", "good_tool")
        stats = get_hallucination_stats()
        text = format_stats_for_display(stats)
        assert "Hallucination Metrics" in text
        assert "bad_tool" in text
        assert "Total events: 1" in text


class TestAlertThreshold:
    def test_alert_after_threshold(self, monkeypatch, caplog):
        monkeypatch.setattr("agent.hallucination_metrics.ALERT_THRESHOLD", 3)
        for i in range(4):
            log_hallucination_event("persistent_tool", "unknown_tool")
        assert "HALLUCINATION ALERT" in caplog.text
        assert "persistent_tool" in caplog.text

View File

@@ -204,17 +204,6 @@ class ToolCallValidator:
        self.consecutive_failures[tool_name] = self.consecutive_failures.get(tool_name, 0) + 1
        count = self.consecutive_failures[tool_name]

        # Log to persistent metrics
        try:
            from agent.hallucination_metrics import log_hallucination_event
            log_hallucination_event(
                tool_name=tool_name,
                error_type="unknown_tool",
                suggested_name=None,
            )
        except Exception:
            pass  # Best-effort metrics logging

        if count >= self.failure_threshold:
            logger.warning(
                f"Poka-yoke circuit breaker triggered for '{tool_name}': "