Compare commits
4 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e28d16b324 | ||
|
|
bc32047610 | ||
|
|
3a24420d7d | ||
|
|
d14c1c5a56 |
@@ -55,7 +55,7 @@ FACT_STORE_SCHEMA = {
|
||||
"properties": {
|
||||
"action": {
|
||||
"type": "string",
|
||||
"enum": ["add", "search", "probe", "related", "reason", "contradict", "trace", "update", "remove", "list"],
|
||||
"enum": ["add", "search", "probe", "related", "reason", "contradict", "update", "remove", "list"],
|
||||
},
|
||||
"content": {"type": "string", "description": "Fact content (required for 'add')."},
|
||||
"query": {"type": "string", "description": "Search query (required for 'search')."},
|
||||
@@ -67,13 +67,6 @@ FACT_STORE_SCHEMA = {
|
||||
"trust_delta": {"type": "number", "description": "Trust adjustment for 'update'."},
|
||||
"min_trust": {"type": "number", "description": "Minimum trust filter (default: 0.3)."},
|
||||
"limit": {"type": "integer", "description": "Max results (default: 10)."},
|
||||
"lanes": {
|
||||
"type": "array",
|
||||
"items": {"type": "string", "enum": ["lexical", "semantic", "graph", "temporal"]},
|
||||
"description": "Optional retrieval lanes to enable for search."
|
||||
},
|
||||
"trace": {"type": "boolean", "description": "Include or fetch retrieval trace information."},
|
||||
"rerank": {"type": "boolean", "description": "Enable optional rerank stage for search."},
|
||||
},
|
||||
"required": ["action"],
|
||||
},
|
||||
@@ -126,9 +119,6 @@ class HolographicMemoryProvider(MemoryProvider):
|
||||
self._store = None
|
||||
self._retriever = None
|
||||
self._min_trust = float(self._config.get("min_trust_threshold", 0.3))
|
||||
self._retrieval_lanes = self._parse_retrieval_lanes(self._config.get("retrieval_lanes"))
|
||||
self._enable_rerank = str(self._config.get("enable_rerank", "true")).lower() != "false"
|
||||
self._last_retrieval_trace: dict | None = None
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
@@ -154,14 +144,6 @@ class HolographicMemoryProvider(MemoryProvider):
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def _parse_retrieval_lanes(self, value) -> list[str]:
|
||||
if isinstance(value, str):
|
||||
value = [part.strip() for part in value.split(",") if part.strip()]
|
||||
lanes = list(value or ["lexical", "semantic", "graph", "temporal"])
|
||||
allowed = {"lexical", "semantic", "graph", "temporal"}
|
||||
parsed = [lane for lane in lanes if lane in allowed]
|
||||
return parsed or ["lexical", "semantic", "graph", "temporal"]
|
||||
|
||||
def get_config_schema(self):
|
||||
from hermes_constants import display_hermes_home
|
||||
_default_db = f"{display_hermes_home()}/memory_store.db"
|
||||
@@ -170,10 +152,6 @@ class HolographicMemoryProvider(MemoryProvider):
|
||||
{"key": "auto_extract", "description": "Auto-extract facts at session end", "default": "false", "choices": ["true", "false"]},
|
||||
{"key": "default_trust", "description": "Default trust score for new facts", "default": "0.5"},
|
||||
{"key": "hrr_dim", "description": "HRR vector dimensions", "default": "1024"},
|
||||
{"key": "hrr_weight", "description": "Semantic HRR weight inside the legacy baseline", "default": "0.3"},
|
||||
{"key": "temporal_decay_half_life", "description": "Temporal decay half-life in days (0 disables baseline decay)", "default": "0"},
|
||||
{"key": "retrieval_lanes", "description": "Comma-separated retrieval lanes (lexical,semantic,graph,temporal)", "default": "lexical,semantic,graph,temporal"},
|
||||
{"key": "enable_rerank", "description": "Enable optional local rerank stage", "default": "true", "choices": ["true", "false"]},
|
||||
]
|
||||
|
||||
def initialize(self, session_id: str, **kwargs) -> None:
|
||||
@@ -191,8 +169,6 @@ class HolographicMemoryProvider(MemoryProvider):
|
||||
hrr_dim = int(self._config.get("hrr_dim", 1024))
|
||||
hrr_weight = float(self._config.get("hrr_weight", 0.3))
|
||||
temporal_decay = int(self._config.get("temporal_decay_half_life", 0))
|
||||
self._retrieval_lanes = self._parse_retrieval_lanes(self._config.get("retrieval_lanes", self._retrieval_lanes))
|
||||
self._enable_rerank = str(self._config.get("enable_rerank", self._enable_rerank)).lower() != "false"
|
||||
|
||||
self._store = MemoryStore(db_path=db_path, default_trust=default_trust, hrr_dim=hrr_dim)
|
||||
self._retriever = FactRetriever(
|
||||
@@ -200,8 +176,6 @@ class HolographicMemoryProvider(MemoryProvider):
|
||||
temporal_decay_half_life=temporal_decay,
|
||||
hrr_weight=hrr_weight,
|
||||
hrr_dim=hrr_dim,
|
||||
retrieval_lanes=self._retrieval_lanes,
|
||||
enable_rerank=self._enable_rerank,
|
||||
)
|
||||
self._session_id = session_id
|
||||
|
||||
@@ -232,23 +206,13 @@ class HolographicMemoryProvider(MemoryProvider):
|
||||
if not self._retriever or not query:
|
||||
return ""
|
||||
try:
|
||||
payload = self._retriever.search_with_trace(
|
||||
query,
|
||||
min_trust=self._min_trust,
|
||||
limit=5,
|
||||
lanes=self._retrieval_lanes,
|
||||
rerank=self._enable_rerank,
|
||||
)
|
||||
self._last_retrieval_trace = payload["trace"]
|
||||
results = payload["results"]
|
||||
results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
|
||||
if not results:
|
||||
return ""
|
||||
lines = []
|
||||
for r in results:
|
||||
trust = r.get("trust_score", r.get("trust", 0))
|
||||
lanes = ",".join(r.get("matched_lanes", []))
|
||||
lane_suffix = f" [{lanes}]" if lanes else ""
|
||||
lines.append(f"- [{trust:.1f}] {r.get('content', '')}{lane_suffix}")
|
||||
lines.append(f"- [{trust:.1f}] {r.get('content', '')}")
|
||||
return "## Holographic Memory\n" + "\n".join(lines)
|
||||
except Exception as e:
|
||||
logger.debug("Holographic prefetch failed: %s", e)
|
||||
@@ -306,39 +270,14 @@ class HolographicMemoryProvider(MemoryProvider):
|
||||
return json.dumps({"fact_id": fact_id, "status": "added"})
|
||||
|
||||
elif action == "search":
|
||||
lanes = args.get("lanes")
|
||||
rerank = args.get("rerank")
|
||||
with_trace = bool(args.get("trace", False))
|
||||
if with_trace:
|
||||
payload = retriever.search_with_trace(
|
||||
args["query"],
|
||||
category=args.get("category"),
|
||||
min_trust=float(args.get("min_trust", self._min_trust)),
|
||||
limit=int(args.get("limit", 10)),
|
||||
lanes=lanes,
|
||||
rerank=rerank,
|
||||
)
|
||||
self._last_retrieval_trace = payload["trace"]
|
||||
return json.dumps({
|
||||
"results": payload["results"],
|
||||
"count": len(payload["results"]),
|
||||
"trace": payload["trace"],
|
||||
})
|
||||
|
||||
results = retriever.search(
|
||||
args["query"],
|
||||
category=args.get("category"),
|
||||
min_trust=float(args.get("min_trust", self._min_trust)),
|
||||
limit=int(args.get("limit", 10)),
|
||||
lanes=lanes,
|
||||
rerank=rerank,
|
||||
)
|
||||
self._last_retrieval_trace = retriever.last_trace
|
||||
return json.dumps({"results": results, "count": len(results)})
|
||||
|
||||
elif action == "trace":
|
||||
return json.dumps({"trace": self._last_retrieval_trace or retriever.last_trace or {}})
|
||||
|
||||
elif action == "probe":
|
||||
results = retriever.probe(
|
||||
args["entity"],
|
||||
@@ -384,8 +323,7 @@ class HolographicMemoryProvider(MemoryProvider):
|
||||
return json.dumps({"updated": updated})
|
||||
|
||||
elif action == "remove":
|
||||
removed = store.remove_fact(int(args["fact_id"])
|
||||
)
|
||||
removed = store.remove_fact(int(args["fact_id"]))
|
||||
return json.dumps({"removed": removed})
|
||||
|
||||
elif action == "list":
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -83,7 +83,6 @@ _TRUST_MAX = 1.0
|
||||
|
||||
# Entity extraction patterns
|
||||
_RE_CAPITALIZED = re.compile(r'\b([A-Z][a-z]+(?:\s+[A-Z][a-z]+)+)\b')
|
||||
_RE_SINGLE_PROPER = re.compile(r'\b([A-Z][A-Za-z0-9_-]{2,})\b')
|
||||
_RE_DOUBLE_QUOTE = re.compile(r'"([^"]+)"')
|
||||
_RE_SINGLE_QUOTE = re.compile(r"'([^']+)'")
|
||||
_RE_AKA = re.compile(
|
||||
@@ -415,13 +414,6 @@ class MemoryStore:
|
||||
for m in _RE_CAPITALIZED.finditer(text):
|
||||
_add(m.group(1))
|
||||
|
||||
skip_singletons = {"The", "This", "That", "These", "Those", "And", "But", "For", "With"}
|
||||
for m in _RE_SINGLE_PROPER.finditer(text):
|
||||
candidate = m.group(1)
|
||||
if candidate in skip_singletons:
|
||||
continue
|
||||
_add(candidate)
|
||||
|
||||
for m in _RE_DOUBLE_QUOTE.finditer(text):
|
||||
_add(m.group(1))
|
||||
|
||||
|
||||
56
tests/fixtures/holographic_recall_matrix.json
vendored
56
tests/fixtures/holographic_recall_matrix.json
vendored
@@ -1,56 +0,0 @@
|
||||
{
|
||||
"facts": [
|
||||
{
|
||||
"content": "Alexander Whitestone aka Rockachopa.",
|
||||
"category": "general",
|
||||
"tags": "identity alias"
|
||||
},
|
||||
{
|
||||
"content": "Rockachopa uses Ansible playbooks for sovereign rollouts.",
|
||||
"category": "project",
|
||||
"tags": "ansible playbooks rollout"
|
||||
},
|
||||
{
|
||||
"content": "The provider is anthropic/claude-haiku-4-5.",
|
||||
"category": "project",
|
||||
"tags": "provider default",
|
||||
"updated_at": "2026-01-01T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"content": "Correction: the provider is mimo-v2-pro.",
|
||||
"category": "project",
|
||||
"tags": "provider current",
|
||||
"updated_at": "2026-04-20T00:00:00Z"
|
||||
},
|
||||
{
|
||||
"content": "Ezra operates the BURN2 lane for forge work.",
|
||||
"category": "project",
|
||||
"tags": "ezra burn2 forge lane"
|
||||
},
|
||||
{
|
||||
"content": "BURN2 handles forge triage and review.",
|
||||
"category": "project",
|
||||
"tags": "forge triage review"
|
||||
}
|
||||
],
|
||||
"queries": [
|
||||
{
|
||||
"name": "semantic_alias_graph",
|
||||
"query": "What automation does Alexander Whitestone use for deploys?",
|
||||
"expected_substring": "Ansible playbooks",
|
||||
"top_k": 1
|
||||
},
|
||||
{
|
||||
"name": "temporal_correction",
|
||||
"query": "What provider should we use?",
|
||||
"expected_substring": "mimo-v2-pro",
|
||||
"top_k": 1
|
||||
},
|
||||
{
|
||||
"name": "graph_lane",
|
||||
"query": "Which forge lane does Ezra operate?",
|
||||
"expected_substring": "BURN2 lane",
|
||||
"top_k": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,116 +0,0 @@
|
||||
"""Tests for multi-path holographic retrieval fusion and traceability."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
|
||||
|
||||
from plugins.memory.holographic import HolographicMemoryProvider
|
||||
from plugins.memory.holographic.retrieval import FactRetriever, format_benchmark_report
|
||||
from plugins.memory.holographic.store import MemoryStore
|
||||
|
||||
_FIXTURE_PATH = Path(__file__).resolve().parents[2] / "fixtures" / "holographic_recall_matrix.json"
|
||||
|
||||
|
||||
def _fixture() -> dict:
|
||||
return json.loads(_FIXTURE_PATH.read_text())
|
||||
|
||||
|
||||
def _seed_store(tmp_path) -> MemoryStore:
|
||||
store = MemoryStore(db_path=tmp_path / "memory_store.db")
|
||||
for fact in _fixture()["facts"]:
|
||||
fact_id = store.add_fact(fact["content"], category=fact["category"], tags=fact.get("tags", ""))
|
||||
if fact.get("updated_at"):
|
||||
store._conn.execute(
|
||||
"UPDATE facts SET created_at = ?, updated_at = ? WHERE fact_id = ?",
|
||||
(fact["updated_at"], fact["updated_at"], fact_id),
|
||||
)
|
||||
store._conn.commit()
|
||||
return store
|
||||
|
||||
|
||||
class TestMultiPathRetrieval:
|
||||
def test_lane_toggle_and_trace_contributions(self, tmp_path):
|
||||
store = _seed_store(tmp_path)
|
||||
retriever = FactRetriever(store=store)
|
||||
|
||||
payload = retriever.search_with_trace(
|
||||
"Which forge lane does Ezra operate?",
|
||||
limit=3,
|
||||
lanes=["lexical", "graph"],
|
||||
)
|
||||
|
||||
assert payload["trace"]["lanes_run"] == ["lexical", "graph"]
|
||||
assert payload["results"]
|
||||
top = payload["results"][0]
|
||||
assert "BURN2 lane" in top["content"]
|
||||
assert "graph" in top["lane_contributions"]
|
||||
assert set(top["lane_contributions"]).issubset({"lexical", "graph"})
|
||||
|
||||
def test_trace_available_for_failed_recall(self, tmp_path):
|
||||
store = _seed_store(tmp_path)
|
||||
retriever = FactRetriever(store=store)
|
||||
|
||||
payload = retriever.search_with_trace(
|
||||
"nonexistent memory topic xyz123",
|
||||
limit=3,
|
||||
lanes=["lexical", "semantic", "graph", "temporal"],
|
||||
)
|
||||
|
||||
assert payload["results"] == []
|
||||
assert payload["trace"]["fused_count"] == 0
|
||||
assert payload["trace"]["lane_hits"]["lexical"] == 0
|
||||
assert payload["trace"]["lane_hits"]["semantic"] == 0
|
||||
|
||||
def test_benchmark_prompt_matrix_shows_gain_over_baseline(self, tmp_path):
|
||||
store = _seed_store(tmp_path)
|
||||
retriever = FactRetriever(store=store)
|
||||
report = retriever.benchmark_prompt_matrix(_fixture()["queries"], limit=3)
|
||||
|
||||
assert report["fused_top1_hits"] > report["baseline_top1_hits"]
|
||||
assert report["improvement"] > 0
|
||||
|
||||
rendered = format_benchmark_report(report)
|
||||
assert "Prompt matrix benchmark" in rendered
|
||||
assert "semantic_alias_graph" in rendered
|
||||
assert "improvement" in rendered.lower()
|
||||
|
||||
|
||||
class TestHolographicProviderTrace:
|
||||
def test_prefetch_records_trace_and_trace_action_returns_it(self, tmp_path):
|
||||
provider = HolographicMemoryProvider(
|
||||
config={
|
||||
"db_path": str(tmp_path / "provider.db"),
|
||||
"retrieval_lanes": ["lexical", "semantic", "graph", "temporal"],
|
||||
"enable_rerank": True,
|
||||
}
|
||||
)
|
||||
provider.initialize("test-session")
|
||||
|
||||
seed_store = _seed_store(tmp_path / "seed")
|
||||
rows = seed_store.list_facts(min_trust=0.0, limit=20)
|
||||
for row in rows:
|
||||
provider._store.add_fact(row["content"], category=row["category"], tags=row.get("tags", ""))
|
||||
if row["content"].startswith("The provider is anthropic"):
|
||||
provider._store._conn.execute(
|
||||
"UPDATE facts SET created_at = ?, updated_at = ? WHERE content = ?",
|
||||
("2026-01-01T00:00:00Z", "2026-01-01T00:00:00Z", row["content"]),
|
||||
)
|
||||
elif row["content"].startswith("Correction: the provider is mimo"):
|
||||
provider._store._conn.execute(
|
||||
"UPDATE facts SET created_at = ?, updated_at = ? WHERE content = ?",
|
||||
("2026-04-20T00:00:00Z", "2026-04-20T00:00:00Z", row["content"]),
|
||||
)
|
||||
provider._store._conn.commit()
|
||||
|
||||
block = provider.prefetch("What provider should we use?")
|
||||
assert "Holographic Memory" in block
|
||||
assert "mimo-v2-pro" in block
|
||||
|
||||
trace_payload = json.loads(provider.handle_tool_call("fact_store", {"action": "trace"}))
|
||||
assert trace_payload["trace"]["query"] == "What provider should we use?"
|
||||
assert trace_payload["trace"]["rerank_applied"] in {True, False}
|
||||
assert trace_payload["trace"]["lane_hits"]["temporal"] >= 1
|
||||
@@ -148,3 +148,184 @@ class TestStrategyNameSurfaced:
|
||||
assert count == 0
|
||||
assert strategy is None
|
||||
assert err is not None
|
||||
|
||||
|
||||
class TestEscapeDriftGuard:
|
||||
"""Tests for the escape-drift guard that catches bash/JSON serialization
|
||||
artifacts where an apostrophe gets prefixed with a spurious backslash
|
||||
in tool-call transport.
|
||||
"""
|
||||
|
||||
def test_drift_blocked_apostrophe(self):
|
||||
"""File has ', old_string and new_string both have \\' — classic
|
||||
tool-call drift. Guard must block with a helpful error instead of
|
||||
writing \\' literals into source code."""
|
||||
content = "x = \"hello there\"\n"
|
||||
# Simulate transport-corrupted old_string and new_string where an
|
||||
# apostrophe-like context got prefixed with a backslash. The content
|
||||
# itself has no apostrophe, but both strings do — matching via
|
||||
# whitespace/anchor strategies would otherwise succeed.
|
||||
old_string = "x = \"hello there\" # don\\'t edit\n"
|
||||
new_string = "x = \"hi there\" # don\\'t edit\n"
|
||||
# This particular pair won't match anything, so it exits via
|
||||
# no-match path. Build a case where a non-exact strategy DOES match.
|
||||
content = "line\n x = 1\nline"
|
||||
old_string = "line\n x = \\'a\\'\nline"
|
||||
new_string = "line\n x = \\'b\\'\nline"
|
||||
new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
|
||||
assert count == 0
|
||||
assert err is not None and "Escape-drift" in err
|
||||
assert "backslash" in err.lower()
|
||||
assert new == content # file untouched
|
||||
|
||||
def test_drift_blocked_double_quote(self):
|
||||
"""Same idea but with \\" drift instead of \\'."""
|
||||
content = 'line\n x = 1\nline'
|
||||
old_string = 'line\n x = \\"a\\"\nline'
|
||||
new_string = 'line\n x = \\"b\\"\nline'
|
||||
new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
|
||||
assert count == 0
|
||||
assert err is not None and "Escape-drift" in err
|
||||
|
||||
def test_drift_allowed_when_file_genuinely_has_backslash_escapes(self):
|
||||
"""If the file already contains \\' (e.g. inside an existing escaped
|
||||
string), the model is legitimately preserving it. Guard must NOT
|
||||
fire."""
|
||||
content = "line\n x = \\'a\\'\nline"
|
||||
old_string = "line\n x = \\'a\\'\nline"
|
||||
new_string = "line\n x = \\'b\\'\nline"
|
||||
new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
|
||||
assert err is None
|
||||
assert count == 1
|
||||
assert "\\'b\\'" in new
|
||||
|
||||
def test_drift_allowed_on_exact_match(self):
|
||||
"""Exact matches bypass the drift guard entirely — if the file
|
||||
really contains the exact bytes old_string specified, it's not
|
||||
drift."""
|
||||
content = "hello \\'world\\'"
|
||||
new, count, strategy, err = fuzzy_find_and_replace(
|
||||
content, "hello \\'world\\'", "hello \\'there\\'"
|
||||
)
|
||||
assert err is None
|
||||
assert count == 1
|
||||
assert strategy == "exact"
|
||||
|
||||
def test_drift_allowed_when_adding_escaped_strings(self):
|
||||
"""Model is adding new content with \\' that wasn't in the original.
|
||||
old_string has no \\', so guard doesn't fire."""
|
||||
content = "line1\nline2\nline3"
|
||||
old_string = "line1\nline2\nline3"
|
||||
new_string = "line1\nprint(\\'added\\')\nline2\nline3"
|
||||
new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
|
||||
assert err is None
|
||||
assert count == 1
|
||||
assert "\\'added\\'" in new
|
||||
|
||||
def test_no_drift_check_when_new_string_lacks_suspect_chars(self):
|
||||
"""Fast-path: if new_string has no \\' or \\", guard must not
|
||||
fire even on fuzzy match."""
|
||||
content = "def foo():\n pass" # extra space ignored by line_trimmed
|
||||
old_string = "def foo():\n pass"
|
||||
new_string = "def bar():\n return 1"
|
||||
new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
|
||||
assert err is None
|
||||
assert count == 1
|
||||
|
||||
|
||||
class TestFindClosestLines:
|
||||
def setup_method(self):
|
||||
from tools.fuzzy_match import find_closest_lines
|
||||
self.find_closest_lines = find_closest_lines
|
||||
|
||||
def test_finds_similar_line(self):
|
||||
content = "def foo():\n pass\ndef bar():\n return 1\n"
|
||||
result = self.find_closest_lines("def baz():", content)
|
||||
assert "def foo" in result or "def bar" in result
|
||||
|
||||
def test_returns_empty_for_no_match(self):
|
||||
content = "completely different content here"
|
||||
result = self.find_closest_lines("xyzzy_no_match_possible_!!!", content)
|
||||
assert result == ""
|
||||
|
||||
def test_returns_empty_for_empty_inputs(self):
|
||||
assert self.find_closest_lines("", "some content") == ""
|
||||
assert self.find_closest_lines("old string", "") == ""
|
||||
|
||||
def test_includes_context_lines(self):
|
||||
content = "line1\nline2\ndef target():\n pass\nline5\n"
|
||||
result = self.find_closest_lines("def target():", content)
|
||||
assert "target" in result
|
||||
|
||||
def test_includes_line_numbers(self):
|
||||
content = "line1\nline2\ndef foo():\n pass\n"
|
||||
result = self.find_closest_lines("def foo():", content)
|
||||
# Should include line numbers in format "N| content"
|
||||
assert "|" in result
|
||||
|
||||
|
||||
class TestFormatNoMatchHint:
|
||||
"""Gating tests for format_no_match_hint — the shared helper that decides
|
||||
whether a 'Did you mean?' snippet should be appended to an error.
|
||||
"""
|
||||
|
||||
def setup_method(self):
|
||||
from tools.fuzzy_match import format_no_match_hint
|
||||
self.fmt = format_no_match_hint
|
||||
|
||||
def test_fires_on_could_not_find_with_match(self):
|
||||
"""Classic no-match: similar content exists → hint fires."""
|
||||
content = "def foo():\n pass\ndef bar():\n pass\n"
|
||||
result = self.fmt(
|
||||
"Could not find a match for old_string in the file",
|
||||
0, "def baz():", content,
|
||||
)
|
||||
assert "Did you mean" in result
|
||||
assert "foo" in result or "bar" in result
|
||||
|
||||
def test_silent_on_ambiguous_match_error(self):
|
||||
"""'Found N matches' is not a missing-match failure — no hint."""
|
||||
content = "aaa bbb aaa\n"
|
||||
result = self.fmt(
|
||||
"Found 2 matches for old_string. Provide more context to make it unique, or use replace_all=True.",
|
||||
0, "aaa", content,
|
||||
)
|
||||
assert result == ""
|
||||
|
||||
def test_silent_on_escape_drift_error(self):
|
||||
"""Escape-drift errors are intentional blocks — hint would mislead."""
|
||||
content = "x = 1\n"
|
||||
result = self.fmt(
|
||||
"Escape-drift detected: old_string and new_string contain the literal sequence '\\\\''...",
|
||||
0, "x = \\'1\\'", content,
|
||||
)
|
||||
assert result == ""
|
||||
|
||||
def test_silent_on_identical_strings(self):
|
||||
"""old_string == new_string — hint irrelevant."""
|
||||
result = self.fmt(
|
||||
"old_string and new_string are identical",
|
||||
0, "foo", "foo bar\n",
|
||||
)
|
||||
assert result == ""
|
||||
|
||||
def test_silent_when_match_count_nonzero(self):
|
||||
"""If match succeeded, we shouldn't be in the error path — defense in depth."""
|
||||
result = self.fmt(
|
||||
"Could not find a match for old_string in the file",
|
||||
1, "foo", "foo bar\n",
|
||||
)
|
||||
assert result == ""
|
||||
|
||||
def test_silent_on_none_error(self):
|
||||
"""No error at all — no hint."""
|
||||
result = self.fmt(None, 0, "foo", "bar\n")
|
||||
assert result == ""
|
||||
|
||||
def test_silent_when_no_similar_content(self):
|
||||
"""Even for a valid no-match error, skip hint when nothing similar exists."""
|
||||
result = self.fmt(
|
||||
"Could not find a match for old_string in the file",
|
||||
0, "totally_unique_xyzzy_qux", "abc\nxyz\n",
|
||||
)
|
||||
assert result == ""
|
||||
|
||||
114
tests/tools/test_patch_did_you_mean.py
Normal file
114
tests/tools/test_patch_did_you_mean.py
Normal file
@@ -0,0 +1,114 @@
|
||||
import json
|
||||
import os
|
||||
import textwrap
|
||||
from pathlib import Path
|
||||
|
||||
import tools.skill_manager_tool as skill_manager_tool
|
||||
from tools.file_tools import patch_tool
|
||||
from tools.skill_manager_tool import _create_skill, _patch_skill
|
||||
|
||||
|
||||
def _disable_patch_tool_guards(monkeypatch):
|
||||
monkeypatch.setattr("tools.file_tools._check_sensitive_path", lambda _path: None)
|
||||
monkeypatch.setattr("tools.file_tools._check_file_staleness", lambda _path, _task_id: None)
|
||||
monkeypatch.setattr("tools.file_tools._log_and_check_conflict", lambda _path, _task_id, _action: None)
|
||||
|
||||
|
||||
def test_patch_tool_replace_no_match_shows_rich_hint_without_legacy_hint(tmp_path, monkeypatch):
|
||||
_disable_patch_tool_guards(monkeypatch)
|
||||
sample = tmp_path / "sample.py"
|
||||
sample.write_text("def foo():\n return 1\n\ndef bar():\n return 2\n", encoding="utf-8")
|
||||
|
||||
raw = patch_tool(
|
||||
mode="replace",
|
||||
path=str(sample),
|
||||
old_string="def barycentric():",
|
||||
new_string="def barycentric_new():",
|
||||
task_id="qa960-replace-rich-hint",
|
||||
)
|
||||
|
||||
result = json.loads(raw)
|
||||
assert result["success"] is False
|
||||
assert "Could not find a match" in result["error"]
|
||||
assert "Did you mean one of these sections?" in result["error"]
|
||||
assert "def bar():" in result["error"] or "def foo():" in result["error"]
|
||||
assert "[Hint:" not in raw
|
||||
|
||||
|
||||
def test_patch_tool_replace_ambiguous_error_does_not_show_did_you_mean(tmp_path, monkeypatch):
|
||||
_disable_patch_tool_guards(monkeypatch)
|
||||
sample = tmp_path / "sample.py"
|
||||
sample.write_text("aaa\nbbb\naaa\n", encoding="utf-8")
|
||||
|
||||
raw = patch_tool(
|
||||
mode="replace",
|
||||
path=str(sample),
|
||||
old_string="aaa",
|
||||
new_string="ccc",
|
||||
task_id="qa960-replace-ambiguous",
|
||||
)
|
||||
|
||||
result = json.loads(raw)
|
||||
assert result["success"] is False
|
||||
assert "Found 2 matches" in result["error"]
|
||||
assert "Did you mean one of these sections?" not in result["error"]
|
||||
assert "[Hint:" not in raw
|
||||
|
||||
|
||||
def test_patch_tool_v4a_no_match_shows_rich_hint(tmp_path, monkeypatch):
|
||||
_disable_patch_tool_guards(monkeypatch)
|
||||
sample = tmp_path / "sample.py"
|
||||
sample.write_text("def foo():\n return 1\n", encoding="utf-8")
|
||||
|
||||
patch = textwrap.dedent(
|
||||
f"""\
|
||||
*** Begin Patch
|
||||
*** Update File: {sample}
|
||||
@@
|
||||
-def barycentric():
|
||||
+def barycentric_new():
|
||||
*** End Patch
|
||||
"""
|
||||
)
|
||||
|
||||
raw = patch_tool(mode="patch", patch=patch, task_id="qa960-v4a-rich-hint")
|
||||
result = json.loads(raw)
|
||||
assert result["success"] is False
|
||||
assert "Patch validation failed" in result["error"]
|
||||
assert "Did you mean one of these sections?" in result["error"]
|
||||
assert "def foo():" in result["error"]
|
||||
|
||||
|
||||
def test_skill_patch_no_match_shows_rich_hint(tmp_path, monkeypatch):
|
||||
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
|
||||
skills_dir = tmp_path / "skills"
|
||||
skills_dir.mkdir(parents=True, exist_ok=True)
|
||||
monkeypatch.setattr(skill_manager_tool, "SKILLS_DIR", skills_dir)
|
||||
monkeypatch.setattr(skill_manager_tool, "_security_scan_skill", lambda _skill_dir: None)
|
||||
|
||||
_create_skill(
|
||||
"qa-skill",
|
||||
textwrap.dedent(
|
||||
"""\
|
||||
---
|
||||
name: qa-skill
|
||||
description: test
|
||||
---
|
||||
|
||||
Step 1: Do the thing.
|
||||
Step 2: Verify the thing.
|
||||
"""
|
||||
),
|
||||
)
|
||||
|
||||
result = _patch_skill(
|
||||
"qa-skill",
|
||||
"Step 1: Do the production rollout.",
|
||||
"Step 1: Updated.",
|
||||
)
|
||||
|
||||
assert result["success"] is False
|
||||
assert "Could not find a match" in result["error"]
|
||||
assert "Did you mean one of these sections?" in result["error"]
|
||||
assert "Step 1: Do the thing." in result["error"]
|
||||
assert "file_preview" in result
|
||||
@@ -757,12 +757,14 @@ class ShellFileOperations(FileOperations):
|
||||
content, old_string, new_string, replace_all
|
||||
)
|
||||
|
||||
if error:
|
||||
return PatchResult(error=error)
|
||||
|
||||
if match_count == 0:
|
||||
return PatchResult(error=f"Could not find match for old_string in {path}")
|
||||
|
||||
if error or match_count == 0:
|
||||
err_msg = error or f"Could not find match for old_string in {path}"
|
||||
try:
|
||||
from tools.fuzzy_match import format_no_match_hint
|
||||
err_msg += format_no_match_hint(err_msg, match_count, old_string, content)
|
||||
except Exception:
|
||||
pass
|
||||
return PatchResult(error=err_msg)
|
||||
# Write back
|
||||
write_result = self.write_file(path, new_content)
|
||||
if write_result.error:
|
||||
|
||||
@@ -8,6 +8,7 @@ import os
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
from tools.binary_extensions import has_binary_extension
|
||||
from tools.file_operations import ShellFileOperations
|
||||
from agent.redact import redact_sensitive_text
|
||||
@@ -690,8 +691,11 @@ def patch_tool(mode: str = "replace", path: str = None, old_string: str = None,
|
||||
result_json = json.dumps(result_dict, ensure_ascii=False)
|
||||
# Hint when old_string not found — saves iterations where the agent
|
||||
# retries with stale content instead of re-reading the file.
|
||||
# Suppressed when patch_replace already attached a rich "Did you mean?"
|
||||
# snippet (which is strictly more useful than the generic hint).
|
||||
if result_dict.get("error") and "Could not find" in str(result_dict["error"]):
|
||||
result_json += "\n\n[Hint: old_string not found. Use read_file to verify the current content, or search_files to locate the text.]"
|
||||
if "Did you mean one of these sections?" not in str(result_dict["error"]):
|
||||
result_json += "\n\n[Hint: old_string not found. Use read_file to verify the current content, or search_files to locate the text.]"
|
||||
return result_json
|
||||
except Exception as e:
|
||||
return tool_error(str(e))
|
||||
|
||||
@@ -93,6 +93,21 @@ def fuzzy_find_and_replace(content: str, old_string: str, new_string: str,
|
||||
f"Provide more context to make it unique, or use replace_all=True."
|
||||
)
|
||||
|
||||
# Escape-drift guard: when the matched strategy is NOT `exact`,
|
||||
# we matched via some form of normalization. If new_string
|
||||
# contains shell/JSON-style escape sequences (\\' or \\") that
|
||||
# would be written literally into the file but the matched
|
||||
# region of the file has no such sequences, this is almost
|
||||
# certainly tool-call serialization drift — the model typed
|
||||
# an apostrophe/quote and the transport added a stray
|
||||
# backslash. Writing new_string as-is would corrupt the file.
|
||||
# Block with a helpful error so the model re-reads and retries
|
||||
# instead of the caller silently persisting garbage (or not).
|
||||
if strategy_name != "exact":
|
||||
drift_err = _detect_escape_drift(content, matches, old_string, new_string)
|
||||
if drift_err:
|
||||
return content, 0, None, drift_err
|
||||
|
||||
# Perform replacement
|
||||
new_content = _apply_replacements(content, matches, new_string)
|
||||
return new_content, len(matches), strategy_name, None
|
||||
@@ -101,6 +116,46 @@ def fuzzy_find_and_replace(content: str, old_string: str, new_string: str,
|
||||
return content, 0, None, "Could not find a match for old_string in the file"
|
||||
|
||||
|
||||
def _detect_escape_drift(content: str, matches: List[Tuple[int, int]],
|
||||
old_string: str, new_string: str) -> Optional[str]:
|
||||
"""Detect tool-call escape-drift artifacts in new_string.
|
||||
|
||||
Looks for ``\\'`` or ``\\"`` sequences that are present in both
|
||||
old_string and new_string (i.e. the model copy-pasted them as "context"
|
||||
it intended to preserve) but don't exist in the matched region of the
|
||||
file. That pattern indicates the transport layer inserted spurious
|
||||
shell-style escapes around apostrophes or quotes — writing new_string
|
||||
verbatim would literally insert ``\\'`` into source code.
|
||||
|
||||
Returns an error string if drift is detected, None otherwise.
|
||||
"""
|
||||
# Cheap pre-check: bail out unless new_string actually contains a
|
||||
# suspect escape sequence. This keeps the guard free for all the
|
||||
# common, correct cases.
|
||||
if "\\'" not in new_string and '\\"' not in new_string:
|
||||
return None
|
||||
|
||||
# Aggregate matched regions of the file — that's what new_string will
|
||||
# replace. If the suspect escapes are present there already, the
|
||||
# model is genuinely preserving them (valid for some languages /
|
||||
# escaped strings); accept the patch.
|
||||
matched_regions = "".join(content[start:end] for start, end in matches)
|
||||
|
||||
for suspect in ("\\'", '\\"'):
|
||||
if suspect in new_string and suspect in old_string and suspect not in matched_regions:
|
||||
plain = suspect[1] # "'" or '"'
|
||||
return (
|
||||
f"Escape-drift detected: old_string and new_string contain "
|
||||
f"the literal sequence {suspect!r} but the matched region of "
|
||||
f"the file does not. This is almost always a tool-call "
|
||||
f"serialization artifact where an apostrophe or quote got "
|
||||
f"prefixed with a spurious backslash. Re-read the file with "
|
||||
f"read_file and pass old_string/new_string without "
|
||||
f"backslash-escaping {plain!r} characters."
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
def _apply_replacements(content: str, matches: List[Tuple[int, int]], new_string: str) -> str:
|
||||
"""
|
||||
Apply replacements at the given positions.
|
||||
@@ -564,3 +619,86 @@ def _map_normalized_positions(original: str, normalized: str,
|
||||
original_matches.append((orig_start, min(orig_end, len(original))))
|
||||
|
||||
return original_matches
|
||||
|
||||
|
||||
def find_closest_lines(old_string: str, content: str, context_lines: int = 2, max_results: int = 3) -> str:
    """Locate the content lines most similar to old_string for "did you mean?" feedback.

    Returns a formatted, line-numbered excerpt of the best candidate lines
    with surrounding context, or "" when nothing is similar enough to help.
    """
    if not old_string or not content:
        return ""

    needle_lines = old_string.splitlines()
    haystack = content.splitlines()
    if not needle_lines or not haystack:
        return ""

    # Anchor the similarity search on the first non-blank line of old_string.
    anchor = needle_lines[0].strip()
    if not anchor:
        nonblank = [ln.strip() for ln in needle_lines if ln.strip()]
        if not nonblank:
            return ""
        anchor = nonblank[0]

    # Score every non-blank content line against the anchor; keep only
    # plausible candidates (ratio above the 0.3 noise floor).
    scored = [
        (ratio, idx)
        for idx, text in enumerate(haystack)
        if text.strip()
        and (ratio := SequenceMatcher(None, anchor, text.strip()).ratio()) > 0.3
    ]
    if not scored:
        return ""

    # Best matches first; stable sort keeps earlier lines ahead on ties.
    best = sorted(scored, key=lambda pair: pair[0], reverse=True)[:max_results]

    excerpts = []
    emitted = set()
    for _score, hit in best:
        lo = max(0, hit - context_lines)
        hi = min(len(haystack), hit + len(needle_lines) + context_lines)
        if (lo, hi) in emitted:
            # Identical window already rendered for a previous candidate.
            continue
        emitted.add((lo, hi))
        numbered = [f"{num + 1:4d}| {haystack[num]}" for num in range(lo, hi)]
        excerpts.append("\n".join(numbered))

    return "\n---\n".join(excerpts) if excerpts else ""
|
||||
|
||||
|
||||
def format_no_match_hint(error: Optional[str], match_count: int,
                         old_string: str, content: str) -> str:
    """Build a "Did you mean...?" suffix for plain no-match edit failures.

    Only fires when the failure really was "old_string not found": the
    ambiguous-match ("Found N matches"), escape-drift, and identical-strings
    failures also report ``match_count == 0``, but a "did you mean?" hint
    would be misleading there — they failed for unrelated reasons.

    Returns "" whenever there is nothing useful to append.
    """
    is_plain_no_match = (
        match_count == 0
        and bool(error)
        and error.startswith("Could not find")
    )
    if not is_plain_no_match:
        return ""
    suggestion = find_closest_lines(old_string, content)
    if not suggestion:
        return ""
    return f"\n\nDid you mean one of these sections?\n{suggestion}"
|
||||
|
||||
@@ -290,10 +290,16 @@ def _validate_operations(
|
||||
)
|
||||
if count == 0:
|
||||
label = f"'{hunk.context_hint}'" if hunk.context_hint else "(no hint)"
|
||||
errors.append(
|
||||
msg = (
|
||||
f"{op.file_path}: hunk {label} not found"
|
||||
+ (f" — {match_error}" if match_error else "")
|
||||
)
|
||||
try:
|
||||
from tools.fuzzy_match import format_no_match_hint
|
||||
msg += format_no_match_hint(match_error, count, search_pattern, simulated)
|
||||
except Exception:
|
||||
pass
|
||||
errors.append(msg)
|
||||
else:
|
||||
# Advance simulation so subsequent hunks validate correctly.
|
||||
# Reuse the result from the call above — no second fuzzy run.
|
||||
@@ -537,7 +543,13 @@ def _apply_update(op: PatchOperation, file_ops: Any) -> Tuple[bool, str]:
|
||||
error = None
|
||||
|
||||
if error:
|
||||
return False, f"Could not apply hunk: {error}"
|
||||
err_msg = f"Could not apply hunk: {error}"
|
||||
try:
|
||||
from tools.fuzzy_match import format_no_match_hint
|
||||
err_msg += format_no_match_hint(error, 0, search_pattern, new_content)
|
||||
except Exception:
|
||||
pass
|
||||
return False, err_msg
|
||||
else:
|
||||
# Addition-only hunk (no context or removed lines).
|
||||
# Insert at the location indicated by the context hint, or at end of file.
|
||||
|
||||
@@ -575,9 +575,15 @@ def _patch_skill(
|
||||
if match_error:
|
||||
# Show a short preview of the file so the model can self-correct
|
||||
preview = content[:500] + ("..." if len(content) > 500 else "")
|
||||
err_msg = match_error
|
||||
try:
|
||||
from tools.fuzzy_match import format_no_match_hint
|
||||
err_msg += format_no_match_hint(match_error, match_count, old_string, content)
|
||||
except Exception:
|
||||
pass
|
||||
return {
|
||||
"success": False,
|
||||
"error": match_error,
|
||||
"error": err_msg,
|
||||
"file_preview": preview,
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user