Compare commits


1 Commit

Author SHA1 Message Date
Alexander Whitestone
7c38007094 feat(memory): add grounded observation synthesis layer
All checks were successful
Lint / lint (pull_request) Successful in 16s
2026-04-22 10:59:40 -04:00
11 changed files with 574 additions and 482 deletions

View File

@@ -26,6 +26,7 @@ from agent.memory_provider import MemoryProvider
from tools.registry import tool_error
from .store import MemoryStore
from .retrieval import FactRetriever
from .observations import ObservationSynthesizer
logger = logging.getLogger(__name__)
@@ -37,28 +38,29 @@ logger = logging.getLogger(__name__)
FACT_STORE_SCHEMA = {
"name": "fact_store",
"description": (
"Deep structured memory with algebraic reasoning. "
"Deep structured memory with algebraic reasoning and grounded observation synthesis. "
"Use alongside the memory tool — memory for always-on context, "
"fact_store for deep recall and compositional queries.\n\n"
"fact_store for deep recall, compositional queries, and higher-order observations.\n\n"
"ACTIONS (simple → powerful):\n"
"• add — Store a fact the user would expect you to remember.\n"
"• search — Keyword lookup ('editor config', 'deploy process').\n"
"• probe — Entity recall: ALL facts about a person/thing.\n"
"• related — What connects to an entity? Structural adjacency.\n"
"• reason — Compositional: facts connected to MULTIPLE entities simultaneously.\n"
"• observe — Synthesized higher-order observations backed by supporting facts.\n"
"• contradict — Memory hygiene: find facts making conflicting claims.\n"
"• update/remove/list — CRUD operations.\n\n"
"IMPORTANT: Before answering questions about the user, ALWAYS probe or reason first."
"IMPORTANT: Before answering questions about the user, ALWAYS probe/reason/observe first."
),
"parameters": {
"type": "object",
"properties": {
"action": {
"type": "string",
"enum": ["add", "search", "probe", "related", "reason", "contradict", "update", "remove", "list"],
"enum": ["add", "search", "probe", "related", "reason", "observe", "contradict", "update", "remove", "list"],
},
"content": {"type": "string", "description": "Fact content (required for 'add')."},
"query": {"type": "string", "description": "Search query (required for 'search')."},
"query": {"type": "string", "description": "Search query (required for 'search'/'observe')."},
"entity": {"type": "string", "description": "Entity name for 'probe'/'related'."},
"entities": {"type": "array", "items": {"type": "string"}, "description": "Entity names for 'reason'."},
"fact_id": {"type": "integer", "description": "Fact ID for 'update'/'remove'."},
@@ -66,6 +68,12 @@ FACT_STORE_SCHEMA = {
"tags": {"type": "string", "description": "Comma-separated tags."},
"trust_delta": {"type": "number", "description": "Trust adjustment for 'update'."},
"min_trust": {"type": "number", "description": "Minimum trust filter (default: 0.3)."},
"min_confidence": {"type": "number", "description": "Minimum observation confidence (default: 0.6)."},
"observation_type": {
"type": "string",
"enum": ["recurring_preference", "stable_direction", "behavioral_pattern"],
"description": "Optional observation type filter for 'observe'.",
},
"limit": {"type": "integer", "description": "Max results (default: 10)."},
},
"required": ["action"],
@@ -118,7 +126,9 @@ class HolographicMemoryProvider(MemoryProvider):
self._config = config or _load_plugin_config()
self._store = None
self._retriever = None
self._observation_synth = None
self._min_trust = float(self._config.get("min_trust_threshold", 0.3))
self._observation_min_confidence = float(self._config.get("observation_min_confidence", 0.6))
@property
def name(self) -> str:
@@ -177,6 +187,7 @@ class HolographicMemoryProvider(MemoryProvider):
hrr_weight=hrr_weight,
hrr_dim=hrr_dim,
)
self._observation_synth = ObservationSynthesizer(self._store)
self._session_id = session_id
def system_prompt_block(self) -> str:
@@ -193,30 +204,76 @@ class HolographicMemoryProvider(MemoryProvider):
"# Holographic Memory\n"
"Active. Empty fact store — proactively add facts the user would expect you to remember.\n"
"Use fact_store(action='add') to store durable structured facts about people, projects, preferences, decisions.\n"
"Use fact_store(action='observe') to synthesize higher-order observations with evidence.\n"
"Use fact_feedback to rate facts after using them (trains trust scores)."
)
return (
f"# Holographic Memory\n"
f"Active. {total} facts stored with entity resolution and trust scoring.\n"
f"Use fact_store to search, probe entities, reason across entities, or add facts.\n"
f"Use fact_store to search, probe entities, reason across entities, or synthesize observations.\n"
f"Use fact_feedback to rate facts after using them (trains trust scores)."
)
def prefetch(self, query: str, *, session_id: str = "") -> str:
if not self._retriever or not query:
if not query:
return ""
parts = []
raw_results = []
try:
results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
if not results:
return ""
if self._retriever:
raw_results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
except Exception as e:
logger.debug("Holographic prefetch fact search failed: %s", e)
raw_results = []
observations = []
try:
if self._observation_synth:
observations = self._observation_synth.observe(
query,
min_confidence=self._observation_min_confidence,
limit=3,
refresh=True,
)
except Exception as e:
logger.debug("Holographic prefetch observation search failed: %s", e)
observations = []
if not raw_results and observations:
seen_fact_ids = set()
evidence_backfill = []
for observation in observations:
for evidence in observation.get("evidence", []):
fact_id = evidence.get("fact_id")
if fact_id in seen_fact_ids:
continue
seen_fact_ids.add(fact_id)
evidence_backfill.append(evidence)
raw_results = evidence_backfill[:5]
if raw_results:
lines = []
for r in results:
for r in raw_results:
trust = r.get("trust_score", r.get("trust", 0))
lines.append(f"- [{trust:.1f}] {r.get('content', '')}")
return "## Holographic Memory\n" + "\n".join(lines)
except Exception as e:
logger.debug("Holographic prefetch failed: %s", e)
return ""
parts.append("## Holographic Memory\n" + "\n".join(lines))
if observations:
lines = []
for observation in observations:
evidence_ids = ", ".join(
f"#{item['fact_id']}" for item in observation.get("evidence", [])[:3]
) or "none"
lines.append(
f"- [{observation.get('confidence', 0.0):.2f}] "
f"{observation.get('observation_type', 'observation')}: "
f"{observation.get('summary', '')} "
f"(evidence: {evidence_ids})"
)
parts.append("## Holographic Observations\n" + "\n".join(lines))
return "\n\n".join(parts)
def sync_turn(self, user_content: str, assistant_content: str, *, session_id: str = "") -> None:
# Holographic memory stores explicit facts via tools, not auto-sync.
@@ -252,6 +309,7 @@ class HolographicMemoryProvider(MemoryProvider):
def shutdown(self) -> None:
self._store = None
self._retriever = None
self._observation_synth = None
# -- Tool handlers -------------------------------------------------------
@@ -305,6 +363,19 @@ class HolographicMemoryProvider(MemoryProvider):
)
return json.dumps({"results": results, "count": len(results)})
elif action == "observe":
synthesizer = self._observation_synth
if not synthesizer:
return tool_error("Observation synthesizer is not initialized")
observations = synthesizer.observe(
args.get("query", ""),
observation_type=args.get("observation_type"),
min_confidence=float(args.get("min_confidence", self._observation_min_confidence)),
limit=int(args.get("limit", 10)),
refresh=True,
)
return json.dumps({"observations": observations, "count": len(observations)})
elif action == "contradict":
results = retriever.contradict(
category=args.get("category"),
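
Taken together, the new observe action can be exercised end-to-end through the provider's tool handler. A minimal sketch, reusing only the constructor, initialize/handle_tool_call signatures, and add_fact helper visible in this diff and in the test file below — the db_path and session_id values are illustrative, not from the repository:

import json
from plugins.memory.holographic import HolographicMemoryProvider

# Illustrative standalone setup; config keys mirror the test fixtures below.
provider = HolographicMemoryProvider(
    config={"db_path": "/tmp/memory.db", "default_trust": 0.5}
)
provider.initialize(session_id="demo")
provider._store.add_fact("User prefers concise status updates", category="user_pref")
provider._store.add_fact("User wants result-only replies with no fluff", category="user_pref")

raw = provider.handle_tool_call(
    "fact_store",
    {"action": "observe", "query": "communication style", "limit": 5},
)
result = json.loads(raw)
# With two matching user_pref facts, one recurring_preference observation
# clears the default 0.6 confidence floor, so result["count"] == 1.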

View File

@@ -0,0 +1,249 @@
"""Higher-order observation synthesis for holographic memory.
Builds grounded observations from accumulated facts and keeps them in a
separate retrieval layer with explicit evidence links back to supporting facts.
"""
from __future__ import annotations
import re
from typing import Any
from .store import MemoryStore
_TOKEN_RE = re.compile(r"[a-z0-9_]+")
_HIGHER_ORDER_CUES = {
"prefer",
"preference",
"preferences",
"style",
"pattern",
"patterns",
"behavior",
"behaviour",
"habit",
"habits",
"workflow",
"direction",
"trajectory",
"strategy",
"tend",
"usually",
}
_OBSERVATION_PATTERNS = [
{
"observation_type": "recurring_preference",
"subject": "communication_style",
"categories": {"user_pref", "general"},
"labels": {
"concise": ["concise", "terse", "brief", "short", "no fluff"],
"result_first": ["result-only", "result only", "outcome only", "quick", "quickly"],
"silent_ops": ["silent", "no status", "no repetitive status", "no questions"],
},
"summary_prefix": "Recurring preference",
},
{
"observation_type": "stable_direction",
"subject": "project_direction",
"categories": {"project", "general", "tool"},
"labels": {
"local_first": ["local-first", "local first", "local-only", "local only", "ollama", "own hardware"],
"gitea_first": ["gitea-first", "gitea first", "forge", "pull request", "pr flow", "issue flow"],
"ansible": ["ansible", "playbook", "role", "deploy via ansible"],
},
"summary_prefix": "Stable direction",
},
{
"observation_type": "behavioral_pattern",
"subject": "operator_workflow",
"categories": {"general", "project", "tool", "user_pref"},
"labels": {
"commit_early": ["commit early", "commits early", "commit after", "wip commit"],
"pr_first": ["open pr", "push a pr", "pull request", "pr immediately", "create pr"],
"dedup_guard": ["no dupes", "no duplicates", "avoid duplicate", "existing pr"],
},
"summary_prefix": "Behavioral pattern",
},
]
_TYPE_QUERY_HINTS = {
"recurring_preference": {"prefer", "preference", "style", "communication", "likes", "wants"},
"stable_direction": {"direction", "trajectory", "strategy", "project", "roadmap", "moving"},
"behavioral_pattern": {"pattern", "behavior", "workflow", "habit", "operator", "agent", "usually"},
}
class ObservationSynthesizer:
"""Synthesizes grounded observations from facts and retrieves them by query."""
def __init__(self, store: MemoryStore):
self.store = store
def synthesize(
self,
*,
persist: bool = True,
min_confidence: float = 0.6,
limit: int = 10,
) -> list[dict[str, Any]]:
facts = self.store.list_facts(min_trust=0.0, limit=1000)
observations: list[dict[str, Any]] = []
for pattern in _OBSERVATION_PATTERNS:
candidate = self._build_candidate(pattern, facts, min_confidence=min_confidence)
if not candidate:
continue
if persist:
candidate["observation_id"] = self.store.upsert_observation(
candidate["observation_type"],
candidate["subject"],
candidate["summary"],
candidate["confidence"],
candidate["evidence_fact_ids"],
metadata=candidate["metadata"],
)
candidate["evidence"] = self._expand_evidence(candidate["evidence_fact_ids"])
candidate["evidence_count"] = len(candidate["evidence"])
candidate.pop("evidence_fact_ids", None)
observations.append(candidate)
observations.sort(
key=lambda item: (item["confidence"], item.get("evidence_count", 0)),
reverse=True,
)
return observations[:limit]
def observe(
self,
query: str = "",
*,
observation_type: str | None = None,
min_confidence: float = 0.6,
limit: int = 10,
refresh: bool = True,
) -> list[dict[str, Any]]:
if refresh:
self.synthesize(persist=True, min_confidence=min_confidence, limit=limit)
observations = self.store.list_observations(
observation_type=observation_type,
min_confidence=min_confidence,
limit=max(limit * 4, 20),
)
if not observations:
return []
if not query:
return observations[:limit]
query_tokens = self._tokenize(query)
is_higher_order = bool(query_tokens & _HIGHER_ORDER_CUES)
ranked: list[dict[str, Any]] = []
for item in observations:
searchable = " ".join(
[
item.get("summary", ""),
item.get("subject", ""),
item.get("observation_type", ""),
" ".join(item.get("metadata", {}).get("labels", [])),
]
)
overlap = self._overlap_score(query_tokens, self._tokenize(searchable))
type_bonus = self._type_bonus(query_tokens, item.get("observation_type", ""))
if overlap <= 0 and type_bonus <= 0 and not is_higher_order:
continue
ranked_item = dict(item)
ranked_item["score"] = round(item.get("confidence", 0.0) + overlap + type_bonus, 3)
ranked.append(ranked_item)
if not ranked and is_higher_order:
ranked = [
{**item, "score": round(float(item.get("confidence", 0.0)), 3)}
for item in observations
]
ranked.sort(
key=lambda item: (item.get("score", 0.0), item.get("confidence", 0.0), item.get("evidence_count", 0)),
reverse=True,
)
return ranked[:limit]
def _build_candidate(
self,
pattern: dict[str, Any],
facts: list[dict[str, Any]],
*,
min_confidence: float,
) -> dict[str, Any] | None:
matched_fact_ids: set[int] = set()
matched_labels: dict[str, set[int]] = {label: set() for label in pattern["labels"]}
for fact in facts:
if fact.get("category") not in pattern["categories"]:
continue
haystack = f"{fact.get('content', '')} {fact.get('tags', '')}".lower()
local_match = False
for label, keywords in pattern["labels"].items():
if any(keyword in haystack for keyword in keywords):
matched_labels[label].add(int(fact["fact_id"]))
local_match = True
if local_match:
matched_fact_ids.add(int(fact["fact_id"]))
if len(matched_fact_ids) < 2:
return None
active_labels = sorted(label for label, ids in matched_labels.items() if ids)
confidence = min(0.95, 0.35 + 0.12 * len(matched_fact_ids) + 0.08 * len(active_labels))
confidence = round(confidence, 3)
if confidence < min_confidence:
return None
label_summary = ", ".join(label.replace("_", "-") for label in active_labels)
subject_text = pattern["subject"].replace("_", " ")
summary = (
f"{pattern['summary_prefix']}: {subject_text} trends toward {label_summary} "
f"based on {len(matched_fact_ids)} supporting facts."
)
return {
"observation_type": pattern["observation_type"],
"subject": pattern["subject"],
"summary": summary,
"confidence": confidence,
"metadata": {
"labels": active_labels,
"evidence_count": len(matched_fact_ids),
},
"evidence_fact_ids": sorted(matched_fact_ids),
}
def _expand_evidence(self, fact_ids: list[int]) -> list[dict[str, Any]]:
facts_by_id = {
fact["fact_id"]: fact
for fact in self.store.list_facts(min_trust=0.0, limit=1000)
}
return [facts_by_id[fact_id] for fact_id in fact_ids if fact_id in facts_by_id]
@staticmethod
def _tokenize(text: str) -> set[str]:
return set(_TOKEN_RE.findall(text.lower()))
@staticmethod
def _overlap_score(query_tokens: set[str], text_tokens: set[str]) -> float:
if not query_tokens or not text_tokens:
return 0.0
overlap = query_tokens & text_tokens
if not overlap:
return 0.0
return round(len(overlap) / max(len(query_tokens), 1), 3)
@staticmethod
def _type_bonus(query_tokens: set[str], observation_type: str) -> float:
hints = _TYPE_QUERY_HINTS.get(observation_type, set())
if not hints:
return 0.0
return 0.25 if query_tokens & hints else 0.0
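
The confidence heuristic in _build_candidate is worth spelling out: confidence is min(0.95, 0.35 + 0.12 * matched_facts + 0.08 * active_labels), rounded to three places and gated on at least two matched facts. A quick worked check — standalone Python mirroring the formula above, not part of the diff:

def confidence(n_facts: int, n_labels: int) -> float:
    # Same arithmetic as _build_candidate above.
    return round(min(0.95, 0.35 + 0.12 * n_facts + 0.08 * n_labels), 3)

assert confidence(2, 1) == 0.67  # two facts, one label: clears the 0.6 default
assert confidence(2, 2) == 0.75  # the two-fact scenario in the tests below
assert confidence(5, 3) == 0.95  # the cap keeps any observation short of certainty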

View File

@@ -3,6 +3,7 @@ SQLite-backed fact store with entity resolution and trust scoring.
Single-user Hermes memory store plugin.
"""
import json
import re
import sqlite3
import threading
@@ -73,6 +74,28 @@ CREATE TABLE IF NOT EXISTS memory_banks (
fact_count INTEGER DEFAULT 0,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS observations (
observation_id INTEGER PRIMARY KEY AUTOINCREMENT,
observation_type TEXT NOT NULL,
subject TEXT NOT NULL,
summary TEXT NOT NULL,
confidence REAL DEFAULT 0.0,
metadata_json TEXT DEFAULT '{}',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
UNIQUE(observation_type, subject)
);
CREATE TABLE IF NOT EXISTS observation_evidence (
observation_id INTEGER REFERENCES observations(observation_id) ON DELETE CASCADE,
fact_id INTEGER REFERENCES facts(fact_id) ON DELETE CASCADE,
evidence_weight REAL DEFAULT 1.0,
PRIMARY KEY (observation_id, fact_id)
);
CREATE INDEX IF NOT EXISTS idx_observations_type ON observations(observation_type);
CREATE INDEX IF NOT EXISTS idx_observations_confidence ON observations(confidence DESC);
"""
# Trust adjustment constants
@@ -128,6 +151,7 @@ class MemoryStore:
def _init_db(self) -> None:
"""Create tables, indexes, and triggers if they do not exist. Enable WAL mode."""
self._conn.execute("PRAGMA journal_mode=WAL")
self._conn.execute("PRAGMA foreign_keys=ON")
self._conn.executescript(_SCHEMA)
# Migrate: add hrr_vector column if missing (safe for existing databases)
columns = {row[1] for row in self._conn.execute("PRAGMA table_info(facts)").fetchall()}
@@ -346,6 +370,115 @@ class MemoryStore:
rows = self._conn.execute(sql, params).fetchall()
return [self._row_to_dict(r) for r in rows]
def upsert_observation(
self,
observation_type: str,
subject: str,
summary: str,
confidence: float,
evidence_fact_ids: list[int],
metadata: dict | None = None,
) -> int:
"""Create or update a synthesized observation and its evidence links."""
with self._lock:
metadata_json = json.dumps(metadata or {}, sort_keys=True)
self._conn.execute(
"""
INSERT INTO observations (
observation_type, subject, summary, confidence, metadata_json
)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT(observation_type, subject) DO UPDATE SET
summary = excluded.summary,
confidence = excluded.confidence,
metadata_json = excluded.metadata_json,
updated_at = CURRENT_TIMESTAMP
""",
(observation_type, subject, summary, confidence, metadata_json),
)
row = self._conn.execute(
"""
SELECT observation_id
FROM observations
WHERE observation_type = ? AND subject = ?
""",
(observation_type, subject),
).fetchone()
observation_id = int(row["observation_id"])
self._conn.execute(
"DELETE FROM observation_evidence WHERE observation_id = ?",
(observation_id,),
)
unique_fact_ids = sorted({int(fid) for fid in evidence_fact_ids})
if unique_fact_ids:
self._conn.executemany(
"""
INSERT OR IGNORE INTO observation_evidence (observation_id, fact_id)
VALUES (?, ?)
""",
[(observation_id, fact_id) for fact_id in unique_fact_ids],
)
self._conn.commit()
return observation_id
def list_observations(
self,
observation_type: str | None = None,
min_confidence: float = 0.0,
limit: int = 50,
) -> list[dict]:
"""List synthesized observations with expanded supporting evidence."""
with self._lock:
params: list = [min_confidence]
observation_clause = ""
if observation_type is not None:
observation_clause = "AND observation_type = ?"
params.append(observation_type)
params.append(limit)
rows = self._conn.execute(
f"""
SELECT observation_id, observation_type, subject, summary, confidence,
metadata_json, created_at, updated_at,
(
SELECT COUNT(*)
FROM observation_evidence oe
WHERE oe.observation_id = observations.observation_id
) AS evidence_count
FROM observations
WHERE confidence >= ?
{observation_clause}
ORDER BY confidence DESC, updated_at DESC
LIMIT ?
""",
params,
).fetchall()
results = []
for row in rows:
item = dict(row)
try:
item["metadata"] = json.loads(item.pop("metadata_json") or "{}")
except json.JSONDecodeError:
item["metadata"] = {}
item["evidence"] = self._get_observation_evidence(int(item["observation_id"]))
results.append(item)
return results
def _get_observation_evidence(self, observation_id: int) -> list[dict]:
rows = self._conn.execute(
"""
SELECT f.fact_id, f.content, f.category, f.tags, f.trust_score,
f.retrieval_count, f.helpful_count, f.created_at, f.updated_at
FROM observation_evidence oe
JOIN facts f ON f.fact_id = oe.fact_id
WHERE oe.observation_id = ?
ORDER BY f.trust_score DESC, f.updated_at DESC
""",
(observation_id,),
).fetchall()
return [self._row_to_dict(row) for row in rows]
def record_feedback(self, fact_id: int, helpful: bool) -> dict:
"""Record user feedback and adjust trust asymmetrically.

View File

@@ -0,0 +1,96 @@
import json
import pytest
from plugins.memory.holographic import HolographicMemoryProvider
from plugins.memory.holographic.store import MemoryStore
@pytest.fixture()
def store(tmp_path):
db_path = tmp_path / "memory.db"
s = MemoryStore(db_path=str(db_path), default_trust=0.5)
yield s
s.close()
@pytest.fixture()
def provider(tmp_path):
p = HolographicMemoryProvider(
config={
"db_path": str(tmp_path / "memory.db"),
"default_trust": 0.5,
}
)
p.initialize(session_id="test-session")
yield p
if p._store:
p._store.close()
class TestObservationSynthesis:
def test_observe_action_persists_observation_with_evidence_links(self, provider):
fact_ids = [
provider._store.add_fact('User prefers concise status updates', category='user_pref'),
provider._store.add_fact('User wants result-only replies with no fluff', category='user_pref'),
]
result = json.loads(
provider.handle_tool_call(
'fact_store',
{
'action': 'observe',
'query': 'What communication style does the user prefer?',
'limit': 5,
},
)
)
assert result['count'] == 1
observation = result['observations'][0]
assert observation['observation_type'] == 'recurring_preference'
assert observation['confidence'] >= 0.6
assert sorted(item['fact_id'] for item in observation['evidence']) == sorted(fact_ids)
stored = provider._store.list_observations(limit=10)
assert len(stored) == 1
assert stored[0]['observation_type'] == 'recurring_preference'
assert stored[0]['evidence_count'] == 2
assert len(provider._store.list_facts(limit=10)) == 2
def test_observe_action_synthesizes_three_observation_types(self, provider):
provider._store.add_fact('User prefers concise updates', category='user_pref')
provider._store.add_fact('User wants result-only communication', category='user_pref')
provider._store.add_fact('Project is moving to a local-first deployment model', category='project')
provider._store.add_fact('Project direction stays Gitea-first for issue and PR flow', category='project')
provider._store.add_fact('Operator always commits early before moving on', category='general')
provider._store.add_fact('Operator pushes a PR immediately after each meaningful fix', category='general')
result = json.loads(provider.handle_tool_call('fact_store', {'action': 'observe', 'limit': 10}))
types = {item['observation_type'] for item in result['observations']}
assert {'recurring_preference', 'stable_direction', 'behavioral_pattern'} <= types
def test_single_fact_does_not_create_overconfident_observation(self, provider):
provider._store.add_fact('User prefers concise updates', category='user_pref')
result = json.loads(
provider.handle_tool_call(
'fact_store',
{'action': 'observe', 'query': 'What does the user prefer?', 'limit': 5},
)
)
assert result['count'] == 0
assert provider._store.list_observations(limit=10) == []
def test_prefetch_surfaces_observations_as_separate_layer(self, provider):
provider._store.add_fact('User prefers concise updates', category='user_pref')
provider._store.add_fact('User wants result-only communication', category='user_pref')
prefetch = provider.prefetch('What communication style does the user prefer?')
assert '## Holographic Observations' in prefetch
assert '## Holographic Memory' in prefetch
assert 'recurring_preference' in prefetch
assert 'evidence' in prefetch.lower()
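
For reference, prefetch on that two-fact scenario renders both layers using the line formats shown in the provider diff; with the default trust of 0.5 the output looks roughly like this (fact IDs and scores are illustrative):

## Holographic Memory
- [0.5] User prefers concise updates
- [0.5] User wants result-only communication

## Holographic Observations
- [0.75] recurring_preference: Recurring preference: communication style trends toward concise, result-first based on 2 supporting facts. (evidence: #1, #2)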

View File

@@ -148,184 +148,3 @@ class TestStrategyNameSurfaced:
assert count == 0
assert strategy is None
assert err is not None
class TestEscapeDriftGuard:
"""Tests for the escape-drift guard that catches bash/JSON serialization
artifacts where an apostrophe gets prefixed with a spurious backslash
in tool-call transport.
"""
def test_drift_blocked_apostrophe(self):
"""File has ', old_string and new_string both have \\' — classic
tool-call drift. Guard must block with a helpful error instead of
writing \\' literals into source code."""
content = "x = \"hello there\"\n"
# Simulate transport-corrupted old_string and new_string where an
# apostrophe-like context got prefixed with a backslash. The content
# itself has no apostrophe, but both strings do — matching via
# whitespace/anchor strategies would otherwise succeed.
old_string = "x = \"hello there\" # don\\'t edit\n"
new_string = "x = \"hi there\" # don\\'t edit\n"
# This particular pair won't match anything, so it exits via
# no-match path. Build a case where a non-exact strategy DOES match.
content = "line\n x = 1\nline"
old_string = "line\n x = \\'a\\'\nline"
new_string = "line\n x = \\'b\\'\nline"
new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
assert count == 0
assert err is not None and "Escape-drift" in err
assert "backslash" in err.lower()
assert new == content # file untouched
def test_drift_blocked_double_quote(self):
"""Same idea but with \\" drift instead of \\'."""
content = 'line\n x = 1\nline'
old_string = 'line\n x = \\"a\\"\nline'
new_string = 'line\n x = \\"b\\"\nline'
new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
assert count == 0
assert err is not None and "Escape-drift" in err
def test_drift_allowed_when_file_genuinely_has_backslash_escapes(self):
"""If the file already contains \\' (e.g. inside an existing escaped
string), the model is legitimately preserving it. Guard must NOT
fire."""
content = "line\n x = \\'a\\'\nline"
old_string = "line\n x = \\'a\\'\nline"
new_string = "line\n x = \\'b\\'\nline"
new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
assert err is None
assert count == 1
assert "\\'b\\'" in new
def test_drift_allowed_on_exact_match(self):
"""Exact matches bypass the drift guard entirely — if the file
really contains the exact bytes old_string specified, it's not
drift."""
content = "hello \\'world\\'"
new, count, strategy, err = fuzzy_find_and_replace(
content, "hello \\'world\\'", "hello \\'there\\'"
)
assert err is None
assert count == 1
assert strategy == "exact"
def test_drift_allowed_when_adding_escaped_strings(self):
"""Model is adding new content with \\' that wasn't in the original.
old_string has no \\', so guard doesn't fire."""
content = "line1\nline2\nline3"
old_string = "line1\nline2\nline3"
new_string = "line1\nprint(\\'added\\')\nline2\nline3"
new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
assert err is None
assert count == 1
assert "\\'added\\'" in new
def test_no_drift_check_when_new_string_lacks_suspect_chars(self):
"""Fast-path: if new_string has no \\' or \\", guard must not
fire even on fuzzy match."""
content = "def foo():\n pass" # extra space ignored by line_trimmed
old_string = "def foo():\n pass"
new_string = "def bar():\n return 1"
new, count, strategy, err = fuzzy_find_and_replace(content, old_string, new_string)
assert err is None
assert count == 1
class TestFindClosestLines:
def setup_method(self):
from tools.fuzzy_match import find_closest_lines
self.find_closest_lines = find_closest_lines
def test_finds_similar_line(self):
content = "def foo():\n pass\ndef bar():\n return 1\n"
result = self.find_closest_lines("def baz():", content)
assert "def foo" in result or "def bar" in result
def test_returns_empty_for_no_match(self):
content = "completely different content here"
result = self.find_closest_lines("xyzzy_no_match_possible_!!!", content)
assert result == ""
def test_returns_empty_for_empty_inputs(self):
assert self.find_closest_lines("", "some content") == ""
assert self.find_closest_lines("old string", "") == ""
def test_includes_context_lines(self):
content = "line1\nline2\ndef target():\n pass\nline5\n"
result = self.find_closest_lines("def target():", content)
assert "target" in result
def test_includes_line_numbers(self):
content = "line1\nline2\ndef foo():\n pass\n"
result = self.find_closest_lines("def foo():", content)
# Should include line numbers in format "N| content"
assert "|" in result
class TestFormatNoMatchHint:
"""Gating tests for format_no_match_hint — the shared helper that decides
whether a 'Did you mean?' snippet should be appended to an error.
"""
def setup_method(self):
from tools.fuzzy_match import format_no_match_hint
self.fmt = format_no_match_hint
def test_fires_on_could_not_find_with_match(self):
"""Classic no-match: similar content exists → hint fires."""
content = "def foo():\n pass\ndef bar():\n pass\n"
result = self.fmt(
"Could not find a match for old_string in the file",
0, "def baz():", content,
)
assert "Did you mean" in result
assert "foo" in result or "bar" in result
def test_silent_on_ambiguous_match_error(self):
"""'Found N matches' is not a missing-match failure — no hint."""
content = "aaa bbb aaa\n"
result = self.fmt(
"Found 2 matches for old_string. Provide more context to make it unique, or use replace_all=True.",
0, "aaa", content,
)
assert result == ""
def test_silent_on_escape_drift_error(self):
"""Escape-drift errors are intentional blocks — hint would mislead."""
content = "x = 1\n"
result = self.fmt(
"Escape-drift detected: old_string and new_string contain the literal sequence '\\\\''...",
0, "x = \\'1\\'", content,
)
assert result == ""
def test_silent_on_identical_strings(self):
"""old_string == new_string — hint irrelevant."""
result = self.fmt(
"old_string and new_string are identical",
0, "foo", "foo bar\n",
)
assert result == ""
def test_silent_when_match_count_nonzero(self):
"""If match succeeded, we shouldn't be in the error path — defense in depth."""
result = self.fmt(
"Could not find a match for old_string in the file",
1, "foo", "foo bar\n",
)
assert result == ""
def test_silent_on_none_error(self):
"""No error at all — no hint."""
result = self.fmt(None, 0, "foo", "bar\n")
assert result == ""
def test_silent_when_no_similar_content(self):
"""Even for a valid no-match error, skip hint when nothing similar exists."""
result = self.fmt(
"Could not find a match for old_string in the file",
0, "totally_unique_xyzzy_qux", "abc\nxyz\n",
)
assert result == ""

View File

@@ -1,114 +0,0 @@
import json
import os
import textwrap
from pathlib import Path
import tools.skill_manager_tool as skill_manager_tool
from tools.file_tools import patch_tool
from tools.skill_manager_tool import _create_skill, _patch_skill
def _disable_patch_tool_guards(monkeypatch):
monkeypatch.setattr("tools.file_tools._check_sensitive_path", lambda _path: None)
monkeypatch.setattr("tools.file_tools._check_file_staleness", lambda _path, _task_id: None)
monkeypatch.setattr("tools.file_tools._log_and_check_conflict", lambda _path, _task_id, _action: None)
def test_patch_tool_replace_no_match_shows_rich_hint_without_legacy_hint(tmp_path, monkeypatch):
_disable_patch_tool_guards(monkeypatch)
sample = tmp_path / "sample.py"
sample.write_text("def foo():\n return 1\n\ndef bar():\n return 2\n", encoding="utf-8")
raw = patch_tool(
mode="replace",
path=str(sample),
old_string="def barycentric():",
new_string="def barycentric_new():",
task_id="qa960-replace-rich-hint",
)
result = json.loads(raw)
assert result["success"] is False
assert "Could not find a match" in result["error"]
assert "Did you mean one of these sections?" in result["error"]
assert "def bar():" in result["error"] or "def foo():" in result["error"]
assert "[Hint:" not in raw
def test_patch_tool_replace_ambiguous_error_does_not_show_did_you_mean(tmp_path, monkeypatch):
_disable_patch_tool_guards(monkeypatch)
sample = tmp_path / "sample.py"
sample.write_text("aaa\nbbb\naaa\n", encoding="utf-8")
raw = patch_tool(
mode="replace",
path=str(sample),
old_string="aaa",
new_string="ccc",
task_id="qa960-replace-ambiguous",
)
result = json.loads(raw)
assert result["success"] is False
assert "Found 2 matches" in result["error"]
assert "Did you mean one of these sections?" not in result["error"]
assert "[Hint:" not in raw
def test_patch_tool_v4a_no_match_shows_rich_hint(tmp_path, monkeypatch):
_disable_patch_tool_guards(monkeypatch)
sample = tmp_path / "sample.py"
sample.write_text("def foo():\n return 1\n", encoding="utf-8")
patch = textwrap.dedent(
f"""\
*** Begin Patch
*** Update File: {sample}
@@
-def barycentric():
+def barycentric_new():
*** End Patch
"""
)
raw = patch_tool(mode="patch", patch=patch, task_id="qa960-v4a-rich-hint")
result = json.loads(raw)
assert result["success"] is False
assert "Patch validation failed" in result["error"]
assert "Did you mean one of these sections?" in result["error"]
assert "def foo():" in result["error"]
def test_skill_patch_no_match_shows_rich_hint(tmp_path, monkeypatch):
monkeypatch.setenv("HERMES_HOME", str(tmp_path))
skills_dir = tmp_path / "skills"
skills_dir.mkdir(parents=True, exist_ok=True)
monkeypatch.setattr(skill_manager_tool, "SKILLS_DIR", skills_dir)
monkeypatch.setattr(skill_manager_tool, "_security_scan_skill", lambda _skill_dir: None)
_create_skill(
"qa-skill",
textwrap.dedent(
"""\
---
name: qa-skill
description: test
---
Step 1: Do the thing.
Step 2: Verify the thing.
"""
),
)
result = _patch_skill(
"qa-skill",
"Step 1: Do the production rollout.",
"Step 1: Updated.",
)
assert result["success"] is False
assert "Could not find a match" in result["error"]
assert "Did you mean one of these sections?" in result["error"]
assert "Step 1: Do the thing." in result["error"]
assert "file_preview" in result

View File

@@ -757,14 +757,12 @@ class ShellFileOperations(FileOperations):
content, old_string, new_string, replace_all
)
if error or match_count == 0:
err_msg = error or f"Could not find match for old_string in {path}"
try:
from tools.fuzzy_match import format_no_match_hint
err_msg += format_no_match_hint(err_msg, match_count, old_string, content)
except Exception:
pass
return PatchResult(error=err_msg)
if error:
return PatchResult(error=error)
if match_count == 0:
return PatchResult(error=f"Could not find match for old_string in {path}")
# Write back
write_result = self.write_file(path, new_content)
if write_result.error:

View File

@@ -8,7 +8,6 @@ import os
import threading
import time
from pathlib import Path
from typing import Any, Dict, Optional
from tools.binary_extensions import has_binary_extension
from tools.file_operations import ShellFileOperations
from agent.redact import redact_sensitive_text
@@ -691,11 +690,8 @@ def patch_tool(mode: str = "replace", path: str = None, old_string: str = None,
result_json = json.dumps(result_dict, ensure_ascii=False)
# Hint when old_string not found — saves iterations where the agent
# retries with stale content instead of re-reading the file.
# Suppressed when patch_replace already attached a rich "Did you mean?"
# snippet (which is strictly more useful than the generic hint).
if result_dict.get("error") and "Could not find" in str(result_dict["error"]):
if "Did you mean one of these sections?" not in str(result_dict["error"]):
result_json += "\n\n[Hint: old_string not found. Use read_file to verify the current content, or search_files to locate the text.]"
result_json += "\n\n[Hint: old_string not found. Use read_file to verify the current content, or search_files to locate the text.]"
return result_json
except Exception as e:
return tool_error(str(e))

View File

@@ -93,21 +93,6 @@ def fuzzy_find_and_replace(content: str, old_string: str, new_string: str,
f"Provide more context to make it unique, or use replace_all=True."
)
# Escape-drift guard: when the matched strategy is NOT `exact`,
# we matched via some form of normalization. If new_string
# contains shell/JSON-style escape sequences (\\' or \\") that
# would be written literally into the file but the matched
# region of the file has no such sequences, this is almost
# certainly tool-call serialization drift — the model typed
# an apostrophe/quote and the transport added a stray
# backslash. Writing new_string as-is would corrupt the file.
# Block with a helpful error so the model re-reads and retries
# instead of the caller silently persisting garbage (or not).
if strategy_name != "exact":
drift_err = _detect_escape_drift(content, matches, old_string, new_string)
if drift_err:
return content, 0, None, drift_err
# Perform replacement
new_content = _apply_replacements(content, matches, new_string)
return new_content, len(matches), strategy_name, None
@@ -116,46 +101,6 @@ def fuzzy_find_and_replace(content: str, old_string: str, new_string: str,
return content, 0, None, "Could not find a match for old_string in the file"
def _detect_escape_drift(content: str, matches: List[Tuple[int, int]],
old_string: str, new_string: str) -> Optional[str]:
"""Detect tool-call escape-drift artifacts in new_string.
Looks for ``\\'`` or ``\\"`` sequences that are present in both
old_string and new_string (i.e. the model copy-pasted them as "context"
it intended to preserve) but don't exist in the matched region of the
file. That pattern indicates the transport layer inserted spurious
shell-style escapes around apostrophes or quotes — writing new_string
verbatim would literally insert ``\\'`` into source code.
Returns an error string if drift is detected, None otherwise.
"""
# Cheap pre-check: bail out unless new_string actually contains a
# suspect escape sequence. This keeps the guard free for all the
# common, correct cases.
if "\\'" not in new_string and '\\"' not in new_string:
return None
# Aggregate matched regions of the file — that's what new_string will
# replace. If the suspect escapes are present there already, the
# model is genuinely preserving them (valid for some languages /
# escaped strings); accept the patch.
matched_regions = "".join(content[start:end] for start, end in matches)
for suspect in ("\\'", '\\"'):
if suspect in new_string and suspect in old_string and suspect not in matched_regions:
plain = suspect[1] # "'" or '"'
return (
f"Escape-drift detected: old_string and new_string contain "
f"the literal sequence {suspect!r} but the matched region of "
f"the file does not. This is almost always a tool-call "
f"serialization artifact where an apostrophe or quote got "
f"prefixed with a spurious backslash. Re-read the file with "
f"read_file and pass old_string/new_string without "
f"backslash-escaping {plain!r} characters."
)
return None
def _apply_replacements(content: str, matches: List[Tuple[int, int]], new_string: str) -> str:
"""
Apply replacements at the given positions.
@@ -619,86 +564,3 @@ def _map_normalized_positions(original: str, normalized: str,
original_matches.append((orig_start, min(orig_end, len(original))))
return original_matches
def find_closest_lines(old_string: str, content: str, context_lines: int = 2, max_results: int = 3) -> str:
"""Find lines in content most similar to old_string for "did you mean?" feedback.
Returns a formatted string showing the closest matching lines with context,
or empty string if no useful match is found.
"""
if not old_string or not content:
return ""
old_lines = old_string.splitlines()
content_lines = content.splitlines()
if not old_lines or not content_lines:
return ""
# Use first line of old_string as anchor for search
anchor = old_lines[0].strip()
if not anchor:
# Try second line if first is blank
candidates = [l.strip() for l in old_lines if l.strip()]
if not candidates:
return ""
anchor = candidates[0]
# Score each line in content by similarity to anchor
scored = []
for i, line in enumerate(content_lines):
stripped = line.strip()
if not stripped:
continue
ratio = SequenceMatcher(None, anchor, stripped).ratio()
if ratio > 0.3:
scored.append((ratio, i))
if not scored:
return ""
# Take top matches
scored.sort(key=lambda x: -x[0])
top = scored[:max_results]
parts = []
seen_ranges = set()
for _, line_idx in top:
start = max(0, line_idx - context_lines)
end = min(len(content_lines), line_idx + len(old_lines) + context_lines)
key = (start, end)
if key in seen_ranges:
continue
seen_ranges.add(key)
snippet = "\n".join(
f"{start + j + 1:4d}| {content_lines[start + j]}"
for j in range(end - start)
)
parts.append(snippet)
if not parts:
return ""
return "\n---\n".join(parts)
def format_no_match_hint(error: Optional[str], match_count: int,
old_string: str, content: str) -> str:
"""Return a '\\n\\nDid you mean...' snippet for plain no-match errors.
Gated so the hint only fires for actual "old_string not found" failures.
Ambiguous-match ("Found N matches"), escape-drift, and identical-strings
errors all have ``match_count == 0`` but a "did you mean?" snippet would
be misleading — those failed for unrelated reasons.
Returns an empty string when there's nothing useful to append.
"""
if match_count != 0:
return ""
if not error or not error.startswith("Could not find"):
return ""
hint = find_closest_lines(old_string, content)
if not hint:
return ""
return "\n\nDid you mean one of these sections?\n" + hint

View File

@@ -290,16 +290,10 @@ def _validate_operations(
)
if count == 0:
label = f"'{hunk.context_hint}'" if hunk.context_hint else "(no hint)"
msg = (
errors.append(
f"{op.file_path}: hunk {label} not found"
+ (f"{match_error}" if match_error else "")
)
try:
from tools.fuzzy_match import format_no_match_hint
msg += format_no_match_hint(match_error, count, search_pattern, simulated)
except Exception:
pass
errors.append(msg)
else:
# Advance simulation so subsequent hunks validate correctly.
# Reuse the result from the call above — no second fuzzy run.
@@ -543,13 +537,7 @@ def _apply_update(op: PatchOperation, file_ops: Any) -> Tuple[bool, str]:
error = None
if error:
err_msg = f"Could not apply hunk: {error}"
try:
from tools.fuzzy_match import format_no_match_hint
err_msg += format_no_match_hint(error, 0, search_pattern, new_content)
except Exception:
pass
return False, err_msg
return False, f"Could not apply hunk: {error}"
else:
# Addition-only hunk (no context or removed lines).
# Insert at the location indicated by the context hint, or at end of file.

View File

@@ -575,15 +575,9 @@ def _patch_skill(
if match_error:
# Show a short preview of the file so the model can self-correct
preview = content[:500] + ("..." if len(content) > 500 else "")
err_msg = match_error
try:
from tools.fuzzy_match import format_no_match_hint
err_msg += format_no_match_hint(match_error, match_count, old_string, content)
except Exception:
pass
return {
"success": False,
"error": err_msg,
"error": match_error,
"file_preview": preview,
}