Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
eeed075974 feat: add profile-scoped local Hindsight eval path
All checks were successful
Lint / lint (pull_request) Successful in 28s
Closes #1010
2026-04-22 11:12:08 -04:00
14 changed files with 699 additions and 1079 deletions

View File

@@ -0,0 +1,69 @@
"""First-class context snapshot artifacts for live runtime memory evaluation."""
from __future__ import annotations
import json
import re
from pathlib import Path
from typing import Any
from hermes_constants import get_hermes_home
_SAFE_SEGMENT_RE = re.compile(r"[^A-Za-z0-9_.-]+")
class ContextSnapshotRecorder:
    """Persist per-call prompt-composition artifacts for one Hermes session.

    Each API call gets its own ``call_NNN`` directory holding the raw text and
    JSON pieces that composed the prompt, so benchmarks can inspect exactly
    what reached the model.
    """

    def __init__(self, session_id: str, *, enabled: bool = False, base_dir: str | Path | None = None):
        # Fall back to a generic id so path construction never breaks.
        self.session_id = session_id or "session"
        self.enabled = bool(enabled)
        if base_dir:
            self.base_dir = Path(base_dir)
        else:
            self.base_dir = get_hermes_home() / "reports" / "context_snapshots"

    @property
    def session_dir(self) -> Path:
        """Per-session directory, with path-unsafe characters replaced."""
        cleaned = _SAFE_SEGMENT_RE.sub("_", self.session_id).strip("._")
        return self.base_dir / (cleaned or "session")

    def record_call(
        self,
        api_call_count: int,
        *,
        system_prompt: str,
        memory_provider_system_prompt: str = "",
        memory_prefetch_raw: str = "",
        memory_context_block: str = "",
        api_user_message: str = "",
        api_messages: list[dict[str, Any]] | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> Path | None:
        """Write one call's artifacts; return the call dir, or None when disabled."""
        if not self.enabled:
            return None
        call_dir = self.session_dir / f"call_{api_call_count:03d}"
        call_dir.mkdir(parents=True, exist_ok=True)
        text_artifacts = {
            "system_prompt.txt": system_prompt,
            "memory_provider_system_prompt.txt": memory_provider_system_prompt,
            "memory_prefetch_raw.txt": memory_prefetch_raw,
            "memory_context_block.txt": memory_context_block,
            "api_user_message.txt": api_user_message,
        }
        for filename, content in text_artifacts.items():
            self._write_text(call_dir / filename, content or "")
        self._write_json(call_dir / "api_messages.json", api_messages or [])
        meta_payload = {
            "session_id": self.session_id,
            "api_call_count": api_call_count,
            **(metadata or {}),
        }
        self._write_json(call_dir / "metadata.json", meta_payload)
        return call_dir

    @staticmethod
    def _write_text(path: Path, content: str) -> None:
        path.write_text(content, encoding="utf-8")

    @staticmethod
    def _write_json(path: Path, payload: Any) -> None:
        path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8")

View File

@@ -1,546 +1,197 @@
"""Session compaction with structured fact extraction.
"""Session compaction with fact extraction.
Before compressing conversation context, extract durable facts with enough
structure to survive retrieval: source/provenance, temporal anchors,
normalized canonical keys, and contradiction groups.
Before compressing conversation context, extracts durable facts
(user preferences, corrections, project details) and saves them
to the fact store so they survive compression.
Usage:
from agent.session_compactor import extract_and_save_facts
facts = extract_and_save_facts(messages)
"""
from __future__ import annotations
import json
import logging
import re
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, List, Tuple
from typing import Any, Dict, List, Optional, Tuple
logger = logging.getLogger(__name__)
_DEPLOY_METHOD_RE = re.compile(r"\bdeploy(?:ing)?\s+(?:via|through|with)\s+([A-Za-z0-9_./+-]+)", re.IGNORECASE)
_WATCHDOG_CAP_RE = re.compile(
r"\b(?:the\s+)?([A-Za-z0-9_-]+(?:\s+watchdog)?)\s+(?:caps|limits)\s+dispatches(?:\s+per\s+cycle)?\s+to\s+([0-9]+)",
re.IGNORECASE,
)
_PROVIDER_RE = re.compile(
r"\bprovider\s+(?:is|should\s+stay|should\s+be|needs\s+to\s+be)\s+([A-Za-z0-9._/-]+)",
re.IGNORECASE,
)
_MODEL_RE = re.compile(
r"\bmodel\s+(?:is|should\s+stay|should\s+be|needs\s+to\s+be)\s+([A-Za-z0-9._:/-]+)",
re.IGNORECASE,
)
_PORT_RE = re.compile(r"\bport\s+(?:is|should\s+be)\s+([0-9]+)", re.IGNORECASE)
_PROJECT_USES_RE = re.compile(r"\b(?:the\s+)?project\s+(?:uses|needs|requires)\s+(.+?)(?:[.!?]|$)", re.IGNORECASE)
_PREFERENCE_RE = re.compile(r"\bI\s+(?:prefer|like|want|need)\s+(.+?)(?:[.!?]|$)", re.IGNORECASE)
_CONSTRAINT_RE = re.compile(r"\b(?:do\s+not|don't)\s+(?:ever\s+|again\s+)?(.+?)(?:[.!?]|$)", re.IGNORECASE)
_DECISION_RE = re.compile(r"\b(?:we|the\s+team)\s+(?:decided|agreed|chose)\s+(?:to\s+)?(.+?)(?:[.!?]|$)", re.IGNORECASE)
@dataclass
class ExtractedFact:
    """A durable fact extracted from conversation.

    Beyond the raw (category, entity, content) triple, each fact carries
    provenance, temporal anchors, a normalized value, and a canonical key so
    near-duplicates can merge and contradictions stay inspectable.
    """

    category: str        # e.g. "user_pref.preference", "project.config"
    entity: str          # what the fact is about ("user", "project", ...)
    content: str         # the human-readable fact itself
    confidence: float    # 0.0-1.0 extraction confidence
    source_turn: int     # which message turn it came from
    timestamp: float = 0.0          # unix time the fact was observed
    source_role: str = "user"       # role of the originating message
    source_text: str = ""           # full text of the originating message
    normalized_content: str = ""    # lowercased/cleaned value used for dedup
    canonical_key: str = ""         # "entity|relation|normalized_value"
    relation: str = "general"       # slot name, e.g. "config.port"
    contradiction_group: str = ""   # shared slot for mutually exclusive facts
    status: str = "active"          # "active" or "contradiction"
    provenance: str = ""            # "conversation:<role>:<turn>"
    observed_at: str = ""           # ISO-8601 UTC timestamp string
    evidence: List[Dict[str, Any]] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self) -> None:
        # Derive any fields the caller left blank so every fact is
        # self-describing even when constructed from the bare triple.
        if not self.timestamp:
            self.timestamp = time.time()
        if not self.observed_at:
            self.observed_at = _iso_from_timestamp(self.timestamp)
        if not self.normalized_content:
            self.normalized_content = _normalize_value(self.content)
        if not self.provenance:
            self.provenance = f"conversation:{self.source_role}:{self.source_turn}"
        if not self.canonical_key:
            self.canonical_key = _canonical_key(self.entity, self.relation, self.normalized_content)
        if not self.evidence:
            self.evidence = [
                {
                    "source_role": self.source_role,
                    "source_turn": self.source_turn,
                    "source_text": self.source_text or self.content,
                    "observed_at": self.observed_at,
                    "provenance": self.provenance,
                }
            ]
        # Mirror the structured view into metadata without clobbering
        # anything the caller already supplied.
        self.metadata = dict(self.metadata or {})
        self.metadata.setdefault("entity", self.entity)
        self.metadata.setdefault("relation", self.relation)
        self.metadata.setdefault("value", self.content)
        self.metadata.setdefault("normalized_value", self.normalized_content)
        self.metadata.setdefault("provenance", [self.provenance])
        self.metadata.setdefault("evidence", list(self.evidence))
        self.metadata.setdefault("observation_count", len(self.evidence))
        self.metadata.setdefault("duplicate_count", max(0, self.metadata["observation_count"] - 1))
        if self.contradiction_group:
            self.metadata.setdefault("contradiction_group", self.contradiction_group)
        self.metadata.setdefault("status", self.status)
# Patterns that indicate user preferences
_PREFERENCE_PATTERNS = [
(r"(?:I|we) (?:prefer|like|want|need) (.+?)(?:\.|$)", "preference"),
(r"(?:always|never) (?:use|do|run|deploy) (.+?)(?:\.|$)", "preference"),
(r"(?:my|our) (?:default|preferred|usual) (.+?) (?:is|are) (.+?)(?:\.|$)", "preference"),
(r"(?:make sure|ensure|remember) (?:to|that) (.+?)(?:\.|$)", "instruction"),
(r"(?:don'?t|do not) (?:ever|ever again) (.+?)(?:\.|$)", "constraint"),
]
# Patterns that indicate corrections
_CORRECTION_PATTERNS = [
(r"(?:actually|no[, ]|wait[, ]|correction[: ]|sorry[, ]) (.+)", "correction"),
(r"(?:I meant|what I meant was|the correct) (.+?)(?:\.|$)", "correction"),
(r"(?:it'?s|its) (?:not|shouldn'?t be|wrong) (.+?)(?:\.|$)", "correction"),
]
# Patterns that indicate project/tool facts
_PROJECT_PATTERNS = [
(r"(?:the |our )?(?:project|repo|codebase|code) (?:is|uses|needs|requires) (.+?)(?:\.|$)", "project"),
(r"(?:deploy|push|commit) (?:to|on) (.+?)(?:\.|$)", "project"),
(r"(?:this|that|the) (?:server|host|machine|VPS) (?:is|runs|has) (.+?)(?:\.|$)", "infrastructure"),
(r"(?:model|provider|engine) (?:is|should be|needs to be) (.+?)(?:\.|$)", "config"),
]
def extract_facts_from_messages(messages: List[Dict[str, Any]]) -> List[ExtractedFact]:
    """Extract durable facts from conversation messages.

    Scans user and assistant turns for preferences, decisions, corrections,
    and operational state. Raw candidates are normalized into canonical facts
    so near-duplicates merge and contradictions remain inspectable.
    """
    raw_candidates: list[ExtractedFact] = []
    for turn_idx, msg in enumerate(messages):
        role = msg.get("role", "")
        content = msg.get("content", "")
        if role not in {"user", "assistant"}:
            continue
        # Assistant tool-call turns carry machine payloads, not durable facts.
        if role == "assistant" and msg.get("tool_calls"):
            continue
        # Very short messages cannot contain a meaningful fact.
        if not isinstance(content, str) or len(content.strip()) < 10:
            continue
        timestamp, observed_at = _message_time(msg)
        raw_candidates.extend(
            _extract_from_text(
                content.strip(),
                turn_idx=turn_idx,
                role=role,
                timestamp=timestamp,
                observed_at=observed_at,
            )
        )
    return _normalize_candidates(raw_candidates)
def evaluate_extraction_quality(messages: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Return before/after metrics for raw vs normalized extraction quality."""
    candidates: list[ExtractedFact] = []
    for idx, message in enumerate(messages):
        role = message.get("role", "")
        body = message.get("content", "")
        skip = (
            role not in {"user", "assistant"}
            or (role == "assistant" and message.get("tool_calls"))
            or not isinstance(body, str)
            or len(body.strip()) < 10
        )
        if skip:
            continue
        ts, seen_at = _message_time(message)
        candidates += _extract_from_text(
            body.strip(),
            turn_idx=idx,
            role=role,
            timestamp=ts,
            observed_at=seen_at,
        )
    normalized = _normalize_candidates(candidates)
    raw_total = len(candidates)
    kept = len(normalized)
    groups: set[str] = set()
    for fact in normalized:
        if fact.status == "contradiction" and fact.contradiction_group:
            groups.add(fact.contradiction_group)
    merged = max(0, raw_total - kept)
    return {
        "raw_candidates": raw_total,
        "normalized_facts": kept,
        "duplicates_merged": merged,
        "contradiction_groups": len(groups),
        "noise_reduction": round(merged / raw_total, 3) if raw_total else 0.0,
    }
def _extract_from_text(
    text: str,
    *,
    turn_idx: int,
    role: str,
    timestamp: float,
    observed_at: str,
) -> List[ExtractedFact]:
    """Extract raw fact candidates from a single text block.

    Only user turns are mined. Each matching pattern yields one candidate
    with full provenance; deduplication and contradiction marking happen
    later in _normalize_candidates, not here.
    """
    facts: list[ExtractedFact] = []
    if role != "user":
        return facts

    # Context shared by every fact built from this message.
    origin = dict(
        source_turn=turn_idx,
        source_role=role,
        source_text=text,
        timestamp=timestamp,
        observed_at=observed_at,
    )

    deploy_match = _DEPLOY_METHOD_RE.search(text)
    if deploy_match:
        method = deploy_match.group(1).strip()
        facts.append(
            _build_fact(
                category="project.decision",
                entity="project",
                relation="workflow.deploy_method",
                value=method,
                content=f"Deploy via {method}",
                confidence=0.88,
                unique_slot=True,
                **origin,
            )
        )

    # Watchdog caps capture two groups (which watchdog, what cap), so this
    # cannot share the single-group spec table below.
    watchdog_match = _WATCHDOG_CAP_RE.search(text)
    if watchdog_match:
        watchdog = watchdog_match.group(1).strip()
        cap = watchdog_match.group(2).strip()
        facts.append(
            _build_fact(
                category="project.operational",
                entity=_normalize_entity(watchdog),
                relation="fleet.dispatch_cap",
                value=cap,
                content=f"{watchdog} caps dispatches per cycle to {cap}",
                confidence=0.92,
                unique_slot=True,
                **origin,
            )
        )

    # (pattern, category, entity, relation, content template, confidence,
    # unique_slot). Trailing ".!?" is stripped again inside _build_fact, so
    # the uniform rstrip(".") here is safe for every slot.
    single_group_specs = (
        (_PROVIDER_RE, "project.config", "project", "config.provider", "Provider should stay {value}", 0.91, True),
        (_MODEL_RE, "project.config", "project", "config.model", "Model should stay {value}", 0.9, True),
        (_PORT_RE, "project.config", "project", "config.port", "Port is {value}", 0.9, True),
        (_PROJECT_USES_RE, "project.stack", "project", "project.stack", "Project uses {value}", 0.74, False),
        (_PREFERENCE_RE, "user_pref.preference", "user", "user.preference", "{value}", 0.72, False),
        (_CONSTRAINT_RE, "user_pref.constraint", "user", "user.constraint", "Do not {value}", 0.82, False),
        (_DECISION_RE, "project.decision", "project", "project.decision", "Decision: {value}", 0.79, False),
    )
    for pattern, category, entity, relation, template, confidence, unique_slot in single_group_specs:
        match = pattern.search(text)
        if not match:
            continue
        value = match.group(1).strip().rstrip(".")
        facts.append(
            _build_fact(
                category=category,
                entity=entity,
                relation=relation,
                value=value,
                content=template.format(value=value),
                confidence=confidence,
                unique_slot=unique_slot,
                **origin,
            )
        )
    return facts
def _build_fact(
    *,
    category: str,
    entity: str,
    relation: str,
    value: str,
    content: str,
    confidence: float,
    source_turn: int,
    source_role: str,
    source_text: str,
    timestamp: float,
    observed_at: str,
    unique_slot: bool,
) -> ExtractedFact:
    """Construct a fully-populated ExtractedFact for one pattern match.

    All derived fields (normalized value, canonical key, provenance,
    evidence, metadata) are computed here up front, so the setdefault calls
    in ExtractedFact.__post_init__ leave them untouched. When ``unique_slot``
    is True, the relation doubles as the contradiction group: two different
    values for the same slot are mutually exclusive.
    """
    # Strip trailing punctuation once so value/content/normalized agree.
    normalized_value = _normalize_value(value.rstrip(".!?"))
    value = value.rstrip(".!?")
    content = content.rstrip(".!?")
    provenance = f"conversation:{source_role}:{source_turn}"
    # Single-valued slots (provider, port, ...) share a contradiction group.
    contradiction_group = relation if unique_slot else ""
    evidence = [
        {
            "source_role": source_role,
            "source_turn": source_turn,
            "source_text": source_text,
            "observed_at": observed_at,
            "provenance": provenance,
        }
    ]
    metadata = {
        "entity": entity,
        "relation": relation,
        "value": value,
        "normalized_value": normalized_value,
        "provenance": [provenance],
        "evidence": list(evidence),
        "observation_count": 1,
        "duplicate_count": 0,
        "status": "active",
    }
    if contradiction_group:
        metadata["contradiction_group"] = contradiction_group
    return ExtractedFact(
        category=category,
        entity=entity,
        content=content,
        confidence=confidence,
        source_turn=source_turn,
        timestamp=timestamp,
        source_role=source_role,
        source_text=source_text,
        normalized_content=normalized_value,
        canonical_key=_canonical_key(entity, relation, normalized_value),
        relation=relation,
        contradiction_group=contradiction_group,
        status="active",
        provenance=provenance,
        observed_at=observed_at,
        evidence=evidence,
        metadata=metadata,
    )
def _extract_from_text(text: str, turn_idx: int, role: str) -> List[ExtractedFact]:
"""Extract facts from a single text block."""
facts = []
timestamp = time.time()
# Clean text for pattern matching
clean = text.strip()
def _normalize_candidates(candidates: List[ExtractedFact]) -> List[ExtractedFact]:
"""Merge duplicates and mark contradictions while preserving evidence."""
# User preference patterns (from user messages)
if role == "user":
for pattern, subcategory in _PREFERENCE_PATTERNS:
for match in re.finditer(pattern, clean, re.IGNORECASE):
content = match.group(1).strip() if match.lastindex else match.group(0).strip()
if len(content) > 5:
facts.append(ExtractedFact(
category=f"user_pref.{subcategory}",
entity="user",
content=content[:200],
confidence=0.7,
source_turn=turn_idx,
timestamp=timestamp,
))
by_key: dict[str, ExtractedFact] = {}
contradiction_groups: dict[str, list[ExtractedFact]] = {}
# Correction patterns (from user messages)
if role == "user":
for pattern, subcategory in _CORRECTION_PATTERNS:
for match in re.finditer(pattern, clean, re.IGNORECASE):
content = match.group(1).strip() if match.lastindex else match.group(0).strip()
if len(content) > 5:
facts.append(ExtractedFact(
category=f"correction.{subcategory}",
entity="user",
content=content[:200],
confidence=0.8,
source_turn=turn_idx,
timestamp=timestamp,
))
for candidate in candidates:
existing = by_key.get(candidate.canonical_key)
if existing is not None:
by_key[candidate.canonical_key] = _merge_fact(existing, candidate)
continue
# Project/infrastructure patterns (from both user and assistant)
for pattern, subcategory in _PROJECT_PATTERNS:
for match in re.finditer(pattern, clean, re.IGNORECASE):
content = match.group(1).strip() if match.lastindex else match.group(0).strip()
if len(content) > 5:
facts.append(ExtractedFact(
category=f"project.{subcategory}",
entity=subcategory,
content=content[:200],
confidence=0.6,
source_turn=turn_idx,
timestamp=timestamp,
))
by_key[candidate.canonical_key] = candidate
if candidate.contradiction_group:
contradiction_groups.setdefault(candidate.contradiction_group, []).append(candidate)
for group, facts in contradiction_groups.items():
canonical_keys = {fact.canonical_key for fact in facts}
if len(canonical_keys) <= 1:
continue
for fact in facts:
fact.status = "contradiction"
fact.metadata["status"] = "contradiction"
fact.metadata["contradiction_group"] = group
fact.metadata["contradiction_keys"] = sorted(canonical_keys - {fact.canonical_key})
return sorted(by_key.values(), key=lambda fact: (fact.source_turn, fact.timestamp, fact.canonical_key))
def _merge_fact(existing: ExtractedFact, incoming: ExtractedFact) -> ExtractedFact:
    """Fold *incoming* into *existing* (same canonical key), in place.

    Keeps the highest confidence, the earliest temporal anchors, and the
    union of provenance/evidence; observation counts accumulate so callers
    can see how often a fact was repeated. Returns the mutated *existing*.
    """
    existing.confidence = max(existing.confidence, incoming.confidence)
    # Earliest sighting wins for all temporal anchors.
    existing.timestamp = min(existing.timestamp, incoming.timestamp)
    existing.source_turn = min(existing.source_turn, incoming.source_turn)
    if not existing.observed_at or (incoming.observed_at and incoming.observed_at < existing.observed_at):
        existing.observed_at = incoming.observed_at
    existing.provenance = min(existing.provenance, incoming.provenance)
    provenance = _ordered_unique(existing.metadata.get("provenance", []), incoming.metadata.get("provenance", []))
    evidence = _merge_evidence(existing.metadata.get("evidence", []), incoming.metadata.get("evidence", []))
    observation_count = int(existing.metadata.get("observation_count", len(existing.evidence) or 1))
    observation_count += int(incoming.metadata.get("observation_count", len(incoming.evidence) or 1))
    existing.evidence = evidence
    existing.metadata["provenance"] = provenance
    existing.metadata["evidence"] = evidence
    existing.metadata["observation_count"] = observation_count
    existing.metadata["duplicate_count"] = max(0, observation_count - 1)
    existing.metadata["status"] = existing.status
    return existing
def save_facts_to_store(facts: List[ExtractedFact], fact_store_fn=None) -> int:
    """Save extracted facts to the fact store.

    If a callback is supplied, prefer the structured keyword signature but
    fall back to the legacy four-argument callback for compatibility.

    Args:
        facts: List of extracted facts.
        fact_store_fn: Optional callable. Preferred signature is
            ``fn(**payload)`` with the structured payload built below; the
            legacy ``fn(category, entity, content, trust)`` form is also
            supported. If None, uses the holographic fact store if available.

    Returns:
        Number of facts saved.
    """
    saved = 0
    for fact in facts:
        # Structured payload understood by modern fact stores.
        payload = {
            "category": _store_category(fact.category),
            "entity": fact.entity,
            "content": fact.content,
            "trust": fact.confidence,
            "metadata": dict(fact.metadata),
            "canonical_key": fact.canonical_key,
            "observed_at": fact.observed_at,
            "source_role": fact.source_role,
            "source_turn": fact.source_turn,
            "contradiction_group": fact.contradiction_group,
            "status": fact.status,
            "relation": fact.relation,
        }
        if fact_store_fn:
            try:
                fact_store_fn(**payload)
                saved += 1
                continue
            except TypeError:
                # Legacy positional callback: (category, entity, content, trust).
                try:
                    fact_store_fn(payload["category"], payload["entity"], payload["content"], payload["trust"])
                    saved += 1
                    continue
                except Exception as exc:
                    logger.debug("Failed to save fact via callback: %s", exc)
                    continue
            except Exception as exc:
                logger.debug("Failed to save fact via callback: %s", exc)
                continue
        else:
            # Fall back to the holographic fact store if it is importable.
            try:
                from fact_store import fact_store as _fs
                tags = ",".join(filter(None, [fact.entity, fact.relation, fact.status]))
                _fs(
                    action="add",
                    content=fact.content,
                    category=_store_category(fact.category),
                    tags=tags,
                    trust_delta=fact.confidence - 0.5,
                )
                saved += 1
            except ImportError:
                # No store at all: stop trying for the remaining facts.
                logger.debug("fact_store not available — facts not persisted")
                break
            except Exception as exc:
                logger.debug("Failed to save fact via fact_store: %s", exc)
    return saved
@@ -553,10 +204,9 @@ def extract_and_save_facts(
Returns (extracted_facts, saved_count).
"""
facts = extract_facts_from_messages(messages)
if facts:
logger.info("Extracted %d normalized facts from conversation", len(facts))
logger.info("Extracted %d facts from conversation", len(facts))
saved = save_facts_to_store(facts, fact_store_fn)
logger.info("Saved %d/%d facts to store", saved, len(facts))
else:
@@ -566,105 +216,16 @@ def extract_and_save_facts(
def format_facts_summary(facts: List[ExtractedFact]) -> str:
    """Format extracted facts as a readable, category-grouped summary."""
    if not facts:
        return "No facts extracted."
    by_category: dict[str, list[ExtractedFact]] = {}
    for fact in facts:
        by_category.setdefault(fact.category, []).append(fact)
    lines = [f"Extracted {len(facts)} facts:", ""]
    for category, category_facts in sorted(by_category.items()):
        lines.append(f"  {category}:")
        for fact in category_facts:
            # Non-active facts (e.g. contradictions) are flagged inline.
            suffix = f" [{fact.status}]" if fact.status != "active" else ""
            lines.append(f"    - {fact.content[:80]}{suffix}")
    return "\n".join(lines)
def _store_category(category: str) -> str:
if category.startswith("user_pref"):
return "user_pref"
if category.startswith("project"):
return "project"
if category.startswith("tool"):
return "tool"
return "general"
def _message_time(msg: Dict[str, Any]) -> Tuple[float, str]:
    """Best-effort (unix timestamp, display string) for a message.

    Checks the common timestamp keys in priority order; an unparsable string
    value is kept verbatim as the display form, anchored to "now".
    """
    for key in ("created_at", "timestamp", "time"):
        value = msg.get(key)
        if isinstance(value, (int, float)):
            numeric = float(value)
            return numeric, _iso_from_timestamp(numeric)
        if isinstance(value, str):
            parsed = _parse_time_string(value)
            if parsed is None:
                # Unparsable string: preserve it, fall back to current time.
                return time.time(), value
            if "T" in value:
                # Already ISO-like; just normalize the UTC suffix.
                return parsed, value.replace("+00:00", "Z")
            return parsed, _iso_from_timestamp(parsed)
    now = time.time()
    return now, _iso_from_timestamp(now)
def _parse_time_string(value: str) -> float | None:
text = value.strip()
if not text:
return None
try:
return float(text)
except ValueError:
pass
try:
normalized = text[:-1] + "+00:00" if text.endswith("Z") else text
return datetime.fromisoformat(normalized).timestamp()
except ValueError:
return None
def _iso_from_timestamp(value: float) -> str:
return datetime.fromtimestamp(value, tz=timezone.utc).isoformat().replace("+00:00", "Z")
def _normalize_value(value: str) -> str:
normalized = re.sub(r"[^a-z0-9]+", " ", value.lower())
normalized = re.sub(r"\s+", " ", normalized).strip()
return normalized
def _normalize_entity(value: str) -> str:
    """Normalized value with underscores instead of spaces; never empty."""
    slug = _normalize_value(value).replace(" ", "_")
    return slug if slug else "entity"
def _canonical_key(entity: str, relation: str, normalized_value: str) -> str:
return f"{entity}|{relation}|{normalized_value}"
def _ordered_unique(*groups: List[str]) -> List[str]:
seen: set[str] = set()
ordered: list[str] = []
for group in groups:
for item in group:
if item and item not in seen:
seen.add(item)
ordered.append(item)
return ordered
def _merge_evidence(existing: List[Dict[str, Any]], incoming: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
seen: set[tuple[str, str, str]] = set()
merged: list[dict[str, Any]] = []
for item in list(existing) + list(incoming):
key = (
str(item.get("provenance", "")),
str(item.get("observed_at", "")),
str(item.get("source_text", "")),
)
if key in seen:
continue
seen.add(key)
merged.append(dict(item))
return merged

View File

@@ -0,0 +1,132 @@
# Hindsight local eval homes for live Hermes runtime testing
Issue: #1010
Parent: #985
This document defines a reproducible, profile-scoped evaluation layout for baseline / MemPalace / Hindsight comparisons without requiring Hindsight Cloud.
## Eval home layout
Use three separate `HERMES_HOME` directories so each run has isolated config, memory, sessions, and artifacts.
```text
~/.hermes/profiles/atlas-baseline/
config.yaml
.env
MEMORY.md
USER.md
reports/context_snapshots/
~/.hermes/profiles/atlas-mempalace/
config.yaml
.env
MEMORY.md
USER.md
reports/context_snapshots/
plugins/ # if a local MemPalace plugin is installed for this eval lane
~/.hermes/profiles/atlas-hindsight/
config.yaml
.env
MEMORY.md
USER.md
hindsight/config.json
reports/context_snapshots/
```
## Hindsight local config
The Hindsight provider already loads config from `$HERMES_HOME/hindsight/config.json` first. For the local eval lane, prefer `local_embedded` so Hermes can bring up a local Hindsight daemon without cloud signup.
Example `~/.hermes/profiles/atlas-hindsight/hindsight/config.json`:
```json
{
"mode": "local_embedded",
"memory_mode": "context",
"recall_prefetch_method": "recall",
"llm_provider": "ollama",
"llm_model": "gemma3:12b",
"api_url": "http://localhost:8888"
}
```
Notes:
- `local_embedded` avoids any Hindsight Cloud dependency.
- If `profile` is omitted, Hermes now derives a stable local Hindsight profile name from the active profile identity / `HERMES_HOME` instead of collapsing all local runs into the shared legacy `hermes` profile.
- `local_external` remains valid if you already run a local Hindsight server yourself.
## Runtime switching procedure
Switch by exporting `HERMES_HOME` before launching Hermes.
### 1. Baseline
```bash
export HERMES_HOME="$HOME/.hermes/profiles/atlas-baseline"
unset HERMES_CONTEXT_SNAPSHOTS
hermes chat
```
### 2. MemPalace lane
```bash
export HERMES_HOME="$HOME/.hermes/profiles/atlas-mempalace"
export HERMES_CONTEXT_SNAPSHOTS=1
hermes chat
```
### 3. Hindsight lane
```bash
export HERMES_HOME="$HOME/.hermes/profiles/atlas-hindsight"
export HERMES_CONTEXT_SNAPSHOTS=1
hermes chat
```
## Raw artifact capture
When `HERMES_CONTEXT_SNAPSHOTS=1` is enabled, Hermes writes first-class prompt-composition artifacts under the active home by default.
Artifact tree:
```text
$HERMES_HOME/reports/context_snapshots/<session-id>/call_001/
system_prompt.txt
memory_provider_system_prompt.txt
memory_prefetch_raw.txt
memory_context_block.txt
api_user_message.txt
api_messages.json
metadata.json
```
Minimum files a benchmark should inspect:
- `system_prompt.txt`
- `memory_prefetch_raw.txt`
- `memory_context_block.txt`
- `api_user_message.txt`
- `api_messages.json`
These prove:
- what the system prompt was
- what the provider prefetched
- what entered `<memory-context>`
- what the final API user message looked like
- what full payload reached the model
## Follow-on benchmark workflow
A benchmark issue can now consume this path without redoing integration work:
1. pick one eval home (`atlas-baseline`, `atlas-mempalace`, `atlas-hindsight`)
2. export the corresponding `HERMES_HOME`
3. run Hermes on the same prompt set
4. compare the snapshot artifacts in `reports/context_snapshots/`
5. score recall quality and answer quality separately
## Why this is sovereign
- no hosted Hindsight Cloud dependency is required
- the Hindsight config is profile-scoped under `hindsight/config.json`
- the runtime artifacts stay under the active `HERMES_HOME`
- switching between baseline / MemPalace / Hindsight is just a `HERMES_HOME` swap

View File

@@ -178,6 +178,25 @@ def _load_config() -> dict:
}
def _derive_local_profile_name(agent_identity: str = "", hermes_home: str = "") -> str:
"""Return a stable profile name for local embedded Hindsight storage.
Prefer the active Hermes profile identity when available, otherwise fall back
to the basename of the active HERMES_HOME path. This prevents all local
Hindsight eval homes from sharing the legacy default profile name "hermes".
"""
from pathlib import Path
import re
raw = (agent_identity or "").strip()
if not raw and hermes_home:
raw = Path(hermes_home).name.strip()
if not raw:
raw = "hermes"
safe = re.sub(r"[^A-Za-z0-9_.-]+", "-", raw).strip(".-_")
return safe or "hermes"
# ---------------------------------------------------------------------------
# MemoryProvider implementation
# ---------------------------------------------------------------------------
@@ -468,6 +487,8 @@ class HindsightMemoryProvider(MemoryProvider):
def initialize(self, session_id: str, **kwargs) -> None:
self._session_id = session_id
hermes_home = str(kwargs.get("hermes_home") or "")
agent_identity = str(kwargs.get("agent_identity") or "")
# Check client version and auto-upgrade if needed
try:
@@ -500,6 +521,11 @@ class HindsightMemoryProvider(MemoryProvider):
# "local" is a legacy alias for "local_embedded"
if self._mode == "local":
self._mode = "local_embedded"
if self._mode == "local_embedded" and not self._config.get("profile"):
self._config["profile"] = _derive_local_profile_name(
agent_identity=agent_identity,
hermes_home=hermes_home,
)
self._api_key = self._config.get("apiKey") or self._config.get("api_key") or os.environ.get("HINDSIGHT_API_KEY", "")
default_url = _DEFAULT_LOCAL_URL if self._mode in ("local_embedded", "local_external") else _DEFAULT_API_URL
self._api_url = self._config.get("api_url") or os.environ.get("HINDSIGHT_API_URL", default_url)

View File

@@ -356,57 +356,44 @@ class HolographicMemoryProvider(MemoryProvider):
# -- Auto-extraction (on_session_end) ------------------------------------
def _auto_extract_facts(self, messages: list) -> None:
from agent.session_compactor import evaluate_extraction_quality, extract_facts_from_messages
def _store_category(category: str) -> str:
if category.startswith("user_pref"):
return "user_pref"
if category.startswith("project"):
return "project"
if category.startswith("tool"):
return "tool"
return "general"
facts = extract_facts_from_messages(messages)
if not facts:
return
_PREF_PATTERNS = [
re.compile(r'\bI\s+(?:prefer|like|love|use|want|need)\s+(.+)', re.IGNORECASE),
re.compile(r'\bmy\s+(?:favorite|preferred|default)\s+\w+\s+is\s+(.+)', re.IGNORECASE),
re.compile(r'\bI\s+(?:always|never|usually)\s+(.+)', re.IGNORECASE),
]
_DECISION_PATTERNS = [
re.compile(r'\bwe\s+(?:decided|agreed|chose)\s+(?:to\s+)?(.+)', re.IGNORECASE),
re.compile(r'\bthe\s+project\s+(?:uses|needs|requires)\s+(.+)', re.IGNORECASE),
]
extracted = 0
for fact in facts:
try:
metadata = dict(fact.metadata)
metadata.setdefault("relation", fact.relation)
metadata.setdefault("value", fact.content)
metadata.setdefault("provenance", [fact.provenance])
metadata.setdefault("evidence", list(fact.evidence))
metadata.setdefault("observation_count", len(fact.evidence))
metadata.setdefault("duplicate_count", max(0, len(fact.evidence) - 1))
self._store.add_fact(
fact.content,
category=_store_category(fact.category),
tags=",".join(filter(None, [fact.entity, fact.relation, fact.status])),
canonical_key=fact.canonical_key,
metadata=metadata,
confidence=fact.confidence,
source_role=fact.source_role,
source_turn=fact.source_turn,
observed_at=fact.observed_at,
contradiction_group=fact.contradiction_group,
status=fact.status,
)
extracted += 1
except Exception as exc:
logger.debug("Structured auto-extract failed for %s: %s", fact.canonical_key, exc)
for msg in messages:
if msg.get("role") != "user":
continue
content = msg.get("content", "")
if not isinstance(content, str) or len(content) < 10:
continue
for pattern in _PREF_PATTERNS:
if pattern.search(content):
try:
self._store.add_fact(content[:400], category="user_pref")
extracted += 1
except Exception:
pass
break
for pattern in _DECISION_PATTERNS:
if pattern.search(content):
try:
self._store.add_fact(content[:400], category="project")
extracted += 1
except Exception:
pass
break
if extracted:
metrics = evaluate_extraction_quality(messages)
logger.info(
"Auto-extracted %d structured facts from conversation (raw=%d normalized=%d contradictions=%d)",
extracted,
metrics["raw_candidates"],
metrics["normalized_facts"],
metrics["contradiction_groups"],
)
logger.info("Auto-extracted %d facts from conversation", extracted)
# ---------------------------------------------------------------------------

View File

@@ -3,7 +3,6 @@ SQLite-backed fact store with entity resolution and trust scoring.
Single-user Hermes memory store plugin.
"""
import json
import re
import sqlite3
import threading
@@ -16,24 +15,16 @@ except ImportError:
_SCHEMA = """
CREATE TABLE IF NOT EXISTS facts (
fact_id INTEGER PRIMARY KEY AUTOINCREMENT,
content TEXT NOT NULL UNIQUE,
category TEXT DEFAULT 'general',
tags TEXT DEFAULT '',
trust_score REAL DEFAULT 0.5,
retrieval_count INTEGER DEFAULT 0,
helpful_count INTEGER DEFAULT 0,
canonical_key TEXT DEFAULT '',
metadata_json TEXT DEFAULT '{}',
confidence REAL DEFAULT 0.5,
source_role TEXT DEFAULT '',
source_turn INTEGER DEFAULT -1,
observed_at TEXT DEFAULT '',
contradiction_group TEXT DEFAULT '',
status TEXT DEFAULT 'active',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
hrr_vector BLOB
fact_id INTEGER PRIMARY KEY AUTOINCREMENT,
content TEXT NOT NULL UNIQUE,
category TEXT DEFAULT 'general',
tags TEXT DEFAULT '',
trust_score REAL DEFAULT 0.5,
retrieval_count INTEGER DEFAULT 0,
helpful_count INTEGER DEFAULT 0,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
hrr_vector BLOB
);
CREATE TABLE IF NOT EXISTS entities (
@@ -50,11 +41,9 @@ CREATE TABLE IF NOT EXISTS fact_entities (
PRIMARY KEY (fact_id, entity_id)
);
CREATE INDEX IF NOT EXISTS idx_facts_trust ON facts(trust_score DESC);
CREATE INDEX IF NOT EXISTS idx_facts_category ON facts(category);
CREATE INDEX IF NOT EXISTS idx_facts_canonical_key ON facts(canonical_key);
CREATE INDEX IF NOT EXISTS idx_facts_contradiction_group ON facts(contradiction_group);
CREATE INDEX IF NOT EXISTS idx_entities_name ON entities(name);
CREATE INDEX IF NOT EXISTS idx_facts_trust ON facts(trust_score DESC);
CREATE INDEX IF NOT EXISTS idx_facts_category ON facts(category);
CREATE INDEX IF NOT EXISTS idx_entities_name ON entities(name);
CREATE VIRTUAL TABLE IF NOT EXISTS facts_fts
USING fts5(content, tags, content=facts, content_rowid=fact_id);
@@ -140,23 +129,10 @@ class MemoryStore:
"""Create tables, indexes, and triggers if they do not exist. Enable WAL mode."""
self._conn.execute("PRAGMA journal_mode=WAL")
self._conn.executescript(_SCHEMA)
# Migrate: add hrr_vector column if missing (safe for existing databases)
columns = {row[1] for row in self._conn.execute("PRAGMA table_info(facts)").fetchall()}
migrations = {
"hrr_vector": "ALTER TABLE facts ADD COLUMN hrr_vector BLOB",
"canonical_key": "ALTER TABLE facts ADD COLUMN canonical_key TEXT DEFAULT ''",
"metadata_json": "ALTER TABLE facts ADD COLUMN metadata_json TEXT DEFAULT '{}'",
"confidence": "ALTER TABLE facts ADD COLUMN confidence REAL DEFAULT 0.5",
"source_role": "ALTER TABLE facts ADD COLUMN source_role TEXT DEFAULT ''",
"source_turn": "ALTER TABLE facts ADD COLUMN source_turn INTEGER DEFAULT -1",
"observed_at": "ALTER TABLE facts ADD COLUMN observed_at TEXT DEFAULT ''",
"contradiction_group": "ALTER TABLE facts ADD COLUMN contradiction_group TEXT DEFAULT ''",
"status": "ALTER TABLE facts ADD COLUMN status TEXT DEFAULT 'active'",
}
for column, ddl in migrations.items():
if column not in columns:
self._conn.execute(ddl)
self._conn.execute("CREATE INDEX IF NOT EXISTS idx_facts_canonical_key ON facts(canonical_key)")
self._conn.execute("CREATE INDEX IF NOT EXISTS idx_facts_contradiction_group ON facts(contradiction_group)")
if "hrr_vector" not in columns:
self._conn.execute("ALTER TABLE facts ADD COLUMN hrr_vector BLOB")
self._conn.commit()
# ------------------------------------------------------------------
@@ -168,148 +144,41 @@ class MemoryStore:
content: str,
category: str = "general",
tags: str = "",
*,
canonical_key: str = "",
metadata: dict | None = None,
confidence: float | None = None,
source_role: str = "",
source_turn: int = -1,
observed_at: str = "",
contradiction_group: str = "",
status: str = "active",
) -> int:
"""Insert a fact and return its fact_id.
Exact duplicates are deduplicated by content. Near-duplicates are
normalized by canonical_key, with provenance/evidence merged into the
existing row. Contradictions sharing the same contradiction_group remain
stored as separate rows and are marked inspectably.
Deduplicates by content (UNIQUE constraint). On duplicate, returns
the existing fact_id without modifying the row. Extracts entities from
the content and links them to the fact.
"""
with self._lock:
content = content.strip()
if not content:
raise ValueError("content must not be empty")
metadata = dict(metadata or {})
canonical_key = canonical_key.strip()
contradiction_group = contradiction_group.strip()
observed_at = observed_at.strip()
status = status or "active"
trust_score = self.default_trust if confidence is None else _clamp_trust(confidence)
metadata_json = json.dumps(metadata, sort_keys=True)
if canonical_key:
existing = self._conn.execute(
"SELECT fact_id, metadata_json, trust_score, confidence, observed_at FROM facts WHERE canonical_key = ?",
(canonical_key,),
).fetchone()
if existing is not None:
merged_metadata = self._merge_metadata(existing["metadata_json"], metadata)
merged_trust = max(float(existing["trust_score"]), trust_score)
merged_observed_at = existing["observed_at"] or observed_at
if observed_at and merged_observed_at:
merged_observed_at = min(merged_observed_at, observed_at)
elif observed_at:
merged_observed_at = observed_at
self._conn.execute(
"""
UPDATE facts
SET metadata_json = ?,
trust_score = ?,
confidence = ?,
observed_at = ?,
updated_at = CURRENT_TIMESTAMP
WHERE fact_id = ?
""",
(
json.dumps(merged_metadata, sort_keys=True),
merged_trust,
max(float(existing["confidence"] or 0.0), confidence or trust_score),
merged_observed_at,
existing["fact_id"],
),
)
self._conn.commit()
return int(existing["fact_id"])
contradiction_rows = []
if contradiction_group:
contradiction_rows = self._conn.execute(
"""
SELECT fact_id, canonical_key, metadata_json
FROM facts
WHERE contradiction_group = ?
AND canonical_key != ?
""",
(contradiction_group, canonical_key),
).fetchall()
if contradiction_rows:
status = "contradiction"
metadata = dict(metadata)
metadata["status"] = "contradiction"
metadata["contradiction_group"] = contradiction_group
metadata["contradiction_keys"] = sorted(
{
canonical_key,
*[str(row["canonical_key"]) for row in contradiction_rows if row["canonical_key"]],
}
- {""}
)
metadata_json = json.dumps(metadata, sort_keys=True)
try:
cur = self._conn.execute(
"""
INSERT INTO facts (
content,
category,
tags,
trust_score,
canonical_key,
metadata_json,
confidence,
source_role,
source_turn,
observed_at,
contradiction_group,
status
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
INSERT INTO facts (content, category, tags, trust_score)
VALUES (?, ?, ?, ?)
""",
(
content,
category,
tags,
trust_score,
canonical_key,
metadata_json,
confidence if confidence is not None else trust_score,
source_role,
source_turn,
observed_at,
contradiction_group,
status,
),
(content, category, tags, self.default_trust),
)
self._conn.commit()
fact_id: int = cur.lastrowid # type: ignore[assignment]
except sqlite3.IntegrityError:
# Duplicate content — return existing id
row = self._conn.execute(
"SELECT fact_id FROM facts WHERE content = ?", (content,)
).fetchone()
return int(row["fact_id"])
if contradiction_rows:
self._mark_contradictions(
contradiction_group=contradiction_group,
new_canonical_key=canonical_key,
existing_rows=contradiction_rows,
)
# Entity extraction and linking
for name in self._extract_entities(content):
entity_id = self._resolve_entity(name)
self._link_fact_entity(fact_id, entity_id)
# Compute HRR vector after entity linking
self._compute_hrr_vector(fact_id, content)
self._rebuild_bank(category)
@@ -342,9 +211,6 @@ class MemoryStore:
sql = f"""
SELECT f.fact_id, f.content, f.category, f.tags,
f.trust_score, f.retrieval_count, f.helpful_count,
f.canonical_key, f.metadata_json, f.confidence,
f.source_role, f.source_turn, f.observed_at,
f.contradiction_group, f.status,
f.created_at, f.updated_at
FROM facts f
JOIN facts_fts fts ON fts.rowid = f.fact_id
@@ -470,11 +336,7 @@ class MemoryStore:
sql = f"""
SELECT fact_id, content, category, tags, trust_score,
retrieval_count, helpful_count,
canonical_key, metadata_json, confidence,
source_role, source_turn, observed_at,
contradiction_group, status,
created_at, updated_at
retrieval_count, helpful_count, created_at, updated_at
FROM facts
WHERE trust_score >= ?
{category_clause}
@@ -525,89 +387,6 @@ class MemoryStore:
"helpful_count": row["helpful_count"] + helpful_increment,
}
# ------------------------------------------------------------------
# Metadata helpers
# ------------------------------------------------------------------
def _load_metadata(self, metadata_json: str | None) -> dict:
if not metadata_json:
return {}
try:
data = json.loads(metadata_json)
return data if isinstance(data, dict) else {}
except Exception:
return {}
def _merge_metadata(self, existing_json: str | None, incoming: dict | None) -> dict:
existing = self._load_metadata(existing_json)
incoming = dict(incoming or {})
merged = dict(existing)
merged.update({k: v for k, v in incoming.items() if k not in {"provenance", "evidence", "observation_count", "duplicate_count", "contradiction_keys"}})
provenance = []
seen_provenance: set[str] = set()
for item in list(existing.get("provenance", [])) + list(incoming.get("provenance", [])):
if item and item not in seen_provenance:
seen_provenance.add(item)
provenance.append(item)
evidence = []
seen_evidence: set[tuple[str, str, str]] = set()
for item in list(existing.get("evidence", [])) + list(incoming.get("evidence", [])):
if not isinstance(item, dict):
continue
key = (
str(item.get("provenance", "")),
str(item.get("observed_at", "")),
str(item.get("source_text", "")),
)
if key in seen_evidence:
continue
seen_evidence.add(key)
evidence.append(dict(item))
observation_count = int(existing.get("observation_count", max(1, len(existing.get("evidence", [])) or 1)))
observation_count += int(incoming.get("observation_count", max(1, len(incoming.get("evidence", [])) or 1)))
contradiction_keys = []
seen_keys: set[str] = set()
for item in list(existing.get("contradiction_keys", [])) + list(incoming.get("contradiction_keys", [])):
if item and item not in seen_keys:
seen_keys.add(item)
contradiction_keys.append(item)
merged["provenance"] = provenance
merged["evidence"] = evidence
merged["observation_count"] = observation_count
merged["duplicate_count"] = max(0, observation_count - 1)
if contradiction_keys:
merged["contradiction_keys"] = contradiction_keys
return merged
def _mark_contradictions(self, contradiction_group: str, new_canonical_key: str, existing_rows: list[sqlite3.Row]) -> None:
for row in existing_rows:
metadata = self._load_metadata(row["metadata_json"])
keys = []
seen: set[str] = set()
for item in list(metadata.get("contradiction_keys", [])) + [new_canonical_key]:
if item and item not in seen:
seen.add(item)
keys.append(item)
metadata["status"] = "contradiction"
metadata["contradiction_group"] = contradiction_group
metadata["contradiction_keys"] = keys
self._conn.execute(
"""
UPDATE facts
SET status = 'contradiction',
metadata_json = ?,
updated_at = CURRENT_TIMESTAMP
WHERE fact_id = ?
""",
(json.dumps(metadata, sort_keys=True), row["fact_id"]),
)
self._conn.commit()
# ------------------------------------------------------------------
# Entity helpers
# ------------------------------------------------------------------
@@ -781,14 +560,8 @@ class MemoryStore:
# ------------------------------------------------------------------
def _row_to_dict(self, row: sqlite3.Row) -> dict:
"""Convert a sqlite3.Row to a plain dict with decoded metadata."""
data = dict(row)
metadata = self._load_metadata(data.get("metadata_json"))
if metadata:
data["metadata"] = metadata
data.setdefault("relation", metadata.get("relation"))
data.pop("metadata_json", None)
return data
"""Convert a sqlite3.Row to a plain dict."""
return dict(row)
def close(self) -> None:
"""Close the database connection."""

View File

@@ -604,6 +604,8 @@ class AIAgent:
checkpoint_max_snapshots: int = 50,
pass_session_id: bool = False,
persist_session: bool = True,
context_snapshots_enabled: bool | None = None,
context_snapshots_dir: str | None = None,
):
"""
Initialize the AI Agent.
@@ -1129,6 +1131,43 @@ class AIAgent:
except Exception:
_agent_cfg = {}
def _is_enabled(value):
if isinstance(value, bool):
return value
return str(value).strip().lower() in {"1", "true", "yes", "on"}
_debug_cfg = _agent_cfg.get("debug", {}) if isinstance(_agent_cfg, dict) else {}
if not isinstance(_debug_cfg, dict):
_debug_cfg = {}
_snapshot_cfg = _debug_cfg.get("context_snapshots", {})
if not isinstance(_snapshot_cfg, dict):
_snapshot_cfg = {}
_snapshots_env = os.getenv("HERMES_CONTEXT_SNAPSHOTS")
_snapshots_dir_env = os.getenv("HERMES_CONTEXT_SNAPSHOTS_DIR")
if context_snapshots_enabled is None:
if _snapshots_env is not None:
self._context_snapshots_enabled = _is_enabled(_snapshots_env)
else:
self._context_snapshots_enabled = _is_enabled(_snapshot_cfg.get("enabled", False))
else:
self._context_snapshots_enabled = bool(context_snapshots_enabled)
self._context_snapshots_dir = (
context_snapshots_dir
or _snapshots_dir_env
or _snapshot_cfg.get("dir")
or None
)
try:
from agent.context_snapshots import ContextSnapshotRecorder
self._context_snapshot_recorder = ContextSnapshotRecorder(
session_id=self.session_id,
enabled=self._context_snapshots_enabled,
base_dir=self._context_snapshots_dir,
)
except Exception as _snapshot_err:
logger.debug("Context snapshot recorder init failed: %s", _snapshot_err)
self._context_snapshot_recorder = None
# Persistent memory (MEMORY.md + USER.md) -- loaded from disk
self._memory_store = None
self._memory_enabled = False
@@ -8144,12 +8183,17 @@ class AIAgent:
# Use original_user_message (clean input) — user_message may contain
# injected skill content that bloats / breaks provider queries.
_ext_prefetch_cache = ""
_memory_provider_prompt_cache = ""
if self._memory_manager:
try:
_query = original_user_message if isinstance(original_user_message, str) else ""
_ext_prefetch_cache = self._memory_manager.prefetch_all(_query) or ""
except Exception:
pass
try:
_memory_provider_prompt_cache = self._memory_manager.build_system_prompt() or ""
except Exception:
pass
while (api_call_count < self.max_iterations and self.iteration_budget.remaining > 0) or self._budget_grace_call:
# Reset per-turn checkpoint dedup so each iteration can take one snapshot
@@ -8217,6 +8261,8 @@ class AIAgent:
# However, providers like Moonshot AI require a separate 'reasoning_content' field
# on assistant messages with tool_calls. We handle both cases here.
api_messages = []
_current_api_user_message = ""
_current_memory_context_block = ""
for idx, msg in enumerate(messages):
api_msg = msg.copy()
@@ -8231,12 +8277,15 @@ class AIAgent:
_fenced = build_memory_context_block(_ext_prefetch_cache)
if _fenced:
_injections.append(_fenced)
_current_memory_context_block = _fenced
if _plugin_user_context:
_injections.append(_plugin_user_context)
if _injections:
_base = api_msg.get("content", "")
if isinstance(_base, str):
api_msg["content"] = _base + "\n\n" + "\n\n".join(_injections)
if isinstance(api_msg.get("content"), str):
_current_api_user_message = api_msg["content"]
# For ALL assistant messages, pass reasoning back to the API
# This ensures multi-turn reasoning context is preserved
@@ -8271,7 +8320,13 @@ class AIAgent:
from agent.privacy_filter import PrivacyFilter
pf = PrivacyFilter()
# Sanitize messages before they reach the provider
api_messages = pf.sanitize_messages(api_messages)
_pf_result = pf.sanitize_messages(api_messages)
if isinstance(_pf_result, tuple):
api_messages, _pf_report = _pf_result
if getattr(pf, "last_report", None) is None:
pf.last_report = _pf_report
else:
api_messages = _pf_result
if pf.last_report and pf.last_report.had_redactions:
logger.info(f"Privacy Filter: Redacted sensitive data from turn payload. Details: {pf.last_report.summary()}")
except Exception as e:
@@ -8342,6 +8397,27 @@ class AIAgent:
new_tcs.append(tc)
am["tool_calls"] = new_tcs
if self._context_snapshot_recorder:
try:
self._context_snapshot_recorder.record_call(
api_call_count,
system_prompt=effective_system,
memory_provider_system_prompt=_memory_provider_prompt_cache,
memory_prefetch_raw=_ext_prefetch_cache,
memory_context_block=_current_memory_context_block,
api_user_message=_current_api_user_message,
api_messages=api_messages,
metadata={
"model": self.model,
"provider": self.provider,
"platform": self.platform or "",
"api_mode": self.api_mode,
"memory_providers": [p.name for p in getattr(self._memory_manager, "providers", [])],
},
)
except Exception as _snapshot_err:
logger.debug("Context snapshot capture failed: %s", _snapshot_err)
# Calculate approximate request size for logging
total_chars = sum(len(str(msg)) for msg in api_messages)
approx_tokens = estimate_messages_tokens_rough(api_messages)

View File

@@ -0,0 +1,43 @@
from pathlib import Path
from agent.context_snapshots import ContextSnapshotRecorder
def test_disabled_recorder_writes_nothing(tmp_path):
recorder = ContextSnapshotRecorder(session_id="session-1", enabled=False, base_dir=tmp_path)
out = recorder.record_call(
1,
system_prompt="system",
api_messages=[{"role": "user", "content": "hello"}],
)
assert out is None
assert not (tmp_path / "session-1").exists()
def test_enabled_recorder_writes_expected_artifacts(tmp_path):
recorder = ContextSnapshotRecorder(session_id="session-1", enabled=True, base_dir=tmp_path)
out = recorder.record_call(
1,
system_prompt="system prompt",
memory_provider_system_prompt="# Hindsight Memory\nActive.",
memory_prefetch_raw="- remembered fact",
memory_context_block="<memory-context>\nremembered\n</memory-context>",
api_user_message="What do I prefer?\n\n<memory-context>\nremembered\n</memory-context>",
api_messages=[
{"role": "system", "content": "system prompt"},
{"role": "user", "content": "What do I prefer?"},
],
metadata={"provider": "openai", "memory_providers": ["builtin", "hindsight"]},
)
assert out == tmp_path / "session-1" / "call_001"
assert (out / "system_prompt.txt").read_text(encoding="utf-8") == "system prompt"
assert (out / "memory_provider_system_prompt.txt").read_text(encoding="utf-8").startswith("# Hindsight Memory")
assert (out / "memory_prefetch_raw.txt").read_text(encoding="utf-8") == "- remembered fact"
assert "<memory-context>" in (out / "memory_context_block.txt").read_text(encoding="utf-8")
assert "What do I prefer?" in (out / "api_user_message.txt").read_text(encoding="utf-8")
assert (out / "api_messages.json").read_text(encoding="utf-8").startswith("[")
assert '"hindsight"' in (out / "metadata.json").read_text(encoding="utf-8")

View File

@@ -1,63 +0,0 @@
{
"preferences_and_duplicates": [
{
"role": "user",
"content": "Deploy via Ansible for production changes.",
"created_at": "2026-04-22T10:00:00Z"
},
{
"role": "user",
"content": "We deploy through Ansible on this repo.",
"created_at": "2026-04-22T10:01:00Z"
},
{
"role": "user",
"content": "Gitea-first for repository work.",
"created_at": "2026-04-22T10:02:00Z"
}
],
"operational_and_contradictions": [
{
"role": "user",
"content": "The BURN watchdog caps dispatches per cycle to 6.",
"created_at": "2026-04-22T11:00:00Z"
},
{
"role": "user",
"content": "The provider should stay openai-codex/gpt-5.4.",
"created_at": "2026-04-22T11:01:00Z"
},
{
"role": "user",
"content": "Correction: the provider should stay mimo-v2-pro.",
"created_at": "2026-04-22T11:02:00Z"
}
],
"mixed_transcript": [
{
"role": "user",
"content": "Deploy via Ansible for production changes.",
"created_at": "2026-04-22T10:00:00Z"
},
{
"role": "user",
"content": "We deploy through Ansible on this repo.",
"created_at": "2026-04-22T10:01:00Z"
},
{
"role": "user",
"content": "The BURN watchdog caps dispatches per cycle to 6.",
"created_at": "2026-04-22T11:00:00Z"
},
{
"role": "user",
"content": "The provider should stay openai-codex/gpt-5.4.",
"created_at": "2026-04-22T11:01:00Z"
},
{
"role": "user",
"content": "Correction: the provider should stay mimo-v2-pro.",
"created_at": "2026-04-22T11:02:00Z"
}
]
}

View File

@@ -596,3 +596,26 @@ class TestAvailability:
monkeypatch.setenv("HINDSIGHT_MODE", "local")
p = HindsightMemoryProvider()
assert p.is_available()
def test_local_embedded_profile_defaults_to_agent_identity(self, tmp_path, monkeypatch):
config_path = tmp_path / "hindsight" / "config.json"
config_path.parent.mkdir(parents=True, exist_ok=True)
config_path.write_text(json.dumps({
"mode": "local_embedded",
"llm_provider": "ollama",
"llm_model": "gemma3:12b",
}))
monkeypatch.setattr(
"plugins.memory.hindsight.get_hermes_home",
lambda: tmp_path,
)
p = HindsightMemoryProvider()
p.initialize(
session_id="test-session",
hermes_home=str(tmp_path / "profiles" / "atlas-hindsight"),
platform="cli",
agent_identity="atlas-hindsight",
)
assert p._config["profile"] == "atlas-hindsight"

View File

@@ -1,50 +0,0 @@
"""Integration tests for holographic auto-extraction with structured fact persistence."""
import json
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
from plugins.memory.holographic import HolographicMemoryProvider
_FIXTURE_PATH = Path(__file__).resolve().parents[2] / "fixtures" / "memory_extraction_fragments.json"
def _load_fixture(name: str):
return json.loads(_FIXTURE_PATH.read_text())[name]
class TestHolographicAutoExtract:
def test_auto_extract_persists_structured_metadata_and_normalizes_duplicates(self, tmp_path):
provider = HolographicMemoryProvider(
config={
"db_path": str(tmp_path / "memory_store.db"),
"auto_extract": True,
"default_trust": 0.5,
}
)
provider.initialize("test-session")
messages = _load_fixture("mixed_transcript")
provider.on_session_end(messages)
provider.on_session_end(messages)
facts = provider._store.list_facts(min_trust=0.0, limit=20)
deploy_facts = [f for f in facts if f.get("relation") == "workflow.deploy_method"]
provider_facts = [f for f in facts if f.get("contradiction_group") == "config.provider"]
assert len(deploy_facts) == 1
assert deploy_facts[0]["metadata"]["duplicate_count"] >= 3
assert deploy_facts[0]["observed_at"] == "2026-04-22T10:00:00Z"
assert deploy_facts[0]["metadata"]["provenance"] == [
"conversation:user:0",
"conversation:user:1",
]
assert len(provider_facts) == 2
assert {f["status"] for f in provider_facts} == {"contradiction"}
assert {f["metadata"]["value"] for f in provider_facts} == {
"openai-codex/gpt-5.4",
"mimo-v2-pro",
}

View File

@@ -0,0 +1,94 @@
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import MagicMock, patch
import importlib
import sys
import types
def _make_tool_defs(*names: str) -> list:
return [
{
"type": "function",
"function": {
"name": n,
"description": f"{n} tool",
"parameters": {"type": "object", "properties": {}},
},
}
for n in names
]
def _mock_response(content="Done", finish_reason="stop"):
msg = SimpleNamespace(content=content, tool_calls=None)
choice = SimpleNamespace(message=msg, finish_reason=finish_reason)
return SimpleNamespace(choices=[choice], usage=SimpleNamespace(prompt_tokens=1, completion_tokens=1, total_tokens=2))
def _load_ai_agent():
sys.modules.setdefault("agent.auxiliary_client", types.SimpleNamespace(call_llm=lambda *a, **k: ""))
run_agent = importlib.import_module("run_agent")
return run_agent.AIAgent
def test_run_conversation_writes_context_snapshot_artifacts(tmp_path):
AIAgent = _load_ai_agent()
class _FakePrivacyFilter:
def __init__(self):
self.last_report = None
def sanitize_messages(self, messages):
return list(messages)
with (
patch("run_agent.get_tool_definitions", return_value=_make_tool_defs("web_search")),
patch("run_agent.check_toolset_requirements", return_value={}),
patch("run_agent.OpenAI"),
patch("hermes_cli.plugins.invoke_hook", return_value=[]),
patch.dict(sys.modules, {"agent.privacy_filter": types.SimpleNamespace(PrivacyFilter=_FakePrivacyFilter)}),
):
agent = AIAgent(
api_key="test-key-1234567890",
base_url="https://example.com/v1",
quiet_mode=True,
skip_context_files=True,
skip_memory=True,
context_snapshots_enabled=True,
context_snapshots_dir=str(tmp_path),
)
agent.client = MagicMock()
agent.client.chat.completions.create.return_value = _mock_response(content="Done")
agent._build_system_prompt = MagicMock(return_value="Core system prompt")
agent._memory_manager = MagicMock()
agent._memory_manager.prefetch_all.return_value = "- remembered preference"
agent._memory_manager.build_system_prompt.return_value = "# Hindsight Memory\nActive."
agent._memory_manager.providers = [
SimpleNamespace(name="builtin"),
SimpleNamespace(name="hindsight"),
]
result = agent.run_conversation("What do I prefer?")
assert result["final_response"] == "Done"
call_dir = tmp_path / agent.session_id / "call_001"
assert call_dir.exists()
assert (call_dir / "system_prompt.txt").read_text(encoding="utf-8") == "Core system prompt"
assert (call_dir / "memory_provider_system_prompt.txt").read_text(encoding="utf-8").startswith("# Hindsight Memory")
assert (call_dir / "memory_prefetch_raw.txt").read_text(encoding="utf-8") == "- remembered preference"
assert "<memory-context>" in (call_dir / "memory_context_block.txt").read_text(encoding="utf-8")
api_user_message = (call_dir / "api_user_message.txt").read_text(encoding="utf-8")
assert "What do I prefer?" in api_user_message
assert "remembered preference" in api_user_message
api_messages = (call_dir / "api_messages.json").read_text(encoding="utf-8")
assert '"role": "system"' in api_messages
assert '"role": "user"' in api_messages
metadata = (call_dir / "metadata.json").read_text(encoding="utf-8")
assert '"hindsight"' in metadata

View File

@@ -0,0 +1,22 @@
from pathlib import Path
ROOT = Path(__file__).resolve().parents[1]
DOC = ROOT / "docs" / "hindsight-local-eval.md"
def test_hindsight_local_eval_doc_exists_and_covers_switching():
assert DOC.exists(), "missing Hindsight local eval doc"
text = DOC.read_text(encoding="utf-8")
for snippet in (
"atlas-baseline",
"atlas-mempalace",
"atlas-hindsight",
"HERMES_HOME",
"HERMES_CONTEXT_SNAPSHOTS",
"memory_prefetch_raw.txt",
"api_user_message.txt",
"local_embedded",
"hindsight/config.json",
):
assert snippet in text

View File

@@ -1,6 +1,6 @@
"""Tests for session compaction with fact extraction."""
import json
import pytest
import sys
from pathlib import Path
@@ -8,19 +8,12 @@ sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
from agent.session_compactor import (
ExtractedFact,
evaluate_extraction_quality,
extract_and_save_facts,
extract_facts_from_messages,
format_facts_summary,
save_facts_to_store,
extract_and_save_facts,
format_facts_summary,
)
_FIXTURE_PATH = Path(__file__).resolve().parent / "fixtures" / "memory_extraction_fragments.json"
def _load_fixture(name: str):
return json.loads(_FIXTURE_PATH.read_text())[name]
class TestFactExtraction:
def test_extract_preference(self):
@@ -67,48 +60,14 @@ class TestFactExtraction:
{"role": "user", "content": "I prefer Python."},
]
facts = extract_facts_from_messages(messages)
# Should deduplicate
python_facts = [f for f in facts if "Python" in f.content]
assert len(python_facts) == 1
def test_structured_fact_preserves_provenance_and_temporal_metadata(self):
facts = extract_facts_from_messages(_load_fixture("preferences_and_duplicates"))
deploy_fact = next(f for f in facts if f.relation == "workflow.deploy_method")
assert deploy_fact.source_role == "user"
assert deploy_fact.source_turn == 0
assert deploy_fact.observed_at == "2026-04-22T10:00:00Z"
assert deploy_fact.provenance == "conversation:user:0"
assert deploy_fact.canonical_key
assert deploy_fact.evidence
assert deploy_fact.evidence[0]["source_text"].startswith("Deploy via Ansible")
def test_near_duplicate_facts_are_normalized_into_one_canonical_fact(self):
facts = extract_facts_from_messages(_load_fixture("preferences_and_duplicates"))
deploy_facts = [f for f in facts if f.relation == "workflow.deploy_method"]
assert len(deploy_facts) == 1
assert len(deploy_facts[0].evidence) == 2
assert deploy_facts[0].metadata["duplicate_count"] == 1
def test_contradictory_facts_are_preserved_for_unique_slots(self):
facts = extract_facts_from_messages(_load_fixture("operational_and_contradictions"))
provider_facts = [f for f in facts if f.contradiction_group == "config.provider"]
assert len(provider_facts) == 2
assert {f.status for f in provider_facts} == {"contradiction"}
assert {f.normalized_content for f in provider_facts} == {
"openai codex gpt 5 4",
"mimo v2 pro",
}
def test_quality_evaluation_reports_noise_reduction(self):
metrics = evaluate_extraction_quality(_load_fixture("mixed_transcript"))
assert metrics["raw_candidates"] > metrics["normalized_facts"]
assert metrics["noise_reduction"] > 0
assert metrics["contradiction_groups"] == 1
class TestSaveFacts:
def test_save_with_callback(self):
saved = []
def mock_save(category, entity, content, trust):
saved.append({"category": category, "content": content})
@@ -117,38 +76,6 @@ class TestSaveFacts:
assert count == 1
assert len(saved) == 1
def test_save_with_extended_callback_metadata(self):
saved = []
def mock_save(category, entity, content, trust, **kwargs):
saved.append({
"category": category,
"entity": entity,
"content": content,
"trust": trust,
**kwargs,
})
fact = ExtractedFact(
"project.operational",
"watchdog",
"BURN watchdog caps dispatches per cycle to 6",
0.9,
2,
source_role="user",
observed_at="2026-04-22T11:00:00Z",
provenance="conversation:user:2",
canonical_key="project.operational|watchdog|dispatch_cap|6",
relation="fleet.dispatch_cap",
contradiction_group="fleet.dispatch_cap",
metadata={"duplicate_count": 0},
)
count = save_facts_to_store([fact], fact_store_fn=mock_save)
assert count == 1
assert saved[0]["canonical_key"] == fact.canonical_key
assert saved[0]["observed_at"] == "2026-04-22T11:00:00Z"
assert saved[0]["metadata"]["duplicate_count"] == 0
class TestFormatSummary:
def test_empty(self):