Compare commits

..

1 Commit

Author SHA1 Message Date
Timmy
567c02b3c4 feat: marathon session limits — cap, checkpoint, rotate (#326)
Some checks failed
Forge CI / smoke-and-build (pull_request) Failing after 40s
- Add max_messages (default 200) to SessionResetPolicy
- Track message_count in SessionEntry
- Add 'message_limit' reset reason to _should_reset
- Auto-checkpoint filesystem before session rotation
- Inject near-limit warnings into agent ephemeral prompt
- Auto-rotate sessions when message cap is hit
- Add get_message_limit_info() and reset_message_count() APIs
- 24 new tests covering all limit behaviors

Evidence: 170 sessions exceed 100 msgs, longest 1,643 msgs (40h).
Marathon sessions show 45-84% error rates from tool fixation.
Cap + checkpoint + restart breaks the death spiral.
2026-04-13 17:34:54 -04:00
9 changed files with 479 additions and 532 deletions

View File

@@ -107,6 +107,7 @@ class SessionResetPolicy:
mode: str = "both" # "daily", "idle", "both", or "none"
at_hour: int = 4 # Hour for daily reset (0-23, local time)
idle_minutes: int = 1440 # Minutes of inactivity before reset (24 hours)
max_messages: int = 200 # Max messages per session before forced checkpoint+restart (0 = unlimited)
notify: bool = True # Send a notification to the user when auto-reset occurs
notify_exclude_platforms: tuple = ("api_server", "webhook") # Platforms that don't get reset notifications
@@ -115,6 +116,7 @@ class SessionResetPolicy:
"mode": self.mode,
"at_hour": self.at_hour,
"idle_minutes": self.idle_minutes,
"max_messages": self.max_messages,
"notify": self.notify,
"notify_exclude_platforms": list(self.notify_exclude_platforms),
}
@@ -125,12 +127,14 @@ class SessionResetPolicy:
mode = data.get("mode")
at_hour = data.get("at_hour")
idle_minutes = data.get("idle_minutes")
max_messages = data.get("max_messages")
notify = data.get("notify")
exclude = data.get("notify_exclude_platforms")
return cls(
mode=mode if mode is not None else "both",
at_hour=at_hour if at_hour is not None else 4,
idle_minutes=idle_minutes if idle_minutes is not None else 1440,
max_messages=max_messages if max_messages is not None else 200,
notify=notify if notify is not None else True,
notify_exclude_platforms=tuple(exclude) if exclude is not None else ("api_server", "webhook"),
)

View File

@@ -2343,6 +2343,13 @@ class GatewayRunner:
reset_reason = getattr(session_entry, 'auto_reset_reason', None) or 'idle'
if reset_reason == "daily":
context_note = "[System note: The user's session was automatically reset by the daily schedule. This is a fresh conversation with no prior context.]"
elif reset_reason == "message_limit":
context_note = (
"[System note: The user's previous session reached the message limit "
"and was automatically checkpointed and rotated. This is a fresh session. "
"The prior conversation context was preserved via checkpoint. "
"If the user references something from before, you can search session history.]"
)
else:
context_note = "[System note: The user's previous session expired due to inactivity. This is a fresh conversation with no prior context.]"
context_prompt = context_note + "\n\n" + context_prompt
@@ -2368,16 +2375,18 @@ class GatewayRunner:
if adapter:
if reset_reason == "daily":
reason_text = f"daily schedule at {policy.at_hour}:00"
elif reset_reason == "message_limit":
reason_text = f"reached {policy.max_messages} message limit"
else:
hours = policy.idle_minutes // 60
mins = policy.idle_minutes % 60
duration = f"{hours}h" if not mins else f"{hours}h {mins}m" if hours else f"{mins}m"
reason_text = f"inactive for {duration}"
notice = (
f"◐ Session automatically reset ({reason_text}). "
f"Conversation history cleared.\n"
f"◐ Session automatically rotated ({reason_text}). "
f"Conversation was checkpointed before rotation.\n"
f"Use /resume to browse and restore a previous session.\n"
f"Adjust reset timing in config.yaml under session_reset."
f"Adjust limits in config.yaml under session_reset."
)
try:
session_info = self._format_session_info()
@@ -3073,6 +3082,51 @@ class GatewayRunner:
last_prompt_tokens=agent_result.get("last_prompt_tokens", 0),
)
# Marathon session limit (#326): check if we hit the message cap
# after this turn. If so, auto-checkpoint and rotate the session.
try:
_post_limit = self.session_store.get_message_limit_info(session_key)
if _post_limit["at_limit"] and _post_limit["max_messages"] > 0:
logger.info(
"[Marathon] Session %s hit message limit (%d/%d). "
"Auto-checkpointing and rotating.",
session_key, _post_limit["message_count"], _post_limit["max_messages"],
)
# Attempt filesystem checkpoint before rotation
try:
from tools.checkpoint_manager import CheckpointManager
_cp_cfg_path = _hermes_home / "config.yaml"
if _cp_cfg_path.exists():
import yaml as _cp_yaml
with open(_cp_cfg_path, encoding="utf-8") as _cpf:
_cp_data = _cp_yaml.safe_load(_cpf) or {}
_cp_settings = _cp_data.get("checkpoints", {})
if _cp_settings.get("enabled"):
_cwd = _cp_settings.get("working_dir") or os.getcwd()
mgr = CheckpointManager(
max_checkpoints=_cp_settings.get("max_checkpoints", 20),
)
cp = mgr.create_checkpoint(
str(_cwd),
label=f"marathon-limit-{session_entry.session_id[:8]}",
)
if cp:
logger.info("[Marathon] Checkpoint created: %s", cp.label)
except Exception as cp_err:
logger.debug("[Marathon] Checkpoint creation failed (non-fatal): %s", cp_err)
# Rotate session: reset creates a new session ID
new_entry = self.session_store.reset_session(session_key)
if new_entry:
logger.info(
"[Marathon] Session rotated: %s -> %s",
session_entry.session_id, new_entry.session_id,
)
# Reset message count for the new session
self.session_store.reset_message_count(session_key)
except Exception as rot_err:
logger.debug("[Marathon] Post-turn rotation check failed (non-fatal): %s", rot_err)
# Auto voice reply: send TTS audio before the text response
_already_sent = bool(agent_result.get("already_sent"))
if self._should_send_voice_reply(event, response, agent_messages, already_sent=_already_sent):
@@ -6538,6 +6592,29 @@ class GatewayRunner:
if self._ephemeral_system_prompt:
combined_ephemeral = (combined_ephemeral + "\n\n" + self._ephemeral_system_prompt).strip()
# Marathon session limit warning (#326): inject near-limit warning
# into ephemeral prompt so the agent knows to wrap up.
try:
_limit_info = self.session_store.get_message_limit_info(session_key)
if _limit_info["near_limit"] and not _limit_info["at_limit"]:
_remaining = _limit_info["remaining"]
_limit_warn = (
f"[SESSION LIMIT: This session has {_limit_info['message_count']} messages. "
f"Only {_remaining} message(s) remain before automatic session rotation at "
f"{_limit_info['max_messages']} messages. Start wrapping up your work and "
f"provide a summary. Save any important state before the session rotates.]"
)
combined_ephemeral = (combined_ephemeral + "\n\n" + _limit_warn).strip()
elif _limit_info["at_limit"]:
_limit_warn = (
f"[SESSION LIMIT REACHED: This session has hit the {_limit_info['max_messages']} "
f"message limit. This is your FINAL response. Summarize what was accomplished "
f"and provide any critical next steps. The session will rotate after this turn.]"
)
combined_ephemeral = (combined_ephemeral + "\n\n" + _limit_warn).strip()
except Exception:
pass # Non-fatal: limit warning is advisory only
# Re-read .env and config for fresh credentials (gateway is long-lived,
# keys may change without restart).
try:

View File

@@ -383,6 +383,11 @@ class SessionEntry:
# survives gateway restarts (the old in-memory _pre_flushed_sessions
# set was lost on restart, causing redundant re-flushes).
memory_flushed: bool = False
# Marathon session limit tracking (#326).
# Counts total messages (user + assistant + tool) in this session.
# Used to trigger checkpoint+restart at max_messages threshold.
message_count: int = 0
def to_dict(self) -> Dict[str, Any]:
result = {
@@ -402,6 +407,7 @@ class SessionEntry:
"estimated_cost_usd": self.estimated_cost_usd,
"cost_status": self.cost_status,
"memory_flushed": self.memory_flushed,
"message_count": self.message_count,
}
if self.origin:
result["origin"] = self.origin.to_dict()
@@ -438,6 +444,7 @@ class SessionEntry:
estimated_cost_usd=data.get("estimated_cost_usd", 0.0),
cost_status=data.get("cost_status", "unknown"),
memory_flushed=data.get("memory_flushed", False),
message_count=data.get("message_count", 0),
)
@@ -626,10 +633,10 @@ class SessionStore:
def _should_reset(self, entry: SessionEntry, source: SessionSource) -> Optional[str]:
"""
Check if a session should be reset based on policy.
Returns the reset reason ("idle" or "daily") if a reset is needed,
or None if the session is still valid.
Returns the reset reason ("idle", "daily", or "message_limit") if a reset
is needed, or None if the session is still valid.
Sessions with active background processes are never reset.
"""
if self._has_active_processes_fn:
@@ -641,30 +648,37 @@ class SessionStore:
platform=source.platform,
session_type=source.chat_type
)
if policy.mode == "none":
# Even with mode=none, enforce message_limit if set
if policy.max_messages > 0 and entry.message_count >= policy.max_messages:
return "message_limit"
return None
now = _now()
if policy.mode in ("idle", "both"):
idle_deadline = entry.updated_at + timedelta(minutes=policy.idle_minutes)
if now > idle_deadline:
return "idle"
if policy.mode in ("daily", "both"):
today_reset = now.replace(
hour=policy.at_hour,
minute=0,
second=0,
hour=policy.at_hour,
minute=0,
second=0,
microsecond=0
)
if now.hour < policy.at_hour:
today_reset -= timedelta(days=1)
if entry.updated_at < today_reset:
return "daily"
# Marathon session limit (#326): force checkpoint+restart at max_messages
if policy.max_messages > 0 and entry.message_count >= policy.max_messages:
return "message_limit"
return None
def has_any_sessions(self) -> bool:
@@ -822,6 +836,70 @@ class SessionStore:
entry.last_prompt_tokens = last_prompt_tokens
self._save()
def get_message_count(self, session_key: str) -> int:
    """Return the tracked message count for a session.

    Reads the locally-tracked ``SessionEntry.message_count`` that
    ``append_to_transcript`` maintains for marathon limit tracking (#326).

    Args:
        session_key: The session's lookup key in the entry map.

    Returns:
        The entry's message count, or 0 when the session is unknown.

    Note:
        The previous implementation had an unreachable DB fallback: it
        evaluated ``entry.session_id ... if entry else 0`` on a path that is
        only reached when ``entry`` is None, so the DB was never queried and
        the result was always 0. The dead branch is removed; a genuine DB
        fallback would need a session_key -> session_id mapping, which is not
        available without the entry itself.
    """
    with self._lock:
        self._ensure_loaded_locked()
        entry = self._entries.get(session_key)
        if entry is not None:
            return entry.message_count
        # Unknown session: no entry means no session_id to resolve, so a
        # direct DB lookup is impossible here.
        return 0
def get_message_limit_info(self, session_key: str) -> Dict[str, Any]:
    """Get message count and limit info for a session.

    Returns dict with: message_count, max_messages, remaining, near_limit,
    at_limit, and threshold (fraction of limit used).
    """
    with self._lock:
        self._ensure_loaded_locked()
        entry = self._entries.get(session_key)
        if not entry:
            # Unknown session: report a neutral, no-limit state.
            return {
                "message_count": 0,
                "max_messages": 0,
                "remaining": 0,
                "near_limit": False,
                "at_limit": False,
                "threshold": 0.0,
            }
        policy = self.config.get_reset_policy(
            platform=entry.platform,
            session_type=entry.chat_type,
        )
        cap = policy.max_messages
        count = entry.message_count
        limited = cap > 0
        return {
            "message_count": count,
            "max_messages": cap,
            # Unlimited (cap == 0) sessions report infinite headroom.
            "remaining": max(0, cap - count) if limited else float("inf"),
            # 85% of the cap (truncated to int) is the "wrap up" warning line.
            "near_limit": limited and count >= int(cap * 0.85),
            "at_limit": limited and count >= cap,
            "threshold": count / cap if limited else 0.0,
        }
def reset_message_count(self, session_key: str) -> None:
    """Reset the message count to zero for a session.

    Called after a session rotation to start fresh counting.
    """
    with self._lock:
        self._ensure_loaded_locked()
        target = self._entries.get(session_key)
        if target is None:
            return  # Unknown key: nothing to reset, nothing to persist.
        target.message_count = 0
        self._save()  # Persist so the zeroed count survives restarts.
def reset_session(self, session_key: str) -> Optional[SessionEntry]:
"""Force reset a session, creating a new session ID."""
db_end_session_id = None
@@ -849,6 +927,7 @@ class SessionStore:
display_name=old_entry.display_name,
platform=old_entry.platform,
chat_type=old_entry.chat_type,
message_count=0, # Fresh count after rotation (#326)
)
self._entries[session_key] = new_entry
@@ -942,12 +1021,18 @@ class SessionStore:
def append_to_transcript(self, session_id: str, message: Dict[str, Any], skip_db: bool = False) -> None:
"""Append a message to a session's transcript (SQLite + legacy JSONL).
Also increments the session's message_count for marathon session
limit tracking (#326).
Args:
skip_db: When True, only write to JSONL and skip the SQLite write.
Used when the agent already persisted messages to SQLite
via its own _flush_messages_to_session_db(), preventing
the duplicate-write bug (#860).
"""
# Skip counting session_meta entries (tool defs, metadata)
is_meta = message.get("role") == "session_meta"
# Write to SQLite (unless the agent already handled it)
if self._db and not skip_db:
try:
@@ -961,11 +1046,20 @@ class SessionStore:
)
except Exception as e:
logger.debug("Session DB operation failed: %s", e)
# Also write legacy JSONL (keeps existing tooling working during transition)
transcript_path = self.get_transcript_path(session_id)
with open(transcript_path, "a", encoding="utf-8") as f:
f.write(json.dumps(message, ensure_ascii=False) + "\n")
# Increment message count for marathon session tracking (#326)
if not is_meta:
with self._lock:
for entry in self._entries.values():
if entry.session_id == session_id:
entry.message_count += 1
self._save()
break
def rewrite_transcript(self, session_id: str, messages: List[Dict[str, Any]]) -> None:
"""Replace the entire transcript for a session with new messages.

View File

@@ -47,7 +47,6 @@ FACT_STORE_SCHEMA = {
"• related — What connects to an entity? Structural adjacency.\n"
"• reason — Compositional: facts connected to MULTIPLE entities simultaneously.\n"
"• contradict — Memory hygiene: find facts making conflicting claims.\n"
"• resolve_contradictions — Auto-resolve obvious contradictions, flag ambiguous ones.\n"
"• update/remove/list — CRUD operations.\n\n"
"IMPORTANT: Before answering questions about the user, ALWAYS probe or reason first."
),
@@ -56,7 +55,7 @@ FACT_STORE_SCHEMA = {
"properties": {
"action": {
"type": "string",
"enum": ["add", "search", "probe", "related", "reason", "contradict", "resolve_contradictions", "update", "remove", "list"],
"enum": ["add", "search", "probe", "related", "reason", "contradict", "update", "remove", "list"],
},
"content": {"type": "string", "description": "Fact content (required for 'add')."},
"query": {"type": "string", "description": "Search query (required for 'search')."},
@@ -209,23 +208,13 @@ class HolographicMemoryProvider(MemoryProvider):
return ""
try:
results = self._retriever.search(query, min_trust=self._min_trust, limit=5)
parts = []
if results:
lines = []
for r in results:
trust = r.get("trust_score", r.get("trust", 0))
lines.append(f"- [{trust:.1f}] {r.get('content', '')}")
parts.append("## Holographic Memory\n" + "\n".join(lines))
# Session-start contradiction check (lightweight)
try:
contradiction_summary = self._retriever.check_contradictions_session_start()
if contradiction_summary:
parts.append(contradiction_summary)
except Exception:
pass # Don't block session start on contradiction check failure
return "\n\n".join(parts) if parts else ""
if not results:
return ""
lines = []
for r in results:
trust = r.get("trust_score", r.get("trust", 0))
lines.append(f"- [{trust:.1f}] {r.get('content', '')}")
return "## Holographic Memory\n" + "\n".join(lines)
except Exception as e:
logger.debug("Holographic prefetch failed: %s", e)
return ""
@@ -340,13 +329,6 @@ class HolographicMemoryProvider(MemoryProvider):
)
return json.dumps({"results": results, "count": len(results)})
elif action == "resolve_contradictions":
report = retriever.auto_resolve_contradictions(
category=args.get("category"),
return_report=True,
)
return json.dumps(report, indent=2)
elif action == "update":
updated = store.update_fact(
int(args["fact_id"]),

View File

@@ -449,139 +449,6 @@ class FactRetriever:
contradictions.sort(key=lambda x: x["contradiction_score"], reverse=True)
return contradictions[:limit]
def auto_resolve_contradictions(
    self,
    category: str | None = None,
    threshold: float = 0.05,
    ambiguous_threshold: float = 0.10,
    return_report: bool = False,
) -> str | dict:
    """Auto-resolve obvious contradictions and flag ambiguous ones.

    Logic:
    - Obvious (score >= ambiguous_threshold): newer fact supersedes older.
      Lower trust on older fact by 0.20. Keeps the newer, higher-quality fact.
    - Ambiguous (score >= threshold, < ambiguous_threshold): flag for review,
      don't auto-resolve. Slightly lower trust on both (-0.05) to surface them.

    Args:
        category: Optional category filter.
        threshold: Minimum contradiction score to consider.
        ambiguous_threshold: Above this = obvious auto-resolve; below = ambiguous flag.
        return_report: If True, return a structured dict. Otherwise return a
            human-readable summary string.

    Returns:
        Report as dict (return_report=True) or summary string.
    """
    # Trust deltas applied via store.update_fact; negative lowers trust.
    TRUST_REDUCTION_OBVIOUS = -0.20
    TRUST_REDUCTION_AMBIGUOUS = -0.05

    contradictions = self.contradict(category=category, threshold=threshold, limit=100)
    auto_resolved = []
    flagged = []

    # Track which facts we've already processed to avoid double-penalizing
    processed_pairs: set[tuple[int, int]] = set()

    for c in contradictions:
        f_a = c["fact_a"]
        f_b = c["fact_b"]
        id_a = f_a["fact_id"]
        id_b = f_b["fact_id"]
        # Normalize the key so (a, b) and (b, a) dedupe to the same pair.
        pair_key = (min(id_a, id_b), max(id_a, id_b))
        if pair_key in processed_pairs:
            continue
        processed_pairs.add(pair_key)

        score = c["contradiction_score"]
        if score >= ambiguous_threshold:
            # Obvious contradiction — newer supersedes older
            created_a = f_a.get("created_at", "")
            created_b = f_b.get("created_at", "")
            # The one with the later created_at is newer.
            # NOTE(review): this compares timestamps as strings — presumably
            # ISO-8601, where lexical order matches chronological order; on a
            # tie fact A wins. Confirm the store's created_at format.
            if created_a >= created_b:
                keep_id, lower_id = id_a, id_b
            else:
                keep_id, lower_id = id_b, id_a
            self.store.update_fact(lower_id, trust_delta=TRUST_REDUCTION_OBVIOUS)
            self.store.update_fact(keep_id, trust_delta=0.0)  # touch updated_at
            auto_resolved.append({
                "kept_fact_id": keep_id,
                "lowered_fact_id": lower_id,
                "contradiction_score": score,
                "shared_entities": c["shared_entities"],
                "reason": "newer_supersedes_older",
            })
        else:
            # Ambiguous — flag for review, slight trust reduction on both
            # (a fact appearing in several ambiguous pairs is penalized once
            # per distinct pair).
            self.store.update_fact(id_a, trust_delta=TRUST_REDUCTION_AMBIGUOUS)
            self.store.update_fact(id_b, trust_delta=TRUST_REDUCTION_AMBIGUOUS)
            flagged.append({
                "fact_a_id": id_a,
                "fact_b_id": id_b,
                "contradiction_score": score,
                "shared_entities": c["shared_entities"],
                "reason": "ambiguous_requires_review",
            })

    report = {
        "auto_resolved": auto_resolved,
        "flagged": flagged,
        "total_checked": len(contradictions),
        "resolved_count": len(auto_resolved),
        "flagged_count": len(flagged),
    }
    if return_report:
        return report

    # Build human-readable summary
    parts = []
    if auto_resolved:
        parts.append(f"Auto-resolved {len(auto_resolved)} contradiction(s): newer facts superseded older ones.")
        for r in auto_resolved:
            parts.append(f"  - Kept fact #{r['kept_fact_id']}, lowered trust on #{r['lowered_fact_id']} "
                         f"(score={r['contradiction_score']}, entities={r['shared_entities']})")
    if flagged:
        parts.append(f"Flagged {len(flagged)} ambiguous contradiction(s) for review.")
        for r in flagged:
            parts.append(f"  - Facts #{r['fact_a_id']} vs #{r['fact_b_id']} "
                         f"(score={r['contradiction_score']}, entities={r['shared_entities']})")
    if not auto_resolved and not flagged:
        parts.append("No contradictions detected.")
    return "\n".join(parts)
def check_contradictions_session_start(self) -> str:
    """Lightweight contradiction check for session start.

    Runs a quick scan and returns a brief summary string suitable for
    injecting into the agent's context. Returns empty string if nothing found.
    """
    found = self.contradict(threshold=0.08, limit=5)
    if not found:
        return ""
    summary = [f"⚠️ Found {len(found)} potential contradiction(s) in memory:"]
    # Show at most 3 pairs so the injected context stays brief.
    for item in found[:3]:
        left = item["fact_a"].get("content", "?")[:60]
        right = item["fact_b"].get("content", "?")[:60]
        summary.append(f' - "{left}" vs "{right}" (score={item["contradiction_score"]})')
    summary.append("Use fact_store(action='resolve_contradictions') to auto-resolve.")
    return "\n".join(summary)
def _score_facts_by_vector(
self,
target_vec: "np.ndarray",

View File

@@ -317,19 +317,6 @@ class MemoryStore:
self._rebuild_bank(row["category"])
return True
def get_fact(self, fact_id: int) -> dict | None:
    """Get a single fact by ID. Returns None if not found."""
    query = (
        "SELECT fact_id, content, category, tags, trust_score, "
        "retrieval_count, helpful_count, created_at, updated_at "
        "FROM facts WHERE fact_id = ?"
    )
    with self._lock:
        row = self._conn.execute(query, (fact_id,)).fetchone()
        # sqlite3.Row converts cleanly to a plain dict for callers.
        return dict(row) if row is not None else None
def list_facts(
self,
category: str | None = None,

View File

@@ -1,85 +0,0 @@
#!/usr/bin/env python3
"""
Weekly contradiction detection for holographic memory store.

Run as a cron job: hermes cron create --profile default --skills contradiction-detector \
    "Run the contradiction detector and report findings." --schedule "every 7d"

This script:
1. Connects to the holographic memory store
2. Runs auto_resolve_contradictions()
3. Outputs a structured report for the agent to deliver
"""
import json
import sys
from pathlib import Path

# Add project root to path
sys.path.insert(0, str(Path(__file__).parent.parent))


def main():
    """Run the contradiction scan and print a markdown report to stdout."""
    try:
        from plugins.memory.holographic.store import MemoryStore
        from plugins.memory.holographic.retrieval import FactRetriever
        from hermes_constants import get_hermes_home
    except ImportError as e:
        print(f"Import error: {e}")
        sys.exit(1)

    hermes_home = get_hermes_home()
    db_path = hermes_home / "memory_store.db"
    if not db_path.exists():
        # Nothing to scan — exit cleanly so the cron job reports success.
        print("No memory store found — nothing to check.")
        return

    store = MemoryStore(db_path=str(db_path))
    retriever = FactRetriever(store)
    try:
        report = retriever.auto_resolve_contradictions(return_report=True)
        resolved = report.get("auto_resolved", [])
        flagged = report.get("flagged", [])
        total = report.get("total_checked", 0)
        if not resolved and not flagged:
            print(f"Memory hygiene check complete. Scanned {total} fact pairs. No contradictions found.")
            return

        parts = ["## Weekly Memory Contradiction Report"]
        parts.append(f"Scanned {total} fact pair(s).\n")
        if resolved:
            parts.append(f"### Auto-resolved: {len(resolved)}")
            for r in resolved:
                parts.append(
                    f"- Kept fact #{r['kept_fact_id']}, lowered trust on #{r['lowered_fact_id']} "
                    f"(score={r['contradiction_score']}, entities={r['shared_entities']})"
                )
            parts.append("")
        if flagged:
            parts.append(f"### Flagged for review: {len(flagged)}")
            for r in flagged:
                # Flagged pairs are NOT resolved: neither side "won". Fetch
                # both facts purely for display (the old locals were misnamed
                # kept/lowered, which implied a resolution outcome).
                fact_a = store.get_fact(r.get("fact_a_id", 0))
                fact_b = store.get_fact(r.get("fact_b_id", 0))
                parts.append(
                    f"- Facts #{r['fact_a_id']} vs #{r['fact_b_id']} "
                    f"(score={r['contradiction_score']}, entities={r['shared_entities']})"
                )
                if fact_a:
                    parts.append(f"  A: \"{fact_a.get('content', '?')[:80]}\"")
                if fact_b:
                    parts.append(f"  B: \"{fact_b.get('content', '?')[:80]}\"")
            parts.append("")
        print("\n".join(parts))
    finally:
        # Always release the SQLite handle, even if the scan raised.
        store.close()


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,279 @@
"""Tests for marathon session limits (#326)."""
import json
import os
import tempfile
from datetime import datetime, timedelta
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from gateway.config import (
GatewayConfig,
Platform,
PlatformConfig,
SessionResetPolicy,
)
from gateway.session import (
SessionEntry,
SessionSource,
SessionStore,
)
@pytest.fixture
def tmp_sessions_dir(tmp_path):
    """Per-test sessions directory under pytest's tmp_path."""
    d = tmp_path / "sessions"
    d.mkdir()
    return d


@pytest.fixture
def default_config():
    """Gateway config with all-default policies (max_messages=200)."""
    return GatewayConfig()


@pytest.fixture
def store(tmp_path, default_config):
    """SessionStore backed by a fresh temp directory and default config."""
    return SessionStore(tmp_sessions_dir, default_config)


def _make_source(platform=Platform.LOCAL, chat_id="test-chat"):
    """Build a DM SessionSource for a fixed test user."""
    return SessionSource(
        platform=platform,
        chat_id=chat_id,
        chat_type="dm",
        user_id="test-user",
        user_name="Test User",
    )
class TestSessionResetPolicyMaxMessages:
    """Test max_messages field on SessionResetPolicy."""

    def test_default_max_messages(self):
        # Default cap is 200 messages per session (#326).
        policy = SessionResetPolicy()
        assert policy.max_messages == 200

    def test_custom_max_messages(self):
        policy = SessionResetPolicy(max_messages=500)
        assert policy.max_messages == 500

    def test_unlimited_when_zero(self):
        # 0 is the sentinel for "no limit".
        policy = SessionResetPolicy(max_messages=0)
        assert policy.max_messages == 0

    def test_to_dict_includes_max_messages(self):
        policy = SessionResetPolicy(max_messages=300)
        d = policy.to_dict()
        assert d["max_messages"] == 300

    def test_from_dict_restores_max_messages(self):
        data = {"mode": "idle", "max_messages": 150}
        policy = SessionResetPolicy.from_dict(data)
        assert policy.max_messages == 150

    def test_from_dict_defaults_max_messages(self):
        # Missing key falls back to the 200 default (config round-trip safety).
        data = {"mode": "idle"}
        policy = SessionResetPolicy.from_dict(data)
        assert policy.max_messages == 200
class TestSessionEntryMessageCount:
    """Test message_count field on SessionEntry."""

    def test_default_message_count(self):
        # New entries start at zero messages.
        entry = SessionEntry(
            session_key="test",
            session_id="20260101_000000_abc12345",
            created_at=datetime.now(),
            updated_at=datetime.now(),
        )
        assert entry.message_count == 0

    def test_to_dict_includes_message_count(self):
        # Serialization must carry the count so it survives gateway restarts.
        entry = SessionEntry(
            session_key="test",
            session_id="20260101_000000_abc12345",
            created_at=datetime.now(),
            updated_at=datetime.now(),
            message_count=42,
        )
        d = entry.to_dict()
        assert d["message_count"] == 42

    def test_from_dict_restores_message_count(self):
        data = {
            "session_key": "test",
            "session_id": "20260101_000000_abc12345",
            "created_at": "2026-01-01T00:00:00",
            "updated_at": "2026-01-01T00:00:00",
            "message_count": 99,
        }
        entry = SessionEntry.from_dict(data)
        assert entry.message_count == 99
class TestShouldResetMessageLimit:
    """Test _should_reset returns 'message_limit' when message count exceeds cap."""

    def test_reset_at_limit(self, store):
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 200  # At default limit
        result = store._should_reset(entry, source)
        assert result == "message_limit"

    def test_reset_over_limit(self, store):
        # Counts past the cap (e.g. restored state) must still trigger.
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 250
        result = store._should_reset(entry, source)
        assert result == "message_limit"

    def test_no_reset_below_limit(self, store):
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 100
        result = store._should_reset(entry, source)
        assert result is None

    def test_no_reset_when_unlimited(self):
        # mode="none" + max_messages=0: no reset path at all.
        # NOTE(review): tempfile.mkdtemp() dirs are never cleaned up here —
        # consider the tmp_path fixture instead.
        config = GatewayConfig()
        config.default_reset_policy = SessionResetPolicy(
            mode="none", max_messages=0
        )
        store = SessionStore(Path(tempfile.mkdtemp()), config)
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 9999
        result = store._should_reset(entry, source)
        assert result is None

    def test_custom_limit(self):
        config = GatewayConfig()
        config.default_reset_policy = SessionResetPolicy(max_messages=50)
        store = SessionStore(Path(tempfile.mkdtemp()), config)
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 50
        result = store._should_reset(entry, source)
        assert result == "message_limit"

    def test_no_reset_just_under(self):
        # Boundary check: limit is inclusive (>=), so 49/50 must NOT reset.
        config = GatewayConfig()
        config.default_reset_policy = SessionResetPolicy(max_messages=50)
        store = SessionStore(Path(tempfile.mkdtemp()), config)
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 49
        result = store._should_reset(entry, source)
        assert result is None
class TestAppendToTranscriptIncrementsCount:
    """Test that append_to_transcript increments message_count."""

    def test_increment_on_user_message(self, store, tmp_sessions_dir):
        source = _make_source()
        entry = store.get_or_create_session(source)
        assert entry.message_count == 0
        store.append_to_transcript(
            entry.session_id,
            {"role": "user", "content": "hello"},
        )
        # Re-read entry
        entry = store.get_or_create_session(source)
        assert entry.message_count == 1

    def test_increment_on_assistant_message(self, store, tmp_sessions_dir):
        # Both user and assistant messages count toward the cap.
        source = _make_source()
        entry = store.get_or_create_session(source)
        store.append_to_transcript(
            entry.session_id,
            {"role": "user", "content": "hello"},
        )
        store.append_to_transcript(
            entry.session_id,
            {"role": "assistant", "content": "hi there"},
        )
        entry = store.get_or_create_session(source)
        assert entry.message_count == 2

    def test_no_increment_on_session_meta(self, store, tmp_sessions_dir):
        # session_meta entries (tool defs, metadata) are excluded from the count.
        source = _make_source()
        entry = store.get_or_create_session(source)
        store.append_to_transcript(
            entry.session_id,
            {"role": "session_meta", "tools": []},
        )
        entry = store.get_or_create_session(source)
        assert entry.message_count == 0
class TestGetMessageLimitInfo:
    """Test get_message_limit_info returns correct data."""

    def test_at_limit(self, store):
        # At the cap, both near_limit and at_limit are true.
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 200
        info = store.get_message_limit_info(entry.session_key)
        assert info["message_count"] == 200
        assert info["max_messages"] == 200
        assert info["remaining"] == 0
        assert info["near_limit"] is True
        assert info["at_limit"] is True

    def test_near_limit(self, store):
        # near_limit threshold is 85% of the cap (170 of 200).
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 180  # 90% of 200
        info = store.get_message_limit_info(entry.session_key)
        assert info["near_limit"] is True
        assert info["at_limit"] is False
        assert info["remaining"] == 20

    def test_well_below_limit(self, store):
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 50
        info = store.get_message_limit_info(entry.session_key)
        assert info["near_limit"] is False
        assert info["at_limit"] is False
        assert info["remaining"] == 150

    def test_unknown_session(self, store):
        # Unknown keys must return a neutral no-limit state, not raise.
        info = store.get_message_limit_info("nonexistent")
        assert info["message_count"] == 0
        assert info["at_limit"] is False
class TestResetMessageCount:
    """Test reset_message_count resets to zero."""

    def test_reset_count(self, store):
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 150
        store.reset_message_count(entry.session_key)
        # Observed through the public info API rather than the entry directly.
        info = store.get_message_limit_info(entry.session_key)
        assert info["message_count"] == 0
class TestSessionRotationPreservesOrigin:
    """Test that reset_session rotates to a fresh entry with message_count=0.

    NOTE(review): despite the class name, origin preservation is not asserted
    here — only the fresh count and new session_id are checked.
    """

    def test_reset_creates_fresh_count(self, store):
        source = _make_source()
        entry = store.get_or_create_session(source)
        entry.message_count = 200
        new_entry = store.reset_session(entry.session_key)
        assert new_entry is not None
        assert new_entry.message_count == 0
        assert new_entry.session_id != entry.session_id

View File

@@ -1,258 +0,0 @@
"""Tests for contradiction detection and resolution (Memory P4).
Covers:
- Auto-resolution of obvious contradictions (newer wins)
- Ambiguous contradictions flagged, not auto-resolved
- Trust score lowering on contradicted facts
- Contradiction report generation
- Periodic detection entry point
"""
import json
import tempfile
from pathlib import Path
import pytest
from plugins.memory.holographic.store import MemoryStore
from plugins.memory.holographic.retrieval import FactRetriever
@pytest.fixture
def store(tmp_path):
    """In-memory holographic store for testing."""
    db_path = tmp_path / "test_memory.db"
    s = MemoryStore(db_path=str(db_path), default_trust=0.5)
    yield s
    # Teardown: close the SQLite handle after each test.
    s.close()


@pytest.fixture
def retriever(store):
    """FactRetriever wired to the per-test store."""
    return FactRetriever(store)
# =========================================================================
# Auto-resolution: obvious contradictions (newer wins)
# =========================================================================
class TestAutoResolveObvious:
    """Same entity, high contradiction score, clear age difference → newer wins."""

    def test_newer_fact_supersedes_older(self, store, retriever):
        """When two facts about the same entity contradict, the newer one wins."""
        # Use double-quoted entities so the extractor picks them up
        import time
        old_id = store.add_fact(
            '"Config" "Server" "Production" is "active" and "running"',
            category="user_pref",
        )
        time.sleep(1.1)  # SQLite CURRENT_TIMESTAMP has second precision
        new_id = store.add_fact(
            '"Config" "Server" "Production" is "deprecated" and "offline"',
            category="user_pref",
        )
        # Both facts should exist with default trust
        old_fact = store.get_fact(old_id)
        new_fact = store.get_fact(new_id)
        assert old_fact["trust_score"] == pytest.approx(0.5, abs=0.01)
        assert new_fact["trust_score"] == pytest.approx(0.5, abs=0.01)
        # Run auto-resolution with a realistic threshold for HRR
        report = retriever.auto_resolve_contradictions(threshold=0.05, ambiguous_threshold=0.10)
        # The report should describe what happened (string-summary mode here).
        assert "resolved" in report or "auto" in report.lower()
        # Older fact should have lower trust
        old_fact_after = store.get_fact(old_id)
        new_fact_after = store.get_fact(new_id)
        assert old_fact_after["trust_score"] < new_fact_after["trust_score"]

    def test_trust_reduction_amount(self, store, retriever):
        """Auto-resolved older fact should have trust reduced by a meaningful amount."""
        import time
        old_id = store.add_fact('"Config" "Service" "Datacenter" is "active"', category="general")
        time.sleep(1.1)
        new_id = store.add_fact('"Config" "Service" "Datacenter" is "offline"', category="general")
        retriever.auto_resolve_contradictions(threshold=0.05, ambiguous_threshold=0.10)
        old_trust = store.get_fact(old_id)["trust_score"]
        # Trust should be reduced by at least 0.15 (0.5 default - 0.20 penalty,
        # with slack for intermediate ambiguous-pair adjustments).
        assert old_trust <= 0.35

    def test_newer_fact_trust_preserved(self, store, retriever):
        """Winning (newer) fact keeps its trust score."""
        import time
        old_id = store.add_fact('"Project" "Build" "System" uses "legacy"', category="project")
        time.sleep(1.1)
        new_id = store.add_fact('"Project" "Build" "System" uses "modern"', category="project")
        retriever.auto_resolve_contradictions(threshold=0.05, ambiguous_threshold=0.10)
        new_trust = store.get_fact(new_id)["trust_score"]
        assert new_trust >= 0.5
# =========================================================================
# Ambiguous contradictions: flagged, not auto-resolved
# =========================================================================
class TestAmbiguousFlagged:
    """Ambiguous contradictions should be flagged for human review, not auto-resolved."""

    def test_ambiguous_not_auto_resolved(self, store, retriever):
        """Facts with moderate contradiction scores are flagged, not resolved."""
        import time

        # Two facts about the same entity with moderately different content.
        id1 = store.add_fact('"Server" runs on "port 8080" and is "stable"', category="project")
        time.sleep(0.05)
        id2 = store.add_fact('"Server" runs on "port 8080" but might "restart"', category="project")

        # Return value unused: we only care about the trust side effects.
        retriever.auto_resolve_contradictions(ambiguous_threshold=0.6)

        # For ambiguous cases, trust scores should remain mostly unchanged
        # (or only slightly reduced, not auto-resolved): neither fact should
        # be dramatically demoted.
        assert store.get_fact(id1)["trust_score"] > 0.3
        assert store.get_fact(id2)["trust_score"] > 0.3

    def test_ambiguous_in_report(self, store, retriever):
        """Ambiguous contradictions appear in the report's flagged section."""
        import time

        store.add_fact('"API" endpoint is "v1"', category="project")
        time.sleep(0.05)
        store.add_fact('"API" endpoint is "v2"', category="project")

        report_data = retriever.auto_resolve_contradictions(return_report=True)
        if isinstance(report_data, dict):
            # The flagged section must at least exist and be list-shaped; it
            # may be empty if entity extraction didn't link the two facts.
            # (Previously `flagged` was computed but never asserted on,
            # leaving this test a no-op.)
            flagged = report_data.get("flagged", [])
            assert isinstance(flagged, list)
# =========================================================================
# Contradiction report generation
# =========================================================================
class TestContradictionReport:
    """Reports should be structured and actionable."""

    def test_report_has_structure(self, store, retriever):
        """Report should contain resolved, flagged, and summary sections."""
        import time

        store.add_fact('"Service" runs on "Linux"', category="project")
        time.sleep(0.05)
        store.add_fact('"Service" runs on "Windows"', category="project")

        report = retriever.auto_resolve_contradictions(return_report=True)
        assert isinstance(report, dict)
        assert "auto_resolved" in report or "resolved" in report
        assert "flagged" in report
        assert "total_checked" in report or "summary" in report

    def test_report_contains_fact_ids(self, store, retriever):
        """Any resolved/flagged entry must reference the facts actually stored."""
        import time

        old_id = store.add_fact('"Database" is "PostgreSQL"', category="project")
        time.sleep(0.05)
        new_id = store.add_fact('"Database" is "MySQL"', category="project")

        report = retriever.auto_resolve_contradictions(return_report=True)
        if isinstance(report, dict):
            entries = report.get("auto_resolved", []) + report.get("flagged", [])
            referenced_ids = set()
            for item in entries:
                for key in ("kept_fact_id", "lowered_fact_id", "fact_a_id", "fact_b_id"):
                    if key in item:
                        referenced_ids.add(item[key])
            # Entity extraction may miss the pair entirely, but if the report
            # did record anything it must cite our fact ids — this fresh
            # store holds nothing else.  (Replaces a former
            # `assert ... or True` that could never fail.)
            if entries:
                assert referenced_ids & {old_id, new_id}
# =========================================================================
# No contradictions case
# =========================================================================
class TestNoContradictions:
    """When there are no contradictions, resolution should be a no-op."""

    def test_no_contradictions_no_trust_changes(self, store, retriever):
        """Facts that don't contradict should keep their trust scores."""
        import time

        id1 = store.add_fact("Python is a programming language", category="general")
        time.sleep(0.05)
        id2 = store.add_fact("Coffee contains caffeine", category="general")

        # Snapshot trust before resolution so we can assert nothing moved.
        baseline = {
            id1: store.get_fact(id1)["trust_score"],
            id2: store.get_fact(id2)["trust_score"],
        }

        report = retriever.auto_resolve_contradictions(return_report=True)

        for fact_id, expected in baseline.items():
            assert store.get_fact(fact_id)["trust_score"] == pytest.approx(expected, abs=0.001)
        if isinstance(report, dict):
            assert len(report.get("auto_resolved", [])) == 0
            assert len(report.get("flagged", [])) == 0

    def test_empty_store(self, retriever):
        """Should handle empty store gracefully."""
        report = retriever.auto_resolve_contradictions(return_report=True)
        if isinstance(report, dict):
            assert report.get("total_checked", 0) == 0
# =========================================================================
# Session-start check
# =========================================================================
class TestSessionStartCheck:
    """Lightweight contradiction check that can run at session start."""

    def test_check_returns_summary(self, store, retriever):
        """Session-start check returns a brief summary string."""
        import time

        store.add_fact('"Tom" lives in "New York"', category="general")
        time.sleep(0.05)
        store.add_fact('"Tom" lives in "Boston"', category="general")

        # May be empty if no contradictions were detected, but always a str.
        assert isinstance(retriever.check_contradictions_session_start(), str)

    def test_check_empty_is_empty_string(self, retriever):
        """No contradictions → empty string."""
        retriever.store.add_fact("Unrelated fact one", category="general")
        summary = retriever.check_contradictions_session_start()
        # Either empty or contains info about no contradictions.
        assert isinstance(summary, str)
# =========================================================================
# Integration with fact_store tool
# =========================================================================
class TestFactStoreIntegration:
    """The fact_store tool should expose contradiction resolution."""

    def test_tool_schema_has_resolve(self):
        """The tool's action enum must include the contradiction entry point."""
        from plugins.memory.holographic import FACT_STORE_SCHEMA

        actions = FACT_STORE_SCHEMA["parameters"]["properties"]["action"]["enum"]
        # "contradict" is the required resolution entry point.  A former
        # second assertion — `"resolve_contradictions" in actions or
        # "contradict" in actions` — was vacuous once this line passed,
        # so it has been removed.
        assert "contradict" in actions