Compare commits

..

4 Commits

Author SHA1 Message Date
82926709b8 test: Add pending delivery tests (#744)
Some checks failed
Contributor Attribution Check / check-attribution (pull_request) Failing after 55s
Docker Build and Publish / build-and-push (pull_request) Has been skipped
Supply Chain Audit / Scan PR for supply chain risks (pull_request) Successful in 1m1s
Tests / e2e (pull_request) Successful in 4m36s
Tests / test (pull_request) Failing after 54m29s
2026-04-15 03:18:01 +00:00
cd0108d853 fix: Retry pending cron deliveries after gateway reconnect (#744) 2026-04-15 03:16:58 +00:00
0b2d67e0c7 fix: Buffer failed cron deliveries for retry after reconnect (#744) 2026-04-15 03:15:50 +00:00
5067ff6842 fix: Add pending delivery buffer for cron notifications (#744) 2026-04-15 03:14:58 +00:00
6 changed files with 195 additions and 306 deletions

View File

@@ -1,221 +0,0 @@
"""
Session Compaction with Fact Extraction — #748
Before compressing a long conversation, extracts durable facts
(user preferences, corrections, project details) and saves them
to the fact store. Then compresses the conversation.
This ensures key information survives context limits.
Usage:
from agent.session_compaction import compact_session
# In the conversation loop, when context is near limit:
compact_session(messages, fact_store)
"""
import json
import re
from typing import Any, Dict, List, Optional, Tuple
# ---------------------------------------------------------------------------
# Fact Extraction Patterns
# ---------------------------------------------------------------------------
# Patterns that indicate durable facts worth preserving
# Each entry is (regex, category); group 1 captures the fact text.
# Both scanners apply these with re.IGNORECASE.
# NOTE(review): the [A-Z][a-z]+ class in the user_name pattern is
# neutralized by IGNORECASE at the call sites — confirm intent.
_FACT_PATTERNS = [
    # User preferences — explicit statements about how the user wants things
    (r"(?:i prefer|i like|i always|my preference is|remember that i)\s+(.+?)(?:\.|$)", "user_pref"),
    (r"(?:call me|my name is|i\'m)\s+([A-Z][a-z]+)", "user_name"),
    (r"(?:don\'t|do not|never)\s+(?:use|do|show|tell)\s+(.+?)(?:\.|$)", "user_constraint"),
    # Corrections — the user fixing an earlier claim
    (r"(?:actually|no,?|correction:?)\s+(.+?)(?:\.|$)", "correction"),
    (r"(?:that\'s wrong|not correct|i meant)\s+(.+?)(?:\.|$)", "correction"),
    # Project facts — statements about the repo / stack
    (r"(?:the project|this repo|the codebase)\s+(?:is|has|uses|runs)\s+(.+?)(?:\.|$)", "project_fact"),
    (r"(?:we use|our stack is|deployed on)\s+(.+?)(?:\.|$)", "project_fact"),
    # Technical facts — hosts, ports, endpoints
    (r"(?:the server|the service|the endpoint)\s+(?:is|runs on|listens on)\s+(.+?)(?:\.|$)", "technical"),
    (r"(?:port|url|address|host)\s*(?::|is|=)\s*(.+?)(?:\.|$)", "technical"),
]
def extract_facts_from_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Scan user messages for durable facts matching ``_FACT_PATTERNS``.

    Returns a list of fact dicts (content/category/source/trust) suitable
    for a fact store. Duplicate (category, text) pairs are collapsed,
    case-insensitively.
    """
    collected: List[Dict[str, Any]] = []
    fingerprints = set()  # (category:text) keys already emitted

    for message in messages:
        # Only user turns are mined for facts.
        if message.get("role") != "user":
            continue
        text = message.get("content", "")
        # Skip non-string payloads and trivially short messages.
        if not isinstance(text, str) or len(text) < 10:
            continue

        for pattern, category in _FACT_PATTERNS:
            for hit in re.findall(pattern, text, re.IGNORECASE):
                # findall yields tuples when a pattern has multiple groups.
                if isinstance(hit, tuple):
                    hit = hit[0] if hit else ""
                candidate = hit.strip()
                # Discard noise (too short) and run-ons (too long).
                if not (5 <= len(candidate) <= 200):
                    continue
                key = f"{category}:{candidate.lower()}"
                if key in fingerprints:
                    continue
                fingerprints.add(key)
                collected.append({
                    "content": candidate,
                    "category": category,
                    "source": "session_compaction",
                    "trust": 0.7,  # Medium trust — extracted, not explicitly stated
                })
    return collected
def extract_preferences(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Extract user preferences specifically."""
    pref_patterns = [
        r"(?:i prefer|i like|i want|use|always)\s+(.+?)(?:\.|$)",
        r"(?:my (?:preferred|favorite|default))\s+(?:is|are)\s+(.+?)(?:\.|$)",
        r"(?:set|configure|make)\s+(?:it to|the default to)\s+(.+?)(?:\.|$)",
    ]
    found: List[Dict[str, Any]] = []

    # Consider only the user's own turns.
    user_texts = (m.get("content", "") for m in messages if m.get("role") == "user")
    for text in user_texts:
        if not isinstance(text, str):
            continue
        for pattern in pref_patterns:
            for hit in re.findall(pattern, text, re.IGNORECASE):
                # Keep only plain-string captures of a sane length.
                if isinstance(hit, str) and 5 < len(hit) < 200:
                    found.append({
                        "content": hit.strip(),
                        "category": "user_pref",
                        "source": "session_compaction",
                        "trust": 0.8,
                    })
    return found
def compact_session(
    messages: List[Dict[str, Any]],
    fact_store: Any = None,
    keep_recent: int = 10,
) -> Tuple[List[Dict[str, Any]], int]:
    """
    Compact a session by extracting facts and compressing old messages.

    Args:
        messages: Full conversation history
        fact_store: Optional fact_store instance for saving facts
        keep_recent: Number of recent messages to keep uncompressed

    Returns:
        Tuple of (compacted_messages, facts_extracted)
    """
    # Too short to be worth compacting — return unchanged.
    if len(messages) <= keep_recent * 2:
        return messages, 0

    # Split into old (to compress) and recent (to keep verbatim)
    split_point = len(messages) - keep_recent
    old_messages = messages[:split_point]
    recent_messages = messages[split_point:]

    # Extract facts from old messages. extract_facts_from_messages dedups
    # internally, but extract_preferences does not, and the two scanners
    # overlap (e.g. "I prefer X" matches both) — so dedup the merged list
    # to avoid saving and re-injecting the same fact multiple times.
    seen = set()
    all_facts = []
    for fact in extract_facts_from_messages(old_messages) + extract_preferences(old_messages):
        key = (fact["category"], fact["content"].lower())
        if key not in seen:
            seen.add(key)
            all_facts.append(fact)

    # Save facts to store if available (best effort — never block compaction)
    saved_count = 0
    if fact_store and all_facts:
        for fact in all_facts:
            try:
                if hasattr(fact_store, 'store'):
                    fact_store.store(
                        content=fact["content"],
                        category=fact["category"],
                        tags=["session_compaction"],
                    )
                    saved_count += 1
                elif hasattr(fact_store, 'add'):
                    fact_store.add(fact["content"])
                    saved_count += 1
            except Exception:
                pass  # Don't let fact saving block compaction

    # Create a compact textual summary standing in for the old messages
    summary_parts = []
    if saved_count > 0:
        summary_parts.append(f"[Session compacted: {saved_count} facts extracted and saved]")
    user_msgs = sum(1 for m in old_messages if m.get("role") == "user")
    asst_msgs = sum(1 for m in old_messages if m.get("role") == "assistant")
    summary_parts.append(f"[Previous conversation: {user_msgs} user messages, {asst_msgs} assistant responses]")
    summary = " ".join(summary_parts)

    # Build compacted messages: summary, then extracted facts, then recent
    compacted = []
    if summary:
        compacted.append({
            "role": "system",
            "content": summary,
            "_compacted": True,
        })
    if all_facts:
        facts_text = "Known facts from previous conversation:\n"
        for fact in all_facts[:20]:  # Cap injected context at 20 facts
            facts_text += f"- [{fact['category']}] {fact['content']}\n"
        compacted.append({
            "role": "system",
            "content": facts_text,
            "_extracted_facts": True,
        })
    compacted.extend(recent_messages)
    return compacted, saved_count
def should_compact(messages: List[Dict[str, Any]], max_tokens: int = 80000) -> bool:
    """
    Decide whether a session needs compaction.

    Heuristic: never compact fewer than 50 messages; otherwise compact
    once the rough token estimate (~4 chars per token) exceeds 80% of
    ``max_tokens``.
    """
    if len(messages) < 50:
        return False
    char_total = sum(len(str(m.get("content", ""))) for m in messages)
    approx_tokens = char_total // 4
    threshold = max_tokens * 0.8  # Compact at 80% of the limit
    return approx_tokens > threshold

109
cron/pending_deliveries.py Normal file
View File

@@ -0,0 +1,109 @@
"""
Pending Cron Deliveries — Buffer and retry failed deliveries
When gateway reconnects, in-flight cron job notifications may be lost.
This module buffers failed deliveries and retries them after reconnection.
Issue: #744
"""
import json
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
# Module-level logger for buffering/retry diagnostics.
logger = logging.getLogger(__name__)

# On-disk buffer location: ~/.hermes/cron/pending_deliveries.json
HERMES_HOME = Path.home() / ".hermes"
PENDING_FILE = HERMES_HOME / "cron" / "pending_deliveries.json"
def _load_pending() -> List[Dict[str, Any]]:
    """Return the buffered delivery records, or [] when missing/unreadable."""
    if not PENDING_FILE.exists():
        return []
    try:
        raw = PENDING_FILE.read_text()
        return json.loads(raw)
    except Exception:
        # Corrupt or unreadable buffer: treat as empty rather than crash.
        return []
def _save_pending(pending: List[Dict[str, Any]]):
    """Persist the buffer atomically.

    Writes to a temp file in the same directory, then renames it over the
    target. A crash mid-write can no longer leave a truncated JSON file —
    which _load_pending would silently discard, losing the whole buffer.
    """
    PENDING_FILE.parent.mkdir(parents=True, exist_ok=True)
    tmp = PENDING_FILE.with_suffix(".json.tmp")
    tmp.write_text(json.dumps(pending, indent=2))
    # Path.replace is atomic on POSIX (os.replace semantics).
    tmp.replace(PENDING_FILE)
def buffer_failed_delivery(job_id, job_name, platform, chat_id, content, error, thread_id=None):
    """Buffer a failed delivery for retry."""
    from hermes_time import now as _hermes_now

    records = _load_pending()

    # If this (job, platform, chat) is already buffered, update in place.
    existing = next(
        (r for r in records
         if r["job_id"] == job_id and r["platform"] == platform and r["chat_id"] == chat_id),
        None,
    )
    if existing is not None:
        existing["content"] = content[:5000]
        existing["error"] = error
        existing["attempts"] = existing.get("attempts", 1) + 1
        existing["last_attempt"] = _hermes_now().isoformat()
        _save_pending(records)
        return

    # Otherwise append a fresh record.
    stamp = _hermes_now().isoformat()
    records.append({
        "job_id": job_id,
        "job_name": job_name,
        "platform": platform,
        "chat_id": chat_id,
        "thread_id": thread_id,
        "content": content[:5000],
        "error": error,
        "attempts": 1,
        "first_failed": stamp,
        "last_attempt": stamp,
    })
    _save_pending(records)
    logger.info("Buffered failed delivery: job=%s %s:%s", job_id, platform, chat_id)
def get_pending_deliveries() -> List[Dict[str, Any]]:
    """Return a snapshot of all deliveries currently waiting for retry."""
    return _load_pending()
def clear_delivery(job_id, platform, chat_id):
    """Drop any buffered record matching (job_id, platform, chat_id)."""
    def _matches(rec):
        return (rec["job_id"] == job_id
                and rec["platform"] == platform
                and rec["chat_id"] == chat_id)

    remaining = [rec for rec in _load_pending() if not _matches(rec)]
    _save_pending(remaining)
def retry_pending_deliveries(adapters, loop=None) -> int:
    """Retry pending deliveries. Returns count of successful retries.

    Args:
        adapters: Mapping of platform name -> adapter. Each adapter is
            expected to expose a ``connected`` flag and, for a send to be
            attempted, an async ``send(chat_id, content)`` method.
        loop: Event loop on which sends are scheduled. Without a loop no
            send is attempted and every record stays buffered.

    Successful deliveries are removed from the buffer; everything else
    (disconnected adapter, missing send/loop, error result, exception)
    is re-queued with its attempt counter bumped.
    NOTE(review): there is no max-attempts cap — a permanently failing
    record retries forever; confirm whether a drop policy is wanted.
    """
    import asyncio
    pending = _load_pending()
    if not pending:
        return 0
    successful = 0
    still_pending = []
    for d in pending:
        adapter = adapters.get(d["platform"])
        # Keep buffered until this platform's adapter is back online.
        if not adapter or not adapter.connected:
            still_pending.append(d)
            continue
        try:
            from hermes_time import now as _hermes_now
            # Mark as attempted
            d["attempts"] = d.get("attempts", 1) + 1
            d["last_attempt"] = _hermes_now().isoformat()
            # If adapter can send, try it
            if hasattr(adapter, "send") and loop:
                # Bridge from this (sync) thread onto the gateway's loop;
                # block up to 30s for the result.
                coro = adapter.send(d["chat_id"], d["content"][:4000])
                fut = asyncio.run_coroutine_threadsafe(coro, loop)
                result = fut.result(timeout=30)
                # Assumes a successful send returns a truthy dict-like
                # without an "error" key — TODO confirm adapter contract.
                if result and not result.get("error"):
                    successful += 1
                    logger.info("Retry OK: job=%s %s:%s", d["job_id"], d["platform"], d["chat_id"])
                    continue
            # Send not attempted or reported an error: keep for next pass.
            still_pending.append(d)
        except Exception as e:
            # Record the failure reason and re-queue.
            d["error"] = str(e)
            still_pending.append(d)
    _save_pending(still_pending)
    return successful
def get_pending_count() -> int:
    """Number of deliveries currently waiting for retry."""
    buffered = _load_pending()
    return len(buffered)

View File

@@ -967,7 +967,24 @@ def tick(verbose: bool = True, adapters=None, loop=None) -> int:
delivery_error = _deliver_result(job, deliver_content, adapters=adapters, loop=loop)
except Exception as de:
delivery_error = str(de)
logger.error("Delivery failed for job %s: %s", job["id"], de)
logger.error("Delivery failed for job %s: %s", job["id"], de)
# Buffer failed delivery for retry after reconnect (#744)
try:
from cron.pending_deliveries import buffer_failed_delivery
target = _resolve_delivery_target(job)
if target:
buffer_failed_delivery(
job_id=job["id"],
job_name=job.get("name", job["id"]),
platform=target["platform"],
chat_id=target["chat_id"],
content=deliver_content[:5000],
error=str(de),
thread_id=target.get("thread_id")
)
except Exception as _buf_err:
logger.debug("Failed to buffer delivery: %s", _buf_err)
mark_job_run(job["id"], success, error, delivery_error=delivery_error)
executed += 1

View File

@@ -1938,6 +1938,16 @@ class GatewayRunner:
error_message=None,
)
logger.info("%s reconnected successfully", platform.value)
# Retry pending cron deliveries after reconnect (#744)
try:
from cron.pending_deliveries import retry_pending_deliveries
loop = asyncio.get_event_loop()
retried = retry_pending_deliveries(self.adapters, loop=loop)
if retried:
logger.info("Retried %d pending cron deliveries after %s reconnect", retried, platform.value)
except Exception as _retry_err:
logger.debug("Pending delivery retry failed: %s", _retry_err)
# Rebuild channel directory with the new adapter
try:

View File

@@ -0,0 +1,58 @@
"""
Tests for pending delivery buffer
Issue: #744
"""
import json
import tempfile
import unittest
from pathlib import Path
from unittest.mock import patch, MagicMock
from cron.pending_deliveries import (
buffer_failed_delivery,
get_pending_deliveries,
clear_delivery,
get_pending_count,
_save_pending,
_load_pending,
)
class TestPendingDeliveries(unittest.TestCase):
    """Exercise the pending-delivery buffer against an isolated temp file."""

    def setUp(self):
        # Redirect the module's buffer file into a fresh temp directory so
        # tests never touch real user state.
        self.tmp = tempfile.mkdtemp()
        self.patch_path = patch(
            "cron.pending_deliveries.PENDING_FILE",
            Path(self.tmp) / "pending.json",
        )
        self.patch_path.start()

    def tearDown(self):
        self.patch_path.stop()

    def test_buffer_delivery(self):
        buffer_failed_delivery("job1", "Test Job", "telegram", "123", "content", "error")
        buffered = get_pending_deliveries()
        self.assertEqual(1, len(buffered))
        record = buffered[0]
        self.assertEqual("job1", record["job_id"])
        self.assertEqual("telegram", record["platform"])

    def test_duplicate_updates(self):
        # Buffering the same (job, platform, chat) twice must update the
        # existing record in place, not append a second one.
        for err in ("error1", "error2"):
            buffer_failed_delivery("job1", "Test", "telegram", "123", "content", err)
        buffered = get_pending_deliveries()
        self.assertEqual(1, len(buffered))
        self.assertEqual(2, buffered[0]["attempts"])

    def test_clear_delivery(self):
        buffer_failed_delivery("job1", "Test", "telegram", "123", "content", "error")
        clear_delivery("job1", "telegram", "123")
        self.assertEqual(0, get_pending_count())

    def test_empty_returns_zero(self):
        self.assertEqual(0, get_pending_count())
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()

View File

@@ -1,84 +0,0 @@
"""Tests for session compaction with fact extraction (#748)."""
import sys
from pathlib import Path
# Make the repo root importable so `agent.*` resolves when run directly.
sys.path.insert(0, str(Path(__file__).parent.parent))
from agent.session_compaction import (
extract_facts_from_messages,
extract_preferences,
compact_session,
should_compact,
)
def test_extract_preferences():
    """Preference phrasings in user turns are picked up."""
    conversation = [
        {"role": "user", "content": "I prefer using Python for this"},
        {"role": "assistant", "content": "OK"},
        {"role": "user", "content": "Always use tabs, not spaces"},
    ]
    found = extract_preferences(conversation)
    assert len(found) >= 1
def test_extract_facts():
    """Technical statements yield facts; short messages are ignored."""
    conversation = [
        {"role": "user", "content": "The server runs on port 8080"},
        {"role": "user", "content": "Actually, the port is 8081"},
        {"role": "user", "content": "Hello"},  # Too short, should be skipped
    ]
    found = extract_facts_from_messages(conversation)
    assert len(found) >= 1
    assert any("technical" in fact["category"] for fact in found)
def test_extract_deduplicates():
    """Identical statements across messages collapse into one fact."""
    repeated = {"role": "user", "content": "I prefer Python"}
    found = extract_facts_from_messages([repeated, dict(repeated)])
    assert len(found) == 1
def test_compact_session():
    """Compaction shrinks a long history and reports a non-negative count."""
    history = [
        msg
        for i in range(30)
        for msg in (
            {"role": "user", "content": f"Message {i}: I prefer Python for server {i}"},
            {"role": "assistant", "content": f"Response {i}"},
        )
    ]
    compacted, count = compact_session(history, keep_recent=10)
    assert len(compacted) < len(history)
    assert count >= 0
def test_compact_keeps_recent():
    """The recent tail of the conversation survives compaction."""
    history = [
        msg
        for i in range(30)
        for msg in (
            {"role": "user", "content": f"Message {i}"},
            {"role": "assistant", "content": f"Response {i}"},
        )
    ]
    compacted, _ = compact_session(history, keep_recent=10)
    # Should have summary + facts + 10 recent
    assert len(compacted) >= 10
def test_should_compact_short():
    """A tiny session never triggers compaction."""
    short_history = [{"role": "user", "content": "hi"} for _ in range(10)]
    assert not should_compact(short_history)
def test_should_compact_long():
    """Many long messages push the estimate past the threshold."""
    long_history = [{"role": "user", "content": "x" * 1000} for _ in range(100)]
    assert should_compact(long_history)
if __name__ == "__main__":
    # Run every test in order, printing progress as we go.
    all_cases = [
        test_extract_preferences,
        test_extract_facts,
        test_extract_deduplicates,
        test_compact_session,
        test_compact_keeps_recent,
        test_should_compact_short,
        test_should_compact_long,
    ]
    for case in all_cases:
        print(f"Running {case.__name__}...")
        case()
        print(" PASS")
    print("\nAll tests passed.")