Compare commits
4 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 82926709b8 | |
| | cd0108d853 | |
| | 0b2d67e0c7 | |
| | 5067ff6842 | |
cron/pending_deliveries.py (Normal file, 109 lines)
@@ -0,0 +1,109 @@
"""
Pending Cron Deliveries — Buffer and retry failed deliveries

When the gateway reconnects, in-flight cron job notifications may be lost.
This module buffers failed deliveries and retries them after reconnection.

Issue: #744
"""

import json
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional

logger = logging.getLogger(__name__)

HERMES_HOME = Path.home() / ".hermes"
PENDING_FILE = HERMES_HOME / "cron" / "pending_deliveries.json"


def _load_pending() -> List[Dict[str, Any]]:
    if not PENDING_FILE.exists():
        return []
    try:
        return json.loads(PENDING_FILE.read_text())
    except Exception:
        return []


def _save_pending(pending: List[Dict[str, Any]]):
    PENDING_FILE.parent.mkdir(parents=True, exist_ok=True)
    PENDING_FILE.write_text(json.dumps(pending, indent=2))


def buffer_failed_delivery(job_id, job_name, platform, chat_id, content, error, thread_id=None):
    """Buffer a failed delivery for retry."""
    from hermes_time import now as _hermes_now
    pending = _load_pending()

    for p in pending:
        if p["job_id"] == job_id and p["platform"] == platform and p["chat_id"] == chat_id:
            p["content"] = content[:5000]
            p["error"] = error
            p["attempts"] = p.get("attempts", 1) + 1
            p["last_attempt"] = _hermes_now().isoformat()
            _save_pending(pending)
            return

    now = _hermes_now().isoformat()
    pending.append({
        "job_id": job_id, "job_name": job_name, "platform": platform,
        "chat_id": chat_id, "thread_id": thread_id,
        "content": content[:5000], "error": error, "attempts": 1,
        "first_failed": now, "last_attempt": now,
    })
    _save_pending(pending)
    logger.info("Buffered failed delivery: job=%s %s:%s", job_id, platform, chat_id)


def get_pending_deliveries() -> List[Dict[str, Any]]:
    return _load_pending()


def clear_delivery(job_id, platform, chat_id):
    pending = _load_pending()
    pending = [p for p in pending if not (p["job_id"] == job_id and p["platform"] == platform and p["chat_id"] == chat_id)]
    _save_pending(pending)


def retry_pending_deliveries(adapters, loop=None) -> int:
    """Retry pending deliveries. Returns count of successful retries."""
    import asyncio
    pending = _load_pending()
    if not pending:
        return 0

    successful = 0
    still_pending = []

    for d in pending:
        adapter = adapters.get(d["platform"])
        if not adapter or not adapter.connected:
            still_pending.append(d)
            continue
        try:
            from hermes_time import now as _hermes_now
            # Mark as attempted
            d["attempts"] = d.get("attempts", 1) + 1
            d["last_attempt"] = _hermes_now().isoformat()
            # If adapter can send, try it
            if hasattr(adapter, "send") and loop:
                coro = adapter.send(d["chat_id"], d["content"][:4000])
                fut = asyncio.run_coroutine_threadsafe(coro, loop)
                result = fut.result(timeout=30)
                if result and not result.get("error"):
                    successful += 1
                    logger.info("Retry OK: job=%s %s:%s", d["job_id"], d["platform"], d["chat_id"])
                    continue
            still_pending.append(d)
        except Exception as e:
            d["error"] = str(e)
            still_pending.append(d)

    _save_pending(still_pending)
    return successful


def get_pending_count() -> int:
    return len(_load_pending())
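A minimal sketch of how the buffering side of this new module is meant to be driven. The job id, platform, and chat id values are made up for illustration; the calls assume the repo's `hermes_time` helper is importable, an initially empty buffer, and note that they write to `~/.hermes/cron/pending_deliveries.json`:

```python
# Hypothetical values for illustration only; the real call sites are the
# cron tick() delivery path and the gateway reconnect hook shown below.
from cron.pending_deliveries import (
    buffer_failed_delivery,
    get_pending_deliveries,
    clear_delivery,
    get_pending_count,
)

# A delivery to Telegram chat "123" failed while the gateway was down.
buffer_failed_delivery(
    job_id="job-42",
    job_name="Daily summary",
    platform="telegram",
    chat_id="123",
    content="Summary text ...",
    error="connection reset",
)
assert get_pending_count() == 1  # assuming the buffer started empty

# A second failure for the same (job, platform, chat) updates the existing
# entry instead of appending, bumping its "attempts" counter.
buffer_failed_delivery("job-42", "Daily summary", "telegram", "123", "Summary text ...", "timed out")
assert get_pending_deliveries()[0]["attempts"] == 2

# Once the message is confirmed delivered, drop it from the buffer.
clear_delivery("job-42", "telegram", "123")
assert get_pending_count() == 0
```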
@@ -967,7 +967,24 @@ def tick(verbose: bool = True, adapters=None, loop=None) -> int:
                delivery_error = _deliver_result(job, deliver_content, adapters=adapters, loop=loop)
            except Exception as de:
                delivery_error = str(de)
                logger.error("Delivery failed for job %s: %s", job["id"], de)

                # Buffer failed delivery for retry after reconnect (#744)
                try:
                    from cron.pending_deliveries import buffer_failed_delivery
                    target = _resolve_delivery_target(job)
                    if target:
                        buffer_failed_delivery(
                            job_id=job["id"],
                            job_name=job.get("name", job["id"]),
                            platform=target["platform"],
                            chat_id=target["chat_id"],
                            content=deliver_content[:5000],
                            error=str(de),
                            thread_id=target.get("thread_id")
                        )
                except Exception as _buf_err:
                    logger.debug("Failed to buffer delivery: %s", _buf_err)

        mark_job_run(job["id"], success, error, delivery_error=delivery_error)
        executed += 1
@@ -1938,6 +1938,16 @@ class GatewayRunner:
                error_message=None,
            )
            logger.info("✓ %s reconnected successfully", platform.value)

            # Retry pending cron deliveries after reconnect (#744)
            try:
                from cron.pending_deliveries import retry_pending_deliveries
                loop = asyncio.get_event_loop()
                retried = retry_pending_deliveries(self.adapters, loop=loop)
                if retried:
                    logger.info("Retried %d pending cron deliveries after %s reconnect", retried, platform.value)
            except Exception as _retry_err:
                logger.debug("Pending delivery retry failed: %s", _retry_err)

            # Rebuild channel directory with the new adapter
            try:
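The reconnect hook above relies on each adapter exposing a `connected` flag and an async `send(chat_id, content)` coroutine whose returned dict marks failure with an "error" key, and on the event loop running in a different thread (hence `run_coroutine_threadsafe`). A stub sketch of that contract, assuming `hermes_time` is on the path; the names here are hypothetical and not the gateway's real adapter classes:

```python
import asyncio
import threading

from cron.pending_deliveries import buffer_failed_delivery, retry_pending_deliveries


class StubAdapter:
    """Stand-in for a platform adapter, following the contract assumed above."""
    connected = True

    async def send(self, chat_id, content):
        print(f"send -> {chat_id}: {content[:40]!r}")
        return {"ok": True}  # no "error" key, so the retry counts as a success


# Event loop running in a background thread, as the gateway runner does.
loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

# Simulate a failure while disconnected, then a retry after reconnect.
# Note: this writes to the real ~/.hermes/cron/pending_deliveries.json.
buffer_failed_delivery("job-42", "Daily summary", "telegram", "123", "hello", "timed out")
retried = retry_pending_deliveries({"telegram": StubAdapter()}, loop=loop)
print("retried:", retried)  # 1, and the buffer file is now empty

loop.call_soon_threadsafe(loop.stop)
```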
@@ -1,55 +0,0 @@
"""
Tests for error classification (#752).
"""

import pytest

from tools.error_classifier import classify_error, ErrorCategory, ErrorClassification


class TestErrorClassification:
    def test_timeout_is_retryable(self):
        err = Exception("Connection timed out")
        result = classify_error(err)
        assert result.category == ErrorCategory.RETRYABLE
        assert result.should_retry is True

    def test_429_is_retryable(self):
        err = Exception("Rate limit exceeded")
        result = classify_error(err, response_code=429)
        assert result.category == ErrorCategory.RETRYABLE
        assert result.should_retry is True

    def test_404_is_permanent(self):
        err = Exception("Not found")
        result = classify_error(err, response_code=404)
        assert result.category == ErrorCategory.PERMANENT
        assert result.should_retry is False

    def test_403_is_permanent(self):
        err = Exception("Forbidden")
        result = classify_error(err, response_code=403)
        assert result.category == ErrorCategory.PERMANENT
        assert result.should_retry is False

    def test_500_is_retryable(self):
        err = Exception("Internal server error")
        result = classify_error(err, response_code=500)
        assert result.category == ErrorCategory.RETRYABLE
        assert result.should_retry is True

    def test_schema_error_is_permanent(self):
        err = Exception("Schema validation failed")
        result = classify_error(err)
        assert result.category == ErrorCategory.PERMANENT
        assert result.should_retry is False

    def test_unknown_is_retryable_with_caution(self):
        err = Exception("Some unknown error")
        result = classify_error(err)
        assert result.category == ErrorCategory.UNKNOWN
        assert result.should_retry is True
        assert result.max_retries == 1


if __name__ == "__main__":
    pytest.main([__file__])
tests/test_pending_deliveries.py (Normal file, 58 lines)
@@ -0,0 +1,58 @@
"""
Tests for pending delivery buffer

Issue: #744
"""

import json
import tempfile
import unittest
from pathlib import Path
from unittest.mock import patch, MagicMock

from cron.pending_deliveries import (
    buffer_failed_delivery,
    get_pending_deliveries,
    clear_delivery,
    get_pending_count,
    _save_pending,
    _load_pending,
)


class TestPendingDeliveries(unittest.TestCase):

    def setUp(self):
        self.tmp = tempfile.mkdtemp()
        self.patch_path = patch("cron.pending_deliveries.PENDING_FILE",
                                Path(self.tmp) / "pending.json")
        self.patch_path.start()

    def tearDown(self):
        self.patch_path.stop()

    def test_buffer_delivery(self):
        buffer_failed_delivery("job1", "Test Job", "telegram", "123", "content", "error")
        pending = get_pending_deliveries()
        self.assertEqual(len(pending), 1)
        self.assertEqual(pending[0]["job_id"], "job1")
        self.assertEqual(pending[0]["platform"], "telegram")

    def test_duplicate_updates(self):
        buffer_failed_delivery("job1", "Test", "telegram", "123", "content", "error1")
        buffer_failed_delivery("job1", "Test", "telegram", "123", "content", "error2")
        pending = get_pending_deliveries()
        self.assertEqual(len(pending), 1)
        self.assertEqual(pending[0]["attempts"], 2)

    def test_clear_delivery(self):
        buffer_failed_delivery("job1", "Test", "telegram", "123", "content", "error")
        clear_delivery("job1", "telegram", "123")
        self.assertEqual(get_pending_count(), 0)

    def test_empty_returns_zero(self):
        self.assertEqual(get_pending_count(), 0)


if __name__ == "__main__":
    unittest.main()
@@ -1,233 +0,0 @@
"""
Tool Error Classification — Retryable vs Permanent.

Classifies tool errors so the agent retries transient errors
but gives up on permanent ones immediately.
"""

import logging
import re
import time
from dataclasses import dataclass
from enum import Enum
from typing import Optional, Dict, Any

logger = logging.getLogger(__name__)


class ErrorCategory(Enum):
    """Error category classification."""
    RETRYABLE = "retryable"
    PERMANENT = "permanent"
    UNKNOWN = "unknown"


@dataclass
class ErrorClassification:
    """Result of error classification."""
    category: ErrorCategory
    reason: str
    should_retry: bool
    max_retries: int
    backoff_seconds: float
    error_code: Optional[int] = None
    error_type: Optional[str] = None


# Retryable error patterns
_RETRYABLE_PATTERNS = [
    # HTTP status codes
    (r"\b429\b", "rate limit", 3, 5.0),
    (r"\b500\b", "server error", 3, 2.0),
    (r"\b502\b", "bad gateway", 3, 2.0),
    (r"\b503\b", "service unavailable", 3, 5.0),
    (r"\b504\b", "gateway timeout", 3, 5.0),

    # Timeout patterns
    (r"timeout", "timeout", 3, 2.0),
    (r"timed out", "timeout", 3, 2.0),
    (r"TimeoutExpired", "timeout", 3, 2.0),

    # Connection errors
    (r"connection refused", "connection refused", 2, 5.0),
    (r"connection reset", "connection reset", 2, 2.0),
    (r"network unreachable", "network unreachable", 2, 10.0),
    (r"DNS", "DNS error", 2, 5.0),

    # Transient errors
    (r"temporary", "temporary error", 2, 2.0),
    (r"transient", "transient error", 2, 2.0),
    (r"retry", "retryable", 2, 2.0),
]

# Permanent error patterns
_PERMANENT_PATTERNS = [
    # HTTP status codes
    (r"\b400\b", "bad request", "Invalid request parameters"),
    (r"\b401\b", "unauthorized", "Authentication failed"),
    (r"\b403\b", "forbidden", "Access denied"),
    (r"\b404\b", "not found", "Resource not found"),
    (r"\b405\b", "method not allowed", "HTTP method not supported"),
    (r"\b409\b", "conflict", "Resource conflict"),
    (r"\b422\b", "unprocessable", "Validation error"),

    # Schema/validation errors
    (r"schema", "schema error", "Invalid data schema"),
    (r"validation", "validation error", "Input validation failed"),
    (r"invalid.*json", "JSON error", "Invalid JSON"),
    (r"JSONDecodeError", "JSON error", "JSON parsing failed"),

    # Authentication
    (r"api.?key", "API key error", "Invalid or missing API key"),
    (r"token.*expir", "token expired", "Authentication token expired"),
    (r"permission", "permission error", "Insufficient permissions"),

    # Not found patterns
    (r"not found", "not found", "Resource does not exist"),
    (r"does not exist", "not found", "Resource does not exist"),
    (r"no such file", "file not found", "File does not exist"),

    # Quota/billing
    (r"quota", "quota exceeded", "Usage quota exceeded"),
    (r"billing", "billing error", "Billing issue"),
    (r"insufficient.*funds", "billing error", "Insufficient funds"),
]


def classify_error(error: Exception, response_code: Optional[int] = None) -> ErrorClassification:
    """
    Classify an error as retryable or permanent.

    Args:
        error: The exception that occurred
        response_code: HTTP response code if available

    Returns:
        ErrorClassification with retry guidance
    """
    error_str = str(error).lower()
    error_type = type(error).__name__

    # Check response code first
    if response_code:
        if response_code in (429, 500, 502, 503, 504):
            return ErrorClassification(
                category=ErrorCategory.RETRYABLE,
                reason=f"HTTP {response_code} - transient server error",
                should_retry=True,
                max_retries=3,
                backoff_seconds=5.0 if response_code == 429 else 2.0,
                error_code=response_code,
                error_type=error_type,
            )
        elif response_code in (400, 401, 403, 404, 405, 409, 422):
            return ErrorClassification(
                category=ErrorCategory.PERMANENT,
                reason=f"HTTP {response_code} - client error",
                should_retry=False,
                max_retries=0,
                backoff_seconds=0,
                error_code=response_code,
                error_type=error_type,
            )

    # Check retryable patterns
    for pattern, reason, max_retries, backoff in _RETRYABLE_PATTERNS:
        if re.search(pattern, error_str, re.IGNORECASE):
            return ErrorClassification(
                category=ErrorCategory.RETRYABLE,
                reason=reason,
                should_retry=True,
                max_retries=max_retries,
                backoff_seconds=backoff,
                error_type=error_type,
            )

    # Check permanent patterns
    for pattern, error_code, reason in _PERMANENT_PATTERNS:
        if re.search(pattern, error_str, re.IGNORECASE):
            return ErrorClassification(
                category=ErrorCategory.PERMANENT,
                reason=reason,
                should_retry=False,
                max_retries=0,
                backoff_seconds=0,
                error_type=error_type,
            )

    # Default: unknown, treat as retryable with caution
    return ErrorClassification(
        category=ErrorCategory.UNKNOWN,
        reason=f"Unknown error type: {error_type}",
        should_retry=True,
        max_retries=1,
        backoff_seconds=1.0,
        error_type=error_type,
    )


def execute_with_retry(
    func,
    *args,
    max_retries: int = 3,
    backoff_base: float = 1.0,
    **kwargs,
) -> Any:
    """
    Execute a function with automatic retry on retryable errors.

    Args:
        func: Function to execute
        *args: Function arguments
        max_retries: Maximum retry attempts
        backoff_base: Base backoff time in seconds
        **kwargs: Function keyword arguments

    Returns:
        Function result

    Raises:
        Exception: If permanent error or max retries exceeded
    """
    last_error = None

    for attempt in range(max_retries + 1):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            last_error = e

            # Classify the error
            classification = classify_error(e)

            logger.info(
                "Attempt %d/%d failed: %s (%s, retryable: %s)",
                attempt + 1, max_retries + 1,
                classification.reason,
                classification.category.value,
                classification.should_retry,
            )

            # If permanent error, fail immediately
            if not classification.should_retry:
                logger.error("Permanent error: %s", classification.reason)
                raise

            # If this was the last attempt, raise
            if attempt >= max_retries:
                logger.error("Max retries (%d) exceeded", max_retries)
                raise

            # Calculate backoff with exponential increase
            backoff = backoff_base * (2 ** attempt)
            logger.info("Retrying in %.1fs...", backoff)
            time.sleep(backoff)

    # Should not reach here, but just in case
    raise last_error


def format_error_report(classification: ErrorClassification) -> str:
    """Format error classification as a report string."""
    icon = "🔄" if classification.should_retry else "❌"
    return f"{icon} {classification.category.value}: {classification.reason}"
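This module (with its tests above) is deleted by the change set. For reference, a minimal sketch of the call pattern its source defines; `flaky_fetch` and its failure mode are hypothetical:

```python
from tools.error_classifier import classify_error, execute_with_retry

# Direct classification: a 429 maps to RETRYABLE with a 5 s backoff hint.
c = classify_error(Exception("Rate limit exceeded"), response_code=429)
print(c.category.value, c.should_retry, c.backoff_seconds)  # retryable True 5.0

# Wrapped execution: transient failures are retried with exponential backoff,
# permanent ones are re-raised immediately.
attempts = {"n": 0}

def flaky_fetch():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise TimeoutError("Connection timed out")  # matches a retryable pattern
    return "ok"

print(execute_with_retry(flaky_fetch, max_retries=3, backoff_base=0.1))  # "ok" on the third attempt
```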