fix(feishu): add adaptive batch delay for split long messages
Feishu already had text batching with a static 0.6s delay. This adds an adaptive delay: the flush waits 2.0s when the latest chunk is near the ~4096-char split point, since a continuation chunk is then almost certain. The adapter tracks _last_chunk_len on each queued event to decide which delay to apply. Configurable via HERMES_FEISHU_TEXT_BATCH_SPLIT_DELAY_SECONDS (default 2.0). Ref #6892
This commit is contained in:
@@ -264,6 +264,7 @@ class FeishuAdapterSettings:
|
||||
bot_name: str
|
||||
dedup_cache_size: int
|
||||
text_batch_delay_seconds: float
|
||||
text_batch_split_delay_seconds: float
|
||||
text_batch_max_messages: int
|
||||
text_batch_max_chars: int
|
||||
media_batch_delay_seconds: float
|
||||
@@ -1014,6 +1015,10 @@ class FeishuAdapter(BasePlatformAdapter):
|
||||
"""Feishu/Lark bot adapter."""
|
||||
|
||||
MAX_MESSAGE_LENGTH = 8000
|
||||
# Threshold for detecting Feishu client-side message splits.
|
||||
# When a chunk is near the ~4096-char practical limit, a continuation
|
||||
# is almost certain.
|
||||
_SPLIT_THRESHOLD = 4000
|
||||
|
||||
# =========================================================================
|
||||
# Lifecycle — init / settings / connect / disconnect
|
||||
@@ -1105,6 +1110,9 @@ class FeishuAdapter(BasePlatformAdapter):
|
||||
text_batch_delay_seconds=float(
|
||||
os.getenv("HERMES_FEISHU_TEXT_BATCH_DELAY_SECONDS", str(_DEFAULT_TEXT_BATCH_DELAY_SECONDS))
|
||||
),
|
||||
text_batch_split_delay_seconds=float(
|
||||
os.getenv("HERMES_FEISHU_TEXT_BATCH_SPLIT_DELAY_SECONDS", "2.0")
|
||||
),
|
||||
text_batch_max_messages=max(
|
||||
1,
|
||||
int(os.getenv("HERMES_FEISHU_TEXT_BATCH_MAX_MESSAGES", str(_DEFAULT_TEXT_BATCH_MAX_MESSAGES))),
|
||||
@@ -1152,6 +1160,7 @@ class FeishuAdapter(BasePlatformAdapter):
|
||||
self._bot_name = settings.bot_name
|
||||
self._dedup_cache_size = settings.dedup_cache_size
|
||||
self._text_batch_delay_seconds = settings.text_batch_delay_seconds
|
||||
self._text_batch_split_delay_seconds = settings.text_batch_split_delay_seconds
|
||||
self._text_batch_max_messages = settings.text_batch_max_messages
|
||||
self._text_batch_max_chars = settings.text_batch_max_chars
|
||||
self._media_batch_delay_seconds = settings.media_batch_delay_seconds
|
||||
@@ -2478,8 +2487,10 @@ class FeishuAdapter(BasePlatformAdapter):
|
||||
async def _enqueue_text_event(self, event: MessageEvent) -> None:
|
||||
"""Debounce rapid Feishu text bursts into a single MessageEvent."""
|
||||
key = self._text_batch_key(event)
|
||||
chunk_len = len(event.text or "")
|
||||
existing = self._pending_text_batches.get(key)
|
||||
if existing is None:
|
||||
event._last_chunk_len = chunk_len # type: ignore[attr-defined]
|
||||
self._pending_text_batches[key] = event
|
||||
self._pending_text_batch_counts[key] = 1
|
||||
self._schedule_text_batch_flush(key)
|
||||
@@ -2504,6 +2515,7 @@ class FeishuAdapter(BasePlatformAdapter):
|
||||
return
|
||||
|
||||
existing.text = next_text
|
||||
existing._last_chunk_len = chunk_len # type: ignore[attr-defined]
|
||||
existing.timestamp = event.timestamp
|
||||
if event.message_id:
|
||||
existing.message_id = event.message_id
|
||||
@@ -2530,10 +2542,22 @@ class FeishuAdapter(BasePlatformAdapter):
|
||||
task_map[key] = asyncio.create_task(flush_fn(key))
|
||||
|
||||
async def _flush_text_batch(self, key: str) -> None:
|
||||
"""Flush a pending text batch after the quiet period."""
|
||||
"""Flush a pending text batch after the quiet period.
|
||||
|
||||
Uses a longer delay when the latest chunk is near Feishu's ~4096-char
|
||||
split point, since a continuation chunk is almost certain.
|
||||
"""
|
||||
current_task = asyncio.current_task()
|
||||
try:
|
||||
await asyncio.sleep(self._text_batch_delay_seconds)
|
||||
# Adaptive delay: if the latest chunk is near the split threshold,
|
||||
# a continuation is almost certain — wait longer.
|
||||
pending = self._pending_text_batches.get(key)
|
||||
last_len = getattr(pending, "_last_chunk_len", 0) if pending else 0
|
||||
if last_len >= self._SPLIT_THRESHOLD:
|
||||
delay = self._text_batch_split_delay_seconds
|
||||
else:
|
||||
delay = self._text_batch_delay_seconds
|
||||
await asyncio.sleep(delay)
|
||||
await self._flush_text_batch_now(key)
|
||||
finally:
|
||||
if self._pending_text_batch_tasks.get(key) is current_task:
|
||||
|
||||
Reference in New Issue
Block a user