Compare commits

3 Commits

whip/326-1 ... queue/321-

| Author | SHA1 | Date |
|---|---|---|
| | 51d06becd3 | |
| | 954fd992eb | |
| | f35f56e397 | |

run_agent.py (49 lines changed)
@@ -586,9 +586,6 @@ class AIAgent:
self.model = model
self.max_iterations = max_iterations
# Marathon session cap: force compression when messages exceed this.
# Eliminates error-cascading in 200+ message sessions (#326).
self.max_messages = int(os.environ.get("HERMES_MAX_MESSAGES", "200"))
# Shared iteration budget — parent creates, children inherit.
# Consumed by every LLM turn across parent + all subagents.
self.iteration_budget = iteration_budget or IterationBudget(max_iterations)

@@ -1004,30 +1001,10 @@ class AIAgent:
self._session_db = session_db
self._parent_session_id = parent_session_id
self._last_flushed_db_idx = 0  # tracks DB-write cursor to prevent duplicate writes
if self._session_db:
try:
self._session_db.create_session(
session_id=self.session_id,
source=self.platform or os.environ.get("HERMES_SESSION_SOURCE", "cli"),
model=self.model,
model_config={
"max_iterations": self.max_iterations,
"reasoning_config": reasoning_config,
"max_tokens": max_tokens,
},
user_id=None,
parent_session_id=self._parent_session_id,
)
except Exception as e:
# Transient SQLite lock contention (e.g. CLI and gateway writing
# concurrently) must NOT permanently disable session_search for
# this agent. Keep _session_db alive — subsequent message
# flushes and session_search calls will still work once the
# lock clears. The session row may be missing from the index
# for this run, but that is recoverable (flushes upsert rows).
logger.warning(
"Session DB create_session failed (session_search still available): %s", e
)
# Lazy session creation: defer until first message flush (#314).
# _flush_messages_to_session_db() calls ensure_session() which uses
# INSERT OR IGNORE — creating the row only when messages arrive.
# This eliminates 32% of sessions that are created but never used.

# In-memory todo list for task planning (one per agent/session)
from tools.todo_tool import TodoStore

@@ -7331,24 +7308,6 @@ class AIAgent:
pass

while api_call_count < self.max_iterations and self.iteration_budget.remaining > 0:
# Marathon session guard: if message count exceeds max_messages,
# force a compression pass to start a fresh session lineage.
# Prevents error cascading and tool fixation in long sessions. #326
if (
self.compression_enabled
and len(messages) > self.max_messages
):
if not self.quiet_mode:
self._safe_print(
f"\n🔄 Marathon session limit hit ({len(messages)} msgs > "
f"{self.max_messages} cap). Compressing and continuing..."
)
messages, active_system_prompt = self._compress_context(
messages, system_message,
approx_tokens=estimate_messages_tokens_rough(messages),
task_id=effective_task_id,
)

# Reset per-turn checkpoint dedup so each iteration can take one snapshot
self._checkpoint_mgr.new_turn()
tests/tools/test_tts_speed.py (new file, 52 lines)

@@ -0,0 +1,52 @@
"""Tests for TTS speed support (#321)."""
|
||||
import json
|
||||
import pytest
|
||||
from unittest.mock import MagicMock, patch, AsyncMock
|
||||
|
||||
|
||||
class TestTTSSchemaHasSpeed:
|
||||
def test_schema_includes_speed(self):
|
||||
from tools.tts_tool import TTS_SCHEMA
|
||||
assert "speed" in TTS_SCHEMA["parameters"]["properties"]
|
||||
assert TTS_SCHEMA["parameters"]["properties"]["speed"]["type"] == "number"
|
||||
|
||||
def test_speed_not_required(self):
|
||||
from tools.tts_tool import TTS_SCHEMA
|
||||
assert "speed" not in TTS_SCHEMA["parameters"].get("required", [])
|
||||
|
||||
|
||||
class TestTextToSpeechToolSignature:
|
||||
def test_accepts_speed(self):
|
||||
from tools.tts_tool import text_to_speech_tool
|
||||
import inspect
|
||||
assert "speed" in inspect.signature(text_to_speech_tool).parameters
|
||||
|
||||
|
||||
class TestSpeedClamping:
|
||||
@patch("tools.tts_tool._load_tts_config", return_value={})
|
||||
@patch("tools.tts_tool._get_provider", return_value="edge")
|
||||
@patch("tools.tts_tool._import_edge_tts")
|
||||
def test_clamped_low(self, mock_edge, mock_prov, mock_cfg):
|
||||
from tools.tts_tool import text_to_speech_tool
|
||||
with patch("tools.tts_tool.asyncio.run"):
|
||||
with patch("tools.tts_tool.os.path.exists", return_value=True):
|
||||
with patch("tools.tts_tool.os.path.getsize", return_value=1000):
|
||||
assert "success" in json.loads(text_to_speech_tool("test", speed=0.01))
|
||||
|
||||
@patch("tools.tts_tool._load_tts_config", return_value={})
|
||||
@patch("tools.tts_tool._get_provider", return_value="edge")
|
||||
@patch("tools.tts_tool._import_edge_tts")
|
||||
def test_clamped_high(self, mock_edge, mock_prov, mock_cfg):
|
||||
from tools.tts_tool import text_to_speech_tool
|
||||
with patch("tools.tts_tool.asyncio.run"):
|
||||
with patch("tools.tts_tool.os.path.exists", return_value=True):
|
||||
with patch("tools.tts_tool.os.path.getsize", return_value=1000):
|
||||
assert "success" in json.loads(text_to_speech_tool("test", speed=100.0))
|
||||
|
||||
|
||||
class TestEdgeTTSRateConversion:
|
||||
def test_rates(self):
|
||||
for speed, expected in [(1.0, "+0%"), (1.5, "+50%"), (0.5, "-50%"), (2.0, "+100%"), (0.25, "-75%")]:
|
||||
pct = int((speed - 1.0) * 100)
|
||||
rate = f"+{pct}%" if pct >= 0 else f"{pct}%"
|
||||
assert rate == expected
|
||||
@@ -179,8 +179,10 @@ async def _generate_edge_tts(text: str, output_path: str, tts_config: Dict[str,
_edge_tts = _import_edge_tts()
edge_config = tts_config.get("edge", {})
voice = edge_config.get("voice", DEFAULT_EDGE_VOICE)

communicate = _edge_tts.Communicate(text, voice)
speed = tts_config.get("_speed_override") or edge_config.get("speed", 1.0)
rate_pct = int((speed - 1.0) * 100)
rate_str = f"+{rate_pct}%" if rate_pct >= 0 else f"{rate_pct}%"
communicate = _edge_tts.Communicate(text, voice, rate=rate_str)
await communicate.save(output_path)
return output_path

@@ -262,11 +264,14 @@ def _generate_openai_tts(text: str, output_path: str, tts_config: Dict[str, Any]
OpenAIClient = _import_openai_client()
client = OpenAIClient(api_key=api_key, base_url=base_url)
try:
speed = tts_config.get("_speed_override") or oai_config.get("speed", 1.0)
speed = max(0.25, min(4.0, speed))
response = client.audio.speech.create(
model=model,
voice=voice,
input=text,
response_format=response_format,
speed=speed,
extra_headers={"x-idempotency-key": str(uuid.uuid4())},
)

@@ -305,7 +310,7 @@ def _generate_minimax_tts(text: str, output_path: str, tts_config: Dict[str, Any
mm_config = tts_config.get("minimax", {})
model = mm_config.get("model", DEFAULT_MINIMAX_MODEL)
voice_id = mm_config.get("voice_id", DEFAULT_MINIMAX_VOICE_ID)
speed = mm_config.get("speed", 1)
speed = tts_config.get("_speed_override") or mm_config.get("speed", 1)
vol = mm_config.get("vol", 1)
pitch = mm_config.get("pitch", 0)
base_url = mm_config.get("base_url", DEFAULT_MINIMAX_BASE_URL)

@@ -447,6 +452,7 @@ def _generate_neutts(text: str, output_path: str, tts_config: Dict[str, Any]) ->
def text_to_speech_tool(
text: str,
output_path: Optional[str] = None,
speed: Optional[float] = None,
) -> str:
"""
Convert text to speech audio.

@@ -474,6 +480,9 @@ def text_to_speech_tool(
text = text[:MAX_TEXT_LENGTH]

tts_config = _load_tts_config()
if speed is not None:
speed = max(0.25, min(4.0, speed))
tts_config["_speed_override"] = speed
provider = _get_provider(tts_config)

# Detect platform from gateway env var to choose the best output format.

@@ -966,6 +975,10 @@ TTS_SCHEMA = {
"output_path": {
"type": "string",
"description": "Optional custom file path to save the audio. Defaults to ~/.hermes/audio_cache/<timestamp>.mp3"
},
"speed": {
"type": "number",
"description": "Speech speed multiplier. 1.0 = normal, 0.5 = half speed, 2.0 = double. Range: 0.25-4.0. Edge TTS uses SSML rate, OpenAI uses native speed param, MiniMax passes directly."
}
},
"required": ["text"]

@@ -978,7 +991,8 @@ registry.register(
schema=TTS_SCHEMA,
handler=lambda args, **kw: text_to_speech_tool(
text=args.get("text", ""),
output_path=args.get("output_path")),
output_path=args.get("output_path"),
speed=args.get("speed")),
check_fn=check_tts_requirements,
emoji="🔊",
)
Reference in New Issue
Block a user