Compare commits

..

3 Commits

Author SHA1 Message Date
Alexander Whitestone
fc1db11f9b fix: preserve explicit KittenTTS output format outside Telegram
All checks were successful
Lint / lint (pull_request) Successful in 8s
Refs #955
2026-04-22 10:57:02 -04:00
Alexander Whitestone
4b075f5055 feat: add KittenTTS local provider support for #955
Refs #955
2026-04-22 10:51:32 -04:00
Alexander Whitestone
7eace4ead9 wip: add failing KittenTTS QA coverage for #955
Refs #955
2026-04-22 10:41:18 -04:00
10 changed files with 443 additions and 284 deletions

View File

@@ -523,7 +523,7 @@ DEFAULT_CONFIG = {
# Text-to-speech configuration
"tts": {
"provider": "edge", # "edge" (free) | "elevenlabs" (premium) | "openai" | "minimax" | "mistral" | "neutts" (local)
"provider": "edge", # "edge" (free) | "elevenlabs" (premium) | "openai" | "minimax" | "mistral" | "neutts" (local) | "kittentts" (local)
"edge": {
"voice": "en-US-AriaNeural",
# Popular: AriaNeural, JennyNeural, AndrewNeural, BrianNeural, SoniaNeural
@@ -547,6 +547,12 @@ DEFAULT_CONFIG = {
"model": "neuphonic/neutts-air-q4-gguf", # HuggingFace model repo
"device": "cpu", # cpu, cuda, or mps
},
"kittentts": {
"model": "KittenML/kitten-tts-nano-0.8-int8", # 25MB int8 default
"voice": "Jasper", # Jasper, Bella, Luna, Bruno, Rosie, Hugo, Kiki, Leo
"speed": 1.0,
"clean_text": True,
},
},
"stt": {

View File

@@ -443,6 +443,16 @@ def _print_setup_summary(config: dict, hermes_home):
tool_status.append(("Text-to-Speech (NeuTTS local)", True, None))
else:
tool_status.append(("Text-to-Speech (NeuTTS — not installed)", False, "run 'hermes setup tts'"))
elif tts_provider == "kittentts":
try:
import importlib.util
kittentts_ok = importlib.util.find_spec("kittentts") is not None
except Exception:
kittentts_ok = False
if kittentts_ok:
tool_status.append(("Text-to-Speech (KittenTTS local)", True, None))
else:
tool_status.append(("Text-to-Speech (KittenTTS — not installed)", False, "run 'hermes setup tts'"))
else:
tool_status.append(("Text-to-Speech (Edge TTS)", True, None))
@@ -891,6 +901,7 @@ def _install_neutts_deps() -> bool:
return False
else:
print_warning("espeak-ng is required for NeuTTS. Install it manually before using NeuTTS.")
return False
# Install neutts Python package
print()
@@ -910,8 +921,34 @@ def _install_neutts_deps() -> bool:
return False
def _install_kittentts_deps() -> bool:
    """Install KittenTTS dependencies with user approval. Returns True on success."""
    import subprocess
    import sys

    kitten_wheel = (
        "https://github.com/KittenML/KittenTTS/releases/download/"
        "0.8.1/kittentts-0.8.1-py3-none-any.whl"
    )
    pip_cmd = [sys.executable, "-m", "pip", "install", "-U", kitten_wheel, "soundfile", "--quiet"]

    print()
    print_info("Installing kittentts Python package (~25-80MB model downloaded on first use)...")
    print()

    try:
        subprocess.run(pip_cmd, check=True, timeout=300)
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as exc:
        # On any pip failure or timeout, point the user at the manual command.
        print_error(f"Failed to install kittentts: {exc}")
        print_info(f"Try manually: python -m pip install -U '{kitten_wheel}' soundfile")
        return False

    print_success("kittentts installed successfully")
    return True
def _setup_tts_provider(config: dict):
"""Interactive TTS provider selection with install flow for NeuTTS."""
"""Interactive TTS provider selection with install flow for local providers."""
tts_config = config.get("tts", {})
current_provider = tts_config.get("provider", "edge")
subscription_features = get_nous_subscription_features(config)
@@ -923,6 +960,7 @@ def _setup_tts_provider(config: dict):
"minimax": "MiniMax TTS",
"mistral": "Mistral Voxtral TTS",
"neutts": "NeuTTS",
"kittentts": "KittenTTS",
}
current_label = provider_labels.get(current_provider, current_provider)
@@ -944,9 +982,10 @@ def _setup_tts_provider(config: dict):
"MiniMax TTS (high quality with voice cloning, needs API key)",
"Mistral Voxtral TTS (multilingual, native Opus, needs API key)",
"NeuTTS (local on-device, free, ~300MB model download)",
"KittenTTS (local on-device, free, lightweight ~25-80MB ONNX)",
]
)
providers.extend(["edge", "elevenlabs", "openai", "minimax", "mistral", "neutts"])
providers.extend(["edge", "elevenlabs", "openai", "minimax", "mistral", "neutts", "kittentts"])
choices.append(f"Keep current ({current_label})")
keep_current_idx = len(choices) - 1
idx = prompt_choice("Select TTS provider:", choices, keep_current_idx)
@@ -988,6 +1027,28 @@ def _setup_tts_provider(config: dict):
print_info("Skipping install. Set tts.provider to 'neutts' after installing manually.")
selected = "edge"
elif selected == "kittentts":
try:
import importlib.util
already_installed = importlib.util.find_spec("kittentts") is not None
except Exception:
already_installed = False
if already_installed:
print_success("KittenTTS is already installed")
else:
print()
print_info("KittenTTS is lightweight (~25-80MB, CPU-only, no API key required).")
print_info("Voices: Jasper, Bella, Luna, Bruno, Rosie, Hugo, Kiki, Leo")
print()
if prompt_yes_no("Install KittenTTS now?", True):
if not _install_kittentts_deps():
print_warning("KittenTTS installation incomplete. Falling back to Edge TTS.")
selected = "edge"
else:
print_info("Skipping install. Set tts.provider to 'kittentts' after installing manually.")
selected = "edge"
elif selected == "elevenlabs":
existing = get_env_value("ELEVENLABS_API_KEY")
if not existing:

View File

@@ -57,7 +57,7 @@ CONFIGURABLE_TOOLSETS = [
("moa", "🧠 Mixture of Agents", "mixture_of_agents"),
("tts", "🔊 Text-to-Speech", "text_to_speech"),
("skills", "📚 Skills", "list, view, manage"),
("todo", "📋 Task Planning", "todo, ultraplan"),
("todo", "📋 Task Planning", "todo"),
("memory", "💾 Memory", "persistent memory across sessions"),
("session_search", "🔎 Session Search", "search past conversations"),
("clarify", "❓ Clarifying Questions", "clarify"),
@@ -164,6 +164,14 @@ TOOL_CATEGORIES = {
],
"tts_provider": "mistral",
},
{
"name": "KittenTTS",
"badge": "local · free",
"tag": "Lightweight local ONNX TTS (~25MB), no API key",
"env_vars": [],
"tts_provider": "kittentts",
"post_setup": "kittentts",
},
],
},
"web": {
@@ -403,6 +411,36 @@ def _run_post_setup(post_setup_key: str):
_print_warning(" Node.js not found. Install Camofox via Docker:")
_print_info(" docker run -p 9377:9377 -e CAMOFOX_PORT=9377 jo-inc/camofox-browser")
elif post_setup_key == "kittentts":
try:
__import__("kittentts")
_print_success(" kittentts is already installed")
return
except ImportError:
pass
import subprocess
_print_info(" Installing kittentts (~25-80MB model, CPU-only)...")
wheel_url = (
"https://github.com/KittenML/KittenTTS/releases/download/"
"0.8.1/kittentts-0.8.1-py3-none-any.whl"
)
try:
result = subprocess.run(
[sys.executable, "-m", "pip", "install", "-U", wheel_url, "soundfile", "--quiet"],
capture_output=True, text=True, timeout=300,
)
if result.returncode == 0:
_print_success(" kittentts installed")
_print_info(" Voices: Jasper, Bella, Luna, Bruno, Rosie, Hugo, Kiki, Leo")
_print_info(" Models: KittenML/kitten-tts-nano-0.8-int8 (25MB), micro (41MB), mini (80MB)")
else:
_print_warning(" kittentts install failed:")
_print_info(f" {result.stderr.strip()[:300]}")
_print_info(f" Run manually: python -m pip install -U '{wheel_url}' soundfile")
except subprocess.TimeoutExpired:
_print_warning(" kittentts install timed out (>5min)")
_print_info(f" Run manually: python -m pip install -U '{wheel_url}' soundfile")
elif post_setup_key == "rl_training":
try:
__import__("tinker_atropos")

View File

@@ -294,32 +294,22 @@ class TestBuiltinDiscovery:
"tools.browser_tool",
"tools.clarify_tool",
"tools.code_execution_tool",
"tools.crisis_tool",
"tools.cronjob_tools",
"tools.delegate_tool",
"tools.file_tools",
"tools.homeassistant_tool",
"tools.image_generation_tool",
"tools.local_inference_tool",
"tools.memory_tool",
"tools.mixture_of_agents_tool",
"tools.process_registry",
"tools.rl_training_tool",
"tools.scavenger_fixer",
"tools.send_message_tool",
"tools.session_search_tool",
"tools.skill_manager_tool",
"tools.skills_tool",
"tools.sovereign_router",
"tools.sovereign_scavenger",
"tools.sovereign_teleport",
"tools.static_analyzer",
"tools.symbolic_verify",
"tools.terminal_tool",
"tools.todo_tool",
"tools.tts_tool",
"tools.ultraplan",
"tools.verify_tool",
"tools.vision_tools",
"tools.web_tools",
}

View File

@@ -0,0 +1,236 @@
"""Tests for the KittenTTS local provider in tools/tts_tool.py."""

import json
from unittest.mock import MagicMock, patch

import numpy as np
import pytest


@pytest.fixture(autouse=True)
def clean_env(monkeypatch):
    # Keep host-shell platform markers from leaking into tests; platform
    # detection would otherwise change the tool's output-format behavior.
    for key in ("HERMES_SESSION_PLATFORM",):
        monkeypatch.delenv(key, raising=False)


@pytest.fixture(autouse=True)
def clear_kittentts_cache():
    """Reset the module-level model cache between tests."""
    # Import lazily so collection doesn't fail before the fixture runs.
    from tools import tts_tool as _tt

    _tt._kittentts_model_cache.clear()
    yield
    _tt._kittentts_model_cache.clear()


@pytest.fixture
def mock_kittentts_module():
    """Inject a fake kittentts + soundfile module that return stub objects."""
    fake_model = MagicMock()
    # 24kHz float32 PCM at ~2s of silence
    fake_model.generate.return_value = np.zeros(48000, dtype=np.float32)
    fake_cls = MagicMock(return_value=fake_model)
    fake_kittentts = MagicMock()
    fake_kittentts.KittenTTS = fake_cls
    # Stub soundfile — the real package isn't installed in CI venv, and
    # _generate_kittentts does `import soundfile as sf` at runtime.
    fake_sf = MagicMock()

    def _fake_write(path, audio, samplerate):
        # Emulate writing a real file so downstream path checks succeed.
        import pathlib
        pathlib.Path(path).write_bytes(b"RIFF\x00\x00\x00\x00WAVEfmt fake")

    fake_sf.write = _fake_write
    # patch.dict restores sys.modules on exit, so the fakes never outlive a test.
    with patch.dict(
        "sys.modules",
        {"kittentts": fake_kittentts, "soundfile": fake_sf},
    ):
        yield fake_model, fake_cls
class TestGenerateKittenTts:
    """Unit tests for _generate_kittentts with kittentts/soundfile mocked out."""

    def test_successful_wav_generation(self, tmp_path, mock_kittentts_module):
        from tools.tts_tool import _generate_kittentts

        fake_model, fake_cls = mock_kittentts_module
        output_path = str(tmp_path / "test.wav")
        result = _generate_kittentts("Hello world", output_path, {})
        # Returns the path it wrote, and the stubbed soundfile created the file.
        assert result == output_path
        assert (tmp_path / "test.wav").exists()
        fake_cls.assert_called_once()
        fake_model.generate.assert_called_once()

    def test_config_passes_voice_speed_cleantext(self, tmp_path, mock_kittentts_module):
        from tools.tts_tool import _generate_kittentts

        fake_model, _ = mock_kittentts_module
        config = {
            "kittentts": {
                "model": "KittenML/kitten-tts-mini-0.8",
                "voice": "Luna",
                "speed": 1.25,
                "clean_text": False,
            }
        }
        _generate_kittentts("Hi there", str(tmp_path / "out.wav"), config)
        # All three user-tunable options must be forwarded as keyword args.
        call_kwargs = fake_model.generate.call_args.kwargs
        assert call_kwargs["voice"] == "Luna"
        assert call_kwargs["speed"] == 1.25
        assert call_kwargs["clean_text"] is False

    def test_default_model_and_voice(self, tmp_path, mock_kittentts_module):
        from tools.tts_tool import (
            DEFAULT_KITTENTTS_MODEL,
            DEFAULT_KITTENTTS_VOICE,
            _generate_kittentts,
        )

        fake_model, fake_cls = mock_kittentts_module
        # Empty config → module-level defaults are used for model and voice.
        _generate_kittentts("Hi", str(tmp_path / "out.wav"), {})
        fake_cls.assert_called_once_with(DEFAULT_KITTENTTS_MODEL)
        assert fake_model.generate.call_args.kwargs["voice"] == DEFAULT_KITTENTTS_VOICE

    def test_model_is_cached_across_calls(self, tmp_path, mock_kittentts_module):
        from tools.tts_tool import _generate_kittentts

        _, fake_cls = mock_kittentts_module
        _generate_kittentts("One", str(tmp_path / "a.wav"), {})
        _generate_kittentts("Two", str(tmp_path / "b.wav"), {})
        # Same model name → class instantiated exactly once
        assert fake_cls.call_count == 1

    def test_different_models_are_cached_separately(self, tmp_path, mock_kittentts_module):
        from tools.tts_tool import _generate_kittentts

        _, fake_cls = mock_kittentts_module
        _generate_kittentts(
            "A",
            str(tmp_path / "a.wav"),
            {"kittentts": {"model": "KittenML/kitten-tts-nano-0.8-int8"}},
        )
        _generate_kittentts(
            "B",
            str(tmp_path / "b.wav"),
            {"kittentts": {"model": "KittenML/kitten-tts-mini-0.8"}},
        )
        # Distinct model repos must not share a cache slot.
        assert fake_cls.call_count == 2

    def test_non_wav_extension_triggers_ffmpeg_conversion(
        self, tmp_path, mock_kittentts_module, monkeypatch
    ):
        """Non-.wav output path causes WAV → target ffmpeg conversion."""
        from tools import tts_tool as _tt

        calls = []

        def fake_shutil_which(cmd):
            # Pretend ffmpeg is on PATH so the conversion branch is taken.
            return "/usr/bin/ffmpeg" if cmd == "ffmpeg" else None

        def fake_run(cmd, check=False, timeout=None, **kw):
            calls.append(cmd)
            # Emulate ffmpeg writing the output file
            import pathlib
            out_path = cmd[-1]
            pathlib.Path(out_path).write_bytes(b"fake-mp3-data")
            return MagicMock(returncode=0)

        monkeypatch.setattr(_tt.shutil, "which", fake_shutil_which)
        monkeypatch.setattr(_tt.subprocess, "run", fake_run)
        output_path = str(tmp_path / "test.mp3")
        result = _tt._generate_kittentts("Hi", output_path, {})
        assert result == output_path
        assert len(calls) == 1
        assert calls[0][0] == "/usr/bin/ffmpeg"

    def test_missing_kittentts_raises_import_error(self, tmp_path, monkeypatch):
        """When kittentts package is not installed, _import_kittentts raises."""
        import sys

        # A None entry in sys.modules makes `import kittentts` fail.
        monkeypatch.setitem(sys.modules, "kittentts", None)
        from tools.tts_tool import _generate_kittentts

        with pytest.raises((ImportError, TypeError)):
            _generate_kittentts("Hi", str(tmp_path / "out.wav"), {})
class TestCheckKittenttsAvailable:
    """The availability probe must mirror importlib's view of the package."""

    def test_reports_available_when_package_present(self, monkeypatch):
        import importlib.util

        from tools.tts_tool import _check_kittentts_available

        # A non-None spec for "kittentts" means "installed".
        fake_spec = MagicMock()
        monkeypatch.setattr(
            importlib.util,
            "find_spec",
            lambda name: fake_spec if name == "kittentts" else None,
        )
        assert _check_kittentts_available() is True

    def test_reports_unavailable_when_package_missing(self, monkeypatch):
        import importlib.util

        from tools.tts_tool import _check_kittentts_available

        monkeypatch.setattr(importlib.util, "find_spec", lambda name: None)
        assert _check_kittentts_available() is False
class TestDispatcherBranch:
    """End-to-end tests of text_to_speech_tool's provider=kittentts branch."""

    def test_kittentts_not_installed_returns_helpful_error(self, monkeypatch, tmp_path):
        """When provider=kittentts but package missing, return JSON error with setup hint."""
        import sys

        # A None entry in sys.modules makes `import kittentts` fail.
        monkeypatch.setitem(sys.modules, "kittentts", None)
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        from tools.tts_tool import text_to_speech_tool

        # Write a config telling it to use kittentts
        import yaml

        (tmp_path / "config.yaml").write_text(
            yaml.safe_dump({"tts": {"provider": "kittentts"}})
        )
        result = json.loads(text_to_speech_tool(text="Hello"))
        assert result["success"] is False
        assert "kittentts" in result["error"].lower()
        assert "hermes setup tts" in result["error"].lower()

    def test_non_telegram_explicit_wav_path_is_preserved(
        self, monkeypatch, tmp_path, mock_kittentts_module
    ):
        """Explicit WAV outputs should stay WAV outside Telegram sessions."""
        import yaml

        from tools import tts_tool as _tt

        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        (tmp_path / "config.yaml").write_text(
            yaml.safe_dump({"tts": {"provider": "kittentts"}})
        )

        def fail_convert(_path):
            # Sentinel: any Opus conversion attempt outside Telegram is a bug.
            raise AssertionError("_convert_to_opus should not run outside Telegram")

        monkeypatch.setattr(_tt, "_convert_to_opus", fail_convert)
        result = json.loads(
            _tt.text_to_speech_tool(
                text="Hello from KittenTTS",
                output_path=str(tmp_path / "out.wav"),
            )
        )
        assert result["success"] is True
        assert result["file_path"] == str(tmp_path / "out.wav")
        assert (tmp_path / "out.wav").exists()

View File

@@ -1,81 +0,0 @@
import json
from pathlib import Path
from toolsets import resolve_toolset
from tools.registry import registry
def test_create_action_saves_markdown_and_json(tmp_path):
from tools.ultraplan import ultraplan_tool
result = json.loads(
ultraplan_tool(
action="create",
mission="Daily autonomous planning",
streams=[
{
"id": "A",
"name": "Backlog burn",
"phases": [
{"id": "A1", "name": "Triage", "artifact": "issue list"},
{"id": "A2", "name": "Ship", "dependencies": ["A1"], "artifact": "PR"},
],
}
],
base_dir=str(tmp_path),
)
)
assert result["success"] is True
assert Path(result["file_path"]).exists()
assert Path(result["json_path"]).exists()
assert "Work Streams" in Path(result["file_path"]).read_text(encoding="utf-8")
def test_load_action_returns_saved_plan(tmp_path):
from tools.ultraplan import ultraplan_tool
created = json.loads(
ultraplan_tool(
action="create",
date="20260422",
mission="Mission from saved plan",
base_dir=str(tmp_path),
)
)
loaded = json.loads(
ultraplan_tool(
action="load",
date="20260422",
base_dir=str(tmp_path),
)
)
assert created["success"] is True
assert loaded["success"] is True
assert loaded["plan"]["mission"] == "Mission from saved plan"
assert loaded["file_path"].endswith("ultraplan_20260422.md")
def test_cron_spec_returns_daily_schedule_and_prompt():
from tools.ultraplan import ultraplan_tool
result = json.loads(ultraplan_tool(action="cron_spec"))
assert result["success"] is True
assert result["schedule"] == "0 6 * * *"
assert "Ultraplan" in result["prompt"]
assert "ultraplan_YYYYMMDD.md" in result["prompt"]
def test_registry_registers_ultraplan_tool():
import tools.ultraplan # noqa: F401
entry = registry.get_entry("ultraplan")
assert entry is not None
assert entry.toolset == "todo"
def test_default_toolsets_include_ultraplan():
assert "ultraplan" in resolve_toolset("todo")
assert "ultraplan" in resolve_toolset("hermes-cli")

View File

@@ -2,13 +2,14 @@
"""
Text-to-Speech Tool Module
Supports six TTS providers:
Supports seven TTS providers:
- Edge TTS (default, free, no API key): Microsoft Edge neural voices
- ElevenLabs (premium): High-quality voices, needs ELEVENLABS_API_KEY
- OpenAI TTS: Good quality, needs OPENAI_API_KEY
- MiniMax TTS: High-quality with voice cloning, needs MINIMAX_API_KEY
- Mistral (Voxtral TTS): Multilingual, native Opus, needs MISTRAL_API_KEY
- NeuTTS (local, free, no API key): On-device TTS via neutts_cli, needs neutts installed
- KittenTTS (local, free, no API key): Lightweight on-device ONNX TTS via kittentts
Output formats:
- Opus (.ogg) for Telegram voice bubbles (requires ffmpeg for Edge TTS)
@@ -77,6 +78,12 @@ def _import_sounddevice():
return sd
def _import_kittentts():
"""Lazy import KittenTTS. Returns the class or raises ImportError."""
from kittentts import KittenTTS
return KittenTTS
# ===========================================================================
# Defaults
# ===========================================================================
@@ -86,6 +93,8 @@ DEFAULT_ELEVENLABS_VOICE_ID = "pNInz6obpgDQGcFmaJgB" # Adam
DEFAULT_ELEVENLABS_MODEL_ID = "eleven_multilingual_v2"
DEFAULT_ELEVENLABS_STREAMING_MODEL_ID = "eleven_flash_v2_5"
DEFAULT_OPENAI_MODEL = "gpt-4o-mini-tts"
DEFAULT_KITTENTTS_MODEL = "KittenML/kitten-tts-nano-0.8-int8" # 25MB
DEFAULT_KITTENTTS_VOICE = "Jasper"
DEFAULT_OPENAI_VOICE = "alloy"
DEFAULT_OPENAI_BASE_URL = "https://api.openai.com/v1"
DEFAULT_MINIMAX_MODEL = "speech-2.8-hd"
@@ -448,6 +457,15 @@ def _check_neutts_available() -> bool:
return False
def _check_kittentts_available() -> bool:
"""Check if the kittentts engine is importable (installed locally)."""
try:
import importlib.util
return importlib.util.find_spec("kittentts") is not None
except Exception:
return False
def _default_neutts_ref_audio() -> str:
"""Return path to the bundled default voice reference audio."""
return str(Path(__file__).parent / "neutts_samples" / "jo.wav")
@@ -511,6 +529,51 @@ def _generate_neutts(text: str, output_path: str, tts_config: Dict[str, Any]) ->
return output_path
# ===========================================================================
# Provider: KittenTTS (local, lightweight)
# ===========================================================================
# Module-level cache for KittenTTS model instances, keyed by model repo name.
_kittentts_model_cache: Dict[str, Any] = {}


def _generate_kittentts(text: str, output_path: str, tts_config: Dict[str, Any]) -> str:
    """Generate speech using the local KittenTTS ONNX model.

    Args:
        text: Text to synthesize.
        output_path: Destination audio path. KittenTTS emits WAV natively;
            any other extension is converted via ffmpeg when available,
            otherwise the WAV bytes are delivered under the requested name.
        tts_config: The "tts" config section; reads the optional "kittentts"
            sub-dict (model, voice, speed, clean_text).

    Returns:
        output_path, once the audio file has been written.

    Raises:
        ImportError: If the kittentts package is not installed.
    """
    KittenTTS = _import_kittentts()

    kt_config = tts_config.get("kittentts", {})
    model_name = kt_config.get("model", DEFAULT_KITTENTTS_MODEL)
    voice = kt_config.get("voice", DEFAULT_KITTENTTS_VOICE)
    speed = kt_config.get("speed", 1.0)
    clean_text = kt_config.get("clean_text", True)

    # Loading the ONNX model is the expensive step; cache one instance per
    # model repo. (No `global` needed: the dict is mutated, never rebound.)
    if model_name not in _kittentts_model_cache:
        logger.info("[KittenTTS] Loading model: %s", model_name)
        _kittentts_model_cache[model_name] = KittenTTS(model_name)
    model = _kittentts_model_cache[model_name]

    audio = model.generate(text, voice=voice, speed=speed, clean_text=clean_text)

    import soundfile as sf
    wav_path = output_path
    if not output_path.endswith(".wav"):
        # splitext (unlike rsplit(".", 1)) only strips the *filename's*
        # extension, so directories containing dots are handled correctly.
        wav_path = os.path.splitext(output_path)[0] + ".wav"
    # KittenTTS outputs 24kHz PCM.
    sf.write(wav_path, audio, 24000)

    if wav_path != output_path:
        ffmpeg = shutil.which("ffmpeg")
        if ffmpeg:
            conv_cmd = [ffmpeg, "-i", wav_path, "-y", "-loglevel", "error", output_path]
            try:
                subprocess.run(conv_cmd, check=True, timeout=30)
            finally:
                # Don't leak the intermediate WAV even if conversion fails.
                if os.path.exists(wav_path):
                    os.remove(wav_path)
        else:
            # No ffmpeg: keep the requested filename, WAV bytes inside.
            os.rename(wav_path, output_path)
    return output_path
# ===========================================================================
# Main tool function
# ===========================================================================
@@ -622,6 +685,19 @@ def text_to_speech_tool(
logger.info("Generating speech with NeuTTS (local)...")
_generate_neutts(text, file_str, tts_config)
elif provider == "kittentts":
try:
_import_kittentts()
except ImportError:
return json.dumps({
"success": False,
"error": "KittenTTS provider selected but 'kittentts' package not installed. "
"Run 'hermes setup tts' and choose KittenTTS, or install manually: "
"pip install https://github.com/KittenML/KittenTTS/releases/download/0.8.1/kittentts-0.8.1-py3-none-any.whl"
}, ensure_ascii=False)
logger.info("Generating speech with KittenTTS (local, lightweight)...")
_generate_kittentts(text, file_str, tts_config)
else:
# Default: Edge TTS (free), with NeuTTS as local fallback
edge_available = True
@@ -658,10 +734,10 @@ def text_to_speech_tool(
"error": f"TTS generation produced no output (provider: {provider})"
}, ensure_ascii=False)
# Try Opus conversion for Telegram compatibility
# Edge TTS outputs MP3, NeuTTS outputs WAV — both need ffmpeg conversion
# Try Opus conversion for Telegram compatibility only.
# Outside Telegram, preserve the caller's explicit output format.
voice_compatible = False
if provider in ("edge", "neutts", "minimax") and not file_str.endswith(".ogg"):
if want_opus and provider in ("edge", "neutts", "minimax", "kittentts") and not file_str.endswith(".ogg"):
opus_path = _convert_to_opus(file_str)
if opus_path:
file_str = opus_path
@@ -742,6 +818,8 @@ def check_tts_requirements() -> bool:
pass
if _check_neutts_available():
return True
if _check_kittentts_available():
return True
return False

View File

@@ -290,9 +290,6 @@ def load_ultraplan(date: str, base_dir: Path = None) -> Optional[Ultraplan]:
return None
DEFAULT_ULTRAPLAN_SCHEDULE = "0 6 * * *"
def generate_daily_cron_prompt() -> str:
"""Generate the prompt for the daily ultraplan cron job."""
return """Generate today's Ultraplan.
@@ -301,9 +298,9 @@ Steps:
1. Check open Gitea issues assigned to you
2. Check open PRs needing review
3. Check fleet health status
4. Decompose work into parallel streams with concrete phases and artifacts
5. Use the ultraplan tool to save ~/.timmy/cron/ultraplan_YYYYMMDD.md and the matching JSON sidecar
6. Optionally file a Gitea issue with the plan summary
4. Decompose work into parallel streams
5. Generate ultraplan_YYYYMMDD.md
6. File Gitea issue with the plan
Output format:
- Mission statement
@@ -311,176 +308,3 @@ Output format:
- Dependency map
- Success metrics
"""
def generate_daily_cron_job_spec(schedule: str = DEFAULT_ULTRAPLAN_SCHEDULE) -> Dict[str, str]:
"""Return a reusable cron job spec for daily Ultraplan generation."""
return {
"name": "Daily Ultraplan",
"schedule": schedule,
"prompt": generate_daily_cron_prompt(),
"path_pattern": "~/.timmy/cron/ultraplan_YYYYMMDD.md",
}
def _resolve_base_dir(base_dir: Optional[str | Path]) -> Path:
"""Normalize the requested Ultraplan base directory."""
if base_dir is None:
return Path.home() / ".timmy" / "cron"
return Path(base_dir).expanduser()
def ultraplan_tool(
action: str,
date: Optional[str] = None,
mission: str = "",
streams: Optional[List[Dict[str, Any]]] = None,
metrics: Optional[Dict[str, Any]] = None,
notes: str = "",
base_dir: Optional[str] = None,
) -> str:
"""Create/load Ultraplan artifacts and expose a daily cron spec."""
from tools.registry import tool_error, tool_result
action = (action or "").strip().lower()
resolved_base_dir = _resolve_base_dir(base_dir)
try:
if action == "create":
plan = create_ultraplan(date=date, mission=mission, streams=streams or [])
if metrics:
plan.metrics = metrics
if notes:
plan.notes = notes
md_path = save_ultraplan(plan, base_dir=resolved_base_dir)
json_path = resolved_base_dir / f"ultraplan_{plan.date}.json"
return tool_result(
success=True,
action="create",
date=plan.date,
file_path=str(md_path),
json_path=str(json_path),
plan=plan.to_dict(),
)
if action == "load":
plan_date = date or datetime.now().strftime("%Y%m%d")
plan = load_ultraplan(plan_date, base_dir=resolved_base_dir)
if plan is None:
return tool_error(
f"No Ultraplan found for {plan_date}",
success=False,
action="load",
date=plan_date,
)
return tool_result(
success=True,
action="load",
date=plan.date,
file_path=str(resolved_base_dir / f"ultraplan_{plan.date}.md"),
json_path=str(resolved_base_dir / f"ultraplan_{plan.date}.json"),
plan=plan.to_dict(),
markdown=plan.to_markdown(),
)
if action == "cron_spec":
spec = generate_daily_cron_job_spec()
return tool_result(success=True, action="cron_spec", **spec)
return tool_error(
f"Unknown Ultraplan action: {action}",
success=False,
action=action,
)
except Exception as e:
return tool_error(f"Ultraplan {action or 'tool'} failed: {e}", success=False, action=action)
ULTRAPLAN_SCHEMA = {
"name": "ultraplan",
"description": (
"Create or load daily Ultraplan planning artifacts under ~/.timmy/cron/ and "
"return a reusable cron spec for autonomous planning. Use this when you want "
"a concrete markdown/json plan file with streams, phases, dependencies, and metrics."
),
"parameters": {
"type": "object",
"properties": {
"action": {
"type": "string",
"enum": ["create", "load", "cron_spec"],
"description": "Operation to perform",
},
"date": {
"type": "string",
"description": "Plan date as YYYYMMDD. Defaults to today for create/load.",
},
"mission": {
"type": "string",
"description": "High-level mission statement for today's plan.",
},
"streams": {
"type": "array",
"description": "Optional work streams with phases/artifacts/dependencies for create.",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"name": {"type": "string"},
"phases": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {"type": "string"},
"name": {"type": "string"},
"description": {"type": "string"},
"artifact": {"type": "string"},
"dependencies": {
"type": "array",
"items": {"type": "string"},
},
},
"required": ["name"],
},
},
},
"required": ["name"],
},
},
"metrics": {
"type": "object",
"description": "Optional success metrics to store on the plan.",
"additionalProperties": True,
},
"notes": {
"type": "string",
"description": "Optional free-form notes appended to the saved plan.",
},
"base_dir": {
"type": "string",
"description": "Optional override for the Ultraplan storage directory.",
},
},
"required": ["action"],
},
}
from tools.registry import registry
registry.register(
name="ultraplan",
toolset="todo",
schema=ULTRAPLAN_SCHEMA,
handler=lambda args, **_kw: ultraplan_tool(
action=args.get("action", ""),
date=args.get("date"),
mission=args.get("mission", ""),
streams=args.get("streams"),
metrics=args.get("metrics"),
notes=args.get("notes", ""),
base_dir=args.get("base_dir"),
),
emoji="🗺️",
)

View File

@@ -47,7 +47,7 @@ _HERMES_CORE_TOOLS = [
# Text-to-speech
"text_to_speech",
# Planning & memory
"todo", "ultraplan", "memory",
"todo", "memory",
# Session history search
"session_search",
# Clarifying questions
@@ -157,8 +157,8 @@ TOOLSETS = {
},
"todo": {
"description": "Task planning and tracking for multi-step work, including daily Ultraplan artifacts",
"tools": ["todo", "ultraplan"],
"description": "Task planning and tracking for multi-step work",
"tools": ["todo"],
"includes": []
},

View File

@@ -10,7 +10,7 @@ Hermes Agent supports both text-to-speech output and voice message transcription
## Text-to-Speech
Convert text to speech with six providers:
Convert text to speech with seven providers:
| Provider | Quality | Cost | API Key |
|----------|---------|------|---------|
@@ -20,6 +20,7 @@ Convert text to speech with six providers:
| **MiniMax TTS** | Excellent | Paid | `MINIMAX_API_KEY` |
| **Mistral (Voxtral TTS)** | Excellent | Paid | `MISTRAL_API_KEY` |
| **NeuTTS** | Good | Free | None needed |
| **KittenTTS** | Good | Free (local) | None needed |
### Platform Delivery
@@ -35,7 +36,7 @@ Convert text to speech with six providers:
```yaml
# In ~/.hermes/config.yaml
tts:
provider: "edge" # "edge" | "elevenlabs" | "openai" | "minimax" | "mistral" | "neutts"
provider: "edge" # "edge" | "elevenlabs" | "openai" | "minimax" | "mistral" | "neutts" | "kittentts"
speed: 1.0 # Global speed multiplier (provider-specific settings override this)
edge:
voice: "en-US-AriaNeural" # 322 voices, 74 languages
@@ -62,6 +63,11 @@ tts:
ref_text: ''
model: neuphonic/neutts-air-q4-gguf
device: cpu
kittentts:
model: KittenML/kitten-tts-nano-0.8-int8 # 25MB int8 default; also micro and mini variants
voice: Jasper # Jasper, Bella, Luna, Bruno, Rosie, Hugo, Kiki, Leo
speed: 1.0
clean_text: true
```
**Speed control**: The global `tts.speed` value applies to all providers by default. Each provider can override it with its own `speed` setting (e.g., `tts.openai.speed: 1.5`). Provider-specific speed takes precedence over the global value. Default is `1.0` (normal speed).
@@ -74,6 +80,7 @@ Telegram voice bubbles require Opus/OGG audio format:
- **Edge TTS** (default) outputs MP3 and needs **ffmpeg** to convert
- **MiniMax TTS** outputs MP3 and needs **ffmpeg** to convert for Telegram voice bubbles
- **NeuTTS** outputs WAV and also needs **ffmpeg** to convert for Telegram voice bubbles
- **KittenTTS** outputs WAV and also needs **ffmpeg** to convert for Telegram voice bubbles
```bash
# Ubuntu/Debian
@@ -86,7 +93,7 @@ brew install ffmpeg
sudo dnf install ffmpeg
```
Without ffmpeg, Edge TTS, MiniMax TTS, and NeuTTS audio are sent as regular audio files (playable, but shown as a rectangular player instead of a voice bubble).
Without ffmpeg, Edge TTS, MiniMax TTS, NeuTTS, and KittenTTS audio are sent as regular audio files (playable, but shown as a rectangular player instead of a voice bubble).
:::tip
If you want voice bubbles without installing ffmpeg, switch to the OpenAI, ElevenLabs, or Mistral provider.