Compare commits

3 Commits

| Author | SHA1 | Date |
|---|---|---|
| | fc1db11f9b | |
| | 4b075f5055 | |
| | 7eace4ead9 | |
agent/anthropic_adapter.py

@@ -1396,8 +1396,6 @@ def normalize_anthropic_response(
        "tool_use": "tool_calls",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "refusal": "content_filter",
        "model_context_window_exceeded": "length",
    }
    finish_reason = stop_reason_map.get(response.stop_reason, "stop")

@@ -1411,42 +1409,3 @@ def normalize_anthropic_response(
        ),
        finish_reason,
    )


def normalize_anthropic_response_v2(
    response,
    strip_tool_prefix: bool = False,
) -> "NormalizedResponse":
    """Normalize Anthropic response to NormalizedResponse.

    Wraps the existing normalize_anthropic_response() and maps its output
    to the shared transport types. This allows incremental migration
    without disturbing the legacy call sites.
    """
    from agent.transports.types import NormalizedResponse, build_tool_call

    assistant_msg, finish_reason = normalize_anthropic_response(response, strip_tool_prefix)

    tool_calls = None
    if assistant_msg.tool_calls:
        tool_calls = [
            build_tool_call(
                id=tc.id,
                name=tc.function.name,
                arguments=tc.function.arguments,
            )
            for tc in assistant_msg.tool_calls
        ]

    provider_data = {}
    if getattr(assistant_msg, "reasoning_details", None):
        provider_data["reasoning_details"] = assistant_msg.reasoning_details

    return NormalizedResponse(
        content=assistant_msg.content,
        tool_calls=tool_calls,
        finish_reason=finish_reason,
        reasoning=getattr(assistant_msg, "reasoning", None),
        usage=None,
        provider_data=provider_data or None,
    )
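For orientation, a minimal sketch of how the v1/v2 pair above is used at a call site; the mock response is built with SimpleNamespace, mirroring the regression tests further down:

```python
from types import SimpleNamespace

from agent.anthropic_adapter import (
    normalize_anthropic_response,
    normalize_anthropic_response_v2,
)

resp = SimpleNamespace(
    content=[SimpleNamespace(type="text", text="Hello")],
    stop_reason="end_turn",
    usage=SimpleNamespace(input_tokens=10, output_tokens=5),
)

# v1 returns a tuple: an OpenAI-shaped message object plus a finish_reason string.
msg, finish_reason = normalize_anthropic_response(resp)

# v2 wraps the same values in the shared NormalizedResponse dataclass.
nr = normalize_anthropic_response_v2(resp)
assert nr.content == msg.content
assert nr.finish_reason == finish_reason
```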
agent/transports/__init__.py

@@ -1,57 +0,0 @@
"""Transport layer types and registry for provider response normalization.

Usage:
    from agent.transports import get_transport
    transport = get_transport("anthropic_messages")
    result = transport.normalize_response(raw_response)
"""

from agent.transports.types import (  # noqa: F401
    NormalizedResponse,
    ToolCall,
    Usage,
    build_tool_call,
    map_finish_reason,
)

_REGISTRY: dict = {}


def register_transport(api_mode: str, transport_cls: type) -> None:
    """Register a transport class for an api_mode string."""
    _REGISTRY[api_mode] = transport_cls


def get_transport(api_mode: str):
    """Get a transport instance for the given api_mode.

    Returns None if no transport is registered for this api_mode.
    This allows gradual migration — call sites can check for None
    and fall back to the legacy code path.
    """
    if not _REGISTRY:
        _discover_transports()
    cls = _REGISTRY.get(api_mode)
    if cls is None:
        return None
    return cls()


def _discover_transports() -> None:
    """Import all transport modules to trigger auto-registration."""
    try:
        import agent.transports.anthropic  # noqa: F401
    except ImportError:
        pass
    try:
        import agent.transports.codex  # noqa: F401
    except ImportError:
        pass
    try:
        import agent.transports.chat_completions  # noqa: F401
    except ImportError:
        pass
    try:
        import agent.transports.bedrock  # noqa: F401
    except ImportError:
        pass
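The get_transport() docstring above spells out the gradual-migration contract; a minimal call-site sketch that honors it (legacy_normalize is a hypothetical stand-in for the existing non-transport code path):

```python
from agent.transports import get_transport

def normalize(api_mode: str, raw_response):
    transport = get_transport(api_mode)
    if transport is None:
        # No transport registered for this api_mode yet: legacy path.
        return legacy_normalize(api_mode, raw_response)  # hypothetical legacy helper
    return transport.normalize_response(raw_response)
```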
agent/transports/anthropic.py

@@ -1,95 +0,0 @@
"""Anthropic Messages API transport.

Delegates to the existing adapter functions in agent/anthropic_adapter.py.
This transport owns format conversion and normalization — NOT client lifecycle.
"""

from typing import Any, Dict, List, Optional

from agent.transports.base import ProviderTransport
from agent.transports.types import NormalizedResponse


class AnthropicTransport(ProviderTransport):
    """Transport for api_mode='anthropic_messages'."""

    @property
    def api_mode(self) -> str:
        return "anthropic_messages"

    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        from agent.anthropic_adapter import convert_messages_to_anthropic

        base_url = kwargs.get("base_url")
        return convert_messages_to_anthropic(messages, base_url=base_url)

    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        from agent.anthropic_adapter import convert_tools_to_anthropic

        return convert_tools_to_anthropic(tools)

    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        from agent.anthropic_adapter import build_anthropic_kwargs

        return build_anthropic_kwargs(
            model=model,
            messages=messages,
            tools=tools,
            max_tokens=params.get("max_tokens", 16384),
            reasoning_config=params.get("reasoning_config"),
            tool_choice=params.get("tool_choice"),
            is_oauth=params.get("is_oauth", False),
            preserve_dots=params.get("preserve_dots", False),
            context_length=params.get("context_length"),
            base_url=params.get("base_url"),
            fast_mode=params.get("fast_mode", False),
        )

    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        from agent.anthropic_adapter import normalize_anthropic_response_v2

        strip_tool_prefix = kwargs.get("strip_tool_prefix", False)
        return normalize_anthropic_response_v2(response, strip_tool_prefix=strip_tool_prefix)

    def validate_response(self, response: Any) -> bool:
        if response is None:
            return False
        content_blocks = getattr(response, "content", None)
        if not isinstance(content_blocks, list):
            return False
        if not content_blocks:
            return False
        return True

    def extract_cache_stats(self, response: Any):
        usage = getattr(response, "usage", None)
        if usage is None:
            return None
        cached = getattr(usage, "cache_read_input_tokens", 0) or 0
        written = getattr(usage, "cache_creation_input_tokens", 0) or 0
        if cached or written:
            return {"cached_tokens": cached, "creation_tokens": written}
        return None

    _STOP_REASON_MAP = {
        "end_turn": "stop",
        "tool_use": "tool_calls",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "refusal": "content_filter",
        "model_context_window_exceeded": "length",
    }

    def map_finish_reason(self, raw_reason: str) -> str:
        return self._STOP_REASON_MAP.get(raw_reason, "stop")


from agent.transports import register_transport  # noqa: E402

register_transport("anthropic_messages", AnthropicTransport)
agent/transports/base.py

@@ -1,61 +0,0 @@
"""Abstract base for provider transports.

A transport owns the data path for one api_mode:
convert_messages → convert_tools → build_kwargs → normalize_response

It does NOT own: client construction, streaming, credential refresh,
prompt caching, interrupt handling, or retry logic. Those stay on AIAgent.
"""

from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional

from agent.transports.types import NormalizedResponse


class ProviderTransport(ABC):
    """Base class for provider-specific format conversion and normalization."""

    @property
    @abstractmethod
    def api_mode(self) -> str:
        """The api_mode string this transport handles."""
        ...

    @abstractmethod
    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        """Convert OpenAI-format messages to provider-native format."""
        ...

    @abstractmethod
    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        """Convert OpenAI-format tool definitions to provider-native format."""
        ...

    @abstractmethod
    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        """Build the complete provider kwargs dict."""
        ...

    @abstractmethod
    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        """Normalize a raw provider response to the shared NormalizedResponse type."""
        ...

    def validate_response(self, response: Any) -> bool:
        """Optional structural validation for raw responses."""
        return True

    def extract_cache_stats(self, response: Any) -> Optional[Dict[str, int]]:
        """Optional cache stats extraction."""
        return None

    def map_finish_reason(self, raw_reason: str) -> str:
        """Optional stop-reason mapping. Defaults to passthrough."""
        return raw_reason
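The docstring's pipeline (convert_messages → convert_tools → build_kwargs → normalize_response) composes roughly as follows. This is an illustrative sketch, not AIAgent's actual loop, and client.create() is a stand-in for whatever provider client the agent owns:

```python
def run_turn(transport, client, model, messages, tools=None):
    # The transport owns format conversion and normalization only.
    kwargs = transport.build_kwargs(model=model, messages=messages, tools=tools)
    raw = client.create(**kwargs)  # client lifecycle stays on the agent side
    if not transport.validate_response(raw):
        raise ValueError(f"invalid response for api_mode={transport.api_mode}")
    return transport.normalize_response(raw)
```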
agent/transports/types.py

@@ -1,58 +0,0 @@
"""Shared types for normalized provider responses."""

from __future__ import annotations

import json
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional


@dataclass
class ToolCall:
    """A normalized tool call from any provider."""

    id: Optional[str]
    name: str
    arguments: str
    provider_data: Optional[Dict[str, Any]] = field(default=None, repr=False)


@dataclass
class Usage:
    """Token usage from an API response."""

    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0
    cached_tokens: int = 0


@dataclass
class NormalizedResponse:
    """Normalized API response from any provider."""

    content: Optional[str]
    tool_calls: Optional[List[ToolCall]]
    finish_reason: str
    reasoning: Optional[str] = None
    usage: Optional[Usage] = None
    provider_data: Optional[Dict[str, Any]] = field(default=None, repr=False)


def build_tool_call(
    id: Optional[str],
    name: str,
    arguments: Any,
    **provider_fields: Any,
) -> ToolCall:
    """Build a ToolCall, auto-serialising dict arguments."""
    args_str = json.dumps(arguments) if isinstance(arguments, dict) else str(arguments)
    provider_data = dict(provider_fields) if provider_fields else None
    return ToolCall(id=id, name=name, arguments=args_str, provider_data=provider_data)


def map_finish_reason(reason: Optional[str], mapping: Dict[str, str]) -> str:
    """Translate a provider-specific stop reason to the normalized set."""
    if reason is None:
        return "stop"
    return mapping.get(reason, "stop")
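A quick illustration of the two helpers above (values chosen arbitrarily):

```python
from agent.transports.types import build_tool_call, map_finish_reason

tc = build_tool_call(id="call_1", name="terminal", arguments={"cmd": "ls"})
assert tc.arguments == '{"cmd": "ls"}'  # dict arguments are JSON-serialised
assert tc.provider_data is None         # no extra provider fields supplied

tc2 = build_tool_call(id="fc_2", name="t", arguments="{}", response_item_id="fc_2")
assert tc2.provider_data == {"response_item_id": "fc_2"}

assert map_finish_reason("tool_use", {"tool_use": "tool_calls"}) == "tool_calls"
assert map_finish_reason(None, {}) == "stop"  # a missing reason normalizes to "stop"
```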
@@ -523,7 +523,7 @@ DEFAULT_CONFIG = {

    # Text-to-speech configuration
    "tts": {
        "provider": "edge",  # "edge" (free) | "elevenlabs" (premium) | "openai" | "minimax" | "mistral" | "neutts" (local)
        "provider": "edge",  # "edge" (free) | "elevenlabs" (premium) | "openai" | "minimax" | "mistral" | "neutts" (local) | "kittentts" (local)
        "edge": {
            "voice": "en-US-AriaNeural",
            # Popular: AriaNeural, JennyNeural, AndrewNeural, BrianNeural, SoniaNeural

@@ -547,6 +547,12 @@ DEFAULT_CONFIG = {
            "model": "neuphonic/neutts-air-q4-gguf",  # HuggingFace model repo
            "device": "cpu",  # cpu, cuda, or mps
        },
        "kittentts": {
            "model": "KittenML/kitten-tts-nano-0.8-int8",  # 25MB int8 default
            "voice": "Jasper",  # Jasper, Bella, Luna, Bruno, Rosie, Hugo, Kiki, Leo
            "speed": 1.0,
            "clean_text": True,
        },
    },

    "stt": {
@@ -443,6 +443,16 @@ def _print_setup_summary(config: dict, hermes_home):
            tool_status.append(("Text-to-Speech (NeuTTS local)", True, None))
        else:
            tool_status.append(("Text-to-Speech (NeuTTS — not installed)", False, "run 'hermes setup tts'"))
    elif tts_provider == "kittentts":
        try:
            import importlib.util
            kittentts_ok = importlib.util.find_spec("kittentts") is not None
        except Exception:
            kittentts_ok = False
        if kittentts_ok:
            tool_status.append(("Text-to-Speech (KittenTTS local)", True, None))
        else:
            tool_status.append(("Text-to-Speech (KittenTTS — not installed)", False, "run 'hermes setup tts'"))
    else:
        tool_status.append(("Text-to-Speech (Edge TTS)", True, None))

@@ -891,6 +901,7 @@ def _install_neutts_deps() -> bool:
            return False
        else:
            print_warning("espeak-ng is required for NeuTTS. Install it manually before using NeuTTS.")
            return False

    # Install neutts Python package
    print()

@@ -910,8 +921,34 @@ def _install_neutts_deps() -> bool:
        return False


def _install_kittentts_deps() -> bool:
    """Install KittenTTS dependencies with user approval. Returns True on success."""
    import subprocess
    import sys

    wheel_url = (
        "https://github.com/KittenML/KittenTTS/releases/download/"
        "0.8.1/kittentts-0.8.1-py3-none-any.whl"
    )
    print()
    print_info("Installing kittentts Python package (~25-80MB model downloaded on first use)...")
    print()
    try:
        subprocess.run(
            [sys.executable, "-m", "pip", "install", "-U", wheel_url, "soundfile", "--quiet"],
            check=True, timeout=300,
        )
        print_success("kittentts installed successfully")
        return True
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
        print_error(f"Failed to install kittentts: {e}")
        print_info(f"Try manually: python -m pip install -U '{wheel_url}' soundfile")
        return False

def _setup_tts_provider(config: dict):
    """Interactive TTS provider selection with install flow for NeuTTS."""
    """Interactive TTS provider selection with install flow for local providers."""

    tts_config = config.get("tts", {})
    current_provider = tts_config.get("provider", "edge")
    subscription_features = get_nous_subscription_features(config)

@@ -923,6 +960,7 @@ def _setup_tts_provider(config: dict):
        "minimax": "MiniMax TTS",
        "mistral": "Mistral Voxtral TTS",
        "neutts": "NeuTTS",
        "kittentts": "KittenTTS",
    }
    current_label = provider_labels.get(current_provider, current_provider)

@@ -944,9 +982,10 @@ def _setup_tts_provider(config: dict):
            "MiniMax TTS (high quality with voice cloning, needs API key)",
            "Mistral Voxtral TTS (multilingual, native Opus, needs API key)",
            "NeuTTS (local on-device, free, ~300MB model download)",
            "KittenTTS (local on-device, free, lightweight ~25-80MB ONNX)",
        ]
    )
    providers.extend(["edge", "elevenlabs", "openai", "minimax", "mistral", "neutts"])
    providers.extend(["edge", "elevenlabs", "openai", "minimax", "mistral", "neutts", "kittentts"])
    choices.append(f"Keep current ({current_label})")
    keep_current_idx = len(choices) - 1
    idx = prompt_choice("Select TTS provider:", choices, keep_current_idx)

@@ -988,6 +1027,28 @@ def _setup_tts_provider(config: dict):
            print_info("Skipping install. Set tts.provider to 'neutts' after installing manually.")
            selected = "edge"

    elif selected == "kittentts":
        try:
            import importlib.util
            already_installed = importlib.util.find_spec("kittentts") is not None
        except Exception:
            already_installed = False

        if already_installed:
            print_success("KittenTTS is already installed")
        else:
            print()
            print_info("KittenTTS is lightweight (~25-80MB, CPU-only, no API key required).")
            print_info("Voices: Jasper, Bella, Luna, Bruno, Rosie, Hugo, Kiki, Leo")
            print()
            if prompt_yes_no("Install KittenTTS now?", True):
                if not _install_kittentts_deps():
                    print_warning("KittenTTS installation incomplete. Falling back to Edge TTS.")
                    selected = "edge"
            else:
                print_info("Skipping install. Set tts.provider to 'kittentts' after installing manually.")
                selected = "edge"

    elif selected == "elevenlabs":
        existing = get_env_value("ELEVENLABS_API_KEY")
        if not existing:
@@ -164,6 +164,14 @@ TOOL_CATEGORIES = {
                ],
                "tts_provider": "mistral",
            },
            {
                "name": "KittenTTS",
                "badge": "local · free",
                "tag": "Lightweight local ONNX TTS (~25MB), no API key",
                "env_vars": [],
                "tts_provider": "kittentts",
                "post_setup": "kittentts",
            },
        ],
    },
    "web": {

@@ -403,6 +411,36 @@ def _run_post_setup(post_setup_key: str):
        _print_warning(" Node.js not found. Install Camofox via Docker:")
        _print_info(" docker run -p 9377:9377 -e CAMOFOX_PORT=9377 jo-inc/camofox-browser")

    elif post_setup_key == "kittentts":
        try:
            __import__("kittentts")
            _print_success(" kittentts is already installed")
            return
        except ImportError:
            pass
        import subprocess
        _print_info(" Installing kittentts (~25-80MB model, CPU-only)...")
        wheel_url = (
            "https://github.com/KittenML/KittenTTS/releases/download/"
            "0.8.1/kittentts-0.8.1-py3-none-any.whl"
        )
        try:
            result = subprocess.run(
                [sys.executable, "-m", "pip", "install", "-U", wheel_url, "soundfile", "--quiet"],
                capture_output=True, text=True, timeout=300,
            )
            if result.returncode == 0:
                _print_success(" kittentts installed")
                _print_info(" Voices: Jasper, Bella, Luna, Bruno, Rosie, Hugo, Kiki, Leo")
                _print_info(" Models: KittenML/kitten-tts-nano-0.8-int8 (25MB), micro (41MB), mini (80MB)")
            else:
                _print_warning(" kittentts install failed:")
                _print_info(f" {result.stderr.strip()[:300]}")
                _print_info(f" Run manually: python -m pip install -U '{wheel_url}' soundfile")
        except subprocess.TimeoutExpired:
            _print_warning(" kittentts install timed out (>5min)")
            _print_info(f" Run manually: python -m pip install -U '{wheel_url}' soundfile")

    elif post_setup_key == "rl_training":
        try:
            __import__("tinker_atropos")
@@ -1,213 +0,0 @@
"""Regression tests: normalize_anthropic_response_v2 vs v1.

Constructs mock Anthropic responses and asserts that the v2 function
(returning NormalizedResponse) produces identical field values to the
original v1 function (returning SimpleNamespace + finish_reason).
"""

from types import SimpleNamespace

import pytest

from agent.anthropic_adapter import (
    normalize_anthropic_response,
    normalize_anthropic_response_v2,
)
from agent.transports.types import NormalizedResponse


def _text_block(text: str):
    return SimpleNamespace(type="text", text=text)


def _thinking_block(thinking: str, signature: str = "sig_abc"):
    return SimpleNamespace(type="thinking", thinking=thinking, signature=signature)


def _tool_use_block(id: str, name: str, input: dict):
    return SimpleNamespace(type="tool_use", id=id, name=name, input=input)


def _response(content_blocks, stop_reason="end_turn"):
    return SimpleNamespace(
        content=content_blocks,
        stop_reason=stop_reason,
        usage=SimpleNamespace(input_tokens=10, output_tokens=5),
    )


class TestTextOnly:
    def setup_method(self):
        self.resp = _response([_text_block("Hello world")])
        self.v1_msg, self.v1_finish = normalize_anthropic_response(self.resp)
        self.v2 = normalize_anthropic_response_v2(self.resp)

    def test_type(self):
        assert isinstance(self.v2, NormalizedResponse)

    def test_content_matches(self):
        assert self.v2.content == self.v1_msg.content

    def test_finish_reason_matches(self):
        assert self.v2.finish_reason == self.v1_finish

    def test_no_tool_calls(self):
        assert self.v2.tool_calls is None
        assert self.v1_msg.tool_calls is None

    def test_no_reasoning(self):
        assert self.v2.reasoning is None
        assert self.v1_msg.reasoning is None


class TestWithToolCalls:
    def setup_method(self):
        self.resp = _response(
            [
                _text_block("I'll check that"),
                _tool_use_block("toolu_abc", "terminal", {"command": "ls"}),
                _tool_use_block("toolu_def", "read_file", {"path": "/tmp"}),
            ],
            stop_reason="tool_use",
        )
        self.v1_msg, self.v1_finish = normalize_anthropic_response(self.resp)
        self.v2 = normalize_anthropic_response_v2(self.resp)

    def test_finish_reason(self):
        assert self.v2.finish_reason == "tool_calls"
        assert self.v1_finish == "tool_calls"

    def test_tool_call_count(self):
        assert len(self.v2.tool_calls) == 2
        assert len(self.v1_msg.tool_calls) == 2

    def test_tool_call_ids_match(self):
        for i in range(2):
            assert self.v2.tool_calls[i].id == self.v1_msg.tool_calls[i].id

    def test_tool_call_names_match(self):
        assert self.v2.tool_calls[0].name == "terminal"
        assert self.v2.tool_calls[1].name == "read_file"
        for i in range(2):
            assert self.v2.tool_calls[i].name == self.v1_msg.tool_calls[i].function.name

    def test_tool_call_arguments_match(self):
        for i in range(2):
            assert self.v2.tool_calls[i].arguments == self.v1_msg.tool_calls[i].function.arguments

    def test_content_preserved(self):
        assert self.v2.content == self.v1_msg.content
        assert "check that" in self.v2.content


class TestWithThinking:
    def setup_method(self):
        self.resp = _response([
            _thinking_block("Let me think about this carefully..."),
            _text_block("The answer is 42."),
        ])
        self.v1_msg, self.v1_finish = normalize_anthropic_response(self.resp)
        self.v2 = normalize_anthropic_response_v2(self.resp)

    def test_reasoning_matches(self):
        assert self.v2.reasoning == self.v1_msg.reasoning
        assert "think about this" in self.v2.reasoning

    def test_reasoning_details_in_provider_data(self):
        v1_details = self.v1_msg.reasoning_details
        v2_details = self.v2.provider_data.get("reasoning_details") if self.v2.provider_data else None
        assert v1_details is not None
        assert v2_details is not None
        assert len(v2_details) == len(v1_details)

    def test_content_excludes_thinking(self):
        assert self.v2.content == "The answer is 42."


class TestMixed:
    def setup_method(self):
        self.resp = _response(
            [
                _thinking_block("Planning my approach..."),
                _text_block("I'll run the command"),
                _tool_use_block("toolu_xyz", "terminal", {"command": "pwd"}),
            ],
            stop_reason="tool_use",
        )
        self.v1_msg, self.v1_finish = normalize_anthropic_response(self.resp)
        self.v2 = normalize_anthropic_response_v2(self.resp)

    def test_all_fields_present(self):
        assert self.v2.content is not None
        assert self.v2.tool_calls is not None
        assert self.v2.reasoning is not None
        assert self.v2.finish_reason == "tool_calls"

    def test_content_matches(self):
        assert self.v2.content == self.v1_msg.content

    def test_reasoning_matches(self):
        assert self.v2.reasoning == self.v1_msg.reasoning

    def test_tool_call_matches(self):
        assert self.v2.tool_calls[0].id == self.v1_msg.tool_calls[0].id
        assert self.v2.tool_calls[0].name == self.v1_msg.tool_calls[0].function.name


class TestStopReasons:
    @pytest.mark.parametrize("stop_reason,expected", [
        ("end_turn", "stop"),
        ("tool_use", "tool_calls"),
        ("max_tokens", "length"),
        ("stop_sequence", "stop"),
        ("refusal", "content_filter"),
        ("model_context_window_exceeded", "length"),
        ("unknown_future_reason", "stop"),
    ])
    def test_stop_reason_mapping(self, stop_reason, expected):
        resp = _response([_text_block("x")], stop_reason=stop_reason)
        _v1_msg, v1_finish = normalize_anthropic_response(resp)
        v2 = normalize_anthropic_response_v2(resp)
        assert v2.finish_reason == v1_finish == expected


class TestStripToolPrefix:
    def test_prefix_stripped(self):
        resp = _response(
            [_tool_use_block("toolu_1", "mcp_terminal", {"cmd": "ls"})],
            stop_reason="tool_use",
        )
        v1_msg, _ = normalize_anthropic_response(resp, strip_tool_prefix=True)
        v2 = normalize_anthropic_response_v2(resp, strip_tool_prefix=True)
        assert v1_msg.tool_calls[0].function.name == "terminal"
        assert v2.tool_calls[0].name == "terminal"

    def test_prefix_kept(self):
        resp = _response(
            [_tool_use_block("toolu_1", "mcp_terminal", {"cmd": "ls"})],
            stop_reason="tool_use",
        )
        v1_msg, _ = normalize_anthropic_response(resp, strip_tool_prefix=False)
        v2 = normalize_anthropic_response_v2(resp, strip_tool_prefix=False)
        assert v1_msg.tool_calls[0].function.name == "mcp_terminal"
        assert v2.tool_calls[0].name == "mcp_terminal"


class TestEdgeCases:
    def test_empty_content_blocks(self):
        resp = _response([])
        v1_msg, _v1_finish = normalize_anthropic_response(resp)
        v2 = normalize_anthropic_response_v2(resp)
        assert v2.content == v1_msg.content
        assert v2.content is None

    def test_no_reasoning_details_means_none_provider_data(self):
        resp = _response([_text_block("hi")])
        v2 = normalize_anthropic_response_v2(resp)
        assert v2.provider_data is None

    def test_v2_returns_dataclass_not_namespace(self):
        resp = _response([_text_block("hi")])
        v2 = normalize_anthropic_response_v2(resp)
        assert isinstance(v2, NormalizedResponse)
        assert not isinstance(v2, SimpleNamespace)
@@ -1,208 +0,0 @@
"""Tests for the transport ABC, registry, and AnthropicTransport."""

from types import SimpleNamespace

import pytest

from agent.transports import _REGISTRY, get_transport, register_transport
from agent.transports.base import ProviderTransport
from agent.transports.types import NormalizedResponse


class TestProviderTransportABC:
    def test_cannot_instantiate_abc(self):
        with pytest.raises(TypeError):
            ProviderTransport()

    def test_concrete_must_implement_all_abstract(self):
        class Incomplete(ProviderTransport):
            @property
            def api_mode(self):
                return "test"

        with pytest.raises(TypeError):
            Incomplete()

    def test_minimal_concrete(self):
        class Minimal(ProviderTransport):
            @property
            def api_mode(self):
                return "test_minimal"

            def convert_messages(self, messages, **kw):
                return messages

            def convert_tools(self, tools):
                return tools

            def build_kwargs(self, model, messages, tools=None, **params):
                return {"model": model, "messages": messages}

            def normalize_response(self, response, **kw):
                return NormalizedResponse(content="ok", tool_calls=None, finish_reason="stop")

        t = Minimal()
        assert t.api_mode == "test_minimal"
        assert t.validate_response(None) is True
        assert t.extract_cache_stats(None) is None
        assert t.map_finish_reason("end_turn") == "end_turn"


class TestTransportRegistry:
    def test_get_unregistered_returns_none(self):
        assert get_transport("nonexistent_mode") is None

    def test_anthropic_registered_on_import(self):
        import agent.transports.anthropic  # noqa: F401

        t = get_transport("anthropic_messages")
        assert t is not None
        assert t.api_mode == "anthropic_messages"

    def test_register_and_get(self):
        class DummyTransport(ProviderTransport):
            @property
            def api_mode(self):
                return "dummy_test"

            def convert_messages(self, messages, **kw):
                return messages

            def convert_tools(self, tools):
                return tools

            def build_kwargs(self, model, messages, tools=None, **params):
                return {}

            def normalize_response(self, response, **kw):
                return NormalizedResponse(content=None, tool_calls=None, finish_reason="stop")

        register_transport("dummy_test", DummyTransport)
        t = get_transport("dummy_test")
        assert t.api_mode == "dummy_test"
        _REGISTRY.pop("dummy_test", None)


class TestAnthropicTransport:
    @pytest.fixture
    def transport(self):
        import agent.transports.anthropic  # noqa: F401

        return get_transport("anthropic_messages")

    def test_api_mode(self, transport):
        assert transport.api_mode == "anthropic_messages"

    def test_convert_tools_simple(self, transport):
        tools = [{
            "type": "function",
            "function": {
                "name": "test_tool",
                "description": "A test",
                "parameters": {"type": "object", "properties": {}},
            },
        }]
        result = transport.convert_tools(tools)
        assert len(result) == 1
        assert result[0]["name"] == "test_tool"
        assert "input_schema" in result[0]

    def test_validate_response_none(self, transport):
        assert transport.validate_response(None) is False

    def test_validate_response_empty_content(self, transport):
        r = SimpleNamespace(content=[])
        assert transport.validate_response(r) is False

    def test_validate_response_valid(self, transport):
        r = SimpleNamespace(content=[SimpleNamespace(type="text", text="hello")])
        assert transport.validate_response(r) is True

    def test_map_finish_reason(self, transport):
        assert transport.map_finish_reason("end_turn") == "stop"
        assert transport.map_finish_reason("tool_use") == "tool_calls"
        assert transport.map_finish_reason("max_tokens") == "length"
        assert transport.map_finish_reason("stop_sequence") == "stop"
        assert transport.map_finish_reason("refusal") == "content_filter"
        assert transport.map_finish_reason("model_context_window_exceeded") == "length"
        assert transport.map_finish_reason("unknown") == "stop"

    def test_extract_cache_stats_none_usage(self, transport):
        r = SimpleNamespace(usage=None)
        assert transport.extract_cache_stats(r) is None

    def test_extract_cache_stats_with_cache(self, transport):
        usage = SimpleNamespace(cache_read_input_tokens=100, cache_creation_input_tokens=50)
        r = SimpleNamespace(usage=usage)
        result = transport.extract_cache_stats(r)
        assert result == {"cached_tokens": 100, "creation_tokens": 50}

    def test_extract_cache_stats_zero(self, transport):
        usage = SimpleNamespace(cache_read_input_tokens=0, cache_creation_input_tokens=0)
        r = SimpleNamespace(usage=usage)
        assert transport.extract_cache_stats(r) is None

    def test_normalize_response_text(self, transport):
        r = SimpleNamespace(
            content=[SimpleNamespace(type="text", text="Hello world")],
            stop_reason="end_turn",
            usage=SimpleNamespace(input_tokens=10, output_tokens=5),
            model="claude-sonnet-4-6",
        )
        nr = transport.normalize_response(r)
        assert isinstance(nr, NormalizedResponse)
        assert nr.content == "Hello world"
        assert nr.tool_calls is None or nr.tool_calls == []
        assert nr.finish_reason == "stop"

    def test_normalize_response_tool_calls(self, transport):
        r = SimpleNamespace(
            content=[
                SimpleNamespace(type="tool_use", id="toolu_123", name="terminal", input={"command": "ls"}),
            ],
            stop_reason="tool_use",
            usage=SimpleNamespace(input_tokens=10, output_tokens=20),
            model="claude-sonnet-4-6",
        )
        nr = transport.normalize_response(r)
        assert nr.finish_reason == "tool_calls"
        assert len(nr.tool_calls) == 1
        tc = nr.tool_calls[0]
        assert tc.name == "terminal"
        assert tc.id == "toolu_123"
        assert '"command"' in tc.arguments

    def test_normalize_response_thinking(self, transport):
        r = SimpleNamespace(
            content=[
                SimpleNamespace(type="thinking", thinking="Let me think..."),
                SimpleNamespace(type="text", text="The answer is 42"),
            ],
            stop_reason="end_turn",
            usage=SimpleNamespace(input_tokens=10, output_tokens=15),
            model="claude-sonnet-4-6",
        )
        nr = transport.normalize_response(r)
        assert nr.content == "The answer is 42"
        assert nr.reasoning == "Let me think..."

    def test_build_kwargs_returns_dict(self, transport):
        messages = [{"role": "user", "content": "Hello"}]
        kw = transport.build_kwargs(
            model="claude-sonnet-4-6",
            messages=messages,
            max_tokens=1024,
        )
        assert isinstance(kw, dict)
        assert "model" in kw
        assert "max_tokens" in kw
        assert "messages" in kw

    def test_convert_messages_extracts_system(self, transport):
        messages = [
            {"role": "system", "content": "You are helpful."},
            {"role": "user", "content": "Hi"},
        ]
        system, msgs = transport.convert_messages(messages)
        assert system is not None
        assert len(msgs) >= 1
@@ -1,130 +0,0 @@
"""Tests for agent/transports/types.py — dataclass construction + helpers."""

import json

from agent.transports.types import (
    NormalizedResponse,
    ToolCall,
    Usage,
    build_tool_call,
    map_finish_reason,
)


class TestToolCall:
    def test_basic_construction(self):
        tc = ToolCall(id="call_abc", name="terminal", arguments='{"cmd": "ls"}')
        assert tc.id == "call_abc"
        assert tc.name == "terminal"
        assert tc.arguments == '{"cmd": "ls"}'
        assert tc.provider_data is None

    def test_none_id(self):
        tc = ToolCall(id=None, name="read_file", arguments="{}")
        assert tc.id is None

    def test_provider_data(self):
        tc = ToolCall(
            id="call_x",
            name="t",
            arguments="{}",
            provider_data={"call_id": "call_x", "response_item_id": "fc_x"},
        )
        assert tc.provider_data["call_id"] == "call_x"
        assert tc.provider_data["response_item_id"] == "fc_x"


class TestUsage:
    def test_defaults(self):
        u = Usage()
        assert u.prompt_tokens == 0
        assert u.completion_tokens == 0
        assert u.total_tokens == 0
        assert u.cached_tokens == 0

    def test_explicit(self):
        u = Usage(prompt_tokens=100, completion_tokens=50, total_tokens=150, cached_tokens=80)
        assert u.total_tokens == 150


class TestNormalizedResponse:
    def test_text_only(self):
        r = NormalizedResponse(content="hello", tool_calls=None, finish_reason="stop")
        assert r.content == "hello"
        assert r.tool_calls is None
        assert r.finish_reason == "stop"
        assert r.reasoning is None
        assert r.usage is None
        assert r.provider_data is None

    def test_with_tool_calls(self):
        tcs = [ToolCall(id="call_1", name="terminal", arguments='{"cmd":"pwd"}')]
        r = NormalizedResponse(content=None, tool_calls=tcs, finish_reason="tool_calls")
        assert r.finish_reason == "tool_calls"
        assert len(r.tool_calls) == 1
        assert r.tool_calls[0].name == "terminal"

    def test_with_reasoning(self):
        r = NormalizedResponse(
            content="answer",
            tool_calls=None,
            finish_reason="stop",
            reasoning="I thought about it",
        )
        assert r.reasoning == "I thought about it"

    def test_with_provider_data(self):
        r = NormalizedResponse(
            content=None,
            tool_calls=None,
            finish_reason="stop",
            provider_data={"reasoning_details": [{"type": "thinking", "thinking": "hmm"}]},
        )
        assert r.provider_data["reasoning_details"][0]["type"] == "thinking"


class TestBuildToolCall:
    def test_dict_arguments_serialized(self):
        tc = build_tool_call(id="call_1", name="terminal", arguments={"cmd": "ls"})
        assert tc.arguments == json.dumps({"cmd": "ls"})
        assert tc.provider_data is None

    def test_string_arguments_passthrough(self):
        tc = build_tool_call(id="call_2", name="read_file", arguments='{"path": "/tmp"}')
        assert tc.arguments == '{"path": "/tmp"}'

    def test_provider_fields(self):
        tc = build_tool_call(
            id="call_3",
            name="terminal",
            arguments="{}",
            call_id="call_3",
            response_item_id="fc_3",
        )
        assert tc.provider_data == {"call_id": "call_3", "response_item_id": "fc_3"}

    def test_none_id(self):
        tc = build_tool_call(id=None, name="t", arguments="{}")
        assert tc.id is None


class TestMapFinishReason:
    ANTHROPIC_MAP = {
        "end_turn": "stop",
        "tool_use": "tool_calls",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "refusal": "content_filter",
    }

    def test_known_reason(self):
        assert map_finish_reason("end_turn", self.ANTHROPIC_MAP) == "stop"
        assert map_finish_reason("tool_use", self.ANTHROPIC_MAP) == "tool_calls"
        assert map_finish_reason("max_tokens", self.ANTHROPIC_MAP) == "length"
        assert map_finish_reason("refusal", self.ANTHROPIC_MAP) == "content_filter"

    def test_unknown_reason_defaults_to_stop(self):
        assert map_finish_reason("something_new", self.ANTHROPIC_MAP) == "stop"

    def test_none_reason(self):
        assert map_finish_reason(None, self.ANTHROPIC_MAP) == "stop"
tests/tools/test_tts_kittentts.py (new file, 236 lines)

@@ -0,0 +1,236 @@
"""Tests for the KittenTTS local provider in tools/tts_tool.py."""

import json
from unittest.mock import MagicMock, patch

import numpy as np
import pytest


@pytest.fixture(autouse=True)
def clean_env(monkeypatch):
    for key in ("HERMES_SESSION_PLATFORM",):
        monkeypatch.delenv(key, raising=False)


@pytest.fixture(autouse=True)
def clear_kittentts_cache():
    """Reset the module-level model cache between tests."""
    from tools import tts_tool as _tt
    _tt._kittentts_model_cache.clear()
    yield
    _tt._kittentts_model_cache.clear()


@pytest.fixture
def mock_kittentts_module():
    """Inject a fake kittentts + soundfile module that return stub objects."""
    fake_model = MagicMock()
    # 24kHz float32 PCM at ~2s of silence
    fake_model.generate.return_value = np.zeros(48000, dtype=np.float32)
    fake_cls = MagicMock(return_value=fake_model)
    fake_kittentts = MagicMock()
    fake_kittentts.KittenTTS = fake_cls

    # Stub soundfile — the real package isn't installed in CI venv, and
    # _generate_kittentts does `import soundfile as sf` at runtime.
    fake_sf = MagicMock()

    def _fake_write(path, audio, samplerate):
        # Emulate writing a real file so downstream path checks succeed.
        import pathlib

        pathlib.Path(path).write_bytes(b"RIFF\x00\x00\x00\x00WAVEfmt fake")

    fake_sf.write = _fake_write

    with patch.dict(
        "sys.modules",
        {"kittentts": fake_kittentts, "soundfile": fake_sf},
    ):
        yield fake_model, fake_cls


class TestGenerateKittenTts:
    def test_successful_wav_generation(self, tmp_path, mock_kittentts_module):
        from tools.tts_tool import _generate_kittentts

        fake_model, fake_cls = mock_kittentts_module
        output_path = str(tmp_path / "test.wav")
        result = _generate_kittentts("Hello world", output_path, {})

        assert result == output_path
        assert (tmp_path / "test.wav").exists()
        fake_cls.assert_called_once()
        fake_model.generate.assert_called_once()

    def test_config_passes_voice_speed_cleantext(self, tmp_path, mock_kittentts_module):
        from tools.tts_tool import _generate_kittentts

        fake_model, _ = mock_kittentts_module
        config = {
            "kittentts": {
                "model": "KittenML/kitten-tts-mini-0.8",
                "voice": "Luna",
                "speed": 1.25,
                "clean_text": False,
            }
        }
        _generate_kittentts("Hi there", str(tmp_path / "out.wav"), config)

        call_kwargs = fake_model.generate.call_args.kwargs
        assert call_kwargs["voice"] == "Luna"
        assert call_kwargs["speed"] == 1.25
        assert call_kwargs["clean_text"] is False

    def test_default_model_and_voice(self, tmp_path, mock_kittentts_module):
        from tools.tts_tool import (
            DEFAULT_KITTENTTS_MODEL,
            DEFAULT_KITTENTTS_VOICE,
            _generate_kittentts,
        )

        fake_model, fake_cls = mock_kittentts_module
        _generate_kittentts("Hi", str(tmp_path / "out.wav"), {})

        fake_cls.assert_called_once_with(DEFAULT_KITTENTTS_MODEL)
        assert fake_model.generate.call_args.kwargs["voice"] == DEFAULT_KITTENTTS_VOICE

    def test_model_is_cached_across_calls(self, tmp_path, mock_kittentts_module):
        from tools.tts_tool import _generate_kittentts

        _, fake_cls = mock_kittentts_module
        _generate_kittentts("One", str(tmp_path / "a.wav"), {})
        _generate_kittentts("Two", str(tmp_path / "b.wav"), {})

        # Same model name → class instantiated exactly once
        assert fake_cls.call_count == 1

    def test_different_models_are_cached_separately(self, tmp_path, mock_kittentts_module):
        from tools.tts_tool import _generate_kittentts

        _, fake_cls = mock_kittentts_module
        _generate_kittentts(
            "A",
            str(tmp_path / "a.wav"),
            {"kittentts": {"model": "KittenML/kitten-tts-nano-0.8-int8"}},
        )
        _generate_kittentts(
            "B",
            str(tmp_path / "b.wav"),
            {"kittentts": {"model": "KittenML/kitten-tts-mini-0.8"}},
        )

        assert fake_cls.call_count == 2

    def test_non_wav_extension_triggers_ffmpeg_conversion(
        self, tmp_path, mock_kittentts_module, monkeypatch
    ):
        """Non-.wav output path causes WAV → target ffmpeg conversion."""
        from tools import tts_tool as _tt

        calls = []

        def fake_shutil_which(cmd):
            return "/usr/bin/ffmpeg" if cmd == "ffmpeg" else None

        def fake_run(cmd, check=False, timeout=None, **kw):
            calls.append(cmd)
            # Emulate ffmpeg writing the output file
            import pathlib

            out_path = cmd[-1]
            pathlib.Path(out_path).write_bytes(b"fake-mp3-data")
            return MagicMock(returncode=0)

        monkeypatch.setattr(_tt.shutil, "which", fake_shutil_which)
        monkeypatch.setattr(_tt.subprocess, "run", fake_run)

        output_path = str(tmp_path / "test.mp3")
        result = _tt._generate_kittentts("Hi", output_path, {})

        assert result == output_path
        assert len(calls) == 1
        assert calls[0][0] == "/usr/bin/ffmpeg"

    def test_missing_kittentts_raises_import_error(self, tmp_path, monkeypatch):
        """When kittentts package is not installed, _import_kittentts raises."""
        import sys

        monkeypatch.setitem(sys.modules, "kittentts", None)
        from tools.tts_tool import _generate_kittentts

        with pytest.raises((ImportError, TypeError)):
            _generate_kittentts("Hi", str(tmp_path / "out.wav"), {})


class TestCheckKittenttsAvailable:
    def test_reports_available_when_package_present(self, monkeypatch):
        import importlib.util
        from tools.tts_tool import _check_kittentts_available

        fake_spec = MagicMock()
        monkeypatch.setattr(
            importlib.util,
            "find_spec",
            lambda name: fake_spec if name == "kittentts" else None,
        )
        assert _check_kittentts_available() is True

    def test_reports_unavailable_when_package_missing(self, monkeypatch):
        import importlib.util
        from tools.tts_tool import _check_kittentts_available

        monkeypatch.setattr(importlib.util, "find_spec", lambda name: None)
        assert _check_kittentts_available() is False


class TestDispatcherBranch:
    def test_kittentts_not_installed_returns_helpful_error(self, monkeypatch, tmp_path):
        """When provider=kittentts but package missing, return JSON error with setup hint."""
        import sys

        monkeypatch.setitem(sys.modules, "kittentts", None)
        monkeypatch.setenv("HERMES_HOME", str(tmp_path))

        from tools.tts_tool import text_to_speech_tool

        # Write a config telling it to use kittentts
        import yaml

        (tmp_path / "config.yaml").write_text(
            yaml.safe_dump({"tts": {"provider": "kittentts"}})
        )

        result = json.loads(text_to_speech_tool(text="Hello"))
        assert result["success"] is False
        assert "kittentts" in result["error"].lower()
        assert "hermes setup tts" in result["error"].lower()

    def test_non_telegram_explicit_wav_path_is_preserved(
        self, monkeypatch, tmp_path, mock_kittentts_module
    ):
        """Explicit WAV outputs should stay WAV outside Telegram sessions."""
        import yaml
        from tools import tts_tool as _tt

        monkeypatch.setenv("HERMES_HOME", str(tmp_path))
        (tmp_path / "config.yaml").write_text(
            yaml.safe_dump({"tts": {"provider": "kittentts"}})
        )

        def fail_convert(_path):
            raise AssertionError("_convert_to_opus should not run outside Telegram")

        monkeypatch.setattr(_tt, "_convert_to_opus", fail_convert)

        result = json.loads(
            _tt.text_to_speech_tool(
                text="Hello from KittenTTS",
                output_path=str(tmp_path / "out.wav"),
            )
        )

        assert result["success"] is True
        assert result["file_path"] == str(tmp_path / "out.wav")
        assert (tmp_path / "out.wav").exists()
tools/tts_tool.py

@@ -2,13 +2,14 @@
"""
Text-to-Speech Tool Module

Supports six TTS providers:
Supports seven TTS providers:
- Edge TTS (default, free, no API key): Microsoft Edge neural voices
- ElevenLabs (premium): High-quality voices, needs ELEVENLABS_API_KEY
- OpenAI TTS: Good quality, needs OPENAI_API_KEY
- MiniMax TTS: High-quality with voice cloning, needs MINIMAX_API_KEY
- Mistral (Voxtral TTS): Multilingual, native Opus, needs MISTRAL_API_KEY
- NeuTTS (local, free, no API key): On-device TTS via neutts_cli, needs neutts installed
- KittenTTS (local, free, no API key): Lightweight on-device ONNX TTS via kittentts

Output formats:
- Opus (.ogg) for Telegram voice bubbles (requires ffmpeg for Edge TTS)

@@ -77,6 +78,12 @@ def _import_sounddevice():
    return sd


def _import_kittentts():
    """Lazy import KittenTTS. Returns the class or raises ImportError."""
    from kittentts import KittenTTS
    return KittenTTS


# ===========================================================================
# Defaults
# ===========================================================================

@@ -86,6 +93,8 @@ DEFAULT_ELEVENLABS_VOICE_ID = "pNInz6obpgDQGcFmaJgB"  # Adam
DEFAULT_ELEVENLABS_MODEL_ID = "eleven_multilingual_v2"
DEFAULT_ELEVENLABS_STREAMING_MODEL_ID = "eleven_flash_v2_5"
DEFAULT_OPENAI_MODEL = "gpt-4o-mini-tts"
DEFAULT_KITTENTTS_MODEL = "KittenML/kitten-tts-nano-0.8-int8"  # 25MB
DEFAULT_KITTENTTS_VOICE = "Jasper"
DEFAULT_OPENAI_VOICE = "alloy"
DEFAULT_OPENAI_BASE_URL = "https://api.openai.com/v1"
DEFAULT_MINIMAX_MODEL = "speech-2.8-hd"

@@ -448,6 +457,15 @@ def _check_neutts_available() -> bool:
        return False


def _check_kittentts_available() -> bool:
    """Check if the kittentts engine is importable (installed locally)."""
    try:
        import importlib.util
        return importlib.util.find_spec("kittentts") is not None
    except Exception:
        return False


def _default_neutts_ref_audio() -> str:
    """Return path to the bundled default voice reference audio."""
    return str(Path(__file__).parent / "neutts_samples" / "jo.wav")

@@ -511,6 +529,51 @@ def _generate_neutts(text: str, output_path: str, tts_config: Dict[str, Any]) -> str:
    return output_path


# ===========================================================================
# Provider: KittenTTS (local, lightweight)
# ===========================================================================

# Module-level cache for KittenTTS model instances
_kittentts_model_cache: Dict[str, Any] = {}


def _generate_kittentts(text: str, output_path: str, tts_config: Dict[str, Any]) -> str:
    """Generate speech using the local KittenTTS ONNX model."""
    KittenTTS = _import_kittentts()
    kt_config = tts_config.get("kittentts", {})
    model_name = kt_config.get("model", DEFAULT_KITTENTTS_MODEL)
    voice = kt_config.get("voice", DEFAULT_KITTENTTS_VOICE)
    speed = kt_config.get("speed", 1.0)
    clean_text = kt_config.get("clean_text", True)

    global _kittentts_model_cache
    if model_name not in _kittentts_model_cache:
        logger.info("[KittenTTS] Loading model: %s", model_name)
        _kittentts_model_cache[model_name] = KittenTTS(model_name)

    model = _kittentts_model_cache[model_name]
    audio = model.generate(text, voice=voice, speed=speed, clean_text=clean_text)

    import soundfile as sf

    wav_path = output_path
    if not output_path.endswith(".wav"):
        wav_path = output_path.rsplit(".", 1)[0] + ".wav"

    sf.write(wav_path, audio, 24000)

    if wav_path != output_path:
        ffmpeg = shutil.which("ffmpeg")
        if ffmpeg:
            conv_cmd = [ffmpeg, "-i", wav_path, "-y", "-loglevel", "error", output_path]
            subprocess.run(conv_cmd, check=True, timeout=30)
            os.remove(wav_path)
        else:
            os.rename(wav_path, output_path)

    return output_path
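A direct call to the generator above, assuming the kittentts and soundfile packages are installed; the config keys match the hunk, and the .ogg target exercises the WAV-then-ffmpeg conversion branch:

```python
from tools.tts_tool import _generate_kittentts

path = _generate_kittentts(
    "Hello from KittenTTS",
    "/tmp/hello.ogg",  # non-.wav target: WAV is written first, then converted if ffmpeg exists
    {"kittentts": {"voice": "Luna", "speed": 1.1}},
)
```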
# ===========================================================================
# Main tool function
# ===========================================================================

@@ -622,6 +685,19 @@ def text_to_speech_tool(
        logger.info("Generating speech with NeuTTS (local)...")
        _generate_neutts(text, file_str, tts_config)

    elif provider == "kittentts":
        try:
            _import_kittentts()
        except ImportError:
            return json.dumps({
                "success": False,
                "error": "KittenTTS provider selected but 'kittentts' package not installed. "
                         "Run 'hermes setup tts' and choose KittenTTS, or install manually: "
                         "pip install https://github.com/KittenML/KittenTTS/releases/download/0.8.1/kittentts-0.8.1-py3-none-any.whl"
            }, ensure_ascii=False)
        logger.info("Generating speech with KittenTTS (local, lightweight)...")
        _generate_kittentts(text, file_str, tts_config)

    else:
        # Default: Edge TTS (free), with NeuTTS as local fallback
        edge_available = True

@@ -658,10 +734,10 @@ def text_to_speech_tool(
            "error": f"TTS generation produced no output (provider: {provider})"
        }, ensure_ascii=False)

    # Try Opus conversion for Telegram compatibility
    # Edge TTS outputs MP3, NeuTTS outputs WAV — both need ffmpeg conversion
    # Try Opus conversion for Telegram compatibility only.
    # Outside Telegram, preserve the caller's explicit output format.
    voice_compatible = False
    if provider in ("edge", "neutts", "minimax") and not file_str.endswith(".ogg"):
    if want_opus and provider in ("edge", "neutts", "minimax", "kittentts") and not file_str.endswith(".ogg"):
        opus_path = _convert_to_opus(file_str)
        if opus_path:
            file_str = opus_path

@@ -742,6 +818,8 @@ def check_tts_requirements() -> bool:
        pass
    if _check_neutts_available():
        return True
    if _check_kittentts_available():
        return True
    return False
@@ -10,7 +10,7 @@ Hermes Agent supports both text-to-speech output and voice message transcription

## Text-to-Speech

Convert text to speech with six providers:
Convert text to speech with seven providers:

| Provider | Quality | Cost | API Key |
|----------|---------|------|---------|

@@ -20,6 +20,7 @@ Convert text to speech with six providers:
| **MiniMax TTS** | Excellent | Paid | `MINIMAX_API_KEY` |
| **Mistral (Voxtral TTS)** | Excellent | Paid | `MISTRAL_API_KEY` |
| **NeuTTS** | Good | Free | None needed |
| **KittenTTS** | Good | Free (local) | None needed |

### Platform Delivery

@@ -35,7 +36,7 @@ Convert text to speech with six providers:
```yaml
# In ~/.hermes/config.yaml
tts:
  provider: "edge" # "edge" | "elevenlabs" | "openai" | "minimax" | "mistral" | "neutts"
  provider: "edge" # "edge" | "elevenlabs" | "openai" | "minimax" | "mistral" | "neutts" | "kittentts"
  speed: 1.0 # Global speed multiplier (provider-specific settings override this)
  edge:
    voice: "en-US-AriaNeural" # 322 voices, 74 languages

@@ -62,6 +63,11 @@ tts:
    ref_text: ''
    model: neuphonic/neutts-air-q4-gguf
    device: cpu
  kittentts:
    model: KittenML/kitten-tts-nano-0.8-int8 # 25MB int8 default; also micro and mini variants
    voice: Jasper # Jasper, Bella, Luna, Bruno, Rosie, Hugo, Kiki, Leo
    speed: 1.0
    clean_text: true
```

**Speed control**: The global `tts.speed` value applies to all providers by default. Each provider can override it with its own `speed` setting (e.g., `tts.openai.speed: 1.5`). Provider-specific speed takes precedence over the global value. Default is `1.0` (normal speed).
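Read concretely, that precedence rule amounts to the following lookup; this is a hypothetical sketch of the behavior described above, not the tool's actual code:

```python
def resolve_speed(tts_config: dict, provider: str) -> float:
    # Provider-specific speed wins; otherwise fall back to the global tts.speed.
    provider_speed = tts_config.get(provider, {}).get("speed")
    if provider_speed is not None:
        return provider_speed
    return tts_config.get("speed", 1.0)

# resolve_speed({"speed": 1.0, "openai": {"speed": 1.5}}, "openai") -> 1.5
# resolve_speed({"speed": 1.0, "openai": {"speed": 1.5}}, "edge")   -> 1.0
```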
@@ -74,6 +80,7 @@ Telegram voice bubbles require Opus/OGG audio format:

- **Edge TTS** (default) outputs MP3 and needs **ffmpeg** to convert:
- **MiniMax TTS** outputs MP3 and needs **ffmpeg** to convert for Telegram voice bubbles
- **NeuTTS** outputs WAV and also needs **ffmpeg** to convert for Telegram voice bubbles
- **KittenTTS** outputs WAV and also needs **ffmpeg** to convert for Telegram voice bubbles

```bash
# Ubuntu/Debian

@@ -86,7 +93,7 @@ brew install ffmpeg
sudo dnf install ffmpeg
```

Without ffmpeg, Edge TTS, MiniMax TTS, and NeuTTS audio are sent as regular audio files (playable, but shown as a rectangular player instead of a voice bubble).
Without ffmpeg, Edge TTS, MiniMax TTS, NeuTTS, and KittenTTS audio are sent as regular audio files (playable, but shown as a rectangular player instead of a voice bubble).

:::tip
If you want voice bubbles without installing ffmpeg, switch to the OpenAI, ElevenLabs, or Mistral provider.