feat: add vllm-mlx as OpenAI-compatible local inference backend (#1069)
Adds vllm-mlx as a supported provider type in the cascade router. vllm-mlx offers 25-50% higher throughput than Ollama on Apple Silicon with ~15% lower memory usage, via a fully OpenAI-compatible API.

Changes:
- cascade.py: add `vllm_mlx` provider type with a health check (strips the /v1 suffix from base_url before calling /health) and a `_call_vllm_mlx` method (reuses the OpenAI client pointed at the local server; no API key needed)
- providers.yaml: add `vllm-mlx-local` entry (disabled by default, priority 2) with benchmark notes and setup instructions
- test_router_cascade.py: tests for availability checks (no-requests fallback, healthy/down server, default URL) and completion routing (correct provider used, URL normalisation, metabolic-protocol bypass)

Recommendation (documented in providers.yaml): prefer vllm-mlx over Ollama when throughput matters on Apple Silicon; stay on Ollama for broadest ecosystem compatibility and simpler setup.

Refs #1069

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
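A quick way to see the new path end to end, sketched from the call pattern the new tests exercise (`CascadeRouter.complete` returning content/provider/model keys). This assumes `CascadeRouter` is importable from `infrastructure.router.cascade` as the tests suggest, that a vllm-mlx server is running, and that the `vllm-mlx-local` entry is enabled; the config path and prompt are placeholders:

import asyncio
from pathlib import Path

from infrastructure.router.cascade import CascadeRouter


async def main() -> None:
    # Hypothetical config location; point this at the providers.yaml edited below.
    router = CascadeRouter(config_path=Path("providers.yaml"))
    result = await router.complete(
        messages=[{"role": "user", "content": "Hi"}],
    )
    # The router reports which provider and model actually served the request.
    print(result["provider"], result["model"])
    print(result["content"])


asyncio.run(main())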
providers.yaml

@@ -67,6 +67,29 @@ providers:
        capabilities: [text, creative, streaming]
        description: "Dolphin 3.0 8B with Morrowind system prompt and higher temperature"

  # Secondary: vllm-mlx (OpenAI-compatible local backend, 25–50% faster than Ollama on Apple Silicon)
  # Evaluation results (EuroMLSys '26 / M3 Ultra benchmarks):
  # - 21–87% higher throughput than llama.cpp across configurations
  # - +38% to +59% speed advantage vs Ollama on M3 Ultra for Qwen3-14B
  # - ~15% lower memory usage than Ollama
  # - Full OpenAI-compatible API — tool calling works identically
  # Recommendation: Use over Ollama when throughput matters and Apple Silicon is available.
  #                 Stay on Ollama for broadest ecosystem compatibility and simpler setup.
  # To enable: start vllm-mlx server (`python -m vllm.entrypoints.openai.api_server
  #   --model Qwen/Qwen2.5-14B-Instruct-MLX --port 8000`) then set enabled: true.
  - name: vllm-mlx-local
    type: vllm_mlx
    enabled: false  # Enable when vllm-mlx server is running
    priority: 2
    base_url: "http://localhost:8000/v1"
    models:
      - name: Qwen/Qwen2.5-14B-Instruct-MLX
        default: true
        context_window: 32000
        capabilities: [text, tools, json, streaming]
      - name: mlx-community/Qwen2.5-7B-Instruct-4bit
        context_window: 32000
        capabilities: [text, tools, json, streaming]

  # Tertiary: OpenAI (if API key available)
  - name: openai-backup
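Because the entry speaks plain OpenAI wire format, the server can be smoke-tested directly before flipping `enabled: true`. A minimal sketch, assuming the server was started as described in the comments above and is reachable at the default base_url; illustration only, not part of the commit:

import requests
from openai import OpenAI

# The health endpoint lives at the server root, without the /v1 prefix.
assert requests.get("http://localhost:8000/health", timeout=5).status_code == 200

# No real API key is needed for a local server; any placeholder works.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="no-key-required")
response = client.chat.completions.create(
    model="Qwen/Qwen2.5-14B-Instruct-MLX",
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.model, response.choices[0].message.content)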
infrastructure/router/cascade.py

@@ -310,6 +310,22 @@ class CascadeRouter:
                logger.debug("Ollama provider check error: %s", exc)
                return False

        elif provider.type == "vllm_mlx":
            # Check if local vllm-mlx server is running (OpenAI-compatible)
            if requests is None:
                return True
            try:
                base_url = provider.base_url or provider.url or "http://localhost:8000"
                # Strip /v1 suffix — health endpoint is at the root
                server_root = base_url.rstrip("/")
                if server_root.endswith("/v1"):
                    server_root = server_root[:-3]
                response = requests.get(f"{server_root}/health", timeout=5)
                return response.status_code == 200
            except Exception as exc:
                logger.debug("vllm-mlx provider check error: %s", exc)
                return False

        elif provider.type in ("openai", "anthropic", "grok"):
            # Check if API key is set
            return provider.api_key is not None and provider.api_key != ""
@@ -619,6 +635,14 @@ class CascadeRouter:
                temperature=temperature,
                max_tokens=max_tokens,
            )
        elif provider.type == "vllm_mlx":
            result = await self._call_vllm_mlx(
                provider=provider,
                messages=messages,
                model=model or provider.get_default_model(),
                temperature=temperature,
                max_tokens=max_tokens,
            )
        else:
            raise ValueError(f"Unknown provider type: {provider.type}")
@@ -815,6 +839,48 @@ class CascadeRouter:
            "model": response.model,
        }

    async def _call_vllm_mlx(
        self,
        provider: Provider,
        messages: list[dict],
        model: str,
        temperature: float,
        max_tokens: int | None,
    ) -> dict:
        """Call vllm-mlx via its OpenAI-compatible API.

        vllm-mlx exposes the same /v1/chat/completions endpoint as OpenAI,
        so we reuse the OpenAI client pointed at the local server.
        No API key is required for local deployments.
        """
        import openai

        base_url = provider.base_url or provider.url or "http://localhost:8000"
        # Ensure the base_url ends with /v1 as expected by the OpenAI client
        if not base_url.rstrip("/").endswith("/v1"):
            base_url = base_url.rstrip("/") + "/v1"

        client = openai.AsyncOpenAI(
            api_key=provider.api_key or "no-key-required",
            base_url=base_url,
            timeout=self.config.timeout_seconds,
        )

        kwargs: dict = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
        }
        if max_tokens:
            kwargs["max_tokens"] = max_tokens

        response = await client.chat.completions.create(**kwargs)

        return {
            "content": response.choices[0].message.content,
            "model": response.model,
        }

    def _record_success(self, provider: Provider, latency_ms: float) -> None:
        """Record a successful request."""
        provider.metrics.total_requests += 1
test_router_cascade.py

@@ -489,6 +489,201 @@ class TestProviderAvailabilityCheck:

        assert router._check_provider_available(provider) is False

    def test_check_vllm_mlx_without_requests(self):
        """Test vllm-mlx returns True when requests not available (fallback)."""
        router = CascadeRouter(config_path=Path("/nonexistent"))

        provider = Provider(
            name="vllm-mlx-local",
            type="vllm_mlx",
            enabled=True,
            priority=2,
            base_url="http://localhost:8000/v1",
        )

        import infrastructure.router.cascade as cascade_module

        old_requests = cascade_module.requests
        cascade_module.requests = None
        try:
            assert router._check_provider_available(provider) is True
        finally:
            cascade_module.requests = old_requests

    def test_check_vllm_mlx_server_healthy(self):
        """Test vllm-mlx when health check succeeds."""
        from unittest.mock import MagicMock, patch

        router = CascadeRouter(config_path=Path("/nonexistent"))

        provider = Provider(
            name="vllm-mlx-local",
            type="vllm_mlx",
            enabled=True,
            priority=2,
            base_url="http://localhost:8000/v1",
        )

        mock_response = MagicMock()
        mock_response.status_code = 200

        with patch("infrastructure.router.cascade.requests") as mock_requests:
            mock_requests.get.return_value = mock_response
            result = router._check_provider_available(provider)

        assert result is True
        mock_requests.get.assert_called_once_with("http://localhost:8000/health", timeout=5)

    def test_check_vllm_mlx_server_down(self):
        """Test vllm-mlx when server is not running."""
        from unittest.mock import patch

        router = CascadeRouter(config_path=Path("/nonexistent"))

        provider = Provider(
            name="vllm-mlx-local",
            type="vllm_mlx",
            enabled=True,
            priority=2,
            base_url="http://localhost:8000/v1",
        )

        with patch("infrastructure.router.cascade.requests") as mock_requests:
            mock_requests.get.side_effect = ConnectionRefusedError("Connection refused")
            result = router._check_provider_available(provider)

        assert result is False

    def test_check_vllm_mlx_default_url(self):
        """Test vllm-mlx uses default localhost:8000 when no URL configured."""
        from unittest.mock import MagicMock, patch

        router = CascadeRouter(config_path=Path("/nonexistent"))

        provider = Provider(
            name="vllm-mlx-local",
            type="vllm_mlx",
            enabled=True,
            priority=2,
        )

        mock_response = MagicMock()
        mock_response.status_code = 200

        with patch("infrastructure.router.cascade.requests") as mock_requests:
            mock_requests.get.return_value = mock_response
            router._check_provider_available(provider)

        mock_requests.get.assert_called_once_with("http://localhost:8000/health", timeout=5)


@pytest.mark.asyncio
class TestVllmMlxProvider:
    """Test vllm-mlx provider integration."""

    async def test_complete_with_vllm_mlx(self):
        """Test successful completion via vllm-mlx."""
        from unittest.mock import patch

        router = CascadeRouter(config_path=Path("/nonexistent"))

        provider = Provider(
            name="vllm-mlx-local",
            type="vllm_mlx",
            enabled=True,
            priority=2,
            base_url="http://localhost:8000/v1",
            models=[{"name": "Qwen/Qwen2.5-14B-Instruct-MLX", "default": True}],
        )
        router.providers = [provider]

        with patch.object(router, "_call_vllm_mlx") as mock_call:
            mock_call.return_value = {
                "content": "MLX response",
                "model": "Qwen/Qwen2.5-14B-Instruct-MLX",
            }

            result = await router.complete(
                messages=[{"role": "user", "content": "Hi"}],
            )

        assert result["content"] == "MLX response"
        assert result["provider"] == "vllm-mlx-local"
        assert result["model"] == "Qwen/Qwen2.5-14B-Instruct-MLX"

    async def test_vllm_mlx_base_url_normalization(self):
        """Test _call_vllm_mlx appends /v1 when missing."""
        from unittest.mock import AsyncMock, MagicMock, patch

        router = CascadeRouter(config_path=Path("/nonexistent"))

        provider = Provider(
            name="vllm-mlx-local",
            type="vllm_mlx",
            enabled=True,
            priority=2,
            base_url="http://localhost:8000",  # No /v1
            models=[{"name": "qwen-mlx", "default": True}],
        )

        mock_choice = MagicMock()
        mock_choice.message.content = "hello"
        mock_response = MagicMock()
        mock_response.choices = [mock_choice]
        mock_response.model = "qwen-mlx"

        async def fake_create(**kwargs):
            return mock_response

        with patch("openai.AsyncOpenAI") as mock_openai_cls:
            mock_client = MagicMock()
            mock_client.chat.completions.create = AsyncMock(side_effect=fake_create)
            mock_openai_cls.return_value = mock_client

            await router._call_vllm_mlx(
                provider=provider,
                messages=[{"role": "user", "content": "hi"}],
                model="qwen-mlx",
                temperature=0.7,
                max_tokens=None,
            )

        call_kwargs = mock_openai_cls.call_args
        base_url_used = call_kwargs.kwargs.get("base_url") or call_kwargs[1].get(
            "base_url"
        )
        assert base_url_used.endswith("/v1")

    async def test_vllm_mlx_is_local_not_cloud(self):
        """Confirm vllm_mlx is not subject to metabolic protocol cloud skip."""
        from unittest.mock import patch

        router = CascadeRouter(config_path=Path("/nonexistent"))

        provider = Provider(
            name="vllm-mlx-local",
            type="vllm_mlx",
            enabled=True,
            priority=2,
            base_url="http://localhost:8000/v1",
            models=[{"name": "qwen-mlx", "default": True}],
        )
        router.providers = [provider]

        # Quota monitor returns False (block cloud) — vllm_mlx should still be tried
        with patch("infrastructure.router.cascade._quota_monitor") as mock_qm:
            mock_qm.check.return_value = object()
            mock_qm.should_use_cloud.return_value = False

            with patch.object(router, "_call_vllm_mlx") as mock_call:
                mock_call.return_value = {
                    "content": "Local MLX response",
                    "model": "qwen-mlx",
                }
                result = await router.complete(
                    messages=[{"role": "user", "content": "hi"}],
                )

        assert result["content"] == "Local MLX response"


class TestCascadeRouterReload:
    """Test hot-reload of providers.yaml."""