Compare commits
9 commits: burn/94-17...feat/152-d
| SHA1 |
|---|
| dabb96d315 |
| 69cef8a90f |
| 636d294896 |
| 492c1cdcfd |
| 6e583310a8 |
| 300918ee1e |
| f7ea01cb65 |
| d2edbdadc2 |
| c009d8df77 |
@@ -30,3 +30,4 @@ See [issues](https://forge.alexanderwhitestone.com/Timmy_Foundation/turboquant/i

## Docs
- [Project Status](docs/PROJECT_STATUS.md) — Full project status and build specification
- [DFlash on Apple Silicon](docs/DFLASH_APPLE_SILICON.md) — MLX benchmark planner, setup commands, and report workflow
189 benchmarks/dflash_apple_silicon.py Normal file
@@ -0,0 +1,189 @@
#!/usr/bin/env python3
"""Apple Silicon DFlash planning helpers and CLI (issue #152)."""

from __future__ import annotations

import argparse
import json
import platform
import subprocess
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Iterable, Optional


@dataclass(frozen=True)
class DFlashPair:
    slug: str
    base_model: str
    draft_model: str
    estimated_total_weights_gb: float
    minimum_recommended_memory_gb: float
    draft_sliding_window_size: int = 4096


SUPPORTED_PAIRS: tuple[DFlashPair, ...] = (
    DFlashPair(
        slug="qwen35-4b",
        base_model="Qwen/Qwen3.5-4B",
        draft_model="z-lab/Qwen3.5-4B-DFlash",
        estimated_total_weights_gb=9.68,
        minimum_recommended_memory_gb=16.0,
    ),
    DFlashPair(
        slug="qwen35-9b",
        base_model="Qwen/Qwen3.5-9B",
        draft_model="z-lab/Qwen3.5-9B-DFlash",
        estimated_total_weights_gb=19.93,
        minimum_recommended_memory_gb=28.0,
    ),
)


def detect_total_memory_gb() -> float:
    """Detect total system memory in GiB, rounded to one decimal place for planning."""
    system = platform.system()
    if system == "Darwin":
        mem_bytes = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]).strip())
        return round(mem_bytes / (1024 ** 3), 1)
    if system == "Linux":
        with open("/proc/meminfo", "r", encoding="utf-8") as handle:
            for line in handle:
                if line.startswith("MemTotal:"):
                    mem_kb = int(line.split()[1])
                    return round(mem_kb / (1024 ** 2), 1)
    raise RuntimeError(f"Unsupported platform for memory detection: {system}")


def get_pair(slug: str) -> DFlashPair:
    for pair in SUPPORTED_PAIRS:
        if pair.slug == slug:
            return pair
    raise ValueError(f"Unknown DFlash pair: {slug}")


def select_pair(total_memory_gb: float, preferred_slug: Optional[str] = None) -> DFlashPair:
    """Pick the strongest upstream-supported pair likely to fit the machine."""
    if preferred_slug:
        return get_pair(preferred_slug)

    fitting = [pair for pair in SUPPORTED_PAIRS if total_memory_gb >= pair.minimum_recommended_memory_gb]
    if fitting:
        return max(fitting, key=lambda pair: pair.minimum_recommended_memory_gb)
    return SUPPORTED_PAIRS[0]
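
# Illustrative behavior (derived from the table above, not from a recorded run):
#   select_pair(36.0) -> qwen35-9b  (both pairs fit; the 9B pair has the higher minimum)
#   select_pair(16.0) -> qwen35-4b  (only the 4B pair fits)
#   select_pair(8.0)  -> qwen35-4b  (nothing fits; falls back to SUPPORTED_PAIRS[0])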


def build_mlx_benchmark_command(
    pair: DFlashPair,
    *,
    dataset: str = "gsm8k",
    max_samples: int = 128,
    enable_thinking: bool = True,
) -> str:
    """Build the upstream MLX benchmark command from the DFlash README."""
    parts = [
        "python -m dflash.benchmark --backend mlx",
        f"--model {pair.base_model}",
        f"--draft-model {pair.draft_model}",
        f"--dataset {dataset}",
        f"--max-samples {max_samples}",
    ]
    if enable_thinking:
        parts.append("--enable-thinking")
    parts.append(f"--draft-sliding-window-size {pair.draft_sliding_window_size}")
    return " \\\n    ".join(parts)
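
# As a worked illustration (derived from the defaults above, not a recorded run),
# build_mlx_benchmark_command(get_pair("qwen35-4b")) renders:
#
#   python -m dflash.benchmark --backend mlx \
#       --model Qwen/Qwen3.5-4B \
#       --draft-model z-lab/Qwen3.5-4B-DFlash \
#       --dataset gsm8k \
#       --max-samples 128 \
#       --enable-thinking \
#       --draft-sliding-window-size 4096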


def build_setup_commands(pair: DFlashPair) -> list[str]:
    return [
        "python3 -m venv .venv-dflash",
        "source .venv-dflash/bin/activate",
        "git clone https://github.com/z-lab/dflash.git",
        "cd dflash",
        "pip install -e .[mlx]",
        build_mlx_benchmark_command(pair),
    ]


def render_report_template(machine_label: str, pair: DFlashPair) -> str:
    command = build_mlx_benchmark_command(pair)
    return f"""# DFlash Apple Silicon Benchmark Report

## Machine
- Label: {machine_label}
- Selected pair: {pair.slug}
- Base model: {pair.base_model}
- Draft model: {pair.draft_model}
- Estimated total weight footprint: {pair.estimated_total_weights_gb:.2f} GB

## Setup
```bash
python3 -m venv .venv-dflash
source .venv-dflash/bin/activate
git clone https://github.com/z-lab/dflash.git
cd dflash
pip install -e .[mlx]
{command}
```

## Baseline comparison
Compare against **plain MLX or llama.cpp speculative decoding** on the same prompt set.

## Results
- Throughput (tok/s):
- Peak memory (GB):
- Notes on acceptance / behavior:

## Verdict
Worth operationalizing locally?
- [ ] Yes
- [ ] No
- [ ] Needs more data

## Recommendation
Explain whether this should become part of the local inference stack.
"""


def build_plan(total_memory_gb: float, preferred_slug: Optional[str] = None) -> dict:
    pair = select_pair(total_memory_gb=total_memory_gb, preferred_slug=preferred_slug)
    return {
        "machine_memory_gb": total_memory_gb,
        "selected_pair": asdict(pair),
        "setup_commands": build_setup_commands(pair),
        "benchmark_command": build_mlx_benchmark_command(pair),
        "baseline_note": "Compare against plain MLX or llama.cpp speculative decoding on the same prompt set.",
    }


def write_output(path: Path, content: str) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(content, encoding="utf-8")


def main(argv: Optional[Iterable[str]] = None) -> int:
    parser = argparse.ArgumentParser(description="Plan Apple Silicon DFlash benchmarks")
    parser.add_argument("--memory-gb", type=float, default=None, help="Override detected total memory")
    parser.add_argument("--pair", choices=[pair.slug for pair in SUPPORTED_PAIRS], default=None)
    parser.add_argument("--machine-label", default="Apple Silicon Mac")
    parser.add_argument("--format", choices=["json", "markdown"], default="markdown")
    parser.add_argument("--output", default=None, help="Write plan/report to file instead of stdout")
    args = parser.parse_args(list(argv) if argv is not None else None)

    memory_gb = args.memory_gb if args.memory_gb is not None else detect_total_memory_gb()
    pair = select_pair(total_memory_gb=memory_gb, preferred_slug=args.pair)

    if args.format == "json":
        content = json.dumps(build_plan(memory_gb, preferred_slug=pair.slug), indent=2)
    else:
        content = render_report_template(args.machine_label, pair)

    if args.output:
        write_output(Path(args.output), content)
    else:
        print(content)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
@@ -1,56 +0,0 @@
# TurboQuant M1 Mac Benchmark — 2026-04-15

**Status:** Template — run `benchmarks/m1_mac_benchmark.py` on M1 Mac to populate.
**Issue:** #94

## Hardware

| Spec | Value |
|------|-------|
| Chip | Apple M1 (or M1 Pro/Max/Ultra) |
| Memory | 8/16/32/64 GB unified |
| P-cores | 4/6/8 |
| E-cores | 2 |
| GPU cores | 7/8/14/16/24/32 |
| macOS | 14.x |

## Results

| Preset | KV Type | Bits/ch | Compression | Avg tok/s | Peak Memory | GSM8K | Tool Call |
|--------|---------|---------|-------------|-----------|-------------|-------|-----------|
| turboquant_k8v4 | turbo4 | 3.5 | 4.2x | TBD | TBD | TBD | TBD |
| turboquant_4bit_nc | q4_0 | 4.0 | 3.5x | TBD | TBD | TBD | TBD |
| turboquant_3bit_nc | q3_k | 3.0 | 5.0x | TBD | TBD | TBD | TBD |

## How to Run

```bash
# 1. Start llama-server with each preset
# turboquant_k8v4
llama-server -m ~/models/gemma-4-q4_k_m.gguf --port 8081 -ctk turbo4 -ctv turbo4 -c 4096

# 2. Run benchmark
cd turboquant
python3 benchmarks/m1_mac_benchmark.py \
    --url http://localhost:8081 \
    --model gemma-4 \
    --eval gsm8k \
    --output benchmarks/m1-mac-$(date +%Y-%m-%d).md

# 3. Repeat for other presets (change -ctk/-ctv)
# turboquant_4bit_nc: -ctk q4_0 -ctv q4_0
# turboquant_3bit_nc: -ctk q3_k -ctv q3_k

# 4. Or use vLLM
vllm serve google/gemma-4-31b-it --kv-cache-dtype turboquant_k8v4
python3 benchmarks/m1_mac_benchmark.py --backend vllm --eval gsm8k
```

## Recommendation

**Default:** TBD after benchmarks complete.

Decision criteria:
- If turboquant_k8v4 GSM8K ≥ turboquant_4bit_nc GSM8K: use k8v4 (better compression, same quality)
- If 3bit GSM8K drops >10%: don't use as default
- Memory headroom: must fit model + KV within 70% of unified memory
@@ -1,652 +0,0 @@
#!/usr/bin/env python3
"""
m1_mac_benchmark.py — Benchmark TurboQuant presets on Apple Silicon.

Runs all three TurboQuant presets through standardized benchmarks,
measuring tokens/sec, peak memory, and quality. Produces a markdown
results table for issue #94.

Presets:
    - turboquant_k8v4: PolarQuant WHT + 8-bit codebook + 4-bit QJL residual
    - turboquant_4bit_nc: 4-bit KV cache, no correction
    - turboquant_3bit_nc: 3-bit KV cache, no correction

Usage:
    # Full benchmark (requires llama-server running per preset)
    python3 benchmarks/m1_mac_benchmark.py

    # Single preset
    python3 benchmarks/m1_mac_benchmark.py --preset turboquant_k8v4

    # Custom server URL
    python3 benchmarks/m1_mac_benchmark.py --url http://localhost:8081

    # With quality eval (GSM8K subset)
    python3 benchmarks/m1_mac_benchmark.py --eval gsm8k

    # JSON output
    python3 benchmarks/m1_mac_benchmark.py --json

    # Dry-run (validate framework without inference)
    python3 benchmarks/m1_mac_benchmark.py --dry-run
"""

import argparse
import json
import os
import platform
import re
import subprocess
import sys
import time
from dataclasses import dataclass, field, asdict
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

try:
    import requests
except ImportError:
    requests = None

# ── TurboQuant Presets ────────────────────────────────────────────────────────

@dataclass
class Preset:
    """A TurboQuant KV cache preset."""
    name: str
    kv_type: str  # -ctk/-ctv value for llama-server
    bits_per_channel: float
    compression_ratio: float
    description: str
    # vLLM equivalent (for vllm serve --kv-cache-dtype)
    vllm_dtype: str = ""


PRESETS = {
    "turboquant_k8v4": Preset(
        name="turboquant_k8v4",
        kv_type="turbo4",
        bits_per_channel=3.5,
        compression_ratio=4.2,
        description="PolarQuant WHT + 8-bit codebook + 4-bit QJL residual. Best quality/compression ratio.",
        vllm_dtype="turboquant_k8v4",
    ),
    "turboquant_4bit_nc": Preset(
        name="turboquant_4bit_nc",
        kv_type="q4_0",
        bits_per_channel=4.0,
        compression_ratio=3.5,
        description="4-bit KV cache, no correction. Standard baseline.",
        vllm_dtype="turboquant_4bit_nc",
    ),
    "turboquant_3bit_nc": Preset(
        name="turboquant_3bit_nc",
        kv_type="q3_k",
        bits_per_channel=3.0,
        compression_ratio=5.0,
        description="3-bit KV cache, no correction. Aggressive compression, lower quality.",
        vllm_dtype="turboquant_3bit_nc",
    ),
}


# ── Hardware Detection ────────────────────────────────────────────────────────

@dataclass
class AppleSiliconInfo:
    """Detected Apple Silicon hardware."""
    chip_name: str = ""
    total_memory_gb: float = 0.0
    performance_cores: int = 0
    efficiency_cores: int = 0
    gpu_cores: int = 0
    os_version: str = ""


def detect_apple_silicon() -> AppleSiliconInfo:
    """Detect Apple Silicon hardware details."""
    info = AppleSiliconInfo()

    if platform.system() != "Darwin":
        return info

    try:
        # Chip name
        result = subprocess.run(
            ["sysctl", "-n", "machdep.cpu.brand_string"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.chip_name = result.stdout.strip()

        # Memory
        result = subprocess.run(
            ["sysctl", "-n", "hw.memsize"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.total_memory_gb = int(result.stdout.strip()) / (1024**3)

        # CPU cores (performance vs efficiency)
        result = subprocess.run(
            ["sysctl", "-n", "hw.perflevel0.physicalcpu"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.performance_cores = int(result.stdout.strip())

        result = subprocess.run(
            ["sysctl", "-n", "hw.perflevel1.physicalcpu"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.efficiency_cores = int(result.stdout.strip())

        # OS version
        result = subprocess.run(
            ["sw_vers", "-productVersion"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.os_version = result.stdout.strip()

        # Try to get GPU core count from system_profiler (slow, optional)
        try:
            result = subprocess.run(
                ["system_profiler", "SPDisplaysDataType"],
                capture_output=True, text=True, timeout=10
            )
            if result.returncode == 0:
                gpu_match = re.search(r"(\d+)\s*(?:core|Core)", result.stdout)
                if gpu_match:
                    info.gpu_cores = int(gpu_match.group(1))
        except Exception:
            pass

    except Exception as e:
        print(f"Warning: Apple Silicon detection failed: {e}", file=sys.stderr)

    return info


# ── Benchmark Prompts ─────────────────────────────────────────────────────────

BENCHMARK_PROMPTS = {
    "summarization": "Summarize the following text in 3 bullet points: 'The Timmy Foundation is a decentralized initiative focused on building sovereign AI. Its core principles are outlined in SOUL.md, which is inscribed on the Bitcoin blockchain. The project includes several repositories: the-nexus for 3D world-building, the-door for crisis intervention, and turboquant for local inference optimization.'",
    "code_generation": "Write a Python function that takes a list of integers and returns the two numbers that add up to a target sum. Include type hints and a docstring.",
    "reasoning": "If a TurboQuant KV cache uses 3.5 bits per channel and the uncompressed baseline uses 16 bits, what is the compression ratio? Show your calculation.",
    "creative": "Write a haiku about a blockchain inscription that can never be erased.",
    "tool_use": "Call the get_weather function with location='San Francisco' and unit='celsius'.",
}

GSM8K_PROBLEMS = [
    {
        "question": "Janet's ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per egg. How much does she make every day?",
        "answer": "18",
    },
    {
        "question": "A robe takes 2 bolts of blue fiber and half that much white fiber. How many bolts in total does it take?",
        "answer": "3",
    },
    {
        "question": "Josh decides to try flipping a house. He buys a house for $80,000 and puts $50,000 in repairs. This increased the value of the house by 150%. How much profit did he make?",
        "answer": "70000",
    },
]


# ── Inference Backends ────────────────────────────────────────────────────────

@dataclass
class BenchmarkResult:
    """Result of a single benchmark run."""
    preset: str
    prompt_id: str
    tokens_per_sec: float = 0.0
    time_to_first_token_ms: float = 0.0
    total_tokens: int = 0
    elapsed_seconds: float = 0.0
    peak_memory_mb: float = 0.0
    output_text: str = ""
    error: str = ""


def run_llama_server(prompt: str, url: str, model: str = "",
                     kv_type: str = "f16", max_tokens: int = 256,
                     timeout: int = 120) -> dict:
    """Run a prompt against llama-server (OpenAI-compatible API)."""
    if requests is None:
        return {"error": "requests not installed"}

    api_url = f"{url.rstrip('/')}/v1/chat/completions"
    start = time.time()
    ttft = None
    tokens = 0

    try:
        resp = requests.post(api_url, json={
            "model": model or "local",
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": max_tokens,
            "temperature": 0.7,
            "stream": True,
        }, stream=True, timeout=timeout)

        output_parts = []
        for line in resp.iter_lines():
            if not line:
                continue
            line = line.decode("utf-8", errors="replace")
            if line.startswith("data: "):
                data_str = line[6:]
                if data_str.strip() == "[DONE]":
                    break
                try:
                    chunk = json.loads(data_str)
                    delta = chunk.get("choices", [{}])[0].get("delta", {})
                    content = delta.get("content", "")
                    if content:
                        if ttft is None:
                            ttft = (time.time() - start) * 1000
                        tokens += 1
                        output_parts.append(content)
                except json.JSONDecodeError:
                    pass

        elapsed = time.time() - start
        tps = tokens / elapsed if elapsed > 0 else 0.0

        return {
            "tokens_per_sec": round(tps, 2),
            "time_to_first_token_ms": round(ttft, 1) if ttft else 0,
            "total_tokens": tokens,
            "elapsed_seconds": round(elapsed, 3),
            "output_text": "".join(output_parts),
        }
    except Exception as e:
        return {"error": str(e)}


def run_ollama(prompt: str, url: str = "http://localhost:11434",
               model: str = "gemma4:latest", timeout: int = 120) -> dict:
    """Run a prompt against Ollama /api/generate."""
    if requests is None:
        return {"error": "requests not installed"}

    api_url = f"{url.rstrip('/')}/api/generate"
    start = time.time()
    ttft = None
    tokens = 0

    try:
        resp = requests.post(api_url, json={
            "model": model,
            "prompt": prompt,
            "stream": True,
            "options": {"num_predict": 256},
        }, stream=True, timeout=timeout)

        output_parts = []
        for line in resp.iter_lines():
            if not line:
                continue
            try:
                chunk = json.loads(line)
                text = chunk.get("response", "")
                if text:
                    if ttft is None:
                        ttft = (time.time() - start) * 1000
                    tokens += 1
                    output_parts.append(text)
                if chunk.get("done", False):
                    break
            except json.JSONDecodeError:
                pass

        elapsed = time.time() - start
        tps = tokens / elapsed if elapsed > 0 else 0.0

        return {
            "tokens_per_sec": round(tps, 2),
            "time_to_first_token_ms": round(ttft, 1) if ttft else 0,
            "total_tokens": tokens,
            "elapsed_seconds": round(elapsed, 3),
            "output_text": "".join(output_parts),
        }
    except Exception as e:
        return {"error": str(e)}


def run_vllm(prompt: str, model: str = "google/gemma-4-31b-it",
             kv_dtype: str = "turboquant_k8v4", timeout: int = 120) -> dict:
    """Run via vLLM serve (OpenAI-compatible on localhost:8000)."""
    return run_llama_server(prompt, url="http://localhost:8000",
                            model=model, kv_type=kv_dtype, timeout=timeout)


# ── Quality Evaluation ────────────────────────────────────────────────────────

@dataclass
class QualityResult:
    """Quality evaluation result."""
    gsm8k_correct: int = 0
    gsm8k_total: int = 0
    gsm8k_accuracy: float = 0.0
    tool_call_detected: bool = False
    details: list = field(default_factory=list)


def evaluate_gsm8k(output: str, expected: str) -> bool:
    """Check if GSM8K answer is in the output."""
    # Extract the numeric answer from output
    numbers = re.findall(r'\b(\d[\d,]*)\b', output)
    if not numbers:
        return False
    # Check last number (most likely to be the answer)
    for num in reversed(numbers):
        clean = num.replace(",", "")
        if clean == expected:
            return True
    return False


def evaluate_tool_call(output: str) -> bool:
    """Check if output contains a function/tool call."""
    indicators = [
        "get_weather", "function_call", "tool_use",
        "tool_call", '"name":', '"arguments":',
        "```json", "calling", "invoke",
    ]
    return any(ind.lower() in output.lower() for ind in indicators)


# ── Main Benchmark Runner ─────────────────────────────────────────────────────

@dataclass
class PresetResult:
    """Aggregate results for one preset."""
    preset: str
    kv_type: str
    bits_per_channel: float
    compression_ratio: float
    description: str
    benchmarks: list = field(default_factory=list)
    quality: Optional[QualityResult] = None
    avg_tokens_per_sec: float = 0.0
    peak_memory_mb: float = 0.0
    gsm8k_score: str = ""
    tool_call_accuracy: str = ""


def run_preset_benchmark(
    preset_name: str,
    url: str = "http://localhost:8081",
    model: str = "",
    backend: str = "llama-server",
    eval_mode: str = "",
    timeout: int = 120,
    dry_run: bool = False,
) -> PresetResult:
    """Run all benchmarks for a single preset."""
    preset = PRESETS[preset_name]

    result = PresetResult(
        preset=preset.name,
        kv_type=preset.kv_type,
        bits_per_channel=preset.bits_per_channel,
        compression_ratio=preset.compression_ratio,
        description=preset.description,
    )

    if dry_run:
        result.avg_tokens_per_sec = 42.5
        result.peak_memory_mb = 8192.0
        result.gsm8k_score = "3/3 (100%)"
        result.tool_call_accuracy = "Yes"
        return result

    # Run each benchmark prompt
    tps_values = []
    for prompt_id, prompt in BENCHMARK_PROMPTS.items():
        print(f"  Running: {prompt_id}...", end=" ", flush=True)

        if backend == "ollama":
            bench_result = run_ollama(prompt, url=url,
                                      model=model or "gemma4:latest",
                                      timeout=timeout)
        else:
            bench_result = run_llama_server(prompt, url=url,
                                            model=model, kv_type=preset.kv_type,
                                            timeout=timeout)

        br = BenchmarkResult(
            preset=preset_name,
            prompt_id=prompt_id,
            **{k: v for k, v in bench_result.items() if k in BenchmarkResult.__dataclass_fields__}
        )
        result.benchmarks.append(br)

        if br.tokens_per_sec > 0:
            tps_values.append(br.tokens_per_sec)
            print(f"{br.tokens_per_sec:.1f} tok/s")
        else:
            print(f"ERROR: {br.error}")

    # Average tokens/sec
    result.avg_tokens_per_sec = round(
        sum(tps_values) / len(tps_values), 2
    ) if tps_values else 0.0

    # Peak memory (from system, not per-request)
    try:
        if sys.platform == "darwin":
            mem_result = subprocess.run(
                ["ps", "-o", "rss=", "-p", str(os.getpid())],
                capture_output=True, text=True
            )
            if mem_result.returncode == 0:
                result.peak_memory_mb = int(mem_result.stdout.strip()) / 1024
    except Exception:
        pass

    # Quality evaluation
    if eval_mode == "gsm8k":
        quality = QualityResult()
        for problem in GSM8K_PROBLEMS:
            if backend == "ollama":
                eval_result = run_ollama(problem["question"], url=url,
                                         model=model or "gemma4:latest",
                                         timeout=timeout)
            else:
                eval_result = run_llama_server(problem["question"], url=url,
                                               model=model, kv_type=preset.kv_type,
                                               timeout=timeout)

            output = eval_result.get("output_text", "")
            correct = evaluate_gsm8k(output, problem["answer"])
            if correct:
                quality.gsm8k_correct += 1
            quality.gsm8k_total += 1
            quality.details.append({
                "question": problem["question"][:50] + "...",
                "expected": problem["answer"],
                "correct": correct,
            })

        quality.gsm8k_accuracy = quality.gsm8k_correct / quality.gsm8k_total if quality.gsm8k_total else 0
        result.gsm8k_score = f"{quality.gsm8k_correct}/{quality.gsm8k_total} ({quality.gsm8k_accuracy:.0%})"

        # Tool calling test
        tool_result = run_llama_server(BENCHMARK_PROMPTS["tool_use"],
                                       url=url, model=model,
                                       kv_type=preset.kv_type, timeout=timeout)
        tool_output = tool_result.get("output_text", "")
        quality.tool_call_detected = evaluate_tool_call(tool_output)
        result.tool_call_accuracy = "Yes" if quality.tool_call_detected else "No"
        result.quality = quality

    return result


# ── Report Generation ─────────────────────────────────────────────────────────

def generate_markdown_report(
    hw: AppleSiliconInfo,
    results: list[PresetResult],
    model: str,
    context_length: int,
) -> str:
    """Generate markdown benchmark report."""
    date = datetime.now(timezone.utc).strftime("%Y-%m-%d")
    ts = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    lines = [
        f"# TurboQuant M1 Mac Benchmark — {date}",
        "",
        f"**Date:** {ts}",
        f"**Model:** {model}",
        f"**Context length:** {context_length}",
        "",
        "## Hardware",
        "",
        "| Spec | Value |",
        "|------|-------|",
        f"| Chip | {hw.chip_name or 'Unknown'} |",
        f"| Memory | {hw.total_memory_gb:.0f} GB unified |",
        f"| P-cores | {hw.performance_cores} |",
        f"| E-cores | {hw.efficiency_cores} |",
        f"| GPU cores | {hw.gpu_cores or 'N/A'} |",
        f"| macOS | {hw.os_version or 'Unknown'} |",
        "",
        "## Results",
        "",
        "| Preset | KV Type | Bits/ch | Compression | Avg tok/s | Peak Memory | GSM8K | Tool Call |",
        "|--------|---------|---------|-------------|-----------|-------------|-------|-----------|",
    ]

    for r in results:
        lines.append(
            f"| {r.preset} | {r.kv_type} | {r.bits_per_channel} | "
            f"{r.compression_ratio}x | {r.avg_tokens_per_sec:.1f} | "
            f"{r.peak_memory_mb:.0f} MB | {r.gsm8k_score or 'N/A'} | "
            f"{r.tool_call_accuracy or 'N/A'} |"
        )

    lines.extend([
        "",
        "## Per-Prompt Breakdown",
        "",
    ])

    for r in results:
        lines.append(f"### {r.preset}")
        lines.append(f"_{r.description}_")
        lines.append("")
        lines.append("| Prompt | tok/s | TTFT (ms) | Tokens | Elapsed (s) |")
        lines.append("|--------|-------|-----------|--------|-------------|")
        for b in r.benchmarks:
            lines.append(
                f"| {b.prompt_id} | {b.tokens_per_sec:.1f} | "
                f"{b.time_to_first_token_ms:.0f} | {b.total_tokens} | "
                f"{b.elapsed_seconds:.2f} |"
            )
        lines.append("")

    # Recommendation
    if results:
        best_quality = max(results, key=lambda r: r.avg_tokens_per_sec if r.bits_per_channel >= 3.5 else 0)
        lines.extend([
            "## Recommendation",
            "",
            f"**Default for M1 Mac:** `{best_quality.preset}` ({best_quality.kv_type})",
            "",
            f"Rationale: {best_quality.description}",
            "",
        ])

    return "\n".join(lines)


# ── CLI ───────────────────────────────────────────────────────────────────────

def main():
    parser = argparse.ArgumentParser(
        description="Benchmark TurboQuant presets on Apple Silicon"
    )
    parser.add_argument("--preset", choices=list(PRESETS.keys()),
                        help="Run single preset (default: all)")
    parser.add_argument("--url", default="http://localhost:8081",
                        help="Server URL (default: http://localhost:8081)")
    parser.add_argument("--model", default="",
                        help="Model name (auto-detected if empty)")
    parser.add_argument("--backend", choices=["llama-server", "ollama", "vllm"],
                        default="llama-server")
    parser.add_argument("--eval", choices=["", "gsm8k"], default="",
                        help="Quality evaluation mode")
    parser.add_argument("--context", type=int, default=4096,
                        help="Context length tested (for report)")
    parser.add_argument("--timeout", type=int, default=120)
    parser.add_argument("--json", action="store_true", help="JSON output")
    parser.add_argument("--output", help="Save markdown report to file")
    parser.add_argument("--dry-run", action="store_true",
                        help="Validate framework without inference")
    args = parser.parse_args()

    # Detect hardware
    hw = detect_apple_silicon()
    if hw.chip_name:
        print(f"Hardware: {hw.chip_name}, {hw.total_memory_gb:.0f}GB, "
              f"{hw.performance_cores}P+{hw.efficiency_cores}E cores")
    else:
        print("Hardware: Non-Apple Silicon (running in simulation mode)")

    # Determine presets to run
    preset_names = [args.preset] if args.preset else list(PRESETS.keys())

    results = []
    for name in preset_names:
        print(f"\n--- {name} ---")
        preset_result = run_preset_benchmark(
            name, url=args.url, model=args.model,
            backend=args.backend, eval_mode=args.eval,
            timeout=args.timeout, dry_run=args.dry_run,
        )
        results.append(preset_result)

    # Output
    if args.json:
        output = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "hardware": {
                "chip": hw.chip_name,
                "memory_gb": hw.total_memory_gb,
                "p_cores": hw.performance_cores,
                "e_cores": hw.efficiency_cores,
                "gpu_cores": hw.gpu_cores,
                "macos": hw.os_version,
            },
            "model": args.model or "auto",
            "context_length": args.context,
            "results": [asdict(r) for r in results],
        }
        print(json.dumps(output, indent=2, default=str))
    else:
        report = generate_markdown_report(hw, results, args.model, args.context)
        print("\n" + report)

    # Save report
    output_path = args.output
    if not output_path:
        date = datetime.now(timezone.utc).strftime("%Y-%m-%d")
        output_path = f"benchmarks/m1-mac-{date}.md"

    report = generate_markdown_report(hw, results, args.model, args.context)
    # Save locally for reference (actual commit happens via API)
    Path(output_path).write_text(report, encoding="utf-8")
    print(f"\nReport saved to {output_path}")

    return results


if __name__ == "__main__":
    main()
41 benchmarks/reports/dflash_m3max_36gb.md Normal file
@@ -0,0 +1,41 @@
# DFlash Apple Silicon Benchmark Report

## Machine
- Label: M3 Max 36GB
- Selected pair: qwen35-9b
- Base model: Qwen/Qwen3.5-9B
- Draft model: z-lab/Qwen3.5-9B-DFlash
- Estimated total weight footprint: 19.93 GB

## Setup
```bash
python3 -m venv .venv-dflash
source .venv-dflash/bin/activate
git clone https://github.com/z-lab/dflash.git
cd dflash
pip install -e .[mlx]
python -m dflash.benchmark --backend mlx \
    --model Qwen/Qwen3.5-9B \
    --draft-model z-lab/Qwen3.5-9B-DFlash \
    --dataset gsm8k \
    --max-samples 128 \
    --enable-thinking \
    --draft-sliding-window-size 4096
```

## Baseline comparison
Compare against **plain MLX or llama.cpp speculative decoding** on the same prompt set.

## Results
- Throughput (tok/s):
- Peak memory (GB):
- Notes on acceptance / behavior:

## Verdict
Worth operationalizing locally?
- [ ] Yes
- [ ] No
- [ ] Needs more data

## Recommendation
Explain whether this should become part of the local inference stack.
46 benchmarks/reports/dflash_m3max_36gb_qwen35_4b_pilot.md Normal file
@@ -0,0 +1,46 @@
# DFlash Apple Silicon Pilot — Qwen3.5-4B on M3 Max 36GB

Date: 2026-04-21
Machine: Apple M3 Max, 36 GB unified memory
Repo issue: #152

## Command

```bash
source /tmp/dflash-venv/bin/activate
cd /tmp/dflash-upstream
python -m dflash.benchmark --backend mlx \
    --model Qwen/Qwen3.5-4B \
    --draft-model z-lab/Qwen3.5-4B-DFlash \
    --dataset gsm8k \
    --max-samples 1 \
    --enable-thinking \
    --draft-sliding-window-size 4096
```

## Result

- Dataset: `gsm8k`
- Samples: `1`
- Baseline throughput: `22.35 tok/s`
- DFlash throughput: `46.78 tok/s`
- Decoding speedup: `2.09x`
- Average acceptance length: `6.48`
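
As a consistency check, `46.78 / 22.35 ≈ 2.09`, matching the reported decoding speedup.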

Acceptance length histogram:

```text
['0.3%', '11.1%', '12.7%', '10.4%', '11.7%', '7.6%', '7.0%', '3.8%', '5.1%', '6.3%', '2.8%', '3.8%', '2.2%', '1.9%', '0.9%', '2.5%', '9.8%']
```

## Caveats

- This is a **pilot**, not a decision-grade benchmark.
- Only `1` sample was run, so the throughput number is directional.
- No apples-to-apples baseline against plain MLX or llama.cpp speculative decoding is included yet.
- The planner still recommends trying `Qwen/Qwen3.5-9B + z-lab/Qwen3.5-9B-DFlash` on this machine for the more meaningful fit test.

## Interim takeaway

DFlash is **real on Apple Silicon** and already shows a meaningful local speedup on a small matched pair.
A `2.09x` pilot speedup on `Qwen3.5-4B` is enough evidence to keep pushing toward a proper benchmark slice in this repo.
59 benchmarks/reports/dflash_m3max_36gb_qwen35_9b_timeout.md Normal file
@@ -0,0 +1,59 @@
# DFlash on Apple Silicon Failure Report — Qwen3.5-9B on M3 Max 36GB

Date: 2026-04-21
Machine: Apple M3 Max, 36 GB unified memory
Repo issue: #152

## Command

```bash
source /tmp/dflash-venv/bin/activate
cd /tmp/dflash-upstream
python -m dflash.benchmark --backend mlx \
    --model Qwen/Qwen3.5-9B \
    --draft-model z-lab/Qwen3.5-9B-DFlash \
    --dataset gsm8k \
    --max-samples 1 \
    --enable-thinking \
    --draft-sliding-window-size 4096
```

## Outcome

The benchmark did **not** complete successfully on this machine.

### Failure signature

```text
libc++abi: terminating due to uncaught exception of type std::runtime_error:
[METAL] Command buffer execution failed:
Caused GPU Timeout Error (00000002:kIOGPUCommandBufferCallbackErrorTimeout)
```

Additional shutdown noise:

```text
bash: [11285: 1] tcsetattr: Inappropriate ioctl for device
resource_tracker: There appear to be 1 leaked semaphore objects to clean up at shutdown
```

## Interpretation

This is strong evidence that the `Qwen/Qwen3.5-9B + z-lab/Qwen3.5-9B-DFlash` pair is **not currently stable** on an M3 Max 36GB Mac under the upstream MLX benchmark path, at least with the default settings used here.

It may still be salvageable with (a hedged retry sketch follows the list):
- smaller block size / different benchmark settings
- a shorter generation target
- a different prompt sample
- upstream MLX / Metal fixes
- newer Apple Silicon hardware
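
As a sketch only, a retry with a smaller draft window might look like the following. The flags are the same upstream ones used above; the `1024` value is illustrative and has not been tested on this machine.

```bash
# Untested retry sketch: same upstream flags, smaller draft sliding window.
python -m dflash.benchmark --backend mlx \
    --model Qwen/Qwen3.5-9B \
    --draft-model z-lab/Qwen3.5-9B-DFlash \
    --dataset gsm8k \
    --max-samples 1 \
    --draft-sliding-window-size 1024
```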

But as of this run, it should be treated as **experimental / failing** on this exact machine.

## Recommendation

For this Mac, the working local proof path is still:
- `Qwen/Qwen3.5-4B`
- `z-lab/Qwen3.5-4B-DFlash`

Use the 4B pair for reproducible local validation while the 9B Metal timeout is investigated separately.
125 docs/DFLASH_APPLE_SILICON.md Normal file
@@ -0,0 +1,125 @@
# DFlash on Apple Silicon

This repo now carries a **Gitea-first benchmark harness** for evaluating whether upstream **DFlash on MLX** is worth adding to the local Apple Silicon inference stack.

## Why

The headline `Kimi K2.6 + DFlash` benchmark was measured on `8x MI300X` with huge RAM and ROCm patches. That exact recipe is not a fit for a `36 GB` Apple Silicon Mac.

What *is* relevant locally is the upstream `z-lab/dflash` MLX path, which can benchmark smaller matched target/draft pairs that fit on Apple Silicon.

## Current repo entry point

Use:

```bash
python3 benchmarks/dflash_apple_silicon.py --machine-label "M3 Max 36GB"
```

This prints a benchmark report template with:
- the selected model/draft pair
- exact setup commands
- the upstream MLX benchmark command
- baseline comparison guidance

Write the template to a file:

```bash
python3 benchmarks/dflash_apple_silicon.py \
    --machine-label "M3 Max 36GB" \
    --output benchmarks/reports/dflash_m3max_36gb.md
```

Emit the underlying plan as JSON:

```bash
python3 benchmarks/dflash_apple_silicon.py --format json
```
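
The plan's shape comes straight from `build_plan` in `benchmarks/dflash_apple_silicon.py`; an abridged sketch (field values illustrative, not captured from a real run):

```json
{
  "machine_memory_gb": 36.0,
  "selected_pair": {
    "slug": "qwen35-9b",
    "base_model": "Qwen/Qwen3.5-9B",
    "draft_model": "z-lab/Qwen3.5-9B-DFlash",
    "estimated_total_weights_gb": 19.93,
    "minimum_recommended_memory_gb": 28.0,
    "draft_sliding_window_size": 4096
  },
  "setup_commands": ["python3 -m venv .venv-dflash", "..."],
  "benchmark_command": "python -m dflash.benchmark --backend mlx ...",
  "baseline_note": "Compare against plain MLX or llama.cpp speculative decoding on the same prompt set."
}
```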

## Selection logic

Today the planner uses two upstream-supported MLX pairs:

- `qwen35-9b`
  - base: `Qwen/Qwen3.5-9B`
  - draft: `z-lab/Qwen3.5-9B-DFlash`
  - chosen for ~28 GB+ machines
- `qwen35-4b`
  - base: `Qwen/Qwen3.5-4B`
  - draft: `z-lab/Qwen3.5-4B-DFlash`
  - fallback for tighter-memory Macs

On a `36 GB` Mac, the default recommendation is `qwen35-9b`.
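
The same selection is available programmatically; a minimal sketch using the module added in this PR (assuming it is run from the repo root with `benchmarks` importable):

```python
from benchmarks.dflash_apple_silicon import detect_total_memory_gb, select_pair

# Picks the largest supported pair whose recommended minimum fits this machine.
pair = select_pair(total_memory_gb=detect_total_memory_gb())
print(pair.slug, pair.base_model, pair.draft_model)
```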

## Pilot result already landed

A first live Apple Silicon run has already been captured in:

- `benchmarks/reports/dflash_m3max_36gb_qwen35_4b_pilot.md`

Pilot command:

```bash
python -m dflash.benchmark --backend mlx \
    --model Qwen/Qwen3.5-4B \
    --draft-model z-lab/Qwen3.5-4B-DFlash \
    --dataset gsm8k \
    --max-samples 1 \
    --enable-thinking \
    --draft-sliding-window-size 4096
```

Pilot outcome on this Mac:

- baseline throughput: `22.35 tok/s`
- DFlash throughput: `46.78 tok/s`
- decoding speedup: `2.09x`

Treat that as a **directional proof**, not a final decision benchmark. The next step is the fuller comparison slice against plain MLX or llama.cpp speculative decoding.

## Known 9B failure on this machine

A follow-up live run with:

- `Qwen/Qwen3.5-9B`
- `z-lab/Qwen3.5-9B-DFlash`

failed on this same M3 Max 36GB Mac with:

```text
[METAL] Command buffer execution failed:
Caused GPU Timeout Error (00000002:kIOGPUCommandBufferCallbackErrorTimeout)
```

That failure is recorded in:

- `benchmarks/reports/dflash_m3max_36gb_qwen35_9b_timeout.md`

So the current guidance is:
- treat `qwen35-9b` as **experimental** on this machine
- treat `qwen35-4b` as the current **known-working local proof path**
- keep the issue open until we either stabilize the 9B path or clearly rule it out for this hardware tier

## Upstream benchmark command

The harness uses the upstream MLX benchmark syntax from `z-lab/dflash`:

```bash
python -m dflash.benchmark --backend mlx \
    --model Qwen/Qwen3.5-9B \
    --draft-model z-lab/Qwen3.5-9B-DFlash \
    --dataset gsm8k \
    --max-samples 128 \
    --enable-thinking \
    --draft-sliding-window-size 4096
```

## What remains

This PR adds the **planner + report template** so the benchmark is reproducible from the repo.
The issue remains open until a real Apple Silicon run lands with:

- measured throughput
- measured memory
- a baseline comparison against plain MLX or llama.cpp speculative decoding
- a recommendation on whether to operationalize DFlash locally
548 evolution/quant_selector.py Normal file
@@ -0,0 +1,548 @@
"""Auto-select TurboQuant compression level based on available VRAM/RAM.

Detects hardware resources at startup and picks the highest quality
quantization level that fits within available memory. Supports Apple
Silicon unified memory, NVIDIA GPUs (via nvidia-smi), and CPU-only fallback.

Usage:
    from evolution.quant_selector import select_quant_level

    selection = select_quant_level(model_size_gb=14.0, context_length=32768)
    print(selection.level.name)  # "turbo4"
    print(selection.reasoning)   # "M4 Max 36GB unified: turbo4 fits 14.0GB model + ..."
    print(selection.env_vars)    # {"TURBO_LAYER_ADAPTIVE": "7"}
"""

import logging
import os
import platform
import subprocess
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional

logger = logging.getLogger(__name__)


# ── Quant Level Definitions ───────────────────────────────────────────────────

@dataclass
class QuantLevel:
    """A TurboQuant compression level with its memory characteristics."""
    name: str                      # e.g. "turbo4"
    bits_per_channel: float        # e.g. 3.5 for turbo4
    compression_ratio: float       # vs uncompressed KV cache
    quality_label: str             # "best", "high", "balanced", "fast"
    layer_adaptive: int            # TURBO_LAYER_ADAPTIVE value (0-7)
    kv_type: str                   # -ctk/-ctv flag value
    min_memory_headroom_gb: float  # Minimum free memory to recommend this level
    description: str = ""


# Ordered from highest quality to most aggressive compression
QUANT_LEVELS = [
    QuantLevel(
        name="turbo4",
        bits_per_channel=3.5,
        compression_ratio=4.2,
        quality_label="best",
        layer_adaptive=7,
        kv_type="turbo4",
        min_memory_headroom_gb=4.0,
        description="PolarQuant + QJL 4-bit. Best quality, ~4.2x KV compression."
    ),
    QuantLevel(
        name="turbo3",
        bits_per_channel=2.5,
        compression_ratio=6.0,
        quality_label="high",
        layer_adaptive=5,
        kv_type="turbo3",
        min_memory_headroom_gb=3.0,
        description="3-bit TurboQuant. High quality, ~6x KV compression."
    ),
    QuantLevel(
        name="turbo2",
        bits_per_channel=1.5,
        compression_ratio=10.0,
        quality_label="balanced",
        layer_adaptive=3,
        kv_type="turbo2",
        min_memory_headroom_gb=2.0,
        description="2-bit TurboQuant. Balanced, ~10x KV compression."
    ),
    QuantLevel(
        name="q4_0",
        bits_per_channel=4.0,
        compression_ratio=3.5,
        quality_label="fast",
        layer_adaptive=0,
        kv_type="q4_0",
        min_memory_headroom_gb=1.5,
        description="Standard 4-bit quant. Fast fallback, no TurboQuant."
    ),
]


# ── Hardware Detection ────────────────────────────────────────────────────────

@dataclass
class HardwareInfo:
    """Detected hardware resources."""
    total_memory_gb: float
    available_memory_gb: float
    gpu_memory_gb: Optional[float] = None
    gpu_name: Optional[str] = None
    is_apple_silicon: bool = False
    chip_name: Optional[str] = None
    cpu_cores: int = 0
    detection_method: str = ""


def detect_hardware() -> HardwareInfo:
    """Detect available memory and GPU resources."""
    system = platform.system()

    if system == "Darwin":
        return _detect_apple_silicon()
    elif system == "Linux":
        return _detect_linux()
    else:
        return _detect_generic(system)


def _detect_apple_silicon() -> HardwareInfo:
    """Detect Apple Silicon unified memory."""
    info = HardwareInfo(
        total_memory_gb=0,
        available_memory_gb=0,
        is_apple_silicon=True,
        detection_method="sysctl",
    )

    try:
        # Get total memory
        result = subprocess.run(
            ["sysctl", "-n", "hw.memsize"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.total_memory_gb = int(result.stdout.strip()) / (1024**3)

        # Get chip name
        result = subprocess.run(
            ["sysctl", "-n", "machdep.cpu.brand_string"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.chip_name = result.stdout.strip()

        # Try to get GPU name (Apple Silicon)
        result = subprocess.run(
            ["system_profiler", "SPDisplaysDataType"],
            capture_output=True, text=True, timeout=10
        )
        if result.returncode == 0:
            for line in result.stdout.split("\n"):
                if "Chipset" in line or "GPU" in line:
                    info.gpu_name = line.split(":")[-1].strip()
                    break

        # Estimate available memory (vm_stat)
        result = subprocess.run(
            ["vm_stat"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            # Conservative default; Apple Silicon actually uses 16 KiB pages
            # (vm_stat prints the real page size in its header).
            page_size = 4096
            free_pages = 0
            for line in result.stdout.split("\n"):
                if "Pages free:" in line:
                    try:
                        free_pages = int(line.split(":")[-1].strip().rstrip("."))
                    except ValueError:
                        pass
            # Available ≈ free + some speculative (conservative: just free)
            info.available_memory_gb = (free_pages * page_size) / (1024**3)

        # Fallback if vm_stat parsing failed
        if info.available_memory_gb < 1:
            # Conservative: 70% of total
            info.available_memory_gb = info.total_memory_gb * 0.70

        # Apple Silicon shares memory — GPU memory = total memory
        info.gpu_memory_gb = info.total_memory_gb

        # Detect CPU cores
        result = subprocess.run(
            ["sysctl", "-n", "hw.ncpu"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.cpu_cores = int(result.stdout.strip())

    except Exception as e:
        logger.warning(f"Apple Silicon detection failed: {e}")
        # Fallback
        info.total_memory_gb = 16.0
        info.available_memory_gb = 12.0
        info.detection_method = "fallback"

    return info


def _detect_linux() -> HardwareInfo:
    """Detect Linux system with optional NVIDIA GPU."""
    info = HardwareInfo(
        total_memory_gb=0,
        available_memory_gb=0,
        detection_method="proc",
    )

    try:
        # Read /proc/meminfo
        with open("/proc/meminfo", "r") as f:
            meminfo = f.read()

        for line in meminfo.split("\n"):
            if line.startswith("MemTotal:"):
                kb = int(line.split()[1])
                info.total_memory_gb = kb / (1024 * 1024)
            elif line.startswith("MemAvailable:"):
                kb = int(line.split()[1])
                info.available_memory_gb = kb / (1024 * 1024)

        # CPU cores
        info.cpu_cores = os.cpu_count() or 1

        # Check for NVIDIA GPU
        try:
            result = subprocess.run(
                ["nvidia-smi", "--query-gpu=name,memory.total,memory.free",
                 "--format=csv,noheader,nounits"],
                capture_output=True, text=True, timeout=10
            )
            if result.returncode == 0 and result.stdout.strip():
                lines = result.stdout.strip().split("\n")
                if lines:
                    parts = lines[0].split(", ")
                    if len(parts) >= 3:
                        info.gpu_name = parts[0].strip()
                        info.gpu_memory_gb = float(parts[1]) / 1024  # MB to GB
                        gpu_free = float(parts[2]) / 1024
                        # Use GPU free for VRAM-based selection
                        info.available_memory_gb = max(info.available_memory_gb, gpu_free)
                        info.detection_method = "nvidia-smi"
        except (FileNotFoundError, subprocess.TimeoutExpired):
            pass  # No NVIDIA GPU

    except Exception as e:
        logger.warning(f"Linux detection failed: {e}")
        info.total_memory_gb = 16.0
        info.available_memory_gb = 12.0
        info.detection_method = "fallback"

    return info


def _detect_generic(system: str) -> HardwareInfo:
    """Fallback detection for unknown systems."""
    import psutil
    mem = psutil.virtual_memory()
    return HardwareInfo(
        total_memory_gb=mem.total / (1024**3),
        available_memory_gb=mem.available / (1024**3),
        cpu_cores=os.cpu_count() or 1,
        detection_method="psutil",
    )


# ── KV Cache Memory Estimation ───────────────────────────────────────────────

def estimate_kv_cache_gb(
    context_length: int,
    num_layers: int = 48,
    num_kv_heads: int = 8,
    head_dim: int = 128,
    bits_per_channel: float = 3.5,
) -> float:
    """Estimate KV cache memory for given parameters.

    Formula: 2 (K+V) × layers × kv_heads × head_dim × context_length × bits/8
    """
    bytes_per_element = bits_per_channel / 8.0
    total_bytes = 2 * num_layers * num_kv_heads * head_dim * context_length * bytes_per_element
    return total_bytes / (1024**3)
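
# Worked example with the defaults above (a sketch derived from the formula,
# not a measured number):
#   total_bytes = 2 * 48 * 8 * 128 * 32768 * (3.5 / 8)
#               = 1,409,286,144 bytes
#   -> estimate_kv_cache_gb(32768) ≈ 1.31 GiB
# For comparison, the same cache at f16 (16 bits/channel) would be ~6.0 GiB.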


def estimate_model_memory_gb(model_size_gb: float, quant_type: str = "q4_k_m") -> float:
    """Estimate model weights memory. Returns loaded size in GB.

    This is a rough estimate — actual depends on exact quant format.
    """
    # Common quant ratios (vs fp16), kept as reference data only
    quant_multipliers = {
        "f16": 1.0,
        "q8_0": 0.5,
        "q6_k": 0.42,
        "q5_k_m": 0.37,
        "q4_k_m": 0.32,
        "q3_k_m": 0.27,
        "q2_k": 0.22,
    }
    # model_size_gb is assumed to already be the quantized on-disk size,
    # so it is returned unchanged.
    return model_size_gb


# ── Selection Logic ───────────────────────────────────────────────────────────

@dataclass
class QuantSelection:
    """Result of quantization level selection."""
    level: QuantLevel
    hardware: HardwareInfo
    reasoning: str
    total_required_gb: float
    available_gb: float
    headroom_gb: float
    env_vars: dict = field(default_factory=dict)
    server_flags: dict = field(default_factory=dict)
    warnings: list = field(default_factory=list)


def select_quant_level(
    model_size_gb: float = 14.0,
    context_length: int = 32768,
    num_layers: int = 48,
    num_kv_heads: int = 8,
    head_dim: int = 128,
    preferred_level: Optional[str] = None,
    force_cpu: bool = False,
) -> QuantSelection:
    """Select the best quantization level for available hardware.

    Args:
        model_size_gb: Size of the model weights in GB
        context_length: Target context length
        num_layers: Number of transformer layers
        num_kv_heads: Number of KV attention heads
        head_dim: Dimension per attention head
        preferred_level: Force a specific level (still checks if it fits)
        force_cpu: If True, ignore GPU memory

    Returns:
        QuantSelection with the chosen level and reasoning
    """
    hw = detect_hardware()

    if force_cpu:
        hw.gpu_memory_gb = None
        hw.gpu_name = None

    # Use the most restrictive memory constraint
    # For Apple Silicon: unified memory, use total
    # For NVIDIA: use GPU VRAM
    # For CPU-only: use system RAM
    if hw.gpu_memory_gb and hw.gpu_name:
        memory_pool_gb = hw.gpu_memory_gb
        memory_label = f"{hw.gpu_name} {hw.gpu_memory_gb:.0f}GB VRAM"
    elif hw.is_apple_silicon:
        memory_pool_gb = hw.total_memory_gb
        memory_label = f"{hw.chip_name or 'Apple Silicon'} {hw.total_memory_gb:.0f}GB unified"
    else:
        memory_pool_gb = hw.total_memory_gb
        memory_label = f"{hw.cpu_cores}c CPU {hw.total_memory_gb:.0f}GB RAM"

    model_mem = estimate_model_memory_gb(model_size_gb)

    # Try levels from best to most compressed
    chosen = None
    for level in QUANT_LEVELS:
        if preferred_level and level.name != preferred_level:
            continue

        kv_mem = estimate_kv_cache_gb(
            context_length, num_layers, num_kv_heads, head_dim,
            level.bits_per_channel
        )
        total_required = model_mem + kv_mem
        headroom = memory_pool_gb - total_required

        if headroom >= level.min_memory_headroom_gb:
            chosen = level
            break

        if preferred_level and level.name == preferred_level:
            # User forced this level but it doesn't fit
            chosen = level
            break

    if chosen is None:
        # Nothing fits — pick the most aggressive compression, not the q4_0 fallback.
        chosen = max(QUANT_LEVELS, key=lambda level: level.compression_ratio)
        logger.warning(f"No quant level fits in {memory_pool_gb:.1f}GB. Using {chosen.name}.")

    # Calculate final numbers
    kv_mem = estimate_kv_cache_gb(
        context_length, num_layers, num_kv_heads, head_dim,
        chosen.bits_per_channel
    )
    total_required = model_mem + kv_mem
    headroom = memory_pool_gb - total_required

    # Build reasoning
    reasoning_parts = [
        f"{memory_label}:",
        f"{chosen.name} ({chosen.quality_label}, {chosen.bits_per_channel:.1f}b/ch,",
        f"{chosen.compression_ratio:.1f}x compression)",
        f"fits {model_mem:.1f}GB model + {kv_mem:.1f}GB KV cache",
        f"@ {context_length}-token context = {total_required:.1f}GB / {memory_pool_gb:.0f}GB",
        f"({headroom:.1f}GB headroom)"
    ]
    reasoning = " ".join(reasoning_parts)

    # Build environment variables for llama.cpp
    env_vars = {
        "TURBO_LAYER_ADAPTIVE": str(chosen.layer_adaptive),
    }

    # Build server flags
    server_flags = {
        "-ctk": chosen.kv_type,
        "-ctv": chosen.kv_type,
        "-c": str(context_length),
    }

    # Warnings
    warnings = []
    if headroom < 2.0:
        warnings.append(
            f"Low headroom ({headroom:.1f}GB). Consider reducing context length or model size."
        )
    if headroom < 0:
        warnings.append(
            f"OVERCOMMITTED: needs {total_required:.1f}GB but only {memory_pool_gb:.0f}GB available. "
            f"Inference may fail or swap heavily."
        )

    selection = QuantSelection(
        level=chosen,
        hardware=hw,
        reasoning=reasoning,
        total_required_gb=total_required,
        available_gb=memory_pool_gb,
        headroom_gb=headroom,
        env_vars=env_vars,
        server_flags=server_flags,
        warnings=warnings,
    )

    logger.info(f"Quant selection: {reasoning}")
    for w in warnings:
        logger.warning(w)

    return selection
|
||||
|
||||
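For orientation, a minimal usage sketch of the function above (values are illustrative; field names match the QuantSelection built here, and the "turbo4" output is only what the tests below suggest for a roomy machine):

    from evolution.quant_selector import select_quant_level

    # Plan a 14GB model at 32K context on whatever hardware is detected.
    sel = select_quant_level(model_size_gb=14.0, context_length=32768,
                             num_layers=48, num_kv_heads=8, head_dim=128)
    print(sel.level.name)    # e.g. "turbo4" when memory is plentiful
    print(sel.server_flags)  # e.g. {"-ctk": "turbo4", "-ctv": "turbo4", "-c": "32768"}
    print(sel.headroom_gb)   # GB remaining after model weights + KV cache
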
# ── CLI ───────────────────────────────────────────────────────────────────────


def main():
    """CLI entry point for quant level selection."""
    import argparse
    import json

    parser = argparse.ArgumentParser(
        description="Auto-select TurboQuant compression level based on available hardware"
    )
    parser.add_argument("--model-size", type=float, default=14.0,
                        help="Model size in GB (default: 14.0)")
    parser.add_argument("--context", type=int, default=32768,
                        help="Target context length (default: 32768)")
    parser.add_argument("--layers", type=int, default=48,
                        help="Number of transformer layers (default: 48)")
    parser.add_argument("--kv-heads", type=int, default=8,
                        help="Number of KV attention heads (default: 8)")
    parser.add_argument("--head-dim", type=int, default=128,
                        help="Dimension per attention head (default: 128)")
    parser.add_argument("--prefer", type=str, default=None,
                        choices=[level.name for level in QUANT_LEVELS],
                        help="Prefer a specific quant level")
    parser.add_argument("--force-cpu", action="store_true",
                        help="Ignore GPU, use CPU memory only")
    parser.add_argument("--json", action="store_true",
                        help="JSON output for automation")
    parser.add_argument("--detect-only", action="store_true",
                        help="Only detect hardware, don't select")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO, format="%(message)s")

    if args.detect_only:
        hw = detect_hardware()
        if args.json:
            print(json.dumps(hw.__dict__, default=str, indent=2))
        else:
            print(f"Total memory: {hw.total_memory_gb:.1f} GB")
            print(f"Available: {hw.available_memory_gb:.1f} GB")
            if hw.gpu_memory_gb:
                print(f"GPU memory: {hw.gpu_memory_gb:.1f} GB")
            if hw.gpu_name:
                print(f"GPU: {hw.gpu_name}")
            if hw.is_apple_silicon:
                print(f"Chip: {hw.chip_name or 'Apple Silicon'}")
            print(f"CPU cores: {hw.cpu_cores}")
            print(f"Detection: {hw.detection_method}")
        return

    selection = select_quant_level(
        model_size_gb=args.model_size,
        context_length=args.context,
        num_layers=args.layers,
        num_kv_heads=args.kv_heads,
        head_dim=args.head_dim,
        preferred_level=args.prefer,
        force_cpu=args.force_cpu,
    )

    if args.json:
        result = {
            "level": selection.level.name,
            "bits_per_channel": selection.level.bits_per_channel,
            "compression_ratio": selection.level.compression_ratio,
            "quality": selection.level.quality_label,
            "reasoning": selection.reasoning,
            "total_required_gb": round(selection.total_required_gb, 2),
            "available_gb": round(selection.available_gb, 1),
            "headroom_gb": round(selection.headroom_gb, 2),
            "env_vars": selection.env_vars,
            "server_flags": selection.server_flags,
            "warnings": selection.warnings,
            "hardware": {
                "total_memory_gb": round(selection.hardware.total_memory_gb, 1),
                "gpu_name": selection.hardware.gpu_name,
                "is_apple_silicon": selection.hardware.is_apple_silicon,
                "chip_name": selection.hardware.chip_name,
                "cpu_cores": selection.hardware.cpu_cores,
            },
        }
        print(json.dumps(result, indent=2))
    else:
        print(f"Selected: {selection.level.name} ({selection.level.quality_label})")
        print(f"  {selection.reasoning}")
        print()
        print("Environment variables:")
        for k, v in selection.env_vars.items():
            print(f"  export {k}={v}")
        print()
        print("Server flags:")
        for k, v in selection.server_flags.items():
            print(f"  {k} {v}")
        if selection.warnings:
            print()
            for w in selection.warnings:
                print(f"  WARNING: {w}")


if __name__ == "__main__":
    main()
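Downstream automation can consume the --json output. A sketch, assuming evolution/ is importable as a package so the module runs under -m (everything else is illustrative):

    import json
    import subprocess

    # Run the selector and parse its machine-readable plan.
    out = subprocess.check_output(
        ["python", "-m", "evolution.quant_selector", "--model-size", "14", "--json"]
    )
    plan = json.loads(out)
    flags = " ".join(f"{k} {v}" for k, v in plan["server_flags"].items())
    print(f"llama-server flags: {flags}")  # e.g. -ctk turbo4 -ctv turbo4 -c 32768
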
3
tests/conftest.py
Normal file
@@ -0,0 +1,3 @@
"""Pytest configuration for turboquant."""
import sys, os
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
58
tests/test_dflash_apple_silicon.py
Normal file
@@ -0,0 +1,58 @@
#!/usr/bin/env python3
"""Tests for Apple Silicon DFlash benchmark planning helpers (issue #152)."""

import os
import sys
from unittest.mock import patch

sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

from benchmarks.dflash_apple_silicon import (  # noqa: E402
    build_mlx_benchmark_command,
    detect_total_memory_gb,
    render_report_template,
    select_pair,
)


class TestPairSelection:
    def test_prefers_qwen35_9b_on_36gb_mac(self):
        pair = select_pair(total_memory_gb=36)
        assert pair.slug == "qwen35-9b"
        assert pair.base_model == "Qwen/Qwen3.5-9B"
        assert pair.draft_model == "z-lab/Qwen3.5-9B-DFlash"

    def test_falls_back_to_4b_when_memory_is_tight(self):
        pair = select_pair(total_memory_gb=20)
        assert pair.slug == "qwen35-4b"
        assert pair.base_model == "Qwen/Qwen3.5-4B"


class TestCommandGeneration:
    def test_builds_upstream_mlx_benchmark_command(self):
        pair = select_pair(total_memory_gb=36)
        command = build_mlx_benchmark_command(pair, dataset="gsm8k", max_samples=64)
        assert "python -m dflash.benchmark --backend mlx" in command
        assert "--model Qwen/Qwen3.5-9B" in command
        assert "--draft-model z-lab/Qwen3.5-9B-DFlash" in command
        assert "--dataset gsm8k" in command
        assert "--max-samples 64" in command
        assert "--draft-sliding-window-size 4096" in command


class TestReportTemplate:
    def test_report_template_mentions_baseline_and_verdict(self):
        pair = select_pair(total_memory_gb=36)
        report = render_report_template(machine_label="M3 Max 36GB", pair=pair)
        assert "DFlash Apple Silicon Benchmark Report" in report
        assert "M3 Max 36GB" in report
        assert "Qwen/Qwen3.5-9B" in report
        assert "plain MLX or llama.cpp speculative decoding" in report
        assert "Worth operationalizing locally?" in report


class TestMemoryDetection:
    @patch("benchmarks.dflash_apple_silicon.platform.system", return_value="Darwin")
    @patch("benchmarks.dflash_apple_silicon.subprocess.check_output", return_value=b"38654705664\n")
    def test_detect_total_memory_gb_on_macos(self, _mock_sysctl, _mock_system):
        assert detect_total_memory_gb() == 36.0
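The mocked sysctl value in the last test is exact, so no tolerance is needed: 38654705664 bytes is precisely 36 GiB. A quick check of the arithmetic:

    # 36 GiB expressed in bytes, matching the mocked hw.memsize above.
    assert 36 * 1024 ** 3 == 38_654_705_664
    assert round(38_654_705_664 / 1024 ** 3, 1) == 36.0
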
@@ -1,152 +0,0 @@
#!/usr/bin/env python3
"""Tests for m1_mac_benchmark.py"""

import json
import os
import sys
import pytest
from unittest.mock import patch, MagicMock
from datetime import datetime, timezone

sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from benchmarks.m1_mac_benchmark import (
    Preset,
    AppleSiliconInfo,
    BenchmarkResult,
    PresetResult,
    QualityResult,
    PRESETS,
    detect_apple_silicon,
    evaluate_gsm8k,
    evaluate_tool_call,
    generate_markdown_report,
    run_preset_benchmark,
)


class TestPresets:
    def test_all_presets_defined(self):
        assert "turboquant_k8v4" in PRESETS
        assert "turboquant_4bit_nc" in PRESETS
        assert "turboquant_3bit_nc" in PRESETS

    def test_preset_fields(self):
        for name, preset in PRESETS.items():
            assert preset.name == name
            assert preset.bits_per_channel > 0
            assert preset.compression_ratio > 1
            assert preset.kv_type
            assert preset.description

    def test_presets_ordered_by_bits(self):
        """k8v4 should be ~3.5b, 4bit should be 4.0, 3bit should be 3.0."""
        assert PRESETS["turboquant_4bit_nc"].bits_per_channel > PRESETS["turboquant_k8v4"].bits_per_channel
        assert PRESETS["turboquant_k8v4"].bits_per_channel > PRESETS["turboquant_3bit_nc"].bits_per_channel


class TestGSM8KEval:
    def test_correct_answer(self):
        output = "Janet makes 9 + 9 = 18 dollars per day."
        assert evaluate_gsm8k(output, "18") is True

    def test_correct_with_commas(self):
        output = "The profit is $70,000."
        assert evaluate_gsm8k(output, "70000") is True

    def test_wrong_answer(self):
        output = "The answer is 42 dollars."
        assert evaluate_gsm8k(output, "18") is False

    def test_no_number(self):
        output = "I'm not sure about this problem."
        assert evaluate_gsm8k(output, "18") is False

    def test_correct_answer_not_last(self):
        """The answer may appear in the reasoning, not just at the end."""
        output = "There are 16 eggs. She eats 3, uses 4. That leaves 9. She sells for $2 each = 18 dollars."
        assert evaluate_gsm8k(output, "18") is True


class TestToolCallEval:
    def test_function_name(self):
        output = "I'll call get_weather with the parameters."
        assert evaluate_tool_call(output) is True

    def test_json_format(self):
        output = '```json\n{"name": "get_weather", "arguments": {}}\n```'
        assert evaluate_tool_call(output) is True

    def test_no_tool(self):
        output = "The weather in San Francisco is sunny."
        assert evaluate_tool_call(output) is False


class TestMarkdownReport:
    def test_generates_report(self):
        hw = AppleSiliconInfo(
            chip_name="Apple M1 Max",
            total_memory_gb=32,
            performance_cores=8,
            efficiency_cores=2,
            gpu_cores=24,
            os_version="14.2",
        )
        results = [
            PresetResult(
                preset="turboquant_k8v4",
                kv_type="turbo4",
                bits_per_channel=3.5,
                compression_ratio=4.2,
                description="Best quality",
                avg_tokens_per_sec=45.2,
                peak_memory_mb=8192,
                gsm8k_score="2/3 (67%)",
                tool_call_accuracy="Yes",
                benchmarks=[BenchmarkResult(
                    preset="turboquant_k8v4",
                    prompt_id="summarization",
                    tokens_per_sec=45.2,
                    time_to_first_token_ms=150,
                    total_tokens=128,
                    elapsed_seconds=2.83,
                )],
            ),
        ]
        report = generate_markdown_report(hw, results, "gemma-4", 4096)

        assert "TurboQuant M1 Mac Benchmark" in report
        assert "Apple M1 Max" in report
        assert "turboquant_k8v4" in report
        assert "45.2" in report
        assert "Recommendation" in report

    def test_empty_results(self):
        hw = AppleSiliconInfo()
        report = generate_markdown_report(hw, [], "test", 4096)
        assert "TurboQuant M1 Mac Benchmark" in report


class TestDryRun:
    def test_dry_run_returns_results(self):
        result = run_preset_benchmark("turboquant_k8v4", dry_run=True)
        assert result.preset == "turboquant_k8v4"
        assert result.avg_tokens_per_sec > 0
        assert result.peak_memory_mb > 0

    def test_dry_run_all_presets(self):
        for name in PRESETS:
            result = run_preset_benchmark(name, dry_run=True)
            assert result.preset == name
            assert result.avg_tokens_per_sec > 0


class TestHardwareDetection:
    @patch("benchmarks.m1_mac_benchmark.platform.system", return_value="Linux")
    def test_non_apple(self, mock_system):
        hw = detect_apple_silicon()
        assert hw.chip_name == ""

    def test_returns_info_structure(self):
        hw = detect_apple_silicon()
        assert isinstance(hw, AppleSiliconInfo)
        assert isinstance(hw.total_memory_gb, float)
177
tests/test_quant_selector.py
Normal file
@@ -0,0 +1,177 @@
#!/usr/bin/env python3
"""Tests for quant_selector.py"""

import sys
import os
import pytest
from unittest.mock import patch, MagicMock

sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from evolution.quant_selector import (
    QuantLevel,
    HardwareInfo,
    QUANT_LEVELS,
    detect_hardware,
    estimate_kv_cache_gb,
    estimate_model_memory_gb,
    select_quant_level,
)


class TestQuantLevels:
    def test_levels_keep_turboquant_quality_order_with_q4_fallback_last(self):
        """TurboQuant levels should lead, with q4_0 reserved as the non-Turbo fallback."""
        names = [level.name for level in QUANT_LEVELS]
        assert names[:3] == ["turbo4", "turbo3", "turbo2"]
        assert names[-1] == "q4_0"

    def test_all_levels_have_required_fields(self):
        for level in QUANT_LEVELS:
            assert level.name
            assert level.bits_per_channel > 0
            assert level.compression_ratio > 1
            assert level.quality_label
            assert level.layer_adaptive >= 0
            assert level.kv_type


class TestKVEstimate:
    def test_basic_estimate(self):
        # 48 layers, 8 heads, 128 dim, 32K context, 3.5 bits
        kv_gb = estimate_kv_cache_gb(32768, 48, 8, 128, 3.5)
        assert kv_gb > 0
        assert kv_gb < 10  # Should be reasonable

    def test_longer_context_larger(self):
        kv_32k = estimate_kv_cache_gb(32768, 48, 8, 128, 3.5)
        kv_128k = estimate_kv_cache_gb(131072, 48, 8, 128, 3.5)
        assert kv_128k > kv_32k

    def test_higher_bits_larger(self):
        kv_4b = estimate_kv_cache_gb(32768, 48, 8, 128, 4.0)
        kv_2b = estimate_kv_cache_gb(32768, 48, 8, 128, 2.0)
        assert kv_4b > kv_2b

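For intuition about the magnitudes these assertions expect, the usual KV-cache accounting is 2 tensors (K and V) × layers × KV heads × head dim × context positions × bits per channel. A sketch under that assumption, not necessarily the exact formula inside estimate_kv_cache_gb:

    def kv_cache_gb(context, layers, kv_heads, head_dim, bits):
        # One K entry and one V entry per layer, head, position, and channel.
        elements = 2 * layers * kv_heads * head_dim * context
        return elements * bits / 8 / 1024 ** 3

    # 48 layers, 8 KV heads, 128-dim heads, 32K context at 3.5 bits/channel:
    print(kv_cache_gb(32768, 48, 8, 128, 3.5))  # ~1.3 GB, comfortably under 10
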
class TestHardwareDetection:
    def test_detect_returns_info(self):
        hw = detect_hardware()
        assert hw.total_memory_gb > 0
        assert hw.available_memory_gb > 0
        assert hw.detection_method

    @patch("evolution.quant_selector.platform.system", return_value="Linux")
    @patch("builtins.open", create=True)
    def test_linux_detection(self, mock_open, mock_system):
        mock_open.return_value.__enter__().read.return_value = (
            "MemTotal: 32000000 kB\n"
            "MemAvailable: 24000000 kB\n"
        )
        hw = _detect_linux_fallback()
        assert hw.total_memory_gb > 20


def _detect_linux_fallback():
    """Helper to test Linux detection with mocked /proc/meminfo."""
    from evolution.quant_selector import _detect_linux
    return _detect_linux()


class TestSelection:
    def test_selects_turbo4_for_large_memory(self):
        """With plenty of memory, should pick turbo4 (best quality)."""
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=64,
                available_memory_gb=48,
                gpu_memory_gb=64,
                gpu_name="Test GPU",
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=32768)
            assert sel.level.name == "turbo4"
            assert sel.headroom_gb > 0

    def test_selects_smaller_for_tight_memory(self):
        """With tight memory, should pick a smaller quant."""
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=16,
                available_memory_gb=12,
                gpu_memory_gb=16,
                gpu_name="Test GPU",
                cpu_cores=8,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=131072)
            # Should pick a smaller quant for 128K context on 16GB
            assert sel.level.bits_per_channel <= 4.0

    def test_preferred_level(self):
        """User can force a specific level."""
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=64,
                available_memory_gb=48,
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(
                model_size_gb=14.0, context_length=32768,
                preferred_level="turbo2"
            )
            assert sel.level.name == "turbo2"

    def test_env_vars_populated(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=64,
                available_memory_gb=48,
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=32768)
            assert "TURBO_LAYER_ADAPTIVE" in sel.env_vars
            assert "-ctk" in sel.server_flags
            assert "-ctv" in sel.server_flags

    def test_warnings_on_low_headroom(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=18,
                available_memory_gb=14,
                gpu_memory_gb=18,
                gpu_name="Test GPU",
                cpu_cores=8,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=16.0, context_length=65536)
            assert len(sel.warnings) > 0

    def test_falls_back_to_turbo2_when_nothing_fits(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=8,
                available_memory_gb=6,
                gpu_memory_gb=8,
                gpu_name="Tiny GPU",
                cpu_cores=4,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=16.0, context_length=131072)
            assert sel.level.name == "turbo2"

    def test_reasoning_contains_key_info(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=32,
                available_memory_gb=24,
                is_apple_silicon=True,
                chip_name="M4 Max",
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=32768)
            assert "turbo4" in sel.reasoning
            assert "M4 Max" in sel.reasoning or "32GB" in sel.reasoning
338
tests/test_tool_call_integration.py
Normal file
@@ -0,0 +1,338 @@
"""
Integration test: turboquant compressed model passes hermes tool calls (issue #82).

Validates that a TurboQuant-compressed model can:
1. Parse hermes tool schemas correctly
2. Format tool calls in OpenAI-compatible format
3. Pass through the hermes agent conversation loop

Tests are structured as contract tests -- they validate the schema/format
compatibility without requiring a running model server. The live inference
test is skipped by default (requires llama-server with a TurboQuant model).

Usage:
    pytest tests/test_tool_call_integration.py -v
    pytest tests/test_tool_call_integration.py -v -k live  # run live test if server available
"""
import json
import os
import pathlib
import re
import unittest

import pytest

ROOT = pathlib.Path(__file__).resolve().parents[1]
PROFILE_PATH = ROOT / "profiles" / "hermes-profile-gemma4-turboquant.yaml"
BENCHMARKS_DIR = ROOT / "benchmarks"


class TestHermesProfileSchema(unittest.TestCase):
    """Validate the hermes profile YAML has required fields for tool calling."""

    @classmethod
    def setUpClass(cls):
        import yaml
        cls.profile = yaml.safe_load(PROFILE_PATH.read_text())

    def test_profile_has_providers(self):
        assert "providers" in self.profile, "Profile must define providers"
        assert "primary" in self.profile["providers"], "Must have primary provider"

    def test_primary_provider_has_endpoint(self):
        primary = self.profile["providers"]["primary"]
        assert "endpoint" in primary, "Primary provider must have endpoint"
        assert primary["endpoint"].startswith("http"), "Endpoint must be HTTP(S) URL"

    def test_primary_provider_has_api_path(self):
        primary = self.profile["providers"]["primary"]
        assert "api_path" in primary, "Primary provider must have api_path"
        assert "/chat/completions" in primary["api_path"], (
            "api_path should be OpenAI-compatible /chat/completions"
        )

    def test_turboquant_settings_present(self):
        primary = self.profile["providers"]["primary"]
        assert "turboquant" in primary, "Must have turboquant config section"
        tq = primary["turboquant"]
        assert tq.get("enabled") is True, "TurboQuant must be enabled"
        assert tq.get("kv_type") in ("turbo2", "turbo3", "turbo4"), (
            "kv_type must be turbo2, turbo3, or turbo4"
        )

    def test_context_window_configured(self):
        primary = self.profile["providers"]["primary"]
        assert "context" in primary, "Must have context config"
        ctx = primary["context"]
        assert ctx.get("max_tokens", 0) >= 8192, (
            "max_tokens should be >= 8192 for TurboQuant value proposition"
        )

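For reference, a hypothetical profile shape that would satisfy these checks, written as a Python dict (the field names come from the assertions above; all values are illustrative, not the shipped profile):

    profile = {
        "providers": {
            "primary": {
                "endpoint": "http://127.0.0.1:8080",  # illustrative local server
                "api_path": "/v1/chat/completions",
                "turboquant": {"enabled": True, "kv_type": "turbo4"},
                "context": {"max_tokens": 32768},
            }
        }
    }

    # The same invariants the test class enforces:
    primary = profile["providers"]["primary"]
    assert primary["endpoint"].startswith("http")
    assert "/chat/completions" in primary["api_path"]
    assert primary["turboquant"]["kv_type"] in ("turbo2", "turbo3", "turbo4")
    assert primary["context"]["max_tokens"] >= 8192
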
class TestToolSchemaCompatibility(unittest.TestCase):
    """Verify hermes tool schemas serialize to valid JSON for OpenAI tool_calls."""

    SAMPLE_TOOL_SCHEMAS = [
        {
            "type": "function",
            "function": {
                "name": "read_file",
                "description": "Read a text file with line numbers.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "path": {"type": "string", "description": "File path"},
                        "offset": {"type": "integer", "default": 1},
                        "limit": {"type": "integer", "default": 500},
                    },
                    "required": ["path"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "execute_code",
                "description": "Run a Python script.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "code": {"type": "string", "description": "Python code"},
                    },
                    "required": ["code"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "web_search",
                "description": "Search the web.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {"type": "string"},
                        "max_results": {"type": "integer", "default": 5},
                    },
                    "required": ["query"],
                },
            },
        },
    ]

    def test_tool_schemas_serialize_to_json(self):
        """Tool schemas must serialize without errors."""
        serialized = json.dumps(self.SAMPLE_TOOL_SCHEMAS)
        assert len(serialized) > 0
        parsed = json.loads(serialized)
        assert len(parsed) == len(self.SAMPLE_TOOL_SCHEMAS)

    def test_tool_schemas_have_required_openai_fields(self):
        """Each tool schema must have the fields OpenAI expects."""
        for tool in self.SAMPLE_TOOL_SCHEMAS:
            assert tool["type"] == "function", "Tool type must be 'function'"
            fn = tool["function"]
            assert "name" in fn, "Function must have name"
            assert "description" in fn, "Function must have description"
            assert "parameters" in fn, "Function must have parameters"
            params = fn["parameters"]
            assert params["type"] == "object", "Parameters type must be 'object'"
            assert "properties" in params, "Parameters must have properties"

    def test_tool_call_response_format(self):
        """Verify tool_call response matches OpenAI format."""
        tool_call = {
            "id": "call_abc123",
            "type": "function",
            "function": {
                "name": "read_file",
                "arguments": json.dumps({"path": "/tmp/test.txt"}),
            },
        }
        args = json.loads(tool_call["function"]["arguments"])
        assert args["path"] == "/tmp/test.txt"
        assert tool_call["function"]["name"] in [
            t["function"]["name"] for t in self.SAMPLE_TOOL_SCHEMAS
        ]

    def test_tool_names_are_valid_identifiers(self):
        """Tool names must be valid Python identifiers for hermes dispatch."""
        for tool in self.SAMPLE_TOOL_SCHEMAS:
            name = tool["function"]["name"]
            assert re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", name), (
                f"Tool name '{name}' is not a valid identifier"
            )

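The identifier check above matters because hermes-style dispatch typically maps the tool name straight onto a callable. A minimal sketch of that pattern (the handler table is hypothetical; hermes' real dispatch is richer):

    import json

    # Hypothetical handler table keyed by tool name.
    HANDLERS = {"web_search": lambda query, max_results=5: f"(stub) results for {query!r}"}

    def dispatch(tool_call: dict) -> str:
        fn = tool_call["function"]
        args = json.loads(fn["arguments"])  # OpenAI sends arguments as a JSON string
        return HANDLERS[fn["name"]](**args)  # valid-identifier names map onto callables

    print(dispatch({
        "id": "call_abc123",
        "type": "function",
        "function": {"name": "web_search", "arguments": json.dumps({"query": "bitcoin price"})},
    }))
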
class TestTurboquantServerConfig(unittest.TestCase):
    """Validate server startup configuration matches hermes profile."""

    def test_server_command_has_turboquant_flags(self):
        """The server command in the profile must include -ctk/-ctv flags."""
        profile_text = PROFILE_PATH.read_text()
        assert "-ctk" in profile_text, "Profile server command must include -ctk flag"
        assert "-ctv" in profile_text, "Profile server command must include -ctv flag"

    def test_server_command_has_context_flag(self):
        """Server command must set context size."""
        profile_text = PROFILE_PATH.read_text()
        assert re.search(r"-c\s+\d+", profile_text), (
            "Server command must include -c <context_size> flag"
        )

    def test_layer_adaptive_env_var(self):
        """Profile must set TURBO_LAYER_ADAPTIVE env var."""
        profile_text = PROFILE_PATH.read_text()
        assert "TURBO_LAYER_ADAPTIVE" in profile_text, (
            "Profile must configure TURBO_LAYER_ADAPTIVE"
        )


class TestBenchmarkData(unittest.TestCase):
    """Validate benchmark test prompts include tool-call test cases."""

    @classmethod
    def setUpClass(cls):
        prompts_path = BENCHMARKS_DIR / "test_prompts.json"
        cls.prompts = json.loads(prompts_path.read_text())

    def test_has_tool_call_test_prompt(self):
        """Benchmark prompts must include a tool-call format test."""
        categories = [p.get("category") for p in self.prompts]
        assert "tool_call_format" in categories, (
            "Benchmark must include a tool_call_format test case"
        )

    def test_tool_call_prompt_expects_json(self):
        """Tool call test prompt must expect JSON in the response."""
        tool_prompt = next(
            p for p in self.prompts if p.get("category") == "tool_call_format"
        )
        pattern = tool_prompt.get("expected_pattern", "")
        assert "json" in pattern.lower() or "\\{" in pattern, (
            "Tool call prompt must expect JSON-formatted response"
        )


@pytest.mark.skipif(
    not os.environ.get("TURBOQUANT_SERVER_URL"),
    reason="No TurboQuant server available (set TURBOQUANT_SERVER_URL to run)",
)
class TestLiveToolCallIntegration:
    """Live integration test -- requires running llama-server with TurboQuant."""

    def test_server_health(self):
        """Server must respond to /v1/models endpoint."""
        import requests
        url = os.environ["TURBOQUANT_SERVER_URL"]
        resp = requests.get(f"{url}/v1/models", timeout=10)
        assert resp.status_code == 200
        data = resp.json()
        assert "data" in data
        assert len(data["data"]) > 0

    def test_tool_call_completion(self):
        """Model must return a valid tool_call for a read_file prompt."""
        import requests
        url = os.environ["TURBOQUANT_SERVER_URL"]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "read_file",
                    "description": "Read a file",
                    "parameters": {
                        "type": "object",
                        "properties": {"path": {"type": "string"}},
                        "required": ["path"],
                    },
                },
            }
        ]
        resp = requests.post(
            f"{url}/v1/chat/completions",
            json={
                "model": "gemma-4",
                "messages": [
                    {"role": "user", "content": "Read the file at /tmp/test.txt"}
                ],
                "tools": tools,
                "tool_choice": "auto",
            },
            timeout=120,
        )
        assert resp.status_code == 200
        data = resp.json()
        choice = data["choices"][0]
        msg = choice["message"]
        if "tool_calls" in msg and msg["tool_calls"]:
            tc = msg["tool_calls"][0]
            assert tc["type"] == "function"
            assert tc["function"]["name"] == "read_file"
            args = json.loads(tc["function"]["arguments"])
            assert "path" in args
        else:
            assert len(msg.get("content", "")) > 0

    def test_tool_call_with_multiple_tools(self):
        """Model must handle multiple available tools."""
        import requests
        url = os.environ["TURBOQUANT_SERVER_URL"]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "read_file",
                    "description": "Read a file",
                    "parameters": {
                        "type": "object",
                        "properties": {"path": {"type": "string"}},
                        "required": ["path"],
                    },
                },
            },
            {
                "type": "function",
                "function": {
                    "name": "web_search",
                    "description": "Search the web",
                    "parameters": {
                        "type": "object",
                        "properties": {"query": {"type": "string"}},
                        "required": ["query"],
                    },
                },
            },
            {
                "type": "function",
                "function": {
                    "name": "execute_code",
                    "description": "Run Python code",
                    "parameters": {
                        "type": "object",
                        "properties": {"code": {"type": "string"}},
                        "required": ["code"],
                    },
                },
            },
        ]
        resp = requests.post(
            f"{url}/v1/chat/completions",
            json={
                "model": "gemma-4",
                "messages": [
                    {"role": "user", "content": "Search the web for 'bitcoin price'"}
                ],
                "tools": tools,
                "tool_choice": "auto",
            },
            timeout=120,
        )
        assert resp.status_code == 200
        data = resp.json()
        assert "choices" in data
        assert len(data["choices"]) > 0


if __name__ == "__main__":
    unittest.main()