Compare commits

burn/80-17 ... feat/152-d

9 Commits

| Author | SHA1 | Date |
|---|---|---|
| | dabb96d315 | |
| | 69cef8a90f | |
| | 636d294896 | |
| | 492c1cdcfd | |
| | 6e583310a8 | |
| | 300918ee1e | |
| | f7ea01cb65 | |
| | d2edbdadc2 | |
| | c009d8df77 | |
@@ -30,3 +30,4 @@ See [issues](https://forge.alexanderwhitestone.com/Timmy_Foundation/turboquant/i
## Docs

- [Project Status](docs/PROJECT_STATUS.md) — Full project status and build specification
- [DFlash on Apple Silicon](docs/DFLASH_APPLE_SILICON.md) — MLX benchmark planner, setup commands, and report workflow
189 benchmarks/dflash_apple_silicon.py Normal file
@@ -0,0 +1,189 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Apple Silicon DFlash planning helpers and CLI (issue #152)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import platform
|
||||
import subprocess
|
||||
from dataclasses import asdict, dataclass
|
||||
from pathlib import Path
|
||||
from typing import Iterable, Optional
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class DFlashPair:
|
||||
slug: str
|
||||
base_model: str
|
||||
draft_model: str
|
||||
estimated_total_weights_gb: float
|
||||
minimum_recommended_memory_gb: float
|
||||
draft_sliding_window_size: int = 4096
|
||||
|
||||
|
||||
SUPPORTED_PAIRS: tuple[DFlashPair, ...] = (
|
||||
DFlashPair(
|
||||
slug="qwen35-4b",
|
||||
base_model="Qwen/Qwen3.5-4B",
|
||||
draft_model="z-lab/Qwen3.5-4B-DFlash",
|
||||
estimated_total_weights_gb=9.68,
|
||||
minimum_recommended_memory_gb=16.0,
|
||||
),
|
||||
DFlashPair(
|
||||
slug="qwen35-9b",
|
||||
base_model="Qwen/Qwen3.5-9B",
|
||||
draft_model="z-lab/Qwen3.5-9B-DFlash",
|
||||
estimated_total_weights_gb=19.93,
|
||||
minimum_recommended_memory_gb=28.0,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def detect_total_memory_gb() -> float:
|
||||
"""Detect total system memory in GiB, rounded to a whole number for planning."""
|
||||
system = platform.system()
|
||||
if system == "Darwin":
|
||||
mem_bytes = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]).strip())
|
||||
return round(mem_bytes / (1024 ** 3), 1)
|
||||
if system == "Linux":
|
||||
with open("/proc/meminfo", "r", encoding="utf-8") as handle:
|
||||
for line in handle:
|
||||
if line.startswith("MemTotal:"):
|
||||
mem_kb = int(line.split()[1])
|
||||
return round(mem_kb / (1024 ** 2), 1)
|
||||
raise RuntimeError(f"Unsupported platform for memory detection: {system}")
|
||||
|
||||
|
||||
def get_pair(slug: str) -> DFlashPair:
|
||||
for pair in SUPPORTED_PAIRS:
|
||||
if pair.slug == slug:
|
||||
return pair
|
||||
raise ValueError(f"Unknown DFlash pair: {slug}")
|
||||
|
||||
|
||||
def select_pair(total_memory_gb: float, preferred_slug: Optional[str] = None) -> DFlashPair:
|
||||
"""Pick the strongest upstream-supported pair likely to fit the machine."""
|
||||
if preferred_slug:
|
||||
return get_pair(preferred_slug)
|
||||
|
||||
fitting = [pair for pair in SUPPORTED_PAIRS if total_memory_gb >= pair.minimum_recommended_memory_gb]
|
||||
if fitting:
|
||||
return max(fitting, key=lambda pair: pair.minimum_recommended_memory_gb)
|
||||
return SUPPORTED_PAIRS[0]
|
||||
|
||||
|
||||
def build_mlx_benchmark_command(
|
||||
pair: DFlashPair,
|
||||
*,
|
||||
dataset: str = "gsm8k",
|
||||
max_samples: int = 128,
|
||||
enable_thinking: bool = True,
|
||||
) -> str:
|
||||
"""Build the upstream MLX benchmark command from the DFlash README."""
|
||||
parts = [
|
||||
"python -m dflash.benchmark --backend mlx",
|
||||
f"--model {pair.base_model}",
|
||||
f"--draft-model {pair.draft_model}",
|
||||
f"--dataset {dataset}",
|
||||
f"--max-samples {max_samples}",
|
||||
]
|
||||
if enable_thinking:
|
||||
parts.append("--enable-thinking")
|
||||
parts.append(f"--draft-sliding-window-size {pair.draft_sliding_window_size}")
|
||||
return " \\\n ".join(parts)
|
||||
|
||||
|
||||
def build_setup_commands(pair: DFlashPair) -> list[str]:
|
||||
return [
|
||||
"python3 -m venv .venv-dflash",
|
||||
"source .venv-dflash/bin/activate",
|
||||
"git clone https://github.com/z-lab/dflash.git",
|
||||
"cd dflash",
|
||||
"pip install -e .[mlx]",
|
||||
build_mlx_benchmark_command(pair),
|
||||
]
|
||||
|
||||
|
||||
def render_report_template(machine_label: str, pair: DFlashPair) -> str:
|
||||
command = build_mlx_benchmark_command(pair)
|
||||
return f"""# DFlash Apple Silicon Benchmark Report
|
||||
|
||||
## Machine
|
||||
- Label: {machine_label}
|
||||
- Selected pair: {pair.slug}
|
||||
- Base model: {pair.base_model}
|
||||
- Draft model: {pair.draft_model}
|
||||
- Estimated total weight footprint: {pair.estimated_total_weights_gb:.2f} GB
|
||||
|
||||
## Setup
|
||||
```bash
|
||||
python3 -m venv .venv-dflash
|
||||
source .venv-dflash/bin/activate
|
||||
git clone https://github.com/z-lab/dflash.git
|
||||
cd dflash
|
||||
pip install -e .[mlx]
|
||||
{command}
|
||||
```
|
||||
|
||||
## Baseline comparison
|
||||
Compare against **plain MLX or llama.cpp speculative decoding** on the same prompt set.
|
||||
|
||||
## Results
|
||||
- Throughput (tok/s):
|
||||
- Peak memory (GB):
|
||||
- Notes on acceptance / behavior:
|
||||
|
||||
## Verdict
|
||||
Worth operationalizing locally?
|
||||
- [ ] Yes
|
||||
- [ ] No
|
||||
- [ ] Needs more data
|
||||
|
||||
## Recommendation
|
||||
Explain whether this should become part of the local inference stack.
|
||||
"""
|
||||
|
||||
|
||||
def build_plan(total_memory_gb: float, preferred_slug: Optional[str] = None) -> dict:
|
||||
pair = select_pair(total_memory_gb=total_memory_gb, preferred_slug=preferred_slug)
|
||||
return {
|
||||
"machine_memory_gb": total_memory_gb,
|
||||
"selected_pair": asdict(pair),
|
||||
"setup_commands": build_setup_commands(pair),
|
||||
"benchmark_command": build_mlx_benchmark_command(pair),
|
||||
"baseline_note": "Compare against plain MLX or llama.cpp speculative decoding on the same prompt set.",
|
||||
}
|
||||
|
||||
|
||||
def write_output(path: Path, content: str) -> None:
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
path.write_text(content, encoding="utf-8")
|
||||
|
||||
|
||||
def main(argv: Optional[Iterable[str]] = None) -> int:
|
||||
parser = argparse.ArgumentParser(description="Plan Apple Silicon DFlash benchmarks")
|
||||
parser.add_argument("--memory-gb", type=float, default=None, help="Override detected total memory")
|
||||
parser.add_argument("--pair", choices=[pair.slug for pair in SUPPORTED_PAIRS], default=None)
|
||||
parser.add_argument("--machine-label", default="Apple Silicon Mac")
|
||||
parser.add_argument("--format", choices=["json", "markdown"], default="markdown")
|
||||
parser.add_argument("--output", default=None, help="Write plan/report to file instead of stdout")
|
||||
args = parser.parse_args(list(argv) if argv is not None else None)
|
||||
|
||||
memory_gb = args.memory_gb if args.memory_gb is not None else detect_total_memory_gb()
|
||||
pair = select_pair(total_memory_gb=memory_gb, preferred_slug=args.pair)
|
||||
|
||||
if args.format == "json":
|
||||
content = json.dumps(build_plan(memory_gb, preferred_slug=pair.slug), indent=2)
|
||||
else:
|
||||
content = render_report_template(args.machine_label, pair)
|
||||
|
||||
if args.output:
|
||||
write_output(Path(args.output), content)
|
||||
else:
|
||||
print(content)
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
41 benchmarks/reports/dflash_m3max_36gb.md Normal file
@@ -0,0 +1,41 @@
# DFlash Apple Silicon Benchmark Report

## Machine
- Label: M3 Max 36GB
- Selected pair: qwen35-9b
- Base model: Qwen/Qwen3.5-9B
- Draft model: z-lab/Qwen3.5-9B-DFlash
- Estimated total weight footprint: 19.93 GB

## Setup
```bash
python3 -m venv .venv-dflash
source .venv-dflash/bin/activate
git clone https://github.com/z-lab/dflash.git
cd dflash
pip install -e .[mlx]
python -m dflash.benchmark --backend mlx \
    --model Qwen/Qwen3.5-9B \
    --draft-model z-lab/Qwen3.5-9B-DFlash \
    --dataset gsm8k \
    --max-samples 128 \
    --enable-thinking \
    --draft-sliding-window-size 4096
```

## Baseline comparison
Compare against **plain MLX or llama.cpp speculative decoding** on the same prompt set.

## Results
- Throughput (tok/s):
- Peak memory (GB):
- Notes on acceptance / behavior:

## Verdict
Worth operationalizing locally?
- [ ] Yes
- [ ] No
- [ ] Needs more data

## Recommendation
Explain whether this should become part of the local inference stack.
46 benchmarks/reports/dflash_m3max_36gb_qwen35_4b_pilot.md Normal file
@@ -0,0 +1,46 @@
# DFlash Apple Silicon Pilot — Qwen3.5-4B on M3 Max 36GB

Date: 2026-04-21
Machine: Apple M3 Max, 36 GB unified memory
Repo issue: #152

## Command

```bash
source /tmp/dflash-venv/bin/activate
cd /tmp/dflash-upstream
python -m dflash.benchmark --backend mlx \
    --model Qwen/Qwen3.5-4B \
    --draft-model z-lab/Qwen3.5-4B-DFlash \
    --dataset gsm8k \
    --max-samples 1 \
    --enable-thinking \
    --draft-sliding-window-size 4096
```

## Result

- Dataset: `gsm8k`
- Samples: `1`
- Baseline throughput: `22.35 tok/s`
- DFlash throughput: `46.78 tok/s`
- Decoding speedup: `2.09x`
- Average acceptance length: `6.48`

Acceptance length histogram:

```text
['0.3%', '11.1%', '12.7%', '10.4%', '11.7%', '7.6%', '7.0%', '3.8%', '5.1%', '6.3%', '2.8%', '3.8%', '2.2%', '1.9%', '0.9%', '2.5%', '9.8%']
```

## Caveats

- This is a **pilot**, not a decision-grade benchmark.
- Only `1` sample was run, so the throughput number is directional.
- No apples-to-apples baseline against plain MLX or llama.cpp speculative decoding is included yet.
- The planner still recommends trying `Qwen/Qwen3.5-9B + z-lab/Qwen3.5-9B-DFlash` on this machine for the more meaningful fit test.

## Interim takeaway

DFlash is **real on Apple Silicon** and already shows a meaningful local speedup on a small matched pair.
A `2.09x` pilot speedup on `Qwen3.5-4B` is enough evidence to keep pushing toward a proper benchmark slice in this repo.
59 benchmarks/reports/dflash_m3max_36gb_qwen35_9b_timeout.md Normal file
@@ -0,0 +1,59 @@
# DFlash on Apple Silicon Failure Report — Qwen3.5-9B on M3 Max 36GB

Date: 2026-04-21
Machine: Apple M3 Max, 36 GB unified memory
Repo issue: #152

## Command

```bash
source /tmp/dflash-venv/bin/activate
cd /tmp/dflash-upstream
python -m dflash.benchmark --backend mlx \
    --model Qwen/Qwen3.5-9B \
    --draft-model z-lab/Qwen3.5-9B-DFlash \
    --dataset gsm8k \
    --max-samples 1 \
    --enable-thinking \
    --draft-sliding-window-size 4096
```

## Outcome

The benchmark did **not** complete successfully on this machine.

### Failure signature

```text
libc++abi: terminating due to uncaught exception of type std::runtime_error:
[METAL] Command buffer execution failed:
Caused GPU Timeout Error (00000002:kIOGPUCommandBufferCallbackErrorTimeout)
```

Additional shutdown noise:

```text
bash: [11285: 1] tcsetattr: Inappropriate ioctl for device
resource_tracker: There appear to be 1 leaked semaphore objects to clean up at shutdown
```

## Interpretation

This is strong evidence that the `Qwen/Qwen3.5-9B + z-lab/Qwen3.5-9B-DFlash` pair is **not currently stable** on an M3 Max 36GB Mac under the upstream MLX benchmark path, at least with the default settings used here.

It may still be salvageable with:
- smaller block size / different benchmark settings
- a shorter generation target
- a different prompt sample
- upstream MLX / Metal fixes
- newer Apple Silicon hardware

But as of this run, it should be treated as **experimental / failing** on this exact machine.

## Recommendation

For this Mac, the working local proof path is still:
- `Qwen/Qwen3.5-4B`
- `z-lab/Qwen3.5-4B-DFlash`

Use the 4B pair for reproducible local validation while the 9B Metal timeout is investigated separately.
@@ -1,91 +0,0 @@
|
||||
#!/bin/bash
|
||||
# TurboQuant M1 Benchmark Runner (Issue #80)
|
||||
# Runs both f16 and turbo4 KV configs against the same model; collects throughput + memory + perplexity.
|
||||
#
|
||||
# Prerequisites:
|
||||
# - llama-server built from llama-cpp-turboquant fork (feature/turboquant-kv-cache)
|
||||
# - Model GGUF file downloaded
|
||||
# - wikitext-2 corpus in corpora/wiki.test.raw
|
||||
#
|
||||
# Usage:
|
||||
# ./benchmarks/run_benchmark_m1.sh <model_name> <model_path> [llama_server_url]
|
||||
#
|
||||
# Example:
|
||||
# ./benchmarks/run_benchmark_m1.sh qwen3.5:27b ~/models/qwen3.5-27b-q4_k_m.gguf
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
MODEL_NAME="${1:?Usage: $0 <model_name> <model_path> [llama_server_url]}"
|
||||
MODEL_PATH="${2:?Model path required}"
|
||||
LLAMA_SERVER="${3:-http://localhost:8080}"
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
|
||||
LLAMA_BIN="${PROJECT_DIR}/llama.cpp-fork/build/bin"
|
||||
CORPUS="${PROJECT_DIR}/corpora/wiki.test.raw"
|
||||
OUTPUT_DIR="${PROJECT_DIR}/benchmarks"
|
||||
|
||||
echo "=========================================="
|
||||
echo "TurboQuant M1 Benchmark"
|
||||
echo "=========================================="
|
||||
echo "Model: ${MODEL_NAME}"
|
||||
echo "Model path: ${MODEL_PATH}"
|
||||
echo "Server: ${LLAMA_SERVER}"
|
||||
echo "llama bin: ${LLAMA_BIN}"
|
||||
echo "Corpus: ${CORPUS}"
|
||||
echo ""
|
||||
|
||||
# Check prerequisites
|
||||
if [ ! -f "${MODEL_PATH}" ]; then
|
||||
echo "WARNING: Model file not found: ${MODEL_PATH}"
|
||||
echo " Perplexity tests will be skipped."
|
||||
fi
|
||||
|
||||
if [ ! -f "${LLAMA_BIN}/llama-perplexity" ]; then
|
||||
echo "WARNING: llama-perplexity not found at ${LLAMA_BIN}/llama-perplexity"
|
||||
echo " Perplexity tests will be skipped."
|
||||
fi
|
||||
|
||||
if [ ! -f "${CORPUS}" ]; then
|
||||
echo "WARNING: Corpus not found: ${CORPUS}"
|
||||
echo " Download with: curl -L https://raw.githubusercontent.com/pytorch/examples/main/word_language_model/data/wikitext-2/wiki.test.raw -o ${CORPUS}"
|
||||
fi
|
||||
|
||||
# Check server is running
|
||||
echo "Checking llama-server at ${LLAMA_SERVER}..."
|
||||
if curl -sf "${LLAMA_SERVER}/health" > /dev/null 2>&1; then
|
||||
echo " Server is running ✓"
|
||||
else
|
||||
echo " Server not responding. Trying /v1/models..."
|
||||
if curl -sf "${LLAMA_SERVER}/v1/models" > /dev/null 2>&1; then
|
||||
echo " Server is running (no /health endpoint) ✓"
|
||||
else
|
||||
echo " ERROR: llama-server not reachable at ${LLAMA_SERVER}"
|
||||
echo " Start with: llama-server -m ${MODEL_PATH} --port 8080 -ctk turbo4 -ctv turbo4 -c 4096"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Run benchmark
|
||||
echo ""
|
||||
echo "Starting benchmark suite..."
|
||||
python3 "${SCRIPT_DIR}/run_m1_benchmark.py" \
|
||||
--model "${MODEL_NAME}" \
|
||||
--model-path "${MODEL_PATH}" \
|
||||
--backend llama-server \
|
||||
--llama-server "${LLAMA_SERVER}" \
|
||||
--llama-bin "${LLAMA_BIN}" \
|
||||
--corpus "${CORPUS}" \
|
||||
--context 2048 \
|
||||
--threads 4 \
|
||||
--num-predict 256 \
|
||||
--runs 3 \
|
||||
--output-dir "${OUTPUT_DIR}" \
|
||||
--ppl-threshold 0.5
|
||||
|
||||
echo ""
|
||||
echo "=========================================="
|
||||
echo "Done. Results in:"
|
||||
echo " ${OUTPUT_DIR}/m1_benchmark_results.json"
|
||||
echo " ${OUTPUT_DIR}/m1_benchmark_report.md"
|
||||
echo "=========================================="
|
||||
@@ -1,681 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
TurboQuant M1 Benchmark Suite (Issue #80)
|
||||
|
||||
Comprehensive benchmark comparing TurboQuant (turbo4 KV) vs baseline (f16 KV)
|
||||
on Apple M1 Mac. Measures: tokens/sec, memory usage, quality (perplexity).
|
||||
|
||||
Usage:
|
||||
python3 benchmarks/run_m1_benchmark.py \
|
||||
--model qwen3.5:27b \
|
||||
--llama-server http://localhost:8080 \
|
||||
--llama-bin ~/llama-cpp-turboquant/build/bin
|
||||
|
||||
# Skip perplexity (quick throughput/memory only)
|
||||
python3 benchmarks/run_m1_benchmark.py --model qwen3.5:27b --skip-perplexity
|
||||
|
||||
Outputs:
|
||||
- benchmarks/m1_benchmark_results.json
|
||||
- benchmarks/m1_benchmark_report.md
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import threading
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
print("ERROR: requests package required. Install with: pip install requests")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# ── Memory Monitoring ───────────────────────────────────────────────────────
|
||||
|
||||
class MemoryMonitor:
|
||||
"""Monitor memory usage of a process in background."""
|
||||
|
||||
def __init__(self, pid: int, interval: float = 0.5):
|
||||
self.pid = pid
|
||||
self.interval = interval
|
||||
self.samples = []
|
||||
self._stop = threading.Event()
|
||||
self._thread = None
|
||||
|
||||
def start(self):
|
||||
self._stop.clear()
|
||||
self._thread = threading.Thread(target=self._monitor_loop, daemon=True)
|
||||
self._thread.start()
|
||||
|
||||
def stop(self):
|
||||
self._stop.set()
|
||||
if self._thread:
|
||||
self._thread.join(timeout=2)
|
||||
return self.get_stats()
|
||||
|
||||
def _monitor_loop(self):
|
||||
while not self._stop.is_set():
|
||||
try:
|
||||
mem_mb = self._get_memory_mb()
|
||||
if mem_mb > 0:
|
||||
self.samples.append(mem_mb)
|
||||
except Exception:
|
||||
pass
|
||||
time.sleep(self.interval)
|
||||
|
||||
def _get_memory_mb(self) -> float:
|
||||
if sys.platform == "darwin":
|
||||
result = subprocess.run(
|
||||
["ps", "-o", "rss=", "-p", str(self.pid)],
|
||||
capture_output=True, text=True
|
||||
)
|
||||
if result.returncode == 0 and result.stdout.strip():
|
||||
return int(result.stdout.strip()) / 1024
|
||||
else:
|
||||
try:
|
||||
with open(f"/proc/{self.pid}/status") as f:
|
||||
for line in f:
|
||||
if line.startswith("VmRSS:"):
|
||||
return int(line.split()[1]) / 1024
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
return 0.0
|
||||
|
||||
def get_stats(self) -> dict:
|
||||
if not self.samples:
|
||||
return {"avg_mb": 0, "peak_mb": 0, "min_mb": 0, "samples": 0}
|
||||
return {
|
||||
"avg_mb": round(sum(self.samples) / len(self.samples), 1),
|
||||
"peak_mb": round(max(self.samples), 1),
|
||||
"min_mb": round(min(self.samples), 1),
|
||||
"samples": len(self.samples),
|
||||
}
|
||||
|
||||
|
||||
# ── System Info ─────────────────────────────────────────────────────────────
|
||||
|
||||
def get_system_info() -> dict:
|
||||
info = {"platform": sys.platform, "python": sys.version.split()[0]}
|
||||
try:
|
||||
if sys.platform == "darwin":
|
||||
info["chip"] = subprocess.run(
|
||||
["sysctl", "-n", "machdep.cpu.brand_string"],
|
||||
capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
mem_bytes = int(subprocess.run(
|
||||
["sysctl", "-n", "hw.memsize"],
|
||||
capture_output=True, text=True
|
||||
).stdout.strip())
|
||||
info["memory_gb"] = round(mem_bytes / (1024**3), 1)
|
||||
info["cpu_cores"] = os.cpu_count()
|
||||
else:
|
||||
info["cpu"] = subprocess.run(
|
||||
["uname", "-m"], capture_output=True, text=True
|
||||
).stdout.strip()
|
||||
info["cpu_cores"] = os.cpu_count()
|
||||
except Exception:
|
||||
info["error"] = "Could not detect hardware"
|
||||
return info
|
||||
|
||||
|
||||
# ── Benchmark Functions ─────────────────────────────────────────────────────
|
||||
|
||||
def find_llama_server_pid() -> Optional[int]:
|
||||
"""Find PID of running llama-server process."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["pgrep", "-f", "llama-server"],
|
||||
capture_output=True, text=True
|
||||
)
|
||||
if result.stdout.strip():
|
||||
return int(result.stdout.strip().split("\n")[0])
|
||||
except Exception:
|
||||
pass
|
||||
return None
|
||||
|
||||
|
||||
def run_throughput_test(prompt: str, model: str, url: str, kv_type: str,
|
||||
num_predict: int = 256, timeout: int = 120) -> dict:
|
||||
"""Run a single throughput test against llama-server."""
|
||||
api_url = f"{url.rstrip('/')}/v1/chat/completions"
|
||||
start = time.time()
|
||||
ttft = None
|
||||
tokens_per_sec = 0.0
|
||||
|
||||
try:
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": prompt}],
|
||||
"max_tokens": num_predict,
|
||||
"stream": False
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
|
||||
response_text = data.get("choices", [{}])[0].get("message", {}).get("content", "")
|
||||
usage = data.get("usage", {})
|
||||
completion_tokens = usage.get("completion_tokens", 0)
|
||||
prompt_tokens = usage.get("prompt_tokens", 0)
|
||||
|
||||
if elapsed > 0 and completion_tokens > 0:
|
||||
tokens_per_sec = completion_tokens / max(elapsed - 0.1, 0.01)
|
||||
|
||||
return {
|
||||
"response_len": len(response_text),
|
||||
"latency_s": round(elapsed, 3),
|
||||
"tokens_per_sec": round(tokens_per_sec, 2),
|
||||
"completion_tokens": completion_tokens,
|
||||
"prompt_tokens": prompt_tokens,
|
||||
"kv_type": kv_type,
|
||||
"status": "success"
|
||||
}
|
||||
except Exception as e:
|
||||
return {"status": "failed", "error": str(e), "latency_s": round(time.time() - start, 3)}
|
||||
|
||||
|
||||
def run_ollama_test(prompt: str, model: str, url: str,
|
||||
num_predict: int = 256, timeout: int = 120) -> dict:
|
||||
"""Run a single throughput test against Ollama."""
|
||||
api_url = f"{url.rstrip('/')}/api/generate"
|
||||
start = time.time()
|
||||
|
||||
try:
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"prompt": prompt,
|
||||
"stream": False,
|
||||
"options": {"num_predict": num_predict}
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
|
||||
response_text = data.get("response", "")
|
||||
eval_count = data.get("eval_count", 0)
|
||||
eval_duration_ns = data.get("eval_duration", 0)
|
||||
prompt_eval_ns = data.get("prompt_eval_duration", 0)
|
||||
|
||||
tokens_per_sec = 0.0
|
||||
if eval_duration_ns > 0:
|
||||
tokens_per_sec = eval_count / (eval_duration_ns / 1e9)
|
||||
|
||||
ttft = None
|
||||
if prompt_eval_ns > 0:
|
||||
ttft = prompt_eval_ns / 1e9
|
||||
|
||||
return {
|
||||
"response_len": len(response_text),
|
||||
"latency_s": round(elapsed, 3),
|
||||
"ttft_s": round(ttft, 3) if ttft else None,
|
||||
"tokens_per_sec": round(tokens_per_sec, 2),
|
||||
"completion_tokens": eval_count,
|
||||
"prompt_tokens": data.get("prompt_eval_count", 0),
|
||||
"status": "success"
|
||||
}
|
||||
except Exception as e:
|
||||
return {"status": "failed", "error": str(e), "latency_s": round(time.time() - start, 3)}
|
||||
|
||||
|
||||
def run_perplexity_test(llama_bin: str, model_path: str, corpus: str,
|
||||
context: int, kv_type: str, threads: int = 4) -> dict:
|
||||
"""Run llama-perplexity and parse output."""
|
||||
if not os.path.exists(llama_bin):
|
||||
return {"error": f"Binary not found: {llama_bin}", "passed": False}
|
||||
if not os.path.exists(model_path):
|
||||
return {"error": f"Model not found: {model_path}", "passed": False}
|
||||
if not os.path.exists(corpus):
|
||||
return {"error": f"Corpus not found: {corpus}", "passed": False}
|
||||
|
||||
cmd = [
|
||||
llama_bin,
|
||||
"-m", model_path,
|
||||
"-f", corpus,
|
||||
"-c", str(context),
|
||||
"-t", str(threads),
|
||||
"--kv-type", kv_type,
|
||||
]
|
||||
print(f" Command: {' '.join(cmd)}")
|
||||
|
||||
start = time.time()
|
||||
try:
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, timeout=3600)
|
||||
elapsed = time.time() - start
|
||||
output = result.stdout + "\n" + result.stderr
|
||||
|
||||
ppl_match = re.search(r"perplexity[:\s]+(\d+\.?\d*)", output, re.IGNORECASE)
|
||||
ppl = float(ppl_match.group(1)) if ppl_match else None
|
||||
|
||||
token_match = re.search(r"(\d+) tokens", output)
|
||||
tokens = int(token_match.group(1)) if token_match else None
|
||||
|
||||
return {
|
||||
"kv_type": kv_type,
|
||||
"perplexity": ppl,
|
||||
"tokens": tokens,
|
||||
"elapsed_seconds": round(elapsed, 1),
|
||||
"exit_code": result.returncode,
|
||||
"passed": result.returncode == 0 and ppl is not None,
|
||||
"output_tail": output.strip()[-500:] if output else "",
|
||||
}
|
||||
except subprocess.TimeoutExpired:
|
||||
return {"kv_type": kv_type, "perplexity": None, "error": "Timeout",
|
||||
"passed": False, "elapsed_seconds": 3600}
|
||||
|
||||
|
||||
# ── Prompt Sets ─────────────────────────────────────────────────────────────
|
||||
|
||||
THROUGHPUT_PROMPTS = [
|
||||
"Explain the difference between TCP and UDP protocols. Include use cases for each.",
|
||||
"Write a Python function that implements binary search on a sorted list.",
|
||||
"What are the three laws of thermodynamics? Explain each in simple terms.",
|
||||
"Describe the process of photosynthesis step by step.",
|
||||
"Write a recursive function to calculate the Fibonacci sequence with memoization.",
|
||||
]
|
||||
|
||||
|
||||
# ── Report Generation ───────────────────────────────────────────────────────
|
||||
|
||||
def generate_report(results: dict, output_path: str):
|
||||
"""Generate markdown report from benchmark results."""
|
||||
lines = []
|
||||
lines.append("# TurboQuant M1 Benchmark Report")
|
||||
lines.append("")
|
||||
lines.append(f"**Date:** {results['timestamp']}")
|
||||
lines.append(f"**Hardware:** {results['system'].get('chip', 'unknown')}, "
|
||||
f"{results['system'].get('memory_gb', '?')}GB RAM, "
|
||||
f"{results['system'].get('cpu_cores', '?')} cores")
|
||||
lines.append(f"**Model:** {results['model']}")
|
||||
lines.append("")
|
||||
|
||||
# Throughput comparison
|
||||
lines.append("## Throughput Comparison")
|
||||
lines.append("")
|
||||
|
||||
tp = results.get("throughput", {})
|
||||
baseline = tp.get("f16", {})
|
||||
turbo = tp.get("turbo4", {})
|
||||
|
||||
lines.append("| Metric | f16 (baseline) | turbo4 (TurboQuant) | Delta |")
|
||||
lines.append("|:-------|:---------------|:--------------------|:------|")
|
||||
|
||||
def fmt_delta(baseline_val, turbo_val, suffix="", higher_is_better=True):
|
||||
if baseline_val and turbo_val:
|
||||
delta = turbo_val - baseline_val
|
||||
pct = (delta / baseline_val) * 100 if baseline_val else 0
|
||||
sign = "+" if delta >= 0 else ""
|
||||
better = (delta >= 0) if higher_is_better else (delta <= 0)
|
||||
marker = "✓" if better else "⚠"
|
||||
return (f"{baseline_val}{suffix}", f"{turbo_val}{suffix}",
|
||||
f"{sign}{pct:.1f}% {marker}")
|
||||
return ("N/A", "N/A", "N/A")
|
||||
|
||||
b_tok, t_tok, d_tok = fmt_delta(
|
||||
baseline.get("avg_tok_per_sec"), turbo.get("avg_tok_per_sec"), " tok/s")
|
||||
b_lat, t_lat, d_lat = fmt_delta(
|
||||
baseline.get("avg_latency"), turbo.get("avg_latency"), "s", higher_is_better=False)
|
||||
b_ttft, t_ttft, d_ttft = fmt_delta(
|
||||
baseline.get("avg_ttft"), turbo.get("avg_ttft"), "s", higher_is_better=False)
|
||||
|
||||
lines.append(f"| Tokens/sec (avg) | {b_tok} | {t_tok} | {d_tok} |")
|
||||
lines.append(f"| Latency (avg) | {b_lat} | {t_lat} | {d_lat} |")
|
||||
lines.append(f"| TTFT (avg) | {b_ttft} | {t_ttft} | {d_ttft} |")
|
||||
lines.append("")
|
||||
|
||||
# Per-prompt breakdown
|
||||
lines.append("### Per-Prompt Results")
|
||||
lines.append("")
|
||||
lines.append("| Prompt # | f16 tok/s | turbo4 tok/s | Status |")
|
||||
lines.append("|:---------|:----------|:-------------|:-------|")
|
||||
|
||||
baseline_results = baseline.get("results", [])
|
||||
turbo_results = turbo.get("results", [])
|
||||
|
||||
for i, (b, t) in enumerate(zip(baseline_results, turbo_results), 1):
|
||||
b_tps = b.get("tokens_per_sec", 0)
|
||||
t_tps = t.get("tokens_per_sec", 0)
|
||||
if b.get("status") == "success" and t.get("status") == "success":
|
||||
delta_pct = ((t_tps - b_tps) / b_tps * 100) if b_tps else 0
|
||||
status = "✓" if delta_pct > -20 else "⚠"
|
||||
lines.append(f"| {i} | {b_tps:.1f} | {t_tps:.1f} | {status} ({delta_pct:+.1f}%) |")
|
||||
else:
|
||||
err_b = b.get("error", b.get("status", "?"))
|
||||
err_t = t.get("error", t.get("status", "?"))
|
||||
lines.append(f"| {i} | {err_b} | {err_t} | ✗ |")
|
||||
|
||||
lines.append("")
|
||||
|
||||
# Memory comparison
|
||||
lines.append("## Memory Usage")
|
||||
lines.append("")
|
||||
mem = results.get("memory", {})
|
||||
b_mem = mem.get("f16", {})
|
||||
t_mem = mem.get("turbo4", {})
|
||||
|
||||
lines.append("| Metric | f16 (baseline) | turbo4 (TurboQuant) | Savings |")
|
||||
lines.append("|:-------|:---------------|:--------------------|:--------|")
|
||||
|
||||
if b_mem.get("peak_mb") and t_mem.get("peak_mb"):
|
||||
savings = b_mem["peak_mb"] - t_mem["peak_mb"]
|
||||
savings_pct = (savings / b_mem["peak_mb"]) * 100
|
||||
lines.append(f"| Peak RSS | {b_mem['peak_mb']:.0f} MB | {t_mem['peak_mb']:.0f} MB | "
|
||||
f"{savings:.0f} MB ({savings_pct:.1f}%) |")
|
||||
if b_mem.get("avg_mb") and t_mem.get("avg_mb"):
|
||||
lines.append(f"| Avg RSS | {b_mem['avg_mb']:.0f} MB | {t_mem['avg_mb']:.0f} MB | "
|
||||
f"{b_mem['avg_mb'] - t_mem['avg_mb']:.0f} MB |")
|
||||
|
||||
lines.append("")
|
||||
|
||||
# Perplexity
|
||||
ppl = results.get("perplexity", {})
|
||||
if ppl.get("f16") or ppl.get("turbo4"):
|
||||
lines.append("## Quality (Perplexity)")
|
||||
lines.append("")
|
||||
lines.append("| KV Type | Perplexity | Tokens | Time |")
|
||||
lines.append("|:--------|:-----------|:-------|:-----|")
|
||||
|
||||
for kv in ["f16", "turbo4"]:
|
||||
r = ppl.get(kv, {})
|
||||
ppl_val = r.get("perplexity")
|
||||
tokens = r.get("tokens")
|
||||
elapsed = r.get("elapsed_seconds")
|
||||
lines.append(f"| {kv} | {ppl_val:.4f} if ppl_val else 'N/A' | "
|
||||
f"{tokens or 'N/A'} | {elapsed or 'N/A'}s |")
|
||||
|
||||
if ppl.get("delta") is not None:
|
||||
lines.append("")
|
||||
lines.append(f"**PPL Delta (turbo4 - f16):** {ppl['delta']:+.4f}")
|
||||
lines.append(f"**Threshold:** ≤ {ppl.get('threshold', 0.5)}")
|
||||
lines.append(f"**Result:** {'PASS ✓' if ppl.get('pass') else 'FAIL ✗'}")
|
||||
|
||||
lines.append("")
|
||||
|
||||
# Summary
|
||||
lines.append("## Summary")
|
||||
lines.append("")
|
||||
|
||||
# Compute overall verdict
|
||||
throughput_ok = True
|
||||
if turbo.get("avg_tok_per_sec") and baseline.get("avg_tok_per_sec"):
|
||||
ratio = turbo["avg_tok_per_sec"] / baseline["avg_tok_per_sec"]
|
||||
throughput_ok = ratio >= 0.80 # 80% of baseline is acceptable
|
||||
lines.append(f"- **Throughput:** {ratio*100:.0f}% of baseline "
|
||||
f"({'PASS' if throughput_ok else 'BORDERLINE'})")
|
||||
|
||||
memory_ok = True
|
||||
if t_mem.get("peak_mb") and b_mem.get("peak_mb"):
|
||||
savings_pct = (b_mem["peak_mb"] - t_mem["peak_mb"]) / b_mem["peak_mb"] * 100
|
||||
memory_ok = savings_pct > 50 # Expect >50% savings
|
||||
lines.append(f"- **Memory savings:** {savings_pct:.1f}% "
|
||||
f"({'PASS' if memory_ok else 'CHECK'})")
|
||||
|
||||
if ppl.get("pass") is not None:
|
||||
lines.append(f"- **Quality (PPL):** {'PASS' if ppl['pass'] else 'FAIL'} "
|
||||
f"(delta={ppl.get('delta', 'N/A')})")
|
||||
|
||||
lines.append("")
|
||||
|
||||
# Issues discovered
|
||||
issues = results.get("issues_discovered", [])
|
||||
if issues:
|
||||
lines.append("## Issues Discovered")
|
||||
lines.append("")
|
||||
for issue in issues:
|
||||
lines.append(f"- **{issue['title']}**")
|
||||
lines.append(f" {issue.get('description', '')}")
|
||||
lines.append("")
|
||||
|
||||
lines.append("---")
|
||||
lines.append(f"*Generated by run_m1_benchmark.py — Issue #80*")
|
||||
|
||||
report = "\n".join(lines)
|
||||
with open(output_path, "w") as f:
|
||||
f.write(report)
|
||||
return report
|
||||
|
||||
|
||||
# ── Main ────────────────────────────────────────────────────────────────────
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="TurboQuant M1 Benchmark Suite")
|
||||
parser.add_argument("--model", required=True, help="Model name (e.g. qwen3.5:27b)")
|
||||
parser.add_argument("--model-path", default=None,
|
||||
help="Path to GGUF model file (for perplexity)")
|
||||
parser.add_argument("--backend", choices=["llama-server", "ollama"],
|
||||
default="llama-server")
|
||||
parser.add_argument("--llama-server", default="http://localhost:8080",
|
||||
help="llama-server URL")
|
||||
parser.add_argument("--ollama-url", default="http://localhost:11434",
|
||||
help="Ollama URL")
|
||||
parser.add_argument("--llama-bin", default=None,
|
||||
help="Path to llama.cpp build/bin directory")
|
||||
parser.add_argument("--corpus", default="corpora/wiki.test.raw",
|
||||
help="Path to wikitext-2 corpus")
|
||||
parser.add_argument("--context", type=int, default=2048,
|
||||
help="Context length for perplexity test")
|
||||
parser.add_argument("--threads", type=int, default=4,
|
||||
help="Thread count for perplexity")
|
||||
parser.add_argument("--num-predict", type=int, default=256,
|
||||
help="Max tokens to generate per prompt")
|
||||
parser.add_argument("--runs", type=int, default=3,
|
||||
help="Number of runs per config for averaging")
|
||||
parser.add_argument("--skip-perplexity", action="store_true",
|
||||
help="Skip perplexity measurement")
|
||||
parser.add_argument("--output-dir", default="benchmarks",
|
||||
help="Output directory")
|
||||
parser.add_argument("--ppl-threshold", type=float, default=0.5,
|
||||
help="Max acceptable PPL delta")
|
||||
args = parser.parse_args()
|
||||
|
||||
os.makedirs(args.output_dir, exist_ok=True)
|
||||
|
||||
# System info
|
||||
print("Gathering system info...")
|
||||
system_info = get_system_info()
|
||||
print(f" Platform: {system_info.get('chip', system_info.get('cpu', '?'))}")
|
||||
print(f" Memory: {system_info.get('memory_gb', '?')}GB")
|
||||
print(f" Cores: {system_info.get('cpu_cores', '?')}")
|
||||
|
||||
# URL
|
||||
url = args.llama_server if args.backend == "llama-server" else args.ollama_url
|
||||
|
||||
# KV types to test
|
||||
kv_types = ["f16", "turbo4"]
|
||||
|
||||
results = {
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
"system": system_info,
|
||||
"model": args.model,
|
||||
"backend": args.backend,
|
||||
"url": url,
|
||||
"num_predict": args.num_predict,
|
||||
"runs_per_config": args.runs,
|
||||
"throughput": {},
|
||||
"memory": {},
|
||||
"perplexity": {},
|
||||
"issues_discovered": [],
|
||||
}
|
||||
|
||||
# ── Throughput + Memory Tests ────────────────────────────────────────
|
||||
for kv_type in kv_types:
|
||||
print(f"\n{'='*60}")
|
||||
print(f"Testing: {kv_type} KV cache")
|
||||
print(f"{'='*60}")
|
||||
|
||||
run_results = []
|
||||
|
||||
# Find server PID for memory monitoring
|
||||
server_pid = find_llama_server_pid()
|
||||
monitor = None
|
||||
if server_pid:
|
||||
print(f" Monitoring PID {server_pid} for memory")
|
||||
monitor = MemoryMonitor(server_pid)
|
||||
monitor.start()
|
||||
|
||||
for i in range(args.runs):
|
||||
prompt = THROUGHPUT_PROMPTS[i % len(THROUGHPUT_PROMPTS)]
|
||||
print(f" Run {i+1}/{args.runs}...", end=" ", flush=True)
|
||||
|
||||
if args.backend == "llama-server":
|
||||
result = run_throughput_test(prompt, args.model, url, kv_type,
|
||||
num_predict=args.num_predict)
|
||||
else:
|
||||
result = run_ollama_test(prompt, args.model, url,
|
||||
num_predict=args.num_predict)
|
||||
result["kv_type"] = "default" # Ollama doesn't expose KV type
|
||||
|
||||
status = "✓" if result["status"] == "success" else "✗"
|
||||
tps = result.get("tokens_per_sec", 0)
|
||||
print(f"{status} {tps:.1f} tok/s, {result.get('latency_s', 0):.2f}s")
|
||||
run_results.append(result)
|
||||
|
||||
# Stop memory monitor
|
||||
mem_stats = {"avg_mb": 0, "peak_mb": 0, "min_mb": 0, "samples": 0}
|
||||
if monitor:
|
||||
mem_stats = monitor.stop()
|
||||
print(f" Memory: peak={mem_stats['peak_mb']:.0f}MB, "
|
||||
f"avg={mem_stats['avg_mb']:.0f}MB")
|
||||
|
||||
results["memory"][kv_type] = mem_stats
|
||||
|
||||
# Aggregate throughput
|
||||
successful = [r for r in run_results if r["status"] == "success"]
|
||||
if successful:
|
||||
avg_tps = sum(r.get("tokens_per_sec", 0) for r in successful) / len(successful)
|
||||
avg_lat = sum(r.get("latency_s", 0) for r in successful) / len(successful)
|
||||
ttfts = [r.get("ttft_s") for r in successful if r.get("ttft_s")]
|
||||
avg_ttft = sum(ttfts) / len(ttfts) if ttfts else None
|
||||
else:
|
||||
avg_tps = avg_lat = avg_ttft = 0
|
||||
|
||||
results["throughput"][kv_type] = {
|
||||
"avg_tok_per_sec": round(avg_tps, 2),
|
||||
"avg_latency": round(avg_lat, 3),
|
||||
"avg_ttft": round(avg_ttft, 3) if avg_ttft else None,
|
||||
"success_rate": f"{len(successful)}/{len(run_results)}",
|
||||
"results": run_results,
|
||||
}
|
||||
|
||||
# ── Perplexity Tests ─────────────────────────────────────────────────
|
||||
if not args.skip_perplexity:
|
||||
llama_bin = None
|
||||
if args.llama_bin:
|
||||
llama_bin = os.path.join(args.llama_bin, "llama-perplexity")
|
||||
if not os.path.exists(llama_bin):
|
||||
llama_bin = os.path.join(args.llama_bin, "bin", "llama-perplexity")
|
||||
|
||||
model_path = args.model_path
|
||||
|
||||
if llama_bin and os.path.exists(llama_bin) and model_path and os.path.exists(model_path) \
|
||||
and os.path.exists(args.corpus):
|
||||
print(f"\n{'='*60}")
|
||||
print("Perplexity Tests")
|
||||
print(f"{'='*60}")
|
||||
print(f" Model: {model_path}")
|
||||
print(f" Corpus: {args.corpus}")
|
||||
print(f" Context: {args.context}")
|
||||
|
||||
ppl_results = {"f16": {}, "turbo4": {}, "threshold": args.ppl_threshold}
|
||||
for kv_type in kv_types:
|
||||
print(f"\n Running {kv_type} perplexity...")
|
||||
ppl_results[kv_type] = run_perplexity_test(
|
||||
llama_bin, model_path, args.corpus,
|
||||
args.context, kv_type, args.threads
|
||||
)
|
||||
ppl_val = ppl_results[kv_type].get("perplexity")
|
||||
if ppl_val:
|
||||
print(f" PPL = {ppl_val:.4f}")
|
||||
|
||||
# Calculate delta
|
||||
b_ppl = ppl_results.get("f16", {}).get("perplexity")
|
||||
t_ppl = ppl_results.get("turbo4", {}).get("perplexity")
|
||||
if b_ppl and t_ppl:
|
||||
delta = t_ppl - b_ppl
|
||||
ppl_results["delta"] = round(delta, 4)
|
||||
ppl_results["pass"] = delta <= args.ppl_threshold
|
||||
print(f"\n Delta: {delta:+.4f} (threshold: ≤{args.ppl_threshold})")
|
||||
print(f" Result: {'PASS ✓' if ppl_results['pass'] else 'FAIL ✗'}")
|
||||
|
||||
results["perplexity"] = ppl_results
|
||||
else:
|
||||
print("\nSkipping perplexity: need --llama-bin, --model-path, and corpus file")
|
||||
if not llama_bin or not os.path.exists(llama_bin):
|
||||
print(f" llama-perplexity: {llama_bin or 'not specified'}")
|
||||
if not model_path or not os.path.exists(model_path):
|
||||
print(f" model path: {model_path or 'not specified (use --model-path)'}")
|
||||
if not os.path.exists(args.corpus):
|
||||
print(f" corpus: {args.corpus}")
|
||||
results["perplexity"] = {"skipped": True, "reason": "missing binaries/model/corpus"}
|
||||
|
||||
# ── Issue Detection ──────────────────────────────────────────────────
|
||||
tp = results["throughput"]
|
||||
baseline_tps = tp.get("f16", {}).get("avg_tok_per_sec", 0)
|
||||
turbo_tps = tp.get("turbo4", {}).get("avg_tok_per_sec", 0)
|
||||
|
||||
if baseline_tps > 0 and turbo_tps > 0:
|
||||
ratio = turbo_tps / baseline_tps
|
||||
if ratio < 0.75:
|
||||
results["issues_discovered"].append({
|
||||
"title": "turbo4 throughput below 75% of baseline",
|
||||
"description": f"turbo4={turbo_tps:.1f} tok/s vs f16={baseline_tps:.1f} tok/s "
|
||||
f"({ratio*100:.0f}%). Investigate Metal kernel overhead.",
|
||||
})
|
||||
|
||||
mem = results["memory"]
|
||||
b_peak = mem.get("f16", {}).get("peak_mb", 0)
|
||||
t_peak = mem.get("turbo4", {}).get("peak_mb", 0)
|
||||
if b_peak > 0 and t_peak > 0:
|
||||
savings_pct = (b_peak - t_peak) / b_peak * 100
|
||||
if savings_pct < 50:
|
||||
results["issues_discovered"].append({
|
||||
"title": "turbo4 memory savings below expected 73%",
|
||||
"description": f"Observed {savings_pct:.1f}% savings (expected ~73%). "
|
||||
f"Check if turbo4 KV is actually active.",
|
||||
})
|
||||
|
||||
ppl = results.get("perplexity", {})
|
||||
if ppl.get("delta") and ppl["delta"] > args.ppl_threshold:
|
||||
results["issues_discovered"].append({
|
||||
"title": f"PPL regression exceeds threshold ({ppl['delta']:.4f} > {args.ppl_threshold})",
|
||||
"description": f"Quality degradation detected. Delta={ppl['delta']:.4f}. "
|
||||
f"Consider asymmetric K/V (q8_0/turbo4) or per-layer adaptive mode.",
|
||||
})
|
||||
|
||||
# ── Save Results ─────────────────────────────────────────────────────
|
||||
results_path = os.path.join(args.output_dir, "m1_benchmark_results.json")
|
||||
with open(results_path, "w") as f:
|
||||
json.dump(results, f, indent=2)
|
||||
print(f"\nResults saved to {results_path}")
|
||||
|
||||
# ── Generate Report ──────────────────────────────────────────────────
|
||||
report_path = os.path.join(args.output_dir, "m1_benchmark_report.md")
|
||||
report = generate_report(results, report_path)
|
||||
print(f"Report saved to {report_path}")
|
||||
|
||||
# Print summary
|
||||
print(f"\n{'='*60}")
|
||||
print("SUMMARY")
|
||||
print(f"{'='*60}")
|
||||
if baseline_tps and turbo_tps:
|
||||
ratio = turbo_tps / baseline_tps
|
||||
print(f" Throughput: {turbo_tps:.1f} tok/s ({ratio*100:.0f}% of baseline {baseline_tps:.1f})")
|
||||
if b_peak and t_peak:
|
||||
savings = (b_peak - t_peak) / b_peak * 100
|
||||
print(f" Memory: {t_peak:.0f}MB peak ({savings:.0f}% savings)")
|
||||
if ppl.get("delta") is not None:
|
||||
print(f" Quality: PPL delta={ppl['delta']:+.4f} ({'PASS' if ppl['pass'] else 'FAIL'})")
|
||||
if results["issues_discovered"]:
|
||||
print(f" Issues: {len(results['issues_discovered'])} found")
|
||||
print(f"{'='*60}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
125 docs/DFLASH_APPLE_SILICON.md Normal file
@@ -0,0 +1,125 @@
|
||||
# DFlash on Apple Silicon
|
||||
|
||||
This repo now carries a **Gitea-first benchmark harness** for evaluating whether upstream **DFlash on MLX** is worth adding to the local Apple Silicon inference stack.
|
||||
|
||||
## Why
|
||||
|
||||
The headline `Kimi K2.6 + DFlash` benchmark was measured on `8x MI300X` with huge RAM and ROCm patches. That exact recipe is not a fit for a `36 GB` Apple Silicon Mac.
|
||||
|
||||
What *is* relevant locally is the upstream `z-lab/dflash` MLX path, which can benchmark smaller matched target/draft pairs that fit on Apple Silicon.
|
||||
|
||||
## Current repo entry point
|
||||
|
||||
Use:
|
||||
|
||||
```bash
|
||||
python3 benchmarks/dflash_apple_silicon.py --machine-label "M3 Max 36GB"
|
||||
```
|
||||
|
||||
This prints a benchmark report template with:
|
||||
- the selected model/draft pair
|
||||
- exact setup commands
|
||||
- the upstream MLX benchmark command
|
||||
- baseline comparison guidance
|
||||
|
||||
Write the template to a file:
|
||||
|
||||
```bash
|
||||
python3 benchmarks/dflash_apple_silicon.py \
|
||||
--machine-label "M3 Max 36GB" \
|
||||
--output benchmarks/reports/dflash_m3max_36gb.md
|
||||
```
|
||||
|
||||
Emit the underlying plan as JSON:
|
||||
|
||||
```bash
|
||||
python3 benchmarks/dflash_apple_silicon.py --format json
|
||||
```
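
For reference, the emitted plan carries the fields assembled by `build_plan` in `benchmarks/dflash_apple_silicon.py`. A minimal sanity check of that shape (a sketch; assumes it is run from the repo root):

```python
# Sketch: run the planner and confirm the top-level keys of the JSON plan.
# Keys mirror build_plan() in benchmarks/dflash_apple_silicon.py.
import json
import subprocess

raw = subprocess.check_output(
    ["python3", "benchmarks/dflash_apple_silicon.py", "--format", "json"]
)
plan = json.loads(raw)
print(sorted(plan))
# Expected: ['baseline_note', 'benchmark_command', 'machine_memory_gb',
#            'selected_pair', 'setup_commands']
```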
|
||||
|
||||
## Selection logic
|
||||
|
||||
Today the planner uses two upstream-supported MLX pairs:
|
||||
|
||||
- `qwen35-9b`
|
||||
- base: `Qwen/Qwen3.5-9B`
|
||||
- draft: `z-lab/Qwen3.5-9B-DFlash`
|
||||
- chosen for ~28 GB+ machines
|
||||
- `qwen35-4b`
|
||||
- base: `Qwen/Qwen3.5-4B`
|
||||
- draft: `z-lab/Qwen3.5-4B-DFlash`
|
||||
- fallback for tighter-memory Macs
|
||||
|
||||
On a `36 GB` Mac, the default recommendation is `qwen35-9b`.
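
The same selection can be driven from Python (a minimal sketch; assumes the repo root is on `sys.path` so `benchmarks/dflash_apple_silicon.py` is importable):

```python
# Sketch: reproduce the planner's pair selection using the functions
# defined in benchmarks/dflash_apple_silicon.py in this PR.
from benchmarks.dflash_apple_silicon import detect_total_memory_gb, select_pair

memory_gb = detect_total_memory_gb()           # ~36.0 on an M3 Max 36GB
pair = select_pair(total_memory_gb=memory_gb)  # largest pair whose minimum fits
print(pair.slug, pair.base_model, pair.draft_model)
# On a 36 GB machine: qwen35-9b Qwen/Qwen3.5-9B z-lab/Qwen3.5-9B-DFlash
```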
|
||||
|
||||
## Pilot result already landed
|
||||
|
||||
A first live Apple Silicon run has already been captured in:
|
||||
|
||||
- `benchmarks/reports/dflash_m3max_36gb_qwen35_4b_pilot.md`
|
||||
|
||||
Pilot command:
|
||||
|
||||
```bash
|
||||
python -m dflash.benchmark --backend mlx \
|
||||
--model Qwen/Qwen3.5-4B \
|
||||
--draft-model z-lab/Qwen3.5-4B-DFlash \
|
||||
--dataset gsm8k \
|
||||
--max-samples 1 \
|
||||
--enable-thinking \
|
||||
--draft-sliding-window-size 4096
|
||||
```
|
||||
|
||||
Pilot outcome on this Mac:
|
||||
|
||||
- baseline throughput: `22.35 tok/s`
|
||||
- DFlash throughput: `46.78 tok/s`
|
||||
- decoding speedup: `2.09x`
|
||||
|
||||
Treat that as a **directional proof**, not a final decision benchmark. The next step is the fuller comparison slice against plain MLX or llama.cpp speculative decoding.
|
||||
|
||||
## Known 9B failure on this machine
|
||||
|
||||
A follow-up live run with:
|
||||
|
||||
- `Qwen/Qwen3.5-9B`
|
||||
- `z-lab/Qwen3.5-9B-DFlash`
|
||||
|
||||
failed on this same M3 Max 36GB Mac with:
|
||||
|
||||
```text
|
||||
[METAL] Command buffer execution failed:
|
||||
Caused GPU Timeout Error (00000002:kIOGPUCommandBufferCallbackErrorTimeout)
|
||||
```
|
||||
|
||||
That failure is recorded in:
|
||||
|
||||
- `benchmarks/reports/dflash_m3max_36gb_qwen35_9b_timeout.md`
|
||||
|
||||
So the current guidance is:
|
||||
- treat `qwen35-9b` as **experimental** on this machine
|
||||
- treat `qwen35-4b` as the current **known-working local proof path**
|
||||
- keep the issue open until we either stabilize the 9B path or clearly rule it out for this hardware tier
|
||||
|
||||
## Upstream benchmark command
|
||||
|
||||
The harness uses the upstream MLX benchmark syntax from `z-lab/dflash`:
|
||||
|
||||
```bash
|
||||
python -m dflash.benchmark --backend mlx \
|
||||
--model Qwen/Qwen3.5-9B \
|
||||
--draft-model z-lab/Qwen3.5-9B-DFlash \
|
||||
--dataset gsm8k \
|
||||
--max-samples 128 \
|
||||
--enable-thinking \
|
||||
--draft-sliding-window-size 4096
|
||||
```
|
||||
|
||||
## What remains
|
||||
|
||||
This PR adds the **planner + report template** so the benchmark is reproducible from the repo.
|
||||
The issue remains open until a real Apple Silicon run lands with:
|
||||
|
||||
- measured throughput
|
||||
- measured memory
|
||||
- a baseline comparison against plain MLX or llama.cpp speculative decoding
|
||||
- a recommendation on whether to operationalize DFlash locally
|
||||
548 evolution/quant_selector.py Normal file
@@ -0,0 +1,548 @@
|
||||
"""Auto-select TurboQuant compression level based on available VRAM/RAM.
|
||||
|
||||
Detects hardware resources at startup and picks the highest quality
|
||||
quantization level that fits within available memory. Supports Apple
|
||||
Silicon unified memory, NVIDIA GPUs (via nvidia-smi), and CPU-only fallback.
|
||||
|
||||
Usage:
|
||||
from evolution.quant_selector import select_quant_level
|
||||
|
||||
selection = select_quant_level(model_size_gb=14.0, context_length=32768)
|
||||
print(selection.level) # "turbo4"
|
||||
print(selection.reasoning) # "M4 Max 36GB unified: turbo4 fits 14.0GB model + ..."
|
||||
print(selection.env_vars) # {"TURBO_LAYER_ADAPTIVE": "7"}
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ── Quant Level Definitions ───────────────────────────────────────────────────
|
||||
|
||||
@dataclass
|
||||
class QuantLevel:
|
||||
"""A TurboQuant compression level with its memory characteristics."""
|
||||
name: str # e.g. "turbo4"
|
||||
bits_per_channel: float # e.g. 3.5 for turbo4
|
||||
compression_ratio: float # vs uncompressed KV cache
|
||||
quality_label: str # "best", "high", "balanced", "fast"
|
||||
layer_adaptive: int # TURBO_LAYER_ADAPTIVE value (0-7)
|
||||
kv_type: str # -ctk/-ctv flag value
|
||||
min_memory_headroom_gb: float # Minimum free memory to recommend this level
|
||||
description: str = ""
|
||||
|
||||
|
||||
# Ordered from highest quality to most aggressive compression
|
||||
QUANT_LEVELS = [
|
||||
QuantLevel(
|
||||
name="turbo4",
|
||||
bits_per_channel=3.5,
|
||||
compression_ratio=4.2,
|
||||
quality_label="best",
|
||||
layer_adaptive=7,
|
||||
kv_type="turbo4",
|
||||
min_memory_headroom_gb=4.0,
|
||||
description="PolarQuant + QJL 4-bit. Best quality, ~4.2x KV compression."
|
||||
),
|
||||
QuantLevel(
|
||||
name="turbo3",
|
||||
bits_per_channel=2.5,
|
||||
compression_ratio=6.0,
|
||||
quality_label="high",
|
||||
layer_adaptive=5,
|
||||
kv_type="turbo3",
|
||||
min_memory_headroom_gb=3.0,
|
||||
description="3-bit TurboQuant. High quality, ~6x KV compression."
|
||||
),
|
||||
QuantLevel(
|
||||
name="turbo2",
|
||||
bits_per_channel=1.5,
|
||||
compression_ratio=10.0,
|
||||
quality_label="balanced",
|
||||
layer_adaptive=3,
|
||||
kv_type="turbo2",
|
||||
min_memory_headroom_gb=2.0,
|
||||
description="2-bit TurboQuant. Balanced, ~10x KV compression."
|
||||
),
|
||||
QuantLevel(
|
||||
name="q4_0",
|
||||
bits_per_channel=4.0,
|
||||
compression_ratio=3.5,
|
||||
quality_label="fast",
|
||||
layer_adaptive=0,
|
||||
kv_type="q4_0",
|
||||
min_memory_headroom_gb=1.5,
|
||||
description="Standard 4-bit quant. Fast fallback, no TurboQuant."
|
||||
),
|
||||
]
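# Worked example of the selection headroom (see select_quant_level below):
# on a 36 GB unified-memory Mac with a 14 GB quantized model and ~1.3 GB of
# turbo4 KV cache at 32k context, headroom is roughly 36 - 15.3 = 20.7 GB,
# which clears turbo4's 4.0 GB minimum, so the best-quality level is chosen.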
|
||||
|
||||
|
||||
# ── Hardware Detection ────────────────────────────────────────────────────────
|
||||
|
||||
@dataclass
|
||||
class HardwareInfo:
|
||||
"""Detected hardware resources."""
|
||||
total_memory_gb: float
|
||||
available_memory_gb: float
|
||||
gpu_memory_gb: Optional[float] = None
|
||||
gpu_name: Optional[str] = None
|
||||
is_apple_silicon: bool = False
|
||||
chip_name: Optional[str] = None
|
||||
cpu_cores: int = 0
|
||||
detection_method: str = ""
|
||||
|
||||
|
||||
def detect_hardware() -> HardwareInfo:
|
||||
"""Detect available memory and GPU resources."""
|
||||
system = platform.system()
|
||||
|
||||
if system == "Darwin":
|
||||
return _detect_apple_silicon()
|
||||
elif system == "Linux":
|
||||
return _detect_linux()
|
||||
else:
|
||||
return _detect_generic(system)
|
||||
|
||||
|
||||
def _detect_apple_silicon() -> HardwareInfo:
|
||||
"""Detect Apple Silicon unified memory."""
|
||||
info = HardwareInfo(
|
||||
total_memory_gb=0,
|
||||
available_memory_gb=0,
|
||||
is_apple_silicon=True,
|
||||
detection_method="sysctl",
|
||||
)
|
||||
|
||||
try:
|
||||
# Get total memory
|
||||
result = subprocess.run(
|
||||
["sysctl", "-n", "hw.memsize"],
|
||||
capture_output=True, text=True, timeout=5
|
||||
)
|
||||
if result.returncode == 0:
|
||||
info.total_memory_gb = int(result.stdout.strip()) / (1024**3)
|
||||
|
||||
# Get chip name
|
||||
result = subprocess.run(
|
||||
["sysctl", "-n", "machdep.cpu.brand_string"],
|
||||
capture_output=True, text=True, timeout=5
|
||||
)
|
||||
if result.returncode == 0:
|
||||
info.chip_name = result.stdout.strip()
|
||||
|
||||
# Try to get GPU name (Apple Silicon)
|
||||
result = subprocess.run(
|
||||
["system_profiler", "SPDisplaysDataType"],
|
||||
capture_output=True, text=True, timeout=10
|
||||
)
|
||||
if result.returncode == 0:
|
||||
for line in result.stdout.split("\n"):
|
||||
if "Chipset" in line or "GPU" in line:
|
||||
info.gpu_name = line.split(":")[-1].strip()
|
||||
break
|
||||
|
||||
# Estimate available memory (vm_stat)
|
||||
result = subprocess.run(
|
||||
["vm_stat"],
|
||||
capture_output=True, text=True, timeout=5
|
||||
)
|
||||
if result.returncode == 0:
|
||||
page_size = 16384  # Apple Silicon uses 16 KiB pages (vm_stat's header line reports the actual size)
|
||||
free_pages = 0
|
||||
for line in result.stdout.split("\n"):
|
||||
if "Pages free:" in line:
|
||||
try:
|
||||
free_pages = int(line.split(":")[-1].strip().rstrip("."))
|
||||
except ValueError:
|
||||
pass
|
||||
# Available ≈ free + some speculative (conservative: just free)
|
||||
info.available_memory_gb = (free_pages * page_size) / (1024**3)
|
||||
|
||||
# Fallback if vm_stat parsing failed
|
||||
        if info.available_memory_gb < 1:
            # Conservative: 70% of total
            info.available_memory_gb = info.total_memory_gb * 0.70

        # Apple Silicon shares memory — GPU memory = total memory
        info.gpu_memory_gb = info.total_memory_gb

        # Detect CPU cores
        result = subprocess.run(
            ["sysctl", "-n", "hw.ncpu"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.cpu_cores = int(result.stdout.strip())

    except Exception as e:
        logger.warning(f"Apple Silicon detection failed: {e}")
        # Fallback
        info.total_memory_gb = 16.0
        info.available_memory_gb = 12.0
        info.detection_method = "fallback"

    return info


def _detect_linux() -> HardwareInfo:
    """Detect Linux system with optional NVIDIA GPU."""
    info = HardwareInfo(
        total_memory_gb=0,
        available_memory_gb=0,
        detection_method="proc",
    )

    try:
        # Read /proc/meminfo
        with open("/proc/meminfo", "r") as f:
            meminfo = f.read()

        for line in meminfo.split("\n"):
            if line.startswith("MemTotal:"):
                kb = int(line.split()[1])
                info.total_memory_gb = kb / (1024 * 1024)
            elif line.startswith("MemAvailable:"):
                kb = int(line.split()[1])
                info.available_memory_gb = kb / (1024 * 1024)

        # CPU cores
        info.cpu_cores = os.cpu_count() or 1

        # Check for NVIDIA GPU
        try:
            result = subprocess.run(
                ["nvidia-smi", "--query-gpu=name,memory.total,memory.free",
                 "--format=csv,noheader,nounits"],
                capture_output=True, text=True, timeout=10
            )
            if result.returncode == 0 and result.stdout.strip():
                lines = result.stdout.strip().split("\n")
                if lines:
                    parts = lines[0].split(", ")
                    if len(parts) >= 3:
                        info.gpu_name = parts[0].strip()
                        info.gpu_memory_gb = float(parts[1]) / 1024  # MB to GB
                        gpu_free = float(parts[2]) / 1024
                        # Use GPU free for VRAM-based selection
                        info.available_memory_gb = max(info.available_memory_gb, gpu_free)
                        info.detection_method = "nvidia-smi"
        except (FileNotFoundError, subprocess.TimeoutExpired):
            pass  # No NVIDIA GPU

    except Exception as e:
        logger.warning(f"Linux detection failed: {e}")
        info.total_memory_gb = 16.0
        info.available_memory_gb = 12.0
        info.detection_method = "fallback"

    return info


def _detect_generic(system: str) -> HardwareInfo:
    """Fallback detection for unknown systems."""
    import psutil
    mem = psutil.virtual_memory()
    return HardwareInfo(
        total_memory_gb=mem.total / (1024**3),
        available_memory_gb=mem.available / (1024**3),
        cpu_cores=os.cpu_count() or 1,
        detection_method="psutil",
    )


# ── KV Cache Memory Estimation ───────────────────────────────────────────────

def estimate_kv_cache_gb(
    context_length: int,
    num_layers: int = 48,
    num_kv_heads: int = 8,
    head_dim: int = 128,
    bits_per_channel: float = 3.5,
) -> float:
    """Estimate KV cache memory for given parameters.

    Formula: 2 (K+V) × layers × kv_heads × head_dim × context_length × bits/8
    """
    bytes_per_element = bits_per_channel / 8.0
    total_bytes = 2 * num_layers * num_kv_heads * head_dim * context_length * bytes_per_element
    return total_bytes / (1024**3)
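
# Worked example (comment only; the numbers follow directly from the formula above):
#   estimate_kv_cache_gb(32768)   # defaults: 48 layers, 8 KV heads, head_dim 128, 3.5 b/ch
#   = 2 * 48 * 8 * 128 * 32768 * (3.5 / 8) / 1024**3
#   ≈ 1.31 GiB
# The same cache held at fp16 (16 bits per channel) would be ≈ 6.0 GiB, which is the
# headroom the turbo KV levels are trading on.
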
def estimate_model_memory_gb(model_size_gb: float, quant_type: str = "q4_k_m") -> float:
    """Estimate model weights memory. Returns loaded size in GB.

    This is a rough estimate — actual usage depends on the exact quant format.
    """
    # Reference ratios vs fp16 for common quant formats. Kept for documentation;
    # the caller currently passes the already-quantized size, so the value is
    # returned unchanged and quant_type is not consulted yet.
    quant_multipliers = {
        "f16": 1.0,
        "q8_0": 0.5,
        "q6_k": 0.42,
        "q5_k_m": 0.37,
        "q4_k_m": 0.32,
        "q3_k_m": 0.27,
        "q2_k": 0.22,
    }
    _ = quant_multipliers, quant_type  # explicitly unused for now
    return model_size_gb
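
# Worked example (comment only): the ratio table above is relative to fp16 weights.
# A hypothetical 27B-parameter model is ≈ 54 GB at fp16 (2 bytes per parameter), so
# at the q4_k_m ratio of 0.32 the loaded weights come to ≈ 17 GB. Callers pass that
# already-quantized figure in, which is why the function returns it unchanged.
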
# ── Selection Logic ───────────────────────────────────────────────────────────

@dataclass
class QuantSelection:
    """Result of quantization level selection."""
    level: QuantLevel
    hardware: HardwareInfo
    reasoning: str
    total_required_gb: float
    available_gb: float
    headroom_gb: float
    env_vars: dict = field(default_factory=dict)
    server_flags: dict = field(default_factory=dict)
    warnings: list = field(default_factory=list)


def select_quant_level(
    model_size_gb: float = 14.0,
    context_length: int = 32768,
    num_layers: int = 48,
    num_kv_heads: int = 8,
    head_dim: int = 128,
    preferred_level: Optional[str] = None,
    force_cpu: bool = False,
) -> QuantSelection:
    """Select the best quantization level for available hardware.

    Args:
        model_size_gb: Size of the model weights in GB
        context_length: Target context length in tokens
        num_layers: Number of transformer layers
        num_kv_heads: Number of KV attention heads
        head_dim: Dimension per attention head
        preferred_level: Force a specific level (still checks if it fits)
        force_cpu: If True, ignore GPU memory

    Returns:
        QuantSelection with the chosen level and reasoning
    """
    hw = detect_hardware()

    if force_cpu:
        hw.gpu_memory_gb = None
        hw.gpu_name = None

    # Use the most restrictive memory constraint
    # For Apple Silicon: unified memory, use total
    # For NVIDIA: use GPU VRAM
    # For CPU-only: use system RAM
    if hw.gpu_memory_gb and hw.gpu_name:
        memory_pool_gb = hw.gpu_memory_gb
        memory_label = f"{hw.gpu_name} {hw.gpu_memory_gb:.0f}GB VRAM"
    elif hw.is_apple_silicon:
        memory_pool_gb = hw.total_memory_gb
        memory_label = f"{hw.chip_name or 'Apple Silicon'} {hw.total_memory_gb:.0f}GB unified"
    else:
        memory_pool_gb = hw.total_memory_gb
        memory_label = f"{hw.cpu_cores}c CPU {hw.total_memory_gb:.0f}GB RAM"

    model_mem = estimate_model_memory_gb(model_size_gb)

    # Try levels from best quality to most compressed
    chosen = None
    for level in QUANT_LEVELS:
        if preferred_level and level.name != preferred_level:
            continue

        kv_mem = estimate_kv_cache_gb(
            context_length, num_layers, num_kv_heads, head_dim,
            level.bits_per_channel
        )
        total_required = model_mem + kv_mem
        headroom = memory_pool_gb - total_required

        if headroom >= level.min_memory_headroom_gb:
            chosen = level
            break

        if preferred_level and level.name == preferred_level:
            # User forced this level but it doesn't fit
            chosen = level
            break

    if chosen is None:
        # Nothing fits — pick the most aggressive compression, not the q4_0 fallback.
        chosen = max(QUANT_LEVELS, key=lambda level: level.compression_ratio)
        logger.warning(f"No quant level fits in {memory_pool_gb:.1f}GB. Using {chosen.name}.")

    # Calculate final numbers
    kv_mem = estimate_kv_cache_gb(
        context_length, num_layers, num_kv_heads, head_dim,
        chosen.bits_per_channel
    )
    total_required = model_mem + kv_mem
    headroom = memory_pool_gb - total_required

    # Build reasoning (context reported in K tokens, e.g. 32768 -> 32K)
    reasoning_parts = [
        f"{memory_label}:",
        f"{chosen.name} ({chosen.quality_label}, {chosen.bits_per_channel:.1f}b/ch,",
        f"{chosen.compression_ratio:.1f}x compression)",
        f"fits {model_mem:.1f}GB model + {kv_mem:.1f}GB KV cache",
        f"@ {context_length // 1024}K context = {total_required:.1f}GB / {memory_pool_gb:.0f}GB",
        f"({headroom:.1f}GB headroom)"
    ]
    reasoning = " ".join(reasoning_parts)

    # Build environment variables for llama.cpp
    env_vars = {
        "TURBO_LAYER_ADAPTIVE": str(chosen.layer_adaptive),
    }

    # Build server flags
    server_flags = {
        "-ctk": chosen.kv_type,
        "-ctv": chosen.kv_type,
        "-c": str(context_length),
    }

    # Warnings
    warnings = []
    if headroom < 2.0:
        warnings.append(
            f"Low headroom ({headroom:.1f}GB). Consider reducing context length or model size."
        )
    if headroom < 0:
        warnings.append(
            f"OVERCOMMITTED: needs {total_required:.1f}GB but only {memory_pool_gb:.0f}GB available. "
            f"Inference may fail or swap heavily."
        )

    selection = QuantSelection(
        level=chosen,
        hardware=hw,
        reasoning=reasoning,
        total_required_gb=total_required,
        available_gb=memory_pool_gb,
        headroom_gb=headroom,
        env_vars=env_vars,
        server_flags=server_flags,
        warnings=warnings,
    )

    logger.info(f"Quant selection: {reasoning}")
    for w in warnings:
        logger.warning(w)

    return selection
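
# Usage sketch (comment only; the binary name and model path are illustrative
# assumptions, not something this module ships): a caller can expand a selection
# into a llama.cpp server launch roughly like this:
#
#   sel = select_quant_level(model_size_gb=14.0, context_length=32768)
#   env = {**os.environ, **sel.env_vars}              # e.g. TURBO_LAYER_ADAPTIVE=...
#   flags = [arg for k, v in sel.server_flags.items() for arg in (k, v)]
#   subprocess.run(["llama-server", "-m", "model.gguf", *flags], env=env)
#
# server_flags carries -ctk/-ctv (KV cache type) and -c (context size), so the
# launched server matches the memory budget computed above.
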
# ── CLI ───────────────────────────────────────────────────────────────────────

def main():
    """CLI entry point for quant level selection."""
    import argparse
    import json

    parser = argparse.ArgumentParser(
        description="Auto-select TurboQuant compression level based on available hardware"
    )
    parser.add_argument("--model-size", type=float, default=14.0,
                        help="Model size in GB (default: 14.0)")
    parser.add_argument("--context", type=int, default=32768,
                        help="Target context length (default: 32768)")
    parser.add_argument("--layers", type=int, default=48,
                        help="Number of transformer layers (default: 48)")
    parser.add_argument("--kv-heads", type=int, default=8,
                        help="Number of KV attention heads (default: 8)")
    parser.add_argument("--head-dim", type=int, default=128,
                        help="Dimension per attention head (default: 128)")
    parser.add_argument("--prefer", type=str, default=None,
                        choices=[level.name for level in QUANT_LEVELS],
                        help="Prefer a specific quant level")
    parser.add_argument("--force-cpu", action="store_true",
                        help="Ignore GPU, use CPU memory only")
    parser.add_argument("--json", action="store_true",
                        help="JSON output for automation")
    parser.add_argument("--detect-only", action="store_true",
                        help="Only detect hardware, don't select")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO, format="%(message)s")

    if args.detect_only:
        hw = detect_hardware()
        if args.json:
            print(json.dumps(hw.__dict__, default=str, indent=2))
        else:
            print(f"Total memory: {hw.total_memory_gb:.1f} GB")
            print(f"Available: {hw.available_memory_gb:.1f} GB")
            if hw.gpu_memory_gb:
                print(f"GPU memory: {hw.gpu_memory_gb:.1f} GB")
            if hw.gpu_name:
                print(f"GPU: {hw.gpu_name}")
            if hw.is_apple_silicon:
                print(f"Chip: {hw.chip_name or 'Apple Silicon'}")
            print(f"CPU cores: {hw.cpu_cores}")
            print(f"Detection: {hw.detection_method}")
        return

    selection = select_quant_level(
        model_size_gb=args.model_size,
        context_length=args.context,
        num_layers=args.layers,
        num_kv_heads=args.kv_heads,
        head_dim=args.head_dim,
        preferred_level=args.prefer,
        force_cpu=args.force_cpu,
    )

    if args.json:
        result = {
            "level": selection.level.name,
            "bits_per_channel": selection.level.bits_per_channel,
            "compression_ratio": selection.level.compression_ratio,
            "quality": selection.level.quality_label,
            "reasoning": selection.reasoning,
            "total_required_gb": round(selection.total_required_gb, 2),
            "available_gb": round(selection.available_gb, 1),
            "headroom_gb": round(selection.headroom_gb, 2),
            "env_vars": selection.env_vars,
            "server_flags": selection.server_flags,
            "warnings": selection.warnings,
            "hardware": {
                "total_memory_gb": round(selection.hardware.total_memory_gb, 1),
                "gpu_name": selection.hardware.gpu_name,
                "is_apple_silicon": selection.hardware.is_apple_silicon,
                "chip_name": selection.hardware.chip_name,
                "cpu_cores": selection.hardware.cpu_cores,
            },
        }
        print(json.dumps(result, indent=2))
    else:
        print(f"Selected: {selection.level.name} ({selection.level.quality_label})")
        print(f"  {selection.reasoning}")
        print()
        print("Environment variables:")
        for k, v in selection.env_vars.items():
            print(f"  export {k}={v}")
        print()
        print("Server flags:")
        for k, v in selection.server_flags.items():
            print(f"  {k} {v}")
        if selection.warnings:
            print()
            for w in selection.warnings:
                print(f"  WARNING: {w}")


if __name__ == "__main__":
    main()
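
# Example invocations (comment only; paths assume the module lives at
# evolution/quant_selector.py, as the tests import it, and that you run from the
# repository root):
#   python evolution/quant_selector.py --detect-only
#   python evolution/quant_selector.py --model-size 14 --context 32768 --json
# The JSON form prints the result dict assembled above, which automation can feed
# straight into server startup scripts.
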
3
tests/conftest.py
Normal file
@@ -0,0 +1,3 @@
"""Pytest configuration for turboquant."""
import sys, os
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
58
tests/test_dflash_apple_silicon.py
Normal file
@@ -0,0 +1,58 @@
#!/usr/bin/env python3
"""Tests for Apple Silicon DFlash benchmark planning helpers (issue #152)."""

import os
import sys
from unittest.mock import patch

sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

from benchmarks.dflash_apple_silicon import (  # noqa: E402
    build_mlx_benchmark_command,
    detect_total_memory_gb,
    render_report_template,
    select_pair,
)


class TestPairSelection:
    def test_prefers_qwen35_9b_on_36gb_mac(self):
        pair = select_pair(total_memory_gb=36)
        assert pair.slug == "qwen35-9b"
        assert pair.base_model == "Qwen/Qwen3.5-9B"
        assert pair.draft_model == "z-lab/Qwen3.5-9B-DFlash"

    def test_falls_back_to_4b_when_memory_is_tight(self):
        pair = select_pair(total_memory_gb=20)
        assert pair.slug == "qwen35-4b"
        assert pair.base_model == "Qwen/Qwen3.5-4B"


class TestCommandGeneration:
    def test_builds_upstream_mlx_benchmark_command(self):
        pair = select_pair(total_memory_gb=36)
        command = build_mlx_benchmark_command(pair, dataset="gsm8k", max_samples=64)
        assert "python -m dflash.benchmark --backend mlx" in command
        assert "--model Qwen/Qwen3.5-9B" in command
        assert "--draft-model z-lab/Qwen3.5-9B-DFlash" in command
        assert "--dataset gsm8k" in command
        assert "--max-samples 64" in command
        assert "--draft-sliding-window-size 4096" in command


class TestReportTemplate:
    def test_report_template_mentions_baseline_and_verdict(self):
        pair = select_pair(total_memory_gb=36)
        report = render_report_template(machine_label="M3 Max 36GB", pair=pair)
        assert "DFlash Apple Silicon Benchmark Report" in report
        assert "M3 Max 36GB" in report
        assert "Qwen/Qwen3.5-9B" in report
        assert "plain MLX or llama.cpp speculative decoding" in report
        assert "Worth operationalizing locally?" in report


class TestMemoryDetection:
    @patch("benchmarks.dflash_apple_silicon.platform.system", return_value="Darwin")
    @patch("benchmarks.dflash_apple_silicon.subprocess.check_output", return_value=b"38654705664\n")
    def test_detect_total_memory_gb_on_macos(self, _mock_sysctl, _mock_system):
        assert detect_total_memory_gb() == 36.0
@@ -1,136 +0,0 @@
#!/usr/bin/env python3
"""
Tests for run_m1_benchmark.py (Issue #80)

Validates core benchmark functions without requiring a live server.
"""

import json
import os
import sys
import tempfile
import unittest
from unittest.mock import patch, MagicMock

# Add parent dir to path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from benchmarks.run_m1_benchmark import (
    MemoryMonitor,
    get_system_info,
    generate_report,
)


class TestMemoryMonitor(unittest.TestCase):
    def test_init(self):
        mon = MemoryMonitor(pid=1, interval=0.1)
        self.assertEqual(mon.pid, 1)
        self.assertEqual(mon.samples, [])

    def test_get_stats_empty(self):
        mon = MemoryMonitor(pid=1)
        stats = mon.get_stats()
        self.assertEqual(stats["avg_mb"], 0)
        self.assertEqual(stats["peak_mb"], 0)
        self.assertEqual(stats["samples"], 0)

    def test_get_stats_with_samples(self):
        mon = MemoryMonitor(pid=1)
        mon.samples = [100.0, 150.0, 200.0, 120.0]
        stats = mon.get_stats()
        self.assertEqual(stats["peak_mb"], 200.0)
        self.assertEqual(stats["min_mb"], 100.0)
        self.assertEqual(stats["avg_mb"], 142.5)
        self.assertEqual(stats["samples"], 4)


class TestSystemInfo(unittest.TestCase):
    def test_returns_dict(self):
        info = get_system_info()
        self.assertIsInstance(info, dict)
        self.assertIn("platform", info)
        self.assertIn("python", info)


class TestReportGeneration(unittest.TestCase):
    def test_basic_report(self):
        results = {
            "timestamp": "2026-04-15T12:00:00Z",
            "system": {"chip": "Apple M1", "memory_gb": 16, "cpu_cores": 8},
            "model": "test-model",
            "throughput": {
                "f16": {
                    "avg_tok_per_sec": 100.0,
                    "avg_latency": 2.5,
                    "avg_ttft": 0.3,
                    "results": [
                        {"tokens_per_sec": 100, "latency_s": 2.5, "status": "success"},
                    ],
                },
                "turbo4": {
                    "avg_tok_per_sec": 90.0,
                    "avg_latency": 2.8,
                    "avg_ttft": 0.35,
                    "results": [
                        {"tokens_per_sec": 90, "latency_s": 2.8, "status": "success"},
                    ],
                },
            },
            "memory": {
                "f16": {"peak_mb": 1000, "avg_mb": 900},
                "turbo4": {"peak_mb": 300, "avg_mb": 250},
            },
            "perplexity": {
                "f16": {"perplexity": 12.5, "tokens": 5000, "elapsed_seconds": 120},
                "turbo4": {"perplexity": 12.8, "tokens": 5000, "elapsed_seconds": 130},
                "delta": 0.3,
                "pass": True,
                "threshold": 0.5,
            },
            "issues_discovered": [],
        }

        with tempfile.NamedTemporaryFile(mode="w", suffix=".md", delete=False) as f:
            report_path = f.name

        try:
            report = generate_report(results, report_path)
            self.assertIn("TurboQuant M1 Benchmark Report", report)
            self.assertIn("f16", report)
            self.assertIn("turbo4", report)
            self.assertIn("PASS", report)

            # Verify file was written
            with open(report_path) as f:
                written = f.read()
            self.assertEqual(written, report)
        finally:
            os.unlink(report_path)

    def test_report_with_issues(self):
        results = {
            "timestamp": "2026-04-15T12:00:00Z",
            "system": {"chip": "M1", "memory_gb": 16, "cpu_cores": 8},
            "model": "test",
            "throughput": {"f16": {"results": []}, "turbo4": {"results": []}},
            "memory": {"f16": {}, "turbo4": {}},
            "perplexity": {},
            "issues_discovered": [
                {"title": "Test issue", "description": "Something went wrong"}
            ],
        }

        with tempfile.NamedTemporaryFile(mode="w", suffix=".md", delete=False) as f:
            report_path = f.name

        try:
            report = generate_report(results, report_path)
            self.assertIn("Issues Discovered", report)
            self.assertIn("Test issue", report)
        finally:
            os.unlink(report_path)


if __name__ == "__main__":
    unittest.main()
177
tests/test_quant_selector.py
Normal file
@@ -0,0 +1,177 @@
#!/usr/bin/env python3
"""Tests for quant_selector.py"""

import sys
import os
import pytest
from unittest.mock import patch, MagicMock

sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from evolution.quant_selector import (
    QuantLevel,
    HardwareInfo,
    QUANT_LEVELS,
    detect_hardware,
    estimate_kv_cache_gb,
    estimate_model_memory_gb,
    select_quant_level,
)


class TestQuantLevels:
    def test_levels_keep_turboquant_quality_order_with_q4_fallback_last(self):
        """TurboQuant levels should lead, with q4_0 reserved as the non-Turbo fallback."""
        names = [level.name for level in QUANT_LEVELS]
        assert names[:3] == ["turbo4", "turbo3", "turbo2"]
        assert names[-1] == "q4_0"

    def test_all_levels_have_required_fields(self):
        for level in QUANT_LEVELS:
            assert level.name
            assert level.bits_per_channel > 0
            assert level.compression_ratio > 1
            assert level.quality_label
            assert level.layer_adaptive >= 0
            assert level.kv_type


class TestKVEstimate:
    def test_basic_estimate(self):
        # 48 layers, 8 heads, 128 dim, 32K context, 3.5 bits
        kv_gb = estimate_kv_cache_gb(32768, 48, 8, 128, 3.5)
        assert kv_gb > 0
        assert kv_gb < 10  # Should be reasonable

    def test_longer_context_larger(self):
        kv_32k = estimate_kv_cache_gb(32768, 48, 8, 128, 3.5)
        kv_128k = estimate_kv_cache_gb(131072, 48, 8, 128, 3.5)
        assert kv_128k > kv_32k

    def test_higher_bits_larger(self):
        kv_4b = estimate_kv_cache_gb(32768, 48, 8, 128, 4.0)
        kv_2b = estimate_kv_cache_gb(32768, 48, 8, 128, 2.0)
        assert kv_4b > kv_2b


class TestHardwareDetection:
    def test_detect_returns_info(self):
        hw = detect_hardware()
        assert hw.total_memory_gb > 0
        assert hw.available_memory_gb > 0
        assert hw.detection_method

    @patch("evolution.quant_selector.platform.system", return_value="Linux")
    @patch("builtins.open", create=True)
    def test_linux_detection(self, mock_open, mock_system):
        mock_open.return_value.__enter__().read.return_value = (
            "MemTotal: 32000000 kB\n"
            "MemAvailable: 24000000 kB\n"
        )
        hw = _detect_linux_fallback()
        assert hw.total_memory_gb > 20


def _detect_linux_fallback():
    """Helper to test Linux detection with mocked /proc/meminfo."""
    from evolution.quant_selector import _detect_linux
    return _detect_linux()


class TestSelection:
    def test_selects_turbo4_for_large_memory(self):
        """With plenty of memory, should pick turbo4 (best quality)."""
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=64,
                available_memory_gb=48,
                gpu_memory_gb=64,
                gpu_name="Test GPU",
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=32768)
            assert sel.level.name == "turbo4"
            assert sel.headroom_gb > 0

    def test_selects_smaller_for_tight_memory(self):
        """With tight memory, should pick a smaller quant."""
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=16,
                available_memory_gb=12,
                gpu_memory_gb=16,
                gpu_name="Test GPU",
                cpu_cores=8,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=131072)
            # Should pick a smaller quant for 128K context on 16GB
            assert sel.level.bits_per_channel <= 4.0

    def test_preferred_level(self):
        """User can force a specific level."""
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=64,
                available_memory_gb=48,
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(
                model_size_gb=14.0, context_length=32768,
                preferred_level="turbo2"
            )
            assert sel.level.name == "turbo2"

    def test_env_vars_populated(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=64,
                available_memory_gb=48,
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=32768)
            assert "TURBO_LAYER_ADAPTIVE" in sel.env_vars
            assert "-ctk" in sel.server_flags
            assert "-ctv" in sel.server_flags

    def test_warnings_on_low_headroom(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=18,
                available_memory_gb=14,
                gpu_memory_gb=18,
                gpu_name="Test GPU",
                cpu_cores=8,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=16.0, context_length=65536)
            assert len(sel.warnings) > 0

    def test_falls_back_to_turbo2_when_nothing_fits(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=8,
                available_memory_gb=6,
                gpu_memory_gb=8,
                gpu_name="Tiny GPU",
                cpu_cores=4,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=16.0, context_length=131072)
            assert sel.level.name == "turbo2"

    def test_reasoning_contains_key_info(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=32,
                available_memory_gb=24,
                is_apple_silicon=True,
                chip_name="M4 Max",
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=32768)
            assert "turbo4" in sel.reasoning
            assert "M4 Max" in sel.reasoning or "32GB" in sel.reasoning
338
tests/test_tool_call_integration.py
Normal file
@@ -0,0 +1,338 @@
"""
|
||||
Integration test: turboquant compressed model passes hermes tool calls (issue #82).
|
||||
|
||||
Validates that a TurboQuant-compressed model can:
|
||||
1. Parse hermes tool schemas correctly
|
||||
2. Format tool calls in OpenAI-compatible format
|
||||
3. Pass through the hermes agent conversation loop
|
||||
|
||||
Tests are structured as contract tests -- they validate the schema/format
|
||||
compatibility without requiring a running model server. The live inference
|
||||
test is skipped by default (requires llama-server with TurboQuant model).
|
||||
|
||||
Usage:
|
||||
pytest tests/test_tool_call_integration.py -v
|
||||
pytest tests/test_tool_call_integration.py -v -k live # run live test if server available
|
||||
"""
|
||||
import json
|
||||
import os
|
||||
import pathlib
|
||||
import re
|
||||
import unittest
|
||||
|
||||
import pytest
|
||||
|
||||
ROOT = pathlib.Path(__file__).resolve().parents[1]
|
||||
PROFILE_PATH = ROOT / "profiles" / "hermes-profile-gemma4-turboquant.yaml"
|
||||
BENCHMARKS_DIR = ROOT / "benchmarks"
|
||||
|
||||
|
||||
class TestHermesProfileSchema(unittest.TestCase):
|
||||
"""Validate the hermes profile YAML has required fields for tool calling."""
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
import yaml
|
||||
cls.profile = yaml.safe_load(PROFILE_PATH.read_text())
|
||||
|
||||
def test_profile_has_providers(self):
|
||||
assert "providers" in self.profile, "Profile must define providers"
|
||||
assert "primary" in self.profile["providers"], "Must have primary provider"
|
||||
|
||||
def test_primary_provider_has_endpoint(self):
|
||||
primary = self.profile["providers"]["primary"]
|
||||
assert "endpoint" in primary, "Primary provider must have endpoint"
|
||||
assert primary["endpoint"].startswith("http"), "Endpoint must be HTTP(S) URL"
|
||||
|
||||
def test_primary_provider_has_api_path(self):
|
||||
primary = self.profile["providers"]["primary"]
|
||||
assert "api_path" in primary, "Primary provider must have api_path"
|
||||
assert "/chat/completions" in primary["api_path"], (
|
||||
"api_path should be OpenAI-compatible /chat/completions"
|
||||
)
|
||||
|
||||
def test_turboquant_settings_present(self):
|
||||
primary = self.profile["providers"]["primary"]
|
||||
assert "turboquant" in primary, "Must have turboquant config section"
|
||||
tq = primary["turboquant"]
|
||||
assert tq.get("enabled") is True, "TurboQuant must be enabled"
|
||||
assert tq.get("kv_type") in ("turbo2", "turbo3", "turbo4"), (
|
||||
"kv_type must be turbo2, turbo3, or turbo4"
|
||||
)
|
||||
|
||||
def test_context_window_configured(self):
|
||||
primary = self.profile["providers"]["primary"]
|
||||
assert "context" in primary, "Must have context config"
|
||||
ctx = primary["context"]
|
||||
assert ctx.get("max_tokens", 0) >= 8192, (
|
||||
"max_tokens should be >= 8192 for TurboQuant value proposition"
|
||||
)
|
||||
|
||||
|
||||
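
# Minimal profile sketch (comment only) that satisfies the assertions above; the
# endpoint, port, and token budget are illustrative assumptions, not the contents
# of the shipped profiles/hermes-profile-gemma4-turboquant.yaml:
#
#   providers:
#     primary:
#       endpoint: http://127.0.0.1:8080
#       api_path: /v1/chat/completions
#       turboquant:
#         enabled: true
#         kv_type: turbo4
#       context:
#         max_tokens: 32768
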
class TestToolSchemaCompatibility(unittest.TestCase):
    """Verify hermes tool schemas serialize to valid JSON for OpenAI tool_calls."""

    SAMPLE_TOOL_SCHEMAS = [
        {
            "type": "function",
            "function": {
                "name": "read_file",
                "description": "Read a text file with line numbers.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "path": {"type": "string", "description": "File path"},
                        "offset": {"type": "integer", "default": 1},
                        "limit": {"type": "integer", "default": 500},
                    },
                    "required": ["path"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "execute_code",
                "description": "Run a Python script.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "code": {"type": "string", "description": "Python code"},
                    },
                    "required": ["code"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "web_search",
                "description": "Search the web.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {"type": "string"},
                        "max_results": {"type": "integer", "default": 5},
                    },
                    "required": ["query"],
                },
            },
        },
    ]

    def test_tool_schemas_serialize_to_json(self):
        """Tool schemas must serialize without errors."""
        serialized = json.dumps(self.SAMPLE_TOOL_SCHEMAS)
        assert len(serialized) > 0
        parsed = json.loads(serialized)
        assert len(parsed) == len(self.SAMPLE_TOOL_SCHEMAS)

    def test_tool_schemas_have_required_openai_fields(self):
        """Each tool schema must have the fields OpenAI expects."""
        for tool in self.SAMPLE_TOOL_SCHEMAS:
            assert tool["type"] == "function", "Tool type must be 'function'"
            fn = tool["function"]
            assert "name" in fn, "Function must have name"
            assert "description" in fn, "Function must have description"
            assert "parameters" in fn, "Function must have parameters"
            params = fn["parameters"]
            assert params["type"] == "object", "Parameters type must be 'object'"
            assert "properties" in params, "Parameters must have properties"

    def test_tool_call_response_format(self):
        """Verify tool_call response matches OpenAI format."""
        tool_call = {
            "id": "call_abc123",
            "type": "function",
            "function": {
                "name": "read_file",
                "arguments": json.dumps({"path": "/tmp/test.txt"}),
            },
        }
        args = json.loads(tool_call["function"]["arguments"])
        assert args["path"] == "/tmp/test.txt"
        assert tool_call["function"]["name"] in [
            t["function"]["name"] for t in self.SAMPLE_TOOL_SCHEMAS
        ]

    def test_tool_names_are_valid_identifiers(self):
        """Tool names must be valid Python identifiers for hermes dispatch."""
        for tool in self.SAMPLE_TOOL_SCHEMAS:
            name = tool["function"]["name"]
            assert re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", name), (
                f"Tool name '{name}' is not a valid identifier"
            )


class TestTurboquantServerConfig(unittest.TestCase):
    """Validate server startup configuration matches hermes profile."""

    def test_server_command_has_turboquant_flags(self):
        """The server command in the profile must include -ctk/-ctv flags."""
        profile_text = PROFILE_PATH.read_text()
        assert "-ctk" in profile_text, "Profile server command must include -ctk flag"
        assert "-ctv" in profile_text, "Profile server command must include -ctv flag"

    def test_server_command_has_context_flag(self):
        """Server command must set context size."""
        profile_text = PROFILE_PATH.read_text()
        assert re.search(r"-c\s+\d+", profile_text), (
            "Server command must include -c <context_size> flag"
        )

    def test_layer_adaptive_env_var(self):
        """Profile must set TURBO_LAYER_ADAPTIVE env var."""
        profile_text = PROFILE_PATH.read_text()
        assert "TURBO_LAYER_ADAPTIVE" in profile_text, (
            "Profile must configure TURBO_LAYER_ADAPTIVE"
        )
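
# For reference (comment only): a profile server command that passes these checks
# would look roughly like the line below; the binary name, model path, port, and
# TURBO_LAYER_ADAPTIVE value are illustrative assumptions.
#
#   TURBO_LAYER_ADAPTIVE=1 llama-server -m gemma-4-turboquant.gguf \
#       -c 32768 -ctk turbo4 -ctv turbo4 --port 8080
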
class TestBenchmarkData(unittest.TestCase):
    """Validate benchmark test prompts include tool-call test cases."""

    @classmethod
    def setUpClass(cls):
        prompts_path = BENCHMARKS_DIR / "test_prompts.json"
        cls.prompts = json.loads(prompts_path.read_text())

    def test_has_tool_call_test_prompt(self):
        """Benchmark prompts must include a tool-call format test."""
        categories = [p.get("category") for p in self.prompts]
        assert "tool_call_format" in categories, (
            "Benchmark must include a tool_call_format test case"
        )

    def test_tool_call_prompt_expects_json(self):
        """Tool call test prompt must expect JSON in the response."""
        tool_prompt = next(
            p for p in self.prompts if p.get("category") == "tool_call_format"
        )
        pattern = tool_prompt.get("expected_pattern", "")
        assert "json" in pattern.lower() or "\\{" in pattern, (
            "Tool call prompt must expect JSON-formatted response"
        )


@pytest.mark.skipif(
    not os.environ.get("TURBOQUANT_SERVER_URL"),
    reason="No TurboQuant server available (set TURBOQUANT_SERVER_URL to run)",
)
class TestLiveToolCallIntegration:
    """Live integration test -- requires running llama-server with TurboQuant."""

    def test_server_health(self):
        """Server must respond to /v1/models endpoint."""
        import requests
        url = os.environ["TURBOQUANT_SERVER_URL"]
        resp = requests.get(f"{url}/v1/models", timeout=10)
        assert resp.status_code == 200
        data = resp.json()
        assert "data" in data
        assert len(data["data"]) > 0

    def test_tool_call_completion(self):
        """Model must return a valid tool_call for a read_file prompt."""
        import requests
        url = os.environ["TURBOQUANT_SERVER_URL"]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "read_file",
                    "description": "Read a file",
                    "parameters": {
                        "type": "object",
                        "properties": {"path": {"type": "string"}},
                        "required": ["path"],
                    },
                },
            }
        ]
        resp = requests.post(
            f"{url}/v1/chat/completions",
            json={
                "model": "gemma-4",
                "messages": [
                    {"role": "user", "content": "Read the file at /tmp/test.txt"}
                ],
                "tools": tools,
                "tool_choice": "auto",
            },
            timeout=120,
        )
        assert resp.status_code == 200
        data = resp.json()
        choice = data["choices"][0]
        msg = choice["message"]
        if "tool_calls" in msg and msg["tool_calls"]:
            tc = msg["tool_calls"][0]
            assert tc["type"] == "function"
            assert tc["function"]["name"] == "read_file"
            args = json.loads(tc["function"]["arguments"])
            assert "path" in args
        else:
            assert len(msg.get("content", "")) > 0

    def test_tool_call_with_multiple_tools(self):
        """Model must handle multiple available tools."""
        import requests
        url = os.environ["TURBOQUANT_SERVER_URL"]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "read_file",
                    "description": "Read a file",
                    "parameters": {
                        "type": "object",
                        "properties": {"path": {"type": "string"}},
                        "required": ["path"],
                    },
                },
            },
            {
                "type": "function",
                "function": {
                    "name": "web_search",
                    "description": "Search the web",
                    "parameters": {
                        "type": "object",
                        "properties": {"query": {"type": "string"}},
                        "required": ["query"],
                    },
                },
            },
            {
                "type": "function",
                "function": {
                    "name": "execute_code",
                    "description": "Run Python code",
                    "parameters": {
                        "type": "object",
                        "properties": {"code": {"type": "string"}},
                        "required": ["code"],
                    },
                },
            },
        ]
        resp = requests.post(
            f"{url}/v1/chat/completions",
            json={
                "model": "gemma-4",
                "messages": [
                    {"role": "user", "content": "Search the web for 'bitcoin price'"}
                ],
                "tools": tools,
                "tool_choice": "auto",
            },
            timeout=120,
        )
        assert resp.status_code == 200
        data = resp.json()
        assert "choices" in data
        assert len(data["choices"]) > 0


if __name__ == "__main__":
    unittest.main()