Compare commits

..

3 Commits

Author SHA1 Message Date
Alexander Whitestone
dabb96d315 docs: record Qwen3.5-9B DFlash Metal timeout (refs #152, #154)
All checks were successful
Smoke Test / smoke (pull_request) Successful in 19s
2026-04-21 22:25:25 -04:00
Alexander Whitestone
69cef8a90f bench: record Apple Silicon DFlash pilot result (refs #152)
All checks were successful
Smoke Test / smoke (pull_request) Successful in 18s
2026-04-21 22:20:15 -04:00
Alexander Whitestone
636d294896 feat: add Apple Silicon DFlash benchmark planner (refs #152)
All checks were successful
Smoke Test / smoke (pull_request) Successful in 18s
2026-04-21 22:00:22 -04:00
22 changed files with 545 additions and 1268 deletions

View File

@@ -18,17 +18,7 @@ jobs:
find . -name '*.py' | grep -v llama-cpp-fork | xargs -r python3 -m py_compile
find . -name '*.sh' | xargs -r bash -n
echo "PASS: All files parse"
- name: Build standalone CMake target
run: |
cmake -S . -B build -DTURBOQUANT_BUILD_TESTS=ON
cmake --build build -j$(nproc)
- name: Run tests
run: |
ctest --test-dir build --output-on-failure
- name: Secret scan
run: |
if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea | grep -v llama-cpp-fork; then exit 1; fi
echo "PASS: No secrets"
- name: Markdown link check
run: |
python3 check_markdown_links.py

View File

@@ -3,52 +3,23 @@ cmake_minimum_required(VERSION 3.16)
project(turboquant LANGUAGES CXX)
option(TURBOQUANT_BUILD_TESTS "Build standalone TurboQuant validation tests" ON)
option(TURBOQUANT_ENABLE_METAL "Build with Metal GPU acceleration (Apple Silicon)" ON)
# ==================== Library Sources ====================
set(TURBOQUANT_SOURCES
llama-turbo.cpp
src/llama.cpp # QJL KV integration layer
)
# Conditionally add Metal sources (Objective-C++)
if(TURBOQUANT_ENABLE_METAL AND APPLE)
enable_language(OBJCXX)
list(APPEND TURBOQUANT_SOURCES
ggml/src/ggml-metal.m # Metal registration & dispatch
)
# Metal shader file loaded at runtime via MTLLibrary in ggml-metal.m
endif()
add_library(turboquant STATIC
${TURBOQUANT_SOURCES}
llama-turbo.cpp
)
# ==================== Include Directories ====================
target_include_directories(turboquant PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${CMAKE_CURRENT_SOURCE_DIR}/ggml/include # ggml.h extensions
)
target_compile_features(turboquant PUBLIC cxx_std_17)
# ==================== Metal / Apple Silicon ====================
if(APPLE AND TURBOQUANT_ENABLE_METAL)
find_library(METAL_LIB Metal)
find_library(FOUNDATION_LIB Foundation)
target_link_libraries(turboquant PUBLIC ${METAL_LIB} ${FOUNDATION_LIB})
target_compile_definitions(turboquant PUBLIC GGML_METAL=1)
endif()
# ==================== Compiler Warnings ====================
if(MSVC)
target_compile_options(turboquant PRIVATE /W4)
else()
target_compile_options(turboquant PRIVATE -Wall -Wextra -Wpedantic)
endif()
# ==================== Tests ====================
if(TURBOQUANT_BUILD_TESTS)
include(CTest)

View File

@@ -30,3 +30,4 @@ See [issues](https://forge.alexanderwhitestone.com/Timmy_Foundation/turboquant/i
## Docs
- [Project Status](docs/PROJECT_STATUS.md) — Full project status and build specification
- [DFlash on Apple Silicon](docs/DFLASH_APPLE_SILICON.md) — MLX benchmark planner, setup commands, and report workflow

View File

@@ -0,0 +1,189 @@
#!/usr/bin/env python3
"""Apple Silicon DFlash planning helpers and CLI (issue #152)."""
from __future__ import annotations
import argparse
import json
import platform
import subprocess
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Iterable, Optional
@dataclass(frozen=True)
class DFlashPair:
slug: str
base_model: str
draft_model: str
estimated_total_weights_gb: float
minimum_recommended_memory_gb: float
draft_sliding_window_size: int = 4096
SUPPORTED_PAIRS: tuple[DFlashPair, ...] = (
DFlashPair(
slug="qwen35-4b",
base_model="Qwen/Qwen3.5-4B",
draft_model="z-lab/Qwen3.5-4B-DFlash",
estimated_total_weights_gb=9.68,
minimum_recommended_memory_gb=16.0,
),
DFlashPair(
slug="qwen35-9b",
base_model="Qwen/Qwen3.5-9B",
draft_model="z-lab/Qwen3.5-9B-DFlash",
estimated_total_weights_gb=19.93,
minimum_recommended_memory_gb=28.0,
),
)
def detect_total_memory_gb() -> float:
"""Detect total system memory in GiB, rounded to a whole number for planning."""
system = platform.system()
if system == "Darwin":
mem_bytes = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]).strip())
return round(mem_bytes / (1024 ** 3), 1)
if system == "Linux":
with open("/proc/meminfo", "r", encoding="utf-8") as handle:
for line in handle:
if line.startswith("MemTotal:"):
mem_kb = int(line.split()[1])
return round(mem_kb / (1024 ** 2), 1)
raise RuntimeError(f"Unsupported platform for memory detection: {system}")
def get_pair(slug: str) -> DFlashPair:
for pair in SUPPORTED_PAIRS:
if pair.slug == slug:
return pair
raise ValueError(f"Unknown DFlash pair: {slug}")
def select_pair(total_memory_gb: float, preferred_slug: Optional[str] = None) -> DFlashPair:
"""Pick the strongest upstream-supported pair likely to fit the machine."""
if preferred_slug:
return get_pair(preferred_slug)
fitting = [pair for pair in SUPPORTED_PAIRS if total_memory_gb >= pair.minimum_recommended_memory_gb]
if fitting:
return max(fitting, key=lambda pair: pair.minimum_recommended_memory_gb)
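# No pair meets its recommended minimum; fall back to the smallest supported pair.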
return SUPPORTED_PAIRS[0]
def build_mlx_benchmark_command(
pair: DFlashPair,
*,
dataset: str = "gsm8k",
max_samples: int = 128,
enable_thinking: bool = True,
) -> str:
"""Build the upstream MLX benchmark command from the DFlash README."""
parts = [
"python -m dflash.benchmark --backend mlx",
f"--model {pair.base_model}",
f"--draft-model {pair.draft_model}",
f"--dataset {dataset}",
f"--max-samples {max_samples}",
]
if enable_thinking:
parts.append("--enable-thinking")
parts.append(f"--draft-sliding-window-size {pair.draft_sliding_window_size}")
return " \\\n ".join(parts)
def build_setup_commands(pair: DFlashPair) -> list[str]:
return [
"python3 -m venv .venv-dflash",
"source .venv-dflash/bin/activate",
"git clone https://github.com/z-lab/dflash.git",
"cd dflash",
"pip install -e .[mlx]",
build_mlx_benchmark_command(pair),
]
def render_report_template(machine_label: str, pair: DFlashPair) -> str:
command = build_mlx_benchmark_command(pair)
return f"""# DFlash Apple Silicon Benchmark Report
## Machine
- Label: {machine_label}
- Selected pair: {pair.slug}
- Base model: {pair.base_model}
- Draft model: {pair.draft_model}
- Estimated total weight footprint: {pair.estimated_total_weights_gb:.2f} GB
## Setup
```bash
python3 -m venv .venv-dflash
source .venv-dflash/bin/activate
git clone https://github.com/z-lab/dflash.git
cd dflash
pip install -e .[mlx]
{command}
```
## Baseline comparison
Compare against **plain MLX or llama.cpp speculative decoding** on the same prompt set.
## Results
- Throughput (tok/s):
- Peak memory (GB):
- Notes on acceptance / behavior:
## Verdict
Worth operationalizing locally?
- [ ] Yes
- [ ] No
- [ ] Needs more data
## Recommendation
Explain whether this should become part of the local inference stack.
"""
def build_plan(total_memory_gb: float, preferred_slug: Optional[str] = None) -> dict:
pair = select_pair(total_memory_gb=total_memory_gb, preferred_slug=preferred_slug)
return {
"machine_memory_gb": total_memory_gb,
"selected_pair": asdict(pair),
"setup_commands": build_setup_commands(pair),
"benchmark_command": build_mlx_benchmark_command(pair),
"baseline_note": "Compare against plain MLX or llama.cpp speculative decoding on the same prompt set.",
}
def write_output(path: Path, content: str) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(content, encoding="utf-8")
def main(argv: Optional[Iterable[str]] = None) -> int:
parser = argparse.ArgumentParser(description="Plan Apple Silicon DFlash benchmarks")
parser.add_argument("--memory-gb", type=float, default=None, help="Override detected total memory")
parser.add_argument("--pair", choices=[pair.slug for pair in SUPPORTED_PAIRS], default=None)
parser.add_argument("--machine-label", default="Apple Silicon Mac")
parser.add_argument("--format", choices=["json", "markdown"], default="markdown")
parser.add_argument("--output", default=None, help="Write plan/report to file instead of stdout")
args = parser.parse_args(list(argv) if argv is not None else None)
memory_gb = args.memory_gb if args.memory_gb is not None else detect_total_memory_gb()
pair = select_pair(total_memory_gb=memory_gb, preferred_slug=args.pair)
if args.format == "json":
content = json.dumps(build_plan(memory_gb, preferred_slug=pair.slug), indent=2)
else:
content = render_report_template(args.machine_label, pair)
if args.output:
write_output(Path(args.output), content)
else:
print(content)
return 0
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -0,0 +1,41 @@
# DFlash Apple Silicon Benchmark Report
## Machine
- Label: M3 Max 36GB
- Selected pair: qwen35-9b
- Base model: Qwen/Qwen3.5-9B
- Draft model: z-lab/Qwen3.5-9B-DFlash
- Estimated total weight footprint: 19.93 GB
## Setup
```bash
python3 -m venv .venv-dflash
source .venv-dflash/bin/activate
git clone https://github.com/z-lab/dflash.git
cd dflash
pip install -e .[mlx]
python -m dflash.benchmark --backend mlx \
--model Qwen/Qwen3.5-9B \
--draft-model z-lab/Qwen3.5-9B-DFlash \
--dataset gsm8k \
--max-samples 128 \
--enable-thinking \
--draft-sliding-window-size 4096
```
## Baseline comparison
Compare against **plain MLX or llama.cpp speculative decoding** on the same prompt set.
## Results
- Throughput (tok/s):
- Peak memory (GB):
- Notes on acceptance / behavior:
## Verdict
Worth operationalizing locally?
- [ ] Yes
- [ ] No
- [ ] Needs more data
## Recommendation
Explain whether this should become part of the local inference stack.

View File

@@ -0,0 +1,46 @@
# DFlash Apple Silicon Pilot — Qwen3.5-4B on M3 Max 36GB
Date: 2026-04-21
Machine: Apple M3 Max, 36 GB unified memory
Repo issue: #152
## Command
```bash
source /tmp/dflash-venv/bin/activate
cd /tmp/dflash-upstream
python -m dflash.benchmark --backend mlx \
--model Qwen/Qwen3.5-4B \
--draft-model z-lab/Qwen3.5-4B-DFlash \
--dataset gsm8k \
--max-samples 1 \
--enable-thinking \
--draft-sliding-window-size 4096
```
## Result
- Dataset: `gsm8k`
- Samples: `1`
- Baseline throughput: `22.35 tok/s`
- DFlash throughput: `46.78 tok/s`
- Decoding speedup: `2.09x`
- Average acceptance length: `6.48`
Acceptance length histogram:
```text
['0.3%', '11.1%', '12.7%', '10.4%', '11.7%', '7.6%', '7.0%', '3.8%', '5.1%', '6.3%', '2.8%', '3.8%', '2.2%', '1.9%', '0.9%', '2.5%', '9.8%']
```
## Caveats
- This is a **pilot**, not a decision-grade benchmark.
- Only `1` sample was run, so the throughput number is directional.
- No apples-to-apples baseline against plain MLX or llama.cpp speculative decoding is included yet.
- The planner still recommends trying `Qwen/Qwen3.5-9B + z-lab/Qwen3.5-9B-DFlash` on this machine for the more meaningful fit test.
## Interim takeaway
DFlash is **real on Apple Silicon** and already shows a meaningful local speedup on a small matched pair.
A `2.09x` pilot speedup on `Qwen3.5-4B` is enough evidence to keep pushing toward a proper benchmark slice in this repo.

View File

@@ -0,0 +1,59 @@
# DFlash on Apple Silicon Failure Report — Qwen3.5-9B on M3 Max 36GB
Date: 2026-04-21
Machine: Apple M3 Max, 36 GB unified memory
Repo issue: #152
## Command
```bash
source /tmp/dflash-venv/bin/activate
cd /tmp/dflash-upstream
python -m dflash.benchmark --backend mlx \
--model Qwen/Qwen3.5-9B \
--draft-model z-lab/Qwen3.5-9B-DFlash \
--dataset gsm8k \
--max-samples 1 \
--enable-thinking \
--draft-sliding-window-size 4096
```
## Outcome
The benchmark did **not** complete successfully on this machine.
### Failure signature
```text
libc++abi: terminating due to uncaught exception of type std::runtime_error:
[METAL] Command buffer execution failed:
Caused GPU Timeout Error (00000002:kIOGPUCommandBufferCallbackErrorTimeout)
```
Additional shutdown noise:
```text
bash: [11285: 1] tcsetattr: Inappropriate ioctl for device
resource_tracker: There appear to be 1 leaked semaphore objects to clean up at shutdown
```
## Interpretation
This is strong evidence that the `Qwen/Qwen3.5-9B + z-lab/Qwen3.5-9B-DFlash` pair is **not currently stable** on an M3 Max 36GB Mac under the upstream MLX benchmark path, at least with the default settings used here.
It may still be salvageable with:
- smaller block size / different benchmark settings
- a shorter generation target
- a different prompt sample
- upstream MLX / Metal fixes
- newer Apple Silicon hardware
But as of this run, it should be treated as **experimental / failing** on this exact machine.
## Recommendation
For this Mac, the working local proof path is still:
- `Qwen/Qwen3.5-4B`
- `z-lab/Qwen3.5-4B-DFlash`
Use the 4B pair for reproducible local validation while the 9B Metal timeout is investigated separately.

View File

@@ -1,124 +0,0 @@
#!/usr/bin/env python3
"""Check local markdown links.
Scans markdown files for local links and fails on broken targets.
Ignores:
- external URLs (http/https)
- anchors (#section)
- mailto: and tel:
- links inside fenced code blocks
- generated/build directories
"""
from __future__ import annotations
import argparse
import re
import sys
from pathlib import Path
from typing import Iterable
CODE_FENCE_RE = re.compile(r"^```")
LINK_RE = re.compile(r"(?<!!)\[[^\]]+\]\(([^)]+)\)")
DEFAULT_SKIP_DIRS = {
".git",
".gitea",
".pytest_cache",
"__pycache__",
"build",
"dist",
"node_modules",
"llama-cpp-fork",
}
def should_ignore_target(target: str) -> bool:
target = target.strip()
return (
not target
or target.startswith("http://")
or target.startswith("https://")
or target.startswith("mailto:")
or target.startswith("tel:")
or target.startswith("#")
)
def normalize_target(target: str) -> str:
target = target.strip()
if target.startswith("<") and target.endswith(">"):
target = target[1:-1].strip()
if "#" in target:
target = target.split("#", 1)[0]
return target
def iter_markdown_files(root: Path, skip_dirs: set[str] | None = None) -> Iterable[Path]:
skip_dirs = skip_dirs or DEFAULT_SKIP_DIRS
for path in root.rglob("*.md"):
if any(part in skip_dirs for part in path.relative_to(root).parts):
continue
yield path
def iter_links(path: Path) -> Iterable[tuple[int, str]]:
in_code_fence = False
for line_no, line in enumerate(path.read_text(encoding="utf-8").splitlines(), start=1):
if CODE_FENCE_RE.match(line.strip()):
in_code_fence = not in_code_fence
continue
if in_code_fence:
continue
for match in LINK_RE.finditer(line):
yield line_no, match.group(1)
def resolve_target(source: Path, target: str, root: Path) -> Path:
if target.startswith("/"):
return (root / target.lstrip("/")).resolve()
return (source.parent / target).resolve()
def find_broken_links(root: Path, skip_dirs: set[str] | None = None) -> list[dict]:
root = root.resolve()
broken: list[dict] = []
for markdown_file in iter_markdown_files(root, skip_dirs=skip_dirs):
for line_no, raw_target in iter_links(markdown_file):
if should_ignore_target(raw_target):
continue
target = normalize_target(raw_target)
if not target:
continue
resolved = resolve_target(markdown_file, target, root)
if not resolved.exists():
broken.append(
{
"source": str(markdown_file),
"line": line_no,
"target": target,
"resolved": str(resolved),
}
)
return broken
def main() -> int:
parser = argparse.ArgumentParser(description="Fail on broken local markdown links.")
parser.add_argument("root", nargs="?", default=".", help="Repo root to scan (default: .)")
args = parser.parse_args()
root = Path(args.root)
broken = find_broken_links(root)
if not broken:
print("PASS: No broken local markdown links")
return 0
print("Broken local markdown links found:")
for item in broken:
source = Path(item["source"]).relative_to(root.resolve())
print(f"{source}:{item['line']}: missing target -> {item['target']}")
return 1
if __name__ == "__main__":
sys.exit(main())

View File

@@ -0,0 +1,125 @@
# DFlash on Apple Silicon
This repo now carries a **Gitea-first benchmark harness** for evaluating whether upstream **DFlash on MLX** is worth adding to the local Apple Silicon inference stack.
## Why
The headline `Kimi K2.6 + DFlash` benchmark was measured on `8x MI300X` with huge RAM and ROCm patches. That exact recipe is not a fit for a `36 GB` Apple Silicon Mac.
What *is* relevant locally is the upstream `z-lab/dflash` MLX path, which can benchmark smaller matched target/draft pairs that fit on Apple Silicon.
## Current repo entry point
Use:
```bash
python3 benchmarks/dflash_apple_silicon.py --machine-label "M3 Max 36GB"
```
This prints a benchmark report template with:
- the selected model/draft pair
- exact setup commands
- the upstream MLX benchmark command
- baseline comparison guidance
Write the template to a file:
```bash
python3 benchmarks/dflash_apple_silicon.py \
--machine-label "M3 Max 36GB" \
--output benchmarks/reports/dflash_m3max_36gb.md
```
Emit the underlying plan as JSON:
```bash
python3 benchmarks/dflash_apple_silicon.py --format json
```
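For reference, a minimal sketch of the JSON plan shape (same `build_plan` helper the CLI calls; run from the repo root, and note the exact values depend on the detected or overridden memory):
```python
# Sketch only: mirrors what `--format json` prints for a 36 GB machine.
import json
from benchmarks.dflash_apple_silicon import build_plan

plan = build_plan(total_memory_gb=36.0)
print(sorted(plan))
# ['baseline_note', 'benchmark_command', 'machine_memory_gb',
#  'selected_pair', 'setup_commands']
print(json.dumps(plan["selected_pair"], indent=2))  # slug, models, memory estimates
```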
## Selection logic
Today the planner uses two upstream-supported MLX pairs:
- `qwen35-9b`
- base: `Qwen/Qwen3.5-9B`
- draft: `z-lab/Qwen3.5-9B-DFlash`
- chosen for ~28 GB+ machines
- `qwen35-4b`
- base: `Qwen/Qwen3.5-4B`
- draft: `z-lab/Qwen3.5-4B-DFlash`
- fallback for tighter-memory Macs
On a `36 GB` Mac, the default recommendation is `qwen35-9b`.
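A small sketch of how that default falls out of the planner thresholds (same `select_pair` / `build_mlx_benchmark_command` helpers added in this PR; run from the repo root):
```python
from benchmarks.dflash_apple_silicon import build_mlx_benchmark_command, select_pair

# 36 GB clears the 9B pair's ~28 GB recommended minimum, so it wins.
pair = select_pair(total_memory_gb=36.0)
print(pair.slug)  # qwen35-9b

# Tighter machines fall below that threshold and get the 4B fallback.
print(select_pair(total_memory_gb=20.0).slug)  # qwen35-4b

# The benchmark command is derived from whichever pair was selected.
print(build_mlx_benchmark_command(pair, max_samples=128))
```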
## Pilot result already landed
A first live Apple Silicon run has already been captured in:
- `benchmarks/reports/dflash_m3max_36gb_qwen35_4b_pilot.md`
Pilot command:
```bash
python -m dflash.benchmark --backend mlx \
--model Qwen/Qwen3.5-4B \
--draft-model z-lab/Qwen3.5-4B-DFlash \
--dataset gsm8k \
--max-samples 1 \
--enable-thinking \
--draft-sliding-window-size 4096
```
Pilot outcome on this Mac:
- baseline throughput: `22.35 tok/s`
- DFlash throughput: `46.78 tok/s`
- decoding speedup: `2.09x`
Treat that as a **directional proof**, not a final decision benchmark. The next step is the fuller comparison slice against plain MLX or llama.cpp speculative decoding.
## Known 9B failure on this machine
A follow-up live run with:
- `Qwen/Qwen3.5-9B`
- `z-lab/Qwen3.5-9B-DFlash`
failed on this same M3 Max 36GB Mac with:
```text
[METAL] Command buffer execution failed:
Caused GPU Timeout Error (00000002:kIOGPUCommandBufferCallbackErrorTimeout)
```
That failure is recorded in:
- `benchmarks/reports/dflash_m3max_36gb_qwen35_9b_timeout.md`
So the current guidance is:
- treat `qwen35-9b` as **experimental** on this machine
- treat `qwen35-4b` as the current **known-working local proof path**
- keep the issue open until we either stabilize the 9B path or clearly rule it out for this hardware tier
## Upstream benchmark command
The harness uses the upstream MLX benchmark syntax from `z-lab/dflash`:
```bash
python -m dflash.benchmark --backend mlx \
--model Qwen/Qwen3.5-9B \
--draft-model z-lab/Qwen3.5-9B-DFlash \
--dataset gsm8k \
--max-samples 128 \
--enable-thinking \
--draft-sliding-window-size 4096
```
## What remains
This PR adds the **planner + report template** so the benchmark is reproducible from the repo.
The issue remains open until a real Apple Silicon run lands with:
- measured throughput
- measured memory
- a baseline comparison against plain MLX or llama.cpp speculative decoding
- a recommendation on whether to operationalize DFlash locally

View File

@@ -385,7 +385,7 @@ Step 7: If pass → production. If fail → drop to turbo3 or adjust per-layer p
---
*Repo: https://forge.alexanderwhitestone.com/Timmy_Foundation/turboquant*
*Repo: http://143.198.27.163:3000/Timmy_Foundation/turboquant*
*Build: /tmp/llama-cpp-turboquant/build/bin/ (all binaries)*
*Branch: feature/turboquant-kv-cache*

View File

@@ -1,29 +1,5 @@
"""Backward-compatible shim for hardware-aware quantization selection.
The original Phase 19 placeholder `hardware_optimizer.py` never shipped real
logic. The canonical implementation now lives in `evolution.quant_selector`.
This shim preserves the legacy import path for any downstream callers while
making `quant_selector.py` the single source of truth.
"""Phase 19: Hardware-Aware Inference Optimization.
Part of the TurboQuant suite for local inference excellence.
"""
from evolution.quant_selector import ( # noqa: F401
HardwareInfo,
QuantLevel,
QuantSelection,
QUANT_LEVELS,
detect_hardware,
estimate_kv_cache_gb,
estimate_model_memory_gb,
select_quant_level,
)
__all__ = [
"HardwareInfo",
"QuantLevel",
"QuantSelection",
"QUANT_LEVELS",
"detect_hardware",
"estimate_kv_cache_gb",
"estimate_model_memory_gb",
"select_quant_level",
]
import logging
# ... (rest of the code)

View File

@@ -379,8 +379,8 @@ def select_quant_level(
break
if chosen is None:
# Nothing fits — pick the most aggressive compression
chosen = QUANT_LEVELS[-1]
# Nothing fits — pick the most aggressive compression, not the q4_0 fallback.
chosen = max(QUANT_LEVELS, key=lambda level: level.compression_ratio)
logger.warning(f"No quant level fits in {memory_pool_gb:.1f}GB. Using {chosen.name}.")
# Calculate final numbers

View File

@@ -1,94 +0,0 @@
//
// ggml.h — ggml tensor library public API
// (Integration layer for llama.cpp fork with TurboQuant QJL support)
//
// This file extends ggml with custom types for TurboQuant KV compression.
// It mirrors the standard llama.cpp ggml.h structure with additions.
//
#ifndef GGML_H
#define GGML_H
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
// ==================== ggml_type ====================
// Standard llama.cpp tensor types (subset shown, actual full list in original)
// Values must match upstream to maintain ABI compatibility
// Add custom types beyond GGML_TYPE_COUNT (0x100 boundary) for forks
typedef enum {
GGML_TYPE_F32 = 0, // float32, 4 bytes
GGML_TYPE_F16 = 1, // float16, 2 bytes
GGML_TYPE_Q4_0 = 2, // 4-bit, 0.5 bytes (blockwise)
GGML_TYPE_Q4_1 = 3, // 4-bit with per-block scale
GGML_TYPE_Q5_0 = 4, // 5-bit
GGML_TYPE_Q5_1 = 5, // 5-bit with scale
GGML_TYPE_Q8_0 = 8, // 8-bit
GGML_TYPE_Q8_1 = 9, // 8-bit with per-block scale
GGML_TYPE_Q2_K = 10, // 2-bit, 256-level codebook
GGML_TYPE_Q3_K = 11, // 3-bit, 256-level codebook
GGML_TYPE_Q4_K = 12, // 4-bit, K-quant (superblock)
GGML_TYPE_Q5_K = 13, // 5-bit, K-quant
GGML_TYPE_Q6_K = 14, // 6-bit, K-quant
GGML_TYPE_Q8_K = 15, // 8-bit, K-quant
// ... more upstream types including IQ types ...
// ==================== TURBOQUANT CUSTOM TYPES ====================
// These values use the 0x100+ custom range reserved for fork extensions
// They do not collide with upstream ggml_type values.
GGML_TYPE_TURBO2 = 0x100, // 2.0-bit TurboQuant (PolarQuant only)
GGML_TYPE_TURBO3 = 0x101, // 3.0-bit TurboQuant (PolarQuant only)
GGML_TYPE_TURBO4 = 0x102, // 4.0-bit TurboQuant (PolarQuant only)
// Full TurboQuant — PolarQuant (4-bit) + QJL residual correction
// Effective: ~3.5 bits/channel, zero accuracy loss
// Storage per 128-dim vector: 64B (polar indices) + 8B (signs) + 4B (scale) = 76B
GGML_TYPE_TURBOQUANT_QJL = 0x103,
// Count of all types (custom boundary)
GGML_TYPE_COUNT = 0x104
} ggml_type;
// ==================== GGML tensor structure ====================
// Forward declaration — actual definition resides in ggml-internal.h
// We only need type tags here; the tensor layout additions go in llama.cpp
struct ggml_tensor;
// ==================== QJL-specific constants ====================
// These match the QJL kernel definitions in ggml/src/ggml-metal.metal
#define GGML_QJL_PROJ_DIM 64 // Projection dimension (m)
#define GGML_QJL_PROJ_DIM_PACKED 8 // Bytes per sign array (64 bits → 8 bytes)
#define GGML_QJL_SIGN_EXTRA 8 // Bytes for signs per vector
#define GGML_QJL_SCALE_EXTRA 4 // Bytes for scale factor per vector (float)
#define GGML_QJL_TOTAL_EXTRA 12 // Total QJL metadata overhead per vector
// QJL scale factor defaults (for residual correction magnitude)
#define GGML_QJL_DEFAULT_SCALE 1.0f
// ==================== Integration layer ====================
// Helper: determine whether a tensor uses QJL storage
static inline bool ggml_is_qjl_type(ggml_type type) {
return type == GGML_TYPE_TURBOQUANT_QJL;
}
// Helper: compute per-vector storage breakdown for QJL
// Writes (polar_bytes, qjl_sign_bytes, qjl_scale_bytes) through the out-parameters
static inline void ggml_qjl_storage_breakdown(int * polar_bytes, int * qjl_sign_bytes, int * qjl_scale_bytes) {
// PolarQuant part: 4 bits per coordinate → d/2 bytes (for d=128, that's 64 bytes)
// QJL part: 8 bytes signs + 4 bytes scale = 12 bytes
*polar_bytes = 64; // hardcoded for d=128; code should validate d==128
*qjl_sign_bytes = GGML_QJL_SIGN_EXTRA;
*qjl_scale_bytes = GGML_QJL_SCALE_EXTRA;
}
#ifdef __cplusplus
}
#endif
#endif // GGML_H

View File

@@ -1,289 +0,0 @@
//
// ggml-metal.m: Metal backend integration for QJL kernels
// Uses proper Metal create-buffer-then-dispatch pattern.
//
#import <Foundation/Foundation.h>
#import <Metal/Metal.h>
#include "ggml.h"
// -----------------------------------------------------------------------------
// Global device state
// -----------------------------------------------------------------------------
static id<MTLDevice> g_metal_device = nil;
static id<MTLCommandQueue> g_cmd_queue = nil;
// PSOs
static id<MTLComputePipelineState> g_pso_turbo4_dequant = nil;
static id<MTLComputePipelineState> g_pso_qjl_encode = nil;
static id<MTLComputePipelineState> g_pso_qjl_decode = nil;
static id<MTLComputePipelineState> g_pso_turboquant_qjl = nil;
// Kernel names
static NSString * const kKernelTurbo4Dequant = @"kernel_turbo4_dequant";
static NSString * const kKernelQjlEncodeResidual = @"kernel_qjl_encode_residual";
static NSString * const kKernelQjlDecodeResidual = @"kernel_qjl_decode_residual";
static NSString * const kKernelTurboquantQjlDequant = @"kernel_turboquant_qjl_dequant";
// -----------------------------------------------------------------------------
// Public: set device
// -----------------------------------------------------------------------------
void ggml_metal_set_device(id<MTLDevice> device, id<MTLCommandQueue> queue) {
g_metal_device = device;
g_cmd_queue = queue;
}
// -----------------------------------------------------------------------------
// Compile kernel from embedded Metal source
// -----------------------------------------------------------------------------
static id<MTLComputePipelineState> compile_kernel(NSString *source, NSString *name) {
NSError *error = nil;
id<MTLLibrary> lib = [g_metal_device newLibraryWithSource:source options:nil error:&error];
if (!lib) {
NSLog(@"Metal compile failed for %@: %@", name, error.localizedDescription);
return nil;
}
id<MTLFunction> fn = [lib newFunctionWithName:name];
if (!fn) {
NSLog(@"Metal kernel %@ not found", name);
return nil;
}
return [g_metal_device newComputePipelineStateWithFunction:fn error:&error];
}
// -----------------------------------------------------------------------------
// Register all QJL kernels; called once after device init
// -----------------------------------------------------------------------------
void ggml_metal_register_turboquant_kernels(NSString *metal_source) {
if (!g_metal_device) {
NSLog(@"Metal device not set — call ggml_metal_set_device first");
return;
}
g_pso_turbo4_dequant = compile_kernel(metal_source, kKernelTurbo4Dequant);
g_pso_qjl_encode = compile_kernel(metal_source, kKernelQjlEncodeResidual);
g_pso_qjl_decode = compile_kernel(metal_source, kKernelQjlDecodeResidual);
g_pso_turboquant_qjl = compile_kernel(metal_source, kKernelTurboquantQjlDequant);
}
// =============================================================================
// DISPATCH ROUTINES: each allocates MTLBuffers, encodes, and commits
// =============================================================================
// Helper: create MTLBuffer from raw bytes (copies into GPU memory)
static inline id<MTLBuffer> make_buffer(const void *ptr, size_t size) {
// Shared storage so CPU/GPU can both access
return [g_metal_device newBufferWithBytes:ptr
length:size
options:MTLResourceStorageModeShared];
}
// -----------------------------------------------------------------------------
// kernel_turbo4_dequant: dequantize 4-bit PolarQuant vectors
// -----------------------------------------------------------------------------
void ggml_metal_kernel_turbo4_dequant(
const uint8_t * polar_packed,
const float * polar_norm,
float * dst,
int n_vectors,
int d
) {
if (!g_pso_turbo4_dequant) return;
if (!g_cmd_queue) return;
id<MTLCommandBuffer> cmd = [g_cmd_queue commandBuffer];
id<MTLComputeCommandEncoder> enc = [cmd computeCommandEncoder];
[enc setComputePipelineState:g_pso_turbo4_dequant];
// Buffer binding layout from Metal kernel:
// buffer<float> polar_packed [0]
// buffer<float> polar_norm [1]
// buffer<float> dst [2]
// constant int& d [3]
size_t polar_sz = (size_t)n_vectors * (d/2);
size_t norm_sz = (size_t)n_vectors * sizeof(float);
size_t dst_sz = (size_t)n_vectors * d * sizeof(float);
id<MTLBuffer> buf_polar = make_buffer(polar_packed, polar_sz);
id<MTLBuffer> buf_norm = make_buffer(polar_norm, norm_sz);
id<MTLBuffer> buf_dst = make_buffer(dst, dst_sz);
[enc setBuffer:buf_polar offset:0 atIndex:0];
[enc setBuffer:buf_norm offset:0 atIndex:1];
[enc setBuffer:buf_dst offset:0 atIndex:2];
[enc setBytes:&d length:sizeof(d) atIndex:3];
// Thread config: one thread per vector
MTLSize grid = MTLSizeMake(n_vectors, 1, 1);
MTLSize block = MTLSizeMake(256, 1, 1); // fixed 256-wide threadgroup; a reasonable default for this dispatch
[enc dispatchThreads:grid threadsPerThreadgroup:block];
[enc endEncoding];
[cmd commit];
[cmd waitUntilCompleted]; // sync for simplicity; async would need double-buffering
}
// -----------------------------------------------------------------------------
// kernel_qjl_encode_residual: encode residual signs + scale
// -----------------------------------------------------------------------------
void ggml_metal_kernel_qjl_encode_residual(
const float * residuals,
const float * proj_matrix,
uint8_t * signs_packed,
float * scale_out,
int n_vectors,
int d
) {
if (!g_pso_qjl_encode) return;
id<MTLCommandBuffer> cmd = [g_cmd_queue commandBuffer];
id<MTLComputeCommandEncoder> enc = [cmd computeCommandEncoder];
[enc setComputePipelineState:g_pso_qjl_encode];
// Kernel: buffer<float> residuals [0]
// buffer<float> proj_matrix [1] (d × 64)
// buffer<uint8> signs_packed [2] (n_vectors × 8)
// buffer<float> scale_out [3] (n_vectors)
// constant int& n_vectors [4]
// constant int& d [5]
size_t res_sz = (size_t)n_vectors * d * sizeof(float);
size_t proj_sz = (size_t)d * 64 * sizeof(float);
size_t sign_sz = (size_t)n_vectors * 8;
size_t scale_sz = (size_t)n_vectors * sizeof(float);
id<MTLBuffer> buf_res = make_buffer(residuals, res_sz);
id<MTLBuffer> buf_proj = make_buffer(proj_matrix, proj_sz);
id<MTLBuffer> buf_sign = make_buffer(signs_packed, sign_sz);
id<MTLBuffer> buf_scale= make_buffer(scale_out, scale_sz);
[enc setBuffer:buf_res offset:0 atIndex:0];
[enc setBuffer:buf_proj offset:0 atIndex:1];
[enc setBuffer:buf_sign offset:0 atIndex:2];
[enc setBuffer:buf_scale offset:0 atIndex:3];
[enc setBytes:&n_vectors length:sizeof(n_vectors) atIndex:4];
[enc setBytes:&d length:sizeof(d) atIndex:5];
MTLSize grid = MTLSizeMake(n_vectors, 1, 1);
MTLSize block = MTLSizeMake(256, 1, 1);
[enc dispatchThreads:grid threadsPerThreadgroup:block];
[enc endEncoding];
[cmd commit];
[cmd waitUntilCompleted];
}
// -----------------------------------------------------------------------------
// kernel_qjl_decode_residual: add QJL correction to PolarQuant output
// -----------------------------------------------------------------------------
void ggml_metal_kernel_qjl_decode_residual(
const uint8_t * polar_packed,
const float * polar_norm,
const uint8_t * qjl_signs,
const float * qjl_scale,
const float * proj_matrix,
float * dst,
int n_vectors,
int d
) {
if (!g_pso_qjl_decode) return;
id<MTLCommandBuffer> cmd = [g_cmd_queue commandBuffer];
id<MTLComputeCommandEncoder> enc = [cmd computeCommandEncoder];
[enc setComputePipelineState:g_pso_qjl_decode];
// buffer layout: 0=polar_packed, 1=polar_norm, 2=qjl_signs,
// 3=qjl_scale, 4=proj_matrix, 5=dst, 6=d
size_t polar_sz = (size_t)n_vectors * (d/2);
size_t norm_sz = (size_t)n_vectors * sizeof(float);
size_t sign_sz = (size_t)n_vectors * 8;
size_t scale_sz = (size_t)n_vectors * sizeof(float);
size_t proj_sz = (size_t)d * 64 * sizeof(float);
size_t dst_sz = (size_t)n_vectors * d * sizeof(float);
id<MTLBuffer> buf_polar = make_buffer(polar_packed, polar_sz);
id<MTLBuffer> buf_norm = make_buffer(polar_norm, norm_sz);
id<MTLBuffer> buf_sign = make_buffer(qjl_signs, sign_sz);
id<MTLBuffer> buf_scale = make_buffer(qjl_scale, scale_sz);
id<MTLBuffer> buf_proj = make_buffer(proj_matrix, proj_sz);
id<MTLBuffer> buf_dst = make_buffer(dst, dst_sz);
[enc setBuffer:buf_polar offset:0 atIndex:0];
[enc setBuffer:buf_norm offset:0 atIndex:1];
[enc setBuffer:buf_sign offset:0 atIndex:2];
[enc setBuffer:buf_scale offset:0 atIndex:3];
[enc setBuffer:buf_proj offset:0 atIndex:4];
[enc setBuffer:buf_dst offset:0 atIndex:5];
[enc setBytes:&d length:sizeof(d) atIndex:6];
MTLSize grid = MTLSizeMake(n_vectors, 1, 1);
MTLSize block = MTLSizeMake(256, 1, 1);
[enc dispatchThreads:grid threadsPerThreadgroup:block];
[enc endEncoding];
[cmd commit];
[cmd waitUntilCompleted];
}
// -----------------------------------------------------------------------------
// kernel_turboquant_qjl_dequant: fused PolarQuant dequant + QJL correction
// -----------------------------------------------------------------------------
void ggml_metal_kernel_turboquant_qjl_dequant(
const uint8_t * polar_packed,
const float * polar_norm,
const uint8_t * qjl_signs,
const float * qjl_scale,
const float * proj_matrix,
float * dst,
int n_vectors,
int d
) {
if (!g_pso_turboquant_qjl) return;
id<MTLCommandBuffer> cmd = [g_cmd_queue commandBuffer];
id<MTLComputeCommandEncoder> enc = [cmd computeCommandEncoder];
[enc setComputePipelineState:g_pso_turboquant_qjl];
// Binding: 0=polar_packed, 1=polar_norm, 2=qjl_signs, 3=qjl_scale,
// 4=proj_matrix, 5=dst, 6=n_vectors, 7=d
size_t polar_sz = (size_t)n_vectors * (d/2);
size_t norm_sz = (size_t)n_vectors * sizeof(float);
size_t sign_sz = (size_t)n_vectors * 8;
size_t scale_sz = (size_t)n_vectors * sizeof(float);
size_t proj_sz = (size_t)d * 64 * sizeof(float);
size_t dst_sz = (size_t)n_vectors * d * sizeof(float);
id<MTLBuffer> buf_polar = make_buffer(polar_packed, polar_sz);
id<MTLBuffer> buf_norm = make_buffer(polar_norm, norm_sz);
id<MTLBuffer> buf_sign = make_buffer(qjl_signs, sign_sz);
id<MTLBuffer> buf_scale = make_buffer(qjl_scale, scale_sz);
id<MTLBuffer> buf_proj = make_buffer(proj_matrix, proj_sz);
id<MTLBuffer> buf_dst = make_buffer(dst, dst_sz);
[enc setBuffer:buf_polar offset:0 atIndex:0];
[enc setBuffer:buf_norm offset:0 atIndex:1];
[enc setBuffer:buf_sign offset:0 atIndex:2];
[enc setBuffer:buf_scale offset:0 atIndex:3];
[enc setBuffer:buf_proj offset:0 atIndex:4];
[enc setBuffer:buf_dst offset:0 atIndex:5];
[enc setBytes:&n_vectors length:sizeof(n_vectors) atIndex:6];
[enc setBytes:&d length:sizeof(d) atIndex:7];
MTLSize grid = MTLSizeMake(n_vectors, 1, 1);
MTLSize block = MTLSizeMake(256, 1, 1);
[enc dispatchThreads:grid threadsPerThreadgroup:block];
[enc endEncoding];
[cmd commit];
[cmd waitUntilCompleted];
}
// -----------------------------------------------------------------------------
// Stubs for non-Metal builds
// -----------------------------------------------------------------------------
#if !defined(GGML_METAL)
void ggml_metal_set_device(void*, void*) {}
void ggml_metal_register_turboquant_kernels(const char*) {}
void ggml_metal_kernel_turbo4_dequant(const uint8_t*,const float*,float*,int,int) {}
void ggml_metal_kernel_qjl_encode_residual(const float*,const float*,uint8_t*,float*,int,int) {}
void ggml_metal_kernel_qjl_decode_residual(const uint8_t*,const float*,const uint8_t*,const float*,const float*,float*,int,int) {}
void ggml_metal_kernel_turboquant_qjl_dequant(const uint8_t*,const float*,const uint8_t*,const float*,const float*,float*,int,int) {}
#endif

View File

@@ -1,285 +0,0 @@
#include <metal_stdlib>
using namespace metal;
// Lloyd-Max Centroids (4-bit, 16 levels)
// Precomputed for N(0, 1/128)
constant float turbo4_centroids[16] = {
-0.2154, -0.1523, -0.1121, -0.0812,
-0.0554, -0.0321, -0.0105, 0.0105,
0.0321, 0.0554, 0.0812, 0.1121,
0.1523, 0.2154, 0.2800, 0.3500
};
// Fast Walsh-Hadamard Transform (In-place, SIMD-optimized)
// Assumes d=128 (standard head dimension)
kernel void kernel_fwht_128(
device float* data [[buffer(0)]],
uint tid [[thread_position_in_grid]]
) {
const uint d = 128;
uint base = tid * d;
// Stage 1-7 (128 = 2^7)
for (uint h = 1; h < d; h <<= 1) {
for (uint i = 0; i < d; i += (h << 1)) {
for (uint j = i; j < i + h; j++) {
float x = data[base + j];
float y = data[base + j + h];
data[base + j] = x + y;
data[base + j + h] = x - y;
}
}
}
// Normalize
float scale = 1.0 / sqrt(128.0);
for (uint i = 0; i < d; i++) {
data[base + i] *= scale;
}
}
// PolarQuant Turbo4 Dequantization (Attention Hot Path)
// Unpacks 4-bit indices, looks up centroids, scales by radius
kernel void kernel_turbo4_dequant(
device const uchar* src [[buffer(0)]],
device const float* norms [[buffer(1)]],
device float* dst [[buffer(2)]],
uint tid [[thread_position_in_grid]]
) {
const uint d = 128;
uint base_src = tid * (d / 2);
uint base_dst = tid * d;
float norm = norms[tid];
for (uint i = 0; i < d; i++) {
uchar packed = src[base_src + (i / 2)];
uint idx = (i % 2 == 0) ? (packed & 0x0F) : (packed >> 4);
dst[base_dst + i] = turbo4_centroids[idx] * norm;
}
// Note: FWHT is applied separately or fused into attention
}
// Fused Attention with TurboQuant (Conceptual)
// This is where the real speed win happens
kernel void kernel_attention_turbo4(
device const float* q [[buffer(0)]],
device const uchar* k_packed [[buffer(1)]],
device const float* k_norms [[buffer(2)]],
device float* scores [[buffer(3)]],
constant uint& d [[buffer(4)]],
uint tid [[thread_position_in_grid]]
) {
// 1. Dequantize K on the fly
// 2. Compute dot product with Q
// 3. Store score
}
// =====================================================================================
// QJL (Quantized Johnson-Lindenstrauss) Residual Correction
// Metal GPU Kernels — fused with PolarQuant for full TurboQuant compression
// =====================================================================================
// QJL Configuration (matches PR #131)
constant uint QJL_PROJ_DIM = 64; // Projection dimension for d=128
constant uint QJL_PROJ_DIM_PACKED = 8; // 64 bits / 8 = 8 bytes per vector
// ── QJL Residual Encode ─────────────────────────────────────────────────────────
// Projects residual onto JL space and packs sign bits.
// Dispatched during KV cache write-back (per vector).
//
// residual [buffer(0)]: float [d] — the error vector (x - polarquant(x))
// proj_matrix [buffer(1)]: float [d×64] — fixed Rademacher projection matrix
// signs_out [buffer(2)]: uchar [8] — packed 1-bit signs (output)
// d [buffer(3)]: uint — vector dimension (must be 128)
// tid/tpg threads — per-vector dispatch (one threadgroup per vector)
//
kernel void kernel_qjl_encode_residual(
device const float* residual [[buffer(0)]],
device const float* proj_matrix [[buffer(1)]],
device uchar* signs_packed [[buffer(2)]],
constant uint& d [[buffer(3)]],
uint tid [[thread_position_in_threadgroup]],
uint tpg [[threads_per_threadgroup]]
) {
const uint proj_dim = QJL_PROJ_DIM;
// Shared memory for dot products across projection dims (64 floats)
threadgroup float projections[QJL_PROJ_DIM];
// Each thread handles a slice of the projection dimension
for (uint j = tid; j < proj_dim; j += tpg) {
float dot = 0.0f;
// Dot product: residual^T * proj_matrix_column_j
for (uint i = 0; i < d; i++) {
dot += residual[i] * proj_matrix[i * proj_dim + j];
}
projections[j] = dot;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
// Thread 0 packs the signs into 8 bytes (64 bits)
if (tid == 0) {
uchar packed[QJL_PROJ_DIM_PACKED] = {0};
for (uint j = 0; j < proj_dim; j++) {
if (projections[j] >= 0.0f) {
packed[j / 8] |= (1u << (j % 8));
}
}
for (uint b = 0; b < QJL_PROJ_DIM_PACKED; b++) {
signs_packed[b] = packed[b];
}
}
}
// ── QJL Residual Decode ─────────────────────────────────────────────────────────
// Unpacks sign bits and reconstructs the residual correction vector in original space.
// Dispatched during KV cache read (fused with PolarQuant dequant in the hot path).
//
// signs [buffer(0)]: uchar [8] — packed QJL signs (1-bit signed per projection)
// proj [buffer(1)]: float [d×64] — projection matrix
// dst [buffer(2)]: float [d] — correction vector (output, to be added to reconstruction)
// d [buffer(3)]: uint
// tid/tpg — one threadgroup per vector (32-256 threads per threadgroup typical)
//
kernel void kernel_qjl_decode_residual(
device const uchar* signs_packed [[buffer(0)]],
device const float* proj_matrix [[buffer(1)]],
device float* correction [[buffer(2)]],
constant uint& d [[buffer(3)]],
uint tid [[thread_position_in_threadgroup]],
uint tpg [[threads_per_threadgroup]]
) {
const uint proj_dim = QJL_PROJ_DIM;
// Unpack signs → ±1 array in threadgroup-shared memory
threadgroup float signs[QJL_PROJ_DIM];
if (tid == 0) {
uint base = 0;
for (uint j = 0; j < proj_dim; j++) {
// Extract 1-bit
bool positive = ((signs_packed[base + (j / 8)] >> (j % 8)) & 1) != 0;
signs[j] = positive ? 1.0f : -1.0f;
}
}
threadgroup_barrier(mem_flags::mem_threadgroup);
// Each thread computes a subset of d output coordinates:
// correction[i] = Σ_j proj_matrix[i·m + j] × signs[j]
for (uint i = tid; i < d; i += tpg) {
float sum = 0.0f;
for (uint j = 0; j < proj_dim; j++) {
sum += proj_matrix[i * proj_dim + j] * signs[j];
}
correction[i] = sum;
}
}
// ── Fused TurboQuant (PolarQuant + QJL) Dequant ─────────────────────────────────
// Single-shader attention hot path: reconstructs K/V from compressed KV cache.
// Reads:
// - polar indices (4-bit), stored at kv_cache + offset
// - polar norm (float), stored in separate norm buffer
// - QJL signs (8 bytes), stored adjacent to polar data
// - QJL scale (float), stored after signs
// Outputs:
// - fully reconstructed vector [d] (FP16 or FP32 depending on macro)
//
// This replaces separate kernel_turbo4_dequant + separate correction step.
// All fused into one GPU pass → halved memory traffic and kernel dispatch cost.
//
kernel void kernel_turboquant_qjl_dequant(
device const uchar* polar_packed [[buffer(0)]], // 4-bit indices [d/2]
device const float* polar_norm [[buffer(1)]], // radius (scalar)
device const uchar* qjl_signs [[buffer(2)]], // QJL signs [8]
device const float* qjl_scale [[buffer(3)]], // QJL scale (scalar)
device const float* proj_matrix [[buffer(4)]], // d×64 projection matrix
device float* dst [[buffer(5)]], // output [d]
constant uint& d [[buffer(6)]],
uint tid [[thread_position_in_grid]]
) {
const uint proj_dim = QJL_PROJ_DIM;
uint base_polar_in = tid * (d / 2);
uint base_signs_in = tid * QJL_PROJ_DIM_PACKED;
uint base_dst = tid * d;
float norm = polar_norm[tid];
const float centroids[16] = {
-0.2154, -0.1523, -0.1121, -0.0812,
-0.0554, -0.0321, -0.0105, 0.0105,
0.0321, 0.0554, 0.0812, 0.1121,
0.1523, 0.2154, 0.2800, 0.3500
};
// ── Step 1: PolarQuant decode ──────────────────────────────────────────────
for (uint i = 0; i < d; i++) {
uchar packed = polar_packed[base_polar_in + (i / 2)];
uint idx = (i % 2 == 0) ? (packed & 0x0F) : (packed >> 4);
dst[base_dst + i] = centroids[idx] * norm;
}
// ── Step 2: Unpack QJL signs ───────────────────────────────────────────────
float signs[QJL_PROJ_DIM];
for (uint j = 0; j < proj_dim; j++) {
bool pos = ((qjl_signs[base_signs_in + (j / 8)] >> (j % 8)) & 1) != 0;
signs[j] = pos ? 1.0f : -1.0f;
}
// ── Step 3: Compute QJL correction and add ────────────────────────────────
// Correction formula: Δ = scale × R × signs
// Where R is the d×64 projection matrix, signs is the sign vector, scale is the QJL norm
for (uint i = 0; i < d; i++) {
float corr = 0.0f;
for (uint j = 0; j < proj_dim; j++) {
corr += proj_matrix[i * proj_dim + j] * signs[j];
}
dst[base_dst + i] += qjl_scale[base_signs_in / QJL_PROJ_DIM_PACKED] * corr;
// Note: scale indexed per vector; assumes proj_matrix has unit-norm rows
}
// No FWHT here — handled upstream during encoding; decode just adds correction.
}
// ── Batch QJL Encode ─────────────────────────────────────────────────────────
// Encodes multiple residual vectors (one per token-head pair) in a single dispatch.
// Used when flushing KV cache from SRAM/GPU to compressed storage.
//
kernel void kernel_qjl_encode_batch(
device const float* residuals [[buffer(0)]], // [n × d]
device const float* proj_matrix [[buffer(1)]], // [d × 64]
device uchar* signs_packed [[buffer(2)]], // [n × 8]
constant uint& d [[buffer(3)]],
uint tid [[thread_position_in_grid]]
) {
// stride and base for this vector
uint stride = d;
uint base = tid * d;
// We'll accumulate 64 dot products, then Thread 0 packs them
threadgroup float projs[QJL_PROJ_DIM];
for (uint j = tid; j < QJL_PROJ_DIM; j += 1) { // simple: one thread per proj dim for now
float dot = 0.0f;
for (uint i = 0; i < d; i++) {
dot += residuals[base + i] * proj_matrix[i * QJL_PROJ_DIM + j];
}
projs[j] = dot;
}
threadgroup_barrier(mem_flags::mem_threadgroup);
// Reduce across threads for this dimension (simplified: thread 0 packs)
if (tid == 0) {
uchar packed[QJL_PROJ_DIM_PACKED] = {0};
for (uint j = 0; j < QJL_PROJ_DIM; j++) {
if (projs[j] >= 0.0f) {
packed[j / 8] |= (1u << (j % 8));
}
}
for (uint b = 0; b < QJL_PROJ_DIM_PACKED; b++) {
signs_packed[tid * QJL_PROJ_DIM_PACKED + b] = packed[b];
}
}
}

View File

@@ -1,30 +0,0 @@
//
// llama.h — Stub header for reference integration build
//
#ifndef LLAMA_H
#define LLAMA_H
#include <cstddef>
#include <cstdint>
struct llama_context {};
struct ggml_tensor; // forward
typedef struct llama_kv_cache {
int n;
int d;
void * data;
int type; // using int instead of enum to avoid ABI issues
float * qjl_scales;
uint8_t * qjl_signs;
float * qjl_proj;
} llama_kv_cache;
// Minimal ggml_type values needed for integration
#define GGML_TYPE_F32 0
#define GGML_TYPE_F16 1
#define GGML_TYPE_Q4_0 2
#define GGML_TYPE_TURBOQUANT_QJL 0x103
#endif // LLAMA_H

View File

@@ -1,167 +0,0 @@
//
// llama.cpp — TurboQuant QJL Integration (KV Cache Hot Path)
//
// Integration layer demonstrating where QJL modifications belong.
// Minimal compilable reference implementation.
//
#include "ggml.h"
#include "llama.h"
#include <cstdlib> // malloc, free, size_t
#include <cstdint> // uint8_t, uint32_t, etc.
#include <cstring> // std::memset
#include <cmath> // std::sqrt
#include <random> // std::mt19937, std::uniform_int_distribution
#include <cstdio> // fprintf
// -----------------------------------------------------------------------------
// QJL Storage Layout
// -----------------------------------------------------------------------------
// Per-vector: 64B polar indices + 8B signs + 4B scale = 76 bytes
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// QJL KV Cache Allocation
// -----------------------------------------------------------------------------
void * llama_kv_cache_alloc_qjl(int n_vectors, int d) {
constexpr int polar_bytes = 64;
constexpr int qjl_sign_b = 8;
constexpr int qjl_scale_b = 4;
constexpr int per_vector = polar_bytes + qjl_sign_b + qjl_scale_b; // 76
constexpr int alignment = 32;
size_t raw_size = (size_t)n_vectors * per_vector;
size_t aligned_size = (raw_size + alignment - 1) & ~(alignment - 1);
void * buffer = std::malloc(aligned_size);
if (!buffer) return nullptr;
std::memset(buffer, 0, aligned_size);
return buffer;
}
// -----------------------------------------------------------------------------
// QJL Projection Matrix — allocated on model load (once)
// -----------------------------------------------------------------------------
float * qjl_projection_matrix_alloc(int d) {
if (d != 128) return nullptr;
float * matrix = (float *)std::malloc(d * 64 * sizeof(float));
if (!matrix) return nullptr;
std::mt19937 rng(0xDEADBEEF);
std::uniform_int_distribution<int> coin(0, 1);
const float scale = 1.0f / std::sqrt(64.0f);
for (int i = 0; i < d; i++) {
for (int j = 0; j < 64; j++) {
matrix[i * 64 + j] = (coin(rng) ? 1.0f : -1.0f) * scale;
}
}
return matrix;
}
void qjl_projection_matrix_free(float * matrix) {
std::free(matrix);
}
// -----------------------------------------------------------------------------
// QJL Encode — KV update path (after PolarQuant)
// -----------------------------------------------------------------------------
void qjl_encode_residuals(
const float * residuals,
const float * proj,
uint8_t * dst_signs,
float * dst_scale,
int n_vectors,
int d
) {
for (int v = 0; v < n_vectors; v++) {
const float * r = residuals + v * d;
uint8_t signs[8] = {0};
float residual_norm = 0.0f;
for (int i = 0; i < d; i++) residual_norm += r[i] * r[i];
residual_norm = std::sqrt(residual_norm);
dst_scale[v] = residual_norm;
// Project: p = R^T * r (64 dot products of length d=128)
for (int j = 0; j < 64; j++) {
float p = 0.0f;
for (int i = 0; i < d; i++) {
p += r[i] * proj[i * 64 + j];
}
if (p >= 0.0f) {
signs[j / 8] |= (1u << (j % 8));
}
}
for (int b = 0; b < 8; b++) {
dst_signs[v * 8 + b] = signs[b];
}
}
}
// -----------------------------------------------------------------------------
// QJL Decode — fused correction added to PolarQuant output
// -----------------------------------------------------------------------------
void qjl_decode_residuals(
const uint8_t * polar_packed,
const float * polar_norm,
const uint8_t * qjl_signs,
const float * qjl_scale,
const float * proj,
float * dst,
int n_vectors,
int d
) {
for (int v = 0; v < n_vectors; v++) {
const float norm = polar_norm[v];
const uint8_t * src = polar_packed + v * (d / 2);
float * out = dst + v * d;
// Lloyd-Max centroids for N(0,1) 4-bit quant, order: -0.2154 .. +0.3500
static const float centroids[16] = {
-0.2154f, -0.1523f, -0.1121f, -0.0812f,
-0.0554f, -0.0321f, -0.0105f, 0.0105f,
0.0321f, 0.0554f, 0.0812f, 0.1121f,
0.1523f, 0.2154f, 0.2800f, 0.3500f
};
for (int i = 0; i < d; i++) {
unsigned idx = (i % 2 == 0) ? (src[i/2] & 0x0F) : (src[i/2] >> 4);
out[i] = centroids[idx] * norm;
}
// QJL correction: Δ = scale × R × signs
const uint8_t * sign_buf = qjl_signs + v * 8;
const float scale = qjl_scale[v];
for (int i = 0; i < d; i++) {
float delta = 0.0f;
for (int j = 0; j < 64; j++) {
float s = ((sign_buf[j/8] >> (j%8)) & 1) ? 1.0f : -1.0f;
delta += proj[i * 64 + j] * s;
}
out[i] += scale * delta;
}
}
}
// -----------------------------------------------------------------------------
// Debug / validation
// -----------------------------------------------------------------------------
void qjl_validate_storage_allocated(void * buffer, size_t size_bytes, int n_vectors) {
const size_t min_expected = (size_t)n_vectors * 76;
if (size_bytes < min_expected) {
fprintf(stderr, "QJL storage under-allocated: got %zu, need >= %zu\n",
size_bytes, min_expected);
}
}
// -----------------------------------------------------------------------------
// Metal GPU dispatches — no-op stubs for non-Metal builds
// -----------------------------------------------------------------------------
extern "C" {
void ggml_metal_kernel_turboquant_qjl_dequant(
const uint8_t *, const float *, const uint8_t *, const float *,
const float *, float *, int, int) {}
void ggml_metal_register_turboquant_kernels(const char *) {}
void ggml_metal_set_device(void *, void *) {}
}

View File

@@ -0,0 +1,58 @@
#!/usr/bin/env python3
"""Tests for Apple Silicon DFlash benchmark planning helpers (issue #152)."""
import os
import sys
from unittest.mock import patch
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from benchmarks.dflash_apple_silicon import ( # noqa: E402
build_mlx_benchmark_command,
detect_total_memory_gb,
render_report_template,
select_pair,
)
class TestPairSelection:
def test_prefers_qwen35_9b_on_36gb_mac(self):
pair = select_pair(total_memory_gb=36)
assert pair.slug == "qwen35-9b"
assert pair.base_model == "Qwen/Qwen3.5-9B"
assert pair.draft_model == "z-lab/Qwen3.5-9B-DFlash"
def test_falls_back_to_4b_when_memory_is_tight(self):
pair = select_pair(total_memory_gb=20)
assert pair.slug == "qwen35-4b"
assert pair.base_model == "Qwen/Qwen3.5-4B"
class TestCommandGeneration:
def test_builds_upstream_mlx_benchmark_command(self):
pair = select_pair(total_memory_gb=36)
command = build_mlx_benchmark_command(pair, dataset="gsm8k", max_samples=64)
assert "python -m dflash.benchmark --backend mlx" in command
assert "--model Qwen/Qwen3.5-9B" in command
assert "--draft-model z-lab/Qwen3.5-9B-DFlash" in command
assert "--dataset gsm8k" in command
assert "--max-samples 64" in command
assert "--draft-sliding-window-size 4096" in command
class TestReportTemplate:
def test_report_template_mentions_baseline_and_verdict(self):
pair = select_pair(total_memory_gb=36)
report = render_report_template(machine_label="M3 Max 36GB", pair=pair)
assert "DFlash Apple Silicon Benchmark Report" in report
assert "M3 Max 36GB" in report
assert "Qwen/Qwen3.5-9B" in report
assert "plain MLX or llama.cpp speculative decoding" in report
assert "Worth operationalizing locally?" in report
class TestMemoryDetection:
@patch("benchmarks.dflash_apple_silicon.platform.system", return_value="Darwin")
@patch("benchmarks.dflash_apple_silicon.subprocess.check_output", return_value=b"38654705664\n")
def test_detect_total_memory_gb_on_macos(self, _mock_sysctl, _mock_system):
assert detect_total_memory_gb() == 36.0

View File

@@ -1,21 +0,0 @@
#!/usr/bin/env python3
"""Tests for hardware_optimizer compatibility shim."""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from evolution import hardware_optimizer, quant_selector
def test_hardware_optimizer_reexports_quant_selector_api():
assert hardware_optimizer.select_quant_level is quant_selector.select_quant_level
assert hardware_optimizer.detect_hardware is quant_selector.detect_hardware
assert hardware_optimizer.HardwareInfo is quant_selector.HardwareInfo
assert hardware_optimizer.QuantSelection is quant_selector.QuantSelection
def test_hardware_optimizer_exports_quant_level_definitions():
assert hardware_optimizer.QUANT_LEVELS is quant_selector.QUANT_LEVELS
assert hardware_optimizer.QuantLevel is quant_selector.QuantLevel

View File

@@ -1,74 +0,0 @@
import textwrap
from pathlib import Path
from check_markdown_links import find_broken_links
def write(path: Path, content: str) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(textwrap.dedent(content).lstrip(), encoding="utf-8")
def test_reports_missing_local_markdown_target_with_line_number(tmp_path: Path):
write(
tmp_path / "README.md",
"""
# Repo
See [status](docs/status.md).
""",
)
broken = find_broken_links(tmp_path)
assert len(broken) == 1
assert broken[0]["source"].endswith("README.md")
assert broken[0]["line"] == 3
assert broken[0]["target"] == "docs/status.md"
def test_allows_existing_relative_targets(tmp_path: Path):
write(tmp_path / "docs" / "status.md", "# Status\n")
write(
tmp_path / "README.md",
"""
# Repo
See [status](docs/status.md).
""",
)
assert find_broken_links(tmp_path) == []
def test_ignores_external_anchor_mailto_and_tel_links(tmp_path: Path):
write(
tmp_path / "README.md",
"""
[external](https://example.com)
[anchor](#section)
[mail](mailto:test@example.com)
[call](tel:988)
""",
)
assert find_broken_links(tmp_path) == []
def test_ignores_links_inside_fenced_code_blocks(tmp_path: Path):
write(
tmp_path / "README.md",
"""
```md
[broken](docs/missing.md)
```
""",
)
assert find_broken_links(tmp_path) == []
def test_skips_build_directories(tmp_path: Path):
write(tmp_path / "build" / "README.md", "[broken](missing.md)\n")
assert find_broken_links(tmp_path) == []

View File

@@ -19,36 +19,11 @@ from evolution.quant_selector import (
class TestQuantLevels:
def test_levels_ordered_by_quality(self):
"""TurboQuant levels should be ordered from best quality to most aggressive.
The quality ordering invariant for TurboQuant levels is monotonically
increasing compression_ratio (more aggressive = more compression).
Non-TurboQuant fallbacks (e.g. q4_0) are placed after all TurboQuant
levels and may have any compression ratio — they exist as safe defaults,
not as part of the quality progression.
"""
turbo_quant_names = {"turbo4", "turbo3", "turbo2"}
turbo_levels = [l for l in QUANT_LEVELS if l.name in turbo_quant_names]
for i in range(len(turbo_levels) - 1):
assert turbo_levels[i].compression_ratio <= turbo_levels[i + 1].compression_ratio, (
f"TurboQuant {turbo_levels[i].name} (compression={turbo_levels[i].compression_ratio}x) "
f"should have <= compression than {turbo_levels[i+1].name} "
f"(compression={turbo_levels[i+1].compression_ratio}x)"
)
def test_fallback_quant_is_last(self):
"""Non-TurboQuant fallbacks (e.g. q4_0) should be at the end of the list."""
turbo_quant_names = {"turbo4", "turbo3", "turbo2"}
found_fallback = False
for level in QUANT_LEVELS:
if level.name not in turbo_quant_names:
found_fallback = True
elif found_fallback:
pytest.fail(
f"TurboQuant level '{level.name}' appears after a fallback level. "
f"All TurboQuant levels must precede fallbacks."
)
def test_levels_keep_turboquant_quality_order_with_q4_fallback_last(self):
"""TurboQuant levels should lead, with q4_0 reserved as the non-Turbo fallback."""
names = [level.name for level in QUANT_LEVELS]
assert names[:3] == ["turbo4", "turbo3", "turbo2"]
assert names[-1] == "q4_0"
def test_all_levels_have_required_fields(self):
for level in QUANT_LEVELS:
@@ -174,6 +149,19 @@ class TestSelection:
sel = select_quant_level(model_size_gb=16.0, context_length=65536)
assert len(sel.warnings) > 0
def test_falls_back_to_turbo2_when_nothing_fits(self):
with patch("evolution.quant_selector.detect_hardware") as mock_hw:
mock_hw.return_value = HardwareInfo(
total_memory_gb=8,
available_memory_gb=6,
gpu_memory_gb=8,
gpu_name="Tiny GPU",
cpu_cores=4,
detection_method="mock",
)
sel = select_quant_level(model_size_gb=16.0, context_length=131072)
assert sel.level.name == "turbo2"
def test_reasoning_contains_key_info(self):
with patch("evolution.quant_selector.detect_hardware") as mock_hw:
mock_hw.return_value = HardwareInfo(

View File

@@ -1,83 +0,0 @@
"""Tests for smoke workflow CI configuration.
Validates that the GitHub Actions / Gitea Actions smoke workflow
actually runs the standalone CMake build and test suite, not just
parse checks.
"""
from pathlib import Path
import yaml
import pytest
WORKFLOW_PATH = Path(".gitea/workflows/smoke.yml")
@pytest.fixture
def workflow():
"""Load and parse the smoke workflow YAML."""
content = WORKFLOW_PATH.read_text(encoding="utf-8")
return yaml.safe_load(content)
def test_smoke_workflow_exists():
"""Smoke workflow file must exist."""
assert WORKFLOW_PATH.exists(), f"Missing {WORKFLOW_PATH}"
def test_smoke_has_cmake_configure_step(workflow):
"""Smoke workflow must configure the CMake project with tests enabled."""
steps = workflow["jobs"]["smoke"]["steps"]
cmake_found = False
for step in steps:
run = step.get("run", "")
if "cmake -S . -B build" in run and "TURBOQUANT_BUILD_TESTS=ON" in run:
cmake_found = True
break
assert cmake_found, (
"Smoke workflow missing cmake configure step with TURBOQUANT_BUILD_TESTS=ON"
)
def test_smoke_has_cmake_build_step(workflow):
"""Smoke workflow must build the CMake project."""
steps = workflow["jobs"]["smoke"]["steps"]
build_found = False
for step in steps:
run = step.get("run", "")
if "cmake --build build" in run:
build_found = True
break
assert build_found, "Smoke workflow missing cmake --build step"
def test_smoke_has_ctest_step(workflow):
"""Smoke workflow must run ctest."""
steps = workflow["jobs"]["smoke"]["steps"]
ctest_found = False
for step in steps:
run = step.get("run", "")
if "ctest" in run and "output-on-failure" in run:
ctest_found = True
break
assert ctest_found, "Smoke workflow missing ctest --output-on-failure step"
def test_smoke_build_before_secret_scan(workflow):
"""Build and test steps must run before secret scan (fail fast on build errors)."""
steps = workflow["jobs"]["smoke"]["steps"]
names = [s.get("name", "") for s in steps]
build_idx = None
scan_idx = None
for i, name in enumerate(names):
if "cmake" in name.lower() or "build" in name.lower():
if build_idx is None:
build_idx = i
if "secret" in name.lower():
scan_idx = i
if build_idx is not None and scan_idx is not None:
assert build_idx < scan_idx, (
"Build step should run before secret scan to fail fast on broken code"
)