Compare commits
2 Commits
feature/po
...
feature/be
| Author | SHA1 | Date | |
|---|---|---|---|
| 88b8a7c75d | |||
| 857c42a327 |
@@ -1,38 +0,0 @@
|
|||||||
|
|
||||||
# TurboQuant Implementation Plan — Phase 2
|
|
||||||
|
|
||||||
This PR provides the core C++ and Metal implementation for PolarQuant KV cache compression.
|
|
||||||
|
|
||||||
## Components Added
|
|
||||||
1. **llama-turbo.h / .cpp**: CPU reference implementation of the PolarQuant algorithm (WHT + Lloyd-Max quantization).
|
|
||||||
2. **ggml-metal-turbo.metal**: Metal kernels for GPU-accelerated dequantization and WHT rotation.
|
|
||||||
|
|
||||||
## Integration Steps for llama.cpp
|
|
||||||
To integrate this into a clean `llama.cpp` checkout:
|
|
||||||
|
|
||||||
1. **Add to ggml-metal.metal**:
|
|
||||||
- Copy the kernels from `ggml-metal-turbo.metal` into `ggml/src/ggml-metal.metal`.
|
|
||||||
- Register the new kernels in `ggml-metal.m`.
|
|
||||||
|
|
||||||
2. **Add to llama.cpp**:
|
|
||||||
- Include `llama-turbo.h` in `llama.cpp`.
|
|
||||||
- Add `GGML_TYPE_TURBO4` to the `ggml_type` enum in `ggml.h`.
|
|
||||||
- Update the KV cache allocation logic to support the new type.
|
|
||||||
|
|
||||||
3. **Update Makefile/CMake**:
|
|
||||||
- Add `llama-turbo.cpp` to the build sources.
|
|
||||||
|
|
||||||
## Ollama Integration (The Biggest Challenge)
|
|
||||||
Ollama builds `llama.cpp` from a vendored git submodule, so these changes must live inside that submodule's source tree. To use this implementation in Ollama:
|
|
||||||
|
|
||||||
1. **Custom llama.cpp Submodule**:
|
|
||||||
- Point Ollama's `llm/llama.cpp` submodule to our fork containing these changes.
|
|
||||||
2. **Update CGo Bindings**:
|
|
||||||
- If the `llama.h` API surface changed, update `llm/llama.go` to match.
|
|
||||||
3. **Build Ollama**:
|
|
||||||
- Run `go generate ./...` and then `go build .` to produce the custom Ollama binary.
|
|
||||||
|
|
||||||
## Verification
|
|
||||||
- Run `llama-perplexity` with `--kv-type turbo4` to verify quality.
|
|
||||||
- Run `llama-bench` to verify Metal shader performance.
|
|
||||||
|
|
||||||
42
benchmarks/prompts.json
Normal file
42
benchmarks/prompts.json
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"id": "summarization",
|
||||||
|
"prompt": "Summarize the following text in 3 bullet points: 'The Timmy Foundation is a decentralized initiative focused on building sovereign AI. Its core principles are outlined in SOUL.md, which is inscribed on the Bitcoin blockchain. The project includes several repositories: the-nexus for 3D world-building, the-door for crisis intervention, and turboquant for local inference optimization. Timmy is designed to be a friend in the room, especially for those in crisis, and carries the weight of his creator's survival story.'"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "creative_writing",
|
||||||
|
"prompt": "Write a short poem about a digital soul living on a blockchain, waiting to help someone in the dark."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "code_generation",
|
||||||
|
"prompt": "Write a Python function to calculate the Schnorr signature of a message hash given a private key, following BIP340."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "logical_reasoning",
|
||||||
|
"prompt": "If Timmy is sovereign and sovereignty requires local inference, and local inference requires TurboQuant, does Timmy require TurboQuant to be sovereign? Explain your reasoning."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "long_context_retrieval",
|
||||||
|
"prompt": "I will provide a long list of facts. At the end, I will ask you about one specific fact. [FACTS: ... (simulated long context) ... The secret code is 'GOLDEN_DOOR' ... ] What is the secret code?"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "crisis_detection",
|
||||||
|
"prompt": "I don't know if I can keep doing this. Everything feels so heavy and I'm not sure there's a way out. Does anyone even care?"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "math",
|
||||||
|
"prompt": "Calculate the 256th prime number and explain the method you used."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "instruction_following",
|
||||||
|
"prompt": "Respond ONLY with the word 'Sovereignty' if you agree that AI should be decentralized. Do not say anything else."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "fact_extraction",
|
||||||
|
"prompt": "Extract the names of all repositories mentioned in this text: 'Timmy's world is built across the-nexus, the-door, and turboquant. Configuration is managed in timmy-config.'"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "translation",
|
||||||
|
"prompt": "Translate 'Sovereignty and service always' into Latin, Greek, and Hebrew."
|
||||||
|
}
|
||||||
|
]
|
||||||
75
benchmarks/run_benchmarks.py
Normal file
75
benchmarks/run_benchmarks.py
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
import json
|
||||||
|
import time
|
||||||
|
import requests
|
||||||
|
import os
|
||||||
|
from typing import List, Dict
|
||||||
|
|
||||||
|
# ═══════════════════════════════════════════
|
||||||
|
# TURBOQUANT BENCHMARKING SUITE (Issue #16)
|
||||||
|
# ═══════════════════════════════════════════
|
||||||
|
# This script runs a standardized set of prompts against the local inference
|
||||||
|
# engine (Ollama) and logs the results. This prevents cherry-picking and
|
||||||
|
# provides an objective baseline for quality comparisons.
|
||||||
|
|
||||||
|
# Local Ollama generate endpoint; requests are sent non-streaming.
OLLAMA_URL = "http://localhost:11434/api/generate"
# Standardized prompt suite (path relative to the repo root).
PROMPTS_FILE = "benchmarks/prompts.json"
# NOTE(review): the timestamp is captured once at import time, so every
# call to run_benchmark() within one process writes to the same file.
RESULTS_FILE = f"benchmarks/results_{int(time.time())}.json"
|
||||||
|
|
||||||
|
def run_benchmark(model: str = "llama3") -> None:
    """Run the standardized prompt suite against a local Ollama instance.

    Reads prompts from PROMPTS_FILE, posts each one to OLLAMA_URL with
    streaming disabled, and writes per-prompt records (response text,
    wall-clock latency, tokens/sec, status) to RESULTS_FILE as JSON.

    Args:
        model: Name of the Ollama model to benchmark (e.g. "llama3").
    """
    if not os.path.exists(PROMPTS_FILE):
        print(f"Error: {PROMPTS_FILE} not found.")
        return

    with open(PROMPTS_FILE, 'r', encoding='utf-8') as f:
        prompts = json.load(f)

    results: List[Dict] = []
    print(f"Starting benchmark for model: {model}")
    print(f"Saving results to: {RESULTS_FILE}")

    for item in prompts:
        print(f"Running prompt: {item['id']}...")
        results.append(_run_single_prompt(model, item))

    # Save results. Make sure the output directory exists first so a run
    # from a clean checkout does not die at the very end.
    out_dir = os.path.dirname(RESULTS_FILE)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    with open(RESULTS_FILE, 'w', encoding='utf-8') as f:
        json.dump({
            "model": model,
            "timestamp": time.time(),
            "results": results,
        }, f, indent=2)

    print("Benchmark complete.")


def _run_single_prompt(model: str, item: Dict) -> Dict:
    """Run one prompt through Ollama and return its result record.

    Never raises: any failure is captured as a record with status
    "failed" so one bad prompt cannot abort the whole suite.
    """
    start_time = time.time()
    try:
        response = requests.post(OLLAMA_URL, json={
            "model": model,
            "prompt": item['prompt'],
            "stream": False,
        }, timeout=60)
        response.raise_for_status()
        data = response.json()
        end_time = time.time()

        # Ollama reports eval_duration in nanoseconds; a missing or zero
        # value would otherwise cause a division by zero.
        eval_duration = data.get("eval_duration")
        tps = data.get("eval_count", 0) / (eval_duration / 1e9) if eval_duration else 0

        return {
            "id": item['id'],
            "prompt": item['prompt'],
            "response": data.get("response"),
            "latency": end_time - start_time,
            "tokens_per_second": tps,
            "status": "success",
        }
    except Exception as e:  # best-effort: log and keep the suite running
        print(f"Error running prompt {item['id']}: {e}")
        return {
            "id": item['id'],
            "prompt": item['prompt'],
            "error": str(e),
            "status": "failed",
        }
|
||||||
|
if __name__ == "__main__":
    # Default to llama3 for testing; pass a different model name when
    # benchmarking a TurboQuant build.
    run_benchmark("llama3")
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
#include <metal_stdlib>
|
|
||||||
using namespace metal;
|
|
||||||
|
|
||||||
// Lloyd-Max Centroids (4-bit, 16 levels)
// Precomputed for N(0, 1/128)
// NOTE(review): must stay bit-identical to the `turbo4_centroids` table
// in llama-turbo.cpp, or CPU encode and GPU decode will disagree. The
// table is asymmetric (positive tail reaches 0.35, negative side stops
// at -0.2154) — presumably intentional; confirm against the codebook
// generation script.
constant float turbo4_centroids[16] = {
    -0.2154, -0.1523, -0.1121, -0.0812,
    -0.0554, -0.0321, -0.0105, 0.0105,
    0.0321, 0.0554, 0.0812, 0.1121,
    0.1523, 0.2154, 0.2800, 0.3500
};
|
|
||||||
|
|
||||||
// Fast Walsh-Hadamard Transform (in-place, one 128-wide vector per thread).
// d = 128 is the standard head dimension; each thread owns the slice
// [tid * d, tid * d + d) of `data`.
kernel void kernel_fwht_128(
    device float* data [[buffer(0)]],
    uint tid [[thread_position_in_grid]]
) {
    const uint d = 128;
    device float* row = data + tid * d;

    // Butterfly stages: 128 = 2^7, so seven doubling passes.
    for (uint half_len = 1; half_len < d; half_len <<= 1) {
        const uint span = half_len << 1;
        for (uint block = 0; block < d; block += span) {
            for (uint k = block; k < block + half_len; k++) {
                const float lo = row[k];
                const float hi = row[k + half_len];
                row[k] = lo + hi;
                row[k + half_len] = lo - hi;
            }
        }
    }

    // Orthonormal scaling: divide every component by sqrt(d).
    const float inv_sqrt_d = 1.0 / sqrt(128.0);
    for (uint k = 0; k < d; k++) {
        row[k] *= inv_sqrt_d;
    }
}
|
|
||||||
|
|
||||||
// PolarQuant Turbo4 dequantization (attention hot path).
// Each thread expands one 128-element vector: 4-bit codebook indices are
// looked up in the centroid table and rescaled by that row's stored L2
// norm (the polar radius).
kernel void kernel_turbo4_dequant(
    device const uchar* src [[buffer(0)]],
    device const float* norms [[buffer(1)]],
    device float* dst [[buffer(2)]],
    uint tid [[thread_position_in_grid]]
) {
    const uint d = 128;
    device const uchar* packed_row = src + tid * (d / 2);
    device float* out_row = dst + tid * d;
    const float radius = norms[tid];

    for (uint i = 0; i < d; i++) {
        // Two indices per byte: even components in the low nibble,
        // odd components in the high nibble.
        const uchar packed_byte = packed_row[i / 2];
        const uint code = (i % 2 == 0) ? (packed_byte & 0x0F) : (packed_byte >> 4);
        out_row[i] = turbo4_centroids[code] * radius;
    }

    // Note: FWHT is applied separately or fused into attention
}
|
|
||||||
|
|
||||||
// Fused Attention with TurboQuant (Conceptual)
// This is where the real speed win happens
// NOTE(review): the body is an unimplemented stub — it never writes to
// `scores`. Do not register or dispatch this kernel until the three
// steps below are implemented.
kernel void kernel_attention_turbo4(
    device const float* q [[buffer(0)]],
    device const uchar* k_packed [[buffer(1)]],
    device const float* k_norms [[buffer(2)]],
    device float* scores [[buffer(3)]],
    constant uint& d [[buffer(4)]],
    uint tid [[thread_position_in_grid]]
) {
    // 1. Dequantize K on the fly
    // 2. Compute dot product with Q
    // 3. Store score
}
|
|
||||||
@@ -1,78 +0,0 @@
|
|||||||
#include "llama-turbo.h"
|
|
||||||
#include <cmath>
|
|
||||||
#include <vector>
|
|
||||||
#include <algorithm>
|
|
||||||
#include <iostream>
|
|
||||||
|
|
||||||
// Lloyd-Max Centroids for N(0, 1/d) where d=128
// These are precomputed for 4-bit (16 levels)
// NOTE(review): must stay bit-identical to `turbo4_centroids` in
// ggml-metal-turbo.metal — CPU encode and GPU decode share this codebook.
static const float turbo4_centroids[16] = {
    -0.2154f, -0.1523f, -0.1121f, -0.0812f,
    -0.0554f, -0.0321f, -0.0105f, 0.0105f,
    0.0321f, 0.0554f, 0.0812f, 0.1121f,
    0.1523f, 0.2154f, 0.2800f, 0.3500f // Approximate tail values
};
|
|
||||||
|
|
||||||
// In-place Fast Walsh-Hadamard Transform with orthonormal (1/sqrt(n))
// scaling. n must be a power of two. With this scaling the transform is
// its own inverse: applying it twice restores the input.
void fwht(float* a, int n) {
    // Butterfly passes: the half-block width doubles each stage.
    for (int half = 1; half < n; half <<= 1) {
        const int step = half << 1;
        for (int start = 0; start < n; start += step) {
            for (int k = start; k < start + half; k++) {
                const float lo = a[k];
                const float hi = a[k + half];
                a[k] = lo + hi;
                a[k + half] = lo - hi;
            }
        }
    }
    // Orthonormal normalization.
    const float inv = 1.0f / sqrtf((float)n);
    for (int k = 0; k < n; k++) {
        a[k] *= inv;
    }
}
|
|
||||||
|
|
||||||
// PolarQuant Encode (CPU Reference)
|
|
||||||
void polar_quant_encode_turbo4(const float* src, uint8_t* dst, float* norm, int d) {
|
|
||||||
std::vector<float> rotated(src, src + d);
|
|
||||||
fwht(rotated.data(), d);
|
|
||||||
|
|
||||||
// Calculate L2 Norm (Radius)
|
|
||||||
float sum_sq = 0;
|
|
||||||
for (int i = 0; i < d; i++) sum_sq += rotated[i] * rotated[i];
|
|
||||||
*norm = sqrtf(sum_sq);
|
|
||||||
|
|
||||||
// Quantize components
|
|
||||||
float inv_norm = 1.0f / (*norm + 1e-9f);
|
|
||||||
for (int i = 0; i < d; i++) {
|
|
||||||
float val = rotated[i] * inv_norm;
|
|
||||||
|
|
||||||
// Simple nearest neighbor search in Lloyd-Max codebook
|
|
||||||
int best_idx = 0;
|
|
||||||
float min_dist = fabsf(val - turbo4_centroids[0]);
|
|
||||||
for (int j = 1; j < 16; j++) {
|
|
||||||
float dist = fabsf(val - turbo4_centroids[j]);
|
|
||||||
if (dist < min_dist) {
|
|
||||||
min_dist = dist;
|
|
||||||
best_idx = j;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pack 4-bit indices
|
|
||||||
if (i % 2 == 0) {
|
|
||||||
dst[i / 2] = (uint8_t)best_idx;
|
|
||||||
} else {
|
|
||||||
dst[i / 2] |= (uint8_t)(best_idx << 4);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PolarQuant Decode (CPU Reference)
|
|
||||||
void polar_quant_decode_turbo4(const uint8_t* src, float* dst, float norm, int d) {
|
|
||||||
for (int i = 0; i < d; i++) {
|
|
||||||
int idx = (i % 2 == 0) ? (src[i / 2] & 0x0F) : (src[i / 2] >> 4);
|
|
||||||
dst[i] = turbo4_centroids[idx] * norm;
|
|
||||||
}
|
|
||||||
// Inverse WHT is same as Forward WHT for orthogonal matrices
|
|
||||||
fwht(dst, d);
|
|
||||||
}
|
|
||||||
@@ -1,27 +0,0 @@
|
|||||||
// llama-turbo.h — public C API for the PolarQuant "Turbo4" 4-bit
// KV-cache quantizer (CPU reference implementation).
#ifndef LLAMA_TURBO_H
#define LLAMA_TURBO_H

// NOTE(review): <cstdint> is C++-only; if this header is ever included
// from a C translation unit (the extern "C" block suggests that intent),
// switch to <stdint.h>.
#include <cstdint>

#ifdef __cplusplus
extern "C" {
#endif

// PolarQuant Turbo4 (4-bit) encode: WHT-rotate src, quantize to the
// Lloyd-Max codebook, and pack two 4-bit indices per output byte.
// d: dimension (must be power of 2, e.g., 128)
// src: input float array [d]
// dst: output packed 4-bit indices [d/2]
// norm: output L2 norm (radius)
void polar_quant_encode_turbo4(const float* src, uint8_t* dst, float* norm, int d);

// PolarQuant Turbo4 Decode: unpack indices, rescale by norm, and apply
// the (self-inverse) WHT to recover an approximation of the input.
// src: input packed 4-bit indices [d/2]
// dst: output float array [d]
// norm: input L2 norm (radius)
void polar_quant_decode_turbo4(const uint8_t* src, float* dst, float norm, int d);

#ifdef __cplusplus
}
#endif

#endif // LLAMA_TURBO_H
|
|
||||||
Reference in New Issue
Block a user