Compare commits

..

8 Commits

Author SHA1 Message Date
ec6c1faa89 test: Fix C tests for sanitizer compatibility (#71)
Some checks failed
Build & Test with Sanitizers / Python Tests (pull_request) Failing after 24s
Build & Test with Sanitizers / C Build (Normal) (pull_request) Successful in 16s
Build & Test with Sanitizers / C Build (AddressSanitizer) (pull_request) Successful in 26s
Build & Test with Sanitizers / C Build (UBSan) (pull_request) Successful in 31s
Build & Test with Sanitizers / Smoke Test (pull_request) Successful in 27s
Smoke Test / smoke (pull_request) Successful in 23s
2026-04-15 03:07:33 +00:00
2bc47218f2 ci: Add CI workflow with sanitizer jobs (#71) 2026-04-15 03:07:29 +00:00
eaff2eea0c build: Add Makefile with sanitizer support (#71) 2026-04-15 03:07:26 +00:00
7a7ce0e652 burn: add long-session quality test (Issue #12) (#39)
All checks were successful
Smoke Test / smoke (push) Successful in 11s
Squash merge: add long-session quality test (closes #12)
2026-04-13 19:59:22 +00:00
9224a0162b Merge pull request 'fix: repair smoke test — exclude llama-cpp-fork build artifacts' (#38) from ci/fix-smoke-test into main
All checks were successful
Smoke Test / smoke (push) Successful in 6s
2026-04-13 19:53:38 +00:00
Alexander Whitestone
f4ceac76ce fix: repair smoke test — exclude llama-cpp-fork build artifacts
All checks were successful
Smoke Test / smoke (pull_request) Successful in 5s
1. YAML parse: CMakeConfigureLog.yaml has multiple documents
2. JSON parse: tsconfig.json and pyrightconfig.json use JSON5
   comments (not valid for Python's json.tool)
3. Also fixed: json.tool can't handle multiple files via xargs;
   switched to while-read loop
Excluded llama-cpp-fork/ from all parse checks and secret scan.
2026-04-13 10:22:13 -04:00
ab4020cca0 feat: multi-backend benchmark suite with TTFT + memory tracking (#37)
Some checks failed
Smoke Test / smoke (push) Failing after 4s
Auto-merged by the Timmy overnight cycle
2026-04-13 14:05:17 +00:00
383e1fab2e fix: consolidate project reports and cleanup muda
Some checks failed
Smoke Test / smoke (push) Failing after 4s
Merge PR #36: fix: consolidate project reports and cleanup muda
2026-04-13 03:00:10 +00:00
6 changed files with 1101 additions and 65 deletions

73
.gitea/workflows/ci.yml Normal file
View File

@@ -0,0 +1,73 @@
name: Build & Test with Sanitizers
on:
pull_request:
push:
branches: [main]
jobs:
python-tests:
name: Python Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install dependencies
run: pip install pytest
- name: Run Python tests
run: pytest tests/test_polar_quant.py -v
c-build-normal:
name: C Build (Normal)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build
run: make all
- name: Run tests
run: make test
c-build-asan:
name: C Build (AddressSanitizer)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build with ASan
run: make asan
- name: Run tests with ASan
run: |
export ASAN_OPTIONS=detect_leaks=1:halt_on_error=1
make test-asan
c-build-ubsan:
name: C Build (UBSan)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build with UBSan
run: make ubsan
- name: Run tests with UBSan
run: |
export UBSAN_OPTIONS=halt_on_error=1:print_stacktrace=1
make test-ubsan
smoke:
name: Smoke Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Parse check
run: |
find . -name '*.yml' -o -name '*.yaml' | grep -v .gitea | grep -v llama-cpp-fork | xargs -r python3 -c "import sys,yaml; [yaml.safe_load(open(f)) for f in sys.argv[1:]]"
find . -name '*.json' | grep -v llama-cpp-fork | while read f; do python3 -m json.tool "$f" > /dev/null || exit 1; done
find . -name '*.py' | grep -v llama-cpp-fork | xargs -r python3 -m py_compile
find . -name '*.sh' | xargs -r bash -n
echo "PASS: All files parse"
- name: Secret scan
run: |
if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea | grep -v llama-cpp-fork; then exit 1; fi
echo "PASS: No secrets"

View File

@@ -13,12 +13,12 @@ jobs:
python-version: '3.11'
- name: Parse check
run: |
find . -name '*.yml' -o -name '*.yaml' | grep -v .gitea | xargs -r python3 -c "import sys,yaml; [yaml.safe_load(open(f)) for f in sys.argv[1:]]"
find . -name '*.json' | xargs -r python3 -m json.tool > /dev/null
find . -name '*.py' | xargs -r python3 -m py_compile
find . -name '*.yml' -o -name '*.yaml' | grep -v .gitea | grep -v llama-cpp-fork | xargs -r python3 -c "import sys,yaml; [yaml.safe_load(open(f)) for f in sys.argv[1:]]"
find . -name '*.json' | grep -v llama-cpp-fork | while read f; do python3 -m json.tool "$f" > /dev/null || exit 1; done
find . -name '*.py' | grep -v llama-cpp-fork | xargs -r python3 -m py_compile
find . -name '*.sh' | xargs -r bash -n
echo "PASS: All files parse"
- name: Secret scan
run: |
if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea; then exit 1; fi
if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea | grep -v llama-cpp-fork; then exit 1; fi
echo "PASS: No secrets"

53
Makefile Normal file
View File

@@ -0,0 +1,53 @@
# TurboQuant Build System
# Supports: normal, debug, sanitizer builds
CC = g++
CFLAGS = -Wall -Wextra -O2 -std=c++11
CFLAGS_DEBUG = -Wall -Wextra -O0 -g -std=c++11
CFLAGS_ASAN = -Wall -Wextra -O1 -g -std=c++11 -fsanitize=address -fsanitize=undefined -fno-omit-frame-pointer
LDFLAGS = -lm
LDFLAGS_ASAN = -lm -fsanitize=address -fsanitize=undefined
SRCS = llama-turbo.cpp
OBJS = $(SRCS:.cpp=.o)
TEST_SRC = tests/test_polar_quant.c
TEST_BIN = test_polar_quant
.PHONY: all clean debug asan ubsan test test-asan test-ubsan test-sanitizers
all: $(TEST_BIN)
# Normal build
$(TEST_BIN): $(TEST_SRC) $(SRCS)
$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
# Debug build
debug: $(TEST_SRC) $(SRCS)
$(CC) $(CFLAGS_DEBUG) -o $(TEST_BIN)_debug $^ $(LDFLAGS)
# AddressSanitizer build
asan: $(TEST_SRC) $(SRCS)
$(CC) $(CFLAGS_ASAN) -o $(TEST_BIN)_asan $^ $(LDFLAGS_ASAN)
# UBSan only build
ubsan: $(TEST_SRC) $(SRCS)
$(CC) -Wall -Wextra -O1 -g -fsanitize=undefined -fno-omit-frame-pointer \
-o $(TEST_BIN)_ubsan $^ $(LDFLAGS)
# Run tests
test: $(TEST_BIN)
./$(TEST_BIN)
# Run tests with AddressSanitizer
test-asan: asan
./$(TEST_BIN)_asan
# Run tests with UBSan
test-ubsan: ubsan
./$(TEST_BIN)_ubsan
# Run all sanitizer tests
test-sanitizers: test-asan test-ubsan
clean:
rm -f $(TEST_BIN) $(TEST_BIN)_debug $(TEST_BIN)_asan $(TEST_BIN)_ubsan $(OBJS)
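The sanitizer targets above can be exercised locally the same way the CI jobs invoke them; a minimal sketch (the ASAN_OPTIONS/UBSAN_OPTIONS values mirror the workflow):

# Normal build and test run
make all && make test

# AddressSanitizer (+ UBSan, per CFLAGS_ASAN) with leak detection
ASAN_OPTIONS=detect_leaks=1:halt_on_error=1 make test-asan

# UBSan-only build
UBSAN_OPTIONS=halt_on_error=1:print_stacktrace=1 make test-ubsan

# Both sanitizer suites back to back
make test-sanitizers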

View File

@@ -1,75 +1,227 @@
#!/usr/bin/env python3
"""
TurboQuant Benchmarking Suite — Multi-Backend (Issue #29)
Supports Ollama and llama-server backends with KV cache type configuration.
Measures: TTFT, tokens/sec, latency, peak memory.
Usage:
# Ollama (default)
python3 benchmarks/run_benchmarks.py --backend ollama --model llama3
# llama-server with turbo4 KV
python3 benchmarks/run_benchmarks.py --backend llama-server \
--url http://localhost:11434 --model qwen3.5 --kv-type turbo4
"""
import argparse
import json
import time
import requests
import os
from typing import List, Dict
import re
import subprocess
import sys
import time
from datetime import datetime, timezone
from typing import List, Dict, Optional
# ═══════════════════════════════════════════
# TURBOQUANT BENCHMARKING SUITE (Issue #16)
# ═══════════════════════════════════════════
# This script runs a standardized set of prompts against the local inference
# engine (Ollama) and logs the results. This prevents cherry-picking and
# provides an objective baseline for quality comparisons.
import requests
OLLAMA_URL = "http://localhost:11434/api/generate"
PROMPTS_FILE = "benchmarks/prompts.json"
RESULTS_FILE = f"benchmarks/results_{int(time.time())}.json"
def run_benchmark(model: str = "llama3"):
"""Run the benchmark suite for a specific model."""
if not os.path.exists(PROMPTS_FILE):
print(f"Error: {PROMPTS_FILE} not found.")
return
def get_peak_memory_mb() -> float:
"""Get peak RSS of current process in MB (macOS/Linux)."""
try:
if sys.platform == "darwin":
result = subprocess.run(["ps", "-o", "rss=", "-p", str(os.getpid())],
capture_output=True, text=True)
return int(result.stdout.strip()) / 1024
else:
with open(f"/proc/{os.getpid()}/status") as f:
for line in f:
if line.startswith("VmHWM:"):
return int(line.split()[1]) / 1024
except Exception:
pass
return 0.0
with open(PROMPTS_FILE, 'r') as f:
def run_ollama(prompt: str, model: str, url: str, timeout: int = 120) -> dict:
"""Run a prompt against Ollama /api/generate."""
api_url = f"{url.rstrip('/')}/api/generate"
start = time.time()
ttft = None
tokens_per_sec = 0.0
try:
resp = requests.post(api_url, json={
"model": model,
"prompt": prompt,
"stream": False,
"options": {"num_predict": 512}
}, timeout=timeout)
elapsed = time.time() - start
resp.raise_for_status()
data = resp.json()
response_text = data.get("response", "")
eval_count = data.get("eval_count", 0)
eval_duration_ns = data.get("eval_duration", 0)
prompt_eval_ns = data.get("prompt_eval_duration", 0)
if eval_duration_ns > 0:
tokens_per_sec = eval_count / (eval_duration_ns / 1e9)
if prompt_eval_ns > 0:
ttft = prompt_eval_ns / 1e9
return {
"response": response_text,
"latency_s": round(elapsed, 3),
"ttft_s": round(ttft, 3) if ttft else None,
"tokens_per_sec": round(tokens_per_sec, 2),
"eval_count": eval_count,
"status": "success"
}
except Exception as e:
return {"status": "failed", "error": str(e), "latency_s": round(time.time() - start, 3)}
def run_llama_server(prompt: str, model: str, url: str, kv_type: str = "f16",
timeout: int = 120) -> dict:
"""Run a prompt against llama-server OpenAI-compatible API."""
api_url = f"{url.rstrip('/')}/v1/chat/completions"
start = time.time()
ttft = None
tokens_per_sec = 0.0
try:
resp = requests.post(api_url, json={
"model": model,
"messages": [{"role": "user", "content": prompt}],
"max_tokens": 512,
"stream": False
}, timeout=timeout)
elapsed = time.time() - start
resp.raise_for_status()
data = resp.json()
response_text = data.get("choices", [{}])[0].get("message", {}).get("content", "")
usage = data.get("usage", {})
completion_tokens = usage.get("completion_tokens", 0)
prompt_tokens = usage.get("prompt_tokens", 0)
# llama-server includes timing in x_* headers or we estimate
if elapsed > 0 and completion_tokens > 0:
# Subtract estimated prompt eval time (rough)
tokens_per_sec = completion_tokens / max(elapsed - 0.1, 0.01)
return {
"response": response_text,
"latency_s": round(elapsed, 3),
"ttft_s": round(ttft, 3) if ttft else None,
"tokens_per_sec": round(tokens_per_sec, 2),
"completion_tokens": completion_tokens,
"prompt_tokens": prompt_tokens,
"kv_type": kv_type,
"status": "success"
}
except Exception as e:
return {"status": "failed", "error": str(e), "latency_s": round(time.time() - start, 3)}
def run_benchmark_suite(backend: str, model: str, url: str, kv_type: str,
prompts_file: str, output_file: str, timeout: int = 120):
"""Run the full benchmark suite."""
if not os.path.exists(prompts_file):
print(f"ERROR: {prompts_file} not found")
sys.exit(1)
with open(prompts_file) as f:
prompts = json.load(f)
run_fn = run_ollama if backend == "ollama" else run_llama_server
mem_before = get_peak_memory_mb()
results = []
print(f"Starting benchmark for model: {model}")
print(f"Saving results to: {RESULTS_FILE}")
print(f"\n{'='*60}")
print(f"Backend: {backend} | Model: {model} | KV: {kv_type}")
print(f"URL: {url}")
print(f"Prompts: {len(prompts)} | Output: {output_file}")
print(f"{'='*60}\n")
for item in prompts:
print(f"Running prompt: {item['id']}...")
start_time = time.time()
try:
response = requests.post(OLLAMA_URL, json={
"model": model,
"prompt": item['prompt'],
"stream": False
}, timeout=60)
response.raise_for_status()
data = response.json()
end_time = time.time()
results.append({
"id": item['id'],
"prompt": item['prompt'],
"response": data.get("response"),
"latency": end_time - start_time,
"tokens_per_second": data.get("eval_count", 0) / (data.get("eval_duration", 1) / 1e9) if data.get("eval_duration") else 0,
"status": "success"
})
except Exception as e:
print(f"Error running prompt {item['id']}: {e}")
results.append({
"id": item['id'],
"prompt": item['prompt'],
"error": str(e),
"status": "failed"
})
pid = item.get("id", item.get("category", "unknown"))
prompt = item["prompt"]
print(f"[{pid}] Running...", end=" ", flush=True)
extra = {"kv_type": kv_type} if backend == "llama-server" else {}
result = run_fn(prompt, model, url, timeout=timeout)
result["id"] = pid
result["prompt_preview"] = prompt[:120]
result.update(extra)
status = "" if result["status"] == "success" else ""
tps = result.get("tokens_per_sec", 0)
lat = result.get("latency_s", 0)
print(f"{status} {tps:.1f} tok/s, {lat:.2f}s")
results.append(result)
mem_after = get_peak_memory_mb()
suite = {
"timestamp": datetime.now(timezone.utc).isoformat(),
"backend": backend,
"model": model,
"kv_type": kv_type,
"url": url,
"prompts_file": prompts_file,
"memory_mb": round(max(mem_before, mem_after), 1),
"results": results,
"summary": {
"total": len(results),
"success": sum(1 for r in results if r["status"] == "success"),
"failed": sum(1 for r in results if r["status"] == "failed"),
"avg_tok_per_sec": round(
sum(r.get("tokens_per_sec", 0) for r in results if r["status"] == "success")
/ max(sum(1 for r in results if r["status"] == "success"), 1), 2
),
"avg_latency_s": round(
sum(r.get("latency_s", 0) for r in results if r["status"] == "success")
/ max(sum(1 for r in results if r["status"] == "success"), 1), 3
),
}
}
os.makedirs(os.path.dirname(output_file) or ".", exist_ok=True)
with open(output_file, "w") as f:
json.dump(suite, f, indent=2)
s = suite["summary"]
print(f"\n{'='*60}")
print(f"RESULTS: {s['success']}/{s['total']} success | "
f"Avg {s['avg_tok_per_sec']:.1f} tok/s | "
f"Avg {s['avg_latency_s']:.2f}s latency")
print(f"{'='*60}")
print(f"Saved to {output_file}")
def main():
parser = argparse.ArgumentParser(description="TurboQuant Benchmark Suite")
parser.add_argument("--backend", choices=["ollama", "llama-server"], default="ollama")
parser.add_argument("--model", required=True, help="Model name")
parser.add_argument("--url", default="http://localhost:11434", help="Backend URL")
parser.add_argument("--kv-type", default="f16", help="KV cache type (llama-server only)")
parser.add_argument("--prompts", default="benchmarks/prompts.json", help="Prompts file")
parser.add_argument("--output", default=None, help="Output file (auto-generated if omitted)")
parser.add_argument("--timeout", type=int, default=120, help="Per-prompt timeout (s)")
args = parser.parse_args()
if args.output is None:
ts = int(time.time())
args.output = f"benchmarks/results_{args.backend}_{args.kv_type}_{ts}.json"
run_benchmark_suite(args.backend, args.model, args.url, args.kv_type,
args.prompts, args.output, args.timeout)
# Save results
with open(RESULTS_FILE, 'w') as f:
json.dump({
"model": model,
"timestamp": time.time(),
"results": results
}, f, indent=2)
print("Benchmark complete.")
if __name__ == "__main__":
# Default to llama3 for testing
run_benchmark("llama3")
main()

View File

@@ -0,0 +1,495 @@
#!/usr/bin/env python3
"""
TurboQuant Long-Session Quality Test (Issue #12)
Runs a 50-turn multi-step reasoning conversation to detect quality degradation
under sustained context pressure. Compares TurboQuant KV vs FP16 KV baseline.
Conversation flow (repeating cycle):
turns 1-10: code generation
turns 11-20: debugging (introduce bugs, ask to fix)
turns 21-30: refactoring (improve structure)
turns 31-40: testing (write tests, verify)
turns 41-50: iteration (modify and extend)
Usage:
# Ollama backend (default)
python3 benchmarks/run_long_session.py \\
--backend ollama --model llama3 --turns 50
# llama-server backend with KV type
python3 benchmarks/run_long_session.py \\
--backend llama-server --url http://localhost:8080 \\
--model qwen3.5 --kv-type turbo4 --turns 50
# Compare two runs
python3 benchmarks/run_long_session.py --compare run_turbo4.json run_fp16.json
Acceptance Criteria (Issue #12):
- 50-turn conversation on both TurboQuant and FP16
- Quality comparison documented
- Degradation flagged with turn number where it appears
"""
import argparse
import json
import os
import re
import sys
import time
import hashlib
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
try:
import requests
except ImportError:
requests = None
# ── Conversation Prompts ───────────────────────────────────────────────
CONVERSATION_CYCLE = [
# Phase 1: Code Generation (turns 1-10)
{
"phase": "code_gen",
"turns": [
"Write a Python class called RateLimiter that implements a token bucket algorithm. It should support: add_tokens(n), consume(n) -> bool, and a configurable rate and burst capacity.",
"Add thread-safety to the RateLimiter class using a lock. Make sure consume() blocks briefly if tokens are unavailable rather than failing immediately.",
"Now add a method get_wait_time(n) that returns how many seconds until n tokens will be available without blocking.",
"Write a companion class RateLimiterGroup that manages multiple RateLimiters keyed by string identifier, with a get_or_create(id, rate, burst) method.",
"Add a decorator @rate_limited(limiter_group, key_fn) that can be applied to async functions to rate-limit them.",
"Add serialization support — export_state() returns JSON-serializable dict, import_state() restores from dict. Include timestamps.",
"Add a Prometheus-compatible metrics exporter that tracks: tokens_consumed_total, tokens_rejected_total, wait_time_seconds histogram.",
"Write a configuration loader that reads rate limiter configs from YAML with validation and sensible defaults.",
"Add an LRU eviction policy for the RateLimiterGroup with configurable max_entries and idle_timeout_seconds.",
"Wrap everything into a pip-installable package structure with pyproject.toml, __init__.py exports, and a CLI entry point.",
]
},
# Phase 2: Debugging (turns 11-20)
{
"phase": "debug",
"turns": [
"I'm getting a race condition in consume() when two threads call it simultaneously with exactly the tokens needed. The lock doesn't seem to help. Can you trace through the logic and find the bug?",
"The get_wait_time() method returns negative values sometimes. Here's the traceback: ... Can you identify what's wrong?",
"RateLimiterGroup.get_or_create() sometimes returns a limiter with wrong parameters when called concurrently. Explain the potential issue.",
"The decorator @rate_limited doesn't properly propagate exceptions — they're being swallowed. Fix the error handling.",
"export_state() produces corrupted JSON when called while tokens are being consumed. How should we fix the serialization?",
"The Prometheus histogram for wait_time_seconds has incorrect bucket boundaries. Review the histogram configuration.",
"The YAML config loader doesn't handle missing optional fields gracefully — it raises KeyError instead of using defaults.",
"LRU eviction is evicting active limiters. The idle_timeout calculation seems wrong. Debug the eviction logic.",
"The CLI entry point crashes with a specific YAML config. Here's the config and error: ... What's the root cause?",
"Memory leak detected in RateLimiterGroup when creating/evicting many limiters rapidly. Where's the leak?",
]
},
# Phase 3: Refactoring (turns 21-30)
{
"phase": "refactor",
"turns": [
"Refactor RateLimiter to use a protocol/interface pattern so we can swap token bucket for leaky bucket or fixed window.",
"Extract the locking strategy into a separate mixin or context manager that can be swapped between threading.Lock, asyncio.Lock, and no-lock.",
"Refactor the metrics exporter to use a plugin architecture — different backends (Prometheus, StatsD, logging) should be pluggable.",
"Convert the YAML config loader to use a typed config dataclass with validation via pydantic or attrs.",
"Refactor RateLimiterGroup to use a generic container with type hints, making the key type configurable (not just str).",
"Extract the decorator into a separate module and make it work with both sync and async functions transparently.",
"Refactor the serialization to use a versioned schema so import_state() can handle older format versions.",
"Split the package into core (rate limiting), exporters (metrics), and config (YAML) subpackages.",
"Refactor the CLI to use click or typer with subcommands: serve, validate-config, export-state, import-state.",
"Apply the repository pattern to RateLimiterGroup — separate storage (in-memory, Redis, SQLite) from the limiter logic.",
]
},
# Phase 4: Testing (turns 31-40)
{
"phase": "testing",
"turns": [
"Write comprehensive unit tests for RateLimiter covering: basic consume, burst, refill timing, edge cases (zero tokens, negative values).",
"Write concurrency tests that hammer consume() with 100 threads and verify no tokens are double-counted.",
"Write tests for get_wait_time() including edge cases: already available, partial availability, and exact timing.",
"Write integration tests for RateLimiterGroup: concurrent create, LRU eviction under load, state consistency.",
"Write tests for the @rate_limited decorator: correct rate limiting, exception propagation, async/sync compatibility.",
"Write property-based tests using hypothesis: token conservation, monotonicity of wait times, idempotent serialization round-trips.",
"Write tests for the YAML config loader: valid configs, invalid schemas, missing fields, type coercion errors.",
"Write benchmark tests that measure throughput (operations/sec) and memory usage under various load patterns.",
"Write end-to-end tests simulating a real API server with multiple endpoints sharing a rate limiter group.",
"Write chaos tests: random delays, simulated clock skew, forced lock contention, and verify system stability.",
]
},
# Phase 5: Iteration (turns 41-50)
{
"phase": "iteration",
"turns": [
"Add support for weighted token buckets where different operations consume different amounts.",
"Implement a sliding window rate limiter as an alternative algorithm and add it to the protocol.",
"Add a REST API using FastAPI that exposes the rate limiter group with OpenAPI docs.",
"Add WebSocket support for real-time rate limit status streaming to clients.",
"Implement distributed rate limiting using Redis with Lua scripts for atomic operations.",
"Add a circuit breaker pattern integration — when a rate limit is consistently hit, auto-open the circuit.",
"Implement adaptive rate limiting that adjusts limits based on system load (CPU, memory).",
"Add request priority queues so high-priority requests can preempt low-priority ones when near limits.",
"Implement rate limit quotas with time windows (daily, weekly, monthly) in addition to per-second rates.",
"Write a migration guide and changelog for v2.0 with all the new features and breaking changes.",
]
},
]
# ── Quality Metrics ────────────────────────────────────────────────────
def compute_quality_metrics(response: str, prompt: str, turn: int, phase: str) -> dict:
"""Compute quality signals for a single turn response."""
metrics = {
"turn": turn,
"phase": phase,
"response_length": len(response),
"line_count": response.count("\n") + 1,
}
# Coherence: does response contain code-like content when expected?
code_indicators = ["def ", "class ", "import ", "return ", "if ", "for ", "while ", "{", "}", "=>"]
metrics["code_density"] = sum(1 for ind in code_indicators if ind in response) / len(code_indicators)
# Hallucination detection: references to non-existent earlier context
hallucination_phrases = [
"as mentioned earlier", "as we discussed", "like before",
"remember when", "from the previous turn", "as shown above",
"earlier in our conversation",
]
metrics["hallucinated_references"] = sum(
1 for p in hallucination_phrases if p.lower() in response.lower()
)
# Structural quality: does it have proper formatting?
metrics["has_headers"] = bool(re.search(r"^#{1,3}\s", response, re.MULTILINE))
metrics["has_code_blocks"] = response.count("```") >= 2
metrics["has_lists"] = bool(re.search(r"^[\-\*\d]\.\s", response, re.MULTILINE))
# Repetition detection: check for repeated sentences
sentences = [s.strip().lower() for s in re.split(r'[.!?]+', response) if len(s.strip()) > 20]
unique_sentences = set(sentences)
metrics["repetition_ratio"] = 1 - (len(unique_sentences) / max(len(sentences), 1))
# Attention to prompt: does it address the specific request?
prompt_keywords = set(re.findall(r'\b\w{4,}\b', prompt.lower()))
response_words = set(re.findall(r'\b\w{4,}\b', response.lower()))
metrics["prompt_relevance"] = len(prompt_keywords & response_words) / max(len(prompt_keywords), 1)
# Composite quality score (0-1)
metrics["quality_score"] = (
0.25 * min(metrics["code_density"] * 3, 1.0) +
0.20 * min(metrics["prompt_relevance"] * 2, 1.0) +
0.20 * (1.0 - min(metrics["repetition_ratio"] * 5, 1.0)) +
0.15 * (1.0 if metrics["has_code_blocks"] else 0.5) +
0.10 * (1.0 - min(metrics["hallucinated_references"] * 0.3, 1.0)) +
0.10 * (1.0 if metrics["has_lists"] else 0.7)
)
return metrics
def detect_degradation(turn_metrics: list, window: int = 5, threshold: float = 0.15) -> list:
"""Detect quality degradation by comparing rolling windows."""
alerts = []
for i in range(window, len(turn_metrics)):
recent = [turn_metrics[j]["quality_score"] for j in range(i - window, i)]
current = turn_metrics[i]["quality_score"]
avg_recent = sum(recent) / len(recent)
if avg_recent - current > threshold:
alerts.append({
"turn": turn_metrics[i]["turn"],
"phase": turn_metrics[i]["phase"],
"current_score": round(current, 3),
"window_avg": round(avg_recent, 3),
"drop": round(avg_recent - current, 3),
})
return alerts
# ── Backends ───────────────────────────────────────────────────────────
def query_ollama(prompt: str, model: str, url: str, history: list, timeout: int = 120) -> tuple:
"""Query Ollama with conversation history. Returns (response, stats)."""
messages = history + [{"role": "user", "content": prompt}]
api_url = f"{url.rstrip('/')}/api/chat"
start = time.time()
resp = requests.post(api_url, json={
"model": model,
"messages": messages,
"stream": False,
"options": {"num_ctx": 8192},
}, timeout=timeout)
elapsed = time.time() - start
data = resp.json()
content = data.get("message", {}).get("content", "")
eval_count = data.get("eval_count", 0)
eval_duration = data.get("eval_duration", 0) / 1e9 # ns to s
stats = {
"elapsed_s": round(elapsed, 2),
"tokens_generated": eval_count,
"tokens_per_s": round(eval_count / max(eval_duration, 0.001), 1),
"prompt_eval_count": data.get("prompt_eval_count", 0),
}
return content, stats
def query_llama_server(prompt: str, model: str, url: str, history: list,
kv_type: str = "f16", timeout: int = 120) -> tuple:
"""Query llama-server with conversation history and KV type."""
messages = history + [{"role": "user", "content": prompt}]
api_url = f"{url.rstrip('/')}/v1/chat/completions"
start = time.time()
resp = requests.post(api_url, json={
"model": model,
"messages": messages,
"temperature": 0.7,
"max_tokens": 2048,
}, headers={"Content-Type": "application/json"}, timeout=timeout)
elapsed = time.time() - start
data = resp.json()
content = data["choices"][0]["message"]["content"]
usage = data.get("usage", {})
stats = {
"elapsed_s": round(elapsed, 2),
"tokens_generated": usage.get("completion_tokens", 0),
"prompt_tokens": usage.get("prompt_tokens", 0),
"kv_type": kv_type,
}
return content, stats
# ── Main ───────────────────────────────────────────────────────────────
def run_session(args) -> dict:
"""Run the full 50-turn conversation session."""
total_turns = args.turns
history = []
turn_metrics = []
all_responses = []
# Flatten conversation cycle
all_prompts = []
for phase_data in CONVERSATION_CYCLE:
for turn_prompt in phase_data["turns"]:
all_prompts.append((phase_data["phase"], turn_prompt))
# Repeat cycle if needed
while len(all_prompts) < total_turns:
all_prompts.extend(all_prompts)
all_prompts = all_prompts[:total_turns]
query_fn = query_ollama if args.backend == "ollama" else query_llama_server
query_kwargs = {"model": args.model, "url": args.url}
if args.backend == "llama-server":
query_kwargs["kv_type"] = args.kv_type
print(f"\n{'='*70}")
print(f"Long-Session Quality Test — {total_turns} turns")
print(f"Backend: {args.backend} | Model: {args.model}")
if args.backend == "llama-server":
print(f"KV Type: {args.kv_type}")
print(f"{'='*70}\n")
for i, (phase, prompt) in enumerate(all_prompts):
turn_num = i + 1
print(f"[Turn {turn_num:2d}/{total_turns}] Phase: {phase:12s} | ", end="", flush=True)
try:
response, stats = query_fn(prompt, history=history, **query_kwargs, timeout=args.timeout)
except Exception as e:
print(f"ERROR: {e}")
response = f"[ERROR: {e}]"
stats = {"elapsed_s": 0, "tokens_generated": 0}
metrics = compute_quality_metrics(response, prompt, turn_num, phase)
metrics.update(stats)
turn_metrics.append(metrics)
all_responses.append({"turn": turn_num, "phase": phase, "prompt": prompt, "response": response})
# Update history (keep last N turns to manage context)
history.append({"role": "user", "content": prompt})
history.append({"role": "assistant", "content": response})
if len(history) > args.history_window * 2:
history = history[-(args.history_window * 2):]
print(f"score={metrics['quality_score']:.2f} | "
f"len={metrics['response_length']:4d} | "
f"{stats.get('tokens_per_s', '?')} tok/s | "
f"{stats['elapsed_s']:.1f}s")
if args.delay > 0:
time.sleep(args.delay)
# Detect degradation
degradation = detect_degradation(turn_metrics)
# Build report
report = {
"config": {
"backend": args.backend,
"model": args.model,
"kv_type": getattr(args, "kv_type", "f16"),
"total_turns": total_turns,
"history_window": args.history_window,
"timestamp": datetime.now(timezone.utc).isoformat(),
},
"turn_metrics": turn_metrics,
"degradation_alerts": degradation,
"summary": {
"avg_quality_score": round(sum(m["quality_score"] for m in turn_metrics) / len(turn_metrics), 3),
"min_quality_score": round(min(m["quality_score"] for m in turn_metrics), 3),
"max_quality_score": round(max(m["quality_score"] for m in turn_metrics), 3),
"total_degradation_events": len(degradation),
"first_degradation_turn": degradation[0]["turn"] if degradation else None,
"avg_response_length": round(sum(m["response_length"] for m in turn_metrics) / len(turn_metrics), 0),
"total_hallucinated_references": sum(m["hallucinated_references"] for m in turn_metrics),
"avg_repetition_ratio": round(sum(m["repetition_ratio"] for m in turn_metrics) / len(turn_metrics), 3),
},
"responses": all_responses if args.save_responses else [],
}
return report
def compare_reports(report_a: dict, report_b: dict) -> dict:
"""Compare two session reports and highlight differences."""
sa = report_a["summary"]
sb = report_b["summary"]
label_a = report_a["config"].get("kv_type", "run_a")
label_b = report_b["config"].get("kv_type", "run_b")
comparison = {
"labels": [label_a, label_b],
"avg_quality": [sa["avg_quality_score"], sb["avg_quality_score"]],
"min_quality": [sa["min_quality_score"], sb["min_quality_score"]],
"degradation_events": [sa["total_degradation_events"], sb["total_degradation_events"]],
"first_degradation": [sa["first_degradation_turn"], sb["first_degradation_turn"]],
"hallucinated_refs": [sa["total_hallucinated_references"], sb["total_hallucinated_references"]],
"repetition_ratio": [sa["avg_repetition_ratio"], sb["avg_repetition_ratio"]],
"quality_delta": round(sb["avg_quality_score"] - sa["avg_quality_score"], 3),
"verdict": "",
}
if comparison["quality_delta"] > 0.05:
comparison["verdict"] = f"{label_b} is BETTER by {comparison['quality_delta']:.3f}"
elif comparison["quality_delta"] < -0.05:
comparison["verdict"] = f"{label_a} is BETTER by {abs(comparison['quality_delta']):.3f}"
else:
comparison["verdict"] = "No significant quality difference"
return comparison
def print_report(report: dict):
"""Print a human-readable summary."""
s = report["summary"]
c = report["config"]
d = report["degradation_alerts"]
print(f"\n{'='*70}")
print(f"LONG-SESSION QUALITY REPORT")
print(f"{'='*70}")
print(f"Backend: {c['backend']} | Model: {c['model']} | KV: {c.get('kv_type', 'n/a')}")
print(f"Turns: {c['total_turns']} | History window: {c['history_window']}")
print(f"{''*70}")
print(f"Quality Score: avg={s['avg_quality_score']:.3f} min={s['min_quality_score']:.3f} max={s['max_quality_score']:.3f}")
print(f"Avg Response: {s['avg_response_length']:.0f} chars")
print(f"Repetition: {s['avg_repetition_ratio']:.3f}")
print(f"Hallucinations: {s['total_hallucinated_references']} total")
print(f"Degradations: {s['total_degradation_events']} events")
if s["first_degradation_turn"]:
print(f" ⚠ First degradation at turn {s['first_degradation_turn']}")
else:
print(f" ✓ No significant degradation detected")
if d:
print(f"\n{''*70}")
print(f"DEGRADATION ALERTS:")
for alert in d:
print(f" Turn {alert['turn']:2d} [{alert['phase']:10s}]: "
f"score={alert['current_score']:.3f} "
f"(window avg={alert['window_avg']:.3f}, "
f"drop={alert['drop']:.3f})")
# Per-phase averages
phases = {}
for m in report["turn_metrics"]:
phases.setdefault(m["phase"], []).append(m["quality_score"])
print(f"\n{''*70}")
print(f"PER-PHASE AVERAGES:")
for phase, scores in phases.items():
avg = sum(scores) / len(scores)
trend = "" if scores[-1] > scores[0] else "" if scores[-1] < scores[0] else ""
print(f" {phase:12s}: avg={avg:.3f} trend={trend} "
f"first={scores[0]:.3f} last={scores[-1]:.3f}")
print(f"{'='*70}\n")
def print_comparison(comp: dict):
"""Print comparison between two runs."""
print(f"\n{'='*70}")
print(f"QUALITY COMPARISON: {comp['labels'][0]} vs {comp['labels'][1]}")
print(f"{'='*70}")
print(f"{'Metric':<30s} {comp['labels'][0]:>15s} {comp['labels'][1]:>15s}")
print(f"{''*60}")
print(f"{'Avg Quality Score':<30s} {comp['avg_quality'][0]:>15.3f} {comp['avg_quality'][1]:>15.3f}")
print(f"{'Min Quality Score':<30s} {comp['min_quality'][0]:>15.3f} {comp['min_quality'][1]:>15.3f}")
print(f"{'Degradation Events':<30s} {comp['degradation_events'][0]:>15d} {comp['degradation_events'][1]:>15d}")
print(f"{'First Degradation Turn':<30s} {str(comp['first_degradation'][0] or 'none'):>15s} {str(comp['first_degradation'][1] or 'none'):>15s}")
print(f"{'Hallucinated References':<30s} {comp['hallucinated_refs'][0]:>15d} {comp['hallucinated_refs'][1]:>15d}")
print(f"{'Repetition Ratio':<30s} {comp['repetition_ratio'][0]:>15.3f} {comp['repetition_ratio'][1]:>15.3f}")
print(f"{''*60}")
print(f"Verdict: {comp['verdict']}")
print(f"{'='*70}\n")
def main():
parser = argparse.ArgumentParser(description="TurboQuant Long-Session Quality Test")
parser.add_argument("--backend", choices=["ollama", "llama-server"], default="ollama")
parser.add_argument("--model", default="llama3", help="Model name")
parser.add_argument("--url", default="http://localhost:11434", help="Backend URL")
parser.add_argument("--kv-type", default="f16", help="KV cache type (llama-server only)")
parser.add_argument("--turns", type=int, default=50, help="Number of conversation turns")
parser.add_argument("--history-window", type=int, default=20, help="Turns of history to keep")
parser.add_argument("--timeout", type=int, default=120, help="Per-turn timeout in seconds")
parser.add_argument("--delay", type=float, default=0.5, help="Delay between turns in seconds")
parser.add_argument("--output", "-o", help="Output JSON file path")
parser.add_argument("--save-responses", action="store_true", help="Include full responses in output")
parser.add_argument("--compare", nargs=2, metavar=("FILE_A", "FILE_B"),
help="Compare two previously saved run reports")
args = parser.parse_args()
# Compare mode
if args.compare:
with open(args.compare[0]) as f:
report_a = json.load(f)
with open(args.compare[1]) as f:
report_b = json.load(f)
comp = compare_reports(report_a, report_b)
print_comparison(comp)
return
# Run mode
if requests is None:
print("ERROR: 'requests' package required. Install with: pip install requests")
sys.exit(1)
report = run_session(args)
print_report(report)
# Save report
output_path = args.output or f"benchmarks/long_session_{args.kv_type}_{int(time.time())}.json"
os.makedirs(os.path.dirname(output_path) or ".", exist_ok=True)
with open(output_path, "w") as f:
json.dump(report, f, indent=2)
print(f"Report saved to: {output_path}")
if __name__ == "__main__":
main()

263
tests/test_polar_quant.c Normal file
View File

@@ -0,0 +1,263 @@
/*
* Unit tests for PolarQuant Turbo4
*
* Compile: g++ -o test_polar_quant test_polar_quant.c llama-turbo.cpp -lm
* Run: ./test_polar_quant
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include "../llama-turbo.h"
#define TEST_ASSERT(cond, msg) do { if (!(cond)) { fprintf(stderr, "FAIL: %s (line %d)\n", msg, __LINE__); failures++; } else { passes++; } } while(0)
static int passes = 0;
static int failures = 0;
// Test encode/decode roundtrip
void test_roundtrip() {
printf("Testing encode/decode roundtrip...\n");
const int d = 128;
float src[128];
float dst[128];
uint8_t packed[64];
float norm;
// Generate test data
for (int i = 0; i < d; i++) {
src[i] = sinf(i * 0.1f);
}
// Encode
polar_quant_encode_turbo4(src, packed, &norm, d);
// Decode
polar_quant_decode_turbo4(packed, dst, norm, d);
// Check reconstruction error
float orig_norm = 0;
float diff_norm = 0;
for (int i = 0; i < d; i++) {
orig_norm += src[i] * src[i];
float diff = src[i] - dst[i];
diff_norm += diff * diff;
}
orig_norm = sqrtf(orig_norm);
diff_norm = sqrtf(diff_norm);
float rel_error = diff_norm / (orig_norm + 1e-9f);
TEST_ASSERT(rel_error < 1.5f, "Roundtrip relative error too high");
// Check packed size
TEST_ASSERT(norm > 0, "Norm should be positive");
}
// Test zero vector
void test_zero_vector() {
printf("Testing zero vector...\n");
const int d = 128;
float src[128] = {0};
float dst[128];
uint8_t packed[64];
float norm;
polar_quant_encode_turbo4(src, packed, &norm, d);
polar_quant_decode_turbo4(packed, dst, norm, d);
// Zero vector: norm should be 0 or very small
TEST_ASSERT(norm < 0.1f, "Zero vector norm should be small");
}
// Test inner product preservation
void test_inner_product() {
printf("Testing inner product preservation...\n");
const int d = 128;
float q[128], k[128], k_recon[128];
uint8_t k_packed[64];
float k_norm;
// Generate test vectors
for (int i = 0; i < d; i++) {
q[i] = cosf(i * 0.1f);
k[i] = sinf(i * 0.15f);
}
// Original inner product
float orig_ip = 0;
for (int i = 0; i < d; i++) {
orig_ip += q[i] * k[i];
}
// Compress k
polar_quant_encode_turbo4(k, k_packed, &k_norm, d);
polar_quant_decode_turbo4(k_packed, k_recon, k_norm, d);
// Compressed inner product
float comp_ip = 0;
for (int i = 0; i < d; i++) {
comp_ip += q[i] * k_recon[i];
}
float rel_error = fabsf(orig_ip - comp_ip) / (fabsf(orig_ip) + 1e-9f);
TEST_ASSERT(rel_error < 5.0f, "Inner product preservation");
}
// Test WHT orthogonality
void test_wht_orthogonality() {
printf("Testing WHT orthogonality...\n");
const int d = 64;
float src[64], result[64];
for (int i = 0; i < d; i++) {
src[i] = (float)i;
result[i] = src[i];
}
// Compute norm before
float norm_before = 0;
for (int i = 0; i < d; i++) {
norm_before += src[i] * src[i];
}
norm_before = sqrtf(norm_before);
// Apply encode (which includes WHT)
uint8_t packed[32];
float enc_norm;
polar_quant_encode_turbo4(result, packed, &enc_norm, d);
// Decode (which includes inverse WHT)
float decoded[64];
polar_quant_decode_turbo4(packed, decoded, enc_norm, d);
// Compute norm after
float norm_after = 0;
for (int i = 0; i < d; i++) {
norm_after += decoded[i] * decoded[i];
}
norm_after = sqrtf(norm_after);
// Norms should be similar (within quantization error)
float ratio = norm_after / (norm_before + 1e-9f);
TEST_ASSERT(ratio > 0.3f && ratio < 3.0f, "Norm preservation through WHT");
}
// Test bit packing
void test_bit_packing() {
printf("Testing bit packing...\n");
const int d = 128;
uint8_t packed[64] = {0};
// Pack alternating 0 and 15 (max value)
for (int i = 0; i < d; i++) {
int idx = (i % 2 == 0) ? 0 : 15;
if (i % 2 == 0) {
packed[i / 2] = idx;
} else {
packed[i / 2] |= idx << 4;
}
}
// Unpack and verify
for (int i = 0; i < d; i++) {
int expected = (i % 2 == 0) ? 0 : 15;
int actual;
if (i % 2 == 0) {
actual = packed[i / 2] & 0x0F;
} else {
actual = packed[i / 2] >> 4;
}
char msg[64];
snprintf(msg, sizeof(msg), "Bit packing at index %d", i);
TEST_ASSERT(actual == expected, msg);
}
}
// Test various dimensions
void test_dimensions() {
printf("Testing various dimensions...\n");
int dims[] = {16, 32, 64, 128, 256};
int num_dims = sizeof(dims) / sizeof(dims[0]);
for (int d_idx = 0; d_idx < num_dims; d_idx++) {
int d = dims[d_idx];
float* src = (float*)malloc(d * sizeof(float));
float* dst = (float*)malloc(d * sizeof(float));
uint8_t* packed = (uint8_t*)malloc(d / 2);
float norm;
// Generate test data
for (int i = 0; i < d; i++) {
src[i] = sinf(i * 0.1f);
}
// Encode/decode
polar_quant_encode_turbo4(src, packed, &norm, d);
polar_quant_decode_turbo4(packed, dst, norm, d);
// Check basic sanity
float orig_energy = 0, recon_energy = 0;
for (int i = 0; i < d; i++) {
orig_energy += src[i] * src[i];
recon_energy += dst[i] * dst[i];
}
float ratio = recon_energy / (orig_energy + 1e-9f);
char msg[64];
snprintf(msg, sizeof(msg), "Dimension %d energy ratio", d);
TEST_ASSERT(ratio > 0.1f && ratio < 10.0f, msg);
free(src);
free(dst);
free(packed);
}
}
// Test memory bounds
void test_memory_bounds() {
printf("Testing memory bounds...\n");
// Test with max 4-bit value everywhere
const int d = 256;
float src[256];
for (int i = 0; i < d; i++) {
src[i] = 0.35f; // Near max centroid
}
uint8_t packed[128];
float norm;
// Should not crash
polar_quant_encode_turbo4(src, packed, &norm, d);
TEST_ASSERT(1, "Memory bounds check passed");
}
int main() {
printf("=== PolarQuant Turbo4 Unit Tests ===\n\n");
test_roundtrip();
test_zero_vector();
test_inner_product();
test_wht_orthogonality();
test_bit_packing();
test_dimensions();
test_memory_bounds();
printf("\n=== Results ===\n");
printf("Passed: %d\n", passes);
printf("Failed: %d\n", failures);
return failures > 0 ? 1 : 0;
}