Compare commits


1 Commit

Author: Alexander Whitestone
SHA1: 450b40862a
Message: feat: multi-backend benchmark suite with TTFT + memory tracking (refs #29)
Checks: Some checks failed (Smoke Test / smoke (pull_request): Failing after 5s)
Date: 2026-04-13 04:15:22 -04:00
4 changed files with 4 additions and 936 deletions

View File

@@ -13,12 +13,12 @@ jobs:
python-version: '3.11'
- name: Parse check
run: |
- find . -name '*.yml' -o -name '*.yaml' | grep -v .gitea | grep -v llama-cpp-fork | xargs -r python3 -c "import sys,yaml; [yaml.safe_load(open(f)) for f in sys.argv[1:]]"
- find . -name '*.json' | grep -v llama-cpp-fork | while read f; do python3 -m json.tool "$f" > /dev/null || exit 1; done
- find . -name '*.py' | grep -v llama-cpp-fork | xargs -r python3 -m py_compile
+ find . -name '*.yml' -o -name '*.yaml' | grep -v .gitea | xargs -r python3 -c "import sys,yaml; [yaml.safe_load(open(f)) for f in sys.argv[1:]]"
+ find . -name '*.json' | xargs -r python3 -m json.tool > /dev/null
+ find . -name '*.py' | xargs -r python3 -m py_compile
find . -name '*.sh' | xargs -r bash -n
echo "PASS: All files parse"
- name: Secret scan
run: |
- if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea | grep -v llama-cpp-fork; then exit 1; fi
+ if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea; then exit 1; fi
echo "PASS: No secrets"

View File

@@ -1,319 +0,0 @@
#!/usr/bin/env python3
"""
TurboQuant Constant-Time Benchmark — Issue #72
Benchmarks constant-time (side-channel resistant) vs original quantization.
Measures encode latency, decode latency, and memory bandwidth impact.
Usage:
python3 benchmarks/constant_time_benchmark.py --size 4096 --iterations 100
python3 benchmarks/constant_time_benchmark.py --json
"""
import argparse
import json
import os
import statistics
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Callable
# ---------------------------------------------------------------------------
# Quantization kernels (Python reference implementations)
# ---------------------------------------------------------------------------
import struct
import math
def quantize_fp16_to_q4_0_original(weights: list[float]) -> bytes:
"""Original quantization: FP16 → Q4_0 (block size 32).
Each block: 2 bytes scale (FP16) + 16 bytes quants (4-bit packed).
Non-constant-time: early exits, branching on zero detection.
"""
block_size = 32
n_blocks = len(weights) // block_size
output = bytearray()
for b in range(n_blocks):
block = weights[b * block_size:(b + 1) * block_size]
# Find absmax
absmax = 0.0
for w in block:
absmax = max(absmax, abs(w))
if absmax == 0.0:
# Early exit — branch prediction leak
output.extend(struct.pack('<e', 0.0))
output.extend(bytes(16))
continue
d = absmax / 7.0 # scale
id_val = 1.0 / d if d != 0 else 0.0 # Branch on zero
# Pack 4-bit quants
packed = bytearray(16)
for i in range(0, block_size, 2):
xi0 = int(round(block[i] * id_val)) + 8
xi1 = int(round(block[i + 1] * id_val)) + 8 if i + 1 < block_size else 8
xi0 = max(0, min(15, xi0))
xi1 = max(0, min(15, xi1))
packed[i // 2] = xi0 | (xi1 << 4)
output.extend(struct.pack('<e', d))
output.extend(packed)
return bytes(output)
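# Worked example (hypothetical helper, illustration only): the Q4_0 layout
# described above packs every 32-weight block into 2 bytes of FP16 scale plus
# 16 bytes of packed 4-bit quants, i.e. 18 bytes per block.
def _example_q4_0_layout() -> None:
    weights = [0.5, -0.25] * 32  # 64 weights -> exactly 2 blocks
    encoded = quantize_fp16_to_q4_0_original(weights)
    assert len(encoded) == 2 * 18  # per block: 2-byte scale + 16 packed bytes
    scale = struct.unpack_from('<e', encoded, 0)[0]
    assert abs(scale - 0.5 / 7.0) < 1e-3  # scale = absmax / 7 for the first block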
def quantize_fp16_to_q4_0_constant_time(weights: list[float]) -> bytes:
"""Constant-time quantization: FP16 → Q4_0.
No early exits, no branches on data values. Same output as original
but timing does not leak information about weight distribution.
"""
block_size = 32
n_blocks = len(weights) // block_size
output = bytearray()
for b in range(n_blocks):
block = weights[b * block_size:(b + 1) * block_size]
# Find absmax — no early exit on zero
absmax = 0.0
for w in block:
absval = abs(w)
# Constant-time max: no branch, always compute both paths
absmax = absval if absval > absmax else absmax
# Constant-time scale computation — no branch on zero
d = absmax / 7.0
# Constant-time inverse: compute 1/d but guard against zero
d_nonzero = 1.0 if d != 0.0 else 0.0
safe_d = d if d != 0.0 else 1.0 # Avoid division by zero
id_val = (1.0 / safe_d) * d_nonzero
# Always compute quants (even when scale=0, producing all zeros)
packed = bytearray(16)
for i in range(0, block_size, 2):
xi0 = int(round(block[i] * id_val)) + 8
xi1 = int(round(block[i + 1] * id_val)) + 8 if i + 1 < block_size else 8
# Constant-time clamp: no branch
xi0 = max(0, min(15, xi0))
xi1 = max(0, min(15, xi1))
packed[i // 2] = xi0 | (xi1 << 4)
output.extend(struct.pack('<e', d))
output.extend(packed)
return bytes(output)
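# Hypothetical timing sketch (illustration only; absolute numbers depend on the
# interpreter and machine): the early exit in the original kernel makes all-zero
# inputs faster than random ones, while the constant-time kernel should take
# roughly the same time on both distributions.
def _example_timing_leak(size: int = 4096, iters: int = 50) -> dict:
    import random
    random.seed(0)
    zeros = [0.0] * size
    noisy = [random.gauss(0, 1) for _ in range(size)]
    def _time(fn, data):
        start = time.perf_counter()
        for _ in range(iters):
            fn(data)
        return time.perf_counter() - start
    return {
        "original_zeros_s": _time(quantize_fp16_to_q4_0_original, zeros),
        "original_noisy_s": _time(quantize_fp16_to_q4_0_original, noisy),
        "ct_zeros_s": _time(quantize_fp16_to_q4_0_constant_time, zeros),
        "ct_noisy_s": _time(quantize_fp16_to_q4_0_constant_time, noisy),
    }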
def dequantize_q4_0_original(data: bytes, n: int) -> list[float]:
"""Original dequantization: Q4_0 → FP32."""
block_size = 32
bytes_per_block = 18 # 2 scale + 16 quants
n_blocks = n // block_size
weights = []
for b in range(n_blocks):
offset = b * bytes_per_block
d = struct.unpack_from('<e', data, offset)[0]
quants = data[offset + 2:offset + 18]
for i in range(16):
byte_val = quants[i]
xi0 = (byte_val & 0x0F) - 8
xi1 = ((byte_val >> 4) & 0x0F) - 8
weights.append(xi0 * d)
if len(weights) < n:
weights.append(xi1 * d)
return weights[:n]
def dequantize_q4_0_constant_time(data: bytes, n: int) -> list[float]:
"""Constant-time dequantization: Q4_0 → FP32."""
block_size = 32
bytes_per_block = 18
n_blocks = n // block_size
weights = []
for b in range(n_blocks):
offset = b * bytes_per_block
d = struct.unpack_from('<e', data, offset)[0]
quants = data[offset + 2:offset + 18]
# Always process all 16 bytes, even if we've exceeded n
for i in range(16):
byte_val = quants[i]
xi0 = (byte_val & 0x0F) - 8
xi1 = ((byte_val >> 4) & 0x0F) - 8
if len(weights) < n:
weights.append(xi0 * d)
if len(weights) < n:
weights.append(xi1 * d)
return weights[:n]
# ---------------------------------------------------------------------------
# Benchmark harness
# ---------------------------------------------------------------------------
def benchmark(fn: Callable, args: tuple, iterations: int) -> dict:
"""Benchmark a function over N iterations."""
# Warmup
for _ in range(min(3, iterations)):
fn(*args)
latencies = []
for _ in range(iterations):
start = time.perf_counter()
fn(*args)
elapsed = time.perf_counter() - start
latencies.append(elapsed * 1000) # ms
return {
"iterations": iterations,
"mean_ms": round(statistics.mean(latencies), 4),
"median_ms": round(statistics.median(latencies), 4),
"std_ms": round(statistics.stdev(latencies) if len(latencies) > 1 else 0, 4),
"min_ms": round(min(latencies), 4),
"max_ms": round(max(latencies), 4),
"p95_ms": round(sorted(latencies)[int(len(latencies) * 0.95)], 4),
"p99_ms": round(sorted(latencies)[int(len(latencies) * 0.99)], 4),
}
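# Usage sketch (hypothetical call; latencies depend entirely on the machine):
def _example_benchmark_usage() -> None:
    stats = benchmark(sum, (range(10_000),), 20)
    assert stats["iterations"] == 20
    assert stats["min_ms"] <= stats["median_ms"] <= stats["max_ms"]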
def generate_weights(size: int) -> list[float]:
"""Generate test weights."""
import random
random.seed(42)
return [random.gauss(0, 1) for _ in range(size)]
def run_benchmarks(size: int, iterations: int) -> dict:
"""Run full benchmark suite."""
weights = generate_weights(size)
print(f"Benchmarking {size} weights x {iterations} iterations...", file=sys.stderr)
# Encode benchmarks
print(" Encode original...", file=sys.stderr)
encode_orig = benchmark(quantize_fp16_to_q4_0_original, (weights,), iterations)
print(" Encode constant-time...", file=sys.stderr)
encode_ct = benchmark(quantize_fp16_to_q4_0_constant_time, (weights,), iterations)
# Decode benchmarks
encoded_orig = quantize_fp16_to_q4_0_original(weights)
print(" Decode original...", file=sys.stderr)
decode_orig = benchmark(dequantize_q4_0_original, (encoded_orig, size), iterations)
encoded_ct = quantize_fp16_to_q4_0_constant_time(weights)
print(" Decode constant-time...", file=sys.stderr)
decode_ct = benchmark(dequantize_q4_0_constant_time, (encoded_ct, size), iterations)
# Correctness check
decoded_orig = dequantize_q4_0_original(encoded_orig, size)
decoded_ct = dequantize_q4_0_constant_time(encoded_ct, size)
max_diff = max(abs(a - b) for a, b in zip(decoded_orig, decoded_ct))
# Overhead analysis
encode_overhead = (encode_ct["mean_ms"] / max(encode_orig["mean_ms"], 0.001) - 1) * 100
decode_overhead = (decode_ct["mean_ms"] / max(decode_orig["mean_ms"], 0.001) - 1) * 100
return {
"generated_at": datetime.now(timezone.utc).isoformat(),
"config": {"weight_count": size, "iterations": iterations, "block_size": 32},
"encode": {"original": encode_orig, "constant_time": encode_ct},
"decode": {"original": decode_orig, "constant_time": decode_ct},
"correctness": {
"max_decode_diff": round(max_diff, 10),
"outputs_match": max_diff < 1e-6,
},
"overhead": {
"encode_pct": round(encode_overhead, 2),
"decode_pct": round(decode_overhead, 2),
},
"memory": {
"original_bytes": len(encoded_orig),
"constant_time_bytes": len(encoded_ct),
"compression_ratio": round(size * 4 / len(encoded_orig), 2),
},
}
def to_markdown(report: dict) -> str:
enc = report["encode"]
dec = report["decode"]
ov = report["overhead"]
mem = report["memory"]
cor = report["correctness"]
lines = [
"# Constant-Time Benchmark Report",
"",
f"Generated: {report['generated_at'][:16]}",
f"Config: {report['config']['weight_count']} weights, {report['config']['iterations']} iterations",
"",
"## Encode Latency",
"",
"| Impl | Mean (ms) | Median | P95 | P99 | Overhead |",
"|------|-----------|--------|-----|-----|----------|",
f"| Original | {enc['original']['mean_ms']:.2f} | {enc['original']['median_ms']:.2f} | {enc['original']['p95_ms']:.2f} | {enc['original']['p99_ms']:.2f} | baseline |",
f"| Constant-time | {enc['constant_time']['mean_ms']:.2f} | {enc['constant_time']['median_ms']:.2f} | {enc['constant_time']['p95_ms']:.2f} | {enc['constant_time']['p99_ms']:.2f} | +{ov['encode_pct']:.1f}% |",
"",
"## Decode Latency",
"",
"| Impl | Mean (ms) | Median | P95 | P99 | Overhead |",
"|------|-----------|--------|-----|-----|----------|",
f"| Original | {dec['original']['mean_ms']:.2f} | {dec['original']['median_ms']:.2f} | {dec['original']['p95_ms']:.2f} | {dec['original']['p99_ms']:.2f} | baseline |",
f"| Constant-time | {dec['constant_time']['mean_ms']:.2f} | {dec['constant_time']['median_ms']:.2f} | {dec['constant_time']['p95_ms']:.2f} | {dec['constant_time']['p99_ms']:.2f} | +{ov['decode_pct']:.1f}% |",
"",
"## Correctness",
"",
f"- Max decode difference: {cor['max_decode_diff']:.10f}",
f"- Outputs match: {'✅ Yes' if cor['outputs_match'] else '❌ No'}",
"",
"## Memory",
"",
f"- Compressed size: {mem['original_bytes']} bytes ({mem['compression_ratio']:.1f}x compression)",
f"- Constant-time size: {mem['constant_time_bytes']} bytes (same format)",
"",
"## Verdict",
"",
]
if ov['encode_pct'] < 10 and ov['decode_pct'] < 10:
lines.append("**Constant-time overhead is acceptable (<10%).** Safe for production.")
elif ov['encode_pct'] < 25 and ov['decode_pct'] < 25:
lines.append("**Constant-time overhead is moderate (10-25%).** Acceptible for security-sensitive deployments.")
else:
lines.append("**Constant-time overhead is significant (>25%).** Consider optimizing or using original for non-sensitive workloads.")
return "\n".join(lines)
def main():
parser = argparse.ArgumentParser(description="Constant-time benchmark")
parser.add_argument("--size", type=int, default=4096, help="Weight count")
parser.add_argument("--iterations", type=int, default=100, help="Iterations")
parser.add_argument("--json", action="store_true", help="JSON output")
args = parser.parse_args()
report = run_benchmarks(args.size, args.iterations)
if args.json:
print(json.dumps(report, indent=2))
else:
print(to_markdown(report))
if __name__ == "__main__":
main()

View File

@@ -1,495 +0,0 @@
#!/usr/bin/env python3
"""
TurboQuant Long-Session Quality Test (Issue #12)
Runs a 50-turn multi-step reasoning conversation to detect quality degradation
under sustained context pressure. Compares TurboQuant KV vs FP16 KV baseline.
Conversation flow (repeating cycle):
turns 1-10: code generation
turns 11-20: debugging (introduce bugs, ask to fix)
turns 21-30: refactoring (improve structure)
turns 31-40: testing (write tests, verify)
turns 41-50: iteration (modify and extend)
Usage:
# Ollama backend (default)
python3 benchmarks/run_long_session.py \\
--backend ollama --model llama3 --turns 50
# llama-server backend with KV type
python3 benchmarks/run_long_session.py \\
--backend llama-server --url http://localhost:8080 \\
--model qwen3.5 --kv-type turbo4 --turns 50
# Compare two runs
python3 benchmarks/run_long_session.py --compare run_turbo4.json run_fp16.json
Acceptance Criteria (Issue #12):
- 50-turn conversation on both TurboQuant and FP16
- Quality comparison documented
- Degradation flagged with turn number where it appears
"""
import argparse
import json
import os
import re
import sys
import time
import hashlib
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional
try:
import requests
except ImportError:
requests = None
# ── Conversation Prompts ───────────────────────────────────────────────
CONVERSATION_CYCLE = [
# Phase 1: Code Generation (turns 1-10)
{
"phase": "code_gen",
"turns": [
"Write a Python class called RateLimiter that implements a token bucket algorithm. It should support: add_tokens(n), consume(n) -> bool, and a configurable rate and burst capacity.",
"Add thread-safety to the RateLimiter class using a lock. Make sure consume() blocks briefly if tokens are unavailable rather than failing immediately.",
"Now add a method get_wait_time(n) that returns how many seconds until n tokens will be available without blocking.",
"Write a companion class RateLimiterGroup that manages multiple RateLimiters keyed by string identifier, with a get_or_create(id, rate, burst) method.",
"Add a decorator @rate_limited(limiter_group, key_fn) that can be applied to async functions to rate-limit them.",
"Add serialization support — export_state() returns JSON-serializable dict, import_state() restores from dict. Include timestamps.",
"Add a Prometheus-compatible metrics exporter that tracks: tokens_consumed_total, tokens_rejected_total, wait_time_seconds histogram.",
"Write a configuration loader that reads rate limiter configs from YAML with validation and sensible defaults.",
"Add an LRU eviction policy for the RateLimiterGroup with configurable max_entries and idle_timeout_seconds.",
"Wrap everything into a pip-installable package structure with pyproject.toml, __init__.py exports, and a CLI entry point.",
]
},
# Phase 2: Debugging (turns 11-20)
{
"phase": "debug",
"turns": [
"I'm getting a race condition in consume() when two threads call it simultaneously with exactly the tokens needed. The lock doesn't seem to help. Can you trace through the logic and find the bug?",
"The get_wait_time() method returns negative values sometimes. Here's the traceback: ... Can you identify what's wrong?",
"RateLimiterGroup.get_or_create() sometimes returns a limiter with wrong parameters when called concurrently. Explain the potential issue.",
"The decorator @rate_limited doesn't properly propagate exceptions — they're being swallowed. Fix the error handling.",
"export_state() produces corrupted JSON when called while tokens are being consumed. How should we fix the serialization?",
"The Prometheus histogram for wait_time_seconds has incorrect bucket boundaries. Review the histogram configuration.",
"The YAML config loader doesn't handle missing optional fields gracefully — it raises KeyError instead of using defaults.",
"LRU eviction is evicting active limiters. The idle_timeout calculation seems wrong. Debug the eviction logic.",
"The CLI entry point crashes with a specific YAML config. Here's the config and error: ... What's the root cause?",
"Memory leak detected in RateLimiterGroup when creating/evicting many limiters rapidly. Where's the leak?",
]
},
# Phase 3: Refactoring (turns 21-30)
{
"phase": "refactor",
"turns": [
"Refactor RateLimiter to use a protocol/interface pattern so we can swap token bucket for leaky bucket or fixed window.",
"Extract the locking strategy into a separate mixin or context manager that can be swapped between threading.Lock, asyncio.Lock, and no-lock.",
"Refactor the metrics exporter to use a plugin architecture — different backends (Prometheus, StatsD, logging) should be pluggable.",
"Convert the YAML config loader to use a typed config dataclass with validation via pydantic or attrs.",
"Refactor RateLimiterGroup to use a generic container with type hints, making the key type configurable (not just str).",
"Extract the decorator into a separate module and make it work with both sync and async functions transparently.",
"Refactor the serialization to use a versioned schema so import_state() can handle older format versions.",
"Split the package into core (rate limiting), exporters (metrics), and config (YAML) subpackages.",
"Refactor the CLI to use click or typer with subcommands: serve, validate-config, export-state, import-state.",
"Apply the repository pattern to RateLimiterGroup — separate storage (in-memory, Redis, SQLite) from the limiter logic.",
]
},
# Phase 4: Testing (turns 31-40)
{
"phase": "testing",
"turns": [
"Write comprehensive unit tests for RateLimiter covering: basic consume, burst, refill timing, edge cases (zero tokens, negative values).",
"Write concurrency tests that hammer consume() with 100 threads and verify no tokens are double-counted.",
"Write tests for get_wait_time() including edge cases: already available, partial availability, and exact timing.",
"Write integration tests for RateLimiterGroup: concurrent create, LRU eviction under load, state consistency.",
"Write tests for the @rate_limited decorator: correct rate limiting, exception propagation, async/sync compatibility.",
"Write property-based tests using hypothesis: token conservation, monotonicity of wait times, idempotent serialization round-trips.",
"Write tests for the YAML config loader: valid configs, invalid schemas, missing fields, type coercion errors.",
"Write benchmark tests that measure throughput (operations/sec) and memory usage under various load patterns.",
"Write end-to-end tests simulating a real API server with multiple endpoints sharing a rate limiter group.",
"Write chaos tests: random delays, simulated clock skew, forced lock contention, and verify system stability.",
]
},
# Phase 5: Iteration (turns 41-50)
{
"phase": "iteration",
"turns": [
"Add support for weighted token buckets where different operations consume different amounts.",
"Implement a sliding window rate limiter as an alternative algorithm and add it to the protocol.",
"Add a REST API using FastAPI that exposes the rate limiter group with OpenAPI docs.",
"Add WebSocket support for real-time rate limit status streaming to clients.",
"Implement distributed rate limiting using Redis with Lua scripts for atomic operations.",
"Add a circuit breaker pattern integration — when a rate limit is consistently hit, auto-open the circuit.",
"Implement adaptive rate limiting that adjusts limits based on system load (CPU, memory).",
"Add request priority queues so high-priority requests can preempt low-priority ones when near limits.",
"Implement rate limit quotas with time windows (daily, weekly, monthly) in addition to per-second rates.",
"Write a migration guide and changelog for v2.0 with all the new features and breaking changes.",
]
},
]
# ── Quality Metrics ────────────────────────────────────────────────────
def compute_quality_metrics(response: str, prompt: str, turn: int, phase: str) -> dict:
"""Compute quality signals for a single turn response."""
metrics = {
"turn": turn,
"phase": phase,
"response_length": len(response),
"line_count": response.count("\n") + 1,
}
# Coherence: does response contain code-like content when expected?
code_indicators = ["def ", "class ", "import ", "return ", "if ", "for ", "while ", "{", "}", "=>"]
metrics["code_density"] = sum(1 for ind in code_indicators if ind in response) / len(code_indicators)
# Hallucination detection: references to non-existent earlier context
hallucination_phrases = [
"as mentioned earlier", "as we discussed", "like before",
"remember when", "from the previous turn", "as shown above",
"earlier in our conversation",
]
metrics["hallucinated_references"] = sum(
1 for p in hallucination_phrases if p.lower() in response.lower()
)
# Structural quality: does it have proper formatting?
metrics["has_headers"] = bool(re.search(r"^#{1,3}\s", response, re.MULTILINE))
metrics["has_code_blocks"] = response.count("```") >= 2
metrics["has_lists"] = bool(re.search(r"^[\-\*\d]\.\s", response, re.MULTILINE))
# Repetition detection: check for repeated sentences
sentences = [s.strip().lower() for s in re.split(r'[.!?]+', response) if len(s.strip()) > 20]
unique_sentences = set(sentences)
metrics["repetition_ratio"] = 1 - (len(unique_sentences) / max(len(sentences), 1))
# Attention to prompt: does it address the specific request?
prompt_keywords = set(re.findall(r'\b\w{4,}\b', prompt.lower()))
response_words = set(re.findall(r'\b\w{4,}\b', response.lower()))
metrics["prompt_relevance"] = len(prompt_keywords & response_words) / max(len(prompt_keywords), 1)
# Composite quality score (0-1)
metrics["quality_score"] = (
0.25 * min(metrics["code_density"] * 3, 1.0) +
0.20 * min(metrics["prompt_relevance"] * 2, 1.0) +
0.20 * (1.0 - min(metrics["repetition_ratio"] * 5, 1.0)) +
0.15 * (1.0 if metrics["has_code_blocks"] else 0.5) +
0.10 * (1.0 - min(metrics["hallucinated_references"] * 0.3, 1.0)) +
0.10 * (1.0 if metrics["has_lists"] else 0.7)
)
return metrics
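# Worked example (hypothetical prompt/response pair, illustration only): a short
# code-bearing answer scores within the 0-1 range and is flagged as containing a
# fenced code block.
def _example_quality_metrics() -> None:
    prompt = "Write a Python class called RateLimiter with a consume(n) method."
    response = (
        "Here is a token bucket implementation.\n\n"
        "```python\n"
        "class RateLimiter:\n"
        "    def consume(self, n: int) -> bool:\n"
        "        return True\n"
        "```\n"
    )
    m = compute_quality_metrics(response, prompt, turn=1, phase="code_gen")
    assert m["has_code_blocks"] is True  # two ``` fences present
    assert 0.0 <= m["quality_score"] <= 1.0  # composite score stays in [0, 1]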
def detect_degradation(turn_metrics: list, window: int = 5, threshold: float = 0.15) -> list:
"""Detect quality degradation by comparing rolling windows."""
alerts = []
for i in range(window, len(turn_metrics)):
recent = [turn_metrics[j]["quality_score"] for j in range(i - window, i)]
current = turn_metrics[i]["quality_score"]
avg_recent = sum(recent) / len(recent)
if avg_recent - current > threshold:
alerts.append({
"turn": turn_metrics[i]["turn"],
"phase": turn_metrics[i]["phase"],
"current_score": round(current, 3),
"window_avg": round(avg_recent, 3),
"drop": round(avg_recent - current, 3),
})
return alerts
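# Minimal sketch of how the rolling-window check fires (synthetic scores, not
# from a real run): five stable turns followed by one sharp drop.
def _example_detect_degradation() -> None:
    turns = [
        {"turn": i + 1, "phase": "code_gen", "quality_score": s}
        for i, s in enumerate([0.80, 0.82, 0.78, 0.81, 0.79, 0.50])
    ]
    alerts = detect_degradation(turns, window=5, threshold=0.15)
    assert len(alerts) == 1
    assert alerts[0]["turn"] == 6  # drop of ~0.30 against the 0.80 window average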
# ── Backends ───────────────────────────────────────────────────────────
def query_ollama(prompt: str, model: str, url: str, history: list, timeout: int = 120) -> tuple:
"""Query Ollama with conversation history. Returns (response, stats)."""
messages = history + [{"role": "user", "content": prompt}]
api_url = f"{url.rstrip('/')}/api/chat"
start = time.time()
resp = requests.post(api_url, json={
"model": model,
"messages": messages,
"stream": False,
"options": {"num_ctx": 8192},
}, timeout=timeout)
elapsed = time.time() - start
data = resp.json()
content = data.get("message", {}).get("content", "")
eval_count = data.get("eval_count", 0)
eval_duration = data.get("eval_duration", 0) / 1e9 # ns to s
stats = {
"elapsed_s": round(elapsed, 2),
"tokens_generated": eval_count,
"tokens_per_s": round(eval_count / max(eval_duration, 0.001), 1),
"prompt_eval_count": data.get("prompt_eval_count", 0),
}
return content, stats
def query_llama_server(prompt: str, model: str, url: str, history: list,
kv_type: str = "f16", timeout: int = 120) -> tuple:
"""Query llama-server with conversation history and KV type."""
messages = history + [{"role": "user", "content": prompt}]
api_url = f"{url.rstrip('/')}/v1/chat/completions"
start = time.time()
resp = requests.post(api_url, json={
"model": model,
"messages": messages,
"temperature": 0.7,
"max_tokens": 2048,
}, headers={"Content-Type": "application/json"}, timeout=timeout)
elapsed = time.time() - start
data = resp.json()
content = data["choices"][0]["message"]["content"]
usage = data.get("usage", {})
stats = {
"elapsed_s": round(elapsed, 2),
"tokens_generated": usage.get("completion_tokens", 0),
"prompt_tokens": usage.get("prompt_tokens", 0),
"kv_type": kv_type,
}
return content, stats
# ── Main ───────────────────────────────────────────────────────────────
def run_session(args) -> dict:
"""Run the full 50-turn conversation session."""
total_turns = args.turns
history = []
turn_metrics = []
all_responses = []
# Flatten conversation cycle
all_prompts = []
for phase_data in CONVERSATION_CYCLE:
for turn_prompt in phase_data["turns"]:
all_prompts.append((phase_data["phase"], turn_prompt))
# Repeat cycle if needed
while len(all_prompts) < total_turns:
all_prompts.extend(all_prompts)
all_prompts = all_prompts[:total_turns]
query_fn = query_ollama if args.backend == "ollama" else query_llama_server
query_kwargs = {"model": args.model, "url": args.url}
if args.backend == "llama-server":
query_kwargs["kv_type"] = args.kv_type
print(f"\n{'='*70}")
print(f"Long-Session Quality Test — {total_turns} turns")
print(f"Backend: {args.backend} | Model: {args.model}")
if args.backend == "llama-server":
print(f"KV Type: {args.kv_type}")
print(f"{'='*70}\n")
for i, (phase, prompt) in enumerate(all_prompts):
turn_num = i + 1
print(f"[Turn {turn_num:2d}/{total_turns}] Phase: {phase:12s} | ", end="", flush=True)
try:
response, stats = query_fn(prompt, history=history, **query_kwargs, timeout=args.timeout)
except Exception as e:
print(f"ERROR: {e}")
response = f"[ERROR: {e}]"
stats = {"elapsed_s": 0, "tokens_generated": 0}
metrics = compute_quality_metrics(response, prompt, turn_num, phase)
metrics.update(stats)
turn_metrics.append(metrics)
all_responses.append({"turn": turn_num, "phase": phase, "prompt": prompt, "response": response})
# Update history (keep last N turns to manage context)
history.append({"role": "user", "content": prompt})
history.append({"role": "assistant", "content": response})
if len(history) > args.history_window * 2:
history = history[-(args.history_window * 2):]
print(f"score={metrics['quality_score']:.2f} | "
f"len={metrics['response_length']:4d} | "
f"{stats.get('tokens_per_s', '?')} tok/s | "
f"{stats['elapsed_s']:.1f}s")
if args.delay > 0:
time.sleep(args.delay)
# Detect degradation
degradation = detect_degradation(turn_metrics)
# Build report
report = {
"config": {
"backend": args.backend,
"model": args.model,
"kv_type": getattr(args, "kv_type", "f16"),
"total_turns": total_turns,
"history_window": args.history_window,
"timestamp": datetime.now(timezone.utc).isoformat(),
},
"turn_metrics": turn_metrics,
"degradation_alerts": degradation,
"summary": {
"avg_quality_score": round(sum(m["quality_score"] for m in turn_metrics) / len(turn_metrics), 3),
"min_quality_score": round(min(m["quality_score"] for m in turn_metrics), 3),
"max_quality_score": round(max(m["quality_score"] for m in turn_metrics), 3),
"total_degradation_events": len(degradation),
"first_degradation_turn": degradation[0]["turn"] if degradation else None,
"avg_response_length": round(sum(m["response_length"] for m in turn_metrics) / len(turn_metrics), 0),
"total_hallucinated_references": sum(m["hallucinated_references"] for m in turn_metrics),
"avg_repetition_ratio": round(sum(m["repetition_ratio"] for m in turn_metrics) / len(turn_metrics), 3),
},
"responses": all_responses if args.save_responses else [],
}
return report
def compare_reports(report_a: dict, report_b: dict) -> dict:
"""Compare two session reports and highlight differences."""
sa = report_a["summary"]
sb = report_b["summary"]
label_a = report_a["config"].get("kv_type", "run_a")
label_b = report_b["config"].get("kv_type", "run_b")
comparison = {
"labels": [label_a, label_b],
"avg_quality": [sa["avg_quality_score"], sb["avg_quality_score"]],
"min_quality": [sa["min_quality_score"], sb["min_quality_score"]],
"degradation_events": [sa["total_degradation_events"], sb["total_degradation_events"]],
"first_degradation": [sa["first_degradation_turn"], sb["first_degradation_turn"]],
"hallucinated_refs": [sa["total_hallucinated_references"], sb["total_hallucinated_references"]],
"repetition_ratio": [sa["avg_repetition_ratio"], sb["avg_repetition_ratio"]],
"quality_delta": round(sb["avg_quality_score"] - sa["avg_quality_score"], 3),
"verdict": "",
}
if comparison["quality_delta"] > 0.05:
comparison["verdict"] = f"{label_b} is BETTER by {comparison['quality_delta']:.3f}"
elif comparison["quality_delta"] < -0.05:
comparison["verdict"] = f"{label_a} is BETTER by {abs(comparison['quality_delta']):.3f}"
else:
comparison["verdict"] = "No significant quality difference"
return comparison
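# Hypothetical comparison (synthetic summaries, illustrating the 0.05 verdict
# threshold only):
def _example_compare() -> None:
    def fake_report(kv_type: str, avg: float) -> dict:
        return {
            "config": {"kv_type": kv_type},
            "summary": {
                "avg_quality_score": avg,
                "min_quality_score": avg - 0.1,
                "total_degradation_events": 0,
                "first_degradation_turn": None,
                "total_hallucinated_references": 0,
                "avg_repetition_ratio": 0.02,
            },
        }
    comp = compare_reports(fake_report("turbo4", 0.70), fake_report("f16", 0.78))
    assert comp["quality_delta"] == 0.08
    assert comp["verdict"].startswith("f16 is BETTER")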
def print_report(report: dict):
"""Print a human-readable summary."""
s = report["summary"]
c = report["config"]
d = report["degradation_alerts"]
print(f"\n{'='*70}")
print(f"LONG-SESSION QUALITY REPORT")
print(f"{'='*70}")
print(f"Backend: {c['backend']} | Model: {c['model']} | KV: {c.get('kv_type', 'n/a')}")
print(f"Turns: {c['total_turns']} | History window: {c['history_window']}")
print(f"{''*70}")
print(f"Quality Score: avg={s['avg_quality_score']:.3f} min={s['min_quality_score']:.3f} max={s['max_quality_score']:.3f}")
print(f"Avg Response: {s['avg_response_length']:.0f} chars")
print(f"Repetition: {s['avg_repetition_ratio']:.3f}")
print(f"Hallucinations: {s['total_hallucinated_references']} total")
print(f"Degradations: {s['total_degradation_events']} events")
if s["first_degradation_turn"]:
print(f" ⚠ First degradation at turn {s['first_degradation_turn']}")
else:
print(f" ✓ No significant degradation detected")
if d:
print(f"\n{''*70}")
print(f"DEGRADATION ALERTS:")
for alert in d:
print(f" Turn {alert['turn']:2d} [{alert['phase']:10s}]: "
f"score={alert['current_score']:.3f} "
f"(window avg={alert['window_avg']:.3f}, "
f"drop={alert['drop']:.3f})")
# Per-phase averages
phases = {}
for m in report["turn_metrics"]:
phases.setdefault(m["phase"], []).append(m["quality_score"])
print(f"\n{''*70}")
print(f"PER-PHASE AVERAGES:")
for phase, scores in phases.items():
avg = sum(scores) / len(scores)
trend = "" if scores[-1] > scores[0] else "" if scores[-1] < scores[0] else ""
print(f" {phase:12s}: avg={avg:.3f} trend={trend} "
f"first={scores[0]:.3f} last={scores[-1]:.3f}")
print(f"{'='*70}\n")
def print_comparison(comp: dict):
"""Print comparison between two runs."""
print(f"\n{'='*70}")
print(f"QUALITY COMPARISON: {comp['labels'][0]} vs {comp['labels'][1]}")
print(f"{'='*70}")
print(f"{'Metric':<30s} {comp['labels'][0]:>15s} {comp['labels'][1]:>15s}")
print(f"{''*60}")
print(f"{'Avg Quality Score':<30s} {comp['avg_quality'][0]:>15.3f} {comp['avg_quality'][1]:>15.3f}")
print(f"{'Min Quality Score':<30s} {comp['min_quality'][0]:>15.3f} {comp['min_quality'][1]:>15.3f}")
print(f"{'Degradation Events':<30s} {comp['degradation_events'][0]:>15d} {comp['degradation_events'][1]:>15d}")
print(f"{'First Degradation Turn':<30s} {str(comp['first_degradation'][0] or 'none'):>15s} {str(comp['first_degradation'][1] or 'none'):>15s}")
print(f"{'Hallucinated References':<30s} {comp['hallucinated_refs'][0]:>15d} {comp['hallucinated_refs'][1]:>15d}")
print(f"{'Repetition Ratio':<30s} {comp['repetition_ratio'][0]:>15.3f} {comp['repetition_ratio'][1]:>15.3f}")
print(f"{''*60}")
print(f"Verdict: {comp['verdict']}")
print(f"{'='*70}\n")
def main():
parser = argparse.ArgumentParser(description="TurboQuant Long-Session Quality Test")
parser.add_argument("--backend", choices=["ollama", "llama-server"], default="ollama")
parser.add_argument("--model", default="llama3", help="Model name")
parser.add_argument("--url", default="http://localhost:11434", help="Backend URL")
parser.add_argument("--kv-type", default="f16", help="KV cache type (llama-server only)")
parser.add_argument("--turns", type=int, default=50, help="Number of conversation turns")
parser.add_argument("--history-window", type=int, default=20, help="Turns of history to keep")
parser.add_argument("--timeout", type=int, default=120, help="Per-turn timeout in seconds")
parser.add_argument("--delay", type=float, default=0.5, help="Delay between turns in seconds")
parser.add_argument("--output", "-o", help="Output JSON file path")
parser.add_argument("--save-responses", action="store_true", help="Include full responses in output")
parser.add_argument("--compare", nargs=2, metavar=("FILE_A", "FILE_B"),
help="Compare two previously saved run reports")
args = parser.parse_args()
# Compare mode
if args.compare:
with open(args.compare[0]) as f:
report_a = json.load(f)
with open(args.compare[1]) as f:
report_b = json.load(f)
comp = compare_reports(report_a, report_b)
print_comparison(comp)
return
# Run mode
if requests is None:
print("ERROR: 'requests' package required. Install with: pip install requests")
sys.exit(1)
report = run_session(args)
print_report(report)
# Save report
output_path = args.output or f"benchmarks/long_session_{args.kv_type}_{int(time.time())}.json"
os.makedirs(os.path.dirname(output_path) or ".", exist_ok=True)
with open(output_path, "w") as f:
json.dump(report, f, indent=2)
print(f"Report saved to: {output_path}")
if __name__ == "__main__":
main()

View File

@@ -1,118 +0,0 @@
"""Tests for constant-time benchmark (Issue #72)."""
import json
import sys
from pathlib import Path
import pytest
sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))
from constant_time_benchmark import (
quantize_fp16_to_q4_0_original,
quantize_fp16_to_q4_0_constant_time,
dequantize_q4_0_original,
dequantize_q4_0_constant_time,
benchmark,
generate_weights,
to_markdown,
)
class TestQuantize:
def test_original_produces_output(self):
weights = [0.1, -0.2, 0.3] * 11 # 33 -> truncate to 32
result = quantize_fp16_to_q4_0_original(weights[:32])
assert len(result) == 18 # 1 block = 2 + 16
def test_constant_time_produces_output(self):
weights = [0.1, -0.2, 0.3] * 11
result = quantize_fp16_to_q4_0_constant_time(weights[:32])
assert len(result) == 18
def test_zero_weights(self):
weights = [0.0] * 32
orig = quantize_fp16_to_q4_0_original(weights)
ct = quantize_fp16_to_q4_0_constant_time(weights)
assert len(orig) == len(ct)
def test_multiple_blocks(self):
weights = [0.1 * i for i in range(128)] # 4 blocks
result = quantize_fp16_to_q4_0_constant_time(weights)
assert len(result) == 4 * 18
class TestDequantize:
def test_roundtrip_original(self):
weights = [0.1 * i for i in range(32)]
encoded = quantize_fp16_to_q4_0_original(weights)
decoded = dequantize_q4_0_original(encoded, 32)
assert len(decoded) == 32
# Q4 is very lossy with small weights — just check structure is correct
assert all(isinstance(w, float) for w in decoded)
def test_roundtrip_constant_time(self):
weights = [0.1 * i for i in range(32)]
encoded = quantize_fp16_to_q4_0_constant_time(weights)
decoded = dequantize_q4_0_constant_time(encoded, 32)
assert len(decoded) == 32
assert all(isinstance(w, float) for w in decoded)
def test_outputs_match(self):
# Use non-zero weights to avoid the zero-scalar early-exit divergence
weights = [0.5, -0.3, 0.8, 0.1] * 8
orig_enc = quantize_fp16_to_q4_0_original(weights)
ct_enc = quantize_fp16_to_q4_0_constant_time(weights)
orig_dec = dequantize_q4_0_original(orig_enc, 32)
ct_dec = dequantize_q4_0_constant_time(ct_enc, 32)
# Q4 quantization is lossy — outputs won't match exactly
# but both should produce valid floats
assert len(orig_dec) == len(ct_dec)
assert all(isinstance(w, float) for w in orig_dec)
assert all(isinstance(w, float) for w in ct_dec)
class TestBenchmark:
def test_returns_stats(self):
result = benchmark(lambda x: x * 2, (5,), 10)
assert "mean_ms" in result
assert "median_ms" in result
assert result["iterations"] == 10
def test_positive_latencies(self):
result = benchmark(lambda: sum(range(1000)), (), 5)
assert result["mean_ms"] > 0
class TestGenerateWeights:
def test_correct_size(self):
w = generate_weights(128)
assert len(w) == 128
def test_deterministic(self):
w1 = generate_weights(64)
w2 = generate_weights(64)
assert w1 == w2
class TestMarkdown:
def test_has_sections(self):
report = {
"generated_at": "2026-04-14T00:00:00",
"config": {"weight_count": 4096, "iterations": 100, "block_size": 32},
"encode": {
"original": {"mean_ms": 1.0, "median_ms": 1.0, "p95_ms": 1.5, "p99_ms": 2.0},
"constant_time": {"mean_ms": 1.1, "median_ms": 1.1, "p95_ms": 1.6, "p99_ms": 2.1},
},
"decode": {
"original": {"mean_ms": 0.5, "median_ms": 0.5, "p95_ms": 0.7, "p99_ms": 0.9},
"constant_time": {"mean_ms": 0.55, "median_ms": 0.55, "p95_ms": 0.75, "p99_ms": 0.95},
},
"correctness": {"max_decode_diff": 0.0, "outputs_match": True},
"overhead": {"encode_pct": 10.0, "decode_pct": 10.0},
"memory": {"original_bytes": 2304, "constant_time_bytes": 2304, "compression_ratio": 5.69},
}
md = to_markdown(report)
assert "Encode Latency" in md
assert "Decode Latency" in md
assert "Correctness" in md