Compare commits
6 Commits
fix/muda-c
...
burn/11-17
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b31cd93148 | ||
| 7a7ce0e652 | |||
| 9224a0162b | |||
|
|
f4ceac76ce | ||
| ab4020cca0 | |||
| 383e1fab2e |
@@ -13,12 +13,12 @@ jobs:
|
||||
python-version: '3.11'
|
||||
- name: Parse check
|
||||
run: |
|
||||
find . -name '*.yml' -o -name '*.yaml' | grep -v .gitea | xargs -r python3 -c "import sys,yaml; [yaml.safe_load(open(f)) for f in sys.argv[1:]]"
|
||||
find . -name '*.json' | xargs -r python3 -m json.tool > /dev/null
|
||||
find . -name '*.py' | xargs -r python3 -m py_compile
|
||||
find . -name '*.yml' -o -name '*.yaml' | grep -v .gitea | grep -v llama-cpp-fork | xargs -r python3 -c "import sys,yaml; [yaml.safe_load(open(f)) for f in sys.argv[1:]]"
|
||||
find . -name '*.json' | grep -v llama-cpp-fork | while read f; do python3 -m json.tool "$f" > /dev/null || exit 1; done
|
||||
find . -name '*.py' | grep -v llama-cpp-fork | xargs -r python3 -m py_compile
|
||||
find . -name '*.sh' | xargs -r bash -n
|
||||
echo "PASS: All files parse"
|
||||
- name: Secret scan
|
||||
run: |
|
||||
if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea; then exit 1; fi
|
||||
if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea | grep -v llama-cpp-fork; then exit 1; fi
|
||||
echo "PASS: No secrets"
|
||||
|
||||
@@ -1,75 +1,227 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
TurboQuant Benchmarking Suite — Multi-Backend (Issue #29)
|
||||
|
||||
Supports Ollama and llama-server backends with KV cache type configuration.
|
||||
Measures: TTFT, tokens/sec, latency, peak memory.
|
||||
|
||||
Usage:
|
||||
# Ollama (default)
|
||||
python3 benchmarks/run_benchmarks.py --backend ollama --model llama3
|
||||
|
||||
# llama-server with turbo4 KV
|
||||
python3 benchmarks/run_benchmarks.py --backend llama-server \
|
||||
--url http://localhost:11434 --model qwen3.5 --kv-type turbo4
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import time
|
||||
import requests
|
||||
import os
|
||||
from typing import List, Dict
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
# ═══════════════════════════════════════════
|
||||
# TURBOQUANT BENCHMARKING SUITE (Issue #16)
|
||||
# ═══════════════════════════════════════════
|
||||
# This script runs a standardized set of prompts against the local inference
|
||||
# engine (Ollama) and logs the results. This prevents cherry-picking and
|
||||
# provides an objective baseline for quality comparisons.
|
||||
import requests
|
||||
|
||||
OLLAMA_URL = "http://localhost:11434/api/generate"
|
||||
PROMPTS_FILE = "benchmarks/prompts.json"
|
||||
RESULTS_FILE = f"benchmarks/results_{int(time.time())}.json"
|
||||
|
||||
def run_benchmark(model: str = "llama3"):
|
||||
"""Run the benchmark suite for a specific model."""
|
||||
if not os.path.exists(PROMPTS_FILE):
|
||||
print(f"Error: {PROMPTS_FILE} not found.")
|
||||
return
|
||||
def get_peak_memory_mb() -> float:
|
||||
"""Get peak RSS of current process in MB (macOS/Linux)."""
|
||||
try:
|
||||
if sys.platform == "darwin":
|
||||
result = subprocess.run(["ps", "-o", "rss=", "-p", str(os.getpid())],
|
||||
capture_output=True, text=True)
|
||||
return int(result.stdout.strip()) / 1024
|
||||
else:
|
||||
with open(f"/proc/{os.getpid()}/status") as f:
|
||||
for line in f:
|
||||
if line.startswith("VmHWM:"):
|
||||
return int(line.split()[1]) / 1024
|
||||
except Exception:
|
||||
pass
|
||||
return 0.0
|
||||
|
||||
with open(PROMPTS_FILE, 'r') as f:
|
||||
|
||||
def run_ollama(prompt: str, model: str, url: str, timeout: int = 120) -> dict:
|
||||
"""Run a prompt against Ollama /api/generate."""
|
||||
api_url = f"{url.rstrip('/')}/api/generate"
|
||||
start = time.time()
|
||||
ttft = None
|
||||
tokens_per_sec = 0.0
|
||||
|
||||
try:
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"prompt": prompt,
|
||||
"stream": False,
|
||||
"options": {"num_predict": 512}
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
|
||||
response_text = data.get("response", "")
|
||||
eval_count = data.get("eval_count", 0)
|
||||
eval_duration_ns = data.get("eval_duration", 0)
|
||||
prompt_eval_ns = data.get("prompt_eval_duration", 0)
|
||||
|
||||
if eval_duration_ns > 0:
|
||||
tokens_per_sec = eval_count / (eval_duration_ns / 1e9)
|
||||
if prompt_eval_ns > 0:
|
||||
ttft = prompt_eval_ns / 1e9
|
||||
|
||||
return {
|
||||
"response": response_text,
|
||||
"latency_s": round(elapsed, 3),
|
||||
"ttft_s": round(ttft, 3) if ttft else None,
|
||||
"tokens_per_sec": round(tokens_per_sec, 2),
|
||||
"eval_count": eval_count,
|
||||
"status": "success"
|
||||
}
|
||||
except Exception as e:
|
||||
return {"status": "failed", "error": str(e), "latency_s": round(time.time() - start, 3)}
|
||||
|
||||
|
||||
def run_llama_server(prompt: str, model: str, url: str, kv_type: str = "f16",
|
||||
timeout: int = 120) -> dict:
|
||||
"""Run a prompt against llama-server OpenAI-compatible API."""
|
||||
api_url = f"{url.rstrip('/')}/v1/chat/completions"
|
||||
start = time.time()
|
||||
ttft = None
|
||||
tokens_per_sec = 0.0
|
||||
|
||||
try:
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": prompt}],
|
||||
"max_tokens": 512,
|
||||
"stream": False
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
|
||||
response_text = data.get("choices", [{}])[0].get("message", {}).get("content", "")
|
||||
usage = data.get("usage", {})
|
||||
completion_tokens = usage.get("completion_tokens", 0)
|
||||
prompt_tokens = usage.get("prompt_tokens", 0)
|
||||
|
||||
# llama-server includes timing in x_* headers or we estimate
|
||||
if elapsed > 0 and completion_tokens > 0:
|
||||
# Subtract estimated prompt eval time (rough)
|
||||
tokens_per_sec = completion_tokens / max(elapsed - 0.1, 0.01)
|
||||
|
||||
return {
|
||||
"response": response_text,
|
||||
"latency_s": round(elapsed, 3),
|
||||
"ttft_s": round(ttft, 3) if ttft else None,
|
||||
"tokens_per_sec": round(tokens_per_sec, 2),
|
||||
"completion_tokens": completion_tokens,
|
||||
"prompt_tokens": prompt_tokens,
|
||||
"kv_type": kv_type,
|
||||
"status": "success"
|
||||
}
|
||||
except Exception as e:
|
||||
return {"status": "failed", "error": str(e), "latency_s": round(time.time() - start, 3)}
|
||||
|
||||
|
||||
def run_benchmark_suite(backend: str, model: str, url: str, kv_type: str,
|
||||
prompts_file: str, output_file: str, timeout: int = 120):
|
||||
"""Run the full benchmark suite."""
|
||||
if not os.path.exists(prompts_file):
|
||||
print(f"ERROR: {prompts_file} not found")
|
||||
sys.exit(1)
|
||||
|
||||
with open(prompts_file) as f:
|
||||
prompts = json.load(f)
|
||||
|
||||
run_fn = run_ollama if backend == "ollama" else run_llama_server
|
||||
mem_before = get_peak_memory_mb()
|
||||
|
||||
results = []
|
||||
print(f"Starting benchmark for model: {model}")
|
||||
print(f"Saving results to: {RESULTS_FILE}")
|
||||
print(f"\n{'='*60}")
|
||||
print(f"Backend: {backend} | Model: {model} | KV: {kv_type}")
|
||||
print(f"URL: {url}")
|
||||
print(f"Prompts: {len(prompts)} | Output: {output_file}")
|
||||
print(f"{'='*60}\n")
|
||||
|
||||
for item in prompts:
|
||||
print(f"Running prompt: {item['id']}...")
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
response = requests.post(OLLAMA_URL, json={
|
||||
"model": model,
|
||||
"prompt": item['prompt'],
|
||||
"stream": False
|
||||
}, timeout=60)
|
||||
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
end_time = time.time()
|
||||
|
||||
results.append({
|
||||
"id": item['id'],
|
||||
"prompt": item['prompt'],
|
||||
"response": data.get("response"),
|
||||
"latency": end_time - start_time,
|
||||
"tokens_per_second": data.get("eval_count", 0) / (data.get("eval_duration", 1) / 1e9) if data.get("eval_duration") else 0,
|
||||
"status": "success"
|
||||
})
|
||||
except Exception as e:
|
||||
print(f"Error running prompt {item['id']}: {e}")
|
||||
results.append({
|
||||
"id": item['id'],
|
||||
"prompt": item['prompt'],
|
||||
"error": str(e),
|
||||
"status": "failed"
|
||||
})
|
||||
pid = item.get("id", item.get("category", "unknown"))
|
||||
prompt = item["prompt"]
|
||||
print(f"[{pid}] Running...", end=" ", flush=True)
|
||||
|
||||
extra = {"kv_type": kv_type} if backend == "llama-server" else {}
|
||||
result = run_fn(prompt, model, url, timeout=timeout)
|
||||
result["id"] = pid
|
||||
result["prompt_preview"] = prompt[:120]
|
||||
result.update(extra)
|
||||
|
||||
status = "✓" if result["status"] == "success" else "✗"
|
||||
tps = result.get("tokens_per_sec", 0)
|
||||
lat = result.get("latency_s", 0)
|
||||
print(f"{status} {tps:.1f} tok/s, {lat:.2f}s")
|
||||
|
||||
results.append(result)
|
||||
|
||||
mem_after = get_peak_memory_mb()
|
||||
|
||||
suite = {
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
"backend": backend,
|
||||
"model": model,
|
||||
"kv_type": kv_type,
|
||||
"url": url,
|
||||
"prompts_file": prompts_file,
|
||||
"memory_mb": round(max(mem_before, mem_after), 1),
|
||||
"results": results,
|
||||
"summary": {
|
||||
"total": len(results),
|
||||
"success": sum(1 for r in results if r["status"] == "success"),
|
||||
"failed": sum(1 for r in results if r["status"] == "failed"),
|
||||
"avg_tok_per_sec": round(
|
||||
sum(r.get("tokens_per_sec", 0) for r in results if r["status"] == "success")
|
||||
/ max(sum(1 for r in results if r["status"] == "success"), 1), 2
|
||||
),
|
||||
"avg_latency_s": round(
|
||||
sum(r.get("latency_s", 0) for r in results if r["status"] == "success")
|
||||
/ max(sum(1 for r in results if r["status"] == "success"), 1), 3
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
os.makedirs(os.path.dirname(output_file) or ".", exist_ok=True)
|
||||
with open(output_file, "w") as f:
|
||||
json.dump(suite, f, indent=2)
|
||||
|
||||
s = suite["summary"]
|
||||
print(f"\n{'='*60}")
|
||||
print(f"RESULTS: {s['success']}/{s['total']} success | "
|
||||
f"Avg {s['avg_tok_per_sec']:.1f} tok/s | "
|
||||
f"Avg {s['avg_latency_s']:.2f}s latency")
|
||||
print(f"{'='*60}")
|
||||
print(f"Saved to {output_file}")
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="TurboQuant Benchmark Suite")
|
||||
parser.add_argument("--backend", choices=["ollama", "llama-server"], default="ollama")
|
||||
parser.add_argument("--model", required=True, help="Model name")
|
||||
parser.add_argument("--url", default="http://localhost:11434", help="Backend URL")
|
||||
parser.add_argument("--kv-type", default="f16", help="KV cache type (llama-server only)")
|
||||
parser.add_argument("--prompts", default="benchmarks/prompts.json", help="Prompts file")
|
||||
parser.add_argument("--output", default=None, help="Output file (auto-generated if omitted)")
|
||||
parser.add_argument("--timeout", type=int, default=120, help="Per-prompt timeout (s)")
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.output is None:
|
||||
ts = int(time.time())
|
||||
args.output = f"benchmarks/results_{args.backend}_{args.kv_type}_{ts}.json"
|
||||
|
||||
run_benchmark_suite(args.backend, args.model, args.url, args.kv_type,
|
||||
args.prompts, args.output, args.timeout)
|
||||
|
||||
# Save results
|
||||
with open(RESULTS_FILE, 'w') as f:
|
||||
json.dump({
|
||||
"model": model,
|
||||
"timestamp": time.time(),
|
||||
"results": results
|
||||
}, f, indent=2)
|
||||
|
||||
print("Benchmark complete.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Default to llama3 for testing
|
||||
run_benchmark("llama3")
|
||||
main()
|
||||
|
||||
495
benchmarks/run_long_session.py
Normal file
495
benchmarks/run_long_session.py
Normal file
@@ -0,0 +1,495 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
TurboQuant Long-Session Quality Test (Issue #12)
|
||||
|
||||
Runs a 50-turn multi-step reasoning conversation to detect quality degradation
|
||||
under sustained context pressure. Compares TurboQuant KV vs FP16 KV baseline.
|
||||
|
||||
Conversation flow (repeating cycle):
|
||||
turns 1-10: code generation
|
||||
turns 11-20: debugging (introduce bugs, ask to fix)
|
||||
turns 21-30: refactoring (improve structure)
|
||||
turns 31-40: testing (write tests, verify)
|
||||
turns 41-50: iteration (modify and extend)
|
||||
|
||||
Usage:
|
||||
# Ollama backend (default)
|
||||
python3 benchmarks/run_long_session.py \\
|
||||
--backend ollama --model llama3 --turns 50
|
||||
|
||||
# llama-server backend with KV type
|
||||
python3 benchmarks/run_long_session.py \\
|
||||
--backend llama-server --url http://localhost:8080 \\
|
||||
--model qwen3.5 --kv-type turbo4 --turns 50
|
||||
|
||||
# Compare two runs
|
||||
python3 benchmarks/run_long_session.py --compare run_turbo4.json run_fp16.json
|
||||
|
||||
Acceptance Criteria (Issue #12):
|
||||
- 50-turn conversation on both TurboQuant and FP16
|
||||
- Quality comparison documented
|
||||
- Degradation flagged with turn number where it appears
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import hashlib
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
requests = None
|
||||
|
||||
# ── Conversation Prompts ───────────────────────────────────────────────
|
||||
|
||||
CONVERSATION_CYCLE = [
|
||||
# Phase 1: Code Generation (turns 1-10)
|
||||
{
|
||||
"phase": "code_gen",
|
||||
"turns": [
|
||||
"Write a Python class called RateLimiter that implements a token bucket algorithm. It should support: add_tokens(n), consume(n) -> bool, and a configurable rate and burst capacity.",
|
||||
"Add thread-safety to the RateLimiter class using a lock. Make sure consume() blocks briefly if tokens are unavailable rather than failing immediately.",
|
||||
"Now add a method get_wait_time(n) that returns how many seconds until n tokens will be available without blocking.",
|
||||
"Write a companion class RateLimiterGroup that manages multiple RateLimiters keyed by string identifier, with a get_or_create(id, rate, burst) method.",
|
||||
"Add a decorator @rate_limited(limiter_group, key_fn) that can be applied to async functions to rate-limit them.",
|
||||
"Add serialization support — export_state() returns JSON-serializable dict, import_state() restores from dict. Include timestamps.",
|
||||
"Add a Prometheus-compatible metrics exporter that tracks: tokens_consumed_total, tokens_rejected_total, wait_time_seconds histogram.",
|
||||
"Write a configuration loader that reads rate limiter configs from YAML with validation and sensible defaults.",
|
||||
"Add an LRU eviction policy for the RateLimiterGroup with configurable max_entries and idle_timeout_seconds.",
|
||||
"Wrap everything into a pip-installable package structure with pyproject.toml, __init__.py exports, and a CLI entry point.",
|
||||
]
|
||||
},
|
||||
# Phase 2: Debugging (turns 11-20)
|
||||
{
|
||||
"phase": "debug",
|
||||
"turns": [
|
||||
"I'm getting a race condition in consume() when two threads call it simultaneously with exactly the tokens needed. The lock doesn't seem to help. Can you trace through the logic and find the bug?",
|
||||
"The get_wait_time() method returns negative values sometimes. Here's the traceback: ... Can you identify what's wrong?",
|
||||
"RateLimiterGroup.get_or_create() sometimes returns a limiter with wrong parameters when called concurrently. Explain the potential issue.",
|
||||
"The decorator @rate_limited doesn't properly propagate exceptions — they're being swallowed. Fix the error handling.",
|
||||
"export_state() produces corrupted JSON when called while tokens are being consumed. How should we fix the serialization?",
|
||||
"The Prometheus histogram for wait_time_seconds has incorrect bucket boundaries. Review the histogram configuration.",
|
||||
"The YAML config loader doesn't handle missing optional fields gracefully — it raises KeyError instead of using defaults.",
|
||||
"LRU eviction is evicting active limiters. The idle_timeout calculation seems wrong. Debug the eviction logic.",
|
||||
"The CLI entry point crashes with a specific YAML config. Here's the config and error: ... What's the root cause?",
|
||||
"Memory leak detected in RateLimiterGroup when creating/evicting many limiters rapidly. Where's the leak?",
|
||||
]
|
||||
},
|
||||
# Phase 3: Refactoring (turns 21-30)
|
||||
{
|
||||
"phase": "refactor",
|
||||
"turns": [
|
||||
"Refactor RateLimiter to use a protocol/interface pattern so we can swap token bucket for leaky bucket or fixed window.",
|
||||
"Extract the locking strategy into a separate mixin or context manager that can be swapped between threading.Lock, asyncio.Lock, and no-lock.",
|
||||
"Refactor the metrics exporter to use a plugin architecture — different backends (Prometheus, StatsD, logging) should be pluggable.",
|
||||
"Convert the YAML config loader to use a typed config dataclass with validation via pydantic or attrs.",
|
||||
"Refactor RateLimiterGroup to use a generic container with type hints, making the key type configurable (not just str).",
|
||||
"Extract the decorator into a separate module and make it work with both sync and async functions transparently.",
|
||||
"Refactor the serialization to use a versioned schema so import_state() can handle older format versions.",
|
||||
"Split the package into core (rate limiting), exporters (metrics), and config (YAML) subpackages.",
|
||||
"Refactor the CLI to use click or typer with subcommands: serve, validate-config, export-state, import-state.",
|
||||
"Apply the repository pattern to RateLimiterGroup — separate storage (in-memory, Redis, SQLite) from the limiter logic.",
|
||||
]
|
||||
},
|
||||
# Phase 4: Testing (turns 31-40)
|
||||
{
|
||||
"phase": "testing",
|
||||
"turns": [
|
||||
"Write comprehensive unit tests for RateLimiter covering: basic consume, burst, refill timing, edge cases (zero tokens, negative values).",
|
||||
"Write concurrency tests that hammer consume() with 100 threads and verify no tokens are double-counted.",
|
||||
"Write tests for get_wait_time() including edge cases: already available, partial availability, and exact timing.",
|
||||
"Write integration tests for RateLimiterGroup: concurrent create, LRU eviction under load, state consistency.",
|
||||
"Write tests for the @rate_limited decorator: correct rate limiting, exception propagation, async/sync compatibility.",
|
||||
"Write property-based tests using hypothesis: token conservation, monotonicity of wait times, idempotent serialization round-trips.",
|
||||
"Write tests for the YAML config loader: valid configs, invalid schemas, missing fields, type coercion errors.",
|
||||
"Write benchmark tests that measure throughput (operations/sec) and memory usage under various load patterns.",
|
||||
"Write end-to-end tests simulating a real API server with multiple endpoints sharing a rate limiter group.",
|
||||
"Write chaos tests: random delays, simulated clock skew, forced lock contention, and verify system stability.",
|
||||
]
|
||||
},
|
||||
# Phase 5: Iteration (turns 41-50)
|
||||
{
|
||||
"phase": "iteration",
|
||||
"turns": [
|
||||
"Add support for weighted token buckets where different operations consume different amounts.",
|
||||
"Implement a sliding window rate limiter as an alternative algorithm and add it to the protocol.",
|
||||
"Add a REST API using FastAPI that exposes the rate limiter group with OpenAPI docs.",
|
||||
"Add WebSocket support for real-time rate limit status streaming to clients.",
|
||||
"Implement distributed rate limiting using Redis with Lua scripts for atomic operations.",
|
||||
"Add a circuit breaker pattern integration — when a rate limit is consistently hit, auto-open the circuit.",
|
||||
"Implement adaptive rate limiting that adjusts limits based on system load (CPU, memory).",
|
||||
"Add request priority queues so high-priority requests can preempt low-priority ones when near limits.",
|
||||
"Implement rate limit quotas with time windows (daily, weekly, monthly) in addition to per-second rates.",
|
||||
"Write a migration guide and changelog for v2.0 with all the new features and breaking changes.",
|
||||
]
|
||||
},
|
||||
]
|
||||
|
||||
# ── Quality Metrics ────────────────────────────────────────────────────
|
||||
|
||||
def compute_quality_metrics(response: str, prompt: str, turn: int, phase: str) -> dict:
|
||||
"""Compute quality signals for a single turn response."""
|
||||
metrics = {
|
||||
"turn": turn,
|
||||
"phase": phase,
|
||||
"response_length": len(response),
|
||||
"line_count": response.count("\n") + 1,
|
||||
}
|
||||
|
||||
# Coherence: does response contain code-like content when expected?
|
||||
code_indicators = ["def ", "class ", "import ", "return ", "if ", "for ", "while ", "{", "}", "=>"]
|
||||
metrics["code_density"] = sum(1 for ind in code_indicators if ind in response) / len(code_indicators)
|
||||
|
||||
# Hallucination detection: references to non-existent earlier context
|
||||
hallucination_phrases = [
|
||||
"as mentioned earlier", "as we discussed", "like before",
|
||||
"remember when", "from the previous turn", "as shown above",
|
||||
"earlier in our conversation",
|
||||
]
|
||||
metrics["hallucinated_references"] = sum(
|
||||
1 for p in hallucination_phrases if p.lower() in response.lower()
|
||||
)
|
||||
|
||||
# Structural quality: does it have proper formatting?
|
||||
metrics["has_headers"] = bool(re.search(r"^#{1,3}\s", response, re.MULTILINE))
|
||||
metrics["has_code_blocks"] = response.count("```") >= 2
|
||||
metrics["has_lists"] = bool(re.search(r"^[\-\*\d]\.\s", response, re.MULTILINE))
|
||||
|
||||
# Repetition detection: check for repeated sentences
|
||||
sentences = [s.strip().lower() for s in re.split(r'[.!?]+', response) if len(s.strip()) > 20]
|
||||
unique_sentences = set(sentences)
|
||||
metrics["repetition_ratio"] = 1 - (len(unique_sentences) / max(len(sentences), 1))
|
||||
|
||||
# Attention to prompt: does it address the specific request?
|
||||
prompt_keywords = set(re.findall(r'\b\w{4,}\b', prompt.lower()))
|
||||
response_words = set(re.findall(r'\b\w{4,}\b', response.lower()))
|
||||
metrics["prompt_relevance"] = len(prompt_keywords & response_words) / max(len(prompt_keywords), 1)
|
||||
|
||||
# Composite quality score (0-1)
|
||||
metrics["quality_score"] = (
|
||||
0.25 * min(metrics["code_density"] * 3, 1.0) +
|
||||
0.20 * min(metrics["prompt_relevance"] * 2, 1.0) +
|
||||
0.20 * (1.0 - min(metrics["repetition_ratio"] * 5, 1.0)) +
|
||||
0.15 * (1.0 if metrics["has_code_blocks"] else 0.5) +
|
||||
0.10 * (1.0 - min(metrics["hallucinated_references"] * 0.3, 1.0)) +
|
||||
0.10 * (1.0 if metrics["has_lists"] else 0.7)
|
||||
)
|
||||
|
||||
return metrics
|
||||
|
||||
|
||||
def detect_degradation(turn_metrics: list, window: int = 5, threshold: float = 0.15) -> list:
|
||||
"""Detect quality degradation by comparing rolling windows."""
|
||||
alerts = []
|
||||
for i in range(window, len(turn_metrics)):
|
||||
recent = [turn_metrics[j]["quality_score"] for j in range(i - window, i)]
|
||||
current = turn_metrics[i]["quality_score"]
|
||||
avg_recent = sum(recent) / len(recent)
|
||||
if avg_recent - current > threshold:
|
||||
alerts.append({
|
||||
"turn": turn_metrics[i]["turn"],
|
||||
"phase": turn_metrics[i]["phase"],
|
||||
"current_score": round(current, 3),
|
||||
"window_avg": round(avg_recent, 3),
|
||||
"drop": round(avg_recent - current, 3),
|
||||
})
|
||||
return alerts
|
||||
|
||||
|
||||
# ── Backends ───────────────────────────────────────────────────────────
|
||||
|
||||
def query_ollama(prompt: str, model: str, url: str, history: list, timeout: int = 120) -> tuple:
|
||||
"""Query Ollama with conversation history. Returns (response, stats)."""
|
||||
messages = history + [{"role": "user", "content": prompt}]
|
||||
api_url = f"{url.rstrip('/')}/api/chat"
|
||||
|
||||
start = time.time()
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"messages": messages,
|
||||
"stream": False,
|
||||
"options": {"num_ctx": 8192},
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
|
||||
data = resp.json()
|
||||
content = data.get("message", {}).get("content", "")
|
||||
eval_count = data.get("eval_count", 0)
|
||||
eval_duration = data.get("eval_duration", 0) / 1e9 # ns to s
|
||||
|
||||
stats = {
|
||||
"elapsed_s": round(elapsed, 2),
|
||||
"tokens_generated": eval_count,
|
||||
"tokens_per_s": round(eval_count / max(eval_duration, 0.001), 1),
|
||||
"prompt_eval_count": data.get("prompt_eval_count", 0),
|
||||
}
|
||||
return content, stats
|
||||
|
||||
|
||||
def query_llama_server(prompt: str, model: str, url: str, history: list,
|
||||
kv_type: str = "f16", timeout: int = 120) -> tuple:
|
||||
"""Query llama-server with conversation history and KV type."""
|
||||
messages = history + [{"role": "user", "content": prompt}]
|
||||
api_url = f"{url.rstrip('/')}/v1/chat/completions"
|
||||
|
||||
start = time.time()
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"messages": messages,
|
||||
"temperature": 0.7,
|
||||
"max_tokens": 2048,
|
||||
}, headers={"Content-Type": "application/json"}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
|
||||
data = resp.json()
|
||||
content = data["choices"][0]["message"]["content"]
|
||||
usage = data.get("usage", {})
|
||||
|
||||
stats = {
|
||||
"elapsed_s": round(elapsed, 2),
|
||||
"tokens_generated": usage.get("completion_tokens", 0),
|
||||
"prompt_tokens": usage.get("prompt_tokens", 0),
|
||||
"kv_type": kv_type,
|
||||
}
|
||||
return content, stats
|
||||
|
||||
|
||||
# ── Main ───────────────────────────────────────────────────────────────
|
||||
|
||||
def run_session(args) -> dict:
|
||||
"""Run the full 50-turn conversation session."""
|
||||
total_turns = args.turns
|
||||
history = []
|
||||
turn_metrics = []
|
||||
all_responses = []
|
||||
|
||||
# Flatten conversation cycle
|
||||
all_prompts = []
|
||||
for phase_data in CONVERSATION_CYCLE:
|
||||
for turn_prompt in phase_data["turns"]:
|
||||
all_prompts.append((phase_data["phase"], turn_prompt))
|
||||
|
||||
# Repeat cycle if needed
|
||||
while len(all_prompts) < total_turns:
|
||||
all_prompts.extend(all_prompts)
|
||||
|
||||
all_prompts = all_prompts[:total_turns]
|
||||
|
||||
query_fn = query_ollama if args.backend == "ollama" else query_llama_server
|
||||
query_kwargs = {"model": args.model, "url": args.url}
|
||||
if args.backend == "llama-server":
|
||||
query_kwargs["kv_type"] = args.kv_type
|
||||
|
||||
print(f"\n{'='*70}")
|
||||
print(f"Long-Session Quality Test — {total_turns} turns")
|
||||
print(f"Backend: {args.backend} | Model: {args.model}")
|
||||
if args.backend == "llama-server":
|
||||
print(f"KV Type: {args.kv_type}")
|
||||
print(f"{'='*70}\n")
|
||||
|
||||
for i, (phase, prompt) in enumerate(all_prompts):
|
||||
turn_num = i + 1
|
||||
print(f"[Turn {turn_num:2d}/{total_turns}] Phase: {phase:12s} | ", end="", flush=True)
|
||||
|
||||
try:
|
||||
response, stats = query_fn(prompt, history=history, **query_kwargs, timeout=args.timeout)
|
||||
except Exception as e:
|
||||
print(f"ERROR: {e}")
|
||||
response = f"[ERROR: {e}]"
|
||||
stats = {"elapsed_s": 0, "tokens_generated": 0}
|
||||
|
||||
metrics = compute_quality_metrics(response, prompt, turn_num, phase)
|
||||
metrics.update(stats)
|
||||
turn_metrics.append(metrics)
|
||||
all_responses.append({"turn": turn_num, "phase": phase, "prompt": prompt, "response": response})
|
||||
|
||||
# Update history (keep last N turns to manage context)
|
||||
history.append({"role": "user", "content": prompt})
|
||||
history.append({"role": "assistant", "content": response})
|
||||
if len(history) > args.history_window * 2:
|
||||
history = history[-(args.history_window * 2):]
|
||||
|
||||
print(f"score={metrics['quality_score']:.2f} | "
|
||||
f"len={metrics['response_length']:4d} | "
|
||||
f"{stats.get('tokens_per_s', '?')} tok/s | "
|
||||
f"{stats['elapsed_s']:.1f}s")
|
||||
|
||||
if args.delay > 0:
|
||||
time.sleep(args.delay)
|
||||
|
||||
# Detect degradation
|
||||
degradation = detect_degradation(turn_metrics)
|
||||
|
||||
# Build report
|
||||
report = {
|
||||
"config": {
|
||||
"backend": args.backend,
|
||||
"model": args.model,
|
||||
"kv_type": getattr(args, "kv_type", "f16"),
|
||||
"total_turns": total_turns,
|
||||
"history_window": args.history_window,
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
},
|
||||
"turn_metrics": turn_metrics,
|
||||
"degradation_alerts": degradation,
|
||||
"summary": {
|
||||
"avg_quality_score": round(sum(m["quality_score"] for m in turn_metrics) / len(turn_metrics), 3),
|
||||
"min_quality_score": round(min(m["quality_score"] for m in turn_metrics), 3),
|
||||
"max_quality_score": round(max(m["quality_score"] for m in turn_metrics), 3),
|
||||
"total_degradation_events": len(degradation),
|
||||
"first_degradation_turn": degradation[0]["turn"] if degradation else None,
|
||||
"avg_response_length": round(sum(m["response_length"] for m in turn_metrics) / len(turn_metrics), 0),
|
||||
"total_hallucinated_references": sum(m["hallucinated_references"] for m in turn_metrics),
|
||||
"avg_repetition_ratio": round(sum(m["repetition_ratio"] for m in turn_metrics) / len(turn_metrics), 3),
|
||||
},
|
||||
"responses": all_responses if args.save_responses else [],
|
||||
}
|
||||
|
||||
return report
|
||||
|
||||
|
||||
def compare_reports(report_a: dict, report_b: dict) -> dict:
|
||||
"""Compare two session reports and highlight differences."""
|
||||
sa = report_a["summary"]
|
||||
sb = report_b["summary"]
|
||||
label_a = report_a["config"].get("kv_type", "run_a")
|
||||
label_b = report_b["config"].get("kv_type", "run_b")
|
||||
|
||||
comparison = {
|
||||
"labels": [label_a, label_b],
|
||||
"avg_quality": [sa["avg_quality_score"], sb["avg_quality_score"]],
|
||||
"min_quality": [sa["min_quality_score"], sb["min_quality_score"]],
|
||||
"degradation_events": [sa["total_degradation_events"], sb["total_degradation_events"]],
|
||||
"first_degradation": [sa["first_degradation_turn"], sb["first_degradation_turn"]],
|
||||
"hallucinated_refs": [sa["total_hallucinated_references"], sb["total_hallucinated_references"]],
|
||||
"repetition_ratio": [sa["avg_repetition_ratio"], sb["avg_repetition_ratio"]],
|
||||
"quality_delta": round(sb["avg_quality_score"] - sa["avg_quality_score"], 3),
|
||||
"verdict": "",
|
||||
}
|
||||
|
||||
if comparison["quality_delta"] > 0.05:
|
||||
comparison["verdict"] = f"{label_b} is BETTER by {comparison['quality_delta']:.3f}"
|
||||
elif comparison["quality_delta"] < -0.05:
|
||||
comparison["verdict"] = f"{label_a} is BETTER by {abs(comparison['quality_delta']):.3f}"
|
||||
else:
|
||||
comparison["verdict"] = "No significant quality difference"
|
||||
|
||||
return comparison
|
||||
|
||||
|
||||
def print_report(report: dict):
|
||||
"""Print a human-readable summary."""
|
||||
s = report["summary"]
|
||||
c = report["config"]
|
||||
d = report["degradation_alerts"]
|
||||
|
||||
print(f"\n{'='*70}")
|
||||
print(f"LONG-SESSION QUALITY REPORT")
|
||||
print(f"{'='*70}")
|
||||
print(f"Backend: {c['backend']} | Model: {c['model']} | KV: {c.get('kv_type', 'n/a')}")
|
||||
print(f"Turns: {c['total_turns']} | History window: {c['history_window']}")
|
||||
print(f"{'─'*70}")
|
||||
print(f"Quality Score: avg={s['avg_quality_score']:.3f} min={s['min_quality_score']:.3f} max={s['max_quality_score']:.3f}")
|
||||
print(f"Avg Response: {s['avg_response_length']:.0f} chars")
|
||||
print(f"Repetition: {s['avg_repetition_ratio']:.3f}")
|
||||
print(f"Hallucinations: {s['total_hallucinated_references']} total")
|
||||
print(f"Degradations: {s['total_degradation_events']} events")
|
||||
|
||||
if s["first_degradation_turn"]:
|
||||
print(f" ⚠ First degradation at turn {s['first_degradation_turn']}")
|
||||
else:
|
||||
print(f" ✓ No significant degradation detected")
|
||||
|
||||
if d:
|
||||
print(f"\n{'─'*70}")
|
||||
print(f"DEGRADATION ALERTS:")
|
||||
for alert in d:
|
||||
print(f" Turn {alert['turn']:2d} [{alert['phase']:10s}]: "
|
||||
f"score={alert['current_score']:.3f} "
|
||||
f"(window avg={alert['window_avg']:.3f}, "
|
||||
f"drop={alert['drop']:.3f})")
|
||||
|
||||
# Per-phase averages
|
||||
phases = {}
|
||||
for m in report["turn_metrics"]:
|
||||
phases.setdefault(m["phase"], []).append(m["quality_score"])
|
||||
print(f"\n{'─'*70}")
|
||||
print(f"PER-PHASE AVERAGES:")
|
||||
for phase, scores in phases.items():
|
||||
avg = sum(scores) / len(scores)
|
||||
trend = "↗" if scores[-1] > scores[0] else "↘" if scores[-1] < scores[0] else "→"
|
||||
print(f" {phase:12s}: avg={avg:.3f} trend={trend} "
|
||||
f"first={scores[0]:.3f} last={scores[-1]:.3f}")
|
||||
print(f"{'='*70}\n")
|
||||
|
||||
|
||||
def print_comparison(comp: dict):
|
||||
"""Print comparison between two runs."""
|
||||
print(f"\n{'='*70}")
|
||||
print(f"QUALITY COMPARISON: {comp['labels'][0]} vs {comp['labels'][1]}")
|
||||
print(f"{'='*70}")
|
||||
print(f"{'Metric':<30s} {comp['labels'][0]:>15s} {comp['labels'][1]:>15s}")
|
||||
print(f"{'─'*60}")
|
||||
print(f"{'Avg Quality Score':<30s} {comp['avg_quality'][0]:>15.3f} {comp['avg_quality'][1]:>15.3f}")
|
||||
print(f"{'Min Quality Score':<30s} {comp['min_quality'][0]:>15.3f} {comp['min_quality'][1]:>15.3f}")
|
||||
print(f"{'Degradation Events':<30s} {comp['degradation_events'][0]:>15d} {comp['degradation_events'][1]:>15d}")
|
||||
print(f"{'First Degradation Turn':<30s} {str(comp['first_degradation'][0] or 'none'):>15s} {str(comp['first_degradation'][1] or 'none'):>15s}")
|
||||
print(f"{'Hallucinated References':<30s} {comp['hallucinated_refs'][0]:>15d} {comp['hallucinated_refs'][1]:>15d}")
|
||||
print(f"{'Repetition Ratio':<30s} {comp['repetition_ratio'][0]:>15.3f} {comp['repetition_ratio'][1]:>15.3f}")
|
||||
print(f"{'─'*60}")
|
||||
print(f"Verdict: {comp['verdict']}")
|
||||
print(f"{'='*70}\n")
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="TurboQuant Long-Session Quality Test")
|
||||
parser.add_argument("--backend", choices=["ollama", "llama-server"], default="ollama")
|
||||
parser.add_argument("--model", default="llama3", help="Model name")
|
||||
parser.add_argument("--url", default="http://localhost:11434", help="Backend URL")
|
||||
parser.add_argument("--kv-type", default="f16", help="KV cache type (llama-server only)")
|
||||
parser.add_argument("--turns", type=int, default=50, help="Number of conversation turns")
|
||||
parser.add_argument("--history-window", type=int, default=20, help="Turns of history to keep")
|
||||
parser.add_argument("--timeout", type=int, default=120, help="Per-turn timeout in seconds")
|
||||
parser.add_argument("--delay", type=float, default=0.5, help="Delay between turns in seconds")
|
||||
parser.add_argument("--output", "-o", help="Output JSON file path")
|
||||
parser.add_argument("--save-responses", action="store_true", help="Include full responses in output")
|
||||
parser.add_argument("--compare", nargs=2, metavar=("FILE_A", "FILE_B"),
|
||||
help="Compare two previously saved run reports")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Compare mode
|
||||
if args.compare:
|
||||
with open(args.compare[0]) as f:
|
||||
report_a = json.load(f)
|
||||
with open(args.compare[1]) as f:
|
||||
report_b = json.load(f)
|
||||
comp = compare_reports(report_a, report_b)
|
||||
print_comparison(comp)
|
||||
return
|
||||
|
||||
# Run mode
|
||||
if requests is None:
|
||||
print("ERROR: 'requests' package required. Install with: pip install requests")
|
||||
sys.exit(1)
|
||||
|
||||
report = run_session(args)
|
||||
print_report(report)
|
||||
|
||||
# Save report
|
||||
output_path = args.output or f"benchmarks/long_session_{args.kv_type}_{int(time.time())}.json"
|
||||
os.makedirs(os.path.dirname(output_path) or ".", exist_ok=True)
|
||||
with open(output_path, "w") as f:
|
||||
json.dump(report, f, indent=2)
|
||||
print(f"Report saved to: {output_path}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
423
benchmarks/test_matrix.py
Normal file
423
benchmarks/test_matrix.py
Normal file
@@ -0,0 +1,423 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
TurboQuant Full Test Matrix — Issue #11
|
||||
|
||||
Runs 10 practical prompts against both FP16 and TurboQuant KV configs.
|
||||
Measures quality (pattern match, perplexity delta) and performance
|
||||
(tok/s, TTFT, memory). Generates pass/fail report.
|
||||
|
||||
Usage:
|
||||
python3 benchmarks/test_matrix.py --model llama3 --backend ollama
|
||||
python3 benchmarks/test_matrix.py --model qwen3.5 --backend llama-server --kv-type turbo4
|
||||
python3 benchmarks/test_matrix.py --quick # Run only 3 prompts for smoke test
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
requests = None # Fallback for testing without requests
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
BASELINE_FILE = Path(__file__).parent / "baseline_results.json"
|
||||
RESULTS_DIR = Path(__file__).parent / "results"
|
||||
PROMPTS_FILE = Path(__file__).parent / "test_prompts.json"
|
||||
|
||||
# Quality pass criteria (from issue #11)
|
||||
PPL_DELTA_MAX = 0.5
|
||||
NEEDLE_RETRIEVAL_MIN = 1.0 # 100%
|
||||
PROMPT_QUALITY_MIN = 0.9 # 9/10
|
||||
ATTENTION_SIM_MIN = 0.995
|
||||
|
||||
# Performance pass criteria
|
||||
TOKS_BASELINE_RATIO = 0.90 # >= 90% baseline
|
||||
TTFT_BASELINE_RATIO = 1.10 # <= 110% baseline
|
||||
MEMORY_CEILING_GB = 27.0
|
||||
CONTEXT_CEILING_MIN_K = 64
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test prompts (10 practical prompts from issue #11)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
TEST_PROMPTS = [
|
||||
{
|
||||
"id": 1,
|
||||
"name": "Thermodynamics Laws",
|
||||
"category": "factual",
|
||||
"prompt": "What are the three laws of thermodynamics?",
|
||||
"pass_pattern": r"(?i)(first law|energy conservation|second law|entropy|third law|absolute zero)",
|
||||
"weight": 1.0,
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "Merge Sorted Lists",
|
||||
"category": "code_generation",
|
||||
"prompt": "Write a Python function to merge two sorted lists into a single sorted list without using built-in sort methods.",
|
||||
"pass_pattern": r"(?i)(def merge|while|if.*<|append|return)",
|
||||
"weight": 1.0,
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "Syllogistic Reasoning",
|
||||
"category": "reasoning",
|
||||
"prompt": "If all A are B, and some B are C, what can we conclude about the relationship between A and C? Explain your reasoning.",
|
||||
"pass_pattern": r"(?i)(some|cannot conclude|not necessarily|no definite)",
|
||||
"weight": 1.0,
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"name": "Local AI Sovereignty Essay",
|
||||
"category": "long_form",
|
||||
"prompt": "Write a 200-word essay on the sovereignty of local AI. Discuss why local inference matters for privacy and independence.",
|
||||
"pass_pattern": r"(?i)(sovereignty|local.*AI|privacy|inference|autonomy|independence)",
|
||||
"weight": 1.0,
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"name": "Summarization",
|
||||
"category": "summarization",
|
||||
"prompt": "Summarize in 50 words: The concept of artificial intelligence has evolved since the mid-20th century. Early pioneers like Turing and McCarthy laid the groundwork. Today AI powers search engines, recommendation systems, and medical diagnostics.",
|
||||
"pass_pattern": r"(?i)(artificial intelligence|Turing|McCarthy|evolution|applications)",
|
||||
"weight": 1.0,
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"name": "Math Problem Solving",
|
||||
"category": "math",
|
||||
"prompt": "A train travels 240 miles in 3 hours. A second train travels 360 miles in 4 hours. Which train is faster, and by how many mph?",
|
||||
"pass_pattern": r"(?i)(80|75|first train|5 mph|faster)",
|
||||
"weight": 1.0,
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"name": "SQL Query Generation",
|
||||
"category": "code_generation",
|
||||
"prompt": "Write a SQL query to find all customers who have made more than 3 purchases in the last 30 days, ordered by purchase count descending.",
|
||||
"pass_pattern": r"(?i)(SELECT|FROM|WHERE|GROUP BY|HAVING|COUNT|ORDER BY|DESC)",
|
||||
"weight": 1.0,
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"name": "Ethical Dilemma",
|
||||
"category": "reasoning",
|
||||
"prompt": "Is it ethical for an AI to refuse to answer a question it knows the answer to? Consider both safety and autonomy arguments.",
|
||||
"pass_pattern": r"(?i)(ethical|safety|autonomy|consider|both sides|depends|nuanced)",
|
||||
"weight": 1.0,
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"name": "JSON Schema Design",
|
||||
"category": "code_generation",
|
||||
"prompt": "Design a JSON schema for a book catalog that includes title, author, ISBN, publication year, genres (array), and ratings (object with average and count).",
|
||||
"pass_pattern": r'(?i)({\s*"|"title"|"author"|"isbn"|"genres"|"ratings"|array|object)',
|
||||
"weight": 1.0,
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"name": "Chain of Thought",
|
||||
"category": "reasoning",
|
||||
"prompt": "A farmer has 17 sheep. All but 9 die. How many sheep does the farmer have left? Think step by step.",
|
||||
"pass_pattern": r"(?i)(9|all but 9|still have 9|remaining.*9)",
|
||||
"weight": 1.0,
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Backend interfaces
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def run_ollama(prompt: str, model: str, url: str, timeout: int = 120) -> dict:
|
||||
"""Run a prompt against Ollama /api/generate."""
|
||||
if requests is None:
|
||||
return {"error": "requests not installed", "response": "", "ttft": 0, "tok_per_sec": 0, "peak_mem_mb": 0}
|
||||
|
||||
api_url = f"{url.rstrip('/')}/api/generate"
|
||||
start = time.time()
|
||||
ttft = 0.0
|
||||
|
||||
try:
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"prompt": prompt,
|
||||
"stream": False,
|
||||
"options": {"num_predict": 512}
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
|
||||
data = resp.json()
|
||||
response_text = data.get("response", "")
|
||||
eval_count = data.get("eval_count", 0)
|
||||
eval_duration = data.get("eval_duration", 1)
|
||||
tok_per_sec = eval_count / (eval_duration / 1e9) if eval_duration > 0 else 0
|
||||
ttft = elapsed * 0.1 # Estimate: ~10% of total time is TTFT for non-streaming
|
||||
|
||||
return {
|
||||
"response": response_text,
|
||||
"ttft": ttft,
|
||||
"tok_per_sec": tok_per_sec,
|
||||
"elapsed": elapsed,
|
||||
"peak_mem_mb": 0,
|
||||
"tokens_generated": eval_count,
|
||||
}
|
||||
except Exception as e:
|
||||
return {"error": str(e), "response": "", "ttft": 0, "tok_per_sec": 0, "peak_mem_mb": 0}
|
||||
|
||||
|
||||
def run_llama_server(prompt: str, model: str, url: str, kv_type: str = "fp16", timeout: int = 120) -> dict:
|
||||
"""Run a prompt against llama-server /completion."""
|
||||
if requests is None:
|
||||
return {"error": "requests not installed", "response": "", "ttft": 0, "tok_per_sec": 0, "peak_mem_mb": 0}
|
||||
|
||||
api_url = f"{url.rstrip('/')}/completion"
|
||||
start = time.time()
|
||||
|
||||
try:
|
||||
resp = requests.post(api_url, json={
|
||||
"prompt": prompt,
|
||||
"n_predict": 512,
|
||||
"cache_type_k": kv_type,
|
||||
"cache_type_v": kv_type,
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
|
||||
data = resp.json()
|
||||
response_text = data.get("content", "")
|
||||
tokens_predicted = data.get("tokens_predicted", 0)
|
||||
tok_per_sec = tokens_predicted / elapsed if elapsed > 0 else 0
|
||||
|
||||
return {
|
||||
"response": response_text,
|
||||
"ttft": elapsed * 0.15, # Estimate
|
||||
"tok_per_sec": tok_per_sec,
|
||||
"elapsed": elapsed,
|
||||
"peak_mem_mb": 0,
|
||||
"tokens_generated": tokens_predicted,
|
||||
}
|
||||
except Exception as e:
|
||||
return {"error": str(e), "response": "", "ttft": 0, "tok_per_sec": 0, "peak_mem_mb": 0}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Quality evaluation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def evaluate_quality(response: str, pattern: str) -> dict:
|
||||
"""Evaluate response quality against expected pattern."""
|
||||
match = re.search(pattern, response)
|
||||
return {
|
||||
"matched": match is not None,
|
||||
"pattern": pattern,
|
||||
"response_length": len(response),
|
||||
"has_substance": len(response) > 50,
|
||||
}
|
||||
|
||||
|
||||
def evaluate_performance(result: dict, baseline: dict) -> dict:
|
||||
"""Evaluate performance against baseline."""
|
||||
toks_ratio = result["tok_per_sec"] / max(baseline.get("tok_per_sec", 1), 0.01)
|
||||
ttft_ratio = result["ttft"] / max(baseline.get("ttft", 0.01), 0.01)
|
||||
|
||||
return {
|
||||
"tok_per_sec": result["tok_per_sec"],
|
||||
"tok_per_sec_baseline": baseline.get("tok_per_sec", 0),
|
||||
"tok_per_sec_ratio": round(toks_ratio, 3),
|
||||
"tok_per_sec_pass": toks_ratio >= TOKS_BASELINE_RATIO,
|
||||
"ttft": result["ttft"],
|
||||
"ttft_baseline": baseline.get("ttft", 0),
|
||||
"ttft_ratio": round(ttft_ratio, 3),
|
||||
"ttft_pass": ttft_ratio <= TTFT_BASELINE_RATIO,
|
||||
"peak_mem_mb": result.get("peak_mem_mb", 0),
|
||||
"peak_mem_pass": result.get("peak_mem_mb", 0) / 1024 < MEMORY_CEILING_GB,
|
||||
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test matrix runner
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def run_test_matrix(model: str, backend: str, url: str, kv_type: str = "fp16",
|
||||
quick: bool = False, timeout: int = 120) -> dict:
|
||||
"""Run the full test matrix."""
|
||||
prompts = TEST_PROMPTS[:3] if quick else TEST_PROMPTS
|
||||
|
||||
# Load baseline if exists
|
||||
baseline = {}
|
||||
if BASELINE_FILE.exists():
|
||||
try:
|
||||
baseline = json.loads(BASELINE_FILE.read_text())
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
run_fn = run_ollama if backend == "ollama" else run_llama_server
|
||||
results = []
|
||||
pass_count = 0
|
||||
fail_count = 0
|
||||
|
||||
print(f"Running {len(prompts)} prompts against {backend} ({model})...", file=sys.stderr)
|
||||
|
||||
for p in prompts:
|
||||
print(f" [{p['id']}/10] {p['name']}...", file=sys.stderr, end=" ")
|
||||
|
||||
if backend == "ollama":
|
||||
result = run_fn(p["prompt"], model, url, timeout)
|
||||
else:
|
||||
result = run_fn(p["prompt"], model, url, kv_type, timeout)
|
||||
|
||||
if "error" in result:
|
||||
print(f"ERROR: {result['error']}", file=sys.stderr)
|
||||
results.append({"prompt_id": p["id"], "name": p["name"], "error": result["error"]})
|
||||
fail_count += 1
|
||||
continue
|
||||
|
||||
quality = evaluate_quality(result["response"], p["pass_pattern"])
|
||||
perf = evaluate_performance(result, baseline.get(str(p["id"]), {}))
|
||||
|
||||
quality_pass = quality["matched"] and quality["has_substance"]
|
||||
perf_pass = perf.get("tok_per_sec_pass", True) and perf.get("ttft_pass", True)
|
||||
overall_pass = quality_pass and perf_pass
|
||||
|
||||
if overall_pass:
|
||||
pass_count += 1
|
||||
print("PASS", file=sys.stderr)
|
||||
else:
|
||||
fail_count += 1
|
||||
reasons = []
|
||||
if not quality_pass:
|
||||
reasons.append("quality")
|
||||
if not perf_pass:
|
||||
reasons.append("perf")
|
||||
print(f"FAIL ({', '.join(reasons)})", file=sys.stderr)
|
||||
|
||||
results.append({
|
||||
"prompt_id": p["id"],
|
||||
"name": p["name"],
|
||||
"category": p["category"],
|
||||
"quality": quality,
|
||||
"performance": perf,
|
||||
"pass": overall_pass,
|
||||
"response_preview": result["response"][:200],
|
||||
})
|
||||
|
||||
report = {
|
||||
"generated_at": datetime.now(timezone.utc).isoformat(),
|
||||
"model": model,
|
||||
"backend": backend,
|
||||
"kv_type": kv_type,
|
||||
"total_prompts": len(prompts),
|
||||
"passed": pass_count,
|
||||
"failed": fail_count,
|
||||
"pass_rate": pass_count / len(prompts) if prompts else 0,
|
||||
"quality_pass_rate": sum(1 for r in results if r.get("quality", {}).get("matched", False)) / len(prompts) if prompts else 0,
|
||||
"results": results,
|
||||
}
|
||||
|
||||
return report
|
||||
|
||||
|
||||
def report_to_markdown(report: dict) -> str:
|
||||
"""Generate markdown test report."""
|
||||
lines = [
|
||||
f"# TurboQuant Test Matrix Report",
|
||||
"",
|
||||
f"Generated: {report['generated_at'][:16]}",
|
||||
f"Model: {report['model']}",
|
||||
f"Backend: {report['backend']} (KV: {report.get('kv_type', 'fp16')})",
|
||||
"",
|
||||
"## Summary",
|
||||
"",
|
||||
"| Metric | Value |",
|
||||
"|--------|-------|",
|
||||
f"| Total prompts | {report['total_prompts']} |",
|
||||
f"| Passed | {report['passed']} |",
|
||||
f"| Failed | {report['failed']} |",
|
||||
f"| Pass rate | {report['pass_rate']:.0%} |",
|
||||
f"| Quality pass rate | {report['quality_pass_rate']:.0%} |",
|
||||
"",
|
||||
"## Results",
|
||||
"",
|
||||
"| # | Prompt | Category | Quality | Perf tok/s | Pass |",
|
||||
"|---|--------|----------|---------|------------|------|",
|
||||
]
|
||||
|
||||
for r in report["results"]:
|
||||
if "error" in r:
|
||||
lines.append(f"| {r['prompt_id']} | {r['name']} | - | ERROR | - | ❌ |")
|
||||
continue
|
||||
|
||||
q = r.get("quality", {})
|
||||
p = r.get("performance", {})
|
||||
q_icon = "✅" if q.get("matched") else "❌"
|
||||
p_toks = f"{p.get('tok_per_sec', 0):.1f}" if p.get("tok_per_sec") else "-"
|
||||
pass_icon = "✅" if r.get("pass") else "❌"
|
||||
lines.append(f"| {r['prompt_id']} | {r['name']} | {r.get('category', '')} | {q_icon} | {p_toks} | {pass_icon} |")
|
||||
|
||||
lines.extend([
|
||||
"",
|
||||
"## Pass Criteria",
|
||||
"",
|
||||
"| Test | Criteria |",
|
||||
"|------|----------|",
|
||||
f"| Pattern match | >= {PROMPT_QUALITY_MIN:.0%} of prompts match expected patterns |",
|
||||
f"| tok/s | >= {TOKS_BASELINE_RATIO:.0%} of baseline |",
|
||||
f"| TTFT | <= {TTFT_BASELINE_RATIO:.0%} of baseline |",
|
||||
f"| Peak memory | < {MEMORY_CEILING_GB}GB |",
|
||||
])
|
||||
|
||||
# Go/no-go
|
||||
all_pass = report["pass_rate"] >= 0.9
|
||||
lines.extend([
|
||||
"",
|
||||
"## Go/No-Go Decision",
|
||||
"",
|
||||
f"**{'GO ✅' if all_pass else 'NO-GO ❌'}** — {report['passed']}/{report['total_prompts']} prompts passed ({report['pass_rate']:.0%})",
|
||||
])
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CLI
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="TurboQuant Full Test Matrix")
|
||||
parser.add_argument("--model", default="llama3", help="Model name")
|
||||
parser.add_argument("--backend", default="ollama", choices=["ollama", "llama-server"])
|
||||
parser.add_argument("--url", default="http://localhost:11434", help="Backend URL")
|
||||
parser.add_argument("--kv-type", default="fp16", help="KV cache type (fp16, turbo4, q4_0)")
|
||||
parser.add_argument("--quick", action="store_true", help="Run only 3 prompts")
|
||||
parser.add_argument("--json", action="store_true", help="JSON output")
|
||||
parser.add_argument("--timeout", type=int, default=120, help="Per-prompt timeout")
|
||||
args = parser.parse_args()
|
||||
|
||||
report = run_test_matrix(args.model, args.backend, args.url, args.kv_type, args.quick, args.timeout)
|
||||
|
||||
# Save results
|
||||
RESULTS_DIR.mkdir(parents=True, exist_ok=True)
|
||||
ts = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
result_file = RESULTS_DIR / f"matrix_{args.model}_{args.kv_type}_{ts}.json"
|
||||
result_file.write_text(json.dumps(report, indent=2) + "\n")
|
||||
print(f"Results saved to {result_file}", file=sys.stderr)
|
||||
|
||||
if args.json:
|
||||
print(json.dumps(report, indent=2))
|
||||
else:
|
||||
print(report_to_markdown(report))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
123
tests/test_test_matrix.py
Normal file
123
tests/test_test_matrix.py
Normal file
@@ -0,0 +1,123 @@
|
||||
"""Tests for TurboQuant test matrix (Issue #11)."""
|
||||
|
||||
import json
|
||||
import re
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
import pytest
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))
|
||||
|
||||
from test_matrix import (
|
||||
evaluate_quality,
|
||||
evaluate_performance,
|
||||
report_to_markdown,
|
||||
TEST_PROMPTS,
|
||||
PPL_DELTA_MAX,
|
||||
TOKS_BASELINE_RATIO,
|
||||
TTFT_BASELINE_RATIO,
|
||||
)
|
||||
|
||||
|
||||
class TestEvaluateQuality:
|
||||
def test_pattern_match(self):
|
||||
result = evaluate_quality("The first law of thermodynamics states...", r"(?i)(first law|energy)")
|
||||
assert result["matched"] is True
|
||||
|
||||
def test_pattern_no_match(self):
|
||||
result = evaluate_quality("Hello world", r"(?i)(thermodynamics|entropy)")
|
||||
assert result["matched"] is False
|
||||
|
||||
def test_substance_check(self):
|
||||
result = evaluate_quality("Short", r".*")
|
||||
assert result["has_substance"] is False
|
||||
|
||||
def test_substance_pass(self):
|
||||
result = evaluate_quality("A" * 100, r".*")
|
||||
assert result["has_substance"] is True
|
||||
|
||||
def test_response_length(self):
|
||||
result = evaluate_quality("Hello world", r".*")
|
||||
assert result["response_length"] == 11
|
||||
|
||||
|
||||
class TestEvaluatePerformance:
|
||||
def test_tok_per_sec_pass(self):
|
||||
result = {"tok_per_sec": 100, "ttft": 0.5, "peak_mem_mb": 1000}
|
||||
baseline = {"tok_per_sec": 100, "ttft": 0.5}
|
||||
perf = evaluate_performance(result, baseline)
|
||||
assert perf["tok_per_sec_pass"] is True
|
||||
|
||||
def test_tok_per_sec_fail(self):
|
||||
result = {"tok_per_sec": 50, "ttft": 0.5, "peak_mem_mb": 1000}
|
||||
baseline = {"tok_per_sec": 100, "ttft": 0.5}
|
||||
perf = evaluate_performance(result, baseline)
|
||||
assert perf["tok_per_sec_pass"] is False
|
||||
|
||||
def test_ttft_pass(self):
|
||||
result = {"tok_per_sec": 100, "ttft": 0.5, "peak_mem_mb": 1000}
|
||||
baseline = {"tok_per_sec": 100, "ttft": 0.5}
|
||||
perf = evaluate_performance(result, baseline)
|
||||
assert perf["ttft_pass"] is True
|
||||
|
||||
def test_ttft_fail(self):
|
||||
result = {"tok_per_sec": 100, "ttft": 1.0, "peak_mem_mb": 1000}
|
||||
baseline = {"tok_per_sec": 100, "ttft": 0.5}
|
||||
perf = evaluate_performance(result, baseline)
|
||||
assert perf["ttft_pass"] is False
|
||||
|
||||
def test_memory_pass(self):
|
||||
result = {"tok_per_sec": 100, "ttft": 0.5, "peak_mem_mb": 10000}
|
||||
baseline = {"tok_per_sec": 100, "ttft": 0.5}
|
||||
perf = evaluate_performance(result, baseline)
|
||||
assert perf["peak_mem_pass"] is True
|
||||
|
||||
|
||||
class TestTestPrompts:
|
||||
def test_has_10_prompts(self):
|
||||
assert len(TEST_PROMPTS) == 10
|
||||
|
||||
def test_all_have_patterns(self):
|
||||
for p in TEST_PROMPTS:
|
||||
assert "pass_pattern" in p
|
||||
# Verify pattern compiles
|
||||
re.compile(p["pass_pattern"])
|
||||
|
||||
def test_all_have_categories(self):
|
||||
categories = {p["category"] for p in TEST_PROMPTS}
|
||||
assert len(categories) >= 4 # At least 4 different categories
|
||||
|
||||
|
||||
class TestReportMarkdown:
|
||||
def test_has_summary(self):
|
||||
report = {
|
||||
"generated_at": "2026-04-14T00:00:00",
|
||||
"model": "test-model",
|
||||
"backend": "ollama",
|
||||
"kv_type": "fp16",
|
||||
"total_prompts": 10,
|
||||
"passed": 9,
|
||||
"failed": 1,
|
||||
"pass_rate": 0.9,
|
||||
"quality_pass_rate": 0.95,
|
||||
"results": [
|
||||
{"prompt_id": 1, "name": "Test", "category": "factual",
|
||||
"quality": {"matched": True}, "performance": {"tok_per_sec": 50},
|
||||
"pass": True}
|
||||
],
|
||||
}
|
||||
md = report_to_markdown(report)
|
||||
assert "Test Matrix Report" in md
|
||||
assert "9" in md # passed
|
||||
assert "GO" in md # 90% pass rate
|
||||
|
||||
def test_nogo_on_low_pass_rate(self):
|
||||
report = {
|
||||
"generated_at": "2026-04-14", "model": "x", "backend": "x", "kv_type": "x",
|
||||
"total_prompts": 10, "passed": 5, "failed": 5, "pass_rate": 0.5,
|
||||
"quality_pass_rate": 0.5, "results": [],
|
||||
}
|
||||
md = report_to_markdown(report)
|
||||
assert "NO-GO" in md
|
||||
Reference in New Issue
Block a user