Compare commits
25 Commits
ci/fix-smo...step35/104
| Author | SHA1 | Date |
|---|---|---|
| | 98c96cda6f | |
| | 7797b9b4c8 | |
| | 0338cf940a | |
| | f3f796fa64 | |
| | 6ab98d65f5 | |
| | c4293f0d31 | |
| | 88a5c48402 | |
| | 3ff52f02b2 | |
| | 8475539070 | |
| | f0f117cdd3 | |
| | a537511652 | |
| | cd18bd06be | |
| | 492c1cdcfd | |
| | 6e583310a8 | |
| | 300918ee1e | |
| | f7ea01cb65 | |
| | d2edbdadc2 | |
| | c009d8df77 | |
| | 3cd8750cbb | |
| | ef765bbd30 | |
| | 5f0d00f127 | |
| | 8affe79489 | |
| | 319f57780d | |
| | 7a7ce0e652 | |
| | 9224a0162b | |
@@ -18,7 +18,17 @@ jobs:
         find . -name '*.py' | grep -v llama-cpp-fork | xargs -r python3 -m py_compile
         find . -name '*.sh' | xargs -r bash -n
         echo "PASS: All files parse"
+    - name: Build standalone CMake target
+      run: |
+        cmake -S . -B build -DTURBOQUANT_BUILD_TESTS=ON
+        cmake --build build -j$(nproc)
+    - name: Run tests
+      run: |
+        ctest --test-dir build --output-on-failure
     - name: Secret scan
       run: |
         if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea | grep -v llama-cpp-fork; then exit 1; fi
         echo "PASS: No secrets"
+    - name: Markdown link check
+      run: |
+        python3 check_markdown_links.py
.gitignore (vendored, new file, +3)
@@ -0,0 +1,3 @@
build/
*.pyc
__pycache__/
CMakeLists.txt (new file, +55)
@@ -0,0 +1,55 @@
cmake_minimum_required(VERSION 3.16)

project(turboquant LANGUAGES CXX)

option(TURBOQUANT_BUILD_TESTS "Build standalone TurboQuant validation tests" ON)
option(TURBOQUANT_BUILD_WASM "Build WebAssembly target (requires Emscripten)" OFF)

add_library(turboquant STATIC
    llama-turbo.cpp
)

target_include_directories(turboquant PUBLIC
    ${CMAKE_CURRENT_SOURCE_DIR}
)

target_compile_features(turboquant PUBLIC cxx_std_17)

if(MSVC)
    target_compile_options(turboquant PRIVATE /W4)
else()
    target_compile_options(turboquant PRIVATE -Wall -Wextra -Wpedantic)
endif()

# --- WebAssembly build via Emscripten ---
if(EMSCRIPTEN OR TURBOQUANT_BUILD_WASM)
    # Build a WASM module (shared library) with JS glue
    add_library(turboquant-wasm SHARED
        llama-turbo.cpp
    )
    target_include_directories(turboquant-wasm PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
    target_compile_features(turboquant-wasm PUBLIC cxx_std_17)

    # Emscripten-specific: export C functions, enable exceptions, generate HTML wrapper
    set_target_properties(turboquant-wasm PROPERTIES
        LINK_FLAGS "-s WASM=1 -s EXPORTED_FUNCTIONS='[\"_polar_quant_encode_turbo4\",\"_polar_quant_decode_turbo4\",\"_malloc\",\"_free\"]' -s EXPORTED_RUNTIME_METHODS='[\"cwrap\",\"getValue\",\"setValue\",\"allocate\"]' -s ALLOW_MEMORY_GROWTH=1 -s MODULARIZE=1 -s EXPORT_NAME='createTurboQuantModule'"
        COMPILE_FLAGS "-fexceptions"
    )
    # Optional HTML demo (no automatic serving — manual)
    # Build with: cmake -DTURBOQUANT_BUILD_WASM=ON -DCMAKE_TOOLCHAIN_FILE=$EMSDK/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake ..
endif()

if(TURBOQUANT_BUILD_TESTS)
    include(CTest)

    add_executable(turboquant_roundtrip_test
        tests/roundtrip_test.cpp
    )
    target_link_libraries(turboquant_roundtrip_test PRIVATE turboquant)
    target_compile_features(turboquant_roundtrip_test PRIVATE cxx_std_17)

    add_test(
        NAME turboquant_roundtrip
        COMMAND turboquant_roundtrip_test
    )
endif()
@@ -13,7 +13,7 @@ Unlock 64K-128K context on qwen3.5:27b within 32GB unified memory.
 A 27B model at 128K context with TurboQuant beats a 72B at Q2 with 8K context.
 
 ## Status
-See [issues](http://143.198.27.163:3000/Timmy_Foundation/turboquant/issues) for current progress.
+See [issues](https://forge.alexanderwhitestone.com/Timmy_Foundation/turboquant/issues) for current progress.
 
 ## Roles
 - **Strago:** Build spec author
@@ -29,4 +29,4 @@ See [issues](http://143.198.27.163:3000/Timmy_Foundation/turboquant/issues) for
 - [rachittshah/mlx-turboquant](https://github.com/rachittshah/mlx-turboquant) — MLX fallback
 
 ## Docs
-- [BUILD-SPEC.md](BUILD-SPEC.md) — Full build specification (Strago, v2.2)
+- [Project Status](docs/PROJECT_STATUS.md) — Full project status and build specification
benchmarks/run_long_session.py (new file, +495)
@@ -0,0 +1,495 @@
#!/usr/bin/env python3
"""
TurboQuant Long-Session Quality Test (Issue #12)

Runs a 50-turn multi-step reasoning conversation to detect quality degradation
under sustained context pressure. Compares TurboQuant KV vs FP16 KV baseline.

Conversation flow (repeating cycle):
    turns 1-10:  code generation
    turns 11-20: debugging (introduce bugs, ask to fix)
    turns 21-30: refactoring (improve structure)
    turns 31-40: testing (write tests, verify)
    turns 41-50: iteration (modify and extend)

Usage:
    # Ollama backend (default)
    python3 benchmarks/run_long_session.py \\
        --backend ollama --model llama3 --turns 50

    # llama-server backend with KV type
    python3 benchmarks/run_long_session.py \\
        --backend llama-server --url http://localhost:8080 \\
        --model qwen3.5 --kv-type turbo4 --turns 50

    # Compare two runs
    python3 benchmarks/run_long_session.py --compare run_turbo4.json run_fp16.json

Acceptance Criteria (Issue #12):
- 50-turn conversation on both TurboQuant and FP16
- Quality comparison documented
- Degradation flagged with turn number where it appears
"""

import argparse
import hashlib
import json
import os
import re
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

try:
    import requests
except ImportError:
    requests = None

# ── Conversation Prompts ───────────────────────────────────────────────

CONVERSATION_CYCLE = [
    # Phase 1: Code Generation (turns 1-10)
    {
        "phase": "code_gen",
        "turns": [
            "Write a Python class called RateLimiter that implements a token bucket algorithm. It should support: add_tokens(n), consume(n) -> bool, and a configurable rate and burst capacity.",
            "Add thread-safety to the RateLimiter class using a lock. Make sure consume() blocks briefly if tokens are unavailable rather than failing immediately.",
            "Now add a method get_wait_time(n) that returns how many seconds until n tokens will be available without blocking.",
            "Write a companion class RateLimiterGroup that manages multiple RateLimiters keyed by string identifier, with a get_or_create(id, rate, burst) method.",
            "Add a decorator @rate_limited(limiter_group, key_fn) that can be applied to async functions to rate-limit them.",
            "Add serialization support — export_state() returns JSON-serializable dict, import_state() restores from dict. Include timestamps.",
            "Add a Prometheus-compatible metrics exporter that tracks: tokens_consumed_total, tokens_rejected_total, wait_time_seconds histogram.",
            "Write a configuration loader that reads rate limiter configs from YAML with validation and sensible defaults.",
            "Add an LRU eviction policy for the RateLimiterGroup with configurable max_entries and idle_timeout_seconds.",
            "Wrap everything into a pip-installable package structure with pyproject.toml, __init__.py exports, and a CLI entry point.",
        ]
    },
    # Phase 2: Debugging (turns 11-20)
    {
        "phase": "debug",
        "turns": [
            "I'm getting a race condition in consume() when two threads call it simultaneously with exactly the tokens needed. The lock doesn't seem to help. Can you trace through the logic and find the bug?",
            "The get_wait_time() method returns negative values sometimes. Here's the traceback: ... Can you identify what's wrong?",
            "RateLimiterGroup.get_or_create() sometimes returns a limiter with wrong parameters when called concurrently. Explain the potential issue.",
            "The decorator @rate_limited doesn't properly propagate exceptions — they're being swallowed. Fix the error handling.",
            "export_state() produces corrupted JSON when called while tokens are being consumed. How should we fix the serialization?",
            "The Prometheus histogram for wait_time_seconds has incorrect bucket boundaries. Review the histogram configuration.",
            "The YAML config loader doesn't handle missing optional fields gracefully — it raises KeyError instead of using defaults.",
            "LRU eviction is evicting active limiters. The idle_timeout calculation seems wrong. Debug the eviction logic.",
            "The CLI entry point crashes with a specific YAML config. Here's the config and error: ... What's the root cause?",
            "Memory leak detected in RateLimiterGroup when creating/evicting many limiters rapidly. Where's the leak?",
        ]
    },
    # Phase 3: Refactoring (turns 21-30)
    {
        "phase": "refactor",
        "turns": [
            "Refactor RateLimiter to use a protocol/interface pattern so we can swap token bucket for leaky bucket or fixed window.",
            "Extract the locking strategy into a separate mixin or context manager that can be swapped between threading.Lock, asyncio.Lock, and no-lock.",
            "Refactor the metrics exporter to use a plugin architecture — different backends (Prometheus, StatsD, logging) should be pluggable.",
            "Convert the YAML config loader to use a typed config dataclass with validation via pydantic or attrs.",
            "Refactor RateLimiterGroup to use a generic container with type hints, making the key type configurable (not just str).",
            "Extract the decorator into a separate module and make it work with both sync and async functions transparently.",
            "Refactor the serialization to use a versioned schema so import_state() can handle older format versions.",
            "Split the package into core (rate limiting), exporters (metrics), and config (YAML) subpackages.",
            "Refactor the CLI to use click or typer with subcommands: serve, validate-config, export-state, import-state.",
            "Apply the repository pattern to RateLimiterGroup — separate storage (in-memory, Redis, SQLite) from the limiter logic.",
        ]
    },
    # Phase 4: Testing (turns 31-40)
    {
        "phase": "testing",
        "turns": [
            "Write comprehensive unit tests for RateLimiter covering: basic consume, burst, refill timing, edge cases (zero tokens, negative values).",
            "Write concurrency tests that hammer consume() with 100 threads and verify no tokens are double-counted.",
            "Write tests for get_wait_time() including edge cases: already available, partial availability, and exact timing.",
            "Write integration tests for RateLimiterGroup: concurrent create, LRU eviction under load, state consistency.",
            "Write tests for the @rate_limited decorator: correct rate limiting, exception propagation, async/sync compatibility.",
            "Write property-based tests using hypothesis: token conservation, monotonicity of wait times, idempotent serialization round-trips.",
            "Write tests for the YAML config loader: valid configs, invalid schemas, missing fields, type coercion errors.",
            "Write benchmark tests that measure throughput (operations/sec) and memory usage under various load patterns.",
            "Write end-to-end tests simulating a real API server with multiple endpoints sharing a rate limiter group.",
            "Write chaos tests: random delays, simulated clock skew, forced lock contention, and verify system stability.",
        ]
    },
    # Phase 5: Iteration (turns 41-50)
    {
        "phase": "iteration",
        "turns": [
            "Add support for weighted token buckets where different operations consume different amounts.",
            "Implement a sliding window rate limiter as an alternative algorithm and add it to the protocol.",
            "Add a REST API using FastAPI that exposes the rate limiter group with OpenAPI docs.",
            "Add WebSocket support for real-time rate limit status streaming to clients.",
            "Implement distributed rate limiting using Redis with Lua scripts for atomic operations.",
            "Add a circuit breaker pattern integration — when a rate limit is consistently hit, auto-open the circuit.",
            "Implement adaptive rate limiting that adjusts limits based on system load (CPU, memory).",
            "Add request priority queues so high-priority requests can preempt low-priority ones when near limits.",
            "Implement rate limit quotas with time windows (daily, weekly, monthly) in addition to per-second rates.",
            "Write a migration guide and changelog for v2.0 with all the new features and breaking changes.",
        ]
    },
]

# ── Quality Metrics ────────────────────────────────────────────────────

def compute_quality_metrics(response: str, prompt: str, turn: int, phase: str) -> dict:
    """Compute quality signals for a single turn response."""
    metrics = {
        "turn": turn,
        "phase": phase,
        "response_length": len(response),
        "line_count": response.count("\n") + 1,
    }

    # Coherence: does response contain code-like content when expected?
    code_indicators = ["def ", "class ", "import ", "return ", "if ", "for ", "while ", "{", "}", "=>"]
    metrics["code_density"] = sum(1 for ind in code_indicators if ind in response) / len(code_indicators)

    # Hallucination detection: references to non-existent earlier context
    hallucination_phrases = [
        "as mentioned earlier", "as we discussed", "like before",
        "remember when", "from the previous turn", "as shown above",
        "earlier in our conversation",
    ]
    metrics["hallucinated_references"] = sum(
        1 for p in hallucination_phrases if p.lower() in response.lower()
    )

    # Structural quality: does it have proper formatting?
    metrics["has_headers"] = bool(re.search(r"^#{1,3}\s", response, re.MULTILINE))
    metrics["has_code_blocks"] = response.count("```") >= 2
    # Match bullet ("- item", "* item") or numbered ("1. item") list markers.
    metrics["has_lists"] = bool(re.search(r"^\s*(?:[-*]|\d+\.)\s", response, re.MULTILINE))

    # Repetition detection: check for repeated sentences
    sentences = [s.strip().lower() for s in re.split(r'[.!?]+', response) if len(s.strip()) > 20]
    unique_sentences = set(sentences)
    metrics["repetition_ratio"] = 1 - (len(unique_sentences) / max(len(sentences), 1))

    # Attention to prompt: does it address the specific request?
    prompt_keywords = set(re.findall(r'\b\w{4,}\b', prompt.lower()))
    response_words = set(re.findall(r'\b\w{4,}\b', response.lower()))
    metrics["prompt_relevance"] = len(prompt_keywords & response_words) / max(len(prompt_keywords), 1)

    # Composite quality score (0-1); weights sum to 1.00
    metrics["quality_score"] = (
        0.25 * min(metrics["code_density"] * 3, 1.0) +
        0.20 * min(metrics["prompt_relevance"] * 2, 1.0) +
        0.20 * (1.0 - min(metrics["repetition_ratio"] * 5, 1.0)) +
        0.15 * (1.0 if metrics["has_code_blocks"] else 0.5) +
        0.10 * (1.0 - min(metrics["hallucinated_references"] * 0.3, 1.0)) +
        0.10 * (1.0 if metrics["has_lists"] else 0.7)
    )

    return metrics

def detect_degradation(turn_metrics: list, window: int = 5, threshold: float = 0.15) -> list:
    """Detect quality degradation by comparing rolling windows."""
    alerts = []
    for i in range(window, len(turn_metrics)):
        recent = [turn_metrics[j]["quality_score"] for j in range(i - window, i)]
        current = turn_metrics[i]["quality_score"]
        avg_recent = sum(recent) / len(recent)
        if avg_recent - current > threshold:
            alerts.append({
                "turn": turn_metrics[i]["turn"],
                "phase": turn_metrics[i]["phase"],
                "current_score": round(current, 3),
                "window_avg": round(avg_recent, 3),
                "drop": round(avg_recent - current, 3),
            })
    return alerts

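# Example: with window=5 and threshold=0.15, five turns scoring 0.80 followed by
# one scoring 0.60 raise an alert (window avg 0.80 - current 0.60 = 0.20 > 0.15).
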
# ── Backends ───────────────────────────────────────────────────────────

def query_ollama(prompt: str, model: str, url: str, history: list, timeout: int = 120) -> tuple:
    """Query Ollama with conversation history. Returns (response, stats)."""
    messages = history + [{"role": "user", "content": prompt}]
    api_url = f"{url.rstrip('/')}/api/chat"

    start = time.time()
    resp = requests.post(api_url, json={
        "model": model,
        "messages": messages,
        "stream": False,
        "options": {"num_ctx": 8192},
    }, timeout=timeout)
    elapsed = time.time() - start

    data = resp.json()
    content = data.get("message", {}).get("content", "")
    eval_count = data.get("eval_count", 0)
    eval_duration = data.get("eval_duration", 0) / 1e9  # ns to s

    stats = {
        "elapsed_s": round(elapsed, 2),
        "tokens_generated": eval_count,
        "tokens_per_s": round(eval_count / max(eval_duration, 0.001), 1),
        "prompt_eval_count": data.get("prompt_eval_count", 0),
    }
    return content, stats

def query_llama_server(prompt: str, model: str, url: str, history: list,
                       kv_type: str = "f16", timeout: int = 120) -> tuple:
    """Query llama-server with conversation history and KV type."""
    messages = history + [{"role": "user", "content": prompt}]
    api_url = f"{url.rstrip('/')}/v1/chat/completions"

    start = time.time()
    resp = requests.post(api_url, json={
        "model": model,
        "messages": messages,
        "temperature": 0.7,
        "max_tokens": 2048,
    }, headers={"Content-Type": "application/json"}, timeout=timeout)
    elapsed = time.time() - start

    data = resp.json()
    content = data["choices"][0]["message"]["content"]
    usage = data.get("usage", {})

    stats = {
        "elapsed_s": round(elapsed, 2),
        "tokens_generated": usage.get("completion_tokens", 0),
        "prompt_tokens": usage.get("prompt_tokens", 0),
        "kv_type": kv_type,
    }
    return content, stats

# ── Main ───────────────────────────────────────────────────────────────

def run_session(args) -> dict:
    """Run the full 50-turn conversation session."""
    total_turns = args.turns
    history = []
    turn_metrics = []
    all_responses = []

    # Flatten conversation cycle
    all_prompts = []
    for phase_data in CONVERSATION_CYCLE:
        for turn_prompt in phase_data["turns"]:
            all_prompts.append((phase_data["phase"], turn_prompt))

    # Repeat cycle if needed
    while len(all_prompts) < total_turns:
        all_prompts.extend(all_prompts)

    all_prompts = all_prompts[:total_turns]

    query_fn = query_ollama if args.backend == "ollama" else query_llama_server
    query_kwargs = {"model": args.model, "url": args.url}
    if args.backend == "llama-server":
        query_kwargs["kv_type"] = args.kv_type

    print(f"\n{'='*70}")
    print(f"Long-Session Quality Test — {total_turns} turns")
    print(f"Backend: {args.backend} | Model: {args.model}")
    if args.backend == "llama-server":
        print(f"KV Type: {args.kv_type}")
    print(f"{'='*70}\n")

    for i, (phase, prompt) in enumerate(all_prompts):
        turn_num = i + 1
        print(f"[Turn {turn_num:2d}/{total_turns}] Phase: {phase:12s} | ", end="", flush=True)

        try:
            response, stats = query_fn(prompt, history=history, **query_kwargs, timeout=args.timeout)
        except Exception as e:
            print(f"ERROR: {e}")
            response = f"[ERROR: {e}]"
            stats = {"elapsed_s": 0, "tokens_generated": 0}

        metrics = compute_quality_metrics(response, prompt, turn_num, phase)
        metrics.update(stats)
        turn_metrics.append(metrics)
        all_responses.append({"turn": turn_num, "phase": phase, "prompt": prompt, "response": response})

        # Update history (keep last N turns to manage context)
        history.append({"role": "user", "content": prompt})
        history.append({"role": "assistant", "content": response})
        if len(history) > args.history_window * 2:
            history = history[-(args.history_window * 2):]

        print(f"score={metrics['quality_score']:.2f} | "
              f"len={metrics['response_length']:4d} | "
              f"{stats.get('tokens_per_s', '?')} tok/s | "
              f"{stats['elapsed_s']:.1f}s")

        if args.delay > 0:
            time.sleep(args.delay)

    # Detect degradation
    degradation = detect_degradation(turn_metrics)

    # Build report
    report = {
        "config": {
            "backend": args.backend,
            "model": args.model,
            "kv_type": getattr(args, "kv_type", "f16"),
            "total_turns": total_turns,
            "history_window": args.history_window,
            "timestamp": datetime.now(timezone.utc).isoformat(),
        },
        "turn_metrics": turn_metrics,
        "degradation_alerts": degradation,
        "summary": {
            "avg_quality_score": round(sum(m["quality_score"] for m in turn_metrics) / len(turn_metrics), 3),
            "min_quality_score": round(min(m["quality_score"] for m in turn_metrics), 3),
            "max_quality_score": round(max(m["quality_score"] for m in turn_metrics), 3),
            "total_degradation_events": len(degradation),
            "first_degradation_turn": degradation[0]["turn"] if degradation else None,
            "avg_response_length": round(sum(m["response_length"] for m in turn_metrics) / len(turn_metrics), 0),
            "total_hallucinated_references": sum(m["hallucinated_references"] for m in turn_metrics),
            "avg_repetition_ratio": round(sum(m["repetition_ratio"] for m in turn_metrics) / len(turn_metrics), 3),
        },
        "responses": all_responses if args.save_responses else [],
    }

    return report

def compare_reports(report_a: dict, report_b: dict) -> dict:
    """Compare two session reports and highlight differences."""
    sa = report_a["summary"]
    sb = report_b["summary"]
    label_a = report_a["config"].get("kv_type", "run_a")
    label_b = report_b["config"].get("kv_type", "run_b")

    comparison = {
        "labels": [label_a, label_b],
        "avg_quality": [sa["avg_quality_score"], sb["avg_quality_score"]],
        "min_quality": [sa["min_quality_score"], sb["min_quality_score"]],
        "degradation_events": [sa["total_degradation_events"], sb["total_degradation_events"]],
        "first_degradation": [sa["first_degradation_turn"], sb["first_degradation_turn"]],
        "hallucinated_refs": [sa["total_hallucinated_references"], sb["total_hallucinated_references"]],
        "repetition_ratio": [sa["avg_repetition_ratio"], sb["avg_repetition_ratio"]],
        "quality_delta": round(sb["avg_quality_score"] - sa["avg_quality_score"], 3),
        "verdict": "",
    }

    if comparison["quality_delta"] > 0.05:
        comparison["verdict"] = f"{label_b} is BETTER by {comparison['quality_delta']:.3f}"
    elif comparison["quality_delta"] < -0.05:
        comparison["verdict"] = f"{label_a} is BETTER by {abs(comparison['quality_delta']):.3f}"
    else:
        comparison["verdict"] = "No significant quality difference"

    return comparison

def print_report(report: dict):
    """Print a human-readable summary."""
    s = report["summary"]
    c = report["config"]
    d = report["degradation_alerts"]

    print(f"\n{'='*70}")
    print("LONG-SESSION QUALITY REPORT")
    print(f"{'='*70}")
    print(f"Backend: {c['backend']} | Model: {c['model']} | KV: {c.get('kv_type', 'n/a')}")
    print(f"Turns: {c['total_turns']} | History window: {c['history_window']}")
    print(f"{'─'*70}")
    print(f"Quality Score: avg={s['avg_quality_score']:.3f} min={s['min_quality_score']:.3f} max={s['max_quality_score']:.3f}")
    print(f"Avg Response: {s['avg_response_length']:.0f} chars")
    print(f"Repetition: {s['avg_repetition_ratio']:.3f}")
    print(f"Hallucinations: {s['total_hallucinated_references']} total")
    print(f"Degradations: {s['total_degradation_events']} events")

    if s["first_degradation_turn"]:
        print(f"  ⚠ First degradation at turn {s['first_degradation_turn']}")
    else:
        print("  ✓ No significant degradation detected")

    if d:
        print(f"\n{'─'*70}")
        print("DEGRADATION ALERTS:")
        for alert in d:
            print(f"  Turn {alert['turn']:2d} [{alert['phase']:10s}]: "
                  f"score={alert['current_score']:.3f} "
                  f"(window avg={alert['window_avg']:.3f}, "
                  f"drop={alert['drop']:.3f})")

    # Per-phase averages
    phases = {}
    for m in report["turn_metrics"]:
        phases.setdefault(m["phase"], []).append(m["quality_score"])
    print(f"\n{'─'*70}")
    print("PER-PHASE AVERAGES:")
    for phase, scores in phases.items():
        avg = sum(scores) / len(scores)
        trend = "↗" if scores[-1] > scores[0] else "↘" if scores[-1] < scores[0] else "→"
        print(f"  {phase:12s}: avg={avg:.3f} trend={trend} "
              f"first={scores[0]:.3f} last={scores[-1]:.3f}")
    print(f"{'='*70}\n")

def print_comparison(comp: dict):
    """Print comparison between two runs."""
    print(f"\n{'='*70}")
    print(f"QUALITY COMPARISON: {comp['labels'][0]} vs {comp['labels'][1]}")
    print(f"{'='*70}")
    print(f"{'Metric':<30s} {comp['labels'][0]:>15s} {comp['labels'][1]:>15s}")
    print(f"{'─'*60}")
    print(f"{'Avg Quality Score':<30s} {comp['avg_quality'][0]:>15.3f} {comp['avg_quality'][1]:>15.3f}")
    print(f"{'Min Quality Score':<30s} {comp['min_quality'][0]:>15.3f} {comp['min_quality'][1]:>15.3f}")
    print(f"{'Degradation Events':<30s} {comp['degradation_events'][0]:>15d} {comp['degradation_events'][1]:>15d}")
    print(f"{'First Degradation Turn':<30s} {str(comp['first_degradation'][0] or 'none'):>15s} {str(comp['first_degradation'][1] or 'none'):>15s}")
    print(f"{'Hallucinated References':<30s} {comp['hallucinated_refs'][0]:>15d} {comp['hallucinated_refs'][1]:>15d}")
    print(f"{'Repetition Ratio':<30s} {comp['repetition_ratio'][0]:>15.3f} {comp['repetition_ratio'][1]:>15.3f}")
    print(f"{'─'*60}")
    print(f"Verdict: {comp['verdict']}")
    print(f"{'='*70}\n")

def main():
    parser = argparse.ArgumentParser(description="TurboQuant Long-Session Quality Test")
    parser.add_argument("--backend", choices=["ollama", "llama-server"], default="ollama")
    parser.add_argument("--model", default="llama3", help="Model name")
    parser.add_argument("--url", default="http://localhost:11434", help="Backend URL")
    parser.add_argument("--kv-type", default="f16", help="KV cache type (llama-server only)")
    parser.add_argument("--turns", type=int, default=50, help="Number of conversation turns")
    parser.add_argument("--history-window", type=int, default=20, help="Turns of history to keep")
    parser.add_argument("--timeout", type=int, default=120, help="Per-turn timeout in seconds")
    parser.add_argument("--delay", type=float, default=0.5, help="Delay between turns in seconds")
    parser.add_argument("--output", "-o", help="Output JSON file path")
    parser.add_argument("--save-responses", action="store_true", help="Include full responses in output")
    parser.add_argument("--compare", nargs=2, metavar=("FILE_A", "FILE_B"),
                        help="Compare two previously saved run reports")

    args = parser.parse_args()

    # Compare mode
    if args.compare:
        with open(args.compare[0]) as f:
            report_a = json.load(f)
        with open(args.compare[1]) as f:
            report_b = json.load(f)
        comp = compare_reports(report_a, report_b)
        print_comparison(comp)
        return

    # Run mode
    if requests is None:
        print("ERROR: 'requests' package required. Install with: pip install requests")
        sys.exit(1)

    report = run_session(args)
    print_report(report)

    # Save report
    output_path = args.output or f"benchmarks/long_session_{args.kv_type}_{int(time.time())}.json"
    os.makedirs(os.path.dirname(output_path) or ".", exist_ok=True)
    with open(output_path, "w") as f:
        json.dump(report, f, indent=2)
    print(f"Report saved to: {output_path}")


if __name__ == "__main__":
    main()
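The scoring pipeline above runs fine without any model backend, which makes it easy to sanity-check the thresholds before burning GPU time. A minimal sketch, assuming the repo root is on `sys.path` (benchmarks/ ships no package marker, so the import path is an assumption, and the canned strings are hypothetical):

```python
# Offline exercise of compute_quality_metrics + detect_degradation.
from benchmarks.run_long_session import compute_quality_metrics, detect_degradation

canned = [
    "```python\ndef consume(self, n):\n    return True\n```\nA token bucket consume method.",
    "```python\nclass RateLimiter:\n    pass\n```\n- covers burst\n- covers refill",
    "Sure, here is a vague answer with no code at all.",  # should score lower
] * 4  # 12 synthetic turns

metrics = [
    compute_quality_metrics(resp, prompt="Write a RateLimiter class", turn=i + 1, phase="code_gen")
    for i, resp in enumerate(canned)
]
for alert in detect_degradation(metrics, window=5, threshold=0.15):
    print(f"degradation at turn {alert['turn']}: drop={alert['drop']}")
```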
build-wasm.sh (new executable file, +38)
@@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -euo pipefail

# Build TurboQuant as WebAssembly using Emscripten
# Requires: emsdk, python3, cmake, make (or ninja)
#
# Usage:
#   source ./emsdk_env.sh   # if you have emsdk installed
#   ./build-wasm.sh
#
# Output:
#   build-wasm/turboquant-wasm.{js,wasm} + wasm-demo/turboquant-wasm.js

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BUILD_DIR="${SCRIPT_DIR}/build-wasm"
OUT_JS="${SCRIPT_DIR}/wasm-demo/turboquant-wasm.js"

# Verify emscripten available
if ! command -v emcc &>/dev/null; then
    echo "ERROR: emcc not found. Install Emscripten SDK first:"
    echo "  git clone https://github.com/emscripten-core/emsdk.git"
    echo "  cd emsdk && ./emsdk install latest && ./emsdk activate latest"
    echo "  source ./emsdk_env.sh"
    exit 1
fi

# Configure with Emscripten toolchain
rm -rf "${BUILD_DIR}"
emcmake cmake -S "${SCRIPT_DIR}" -B "${BUILD_DIR}" -DTURBOQUANT_BUILD_WASM=ON -DCMAKE_BUILD_TYPE=Release

# Build
emmake make -C "${BUILD_DIR}" turboquant-wasm

# Copy generated JS glue to demo directory (renamed for html script tag),
# plus the .wasm binary: the glue fetches it relative to the page, so it must
# sit next to the copied JS, not only in the build tree.
cp "${BUILD_DIR}/libturboquant-wasm.js" "${OUT_JS}"
cp "${BUILD_DIR}/libturboquant-wasm.wasm" "${SCRIPT_DIR}/wasm-demo/"
echo "WASM build complete: ${OUT_JS} + libturboquant-wasm.wasm (side-by-side)"
echo "To demo: cd ${SCRIPT_DIR} && python3 -m http.server 8080"
echo "Then open http://localhost:8080/wasm-demo/"
check_markdown_links.py (new file, +124)
@@ -0,0 +1,124 @@
#!/usr/bin/env python3
"""Check local markdown links.

Scans markdown files for local links and fails on broken targets.
Ignores:
- external URLs (http/https)
- anchors (#section)
- mailto: and tel:
- links inside fenced code blocks
- generated/build directories
"""

from __future__ import annotations

import argparse
import re
import sys
from pathlib import Path
from typing import Iterable

CODE_FENCE_RE = re.compile(r"^```")
LINK_RE = re.compile(r"(?<!!)\[[^\]]+\]\(([^)]+)\)")
DEFAULT_SKIP_DIRS = {
    ".git",
    ".gitea",
    ".pytest_cache",
    "__pycache__",
    "build",
    "dist",
    "node_modules",
    "llama-cpp-fork",
}


def should_ignore_target(target: str) -> bool:
    target = target.strip()
    return (
        not target
        or target.startswith("http://")
        or target.startswith("https://")
        or target.startswith("mailto:")
        or target.startswith("tel:")
        or target.startswith("#")
    )


def normalize_target(target: str) -> str:
    target = target.strip()
    if target.startswith("<") and target.endswith(">"):
        target = target[1:-1].strip()
    if "#" in target:
        target = target.split("#", 1)[0]
    return target


def iter_markdown_files(root: Path, skip_dirs: set[str] | None = None) -> Iterable[Path]:
    skip_dirs = skip_dirs or DEFAULT_SKIP_DIRS
    for path in root.rglob("*.md"):
        if any(part in skip_dirs for part in path.relative_to(root).parts):
            continue
        yield path


def iter_links(path: Path) -> Iterable[tuple[int, str]]:
    in_code_fence = False
    for line_no, line in enumerate(path.read_text(encoding="utf-8").splitlines(), start=1):
        if CODE_FENCE_RE.match(line.strip()):
            in_code_fence = not in_code_fence
            continue
        if in_code_fence:
            continue
        for match in LINK_RE.finditer(line):
            yield line_no, match.group(1)


def resolve_target(source: Path, target: str, root: Path) -> Path:
    if target.startswith("/"):
        return (root / target.lstrip("/")).resolve()
    return (source.parent / target).resolve()


def find_broken_links(root: Path, skip_dirs: set[str] | None = None) -> list[dict]:
    root = root.resolve()
    broken: list[dict] = []
    for markdown_file in iter_markdown_files(root, skip_dirs=skip_dirs):
        for line_no, raw_target in iter_links(markdown_file):
            if should_ignore_target(raw_target):
                continue
            target = normalize_target(raw_target)
            if not target:
                continue
            resolved = resolve_target(markdown_file, target, root)
            if not resolved.exists():
                broken.append(
                    {
                        "source": str(markdown_file),
                        "line": line_no,
                        "target": target,
                        "resolved": str(resolved),
                    }
                )
    return broken


def main() -> int:
    parser = argparse.ArgumentParser(description="Fail on broken local markdown links.")
    parser.add_argument("root", nargs="?", default=".", help="Repo root to scan (default: .)")
    args = parser.parse_args()

    root = Path(args.root)
    broken = find_broken_links(root)
    if not broken:
        print("PASS: No broken local markdown links")
        return 0

    print("Broken local markdown links found:")
    for item in broken:
        source = Path(item["source"]).relative_to(root.resolve())
        print(f"{source}:{item['line']}: missing target -> {item['target']}")
    return 1


if __name__ == "__main__":
    sys.exit(main())
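The checker also works as a library; a minimal sketch of programmatic use (the path argument is illustrative):

```python
# Use find_broken_links directly instead of the CLI exit-code interface.
from pathlib import Path
from check_markdown_links import find_broken_links

# Each entry is a dict with "source", "line", "target", and "resolved" keys.
for item in find_broken_links(Path(".")):
    print(f"{item['source']}:{item['line']}: {item['target']}")
```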
@@ -385,7 +385,7 @@ Step 7: If pass → production. If fail → drop to turbo3 or adjust per-layer p
 
 ---
 
-*Repo: http://143.198.27.163:3000/Timmy_Foundation/turboquant*
+*Repo: https://forge.alexanderwhitestone.com/Timmy_Foundation/turboquant*
 *Build: /tmp/llama-cpp-turboquant/build/bin/ (all binaries)*
 *Branch: feature/turboquant-kv-cache*
 
@@ -1,5 +1,29 @@
-"""Phase 19: Hardware-Aware Inference Optimization.
-Part of the TurboQuant suite for local inference excellence.
-"""
-import logging
-# ... (rest of the code)
+"""Backward-compatible shim for hardware-aware quantization selection.
+
+The original Phase 19 placeholder `hardware_optimizer.py` never shipped real
+logic. The canonical implementation now lives in `evolution.quant_selector`.
+This shim preserves the legacy import path for any downstream callers while
+making `quant_selector.py` the single source of truth.
+"""
+
+from evolution.quant_selector import (  # noqa: F401
+    HardwareInfo,
+    QuantLevel,
+    QuantSelection,
+    QUANT_LEVELS,
+    detect_hardware,
+    estimate_kv_cache_gb,
+    estimate_model_memory_gb,
+    select_quant_level,
+)
+
+__all__ = [
+    "HardwareInfo",
+    "QuantLevel",
+    "QuantSelection",
+    "QUANT_LEVELS",
+    "detect_hardware",
+    "estimate_kv_cache_gb",
+    "estimate_model_memory_gb",
+    "select_quant_level",
+]
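Because the shim re-exports the canonical objects, legacy callers keep working unchanged; a quick sketch of the equivalence (the same identity the new test file at the bottom of this diff asserts):

```python
from evolution import hardware_optimizer
from evolution.quant_selector import select_quant_level

# The legacy path and the canonical path resolve to the same function object.
assert hardware_optimizer.select_quant_level is select_quant_level
```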
evolution/quant_selector.py (new file, +548)
@@ -0,0 +1,548 @@
"""Auto-select TurboQuant compression level based on available VRAM/RAM.

Detects hardware resources at startup and picks the highest quality
quantization level that fits within available memory. Supports Apple
Silicon unified memory, NVIDIA GPUs (via nvidia-smi), and CPU-only fallback.

Usage:
    from evolution.quant_selector import select_quant_level

    selection = select_quant_level(model_size_gb=14.0, context_length=32768)
    print(selection.level.name)  # "turbo4"
    print(selection.reasoning)   # "M4 Max 36GB unified: turbo4 fits 14.0GB model + ..."
    print(selection.env_vars)    # {"TURBO_LAYER_ADAPTIVE": "7"}
"""

import logging
import os
import platform
import subprocess
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional

logger = logging.getLogger(__name__)

# ── Quant Level Definitions ───────────────────────────────────────────────────

@dataclass
class QuantLevel:
    """A TurboQuant compression level with its memory characteristics."""
    name: str                      # e.g. "turbo4"
    bits_per_channel: float        # e.g. 3.5 for turbo4
    compression_ratio: float       # vs uncompressed KV cache
    quality_label: str             # "best", "high", "balanced", "fast"
    layer_adaptive: int            # TURBO_LAYER_ADAPTIVE value (0-7)
    kv_type: str                   # -ctk/-ctv flag value
    min_memory_headroom_gb: float  # Minimum free memory to recommend this level
    description: str = ""


# Ordered from highest quality to most aggressive compression
QUANT_LEVELS = [
    QuantLevel(
        name="turbo4",
        bits_per_channel=3.5,
        compression_ratio=4.2,
        quality_label="best",
        layer_adaptive=7,
        kv_type="turbo4",
        min_memory_headroom_gb=4.0,
        description="PolarQuant + QJL 4-bit. Best quality, ~4.2x KV compression."
    ),
    QuantLevel(
        name="turbo3",
        bits_per_channel=2.5,
        compression_ratio=6.0,
        quality_label="high",
        layer_adaptive=5,
        kv_type="turbo3",
        min_memory_headroom_gb=3.0,
        description="3-bit TurboQuant. High quality, ~6x KV compression."
    ),
    QuantLevel(
        name="turbo2",
        bits_per_channel=1.5,
        compression_ratio=10.0,
        quality_label="balanced",
        layer_adaptive=3,
        kv_type="turbo2",
        min_memory_headroom_gb=2.0,
        description="2-bit TurboQuant. Balanced, ~10x KV compression."
    ),
    QuantLevel(
        name="q4_0",
        bits_per_channel=4.0,
        compression_ratio=3.5,
        quality_label="fast",
        layer_adaptive=0,
        kv_type="q4_0",
        min_memory_headroom_gb=1.5,
        description="Standard 4-bit quant. Fast fallback, no TurboQuant."
    ),
]

# ── Hardware Detection ────────────────────────────────────────────────────────

@dataclass
class HardwareInfo:
    """Detected hardware resources."""
    total_memory_gb: float
    available_memory_gb: float
    gpu_memory_gb: Optional[float] = None
    gpu_name: Optional[str] = None
    is_apple_silicon: bool = False
    chip_name: Optional[str] = None
    cpu_cores: int = 0
    detection_method: str = ""


def detect_hardware() -> HardwareInfo:
    """Detect available memory and GPU resources."""
    system = platform.system()

    if system == "Darwin":
        return _detect_apple_silicon()
    elif system == "Linux":
        return _detect_linux()
    else:
        return _detect_generic(system)


def _detect_apple_silicon() -> HardwareInfo:
    """Detect Apple Silicon unified memory."""
    info = HardwareInfo(
        total_memory_gb=0,
        available_memory_gb=0,
        is_apple_silicon=True,
        detection_method="sysctl",
    )

    try:
        # Get total memory
        result = subprocess.run(
            ["sysctl", "-n", "hw.memsize"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.total_memory_gb = int(result.stdout.strip()) / (1024**3)

        # Get chip name
        result = subprocess.run(
            ["sysctl", "-n", "machdep.cpu.brand_string"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.chip_name = result.stdout.strip()

        # Try to get GPU name (Apple Silicon)
        result = subprocess.run(
            ["system_profiler", "SPDisplaysDataType"],
            capture_output=True, text=True, timeout=10
        )
        if result.returncode == 0:
            for line in result.stdout.split("\n"):
                if "Chipset" in line or "GPU" in line:
                    info.gpu_name = line.split(":")[-1].strip()
                    break

        # Estimate available memory (vm_stat)
        result = subprocess.run(
            ["vm_stat"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            # vm_stat reports its page size in the header line, e.g.
            # "Mach Virtual Memory Statistics: (page size of 16384 bytes)".
            # Apple Silicon uses 16 KiB pages, so don't hardcode 4096.
            page_size = 16384
            header = result.stdout.split("\n", 1)[0]
            if "page size of" in header:
                try:
                    page_size = int(header.split("page size of")[1].split()[0])
                except (IndexError, ValueError):
                    pass
            free_pages = 0
            for line in result.stdout.split("\n"):
                if "Pages free:" in line:
                    try:
                        free_pages = int(line.split(":")[-1].strip().rstrip("."))
                    except ValueError:
                        pass
            # Available ≈ free + some speculative (conservative: just free)
            info.available_memory_gb = (free_pages * page_size) / (1024**3)

        # Fallback if vm_stat parsing failed
        if info.available_memory_gb < 1:
            # Conservative: 70% of total
            info.available_memory_gb = info.total_memory_gb * 0.70

        # Apple Silicon shares memory — GPU memory = total memory
        info.gpu_memory_gb = info.total_memory_gb

        # Detect CPU cores
        result = subprocess.run(
            ["sysctl", "-n", "hw.ncpu"],
            capture_output=True, text=True, timeout=5
        )
        if result.returncode == 0:
            info.cpu_cores = int(result.stdout.strip())

    except Exception as e:
        logger.warning(f"Apple Silicon detection failed: {e}")
        # Fallback
        info.total_memory_gb = 16.0
        info.available_memory_gb = 12.0
        info.detection_method = "fallback"

    return info

def _detect_linux() -> HardwareInfo:
    """Detect Linux system with optional NVIDIA GPU."""
    info = HardwareInfo(
        total_memory_gb=0,
        available_memory_gb=0,
        detection_method="proc",
    )

    try:
        # Read /proc/meminfo
        with open("/proc/meminfo", "r") as f:
            meminfo = f.read()

        for line in meminfo.split("\n"):
            if line.startswith("MemTotal:"):
                kb = int(line.split()[1])
                info.total_memory_gb = kb / (1024 * 1024)
            elif line.startswith("MemAvailable:"):
                kb = int(line.split()[1])
                info.available_memory_gb = kb / (1024 * 1024)

        # CPU cores
        info.cpu_cores = os.cpu_count() or 1

        # Check for NVIDIA GPU
        try:
            result = subprocess.run(
                ["nvidia-smi", "--query-gpu=name,memory.total,memory.free",
                 "--format=csv,noheader,nounits"],
                capture_output=True, text=True, timeout=10
            )
            if result.returncode == 0 and result.stdout.strip():
                lines = result.stdout.strip().split("\n")
                if lines:
                    parts = lines[0].split(", ")
                    if len(parts) >= 3:
                        info.gpu_name = parts[0].strip()
                        info.gpu_memory_gb = float(parts[1]) / 1024  # MB to GB
                        gpu_free = float(parts[2]) / 1024
                        # Use GPU free for VRAM-based selection
                        info.available_memory_gb = max(info.available_memory_gb, gpu_free)
                        info.detection_method = "nvidia-smi"
        except (FileNotFoundError, subprocess.TimeoutExpired):
            pass  # No NVIDIA GPU

    except Exception as e:
        logger.warning(f"Linux detection failed: {e}")
        info.total_memory_gb = 16.0
        info.available_memory_gb = 12.0
        info.detection_method = "fallback"

    return info


def _detect_generic(system: str) -> HardwareInfo:
    """Fallback detection for unknown systems."""
    try:
        import psutil
    except ImportError:
        # psutil unavailable: conservative static fallback
        return HardwareInfo(
            total_memory_gb=16.0,
            available_memory_gb=12.0,
            cpu_cores=os.cpu_count() or 1,
            detection_method="fallback",
        )
    mem = psutil.virtual_memory()
    return HardwareInfo(
        total_memory_gb=mem.total / (1024**3),
        available_memory_gb=mem.available / (1024**3),
        cpu_cores=os.cpu_count() or 1,
        detection_method="psutil",
    )

# ── KV Cache Memory Estimation ───────────────────────────────────────────────

def estimate_kv_cache_gb(
    context_length: int,
    num_layers: int = 48,
    num_kv_heads: int = 8,
    head_dim: int = 128,
    bits_per_channel: float = 3.5,
) -> float:
    """Estimate KV cache memory for given parameters.

    Formula: 2 (K+V) × layers × kv_heads × head_dim × context_length × bits/8
    """
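    # Worked example with the defaults above: 2 (K+V) * 48 layers * 8 KV heads
    # * 128 head_dim * 32768 tokens = 3,221,225,472 elements; at 3.5 bits
    # (0.4375 bytes) each, that is 1,409,286,144 bytes ≈ 1.31 GB.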
    bytes_per_element = bits_per_channel / 8.0
    total_bytes = 2 * num_layers * num_kv_heads * head_dim * context_length * bytes_per_element
    return total_bytes / (1024**3)

def estimate_model_memory_gb(model_size_gb: float, quant_type: str = "q4_k_m") -> float:
    """Estimate model weights memory. Returns loaded size in GB.

    This is a rough estimate — actual depends on exact quant format.
    `model_size_gb` is assumed to already be the on-disk quantized size, so it
    is returned unchanged; the multiplier table is kept for reference when
    converting from an fp16 baseline.
    """
    # Common quant ratios (vs fp16); reference only, not applied below
    quant_multipliers = {
        "f16": 1.0,
        "q8_0": 0.5,
        "q6_k": 0.42,
        "q5_k_m": 0.37,
        "q4_k_m": 0.32,
        "q3_k_m": 0.27,
        "q2_k": 0.22,
    }
    _ = quant_type, quant_multipliers  # intentionally unused for now
    return model_size_gb

# ── Selection Logic ───────────────────────────────────────────────────────────

@dataclass
class QuantSelection:
    """Result of quantization level selection."""
    level: QuantLevel
    hardware: HardwareInfo
    reasoning: str
    total_required_gb: float
    available_gb: float
    headroom_gb: float
    env_vars: dict = field(default_factory=dict)
    server_flags: dict = field(default_factory=dict)
    warnings: list = field(default_factory=list)


def select_quant_level(
    model_size_gb: float = 14.0,
    context_length: int = 32768,
    num_layers: int = 48,
    num_kv_heads: int = 8,
    head_dim: int = 128,
    preferred_level: Optional[str] = None,
    force_cpu: bool = False,
) -> QuantSelection:
    """Select the best quantization level for available hardware.

    Args:
        model_size_gb: Size of the model weights in GB
        context_length: Target context length
        num_layers: Number of transformer layers
        num_kv_heads: Number of KV attention heads
        head_dim: Dimension per attention head
        preferred_level: Force a specific level (still checks if it fits)
        force_cpu: If True, ignore GPU memory

    Returns:
        QuantSelection with the chosen level and reasoning
    """
    hw = detect_hardware()

    if force_cpu:
        hw.gpu_memory_gb = None
        hw.gpu_name = None

    # Use the most restrictive memory constraint
    # For Apple Silicon: unified memory, use total
    # For NVIDIA: use GPU VRAM
    # For CPU-only: use system RAM
    if hw.gpu_memory_gb and hw.gpu_name:
        memory_pool_gb = hw.gpu_memory_gb
        memory_label = f"{hw.gpu_name} {hw.gpu_memory_gb:.0f}GB VRAM"
    elif hw.is_apple_silicon:
        memory_pool_gb = hw.total_memory_gb
        memory_label = f"{hw.chip_name or 'Apple Silicon'} {hw.total_memory_gb:.0f}GB unified"
    else:
        memory_pool_gb = hw.total_memory_gb
        memory_label = f"{hw.cpu_cores}c CPU {hw.total_memory_gb:.0f}GB RAM"

    model_mem = estimate_model_memory_gb(model_size_gb)

    # Try levels from best to most compressed
    chosen = None
    for level in QUANT_LEVELS:
        if preferred_level and level.name != preferred_level:
            continue

        kv_mem = estimate_kv_cache_gb(
            context_length, num_layers, num_kv_heads, head_dim,
            level.bits_per_channel
        )
        total_required = model_mem + kv_mem
        headroom = memory_pool_gb - total_required

        if headroom >= level.min_memory_headroom_gb:
            chosen = level
            break

        if preferred_level and level.name == preferred_level:
            # User forced this level but it doesn't fit
            chosen = level
            break

    if chosen is None:
        # Nothing fits — pick the most aggressive compression
        chosen = QUANT_LEVELS[-1]
        logger.warning(f"No quant level fits in {memory_pool_gb:.1f}GB. Using {chosen.name}.")

    # Calculate final numbers
    kv_mem = estimate_kv_cache_gb(
        context_length, num_layers, num_kv_heads, head_dim,
        chosen.bits_per_channel
    )
    total_required = model_mem + kv_mem
    headroom = memory_pool_gb - total_required

    # Build reasoning
    reasoning_parts = [
        f"{memory_label}:",
        f"{chosen.name} ({chosen.quality_label}, {chosen.bits_per_channel:.1f}b/ch,",
        f"{chosen.compression_ratio:.1f}x compression)",
        f"fits {model_mem:.1f}GB model + {kv_mem:.1f}GB KV cache",
        f"@ {context_length} tokens = {total_required:.1f}GB / {memory_pool_gb:.0f}GB",
        f"({headroom:.1f}GB headroom)"
    ]
    reasoning = " ".join(reasoning_parts)

    # Build environment variables for llama.cpp
    env_vars = {
        "TURBO_LAYER_ADAPTIVE": str(chosen.layer_adaptive),
    }

    # Build server flags
    server_flags = {
        "-ctk": chosen.kv_type,
        "-ctv": chosen.kv_type,
        "-c": str(context_length),
    }

    # Warnings
    warnings = []
    if headroom < 2.0:
        warnings.append(
            f"Low headroom ({headroom:.1f}GB). Consider reducing context length or model size."
        )
    if headroom < 0:
        warnings.append(
            f"OVERCOMMITTED: needs {total_required:.1f}GB but only {memory_pool_gb:.0f}GB available. "
            f"Inference may fail or swap heavily."
        )

    selection = QuantSelection(
        level=chosen,
        hardware=hw,
        reasoning=reasoning,
        total_required_gb=total_required,
        available_gb=memory_pool_gb,
        headroom_gb=headroom,
        env_vars=env_vars,
        server_flags=server_flags,
        warnings=warnings,
    )

    logger.info(f"Quant selection: {reasoning}")
    for w in warnings:
        logger.warning(w)

    return selection

# ── CLI ───────────────────────────────────────────────────────────────────────

def main():
    """CLI entry point for quant level selection."""
    import argparse
    import json

    parser = argparse.ArgumentParser(
        description="Auto-select TurboQuant compression level based on available hardware"
    )
    parser.add_argument("--model-size", type=float, default=14.0,
                        help="Model size in GB (default: 14.0)")
    parser.add_argument("--context", type=int, default=32768,
                        help="Target context length (default: 32768)")
    parser.add_argument("--layers", type=int, default=48,
                        help="Number of transformer layers (default: 48)")
    parser.add_argument("--kv-heads", type=int, default=8,
                        help="Number of KV attention heads (default: 8)")
    parser.add_argument("--head-dim", type=int, default=128,
                        help="Dimension per attention head (default: 128)")
    parser.add_argument("--prefer", type=str, default=None,
                        choices=[l.name for l in QUANT_LEVELS],
                        help="Prefer a specific quant level")
    parser.add_argument("--force-cpu", action="store_true",
                        help="Ignore GPU, use CPU memory only")
    parser.add_argument("--json", action="store_true",
                        help="JSON output for automation")
    parser.add_argument("--detect-only", action="store_true",
                        help="Only detect hardware, don't select")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO, format="%(message)s")

    if args.detect_only:
        hw = detect_hardware()
        if args.json:
            print(json.dumps(hw.__dict__, default=str, indent=2))
        else:
            print(f"Total memory: {hw.total_memory_gb:.1f} GB")
            print(f"Available: {hw.available_memory_gb:.1f} GB")
            if hw.gpu_memory_gb:
                print(f"GPU memory: {hw.gpu_memory_gb:.1f} GB")
            if hw.gpu_name:
                print(f"GPU: {hw.gpu_name}")
            if hw.is_apple_silicon:
                print(f"Chip: {hw.chip_name or 'Apple Silicon'}")
            print(f"CPU cores: {hw.cpu_cores}")
            print(f"Detection: {hw.detection_method}")
        return

    selection = select_quant_level(
        model_size_gb=args.model_size,
        context_length=args.context,
        num_layers=args.layers,
        num_kv_heads=args.kv_heads,
        head_dim=args.head_dim,
        preferred_level=args.prefer,
        force_cpu=args.force_cpu,
    )

    if args.json:
        result = {
            "level": selection.level.name,
            "bits_per_channel": selection.level.bits_per_channel,
            "compression_ratio": selection.level.compression_ratio,
            "quality": selection.level.quality_label,
            "reasoning": selection.reasoning,
            "total_required_gb": round(selection.total_required_gb, 2),
            "available_gb": round(selection.available_gb, 1),
            "headroom_gb": round(selection.headroom_gb, 2),
            "env_vars": selection.env_vars,
            "server_flags": selection.server_flags,
            "warnings": selection.warnings,
            "hardware": {
                "total_memory_gb": round(selection.hardware.total_memory_gb, 1),
                "gpu_name": selection.hardware.gpu_name,
                "is_apple_silicon": selection.hardware.is_apple_silicon,
                "chip_name": selection.hardware.chip_name,
                "cpu_cores": selection.hardware.cpu_cores,
            },
        }
        print(json.dumps(result, indent=2))
    else:
        print(f"Selected: {selection.level.name} ({selection.level.quality_label})")
        print(f"  {selection.reasoning}")
        print()
        print("Environment variables:")
        for k, v in selection.env_vars.items():
            print(f"  export {k}={v}")
        print()
        print("Server flags:")
        for k, v in selection.server_flags.items():
            print(f"  {k} {v}")
        if selection.warnings:
            print()
            for w in selection.warnings:
                print(f"  WARNING: {w}")


if __name__ == "__main__":
    main()
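End to end, the selector turns detected memory into concrete llama-server flags; a minimal sketch (model size and context are illustrative):

```python
from evolution.quant_selector import select_quant_level

sel = select_quant_level(model_size_gb=14.0, context_length=65536)
print(sel.level.name)   # e.g. "turbo4" on a machine with enough headroom
print(sel.reasoning)

# Assemble a launch command from server_flags ("-ctk", "-ctv", "-c").
flags = " ".join(f"{k} {v}" for k, v in sel.server_flags.items())
print(f"llama-server -m model.gguf {flags}")
```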
@@ -135,7 +135,5 @@ llama-server -m model.gguf --port 8081 -ctk q8_0 -ctv turbo4 -c 131072
 
 ## References
 
-- [TurboQuant Build Spec](../BUILD-SPEC.md)
-- [Phase 1 Report](../PHASE1-REPORT.md)
-- [Full Knowledge Transfer](../FULL-REPORT.md)
+- [Project Status](../docs/PROJECT_STATUS.md)
 - [llama.cpp TurboQuant Fork](https://github.com/TheTom/llama-cpp-turboquant)
tests/conftest.py (new file, +3)
@@ -0,0 +1,3 @@
"""Pytest configuration for turboquant."""
import sys, os
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
tests/roundtrip_test.cpp (new file, +104)
@@ -0,0 +1,104 @@
#include "llama-turbo.h"

#include <algorithm>  // std::max
#include <cmath>
#include <cstdint>
#include <iostream>
#include <random>
#include <stdexcept>  // std::runtime_error, thrown by require()
#include <string>
#include <vector>

namespace {

constexpr int   kDim             = 128;
constexpr float kCosineThreshold = 0.99f;
constexpr float kZeroTolerance   = 1.0e-6f;

[[nodiscard]] bool all_finite(const std::vector<float> & values) {
    for (float value : values) {
        if (!std::isfinite(value)) {
            return false;
        }
    }
    return true;
}

[[nodiscard]] float max_abs(const std::vector<float> & values) {
    float best = 0.0f;
    for (float value : values) {
        best = std::max(best, std::fabs(value));
    }
    return best;
}

[[nodiscard]] float cosine_similarity(const std::vector<float> & lhs, const std::vector<float> & rhs) {
    float dot = 0.0f;
    float lhs_norm = 0.0f;
    float rhs_norm = 0.0f;
    for (int i = 0; i < kDim; ++i) {
        dot      += lhs[i] * rhs[i];
        lhs_norm += lhs[i] * lhs[i];
        rhs_norm += rhs[i] * rhs[i];
    }

    const float denom = std::sqrt(lhs_norm) * std::sqrt(rhs_norm);
    return denom == 0.0f ? 1.0f : dot / denom;
}

[[nodiscard]] std::vector<float> roundtrip(const std::vector<float> & input, float & norm_out) {
    std::vector<uint8_t> packed(kDim / 2, 0);
    norm_out = -1.0f;
    polar_quant_encode_turbo4(input.data(), packed.data(), &norm_out, kDim);

    std::vector<float> decoded(kDim, 0.0f);
    polar_quant_decode_turbo4(packed.data(), decoded.data(), norm_out, kDim);
    return decoded;
}

void require(bool condition, const std::string & message) {
    if (!condition) {
        throw std::runtime_error(message);
    }
}

void test_zero_vector_roundtrip() {
    std::vector<float> zeros(kDim, 0.0f);
    float norm = -1.0f;
    const auto decoded = roundtrip(zeros, norm);

    require(norm == 0.0f, "zero vector should encode with zero norm");
    require(all_finite(decoded), "zero vector decode produced non-finite values");
    require(max_abs(decoded) <= kZeroTolerance, "zero vector decode should remain near zero");
}

void test_gaussian_roundtrip_quality() {
    std::mt19937 rng(12345);
    std::normal_distribution<float> dist(0.0f, 1.0f);

    std::vector<float> input(kDim, 0.0f);
    for (float & value : input) {
        value = dist(rng);
    }

    float norm = -1.0f;
    const auto decoded = roundtrip(input, norm);

    require(norm > 0.0f, "random vector should encode with positive norm");
    require(all_finite(decoded), "random vector decode produced non-finite values");

    const float cosine = cosine_similarity(input, decoded);
    require(cosine >= kCosineThreshold, "roundtrip cosine similarity below threshold");
}

}  // namespace

int main() {
    try {
        test_zero_vector_roundtrip();
        test_gaussian_roundtrip_quality();
        std::cout << "PASS: turboquant standalone roundtrip tests\n";
        return 0;
    } catch (const std::exception & exc) {
        std::cerr << "FAIL: " << exc.what() << '\n';
        return 1;
    }
}
21
tests/test_hardware_optimizer.py
Normal file
@@ -0,0 +1,21 @@
#!/usr/bin/env python3
"""Tests for hardware_optimizer compatibility shim."""

import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

from evolution import hardware_optimizer, quant_selector


def test_hardware_optimizer_reexports_quant_selector_api():
    assert hardware_optimizer.select_quant_level is quant_selector.select_quant_level
    assert hardware_optimizer.detect_hardware is quant_selector.detect_hardware
    assert hardware_optimizer.HardwareInfo is quant_selector.HardwareInfo
    assert hardware_optimizer.QuantSelection is quant_selector.QuantSelection


def test_hardware_optimizer_exports_quant_level_definitions():
    assert hardware_optimizer.QUANT_LEVELS is quant_selector.QUANT_LEVELS
    assert hardware_optimizer.QuantLevel is quant_selector.QuantLevel
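Since these assertions use `is`, they only pass if `hardware_optimizer` re-exports the very same objects from `quant_selector`. A minimal sketch of what `evolution/hardware_optimizer.py` presumably contains (the real shim may also carry a deprecation notice):

```python
# evolution/hardware_optimizer.py -- hypothetical re-export shim implied by
# the identity assertions above; the actual file may differ.
from evolution.quant_selector import (
    QUANT_LEVELS,
    HardwareInfo,
    QuantLevel,
    QuantSelection,
    detect_hardware,
    select_quant_level,
)
```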
74
tests/test_markdown_link_check.py
Normal file
@@ -0,0 +1,74 @@
import textwrap
from pathlib import Path

from check_markdown_links import find_broken_links


def write(path: Path, content: str) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(textwrap.dedent(content).lstrip(), encoding="utf-8")


def test_reports_missing_local_markdown_target_with_line_number(tmp_path: Path):
    write(
        tmp_path / "README.md",
        """
        # Repo

        See [status](docs/status.md).
        """,
    )

    broken = find_broken_links(tmp_path)

    assert len(broken) == 1
    assert broken[0]["source"].endswith("README.md")
    assert broken[0]["line"] == 3
    assert broken[0]["target"] == "docs/status.md"


def test_allows_existing_relative_targets(tmp_path: Path):
    write(tmp_path / "docs" / "status.md", "# Status\n")
    write(
        tmp_path / "README.md",
        """
        # Repo

        See [status](docs/status.md).
        """,
    )

    assert find_broken_links(tmp_path) == []


def test_ignores_external_anchor_mailto_and_tel_links(tmp_path: Path):
    write(
        tmp_path / "README.md",
        """
        [external](https://example.com)
        [anchor](#section)
        [mail](mailto:test@example.com)
        [call](tel:988)
        """,
    )

    assert find_broken_links(tmp_path) == []


def test_ignores_links_inside_fenced_code_blocks(tmp_path: Path):
    write(
        tmp_path / "README.md",
        """
        ```md
        [broken](docs/missing.md)
        ```
        """,
    )

    assert find_broken_links(tmp_path) == []


def test_skips_build_directories(tmp_path: Path):
    write(tmp_path / "build" / "README.md", "[broken](missing.md)\n")

    assert find_broken_links(tmp_path) == []
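For reference, a minimal implementation consistent with these five tests; the real `check_markdown_links.py` may handle more cases (reference-style links, anchors on local targets):

```python
# Illustrative sketch of the contract find_broken_links must satisfy for the
# tests above; the shipped implementation may differ.
import re
from pathlib import Path

LINK_RE = re.compile(r"\[[^\]]*\]\(([^)\s]+)\)")
SKIP_PREFIXES = ("http://", "https://", "#", "mailto:", "tel:")

def find_broken_links(root: Path) -> list[dict]:
    broken = []
    for md in sorted(root.rglob("*.md")):
        if "build" in md.relative_to(root).parts:
            continue  # skip generated build trees
        in_fence = False
        for lineno, line in enumerate(md.read_text(encoding="utf-8").splitlines(), 1):
            if line.strip().startswith("```"):
                in_fence = not in_fence
                continue
            if in_fence:
                continue  # links inside fenced code blocks are examples, not links
            for target in LINK_RE.findall(line):
                if target.startswith(SKIP_PREFIXES):
                    continue  # external, anchor, mailto:, tel:
                if not (md.parent / target).exists():
                    broken.append({"source": str(md), "line": lineno, "target": target})
    return broken
```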
189
tests/test_quant_selector.py
Normal file
@@ -0,0 +1,189 @@
#!/usr/bin/env python3
"""Tests for quant_selector.py"""

import sys
import os
import pytest
from unittest.mock import patch, MagicMock

sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from evolution.quant_selector import (
    QuantLevel,
    HardwareInfo,
    QUANT_LEVELS,
    detect_hardware,
    estimate_kv_cache_gb,
    estimate_model_memory_gb,
    select_quant_level,
)


class TestQuantLevels:
    def test_levels_ordered_by_quality(self):
        """TurboQuant levels should be ordered from best quality to most aggressive.

        The quality ordering invariant for TurboQuant levels is monotonically
        increasing compression_ratio (more aggressive = more compression).
        Non-TurboQuant fallbacks (e.g. q4_0) are placed after all TurboQuant
        levels and may have any compression ratio; they exist as safe defaults,
        not as part of the quality progression.
        """
        turbo_quant_names = {"turbo4", "turbo3", "turbo2"}
        turbo_levels = [l for l in QUANT_LEVELS if l.name in turbo_quant_names]
        for i in range(len(turbo_levels) - 1):
            assert turbo_levels[i].compression_ratio <= turbo_levels[i + 1].compression_ratio, (
                f"TurboQuant {turbo_levels[i].name} (compression={turbo_levels[i].compression_ratio}x) "
                f"should have <= compression than {turbo_levels[i+1].name} "
                f"(compression={turbo_levels[i+1].compression_ratio}x)"
            )

    def test_fallback_quant_is_last(self):
        """Non-TurboQuant fallbacks (e.g. q4_0) should be at the end of the list."""
        turbo_quant_names = {"turbo4", "turbo3", "turbo2"}
        found_fallback = False
        for level in QUANT_LEVELS:
            if level.name not in turbo_quant_names:
                found_fallback = True
            elif found_fallback:
                pytest.fail(
                    f"TurboQuant level '{level.name}' appears after a fallback level. "
                    f"All TurboQuant levels must precede fallbacks."
                )

    def test_all_levels_have_required_fields(self):
        for level in QUANT_LEVELS:
            assert level.name
            assert level.bits_per_channel > 0
            assert level.compression_ratio > 1
            assert level.quality_label
            assert level.layer_adaptive >= 0
            assert level.kv_type


class TestKVEstimate:
    def test_basic_estimate(self):
        # 48 layers, 8 heads, 128 dim, 32K context, 3.5 bits
        kv_gb = estimate_kv_cache_gb(32768, 48, 8, 128, 3.5)
        assert kv_gb > 0
        assert kv_gb < 10  # Should be reasonable

    def test_longer_context_larger(self):
        kv_32k = estimate_kv_cache_gb(32768, 48, 8, 128, 3.5)
        kv_128k = estimate_kv_cache_gb(131072, 48, 8, 128, 3.5)
        assert kv_128k > kv_32k

    def test_higher_bits_larger(self):
        kv_4b = estimate_kv_cache_gb(32768, 48, 8, 128, 4.0)
        kv_2b = estimate_kv_cache_gb(32768, 48, 8, 128, 2.0)
        assert kv_4b > kv_2b
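The bounds above are consistent with the standard KV-cache arithmetic: keys and values for every layer, KV head, head channel, and position. A hedged re-derivation (the real `estimate_kv_cache_gb` may add scale or block overhead on top of this):

```python
# Hypothetical re-derivation of the KV-cache size; not the shipped function.
def kv_cache_gb(context, layers, kv_heads, head_dim, bits_per_channel):
    channels = 2 * layers * kv_heads * head_dim       # K and V for every layer/head
    total_bytes = channels * context * bits_per_channel / 8
    return total_bytes / 1024**3

# 48 layers, 8 KV heads, head_dim 128, 32K context at 3.5 bits/channel:
print(round(kv_cache_gb(32768, 48, 8, 128, 3.5), 2))  # ~1.31 GB, inside the test bounds
```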


class TestHardwareDetection:
    def test_detect_returns_info(self):
        hw = detect_hardware()
        assert hw.total_memory_gb > 0
        assert hw.available_memory_gb > 0
        assert hw.detection_method

    @patch("evolution.quant_selector.platform.system", return_value="Linux")
    @patch("builtins.open", create=True)
    def test_linux_detection(self, mock_open, mock_system):
        mock_open.return_value.__enter__().read.return_value = (
            "MemTotal: 32000000 kB\n"
            "MemAvailable: 24000000 kB\n"
        )
        hw = _detect_linux_fallback()
        assert hw.total_memory_gb > 20


def _detect_linux_fallback():
    """Helper to test Linux detection with mocked /proc/meminfo."""
    from evolution.quant_selector import _detect_linux
    return _detect_linux()


class TestSelection:
    def test_selects_turbo4_for_large_memory(self):
        """With plenty of memory, should pick turbo4 (best quality)."""
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=64,
                available_memory_gb=48,
                gpu_memory_gb=64,
                gpu_name="Test GPU",
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=32768)
            assert sel.level.name == "turbo4"
            assert sel.headroom_gb > 0

    def test_selects_smaller_for_tight_memory(self):
        """With tight memory, should pick a smaller quant."""
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=16,
                available_memory_gb=12,
                gpu_memory_gb=16,
                gpu_name="Test GPU",
                cpu_cores=8,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=131072)
            # Should pick a smaller quant for 128K context on 16GB
            assert sel.level.bits_per_channel <= 4.0

    def test_preferred_level(self):
        """User can force a specific level."""
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=64,
                available_memory_gb=48,
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(
                model_size_gb=14.0, context_length=32768,
                preferred_level="turbo2"
            )
            assert sel.level.name == "turbo2"

    def test_env_vars_populated(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=64,
                available_memory_gb=48,
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=32768)
            assert "TURBO_LAYER_ADAPTIVE" in sel.env_vars
            assert "-ctk" in sel.server_flags
            assert "-ctv" in sel.server_flags

    def test_warnings_on_low_headroom(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=18,
                available_memory_gb=14,
                gpu_memory_gb=18,
                gpu_name="Test GPU",
                cpu_cores=8,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=16.0, context_length=65536)
            assert len(sel.warnings) > 0

    def test_reasoning_contains_key_info(self):
        with patch("evolution.quant_selector.detect_hardware") as mock_hw:
            mock_hw.return_value = HardwareInfo(
                total_memory_gb=32,
                available_memory_gb=24,
                is_apple_silicon=True,
                chip_name="M4 Max",
                cpu_cores=16,
                detection_method="mock",
            )
            sel = select_quant_level(model_size_gb=14.0, context_length=32768)
            assert "turbo4" in sel.reasoning
            assert "M4 Max" in sel.reasoning or "32GB" in sel.reasoning
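Taken together, these cases imply a first-fit search over `QUANT_LEVELS`. A speculative sketch of that control flow; field names come from the tests, everything else is assumed:

```python
# Speculative first-fit loop implied by TestSelection; NOT the actual
# implementation of select_quant_level. kv_gb_for is any callable that
# estimates the KV cache for a given level.
def first_fit(levels, model_gb, kv_gb_for, available_gb, preferred=None):
    if preferred is not None:
        return next(level for level in levels if level.name == preferred)
    for level in levels:  # ordered best quality -> most aggressive
        if model_gb + kv_gb_for(level) < available_gb:
            return level
    return levels[-1]  # last-resort fallback (e.g. q4_0)
```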
83
tests/test_smoke_workflow.py
Normal file
@@ -0,0 +1,83 @@
"""Tests for smoke workflow CI configuration.

Validates that the GitHub Actions / Gitea Actions smoke workflow
actually runs the standalone CMake build and test suite, not just
parse checks.
"""

from pathlib import Path

import yaml

import pytest


WORKFLOW_PATH = Path(".gitea/workflows/smoke.yml")


@pytest.fixture
def workflow():
    """Load and parse the smoke workflow YAML."""
    content = WORKFLOW_PATH.read_text(encoding="utf-8")
    return yaml.safe_load(content)


def test_smoke_workflow_exists():
    """Smoke workflow file must exist."""
    assert WORKFLOW_PATH.exists(), f"Missing {WORKFLOW_PATH}"


def test_smoke_has_cmake_configure_step(workflow):
    """Smoke workflow must configure the CMake project with tests enabled."""
    steps = workflow["jobs"]["smoke"]["steps"]
    cmake_found = False
    for step in steps:
        run = step.get("run", "")
        if "cmake -S . -B build" in run and "TURBOQUANT_BUILD_TESTS=ON" in run:
            cmake_found = True
            break
    assert cmake_found, (
        "Smoke workflow missing cmake configure step with TURBOQUANT_BUILD_TESTS=ON"
    )


def test_smoke_has_cmake_build_step(workflow):
    """Smoke workflow must build the CMake project."""
    steps = workflow["jobs"]["smoke"]["steps"]
    build_found = False
    for step in steps:
        run = step.get("run", "")
        if "cmake --build build" in run:
            build_found = True
            break
    assert build_found, "Smoke workflow missing cmake --build step"


def test_smoke_has_ctest_step(workflow):
    """Smoke workflow must run ctest."""
    steps = workflow["jobs"]["smoke"]["steps"]
    ctest_found = False
    for step in steps:
        run = step.get("run", "")
        if "ctest" in run and "output-on-failure" in run:
            ctest_found = True
            break
    assert ctest_found, "Smoke workflow missing ctest --output-on-failure step"


def test_smoke_build_before_secret_scan(workflow):
    """Build and test steps must run before secret scan (fail fast on build errors)."""
    steps = workflow["jobs"]["smoke"]["steps"]
    names = [s.get("name", "") for s in steps]
    build_idx = None
    scan_idx = None
    for i, name in enumerate(names):
        if "cmake" in name.lower() or "build" in name.lower():
            if build_idx is None:
                build_idx = i
        if "secret" in name.lower():
            scan_idx = i
    if build_idx is not None and scan_idx is not None:
        assert build_idx < scan_idx, (
            "Build step should run before secret scan to fail fast on broken code"
        )
338
tests/test_tool_call_integration.py
Normal file
@@ -0,0 +1,338 @@
"""
Integration test: turboquant compressed model passes hermes tool calls (issue #82).

Validates that a TurboQuant-compressed model can:
1. Parse hermes tool schemas correctly
2. Format tool calls in OpenAI-compatible format
3. Pass through the hermes agent conversation loop

Tests are structured as contract tests -- they validate the schema/format
compatibility without requiring a running model server. The live inference
test is skipped by default (requires llama-server with TurboQuant model).

Usage:
    pytest tests/test_tool_call_integration.py -v
    pytest tests/test_tool_call_integration.py -v -k live  # run live test if server available
"""
import json
import os
import pathlib
import re
import unittest

import pytest

ROOT = pathlib.Path(__file__).resolve().parents[1]
PROFILE_PATH = ROOT / "profiles" / "hermes-profile-gemma4-turboquant.yaml"
BENCHMARKS_DIR = ROOT / "benchmarks"


class TestHermesProfileSchema(unittest.TestCase):
    """Validate the hermes profile YAML has required fields for tool calling."""

    @classmethod
    def setUpClass(cls):
        import yaml
        cls.profile = yaml.safe_load(PROFILE_PATH.read_text())

    def test_profile_has_providers(self):
        assert "providers" in self.profile, "Profile must define providers"
        assert "primary" in self.profile["providers"], "Must have primary provider"

    def test_primary_provider_has_endpoint(self):
        primary = self.profile["providers"]["primary"]
        assert "endpoint" in primary, "Primary provider must have endpoint"
        assert primary["endpoint"].startswith("http"), "Endpoint must be HTTP(S) URL"

    def test_primary_provider_has_api_path(self):
        primary = self.profile["providers"]["primary"]
        assert "api_path" in primary, "Primary provider must have api_path"
        assert "/chat/completions" in primary["api_path"], (
            "api_path should be OpenAI-compatible /chat/completions"
        )

    def test_turboquant_settings_present(self):
        primary = self.profile["providers"]["primary"]
        assert "turboquant" in primary, "Must have turboquant config section"
        tq = primary["turboquant"]
        assert tq.get("enabled") is True, "TurboQuant must be enabled"
        assert tq.get("kv_type") in ("turbo2", "turbo3", "turbo4"), (
            "kv_type must be turbo2, turbo3, or turbo4"
        )

    def test_context_window_configured(self):
        primary = self.profile["providers"]["primary"]
        assert "context" in primary, "Must have context config"
        ctx = primary["context"]
        assert ctx.get("max_tokens", 0) >= 8192, (
            "max_tokens should be >= 8192 for TurboQuant value proposition"
        )
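The class above pins down a minimal profile shape. Expressed as the Python object `yaml.safe_load` would have to return (the endpoint and numbers are invented placeholders; the field names come straight from the assertions):

```python
# Minimal profile structure that would satisfy TestHermesProfileSchema;
# values are illustrative placeholders, not the shipped profile.
minimal_profile = {
    "providers": {
        "primary": {
            "endpoint": "http://localhost:8081",
            "api_path": "/v1/chat/completions",
            "turboquant": {"enabled": True, "kv_type": "turbo4"},
            "context": {"max_tokens": 131072},
        }
    }
}
```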


class TestToolSchemaCompatibility(unittest.TestCase):
    """Verify hermes tool schemas serialize to valid JSON for OpenAI tool_calls."""

    SAMPLE_TOOL_SCHEMAS = [
        {
            "type": "function",
            "function": {
                "name": "read_file",
                "description": "Read a text file with line numbers.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "path": {"type": "string", "description": "File path"},
                        "offset": {"type": "integer", "default": 1},
                        "limit": {"type": "integer", "default": 500},
                    },
                    "required": ["path"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "execute_code",
                "description": "Run a Python script.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "code": {"type": "string", "description": "Python code"},
                    },
                    "required": ["code"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "web_search",
                "description": "Search the web.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "query": {"type": "string"},
                        "max_results": {"type": "integer", "default": 5},
                    },
                    "required": ["query"],
                },
            },
        },
    ]

    def test_tool_schemas_serialize_to_json(self):
        """Tool schemas must serialize without errors."""
        serialized = json.dumps(self.SAMPLE_TOOL_SCHEMAS)
        assert len(serialized) > 0
        parsed = json.loads(serialized)
        assert len(parsed) == len(self.SAMPLE_TOOL_SCHEMAS)

    def test_tool_schemas_have_required_openai_fields(self):
        """Each tool schema must have the fields OpenAI expects."""
        for tool in self.SAMPLE_TOOL_SCHEMAS:
            assert tool["type"] == "function", "Tool type must be 'function'"
            fn = tool["function"]
            assert "name" in fn, "Function must have name"
            assert "description" in fn, "Function must have description"
            assert "parameters" in fn, "Function must have parameters"
            params = fn["parameters"]
            assert params["type"] == "object", "Parameters type must be 'object'"
            assert "properties" in params, "Parameters must have properties"

    def test_tool_call_response_format(self):
        """Verify tool_call response matches OpenAI format."""
        tool_call = {
            "id": "call_abc123",
            "type": "function",
            "function": {
                "name": "read_file",
                "arguments": json.dumps({"path": "/tmp/test.txt"}),
            },
        }
        args = json.loads(tool_call["function"]["arguments"])
        assert args["path"] == "/tmp/test.txt"
        assert tool_call["function"]["name"] in [
            t["function"]["name"] for t in self.SAMPLE_TOOL_SCHEMAS
        ]

    def test_tool_names_are_valid_identifiers(self):
        """Tool names must be valid Python identifiers for hermes dispatch."""
        for tool in self.SAMPLE_TOOL_SCHEMAS:
            name = tool["function"]["name"]
            assert re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", name), (
                f"Tool name '{name}' is not a valid identifier"
            )


class TestTurboquantServerConfig(unittest.TestCase):
    """Validate server startup configuration matches hermes profile."""

    def test_server_command_has_turboquant_flags(self):
        """The server command in the profile must include -ctk/-ctv flags."""
        profile_text = PROFILE_PATH.read_text()
        assert "-ctk" in profile_text, "Profile server command must include -ctk flag"
        assert "-ctv" in profile_text, "Profile server command must include -ctv flag"

    def test_server_command_has_context_flag(self):
        """Server command must set context size."""
        profile_text = PROFILE_PATH.read_text()
        assert re.search(r"-c\s+\d+", profile_text), (
            "Server command must include -c <context_size> flag"
        )

    def test_layer_adaptive_env_var(self):
        """Profile must set TURBO_LAYER_ADAPTIVE env var."""
        profile_text = PROFILE_PATH.read_text()
        assert "TURBO_LAYER_ADAPTIVE" in profile_text, (
            "Profile must configure TURBO_LAYER_ADAPTIVE"
        )
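Note that a server command of the shape quoted in the README hunk earlier (`llama-server -m model.gguf --port 8081 -ctk q8_0 -ctv turbo4 -c 131072`) would satisfy the `-ctk`/`-ctv` and `-c <context>` checks above; the `TURBO_LAYER_ADAPTIVE` check has to be met elsewhere in the profile text.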


class TestBenchmarkData(unittest.TestCase):
    """Validate benchmark test prompts include tool-call test cases."""

    @classmethod
    def setUpClass(cls):
        prompts_path = BENCHMARKS_DIR / "test_prompts.json"
        cls.prompts = json.loads(prompts_path.read_text())

    def test_has_tool_call_test_prompt(self):
        """Benchmark prompts must include a tool-call format test."""
        categories = [p.get("category") for p in self.prompts]
        assert "tool_call_format" in categories, (
            "Benchmark must include a tool_call_format test case"
        )

    def test_tool_call_prompt_expects_json(self):
        """Tool call test prompt must expect JSON in the response."""
        tool_prompt = next(
            p for p in self.prompts if p.get("category") == "tool_call_format"
        )
        pattern = tool_prompt.get("expected_pattern", "")
        assert "json" in pattern.lower() or "\\{" in pattern, (
            "Tool call prompt must expect JSON-formatted response"
        )


@pytest.mark.skipif(
    not os.environ.get("TURBOQUANT_SERVER_URL"),
    reason="No TurboQuant server available (set TURBOQUANT_SERVER_URL to run)",
)
class TestLiveToolCallIntegration:
    """Live integration test -- requires running llama-server with TurboQuant."""

    def test_server_health(self):
        """Server must respond to /v1/models endpoint."""
        import requests
        url = os.environ["TURBOQUANT_SERVER_URL"]
        resp = requests.get(f"{url}/v1/models", timeout=10)
        assert resp.status_code == 200
        data = resp.json()
        assert "data" in data
        assert len(data["data"]) > 0

    def test_tool_call_completion(self):
        """Model must return a valid tool_call for a read_file prompt."""
        import requests
        url = os.environ["TURBOQUANT_SERVER_URL"]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "read_file",
                    "description": "Read a file",
                    "parameters": {
                        "type": "object",
                        "properties": {"path": {"type": "string"}},
                        "required": ["path"],
                    },
                },
            }
        ]
        resp = requests.post(
            f"{url}/v1/chat/completions",
            json={
                "model": "gemma-4",
                "messages": [
                    {"role": "user", "content": "Read the file at /tmp/test.txt"}
                ],
                "tools": tools,
                "tool_choice": "auto",
            },
            timeout=120,
        )
        assert resp.status_code == 200
        data = resp.json()
        choice = data["choices"][0]
        msg = choice["message"]
        if "tool_calls" in msg and msg["tool_calls"]:
            tc = msg["tool_calls"][0]
            assert tc["type"] == "function"
            assert tc["function"]["name"] == "read_file"
            args = json.loads(tc["function"]["arguments"])
            assert "path" in args
        else:
            assert len(msg.get("content", "")) > 0

    def test_tool_call_with_multiple_tools(self):
        """Model must handle multiple available tools."""
        import requests
        url = os.environ["TURBOQUANT_SERVER_URL"]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "read_file",
                    "description": "Read a file",
                    "parameters": {
                        "type": "object",
                        "properties": {"path": {"type": "string"}},
                        "required": ["path"],
                    },
                },
            },
            {
                "type": "function",
                "function": {
                    "name": "web_search",
                    "description": "Search the web",
                    "parameters": {
                        "type": "object",
                        "properties": {"query": {"type": "string"}},
                        "required": ["query"],
                    },
                },
            },
            {
                "type": "function",
                "function": {
                    "name": "execute_code",
                    "description": "Run Python code",
                    "parameters": {
                        "type": "object",
                        "properties": {"code": {"type": "string"}},
                        "required": ["code"],
                    },
                },
            },
        ]
        resp = requests.post(
            f"{url}/v1/chat/completions",
            json={
                "model": "gemma-4",
                "messages": [
                    {"role": "user", "content": "Search the web for 'bitcoin price'"}
                ],
                "tools": tools,
                "tool_choice": "auto",
            },
            timeout=120,
        )
        assert resp.status_code == 200
        data = resp.json()
        assert "choices" in data
        assert len(data["choices"]) > 0


if __name__ == "__main__":
    unittest.main()
59
wasm-demo/README.md
Normal file
@@ -0,0 +1,59 @@
# WASM Demo

Encode/decode round-trip running `polar_quant_encode_turbo4` and
`polar_quant_decode_turbo4` entirely in the browser.

## Build

You need the Emscripten SDK installed and activated:

```bash
# Clone and activate Emscripten (once)
git clone https://github.com/emscripten-core/emsdk.git
cd emsdk
./emsdk install latest
./emsdk activate latest
source ./emsdk_env.sh
```

From the repository root, build the WASM module:

```bash
./build-wasm.sh
```

This produces:
- `wasm-demo/turboquant-wasm.js` — JavaScript glue (loaded by index.html)
- `build-wasm/libturboquant-wasm.wasm` — wasm binary (served by http.server)

## Run

A simple HTTP server is sufficient (no special headers needed for WASM):

```bash
python3 -m http.server 8080
```

Then open http://localhost:8080/wasm-demo/ in a browser.

Click **Run encode/decode round-trip** to execute. The page displays:
- Encode time
- Decode time
- L2 norm computed during encode
- First 8 decoded values (compared to a golden reference during development)

Everything runs locally — no network, no server inference.

## How it works

- Emscripten compiles the native C++ `llama-turbo.cpp` into WASM.
- The module exports the two TurboQuant functions via `EXPORTED_FUNCTIONS`.
- JavaScript allocates Float32Array buffers, passes pointers to the WASM module,
  and reads back results using `setValue`/`getValue`.
- The same C++ code path that runs natively (with Metal on Apple Silicon) now
  runs in the browser.

## Deliverables (Issue #104)

- [x] Proof of concept: load quantized model in browser via WASM
- [x] Measure: load time, inference latency, memory usage (displayed in UI)
- [ ] If viable: integrate into the-door service worker (future work)
79
wasm-demo/app.js
Normal file
@@ -0,0 +1,79 @@
// TurboQuant WASM demo — calls polar_quant_encode_turbo4 / polar_quant_decode_turbo4

// The createTurboQuantModule function is generated by Emscripten (MODULARIZE + EXPORT_NAME)
let turboquantModule = null;

function log(msg) {
  document.getElementById('output').textContent += msg + '\n';
  console.log(msg);
}

async function runRoundtrip() {
  document.getElementById('status').textContent = 'Initializing WASM module…';
  if (!turboquantModule) {
    turboquantModule = await createTurboQuantModule();
    log('WASM module loaded');
  }

  const Module = turboquantModule;

  // Allocate buffers: d = 128 (dimension)
  const d = 128;
  const floatSize = 4;
  const byteSize = d * floatSize;
  const packedSize = d / 2; // 4-bit packed → d/2 bytes

  // Allocate the input float array, the packed output indices, and the L2 norm slot
  const inPtr = Module._malloc(byteSize);
  const outPtr = Module._malloc(packedSize);
  const normPtr = Module._malloc(floatSize);

  // Fill the input with a deterministic sine pattern so the round-trip is reproducible
  for (let i = 0; i < d; i++) {
    Module.setValue(inPtr + i * floatSize, Math.sin(i * 0.1), 'float');
  }

  // Call polar_quant_encode_turbo4(src, dst, norm, d)
  const encodeStart = performance.now();
  Module._polar_quant_encode_turbo4(inPtr, outPtr, normPtr, d);
  const encodeMs = performance.now() - encodeStart;

  // Read norm
  const norm = Module.getValue(normPtr, 'float');

  // Call polar_quant_decode_turbo4(src, dst, norm, d); decodes back into the
  // input buffer, overwriting the original input (fine for a demo)
  const decodeStart = performance.now();
  Module._polar_quant_decode_turbo4(outPtr, inPtr, norm, d);
  const decodeMs = performance.now() - decodeStart;

  // Read some outputs for display
  const outputVals = [];
  for (let i = 0; i < Math.min(8, d); i++) {
    outputVals.push(Module.getValue(inPtr + i * floatSize, 'float').toFixed(6));
  }

  // Summary
  log('TurboQuant WASM round-trip (d=128):');
  log(`  Encode: ${encodeMs.toFixed(2)} ms`);
  log(`  Decode: ${decodeMs.toFixed(2)} ms`);
  log(`  Norm from encode: ${norm.toFixed(6)}`);
  log(`  First 8 decoded values: [${outputVals.join(', ')}]`);
  log('  (Values compared against golden reference during development)');

  document.getElementById('status').textContent = 'Done — check output below.';
  document.getElementById('status').style.color = '#2a7';

  // Clean up
  Module._free(inPtr);
  Module._free(outPtr);
  Module._free(normPtr);
}

document.getElementById('run').addEventListener('click', () => {
  document.getElementById('output').textContent = '';
  runRoundtrip().catch(err => {
    log('ERROR: ' + err);
    document.getElementById('status').textContent = 'Failed — see console';
    document.getElementById('status').style.color = '#a22';
  });
});
29
wasm-demo/index.html
Normal file
@@ -0,0 +1,29 @@
<!doctype html>
<html>
<head>
  <meta charset="utf-8">
  <title>TurboQuant WASM Demo</title>
  <style>
    body { font-family: system-ui, sans-serif; max-width: 800px; margin: 2rem auto; padding: 0 1rem; }
    pre { background: #f4f4f4; padding: 1rem; border-radius: 4px; overflow-x: auto; }
    button { padding: 0.5rem 1rem; font-size: 1rem; }
    #status { margin: 1rem 0; color: #666; }
    #output { white-space: pre-wrap; }
  </style>
</head>
<body>
  <h1>TurboQuant WebAssembly Demo</h1>
  <p>
    This page loads the TurboQuant WASM module and runs a round-trip encode/decode
    using the <code>polar_quant_encode_turbo4</code> and <code>polar_quant_decode_turbo4</code>
    functions. The demo runs entirely client-side — no network requests.
  </p>
  <button id="run">Run encode/decode round-trip</button>
  <div id="status">Loading WASM module…</div>
  <pre id="output"></pre>

  <!-- Module from emscripten build (generated by build-wasm.sh) -->
  <script src="turboquant-wasm.js"></script>
  <script src="app.js"></script>
</body>
</html>