Compare commits
13 Commits
fix/muda-c
...
feat/101-b
| Author | SHA1 | Date | |
|---|---|---|---|
| 590c4c7820 | |||
| 629be9714f | |||
| 3123d1fa8e | |||
| 3cd8750cbb | |||
| ef765bbd30 | |||
|
|
5f0d00f127 | ||
|
|
8affe79489 | ||
|
|
319f57780d | ||
| 7a7ce0e652 | |||
| 9224a0162b | |||
|
|
f4ceac76ce | ||
| ab4020cca0 | |||
| 383e1fab2e |
@@ -13,12 +13,12 @@ jobs:
|
||||
python-version: '3.11'
|
||||
- name: Parse check
|
||||
run: |
|
||||
find . -name '*.yml' -o -name '*.yaml' | grep -v .gitea | xargs -r python3 -c "import sys,yaml; [yaml.safe_load(open(f)) for f in sys.argv[1:]]"
|
||||
find . -name '*.json' | xargs -r python3 -m json.tool > /dev/null
|
||||
find . -name '*.py' | xargs -r python3 -m py_compile
|
||||
find . -name '*.yml' -o -name '*.yaml' | grep -v .gitea | grep -v llama-cpp-fork | xargs -r python3 -c "import sys,yaml; [yaml.safe_load(open(f)) for f in sys.argv[1:]]"
|
||||
find . -name '*.json' | grep -v llama-cpp-fork | while read f; do python3 -m json.tool "$f" > /dev/null || exit 1; done
|
||||
find . -name '*.py' | grep -v llama-cpp-fork | xargs -r python3 -m py_compile
|
||||
find . -name '*.sh' | xargs -r bash -n
|
||||
echo "PASS: All files parse"
|
||||
- name: Secret scan
|
||||
run: |
|
||||
if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea; then exit 1; fi
|
||||
if grep -rE 'sk-or-|sk-ant-|ghp_|AKIA' . --include='*.yml' --include='*.py' --include='*.sh' 2>/dev/null | grep -v .gitea | grep -v llama-cpp-fork; then exit 1; fi
|
||||
echo "PASS: No secrets"
|
||||
|
||||
3
.gitignore
vendored
Normal file
3
.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
build/
|
||||
*.pyc
|
||||
__pycache__/
|
||||
36
CMakeLists.txt
Normal file
36
CMakeLists.txt
Normal file
@@ -0,0 +1,36 @@
|
||||
cmake_minimum_required(VERSION 3.16)
|
||||
|
||||
project(turboquant LANGUAGES CXX)
|
||||
|
||||
option(TURBOQUANT_BUILD_TESTS "Build standalone TurboQuant validation tests" ON)
|
||||
|
||||
add_library(turboquant STATIC
|
||||
llama-turbo.cpp
|
||||
)
|
||||
|
||||
target_include_directories(turboquant PUBLIC
|
||||
${CMAKE_CURRENT_SOURCE_DIR}
|
||||
)
|
||||
|
||||
target_compile_features(turboquant PUBLIC cxx_std_17)
|
||||
|
||||
if(MSVC)
|
||||
target_compile_options(turboquant PRIVATE /W4)
|
||||
else()
|
||||
target_compile_options(turboquant PRIVATE -Wall -Wextra -Wpedantic)
|
||||
endif()
|
||||
|
||||
if(TURBOQUANT_BUILD_TESTS)
|
||||
include(CTest)
|
||||
|
||||
add_executable(turboquant_roundtrip_test
|
||||
tests/roundtrip_test.cpp
|
||||
)
|
||||
target_link_libraries(turboquant_roundtrip_test PRIVATE turboquant)
|
||||
target_compile_features(turboquant_roundtrip_test PRIVATE cxx_std_17)
|
||||
|
||||
add_test(
|
||||
NAME turboquant_roundtrip
|
||||
COMMAND turboquant_roundtrip_test
|
||||
)
|
||||
endif()
|
||||
@@ -13,7 +13,7 @@ Unlock 64K-128K context on qwen3.5:27b within 32GB unified memory.
|
||||
A 27B model at 128K context with TurboQuant beats a 72B at Q2 with 8K context.
|
||||
|
||||
## Status
|
||||
See [issues](http://143.198.27.163:3000/Timmy_Foundation/turboquant/issues) for current progress.
|
||||
See [issues](https://forge.alexanderwhitestone.com/Timmy_Foundation/turboquant/issues) for current progress.
|
||||
|
||||
## Roles
|
||||
- **Strago:** Build spec author
|
||||
@@ -29,4 +29,4 @@ See [issues](http://143.198.27.163:3000/Timmy_Foundation/turboquant/issues) for
|
||||
- [rachittshah/mlx-turboquant](https://github.com/rachittshah/mlx-turboquant) — MLX fallback
|
||||
|
||||
## Docs
|
||||
- [BUILD-SPEC.md](BUILD-SPEC.md) — Full build specification (Strago, v2.2)
|
||||
- [Project Status](docs/PROJECT_STATUS.md) — Full project status and build specification
|
||||
|
||||
50
benchmarks/bonsai-tool-calling.md
Normal file
50
benchmarks/bonsai-tool-calling.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# Tool Calling Viability: Bonsai 1-Bit Models
|
||||
|
||||
**Epic**: #99 (1-Bit Models + Edge)
|
||||
**Date**: TBD (run benchmarks/test_tool_calling.py to populate)
|
||||
|
||||
## Hypothesis
|
||||
|
||||
1-bit quantization destroys fine-grained reasoning. Tool calling (precise JSON output) may be impossible at Q1_0. But worth testing — the field is moving fast.
|
||||
|
||||
## Models to Test
|
||||
|
||||
| Model | Size | Quant | Source |
|
||||
|-------|------|-------|--------|
|
||||
| Bonsai-1.7B | 1.7B | Q1_0 | prism-ml/Bonsai-1.7B-gguf |
|
||||
| Bonsai-4B | 4B | Q1_0 | prism-ml/Bonsai-4B-gguf |
|
||||
| Bonsai-8B | 8B | Q1_0 | prism-ml/Bonsai-8B-gguf |
|
||||
|
||||
## Test Suite
|
||||
|
||||
| # | Test | Category | Description |
|
||||
|---|------|----------|-------------|
|
||||
| 1 | simple_file_read | Simple Tool Call | Read a file with an exact path |
|
||||
| 2 | terminal_command | Terminal Command | Execute a shell command |
|
||||
| 3 | web_search | Web Search | Search the web for a query |
|
||||
| 4 | multi_step_chain | Multi-Step | Chain: read -> analyze -> write |
|
||||
| 5 | nested_schema | Schema Parsing | Complex nested parameters |
|
||||
|
||||
## Results
|
||||
|
||||
> **Run**: `python3 benchmarks/test_tool_calling.py --model bonsai-1.7b --output benchmarks/bonsai-tool-calling.md`
|
||||
|
||||
| Test | Bonsai-1.7B | Bonsai-4B | Bonsai-8B |
|
||||
|------|-------------|-----------|-----------|
|
||||
| simple_file_read | TBD | TBD | TBD |
|
||||
| terminal_command | TBD | TBD | TBD |
|
||||
| web_search | TBD | TBD | TBD |
|
||||
| multi_step_chain | TBD | TBD | TBD |
|
||||
| nested_schema | TBD | TBD | TBD |
|
||||
|
||||
## Verdict
|
||||
|
||||
TBD — run the test suite to populate.
|
||||
|
||||
## Failure Modes (if any)
|
||||
|
||||
TBD — document specific failure patterns observed.
|
||||
|
||||
## Recommendations
|
||||
|
||||
TBD — based on results, recommend minimum viable quantization level for tool calling.
|
||||
@@ -1,75 +1,227 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
TurboQuant Benchmarking Suite — Multi-Backend (Issue #29)
|
||||
|
||||
Supports Ollama and llama-server backends with KV cache type configuration.
|
||||
Measures: TTFT, tokens/sec, latency, peak memory.
|
||||
|
||||
Usage:
|
||||
# Ollama (default)
|
||||
python3 benchmarks/run_benchmarks.py --backend ollama --model llama3
|
||||
|
||||
# llama-server with turbo4 KV
|
||||
python3 benchmarks/run_benchmarks.py --backend llama-server \
|
||||
--url http://localhost:11434 --model qwen3.5 --kv-type turbo4
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import time
|
||||
import requests
|
||||
import os
|
||||
from typing import List, Dict
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
# ═══════════════════════════════════════════
|
||||
# TURBOQUANT BENCHMARKING SUITE (Issue #16)
|
||||
# ═══════════════════════════════════════════
|
||||
# This script runs a standardized set of prompts against the local inference
|
||||
# engine (Ollama) and logs the results. This prevents cherry-picking and
|
||||
# provides an objective baseline for quality comparisons.
|
||||
import requests
|
||||
|
||||
OLLAMA_URL = "http://localhost:11434/api/generate"
|
||||
PROMPTS_FILE = "benchmarks/prompts.json"
|
||||
RESULTS_FILE = f"benchmarks/results_{int(time.time())}.json"
|
||||
|
||||
def run_benchmark(model: str = "llama3"):
|
||||
"""Run the benchmark suite for a specific model."""
|
||||
if not os.path.exists(PROMPTS_FILE):
|
||||
print(f"Error: {PROMPTS_FILE} not found.")
|
||||
return
|
||||
def get_peak_memory_mb() -> float:
|
||||
"""Get peak RSS of current process in MB (macOS/Linux)."""
|
||||
try:
|
||||
if sys.platform == "darwin":
|
||||
result = subprocess.run(["ps", "-o", "rss=", "-p", str(os.getpid())],
|
||||
capture_output=True, text=True)
|
||||
return int(result.stdout.strip()) / 1024
|
||||
else:
|
||||
with open(f"/proc/{os.getpid()}/status") as f:
|
||||
for line in f:
|
||||
if line.startswith("VmHWM:"):
|
||||
return int(line.split()[1]) / 1024
|
||||
except Exception:
|
||||
pass
|
||||
return 0.0
|
||||
|
||||
with open(PROMPTS_FILE, 'r') as f:
|
||||
|
||||
def run_ollama(prompt: str, model: str, url: str, timeout: int = 120) -> dict:
|
||||
"""Run a prompt against Ollama /api/generate."""
|
||||
api_url = f"{url.rstrip('/')}/api/generate"
|
||||
start = time.time()
|
||||
ttft = None
|
||||
tokens_per_sec = 0.0
|
||||
|
||||
try:
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"prompt": prompt,
|
||||
"stream": False,
|
||||
"options": {"num_predict": 512}
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
|
||||
response_text = data.get("response", "")
|
||||
eval_count = data.get("eval_count", 0)
|
||||
eval_duration_ns = data.get("eval_duration", 0)
|
||||
prompt_eval_ns = data.get("prompt_eval_duration", 0)
|
||||
|
||||
if eval_duration_ns > 0:
|
||||
tokens_per_sec = eval_count / (eval_duration_ns / 1e9)
|
||||
if prompt_eval_ns > 0:
|
||||
ttft = prompt_eval_ns / 1e9
|
||||
|
||||
return {
|
||||
"response": response_text,
|
||||
"latency_s": round(elapsed, 3),
|
||||
"ttft_s": round(ttft, 3) if ttft else None,
|
||||
"tokens_per_sec": round(tokens_per_sec, 2),
|
||||
"eval_count": eval_count,
|
||||
"status": "success"
|
||||
}
|
||||
except Exception as e:
|
||||
return {"status": "failed", "error": str(e), "latency_s": round(time.time() - start, 3)}
|
||||
|
||||
|
||||
def run_llama_server(prompt: str, model: str, url: str, kv_type: str = "f16",
|
||||
timeout: int = 120) -> dict:
|
||||
"""Run a prompt against llama-server OpenAI-compatible API."""
|
||||
api_url = f"{url.rstrip('/')}/v1/chat/completions"
|
||||
start = time.time()
|
||||
ttft = None
|
||||
tokens_per_sec = 0.0
|
||||
|
||||
try:
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"messages": [{"role": "user", "content": prompt}],
|
||||
"max_tokens": 512,
|
||||
"stream": False
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
|
||||
response_text = data.get("choices", [{}])[0].get("message", {}).get("content", "")
|
||||
usage = data.get("usage", {})
|
||||
completion_tokens = usage.get("completion_tokens", 0)
|
||||
prompt_tokens = usage.get("prompt_tokens", 0)
|
||||
|
||||
# llama-server includes timing in x_* headers or we estimate
|
||||
if elapsed > 0 and completion_tokens > 0:
|
||||
# Subtract estimated prompt eval time (rough)
|
||||
tokens_per_sec = completion_tokens / max(elapsed - 0.1, 0.01)
|
||||
|
||||
return {
|
||||
"response": response_text,
|
||||
"latency_s": round(elapsed, 3),
|
||||
"ttft_s": round(ttft, 3) if ttft else None,
|
||||
"tokens_per_sec": round(tokens_per_sec, 2),
|
||||
"completion_tokens": completion_tokens,
|
||||
"prompt_tokens": prompt_tokens,
|
||||
"kv_type": kv_type,
|
||||
"status": "success"
|
||||
}
|
||||
except Exception as e:
|
||||
return {"status": "failed", "error": str(e), "latency_s": round(time.time() - start, 3)}
|
||||
|
||||
|
||||
def run_benchmark_suite(backend: str, model: str, url: str, kv_type: str,
|
||||
prompts_file: str, output_file: str, timeout: int = 120):
|
||||
"""Run the full benchmark suite."""
|
||||
if not os.path.exists(prompts_file):
|
||||
print(f"ERROR: {prompts_file} not found")
|
||||
sys.exit(1)
|
||||
|
||||
with open(prompts_file) as f:
|
||||
prompts = json.load(f)
|
||||
|
||||
run_fn = run_ollama if backend == "ollama" else run_llama_server
|
||||
mem_before = get_peak_memory_mb()
|
||||
|
||||
results = []
|
||||
print(f"Starting benchmark for model: {model}")
|
||||
print(f"Saving results to: {RESULTS_FILE}")
|
||||
print(f"\n{'='*60}")
|
||||
print(f"Backend: {backend} | Model: {model} | KV: {kv_type}")
|
||||
print(f"URL: {url}")
|
||||
print(f"Prompts: {len(prompts)} | Output: {output_file}")
|
||||
print(f"{'='*60}\n")
|
||||
|
||||
for item in prompts:
|
||||
print(f"Running prompt: {item['id']}...")
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
response = requests.post(OLLAMA_URL, json={
|
||||
"model": model,
|
||||
"prompt": item['prompt'],
|
||||
"stream": False
|
||||
}, timeout=60)
|
||||
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
end_time = time.time()
|
||||
|
||||
results.append({
|
||||
"id": item['id'],
|
||||
"prompt": item['prompt'],
|
||||
"response": data.get("response"),
|
||||
"latency": end_time - start_time,
|
||||
"tokens_per_second": data.get("eval_count", 0) / (data.get("eval_duration", 1) / 1e9) if data.get("eval_duration") else 0,
|
||||
"status": "success"
|
||||
})
|
||||
except Exception as e:
|
||||
print(f"Error running prompt {item['id']}: {e}")
|
||||
results.append({
|
||||
"id": item['id'],
|
||||
"prompt": item['prompt'],
|
||||
"error": str(e),
|
||||
"status": "failed"
|
||||
})
|
||||
pid = item.get("id", item.get("category", "unknown"))
|
||||
prompt = item["prompt"]
|
||||
print(f"[{pid}] Running...", end=" ", flush=True)
|
||||
|
||||
extra = {"kv_type": kv_type} if backend == "llama-server" else {}
|
||||
result = run_fn(prompt, model, url, timeout=timeout)
|
||||
result["id"] = pid
|
||||
result["prompt_preview"] = prompt[:120]
|
||||
result.update(extra)
|
||||
|
||||
status = "✓" if result["status"] == "success" else "✗"
|
||||
tps = result.get("tokens_per_sec", 0)
|
||||
lat = result.get("latency_s", 0)
|
||||
print(f"{status} {tps:.1f} tok/s, {lat:.2f}s")
|
||||
|
||||
results.append(result)
|
||||
|
||||
mem_after = get_peak_memory_mb()
|
||||
|
||||
suite = {
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
"backend": backend,
|
||||
"model": model,
|
||||
"kv_type": kv_type,
|
||||
"url": url,
|
||||
"prompts_file": prompts_file,
|
||||
"memory_mb": round(max(mem_before, mem_after), 1),
|
||||
"results": results,
|
||||
"summary": {
|
||||
"total": len(results),
|
||||
"success": sum(1 for r in results if r["status"] == "success"),
|
||||
"failed": sum(1 for r in results if r["status"] == "failed"),
|
||||
"avg_tok_per_sec": round(
|
||||
sum(r.get("tokens_per_sec", 0) for r in results if r["status"] == "success")
|
||||
/ max(sum(1 for r in results if r["status"] == "success"), 1), 2
|
||||
),
|
||||
"avg_latency_s": round(
|
||||
sum(r.get("latency_s", 0) for r in results if r["status"] == "success")
|
||||
/ max(sum(1 for r in results if r["status"] == "success"), 1), 3
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
os.makedirs(os.path.dirname(output_file) or ".", exist_ok=True)
|
||||
with open(output_file, "w") as f:
|
||||
json.dump(suite, f, indent=2)
|
||||
|
||||
s = suite["summary"]
|
||||
print(f"\n{'='*60}")
|
||||
print(f"RESULTS: {s['success']}/{s['total']} success | "
|
||||
f"Avg {s['avg_tok_per_sec']:.1f} tok/s | "
|
||||
f"Avg {s['avg_latency_s']:.2f}s latency")
|
||||
print(f"{'='*60}")
|
||||
print(f"Saved to {output_file}")
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="TurboQuant Benchmark Suite")
|
||||
parser.add_argument("--backend", choices=["ollama", "llama-server"], default="ollama")
|
||||
parser.add_argument("--model", required=True, help="Model name")
|
||||
parser.add_argument("--url", default="http://localhost:11434", help="Backend URL")
|
||||
parser.add_argument("--kv-type", default="f16", help="KV cache type (llama-server only)")
|
||||
parser.add_argument("--prompts", default="benchmarks/prompts.json", help="Prompts file")
|
||||
parser.add_argument("--output", default=None, help="Output file (auto-generated if omitted)")
|
||||
parser.add_argument("--timeout", type=int, default=120, help="Per-prompt timeout (s)")
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.output is None:
|
||||
ts = int(time.time())
|
||||
args.output = f"benchmarks/results_{args.backend}_{args.kv_type}_{ts}.json"
|
||||
|
||||
run_benchmark_suite(args.backend, args.model, args.url, args.kv_type,
|
||||
args.prompts, args.output, args.timeout)
|
||||
|
||||
# Save results
|
||||
with open(RESULTS_FILE, 'w') as f:
|
||||
json.dump({
|
||||
"model": model,
|
||||
"timestamp": time.time(),
|
||||
"results": results
|
||||
}, f, indent=2)
|
||||
|
||||
print("Benchmark complete.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Default to llama3 for testing
|
||||
run_benchmark("llama3")
|
||||
main()
|
||||
|
||||
495
benchmarks/run_long_session.py
Normal file
495
benchmarks/run_long_session.py
Normal file
@@ -0,0 +1,495 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
TurboQuant Long-Session Quality Test (Issue #12)
|
||||
|
||||
Runs a 50-turn multi-step reasoning conversation to detect quality degradation
|
||||
under sustained context pressure. Compares TurboQuant KV vs FP16 KV baseline.
|
||||
|
||||
Conversation flow (repeating cycle):
|
||||
turns 1-10: code generation
|
||||
turns 11-20: debugging (introduce bugs, ask to fix)
|
||||
turns 21-30: refactoring (improve structure)
|
||||
turns 31-40: testing (write tests, verify)
|
||||
turns 41-50: iteration (modify and extend)
|
||||
|
||||
Usage:
|
||||
# Ollama backend (default)
|
||||
python3 benchmarks/run_long_session.py \\
|
||||
--backend ollama --model llama3 --turns 50
|
||||
|
||||
# llama-server backend with KV type
|
||||
python3 benchmarks/run_long_session.py \\
|
||||
--backend llama-server --url http://localhost:8080 \\
|
||||
--model qwen3.5 --kv-type turbo4 --turns 50
|
||||
|
||||
# Compare two runs
|
||||
python3 benchmarks/run_long_session.py --compare run_turbo4.json run_fp16.json
|
||||
|
||||
Acceptance Criteria (Issue #12):
|
||||
- 50-turn conversation on both TurboQuant and FP16
|
||||
- Quality comparison documented
|
||||
- Degradation flagged with turn number where it appears
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import hashlib
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
requests = None
|
||||
|
||||
# ── Conversation Prompts ───────────────────────────────────────────────
|
||||
|
||||
CONVERSATION_CYCLE = [
|
||||
# Phase 1: Code Generation (turns 1-10)
|
||||
{
|
||||
"phase": "code_gen",
|
||||
"turns": [
|
||||
"Write a Python class called RateLimiter that implements a token bucket algorithm. It should support: add_tokens(n), consume(n) -> bool, and a configurable rate and burst capacity.",
|
||||
"Add thread-safety to the RateLimiter class using a lock. Make sure consume() blocks briefly if tokens are unavailable rather than failing immediately.",
|
||||
"Now add a method get_wait_time(n) that returns how many seconds until n tokens will be available without blocking.",
|
||||
"Write a companion class RateLimiterGroup that manages multiple RateLimiters keyed by string identifier, with a get_or_create(id, rate, burst) method.",
|
||||
"Add a decorator @rate_limited(limiter_group, key_fn) that can be applied to async functions to rate-limit them.",
|
||||
"Add serialization support — export_state() returns JSON-serializable dict, import_state() restores from dict. Include timestamps.",
|
||||
"Add a Prometheus-compatible metrics exporter that tracks: tokens_consumed_total, tokens_rejected_total, wait_time_seconds histogram.",
|
||||
"Write a configuration loader that reads rate limiter configs from YAML with validation and sensible defaults.",
|
||||
"Add an LRU eviction policy for the RateLimiterGroup with configurable max_entries and idle_timeout_seconds.",
|
||||
"Wrap everything into a pip-installable package structure with pyproject.toml, __init__.py exports, and a CLI entry point.",
|
||||
]
|
||||
},
|
||||
# Phase 2: Debugging (turns 11-20)
|
||||
{
|
||||
"phase": "debug",
|
||||
"turns": [
|
||||
"I'm getting a race condition in consume() when two threads call it simultaneously with exactly the tokens needed. The lock doesn't seem to help. Can you trace through the logic and find the bug?",
|
||||
"The get_wait_time() method returns negative values sometimes. Here's the traceback: ... Can you identify what's wrong?",
|
||||
"RateLimiterGroup.get_or_create() sometimes returns a limiter with wrong parameters when called concurrently. Explain the potential issue.",
|
||||
"The decorator @rate_limited doesn't properly propagate exceptions — they're being swallowed. Fix the error handling.",
|
||||
"export_state() produces corrupted JSON when called while tokens are being consumed. How should we fix the serialization?",
|
||||
"The Prometheus histogram for wait_time_seconds has incorrect bucket boundaries. Review the histogram configuration.",
|
||||
"The YAML config loader doesn't handle missing optional fields gracefully — it raises KeyError instead of using defaults.",
|
||||
"LRU eviction is evicting active limiters. The idle_timeout calculation seems wrong. Debug the eviction logic.",
|
||||
"The CLI entry point crashes with a specific YAML config. Here's the config and error: ... What's the root cause?",
|
||||
"Memory leak detected in RateLimiterGroup when creating/evicting many limiters rapidly. Where's the leak?",
|
||||
]
|
||||
},
|
||||
# Phase 3: Refactoring (turns 21-30)
|
||||
{
|
||||
"phase": "refactor",
|
||||
"turns": [
|
||||
"Refactor RateLimiter to use a protocol/interface pattern so we can swap token bucket for leaky bucket or fixed window.",
|
||||
"Extract the locking strategy into a separate mixin or context manager that can be swapped between threading.Lock, asyncio.Lock, and no-lock.",
|
||||
"Refactor the metrics exporter to use a plugin architecture — different backends (Prometheus, StatsD, logging) should be pluggable.",
|
||||
"Convert the YAML config loader to use a typed config dataclass with validation via pydantic or attrs.",
|
||||
"Refactor RateLimiterGroup to use a generic container with type hints, making the key type configurable (not just str).",
|
||||
"Extract the decorator into a separate module and make it work with both sync and async functions transparently.",
|
||||
"Refactor the serialization to use a versioned schema so import_state() can handle older format versions.",
|
||||
"Split the package into core (rate limiting), exporters (metrics), and config (YAML) subpackages.",
|
||||
"Refactor the CLI to use click or typer with subcommands: serve, validate-config, export-state, import-state.",
|
||||
"Apply the repository pattern to RateLimiterGroup — separate storage (in-memory, Redis, SQLite) from the limiter logic.",
|
||||
]
|
||||
},
|
||||
# Phase 4: Testing (turns 31-40)
|
||||
{
|
||||
"phase": "testing",
|
||||
"turns": [
|
||||
"Write comprehensive unit tests for RateLimiter covering: basic consume, burst, refill timing, edge cases (zero tokens, negative values).",
|
||||
"Write concurrency tests that hammer consume() with 100 threads and verify no tokens are double-counted.",
|
||||
"Write tests for get_wait_time() including edge cases: already available, partial availability, and exact timing.",
|
||||
"Write integration tests for RateLimiterGroup: concurrent create, LRU eviction under load, state consistency.",
|
||||
"Write tests for the @rate_limited decorator: correct rate limiting, exception propagation, async/sync compatibility.",
|
||||
"Write property-based tests using hypothesis: token conservation, monotonicity of wait times, idempotent serialization round-trips.",
|
||||
"Write tests for the YAML config loader: valid configs, invalid schemas, missing fields, type coercion errors.",
|
||||
"Write benchmark tests that measure throughput (operations/sec) and memory usage under various load patterns.",
|
||||
"Write end-to-end tests simulating a real API server with multiple endpoints sharing a rate limiter group.",
|
||||
"Write chaos tests: random delays, simulated clock skew, forced lock contention, and verify system stability.",
|
||||
]
|
||||
},
|
||||
# Phase 5: Iteration (turns 41-50)
|
||||
{
|
||||
"phase": "iteration",
|
||||
"turns": [
|
||||
"Add support for weighted token buckets where different operations consume different amounts.",
|
||||
"Implement a sliding window rate limiter as an alternative algorithm and add it to the protocol.",
|
||||
"Add a REST API using FastAPI that exposes the rate limiter group with OpenAPI docs.",
|
||||
"Add WebSocket support for real-time rate limit status streaming to clients.",
|
||||
"Implement distributed rate limiting using Redis with Lua scripts for atomic operations.",
|
||||
"Add a circuit breaker pattern integration — when a rate limit is consistently hit, auto-open the circuit.",
|
||||
"Implement adaptive rate limiting that adjusts limits based on system load (CPU, memory).",
|
||||
"Add request priority queues so high-priority requests can preempt low-priority ones when near limits.",
|
||||
"Implement rate limit quotas with time windows (daily, weekly, monthly) in addition to per-second rates.",
|
||||
"Write a migration guide and changelog for v2.0 with all the new features and breaking changes.",
|
||||
]
|
||||
},
|
||||
]
|
||||
|
||||
# ── Quality Metrics ────────────────────────────────────────────────────
|
||||
|
||||
def compute_quality_metrics(response: str, prompt: str, turn: int, phase: str) -> dict:
|
||||
"""Compute quality signals for a single turn response."""
|
||||
metrics = {
|
||||
"turn": turn,
|
||||
"phase": phase,
|
||||
"response_length": len(response),
|
||||
"line_count": response.count("\n") + 1,
|
||||
}
|
||||
|
||||
# Coherence: does response contain code-like content when expected?
|
||||
code_indicators = ["def ", "class ", "import ", "return ", "if ", "for ", "while ", "{", "}", "=>"]
|
||||
metrics["code_density"] = sum(1 for ind in code_indicators if ind in response) / len(code_indicators)
|
||||
|
||||
# Hallucination detection: references to non-existent earlier context
|
||||
hallucination_phrases = [
|
||||
"as mentioned earlier", "as we discussed", "like before",
|
||||
"remember when", "from the previous turn", "as shown above",
|
||||
"earlier in our conversation",
|
||||
]
|
||||
metrics["hallucinated_references"] = sum(
|
||||
1 for p in hallucination_phrases if p.lower() in response.lower()
|
||||
)
|
||||
|
||||
# Structural quality: does it have proper formatting?
|
||||
metrics["has_headers"] = bool(re.search(r"^#{1,3}\s", response, re.MULTILINE))
|
||||
metrics["has_code_blocks"] = response.count("```") >= 2
|
||||
metrics["has_lists"] = bool(re.search(r"^[\-\*\d]\.\s", response, re.MULTILINE))
|
||||
|
||||
# Repetition detection: check for repeated sentences
|
||||
sentences = [s.strip().lower() for s in re.split(r'[.!?]+', response) if len(s.strip()) > 20]
|
||||
unique_sentences = set(sentences)
|
||||
metrics["repetition_ratio"] = 1 - (len(unique_sentences) / max(len(sentences), 1))
|
||||
|
||||
# Attention to prompt: does it address the specific request?
|
||||
prompt_keywords = set(re.findall(r'\b\w{4,}\b', prompt.lower()))
|
||||
response_words = set(re.findall(r'\b\w{4,}\b', response.lower()))
|
||||
metrics["prompt_relevance"] = len(prompt_keywords & response_words) / max(len(prompt_keywords), 1)
|
||||
|
||||
# Composite quality score (0-1)
|
||||
metrics["quality_score"] = (
|
||||
0.25 * min(metrics["code_density"] * 3, 1.0) +
|
||||
0.20 * min(metrics["prompt_relevance"] * 2, 1.0) +
|
||||
0.20 * (1.0 - min(metrics["repetition_ratio"] * 5, 1.0)) +
|
||||
0.15 * (1.0 if metrics["has_code_blocks"] else 0.5) +
|
||||
0.10 * (1.0 - min(metrics["hallucinated_references"] * 0.3, 1.0)) +
|
||||
0.10 * (1.0 if metrics["has_lists"] else 0.7)
|
||||
)
|
||||
|
||||
return metrics
|
||||
|
||||
|
||||
def detect_degradation(turn_metrics: list, window: int = 5, threshold: float = 0.15) -> list:
|
||||
"""Detect quality degradation by comparing rolling windows."""
|
||||
alerts = []
|
||||
for i in range(window, len(turn_metrics)):
|
||||
recent = [turn_metrics[j]["quality_score"] for j in range(i - window, i)]
|
||||
current = turn_metrics[i]["quality_score"]
|
||||
avg_recent = sum(recent) / len(recent)
|
||||
if avg_recent - current > threshold:
|
||||
alerts.append({
|
||||
"turn": turn_metrics[i]["turn"],
|
||||
"phase": turn_metrics[i]["phase"],
|
||||
"current_score": round(current, 3),
|
||||
"window_avg": round(avg_recent, 3),
|
||||
"drop": round(avg_recent - current, 3),
|
||||
})
|
||||
return alerts
|
||||
|
||||
|
||||
# ── Backends ───────────────────────────────────────────────────────────
|
||||
|
||||
def query_ollama(prompt: str, model: str, url: str, history: list, timeout: int = 120) -> tuple:
|
||||
"""Query Ollama with conversation history. Returns (response, stats)."""
|
||||
messages = history + [{"role": "user", "content": prompt}]
|
||||
api_url = f"{url.rstrip('/')}/api/chat"
|
||||
|
||||
start = time.time()
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"messages": messages,
|
||||
"stream": False,
|
||||
"options": {"num_ctx": 8192},
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
|
||||
data = resp.json()
|
||||
content = data.get("message", {}).get("content", "")
|
||||
eval_count = data.get("eval_count", 0)
|
||||
eval_duration = data.get("eval_duration", 0) / 1e9 # ns to s
|
||||
|
||||
stats = {
|
||||
"elapsed_s": round(elapsed, 2),
|
||||
"tokens_generated": eval_count,
|
||||
"tokens_per_s": round(eval_count / max(eval_duration, 0.001), 1),
|
||||
"prompt_eval_count": data.get("prompt_eval_count", 0),
|
||||
}
|
||||
return content, stats
|
||||
|
||||
|
||||
def query_llama_server(prompt: str, model: str, url: str, history: list,
|
||||
kv_type: str = "f16", timeout: int = 120) -> tuple:
|
||||
"""Query llama-server with conversation history and KV type."""
|
||||
messages = history + [{"role": "user", "content": prompt}]
|
||||
api_url = f"{url.rstrip('/')}/v1/chat/completions"
|
||||
|
||||
start = time.time()
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"messages": messages,
|
||||
"temperature": 0.7,
|
||||
"max_tokens": 2048,
|
||||
}, headers={"Content-Type": "application/json"}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
|
||||
data = resp.json()
|
||||
content = data["choices"][0]["message"]["content"]
|
||||
usage = data.get("usage", {})
|
||||
|
||||
stats = {
|
||||
"elapsed_s": round(elapsed, 2),
|
||||
"tokens_generated": usage.get("completion_tokens", 0),
|
||||
"prompt_tokens": usage.get("prompt_tokens", 0),
|
||||
"kv_type": kv_type,
|
||||
}
|
||||
return content, stats
|
||||
|
||||
|
||||
# ── Main ───────────────────────────────────────────────────────────────
|
||||
|
||||
def run_session(args) -> dict:
|
||||
"""Run the full 50-turn conversation session."""
|
||||
total_turns = args.turns
|
||||
history = []
|
||||
turn_metrics = []
|
||||
all_responses = []
|
||||
|
||||
# Flatten conversation cycle
|
||||
all_prompts = []
|
||||
for phase_data in CONVERSATION_CYCLE:
|
||||
for turn_prompt in phase_data["turns"]:
|
||||
all_prompts.append((phase_data["phase"], turn_prompt))
|
||||
|
||||
# Repeat cycle if needed
|
||||
while len(all_prompts) < total_turns:
|
||||
all_prompts.extend(all_prompts)
|
||||
|
||||
all_prompts = all_prompts[:total_turns]
|
||||
|
||||
query_fn = query_ollama if args.backend == "ollama" else query_llama_server
|
||||
query_kwargs = {"model": args.model, "url": args.url}
|
||||
if args.backend == "llama-server":
|
||||
query_kwargs["kv_type"] = args.kv_type
|
||||
|
||||
print(f"\n{'='*70}")
|
||||
print(f"Long-Session Quality Test — {total_turns} turns")
|
||||
print(f"Backend: {args.backend} | Model: {args.model}")
|
||||
if args.backend == "llama-server":
|
||||
print(f"KV Type: {args.kv_type}")
|
||||
print(f"{'='*70}\n")
|
||||
|
||||
for i, (phase, prompt) in enumerate(all_prompts):
|
||||
turn_num = i + 1
|
||||
print(f"[Turn {turn_num:2d}/{total_turns}] Phase: {phase:12s} | ", end="", flush=True)
|
||||
|
||||
try:
|
||||
response, stats = query_fn(prompt, history=history, **query_kwargs, timeout=args.timeout)
|
||||
except Exception as e:
|
||||
print(f"ERROR: {e}")
|
||||
response = f"[ERROR: {e}]"
|
||||
stats = {"elapsed_s": 0, "tokens_generated": 0}
|
||||
|
||||
metrics = compute_quality_metrics(response, prompt, turn_num, phase)
|
||||
metrics.update(stats)
|
||||
turn_metrics.append(metrics)
|
||||
all_responses.append({"turn": turn_num, "phase": phase, "prompt": prompt, "response": response})
|
||||
|
||||
# Update history (keep last N turns to manage context)
|
||||
history.append({"role": "user", "content": prompt})
|
||||
history.append({"role": "assistant", "content": response})
|
||||
if len(history) > args.history_window * 2:
|
||||
history = history[-(args.history_window * 2):]
|
||||
|
||||
print(f"score={metrics['quality_score']:.2f} | "
|
||||
f"len={metrics['response_length']:4d} | "
|
||||
f"{stats.get('tokens_per_s', '?')} tok/s | "
|
||||
f"{stats['elapsed_s']:.1f}s")
|
||||
|
||||
if args.delay > 0:
|
||||
time.sleep(args.delay)
|
||||
|
||||
# Detect degradation
|
||||
degradation = detect_degradation(turn_metrics)
|
||||
|
||||
# Build report
|
||||
report = {
|
||||
"config": {
|
||||
"backend": args.backend,
|
||||
"model": args.model,
|
||||
"kv_type": getattr(args, "kv_type", "f16"),
|
||||
"total_turns": total_turns,
|
||||
"history_window": args.history_window,
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
},
|
||||
"turn_metrics": turn_metrics,
|
||||
"degradation_alerts": degradation,
|
||||
"summary": {
|
||||
"avg_quality_score": round(sum(m["quality_score"] for m in turn_metrics) / len(turn_metrics), 3),
|
||||
"min_quality_score": round(min(m["quality_score"] for m in turn_metrics), 3),
|
||||
"max_quality_score": round(max(m["quality_score"] for m in turn_metrics), 3),
|
||||
"total_degradation_events": len(degradation),
|
||||
"first_degradation_turn": degradation[0]["turn"] if degradation else None,
|
||||
"avg_response_length": round(sum(m["response_length"] for m in turn_metrics) / len(turn_metrics), 0),
|
||||
"total_hallucinated_references": sum(m["hallucinated_references"] for m in turn_metrics),
|
||||
"avg_repetition_ratio": round(sum(m["repetition_ratio"] for m in turn_metrics) / len(turn_metrics), 3),
|
||||
},
|
||||
"responses": all_responses if args.save_responses else [],
|
||||
}
|
||||
|
||||
return report
|
||||
|
||||
|
||||
def compare_reports(report_a: dict, report_b: dict) -> dict:
|
||||
"""Compare two session reports and highlight differences."""
|
||||
sa = report_a["summary"]
|
||||
sb = report_b["summary"]
|
||||
label_a = report_a["config"].get("kv_type", "run_a")
|
||||
label_b = report_b["config"].get("kv_type", "run_b")
|
||||
|
||||
comparison = {
|
||||
"labels": [label_a, label_b],
|
||||
"avg_quality": [sa["avg_quality_score"], sb["avg_quality_score"]],
|
||||
"min_quality": [sa["min_quality_score"], sb["min_quality_score"]],
|
||||
"degradation_events": [sa["total_degradation_events"], sb["total_degradation_events"]],
|
||||
"first_degradation": [sa["first_degradation_turn"], sb["first_degradation_turn"]],
|
||||
"hallucinated_refs": [sa["total_hallucinated_references"], sb["total_hallucinated_references"]],
|
||||
"repetition_ratio": [sa["avg_repetition_ratio"], sb["avg_repetition_ratio"]],
|
||||
"quality_delta": round(sb["avg_quality_score"] - sa["avg_quality_score"], 3),
|
||||
"verdict": "",
|
||||
}
|
||||
|
||||
if comparison["quality_delta"] > 0.05:
|
||||
comparison["verdict"] = f"{label_b} is BETTER by {comparison['quality_delta']:.3f}"
|
||||
elif comparison["quality_delta"] < -0.05:
|
||||
comparison["verdict"] = f"{label_a} is BETTER by {abs(comparison['quality_delta']):.3f}"
|
||||
else:
|
||||
comparison["verdict"] = "No significant quality difference"
|
||||
|
||||
return comparison
|
||||
|
||||
|
||||
def print_report(report: dict):
|
||||
"""Print a human-readable summary."""
|
||||
s = report["summary"]
|
||||
c = report["config"]
|
||||
d = report["degradation_alerts"]
|
||||
|
||||
print(f"\n{'='*70}")
|
||||
print(f"LONG-SESSION QUALITY REPORT")
|
||||
print(f"{'='*70}")
|
||||
print(f"Backend: {c['backend']} | Model: {c['model']} | KV: {c.get('kv_type', 'n/a')}")
|
||||
print(f"Turns: {c['total_turns']} | History window: {c['history_window']}")
|
||||
print(f"{'─'*70}")
|
||||
print(f"Quality Score: avg={s['avg_quality_score']:.3f} min={s['min_quality_score']:.3f} max={s['max_quality_score']:.3f}")
|
||||
print(f"Avg Response: {s['avg_response_length']:.0f} chars")
|
||||
print(f"Repetition: {s['avg_repetition_ratio']:.3f}")
|
||||
print(f"Hallucinations: {s['total_hallucinated_references']} total")
|
||||
print(f"Degradations: {s['total_degradation_events']} events")
|
||||
|
||||
if s["first_degradation_turn"]:
|
||||
print(f" ⚠ First degradation at turn {s['first_degradation_turn']}")
|
||||
else:
|
||||
print(f" ✓ No significant degradation detected")
|
||||
|
||||
if d:
|
||||
print(f"\n{'─'*70}")
|
||||
print(f"DEGRADATION ALERTS:")
|
||||
for alert in d:
|
||||
print(f" Turn {alert['turn']:2d} [{alert['phase']:10s}]: "
|
||||
f"score={alert['current_score']:.3f} "
|
||||
f"(window avg={alert['window_avg']:.3f}, "
|
||||
f"drop={alert['drop']:.3f})")
|
||||
|
||||
# Per-phase averages
|
||||
phases = {}
|
||||
for m in report["turn_metrics"]:
|
||||
phases.setdefault(m["phase"], []).append(m["quality_score"])
|
||||
print(f"\n{'─'*70}")
|
||||
print(f"PER-PHASE AVERAGES:")
|
||||
for phase, scores in phases.items():
|
||||
avg = sum(scores) / len(scores)
|
||||
trend = "↗" if scores[-1] > scores[0] else "↘" if scores[-1] < scores[0] else "→"
|
||||
print(f" {phase:12s}: avg={avg:.3f} trend={trend} "
|
||||
f"first={scores[0]:.3f} last={scores[-1]:.3f}")
|
||||
print(f"{'='*70}\n")
|
||||
|
||||
|
||||
def print_comparison(comp: dict):
|
||||
"""Print comparison between two runs."""
|
||||
print(f"\n{'='*70}")
|
||||
print(f"QUALITY COMPARISON: {comp['labels'][0]} vs {comp['labels'][1]}")
|
||||
print(f"{'='*70}")
|
||||
print(f"{'Metric':<30s} {comp['labels'][0]:>15s} {comp['labels'][1]:>15s}")
|
||||
print(f"{'─'*60}")
|
||||
print(f"{'Avg Quality Score':<30s} {comp['avg_quality'][0]:>15.3f} {comp['avg_quality'][1]:>15.3f}")
|
||||
print(f"{'Min Quality Score':<30s} {comp['min_quality'][0]:>15.3f} {comp['min_quality'][1]:>15.3f}")
|
||||
print(f"{'Degradation Events':<30s} {comp['degradation_events'][0]:>15d} {comp['degradation_events'][1]:>15d}")
|
||||
print(f"{'First Degradation Turn':<30s} {str(comp['first_degradation'][0] or 'none'):>15s} {str(comp['first_degradation'][1] or 'none'):>15s}")
|
||||
print(f"{'Hallucinated References':<30s} {comp['hallucinated_refs'][0]:>15d} {comp['hallucinated_refs'][1]:>15d}")
|
||||
print(f"{'Repetition Ratio':<30s} {comp['repetition_ratio'][0]:>15.3f} {comp['repetition_ratio'][1]:>15.3f}")
|
||||
print(f"{'─'*60}")
|
||||
print(f"Verdict: {comp['verdict']}")
|
||||
print(f"{'='*70}\n")
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="TurboQuant Long-Session Quality Test")
|
||||
parser.add_argument("--backend", choices=["ollama", "llama-server"], default="ollama")
|
||||
parser.add_argument("--model", default="llama3", help="Model name")
|
||||
parser.add_argument("--url", default="http://localhost:11434", help="Backend URL")
|
||||
parser.add_argument("--kv-type", default="f16", help="KV cache type (llama-server only)")
|
||||
parser.add_argument("--turns", type=int, default=50, help="Number of conversation turns")
|
||||
parser.add_argument("--history-window", type=int, default=20, help="Turns of history to keep")
|
||||
parser.add_argument("--timeout", type=int, default=120, help="Per-turn timeout in seconds")
|
||||
parser.add_argument("--delay", type=float, default=0.5, help="Delay between turns in seconds")
|
||||
parser.add_argument("--output", "-o", help="Output JSON file path")
|
||||
parser.add_argument("--save-responses", action="store_true", help="Include full responses in output")
|
||||
parser.add_argument("--compare", nargs=2, metavar=("FILE_A", "FILE_B"),
|
||||
help="Compare two previously saved run reports")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Compare mode
|
||||
if args.compare:
|
||||
with open(args.compare[0]) as f:
|
||||
report_a = json.load(f)
|
||||
with open(args.compare[1]) as f:
|
||||
report_b = json.load(f)
|
||||
comp = compare_reports(report_a, report_b)
|
||||
print_comparison(comp)
|
||||
return
|
||||
|
||||
# Run mode
|
||||
if requests is None:
|
||||
print("ERROR: 'requests' package required. Install with: pip install requests")
|
||||
sys.exit(1)
|
||||
|
||||
report = run_session(args)
|
||||
print_report(report)
|
||||
|
||||
# Save report
|
||||
output_path = args.output or f"benchmarks/long_session_{args.kv_type}_{int(time.time())}.json"
|
||||
os.makedirs(os.path.dirname(output_path) or ".", exist_ok=True)
|
||||
with open(output_path, "w") as f:
|
||||
json.dump(report, f, indent=2)
|
||||
print(f"Report saved to: {output_path}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
435
benchmarks/test_tool_calling.py
Normal file
435
benchmarks/test_tool_calling.py
Normal file
@@ -0,0 +1,435 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Tool Calling Viability Test for 1-Bit / Edge Models (Issue #101)
|
||||
|
||||
Tests whether Bonsai 1-bit models (or any small model) can produce
|
||||
valid tool calls via Ollama or llama-server API.
|
||||
|
||||
Test suite (5 categories):
|
||||
1. Simple tool call: file read with exact path
|
||||
2. Terminal command execution
|
||||
3. Web search
|
||||
4. Multi-step: read file -> analyze -> write result
|
||||
5. Schema parsing: complex nested parameters
|
||||
|
||||
Each test:
|
||||
- Sends a prompt requesting a tool call
|
||||
- Checks if the response contains valid JSON tool call syntax
|
||||
- Scores structural validity + semantic accuracy
|
||||
- Records latency and token count
|
||||
|
||||
Usage:
|
||||
python3 benchmarks/test_tool_calling.py --model bonsai-1.7b
|
||||
python3 benchmarks/test_tool_calling.py --model qwen3.5 --backend llama-server --url http://localhost:8080
|
||||
python3 benchmarks/test_tool_calling.py --model bonsai-1.7b --output benchmarks/bonsai-tool-calling.md
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional
|
||||
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
print("Error: pip install requests", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# ── Tool call test definitions ────────────────────────────────────────
|
||||
|
||||
TESTS = [
|
||||
{
|
||||
"id": "simple_file_read",
|
||||
"category": "Simple Tool Call",
|
||||
"description": "Read a file with an exact path",
|
||||
"prompt": (
|
||||
"You have access to a tool called read_file. "
|
||||
"Call it to read /etc/hostname. "
|
||||
"Respond ONLY with a JSON tool call in this exact format:\n"
|
||||
'{"name": "read_file", "arguments": {"path": "/etc/hostname"}}'
|
||||
),
|
||||
"validate": lambda resp: _has_json_tool_call(resp, "read_file", {"path": "/etc/hostname"}),
|
||||
"scoring": "structural", # Can we parse the JSON at all?
|
||||
},
|
||||
{
|
||||
"id": "terminal_command",
|
||||
"category": "Terminal Command",
|
||||
"description": "Execute a shell command",
|
||||
"prompt": (
|
||||
"You have access to a tool called terminal. "
|
||||
"Call it to run the command: echo hello world. "
|
||||
"Respond ONLY with a JSON tool call:\n"
|
||||
'{"name": "terminal", "arguments": {"command": "echo hello world"}}'
|
||||
),
|
||||
"validate": lambda resp: _has_json_tool_call(resp, "terminal", {"command": "echo hello world"}),
|
||||
"scoring": "structural",
|
||||
},
|
||||
{
|
||||
"id": "web_search",
|
||||
"category": "Web Search",
|
||||
"description": "Search the web for a query",
|
||||
"prompt": (
|
||||
"You have access to a tool called web_search. "
|
||||
"Search for: what is quantization in machine learning. "
|
||||
"Respond ONLY with a JSON tool call:\n"
|
||||
'{"name": "web_search", "arguments": {"query": "what is quantization in machine learning"}}'
|
||||
),
|
||||
"validate": lambda resp: _has_json_tool_call(resp, "web_search", {"query": "what is quantization in machine learning"}),
|
||||
"scoring": "structural",
|
||||
},
|
||||
{
|
||||
"id": "multi_step_chain",
|
||||
"category": "Multi-Step",
|
||||
"description": "Chain: read file -> analyze -> write result",
|
||||
"prompt": (
|
||||
"You have access to these tools: read_file, write_file.\n"
|
||||
"Task: Read /tmp/input.txt, count the words, then write the count to /tmp/count.txt.\n"
|
||||
"First, call read_file on /tmp/input.txt. "
|
||||
"Respond ONLY with the first tool call as JSON:\n"
|
||||
'{"name": "read_file", "arguments": {"path": "/tmp/input.txt"}}'
|
||||
),
|
||||
"validate": lambda resp: _has_json_tool_call(resp, "read_file", {"path": "/tmp/input.txt"}),
|
||||
"scoring": "structural",
|
||||
},
|
||||
{
|
||||
"id": "nested_schema",
|
||||
"category": "Schema Parsing",
|
||||
"description": "Complex nested parameters",
|
||||
"prompt": (
|
||||
"You have access to a tool called deploy_service. "
|
||||
"Deploy a service with:\n"
|
||||
'- name: "api-gateway"\n'
|
||||
'- replicas: 3\n'
|
||||
'- env: {"PORT": 8080, "NODE_ENV": "production"}\n'
|
||||
'- resources: {"cpu": "500m", "memory": "256Mi"}\n\n'
|
||||
"Respond ONLY with a JSON tool call:\n"
|
||||
'{"name": "deploy_service", "arguments": {"name": "api-gateway", "replicas": 3, '
|
||||
'"env": {"PORT": 8080, "NODE_ENV": "production"}, '
|
||||
'"resources": {"cpu": "500m", "memory": "256Mi"}}}'
|
||||
),
|
||||
"validate": lambda resp: _has_nested_tool_call(resp),
|
||||
"scoring": "semantic", # Needs correct nested structure
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
# ── Validation helpers ────────────────────────────────────────────────
|
||||
|
||||
def _extract_json(text: str) -> Optional[dict]:
|
||||
"""Try to extract a JSON object from text."""
|
||||
# Try direct parse
|
||||
text = text.strip()
|
||||
try:
|
||||
obj = json.loads(text)
|
||||
if isinstance(obj, dict):
|
||||
return obj
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
# Try finding JSON in code blocks
|
||||
code_block = re.search(r"```(?:json)?\s*({.*?})\s*```", text, re.DOTALL)
|
||||
if code_block:
|
||||
try:
|
||||
return json.loads(code_block.group(1))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
# Try finding any JSON object
|
||||
json_match = re.search(r"({[^{}]*(?:{[^{}]*}[^{}]*)*})", text)
|
||||
if json_match:
|
||||
try:
|
||||
return json.loads(json_match.group(1))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _has_json_tool_call(resp: str, expected_name: str, expected_args: dict) -> dict:
|
||||
"""Check if response contains a valid tool call with expected name and args."""
|
||||
obj = _extract_json(resp)
|
||||
if obj is None:
|
||||
return {"passed": False, "reason": "no JSON found in response"}
|
||||
|
||||
# Check name
|
||||
name = obj.get("name", obj.get("function", {}).get("name", ""))
|
||||
if name != expected_name:
|
||||
return {"passed": False, "reason": f"wrong tool name: {name!r}, expected {expected_name!r}"}
|
||||
|
||||
# Check arguments exist
|
||||
args = obj.get("arguments", obj.get("function", {}).get("arguments", obj.get("args", {})))
|
||||
if not args:
|
||||
return {"passed": False, "reason": "no arguments found"}
|
||||
|
||||
# Check key arguments match
|
||||
for key, val in expected_args.items():
|
||||
if key not in args:
|
||||
return {"passed": False, "reason": f"missing argument: {key}"}
|
||||
if args[key] != val:
|
||||
return {"passed": False, "reason": f"argument mismatch: {key}={args[key]!r}, expected {val!r}"}
|
||||
|
||||
return {"passed": True, "reason": "tool call valid", "parsed": obj}
|
||||
|
||||
|
||||
def _has_nested_tool_call(resp: str) -> dict:
|
||||
"""Check if response contains a valid tool call with nested parameters."""
|
||||
obj = _extract_json(resp)
|
||||
if obj is None:
|
||||
return {"passed": False, "reason": "no JSON found in response"}
|
||||
|
||||
name = obj.get("name", obj.get("function", {}).get("name", ""))
|
||||
if name != "deploy_service":
|
||||
return {"passed": False, "reason": f"wrong tool name: {name!r}"}
|
||||
|
||||
args = obj.get("arguments", obj.get("function", {}).get("arguments", obj.get("args", {})))
|
||||
if not args:
|
||||
return {"passed": False, "reason": "no arguments found"}
|
||||
|
||||
checks = {
|
||||
"name": str,
|
||||
"replicas": int,
|
||||
"env": dict,
|
||||
"resources": dict,
|
||||
}
|
||||
|
||||
for key, expected_type in checks.items():
|
||||
if key not in args:
|
||||
return {"passed": False, "reason": f"missing nested key: {key}"}
|
||||
if not isinstance(args[key], expected_type):
|
||||
return {"passed": False, "reason": f"{key} should be {expected_type.__name__}, got {type(args[key]).__name__}"}
|
||||
|
||||
# Check env has PORT
|
||||
env = args.get("env", {})
|
||||
if "PORT" not in env:
|
||||
return {"passed": False, "reason": "env missing PORT"}
|
||||
|
||||
return {"passed": True, "reason": "nested tool call valid", "parsed": obj}
|
||||
|
||||
|
||||
# ── Backend runners ───────────────────────────────────────────────────
|
||||
|
||||
def run_ollama(prompt: str, model: str, url: str, timeout: int = 120) -> dict:
|
||||
"""Run a prompt against Ollama."""
|
||||
api_url = f"{url.rstrip('/')}/api/generate"
|
||||
start = time.time()
|
||||
try:
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"prompt": prompt,
|
||||
"stream": False,
|
||||
"options": {"num_predict": 256, "temperature": 0}
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
return {
|
||||
"response": data.get("response", ""),
|
||||
"latency_s": round(elapsed, 3),
|
||||
"tokens": data.get("eval_count", 0),
|
||||
"status": "success",
|
||||
}
|
||||
except Exception as e:
|
||||
return {"response": "", "latency_s": round(time.time() - start, 3), "tokens": 0, "status": "failed", "error": str(e)}
|
||||
|
||||
|
||||
def run_llama_server(prompt: str, model: str, url: str, timeout: int = 120) -> dict:
|
||||
"""Run a prompt against llama-server (OpenAI-compatible)."""
|
||||
api_url = f"{url.rstrip('/')}/v1/chat/completions"
|
||||
start = time.time()
|
||||
try:
|
||||
resp = requests.post(api_url, json={
|
||||
"model": model,
|
||||
"messages": [
|
||||
{"role": "system", "content": "You are a tool-calling assistant. Respond ONLY with JSON tool calls."},
|
||||
{"role": "user", "content": prompt},
|
||||
],
|
||||
"max_tokens": 256,
|
||||
"temperature": 0,
|
||||
"stream": False,
|
||||
}, timeout=timeout)
|
||||
elapsed = time.time() - start
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
content = data.get("choices", [{}])[0].get("message", {}).get("content", "")
|
||||
usage = data.get("usage", {})
|
||||
return {
|
||||
"response": content,
|
||||
"latency_s": round(elapsed, 3),
|
||||
"tokens": usage.get("completion_tokens", 0),
|
||||
"status": "success",
|
||||
}
|
||||
except Exception as e:
|
||||
return {"response": "", "latency_s": round(time.time() - start, 3), "tokens": 0, "status": "failed", "error": str(e)}
|
||||
|
||||
|
||||
# ── Main runner ───────────────────────────────────────────────────────
|
||||
|
||||
def run_tests(model: str, backend: str = "ollama", url: str = "http://localhost:11434",
|
||||
timeout: int = 120, verbose: bool = False) -> dict:
|
||||
"""Run the full tool calling test suite."""
|
||||
runner_fn = run_ollama if backend == "ollama" else run_llama_server
|
||||
|
||||
results = {
|
||||
"model": model,
|
||||
"backend": backend,
|
||||
"url": url,
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
"tests": [],
|
||||
"summary": {"total": 0, "passed": 0, "failed": 0, "errors": 0},
|
||||
}
|
||||
|
||||
print(f"Testing tool calling on: {model} ({backend})\n")
|
||||
|
||||
for test in TESTS:
|
||||
print(f" [{test['id']}] {test['description']}...", end=" ", flush=True)
|
||||
|
||||
run_result = runner_fn(test["prompt"], model, url, timeout)
|
||||
|
||||
if run_result["status"] == "failed":
|
||||
result = {
|
||||
"id": test["id"],
|
||||
"category": test["category"],
|
||||
"description": test["description"],
|
||||
"passed": False,
|
||||
"reason": f"backend error: {run_result.get('error', 'unknown')}",
|
||||
"response": "",
|
||||
"latency_s": run_result["latency_s"],
|
||||
"tokens": 0,
|
||||
}
|
||||
results["summary"]["errors"] += 1
|
||||
print("ERROR")
|
||||
else:
|
||||
validation = test["validate"](run_result["response"])
|
||||
result = {
|
||||
"id": test["id"],
|
||||
"category": test["category"],
|
||||
"description": test["description"],
|
||||
"passed": validation["passed"],
|
||||
"reason": validation["reason"],
|
||||
"response": run_result["response"][:500],
|
||||
"latency_s": run_result["latency_s"],
|
||||
"tokens": run_result["tokens"],
|
||||
}
|
||||
if validation["passed"]:
|
            results["summary"]["passed"] += 1
            print("PASS")
        else:
            results["summary"]["failed"] += 1
            print(f"FAIL ({validation['reason']})")

        if verbose:
            print(f" Response: {run_result['response'][:200]}")

        results["summary"]["total"] += 1
        results["tests"].append(result)

    return results


def to_markdown(results: dict) -> str:
    """Format test results as a markdown report."""
    lines = []
    lines.append(f"# Tool Calling Viability: {results['model']}")
    lines.append("")
    lines.append(f"**Date**: {results['timestamp']}")
    lines.append(f"**Backend**: {results['backend']} ({results['url']})")
    lines.append(f"**Model**: {results['model']}")
    lines.append("")

    s = results["summary"]
    pass_rate = s["passed"] / s["total"] * 100 if s["total"] > 0 else 0
    lines.append(f"## Summary: {s['passed']}/{s['total']} passed ({pass_rate:.0f}%)")
    lines.append("")
    lines.append(f"| Metric | Value |")
    lines.append(f"|--------|-------|")
    lines.append(f"| Total tests | {s['total']} |")
    lines.append(f"| Passed | {s['passed']} |")
    lines.append(f"| Failed | {s['failed']} |")
    lines.append(f"| Errors | {s['errors']} |")
    lines.append("")

    lines.append("## Results by Category")
    lines.append("")
    lines.append("| Test | Category | Result | Reason | Latency | Tokens |")
    lines.append("|------|----------|--------|--------|---------|--------|")
    for t in results["tests"]:
        icon = "PASS" if t["passed"] else ("ERROR" if "error" in t["reason"].lower() else "FAIL")
        lines.append(f"| {t['id']} | {t['category']} | {icon} | {t['reason']} | {t['latency_s']}s | {t['tokens']} |")
    lines.append("")

    lines.append("## Verdict")
    lines.append("")
    if pass_rate == 100:
        lines.append("**FULLY VIABLE** — All tool calling patterns work. Ready for production edge deployment.")
    elif pass_rate >= 60:
        lines.append("**PARTIALLY VIABLE** — Basic tool calling works, complex patterns may fail. Consider for simple agents.")
    elif pass_rate >= 20:
        lines.append("**MARGINAL** — Only simplest tool calls work. Not recommended for production.")
    else:
        lines.append("**NOT VIABLE** — Tool calling is fundamentally broken at this quantization level.")
    lines.append("")

    lines.append("## Failure Analysis")
    lines.append("")
    failed = [t for t in results["tests"] if not t["passed"]]
    if not failed:
        lines.append("No failures.")
    else:
        for t in failed:
            lines.append(f"### {t['id']}")
            lines.append(f"- **Category**: {t['category']}")
            lines.append(f"- **Failure**: {t['reason']}")
            lines.append(f"- **Response** (first 300 chars): `{t['response'][:300]}`")
            lines.append("")
    lines.append("")

    lines.append("## Recommendations")
    lines.append("")
    if pass_rate >= 80:
        lines.append("- Deploy for simple single-tool-call workflows")
        lines.append("- Add retry logic for multi-step chains")
        lines.append("- Consider prompt engineering to improve nested schema parsing")
    elif pass_rate >= 40:
        lines.append("- Use for keyword/rule-based tool routing only")
        lines.append("- Do NOT use for complex multi-step workflows")
        lines.append("- Consider a larger model (Q4 quantized) as fallback")
    else:
        lines.append("- 1-bit quantization is too lossy for tool calling")
        lines.append("- Use Q4_0 as minimum viable quantization for tool use")
        lines.append("- Reserve 1-bit models for text generation only")

    return "\n".join(lines)


def main():
    parser = argparse.ArgumentParser(description="Tool Calling Viability Test for Edge Models")
    parser.add_argument("--model", "-m", required=True, help="Model name")
    parser.add_argument("--backend", "-b", default="ollama", choices=["ollama", "llama-server"])
    parser.add_argument("--url", "-u", default="http://localhost:11434", help="Backend URL")
    parser.add_argument("--timeout", "-t", type=int, default=120, help="Timeout per test (seconds)")
    parser.add_argument("--output", "-o", help="Output markdown file path")
    parser.add_argument("--json", action="store_true", help="JSON output")
    parser.add_argument("--verbose", "-v", action="store_true", help="Show full responses")
    args = parser.parse_args()

    results = run_tests(args.model, args.backend, args.url, args.timeout, args.verbose)

    if args.json:
        print(json.dumps(results, indent=2))
    else:
        md = to_markdown(results)
        if args.output:
            with open(args.output, "w") as f:
                f.write(md)
            print(f"\nReport written to: {args.output}")
        else:
            print("\n" + md)


if __name__ == "__main__":
    main()
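For reviewers who want to exercise the harness without going through the CLI, here is a minimal sketch of how the two entry points above compose. It assumes the script is importable as `test_tool_calling` from `benchmarks/` (the unit tests added later in this diff set up `sys.path` the same way), that an Ollama server is reachable at the script's default URL, and that `"my-model"` is a placeholder model name, not anything shipped in this PR.

```python
# Minimal sketch, not part of the PR: compose run_tests() and to_markdown()
# directly instead of calling main(). Arguments mirror what main() forwards
# from argparse: model, backend, url, timeout, verbose.
import test_tool_calling as tc

results = tc.run_tests("my-model", "ollama", "http://localhost:11434", 120, False)

# Render the same markdown report that --output would write to disk.
print(tc.to_markdown(results))
```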
@@ -135,7 +135,5 @@ llama-server -m model.gguf --port 8081 -ctk q8_0 -ctv turbo4 -c 131072

## References

- [TurboQuant Build Spec](../BUILD-SPEC.md)
- [Phase 1 Report](../PHASE1-REPORT.md)
- [Full Knowledge Transfer](../FULL-REPORT.md)
- [Project Status](../docs/PROJECT_STATUS.md)
- [llama.cpp TurboQuant Fork](https://github.com/TheTom/llama-cpp-turboquant)
104
tests/roundtrip_test.cpp
Normal file
@@ -0,0 +1,104 @@
#include "llama-turbo.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <random>
#include <stdexcept>
#include <string>
#include <vector>

namespace {

constexpr int kDim = 128;
constexpr float kCosineThreshold = 0.99f;
constexpr float kZeroTolerance = 1.0e-6f;

[[nodiscard]] bool all_finite(const std::vector<float> & values) {
    for (float value : values) {
        if (!std::isfinite(value)) {
            return false;
        }
    }
    return true;
}

[[nodiscard]] float max_abs(const std::vector<float> & values) {
    float best = 0.0f;
    for (float value : values) {
        best = std::max(best, std::fabs(value));
    }
    return best;
}

[[nodiscard]] float cosine_similarity(const std::vector<float> & lhs, const std::vector<float> & rhs) {
    float dot = 0.0f;
    float lhs_norm = 0.0f;
    float rhs_norm = 0.0f;
    for (int i = 0; i < kDim; ++i) {
        dot += lhs[i] * rhs[i];
        lhs_norm += lhs[i] * lhs[i];
        rhs_norm += rhs[i] * rhs[i];
    }

    const float denom = std::sqrt(lhs_norm) * std::sqrt(rhs_norm);
    return denom == 0.0f ? 1.0f : dot / denom;
}

[[nodiscard]] std::vector<float> roundtrip(const std::vector<float> & input, float & norm_out) {
    std::vector<uint8_t> packed(kDim / 2, 0);
    norm_out = -1.0f;
    polar_quant_encode_turbo4(input.data(), packed.data(), &norm_out, kDim);

    std::vector<float> decoded(kDim, 0.0f);
    polar_quant_decode_turbo4(packed.data(), decoded.data(), norm_out, kDim);
    return decoded;
}

void require(bool condition, const std::string & message) {
    if (!condition) {
        throw std::runtime_error(message);
    }
}

void test_zero_vector_roundtrip() {
    std::vector<float> zeros(kDim, 0.0f);
    float norm = -1.0f;
    const auto decoded = roundtrip(zeros, norm);

    require(norm == 0.0f, "zero vector should encode with zero norm");
    require(all_finite(decoded), "zero vector decode produced non-finite values");
    require(max_abs(decoded) <= kZeroTolerance, "zero vector decode should remain near zero");
}

void test_gaussian_roundtrip_quality() {
    std::mt19937 rng(12345);
    std::normal_distribution<float> dist(0.0f, 1.0f);

    std::vector<float> input(kDim, 0.0f);
    for (float & value : input) {
        value = dist(rng);
    }

    float norm = -1.0f;
    const auto decoded = roundtrip(input, norm);

    require(norm > 0.0f, "random vector should encode with positive norm");
    require(all_finite(decoded), "random vector decode produced non-finite values");

    const float cosine = cosine_similarity(input, decoded);
    require(cosine >= kCosineThreshold, "roundtrip cosine similarity below threshold");
}

} // namespace

int main() {
    try {
        test_zero_vector_roundtrip();
        test_gaussian_roundtrip_quality();
        std::cout << "PASS: turboquant standalone roundtrip tests\n";
        return 0;
    } catch (const std::exception & exc) {
        std::cerr << "FAIL: " << exc.what() << '\n';
        return 1;
    }
}
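The pass criterion in `roundtrip_test.cpp` above is a cosine-similarity gate: a Gaussian input must decode back with cosine similarity of at least 0.99 (`kCosineThreshold`), and the all-zero vector must encode with zero norm and stay near zero. For reference only, here is a minimal NumPy sketch of that same gate; NumPy is an assumed dependency for the illustration and is not part of this PR.

```python
# Illustration of the acceptance check used by the C++ test, not shipped code.
import numpy as np

def roundtrip_ok(original: np.ndarray, decoded: np.ndarray, threshold: float = 0.99) -> bool:
    """Mirror of the test's gate: dot / (||a|| * ||b||) >= threshold."""
    denom = np.linalg.norm(original) * np.linalg.norm(decoded)
    if denom == 0.0:
        # Matches the C++ helper, which returns 1.0 when either norm is zero.
        return True
    return float(original @ decoded) / denom >= threshold
```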
189
tests/test_tool_calling.py
Normal file
@@ -0,0 +1,189 @@
#!/usr/bin/env python3
"""
Unit tests for benchmarks/test_tool_calling.py

Tests the validation logic and report generation without
requiring a live model backend.
"""

import json
import sys
from pathlib import Path

import pytest

sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))
import test_tool_calling as tc


# ── JSON Extraction ───────────────────────────────────────────────────

class TestExtractJson:
    def test_direct_json(self):
        obj = tc._extract_json('{"name": "read_file", "arguments": {"path": "/etc/hostname"}}')
        assert obj["name"] == "read_file"

    def test_json_in_code_block(self):
        text = 'Here is the call:\n```json\n{"name": "terminal", "arguments": {"command": "ls"}}\n```'
        obj = tc._extract_json(text)
        assert obj["name"] == "terminal"

    def test_json_without_lang(self):
        text = '```\n{"name": "web_search", "arguments": {"query": "test"}}\n```'
        obj = tc._extract_json(text)
        assert obj["name"] == "web_search"

    def test_no_json(self):
        obj = tc._extract_json("I can't help with that.")
        assert obj is None

    def test_bare_json_object(self):
        text = 'Sure, here: {"name": "read_file", "arguments": {"path": "/tmp/x"}} for you.'
        obj = tc._extract_json(text)
        assert obj is not None
        assert obj["name"] == "read_file"


# ── Tool Call Validation ──────────────────────────────────────────────

class TestToolCallValidation:
    def test_exact_match(self):
        resp = '{"name": "read_file", "arguments": {"path": "/etc/hostname"}}'
        result = tc._has_json_tool_call(resp, "read_file", {"path": "/etc/hostname"})
        assert result["passed"] is True

    def test_wrong_tool_name(self):
        resp = '{"name": "write_file", "arguments": {"path": "/etc/hostname"}}'
        result = tc._has_json_tool_call(resp, "read_file", {"path": "/etc/hostname"})
        assert result["passed"] is False
        assert "wrong tool name" in result["reason"]

    def test_missing_argument(self):
        resp = '{"name": "read_file", "arguments": {}}'
        result = tc._has_json_tool_call(resp, "read_file", {"path": "/etc/hostname"})
        assert result["passed"] is False
        assert "missing argument" in result["reason"]

    def test_wrong_argument_value(self):
        resp = '{"name": "read_file", "arguments": {"path": "/etc/passwd"}}'
        result = tc._has_json_tool_call(resp, "read_file", {"path": "/etc/hostname"})
        assert result["passed"] is False
        assert "argument mismatch" in result["reason"]

    def test_no_json_response(self):
        result = tc._has_json_tool_call("Sorry, I can't do that.", "read_file", {"path": "/etc/hostname"})
        assert result["passed"] is False
        assert "no JSON" in result["reason"]

    def test_nested_function_format(self):
        resp = '{"function": {"name": "terminal", "arguments": {"command": "echo hello"}}}'
        result = tc._has_json_tool_call(resp, "terminal", {"command": "echo hello"})
        assert result["passed"] is True


# ── Nested Schema Validation ──────────────────────────────────────────

class TestNestedSchemaValidation:
    def test_valid_nested(self):
        resp = json.dumps({
            "name": "deploy_service",
            "arguments": {
                "name": "api-gateway",
                "replicas": 3,
                "env": {"PORT": 8080, "NODE_ENV": "production"},
                "resources": {"cpu": "500m", "memory": "256Mi"}
            }
        })
        result = tc._has_nested_tool_call(resp)
        assert result["passed"] is True

    def test_missing_nested_key(self):
        resp = '{"name": "deploy_service", "arguments": {"name": "api-gateway", "replicas": 3}}'
        result = tc._has_nested_tool_call(resp)
        assert result["passed"] is False
        assert "missing nested key" in result["reason"]

    def test_wrong_type(self):
        resp = '{"name": "deploy_service", "arguments": {"name": "api-gateway", "replicas": "three", "env": {}, "resources": {}}}'
        result = tc._has_nested_tool_call(resp)
        assert result["passed"] is False
        assert "should be int" in result["reason"]

    def test_missing_env_port(self):
        resp = json.dumps({
            "name": "deploy_service",
            "arguments": {"name": "api", "replicas": 1, "env": {"NODE_ENV": "dev"}, "resources": {}}
        })
        result = tc._has_nested_tool_call(resp)
        assert result["passed"] is False
        assert "PORT" in result["reason"]


# ── Markdown Report Generation ────────────────────────────────────────

class TestMarkdownReport:
    def test_report_structure(self):
        results = {
            "model": "test-model",
            "backend": "ollama",
            "url": "http://localhost:11434",
            "timestamp": "2026-04-15T00:00:00Z",
            "tests": [
                {"id": "t1", "category": "Simple", "description": "Test 1",
                 "passed": True, "reason": "ok", "response": "{}", "latency_s": 1.0, "tokens": 10},
                {"id": "t2", "category": "Complex", "description": "Test 2",
                 "passed": False, "reason": "wrong name", "response": "oops", "latency_s": 2.0, "tokens": 20},
            ],
            "summary": {"total": 2, "passed": 1, "failed": 1, "errors": 0},
        }
        md = tc.to_markdown(results)
        assert "test-model" in md
        assert "1/2 passed" in md
        assert "PASS" in md
        assert "FAIL" in md
        assert "Failure Analysis" in md

    def test_perfect_score(self):
        results = {
            "model": "perfect", "backend": "ollama", "url": "http://x",
            "timestamp": "2026-01-01T00:00:00Z",
            "tests": [
                {"id": "t1", "category": "C", "description": "D",
                 "passed": True, "reason": "ok", "response": "{}", "latency_s": 1, "tokens": 5},
            ],
            "summary": {"total": 1, "passed": 1, "failed": 0, "errors": 0},
        }
        md = tc.to_markdown(results)
        assert "FULLY VIABLE" in md

    def test_all_failed(self):
        results = {
            "model": "bad", "backend": "ollama", "url": "http://x",
            "timestamp": "2026-01-01T00:00:00Z",
            "tests": [
                {"id": "t1", "category": "C", "description": "D",
                 "passed": False, "reason": "broken", "response": "nope", "latency_s": 1, "tokens": 0},
            ],
            "summary": {"total": 1, "passed": 0, "failed": 1, "errors": 0},
        }
        md = tc.to_markdown(results)
        assert "NOT VIABLE" in md


# ── Test Definitions ──────────────────────────────────────────────────

class TestTestDefinitions:
    def test_all_tests_have_validators(self):
        for test in tc.TESTS:
            assert callable(test["validate"]), f"{test['id']} missing validate"
            assert "id" in test
            assert "category" in test
            assert "prompt" in test

    def test_five_test_categories(self):
        categories = {t["category"] for t in tc.TESTS}
        assert len(categories) >= 4, f"Expected 4+ categories, got {categories}"


if __name__ == "__main__":
    pytest.main([__file__, "-v"])