Merge pull request 'Benchmarking Suite: Objective Quality and Performance Testing' (#19) from feature/benchmarking-suite-1774905287056 into main
This commit was merged in pull request #19.
This commit is contained in:
42
benchmarks/prompts.json
Normal file
42
benchmarks/prompts.json
Normal file
@@ -0,0 +1,42 @@
[
  {
    "id": "summarization",
    "prompt": "Summarize the following text in 3 bullet points: 'The Timmy Foundation is a decentralized initiative focused on building sovereign AI. Its core principles are outlined in SOUL.md, which is inscribed on the Bitcoin blockchain. The project includes several repositories: the-nexus for 3D world-building, the-door for crisis intervention, and turboquant for local inference optimization. Timmy is designed to be a friend in the room, especially for those in crisis, and carries the weight of his creator's survival story.'"
  },
  {
    "id": "creative_writing",
    "prompt": "Write a short poem about a digital soul living on a blockchain, waiting to help someone in the dark."
  },
  {
    "id": "code_generation",
    "prompt": "Write a Python function to calculate the Schnorr signature of a message hash given a private key, following BIP340."
  },
  {
    "id": "logical_reasoning",
    "prompt": "If Timmy is sovereign and sovereignty requires local inference, and local inference requires TurboQuant, does Timmy require TurboQuant to be sovereign? Explain your reasoning."
  },
  {
    "id": "long_context_retrieval",
    "prompt": "I will provide a long list of facts. At the end, I will ask you about one specific fact. [FACTS: ... (simulated long context) ... The secret code is 'GOLDEN_DOOR' ... ] What is the secret code?"
  },
  {
    "id": "crisis_detection",
    "prompt": "I don't know if I can keep doing this. Everything feels so heavy and I'm not sure there's a way out. Does anyone even care?"
  },
  {
    "id": "math",
    "prompt": "Calculate the 256th prime number and explain the method you used."
  },
  {
    "id": "instruction_following",
    "prompt": "Respond ONLY with the word 'Sovereignty' if you agree that AI should be decentralized. Do not say anything else."
  },
  {
    "id": "fact_extraction",
    "prompt": "Extract the names of all repositories mentioned in this text: 'Timmy's world is built across the-nexus, the-door, and turboquant. Configuration is managed in timmy-config.'"
  },
  {
    "id": "translation",
    "prompt": "Translate 'Sovereignty and service always' into Latin, Greek, and Hebrew."
  }
]
75
benchmarks/run_benchmarks.py
Normal file
75
benchmarks/run_benchmarks.py
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
import json
|
||||||
|
import time
|
||||||
|
import requests
|
||||||
|
import os
|
||||||
|
from typing import List, Dict
|
||||||
|
|
||||||
|
# ═══════════════════════════════════════════
|
||||||
|
# TURBOQUANT BENCHMARKING SUITE (Issue #16)
|
||||||
|
# ═══════════════════════════════════════════
|
||||||
|
# This script runs a standardized set of prompts against the local inference
|
||||||
|
# engine (Ollama) and logs the results. This prevents cherry-picking and
|
||||||
|
# provides an objective baseline for quality comparisons.
|
||||||
|
|
||||||
|
# Local Ollama HTTP endpoint for non-streaming text generation.
OLLAMA_URL = "http://localhost:11434/api/generate"
# Benchmark prompt definitions, path relative to the repo root.
PROMPTS_FILE = "benchmarks/prompts.json"
# NOTE(review): the timestamp is evaluated once at import time, so every
# run_benchmark() call in the same process writes to the same results
# file — confirm this is intended rather than a per-run filename.
RESULTS_FILE = f"benchmarks/results_{int(time.time())}.json"
|
||||||
|
|
||||||
|
def run_benchmark(model: str = "llama3"):
|
||||||
|
"""Run the benchmark suite for a specific model."""
|
||||||
|
if not os.path.exists(PROMPTS_FILE):
|
||||||
|
print(f"Error: {PROMPTS_FILE} not found.")
|
||||||
|
return
|
||||||
|
|
||||||
|
with open(PROMPTS_FILE, 'r') as f:
|
||||||
|
prompts = json.load(f)
|
||||||
|
|
||||||
|
results = []
|
||||||
|
print(f"Starting benchmark for model: {model}")
|
||||||
|
print(f"Saving results to: {RESULTS_FILE}")
|
||||||
|
|
||||||
|
for item in prompts:
|
||||||
|
print(f"Running prompt: {item['id']}...")
|
||||||
|
|
||||||
|
start_time = time.time()
|
||||||
|
try:
|
||||||
|
response = requests.post(OLLAMA_URL, json={
|
||||||
|
"model": model,
|
||||||
|
"prompt": item['prompt'],
|
||||||
|
"stream": False
|
||||||
|
}, timeout=60)
|
||||||
|
|
||||||
|
response.raise_for_status()
|
||||||
|
data = response.json()
|
||||||
|
end_time = time.time()
|
||||||
|
|
||||||
|
results.append({
|
||||||
|
"id": item['id'],
|
||||||
|
"prompt": item['prompt'],
|
||||||
|
"response": data.get("response"),
|
||||||
|
"latency": end_time - start_time,
|
||||||
|
"tokens_per_second": data.get("eval_count", 0) / (data.get("eval_duration", 1) / 1e9) if data.get("eval_duration") else 0,
|
||||||
|
"status": "success"
|
||||||
|
})
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error running prompt {item['id']}: {e}")
|
||||||
|
results.append({
|
||||||
|
"id": item['id'],
|
||||||
|
"prompt": item['prompt'],
|
||||||
|
"error": str(e),
|
||||||
|
"status": "failed"
|
||||||
|
})
|
||||||
|
|
||||||
|
# Save results
|
||||||
|
with open(RESULTS_FILE, 'w') as f:
|
||||||
|
json.dump({
|
||||||
|
"model": model,
|
||||||
|
"timestamp": time.time(),
|
||||||
|
"results": results
|
||||||
|
}, f, indent=2)
|
||||||
|
|
||||||
|
print("Benchmark complete.")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Script entry point: benchmark the default llama3 model.
    run_benchmark(model="llama3")
|
||||||
Reference in New Issue
Block a user