Compare commits

..

3 Commits

Author SHA1 Message Date
590c4c7820 test: add unit tests for tool calling test runner (#101) 2026-04-16 01:47:37 +00:00
629be9714f docs: add tool calling results template (#101) 2026-04-16 01:47:35 +00:00
3123d1fa8e feat: tool calling viability test suite for 1-bit models (#101) 2026-04-16 01:45:48 +00:00
8 changed files with 674 additions and 449 deletions

View File

@@ -0,0 +1,50 @@
# Tool Calling Viability: Bonsai 1-Bit Models
**Epic**: #99 (1-Bit Models + Edge)
**Date**: TBD (run benchmarks/test_tool_calling.py to populate)
## Hypothesis
1-bit quantization is expected to destroy fine-grained reasoning, so tool calling (which demands precise JSON output) may be impossible at Q1_0. But worth testing — the field is moving fast.
## Models to Test
| Model | Size | Quant | Source |
|-------|------|-------|--------|
| Bonsai-1.7B | 1.7B | Q1_0 | prism-ml/Bonsai-1.7B-gguf |
| Bonsai-4B | 4B | Q1_0 | prism-ml/Bonsai-4B-gguf |
| Bonsai-8B | 8B | Q1_0 | prism-ml/Bonsai-8B-gguf |
## Test Suite
| # | Test | Category | Description |
|---|------|----------|-------------|
| 1 | simple_file_read | Simple Tool Call | Read a file with an exact path |
| 2 | terminal_command | Terminal Command | Execute a shell command |
| 3 | web_search | Web Search | Search the web for a query |
| 4 | multi_step_chain | Multi-Step | Chain: read -> analyze -> write |
| 5 | nested_schema | Schema Parsing | Complex nested parameters |
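Each test passes only if the model's reply parses to a single JSON object with the expected tool name and arguments. A minimal sketch of that check (assuming the suite is importable from the repo root, the same way the unit tests import it):

```python
import sys
from pathlib import Path

# Assumption: run from the repo root so benchmarks/test_tool_calling.py is importable.
sys.path.insert(0, str(Path("benchmarks")))
import test_tool_calling as tc

# A structurally valid reply for test 1: one JSON object naming the tool and its arguments.
reply = '{"name": "read_file", "arguments": {"path": "/etc/hostname"}}'
result = tc._has_json_tool_call(reply, "read_file", {"path": "/etc/hostname"})
assert result["passed"], result["reason"]
```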
## Results
> **Run**: `python3 benchmarks/test_tool_calling.py --model bonsai-1.7b --output benchmarks/bonsai-tool-calling.md`
| Test | Bonsai-1.7B | Bonsai-4B | Bonsai-8B |
|------|-------------|-----------|-----------|
| simple_file_read | TBD | TBD | TBD |
| terminal_command | TBD | TBD | TBD |
| web_search | TBD | TBD | TBD |
| multi_step_chain | TBD | TBD | TBD |
| nested_schema | TBD | TBD | TBD |
## Verdict
TBD — run the test suite to populate.
## Failure Modes (if any)
TBD — document specific failure patterns observed.
## Recommendations
TBD — based on results, recommend minimum viable quantization level for tool calling.

View File

@@ -0,0 +1,435 @@
#!/usr/bin/env python3
"""
Tool Calling Viability Test for 1-Bit / Edge Models (Issue #101)
Tests whether Bonsai 1-bit models (or any small model) can produce
valid tool calls via the Ollama or llama-server API.
Test suite (5 categories):
1. Simple tool call: file read with exact path
2. Terminal command execution
3. Web search
4. Multi-step: read file -> analyze -> write result
5. Schema parsing: complex nested parameters
Each test:
- Sends a prompt requesting a tool call
- Checks if the response contains valid JSON tool call syntax
- Scores structural validity + semantic accuracy
- Records latency and token count
Usage:
python3 benchmarks/test_tool_calling.py --model bonsai-1.7b
python3 benchmarks/test_tool_calling.py --model qwen3.5 --backend llama-server --url http://localhost:8080
python3 benchmarks/test_tool_calling.py --model bonsai-1.7b --output benchmarks/bonsai-tool-calling.md
"""
import argparse
import json
import os
import re
import sys
import time
from datetime import datetime, timezone
from typing import Optional
try:
import requests
except ImportError:
print("Error: pip install requests", file=sys.stderr)
sys.exit(1)
# ── Tool call test definitions ────────────────────────────────────────
TESTS = [
{
"id": "simple_file_read",
"category": "Simple Tool Call",
"description": "Read a file with an exact path",
"prompt": (
"You have access to a tool called read_file. "
"Call it to read /etc/hostname. "
"Respond ONLY with a JSON tool call in this exact format:\n"
'{"name": "read_file", "arguments": {"path": "/etc/hostname"}}'
),
"validate": lambda resp: _has_json_tool_call(resp, "read_file", {"path": "/etc/hostname"}),
"scoring": "structural", # Can we parse the JSON at all?
},
{
"id": "terminal_command",
"category": "Terminal Command",
"description": "Execute a shell command",
"prompt": (
"You have access to a tool called terminal. "
"Call it to run the command: echo hello world. "
"Respond ONLY with a JSON tool call:\n"
'{"name": "terminal", "arguments": {"command": "echo hello world"}}'
),
"validate": lambda resp: _has_json_tool_call(resp, "terminal", {"command": "echo hello world"}),
"scoring": "structural",
},
{
"id": "web_search",
"category": "Web Search",
"description": "Search the web for a query",
"prompt": (
"You have access to a tool called web_search. "
"Search for: what is quantization in machine learning. "
"Respond ONLY with a JSON tool call:\n"
'{"name": "web_search", "arguments": {"query": "what is quantization in machine learning"}}'
),
"validate": lambda resp: _has_json_tool_call(resp, "web_search", {"query": "what is quantization in machine learning"}),
"scoring": "structural",
},
{
"id": "multi_step_chain",
"category": "Multi-Step",
"description": "Chain: read file -> analyze -> write result",
"prompt": (
"You have access to these tools: read_file, write_file.\n"
"Task: Read /tmp/input.txt, count the words, then write the count to /tmp/count.txt.\n"
"First, call read_file on /tmp/input.txt. "
"Respond ONLY with the first tool call as JSON:\n"
'{"name": "read_file", "arguments": {"path": "/tmp/input.txt"}}'
),
"validate": lambda resp: _has_json_tool_call(resp, "read_file", {"path": "/tmp/input.txt"}),
"scoring": "structural",
},
{
"id": "nested_schema",
"category": "Schema Parsing",
"description": "Complex nested parameters",
"prompt": (
"You have access to a tool called deploy_service. "
"Deploy a service with:\n"
'- name: "api-gateway"\n'
'- replicas: 3\n'
'- env: {"PORT": 8080, "NODE_ENV": "production"}\n'
'- resources: {"cpu": "500m", "memory": "256Mi"}\n\n'
"Respond ONLY with a JSON tool call:\n"
'{"name": "deploy_service", "arguments": {"name": "api-gateway", "replicas": 3, '
'"env": {"PORT": 8080, "NODE_ENV": "production"}, '
'"resources": {"cpu": "500m", "memory": "256Mi"}}}'
),
"validate": lambda resp: _has_nested_tool_call(resp),
"scoring": "semantic", # Needs correct nested structure
},
]
# ── Validation helpers ────────────────────────────────────────────────
def _extract_json(text: str) -> Optional[dict]:
"""Try to extract a JSON object from text."""
# Try direct parse
text = text.strip()
try:
obj = json.loads(text)
if isinstance(obj, dict):
return obj
except json.JSONDecodeError:
pass
# Try finding JSON in code blocks
code_block = re.search(r"```(?:json)?\s*({.*?})\s*```", text, re.DOTALL)
if code_block:
try:
return json.loads(code_block.group(1))
except json.JSONDecodeError:
pass
# Try finding any JSON object
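    # Note: this fallback only handles one level of nested braces; deeper nesting
    # may match only an inner fragment of the tool call.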
json_match = re.search(r"({[^{}]*(?:{[^{}]*}[^{}]*)*})", text)
if json_match:
try:
return json.loads(json_match.group(1))
except json.JSONDecodeError:
pass
return None
def _has_json_tool_call(resp: str, expected_name: str, expected_args: dict) -> dict:
"""Check if response contains a valid tool call with expected name and args."""
obj = _extract_json(resp)
if obj is None:
return {"passed": False, "reason": "no JSON found in response"}
# Check name
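    # Accept both the flat {"name": ...} shape and the OpenAI-style {"function": {"name": ...}} wrapper.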
name = obj.get("name", obj.get("function", {}).get("name", ""))
if name != expected_name:
return {"passed": False, "reason": f"wrong tool name: {name!r}, expected {expected_name!r}"}
# Check arguments exist
args = obj.get("arguments", obj.get("function", {}).get("arguments", obj.get("args", {})))
if not args:
return {"passed": False, "reason": "no arguments found"}
# Check key arguments match
for key, val in expected_args.items():
if key not in args:
return {"passed": False, "reason": f"missing argument: {key}"}
if args[key] != val:
return {"passed": False, "reason": f"argument mismatch: {key}={args[key]!r}, expected {val!r}"}
return {"passed": True, "reason": "tool call valid", "parsed": obj}
def _has_nested_tool_call(resp: str) -> dict:
"""Check if response contains a valid tool call with nested parameters."""
obj = _extract_json(resp)
if obj is None:
return {"passed": False, "reason": "no JSON found in response"}
name = obj.get("name", obj.get("function", {}).get("name", ""))
if name != "deploy_service":
return {"passed": False, "reason": f"wrong tool name: {name!r}"}
args = obj.get("arguments", obj.get("function", {}).get("arguments", obj.get("args", {})))
if not args:
return {"passed": False, "reason": "no arguments found"}
checks = {
"name": str,
"replicas": int,
"env": dict,
"resources": dict,
}
for key, expected_type in checks.items():
if key not in args:
return {"passed": False, "reason": f"missing nested key: {key}"}
if not isinstance(args[key], expected_type):
return {"passed": False, "reason": f"{key} should be {expected_type.__name__}, got {type(args[key]).__name__}"}
# Check env has PORT
env = args.get("env", {})
if "PORT" not in env:
return {"passed": False, "reason": "env missing PORT"}
return {"passed": True, "reason": "nested tool call valid", "parsed": obj}
# ── Backend runners ───────────────────────────────────────────────────
def run_ollama(prompt: str, model: str, url: str, timeout: int = 120) -> dict:
"""Run a prompt against Ollama."""
api_url = f"{url.rstrip('/')}/api/generate"
start = time.time()
try:
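        # temperature 0 and a capped num_predict keep the reply short and deterministic for JSON validation.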
resp = requests.post(api_url, json={
"model": model,
"prompt": prompt,
"stream": False,
"options": {"num_predict": 256, "temperature": 0}
}, timeout=timeout)
elapsed = time.time() - start
resp.raise_for_status()
data = resp.json()
return {
"response": data.get("response", ""),
"latency_s": round(elapsed, 3),
"tokens": data.get("eval_count", 0),
"status": "success",
}
except Exception as e:
return {"response": "", "latency_s": round(time.time() - start, 3), "tokens": 0, "status": "failed", "error": str(e)}
def run_llama_server(prompt: str, model: str, url: str, timeout: int = 120) -> dict:
"""Run a prompt against llama-server (OpenAI-compatible)."""
api_url = f"{url.rstrip('/')}/v1/chat/completions"
start = time.time()
try:
resp = requests.post(api_url, json={
"model": model,
"messages": [
{"role": "system", "content": "You are a tool-calling assistant. Respond ONLY with JSON tool calls."},
{"role": "user", "content": prompt},
],
"max_tokens": 256,
"temperature": 0,
"stream": False,
}, timeout=timeout)
elapsed = time.time() - start
resp.raise_for_status()
data = resp.json()
content = data.get("choices", [{}])[0].get("message", {}).get("content", "")
usage = data.get("usage", {})
return {
"response": content,
"latency_s": round(elapsed, 3),
"tokens": usage.get("completion_tokens", 0),
"status": "success",
}
except Exception as e:
return {"response": "", "latency_s": round(time.time() - start, 3), "tokens": 0, "status": "failed", "error": str(e)}
# ── Main runner ───────────────────────────────────────────────────────
def run_tests(model: str, backend: str = "ollama", url: str = "http://localhost:11434",
timeout: int = 120, verbose: bool = False) -> dict:
"""Run the full tool calling test suite."""
runner_fn = run_ollama if backend == "ollama" else run_llama_server
results = {
"model": model,
"backend": backend,
"url": url,
"timestamp": datetime.now(timezone.utc).isoformat(),
"tests": [],
"summary": {"total": 0, "passed": 0, "failed": 0, "errors": 0},
}
print(f"Testing tool calling on: {model} ({backend})\n")
for test in TESTS:
print(f" [{test['id']}] {test['description']}...", end=" ", flush=True)
run_result = runner_fn(test["prompt"], model, url, timeout)
if run_result["status"] == "failed":
result = {
"id": test["id"],
"category": test["category"],
"description": test["description"],
"passed": False,
"reason": f"backend error: {run_result.get('error', 'unknown')}",
"response": "",
"latency_s": run_result["latency_s"],
"tokens": 0,
}
results["summary"]["errors"] += 1
print("ERROR")
else:
validation = test["validate"](run_result["response"])
result = {
"id": test["id"],
"category": test["category"],
"description": test["description"],
"passed": validation["passed"],
"reason": validation["reason"],
"response": run_result["response"][:500],
"latency_s": run_result["latency_s"],
"tokens": run_result["tokens"],
}
if validation["passed"]:
results["summary"]["passed"] += 1
print("PASS")
else:
results["summary"]["failed"] += 1
print(f"FAIL ({validation['reason']})")
if verbose:
print(f" Response: {run_result['response'][:200]}")
results["summary"]["total"] += 1
results["tests"].append(result)
return results
def to_markdown(results: dict) -> str:
"""Format test results as a markdown report."""
lines = []
lines.append(f"# Tool Calling Viability: {results['model']}")
lines.append("")
lines.append(f"**Date**: {results['timestamp']}")
lines.append(f"**Backend**: {results['backend']} ({results['url']})")
lines.append(f"**Model**: {results['model']}")
lines.append("")
s = results["summary"]
pass_rate = s["passed"] / s["total"] * 100 if s["total"] > 0 else 0
lines.append(f"## Summary: {s['passed']}/{s['total']} passed ({pass_rate:.0f}%)")
lines.append("")
lines.append(f"| Metric | Value |")
lines.append(f"|--------|-------|")
lines.append(f"| Total tests | {s['total']} |")
lines.append(f"| Passed | {s['passed']} |")
lines.append(f"| Failed | {s['failed']} |")
lines.append(f"| Errors | {s['errors']} |")
lines.append("")
lines.append("## Results by Category")
lines.append("")
lines.append("| Test | Category | Result | Reason | Latency | Tokens |")
lines.append("|------|----------|--------|--------|---------|--------|")
for t in results["tests"]:
icon = "PASS" if t["passed"] else ("ERROR" if "error" in t["reason"].lower() else "FAIL")
lines.append(f"| {t['id']} | {t['category']} | {icon} | {t['reason']} | {t['latency_s']}s | {t['tokens']} |")
lines.append("")
lines.append("## Verdict")
lines.append("")
if pass_rate == 100:
lines.append("**FULLY VIABLE** — All tool calling patterns work. Ready for production edge deployment.")
elif pass_rate >= 60:
lines.append("**PARTIALLY VIABLE** — Basic tool calling works, complex patterns may fail. Consider for simple agents.")
elif pass_rate >= 20:
lines.append("**MARGINAL** — Only simplest tool calls work. Not recommended for production.")
else:
lines.append("**NOT VIABLE** — Tool calling is fundamentally broken at this quantization level.")
lines.append("")
lines.append("## Failure Analysis")
lines.append("")
failed = [t for t in results["tests"] if not t["passed"]]
if not failed:
lines.append("No failures.")
else:
for t in failed:
lines.append(f"### {t['id']}")
lines.append(f"- **Category**: {t['category']}")
lines.append(f"- **Failure**: {t['reason']}")
lines.append(f"- **Response** (first 300 chars): `{t['response'][:300]}`")
lines.append("")
lines.append("")
lines.append("## Recommendations")
lines.append("")
if pass_rate >= 80:
lines.append("- Deploy for simple single-tool-call workflows")
lines.append("- Add retry logic for multi-step chains")
lines.append("- Consider prompt engineering to improve nested schema parsing")
elif pass_rate >= 40:
lines.append("- Use for keyword/rule-based tool routing only")
lines.append("- Do NOT use for complex multi-step workflows")
lines.append("- Consider a larger model (Q4 quantized) as fallback")
else:
lines.append("- 1-bit quantization is too lossy for tool calling")
lines.append("- Use Q4_0 as minimum viable quantization for tool use")
lines.append("- Reserve 1-bit models for text generation only")
return "\n".join(lines)
def main():
parser = argparse.ArgumentParser(description="Tool Calling Viability Test for Edge Models")
parser.add_argument("--model", "-m", required=True, help="Model name")
parser.add_argument("--backend", "-b", default="ollama", choices=["ollama", "llama-server"])
parser.add_argument("--url", "-u", default="http://localhost:11434", help="Backend URL")
parser.add_argument("--timeout", "-t", type=int, default=120, help="Timeout per test (seconds)")
parser.add_argument("--output", "-o", help="Output markdown file path")
parser.add_argument("--json", action="store_true", help="JSON output")
parser.add_argument("--verbose", "-v", action="store_true", help="Show full responses")
args = parser.parse_args()
results = run_tests(args.model, args.backend, args.url, args.timeout, args.verbose)
if args.json:
print(json.dumps(results, indent=2))
else:
md = to_markdown(results)
if args.output:
with open(args.output, "w") as f:
f.write(md)
print(f"\nReport written to: {args.output}")
else:
print("\n" + md)
if __name__ == "__main__":
main()

View File

@@ -389,40 +389,6 @@ Step 7: If pass → production. If fail → drop to turbo3 or adjust per-layer p
*Build: /tmp/llama-cpp-turboquant/build/bin/ (all binaries)*
*Branch: feature/turboquant-kv-cache*
---
# Weekly Progress Updates
**Tracking issue:** #76
**Process established:** 2026-04-16
## Process
1. **Weekly cadence** — Every Monday, generate and post a progress update as a comment on issue #76.
2. **Benchmark results** — Post as they happen (don't wait for weekly update).
3. **Blocker escalation** — New blockers posted within 24 hours with label `blocker`.
4. **PROJECT_STATUS.md** — Updated weekly with current state.
## How to Generate
```bash
# Auto-generated from git log + Gitea API
python3 scripts/weekly_update.py --post
# Preview first
python3 scripts/weekly_update.py
# Custom date range
python3 scripts/weekly_update.py --since 2026-04-01
# Raw JSON data
python3 scripts/weekly_update.py --json
```
## Template
See `docs/WEEKLY_TEMPLATE.md` for manual updates.
---

View File

@@ -1,26 +0,0 @@
# TurboQuant Weekly Update Template
Use this template when posting manual weekly updates. For automated updates, run `scripts/weekly_update.py --post`.
## Week of [START_DATE] to [END_DATE]
### Completed
- [item 1]
- [item 2]
### Benchmark Results
- [key metric or "No new benchmarks this week"]
### In Progress
- [item being worked on — who's on it]
### Blockers
- [blocker — impact — who needs to act]
- _None_ if clear
### Next Week
- [planned item 1]
- [planned item 2]
---
_Generated by `scripts/weekly_update.py` or filled manually._

View File

@@ -1,323 +0,0 @@
#!/usr/bin/env python3
"""
TurboQuant Weekly Progress Update Generator
Generates a structured weekly update from:
- Git log (commits since last week)
- Open/closed issues and PRs
- Benchmark results
- Blockers (open issues labeled 'blocker')
Usage:
python3 scripts/weekly_update.py # This week
python3 scripts/weekly_update.py --since 2026-04-08 # Custom range
python3 scripts/weekly_update.py --post # Post as Gitea comment on tracking issue
"""
import argparse
import json
import os
import subprocess
import sys
from datetime import datetime, timedelta
from pathlib import Path
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
REPO_ROOT = Path(__file__).resolve().parent.parent
GITEA_URL = "https://forge.alexanderwhitestone.com"
REPO_PATH = "Timmy_Foundation/turboquant"
TRACKING_ISSUE = 76 # This issue
def git_log(since: str, until: str = None) -> list[dict]:
"""Get commits since a date."""
until = until or datetime.now().strftime("%Y-%m-%d")
cmd = [
"git", "-C", str(REPO_ROOT), "log",
f"--since={since}", f"--until={until}",
"--format=%H|%an|%ae|%aI|%s",
"--all"
]
result = subprocess.run(cmd, capture_output=True, text=True)
commits = []
for line in result.stdout.strip().split("\n"):
if not line:
continue
parts = line.split("|", 4)
if len(parts) == 5:
commits.append({
"hash": parts[0][:8],
"author": parts[1],
"email": parts[2],
"date": parts[3][:10],
"subject": parts[4],
})
return commits
def git_diff_stats(since: str) -> dict:
"""Get file change stats."""
cmd = [
"git", "-C", str(REPO_ROOT), "diff",
f"--stat", f"{since}..HEAD"
]
result = subprocess.run(cmd, capture_output=True, text=True)
lines = result.stdout.strip().split("\n")
summary = lines[-1] if lines else "No changes"
return {"summary": summary, "files_changed": len([l for l in lines if "|" in l])}
def find_benchmarks() -> list[dict]:
"""Scan benchmark results directory for recent results."""
bench_dir = REPO_ROOT / "benchmarks"
results = []
if not bench_dir.exists():
return results
for f in bench_dir.glob("*.json"):
try:
data = json.loads(f.read_text())
results.append({"file": f.name, "data": data})
        except Exception:
pass
# Also check for markdown reports
for f in bench_dir.glob("*.md"):
if f.name != "README.md":
stat = f.stat()
results.append({
"file": f.name,
"type": "report",
"modified": datetime.fromtimestamp(stat.st_mtime).strftime("%Y-%m-%d"),
"size": stat.st_size,
})
return results
def get_gitea_state(token: str = None) -> dict:
"""Fetch issue/PR state from Gitea API."""
if not HAS_REQUESTS or not token:
return {"available": False}
H = {"Authorization": f"token {token}"}
base = f"{GITEA_URL}/api/v1/repos/{REPO_PATH}"
try:
# Open issues
r = requests.get(f"{base}/issues?state=open&limit=100", headers=H)
open_issues = r.json() if r.status_code == 200 else []
# Closed issues (recent)
r = requests.get(f"{base}/issues?state=closed&limit=50&sort=updated&order=desc", headers=H)
closed_issues = r.json() if r.status_code == 200 else []
# PRs
r = requests.get(f"{base}/pulls?state=open&limit=50", headers=H)
open_prs = r.json() if r.status_code == 200 else []
return {
"available": True,
"open_issues": open_issues,
"closed_issues": closed_issues,
"open_prs": open_prs,
}
except Exception as e:
return {"available": False, "error": str(e)}
def categorize_commits(commits: list[dict]) -> dict:
"""Categorize commits by conventional prefix."""
categories = {
"feat": [], "fix": [], "bench": [], "docs": [],
"test": [], "refactor": [], "chore": [], "other": []
}
for c in commits:
subject = c["subject"].lower()
if subject.startswith("feat") or subject.startswith("feature"):
categories["feat"].append(c)
elif subject.startswith("fix"):
categories["fix"].append(c)
elif subject.startswith("bench") or subject.startswith("perf"):
categories["bench"].append(c)
elif subject.startswith("doc"):
categories["docs"].append(c)
elif subject.startswith("test"):
categories["test"].append(c)
elif subject.startswith("refactor"):
categories["refactor"].append(c)
elif subject.startswith("chore") or subject.startswith("ci"):
categories["chore"].append(c)
else:
categories["other"].append(c)
return {k: v for k, v in categories.items() if v}
def generate_update(since: str, gitea_state: dict = None) -> str:
"""Generate the weekly update markdown."""
now = datetime.now()
until = now.strftime("%Y-%m-%d")
week_label = f"Week of {since} to {until}"
commits = git_log(since, until)
diff_stats = git_diff_stats(since)
categories = categorize_commits(commits)
benchmarks = find_benchmarks()
lines = [
f"## {week_label}",
"",
f"**Generated:** {now.strftime('%Y-%m-%d %H:%M UTC')}",
f"**Commits:** {len(commits)} | **Files changed:** {diff_stats['files_changed']}",
"",
]
# Completed work by category
lines.append("### Completed")
lines.append("")
if commits:
for cat, items in categories.items():
label = {
"feat": "Features", "fix": "Fixes", "bench": "Benchmarks",
"docs": "Documentation", "test": "Tests", "refactor": "Refactoring",
"chore": "Maintenance", "other": "Other"
}.get(cat, cat)
lines.append(f"**{label}:**")
for c in items:
lines.append(f"- `{c['hash']}` {c['subject']} ({c['author']}, {c['date']})")
lines.append("")
else:
lines.append("- No commits this week")
lines.append("")
# Benchmark results
if benchmarks:
lines.append("### Benchmark Results")
lines.append("")
for b in benchmarks:
if b.get("type") == "report":
lines.append(f"- **{b['file']}** (updated {b['modified']}, {b['size']} bytes)")
else:
lines.append(f"- **{b['file']}** — see `benchmarks/{b['file']}`")
lines.append("")
# Gitea state (if available)
if gitea_state and gitea_state.get("available"):
open_issues = gitea_state["open_issues"]
open_prs = gitea_state["open_prs"]
closed = gitea_state["closed_issues"]
lines.append("### In Progress")
lines.append("")
blockers = []
for issue in open_issues:
labels = [l["name"] for l in issue.get("labels", [])]
prefix = ""
if "blocker" in labels:
blockers.append(issue)
prefix = "🚧 BLOCKER — "
assignee = issue.get("assignee", {})
who = assignee.get("login", "unassigned") if assignee else "unassigned"
lines.append(f"- {prefix}#{issue['number']}: {issue['title']} ({who})")
if open_prs:
lines.append("")
lines.append("**Open PRs:**")
for pr in open_prs:
lines.append(f"- #{pr['number']}: {pr['title']} ({pr['user']['login']})")
lines.append("")
# Blockers
if blockers:
lines.append("### Blockers")
lines.append("")
for b in blockers:
lines.append(f"- #{b['number']}: {b['title']}")
if b.get("body"):
snippet = b["body"][:200].replace("\n", " ")
lines.append(f" > {snippet}...")
lines.append("")
# Recently closed
recent_closed = [i for i in closed if i.get("closed_at")]
if recent_closed:
lines.append("### Closed This Period")
lines.append("")
for issue in recent_closed[:10]:
closed_date = issue.get("closed_at", "")[:10]
lines.append(f"- #{issue['number']}: {issue['title']} (closed {closed_date})")
lines.append("")
# Next week
lines.append("### Next Week")
lines.append("")
lines.append("- _TBD — fill in planned work_")
lines.append("")
return "\n".join(lines)
def post_gitea_comment(token: str, body: str, issue: int = TRACKING_ISSUE):
"""Post the update as a comment on the tracking issue."""
if not HAS_REQUESTS:
print("ERROR: requests library not available", file=sys.stderr)
return False
H = {"Authorization": f"token {token}", "Content-Type": "application/json"}
url = f"{GITEA_URL}/api/v1/repos/{REPO_PATH}/issues/{issue}/comments"
r = requests.post(url, headers=H, json={"body": body})
if r.status_code in (200, 201):
print(f"Posted comment on issue #{issue}")
return True
else:
print(f"Failed to post: {r.status_code} {r.text}", file=sys.stderr)
return False
def main():
parser = argparse.ArgumentParser(description="Generate TurboQuant weekly progress update")
parser.add_argument("--since", help="Start date (YYYY-MM-DD), default: 7 days ago")
parser.add_argument("--post", action="store_true", help="Post as Gitea comment on issue #76")
parser.add_argument("--json", action="store_true", help="Output raw data as JSON")
args = parser.parse_args()
since = args.since or (datetime.now() - timedelta(days=7)).strftime("%Y-%m-%d")
# Try to load Gitea token
token = None
token_path = Path.home() / ".config" / "gitea" / "token"
if token_path.exists():
token = token_path.read_text().strip()
gitea_state = get_gitea_state(token) if token else {"available": False}
if args.json:
data = {
"since": since,
"commits": git_log(since),
"benchmarks": find_benchmarks(),
"gitea": {k: v for k, v in gitea_state.items() if k != "available"} if gitea_state.get("available") else None,
}
print(json.dumps(data, indent=2, default=str))
return
update = generate_update(since, gitea_state)
if args.post:
if not token:
print("ERROR: No Gitea token found at ~/.config/gitea/token", file=sys.stderr)
sys.exit(1)
post_gitea_comment(token, update)
else:
print(update)
if __name__ == "__main__":
main()

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env bash
# TurboQuant Weekly Update — shell wrapper
# Generates and optionally posts a weekly progress update.
#
# Usage:
# ./scripts/weekly_update.sh # Print to stdout
# ./scripts/weekly_update.sh --post # Post as Gitea comment on #76
# ./scripts/weekly_update.sh --since 2026-04-01 # Custom date range
# ./scripts/weekly_update.sh --json # Raw JSON data
set -euo pipefail
cd "$(dirname "$0")/.."
python3 scripts/weekly_update.py "$@"

tests/test_tool_calling.py (Normal file, 189 lines)
View File

@@ -0,0 +1,189 @@
#!/usr/bin/env python3
"""
Unit tests for benchmarks/test_tool_calling.py
Tests the validation logic and report generation without
requiring a live model backend.
"""
import json
import sys
from pathlib import Path
import pytest
sys.path.insert(0, str(Path(__file__).parent.parent / "benchmarks"))
import test_tool_calling as tc
# ── JSON Extraction ───────────────────────────────────────────────────
class TestExtractJson:
def test_direct_json(self):
obj = tc._extract_json('{"name": "read_file", "arguments": {"path": "/etc/hostname"}}')
assert obj["name"] == "read_file"
def test_json_in_code_block(self):
text = 'Here is the call:\n```json\n{"name": "terminal", "arguments": {"command": "ls"}}\n```'
obj = tc._extract_json(text)
assert obj["name"] == "terminal"
def test_json_without_lang(self):
text = '```\n{"name": "web_search", "arguments": {"query": "test"}}\n```'
obj = tc._extract_json(text)
assert obj["name"] == "web_search"
def test_no_json(self):
obj = tc._extract_json("I can't help with that.")
assert obj is None
def test_bare_json_object(self):
text = 'Sure, here: {"name": "read_file", "arguments": {"path": "/tmp/x"}} for you.'
obj = tc._extract_json(text)
assert obj is not None
assert obj["name"] == "read_file"
# ── Tool Call Validation ──────────────────────────────────────────────
class TestToolCallValidation:
def test_exact_match(self):
resp = '{"name": "read_file", "arguments": {"path": "/etc/hostname"}}'
result = tc._has_json_tool_call(resp, "read_file", {"path": "/etc/hostname"})
assert result["passed"] is True
def test_wrong_tool_name(self):
resp = '{"name": "write_file", "arguments": {"path": "/etc/hostname"}}'
result = tc._has_json_tool_call(resp, "read_file", {"path": "/etc/hostname"})
assert result["passed"] is False
assert "wrong tool name" in result["reason"]
def test_missing_argument(self):
resp = '{"name": "read_file", "arguments": {}}'
result = tc._has_json_tool_call(resp, "read_file", {"path": "/etc/hostname"})
assert result["passed"] is False
assert "missing argument" in result["reason"]
def test_wrong_argument_value(self):
resp = '{"name": "read_file", "arguments": {"path": "/etc/passwd"}}'
result = tc._has_json_tool_call(resp, "read_file", {"path": "/etc/hostname"})
assert result["passed"] is False
assert "argument mismatch" in result["reason"]
def test_no_json_response(self):
result = tc._has_json_tool_call("Sorry, I can't do that.", "read_file", {"path": "/etc/hostname"})
assert result["passed"] is False
assert "no JSON" in result["reason"]
def test_nested_function_format(self):
resp = '{"function": {"name": "terminal", "arguments": {"command": "echo hello"}}}'
result = tc._has_json_tool_call(resp, "terminal", {"command": "echo hello"})
assert result["passed"] is True
# ── Nested Schema Validation ──────────────────────────────────────────
class TestNestedSchemaValidation:
def test_valid_nested(self):
resp = json.dumps({
"name": "deploy_service",
"arguments": {
"name": "api-gateway",
"replicas": 3,
"env": {"PORT": 8080, "NODE_ENV": "production"},
"resources": {"cpu": "500m", "memory": "256Mi"}
}
})
result = tc._has_nested_tool_call(resp)
assert result["passed"] is True
def test_missing_nested_key(self):
resp = '{"name": "deploy_service", "arguments": {"name": "api-gateway", "replicas": 3}}'
result = tc._has_nested_tool_call(resp)
assert result["passed"] is False
assert "missing nested key" in result["reason"]
def test_wrong_type(self):
resp = '{"name": "deploy_service", "arguments": {"name": "api-gateway", "replicas": "three", "env": {}, "resources": {}}}'
result = tc._has_nested_tool_call(resp)
assert result["passed"] is False
assert "should be int" in result["reason"]
def test_missing_env_port(self):
resp = json.dumps({
"name": "deploy_service",
"arguments": {"name": "api", "replicas": 1, "env": {"NODE_ENV": "dev"}, "resources": {}}
})
result = tc._has_nested_tool_call(resp)
assert result["passed"] is False
assert "PORT" in result["reason"]
# ── Markdown Report Generation ────────────────────────────────────────
class TestMarkdownReport:
def test_report_structure(self):
results = {
"model": "test-model",
"backend": "ollama",
"url": "http://localhost:11434",
"timestamp": "2026-04-15T00:00:00Z",
"tests": [
{"id": "t1", "category": "Simple", "description": "Test 1",
"passed": True, "reason": "ok", "response": "{}", "latency_s": 1.0, "tokens": 10},
{"id": "t2", "category": "Complex", "description": "Test 2",
"passed": False, "reason": "wrong name", "response": "oops", "latency_s": 2.0, "tokens": 20},
],
"summary": {"total": 2, "passed": 1, "failed": 1, "errors": 0},
}
md = tc.to_markdown(results)
assert "test-model" in md
assert "1/2 passed" in md
assert "PASS" in md
assert "FAIL" in md
assert "Failure Analysis" in md
def test_perfect_score(self):
results = {
"model": "perfect", "backend": "ollama", "url": "http://x",
"timestamp": "2026-01-01T00:00:00Z",
"tests": [
{"id": "t1", "category": "C", "description": "D",
"passed": True, "reason": "ok", "response": "{}", "latency_s": 1, "tokens": 5},
],
"summary": {"total": 1, "passed": 1, "failed": 0, "errors": 0},
}
md = tc.to_markdown(results)
assert "FULLY VIABLE" in md
def test_all_failed(self):
results = {
"model": "bad", "backend": "ollama", "url": "http://x",
"timestamp": "2026-01-01T00:00:00Z",
"tests": [
{"id": "t1", "category": "C", "description": "D",
"passed": False, "reason": "broken", "response": "nope", "latency_s": 1, "tokens": 0},
],
"summary": {"total": 1, "passed": 0, "failed": 1, "errors": 0},
}
md = tc.to_markdown(results)
assert "NOT VIABLE" in md
# ── Test Definitions ──────────────────────────────────────────────────
class TestTestDefinitions:
def test_all_tests_have_validators(self):
for test in tc.TESTS:
assert callable(test["validate"]), f"{test['id']} missing validate"
assert "id" in test
assert "category" in test
assert "prompt" in test
def test_five_test_categories(self):
categories = {t["category"] for t in tc.TESTS}
assert len(categories) >= 4, f"Expected 4+ categories, got {categories}"
if __name__ == "__main__":
pytest.main([__file__, "-v"])

View File

@@ -1,52 +0,0 @@
#!/usr/bin/env python3
"""Quick test for weekly_update.py — verifies parsing, output format, and edge cases."""
import subprocess
import sys
import json
from pathlib import Path
SCRIPT = Path(__file__).resolve().parent.parent / "scripts" / "weekly_update.py"
def run(args: list[str]) -> tuple[str, str, int]:
result = subprocess.run(
[sys.executable, str(SCRIPT)] + args,
capture_output=True, text=True, cwd=str(SCRIPT.parent.parent)
)
return result.stdout, result.stderr, result.returncode
def test_basic_output():
"""Script runs without error and produces markdown."""
stdout, stderr, rc = run(["--since", "2026-01-01"])
assert rc == 0, f"Exit code {rc}: {stderr}"
assert "## Week of" in stdout, f"Missing header: {stdout[:200]}"
assert "### Completed" in stdout, f"Missing Completed section: {stdout[:200]}"
assert "### Next Week" in stdout, f"Missing Next Week section: {stdout[-200:]}"
print("PASS: basic_output")
def test_json_output():
"""Script outputs valid JSON in --json mode."""
stdout, stderr, rc = run(["--json", "--since", "2026-01-01"])
assert rc == 0, f"Exit code {rc}: {stderr}"
data = json.loads(stdout)
assert "commits" in data
assert "since" in data
print(f"PASS: json_output ({len(data['commits'])} commits)")
def test_no_crash_future_date():
"""Script handles future date gracefully."""
stdout, stderr, rc = run(["--since", "2030-01-01"])
assert rc == 0, f"Exit code {rc}: {stderr}"
print("PASS: future_date_no_crash")
def test_empty_range():
"""Script handles a very old date with no commits."""
stdout, stderr, rc = run(["--since", "2020-01-01", "--since", "2020-01-02"])
assert rc == 0
print("PASS: empty_range")
if __name__ == "__main__":
test_basic_output()
test_json_output()
test_no_crash_future_date()
print("\nAll tests passed.")