#!/usr/bin/env python3
"""Evaluate Qwen3.5:35B as a local model option -- Issue #288, Epic #281."""

import json
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, Optional


@dataclass
class ModelSpec:
    """Static specification of the candidate model."""

    name: str = "Qwen3.5-35B-A3B"
    ollama_tag: str = "qwen3.5:35b"
    hf_id: str = "Qwen/Qwen3.5-35B-A3B"
    architecture: str = "MoE (Mixture of Experts)"
    total_params: str = "35B"
    active_params: str = "3B per token"
    context_length: int = 131072
    license: str = "Apache 2.0"
    tool_use_support: bool = True
    json_mode_support: bool = True
    function_calling: bool = True
    # Approximate VRAM footprint in GB per GGUF quantization level.
    quantization_options: Dict[str, int] = field(default_factory=lambda: {
        "Q8_0": 36,
        "Q6_K": 28,
        "Q5_K_M": 24,
        "Q4_K_M": 20,
        "Q4_0": 18,
        "Q3_K_M": 15,
        "Q2_K": 12,
    })


# Candidate alongside the models already in the fleet.
FLEET_MODELS = {
    "qwen3.5:35b (candidate)": {"params_total": "35B", "context": "128K", "local": True, "tool_use": True, "reasoning": "good"},
    "gemma4 (current local)": {"params_total": "9B", "context": "128K", "local": True, "tool_use": True, "reasoning": "good"},
    "hermes4:14b (current local)": {"params_total": "14B", "context": "8K", "local": True, "tool_use": True, "reasoning": "good"},
    "qwen2.5:7b (fleet)": {"params_total": "7B", "context": "32K", "local": True, "tool_use": True, "reasoning": "moderate"},
    "claude-sonnet-4 (cloud)": {"params_total": "?", "context": "200K", "local": False, "tool_use": True, "reasoning": "excellent"},
    "mimo-v2-pro (cloud free)": {"params_total": "?", "context": "128K", "local": False, "tool_use": True, "reasoning": "good"},
}

# Vitalik Secure LLM framework criteria, scored 0-10 and weighted by severity.
SECURITY_CRITERIA = [
    {"criterion": "Data locality", "weight": "CRITICAL", "score": 10, "notes": "All inference local via Ollama. Zero exfiltration."},
    {"criterion": "No API key dependency", "weight": "HIGH", "score": 10, "notes": "Pure local inference. No external creds needed."},
    {"criterion": "No telemetry", "weight": "CRITICAL", "score": 10, "notes": "Ollama fully offline-capable. No phone-home."},
    {"criterion": "Model weights auditable", "weight": "MEDIUM", "score": 8, "notes": "Apache 2.0, HF SHA verification. MoE harder to audit."},
    {"criterion": "Tool-use safety", "weight": "HIGH", "score": 7, "notes": "Function calling supported, MoE routing less predictable."},
    {"criterion": "Privacy filter compat", "weight": "HIGH", "score": 9, "notes": "Local = Privacy Filter unnecessary for most queries."},
    {"criterion": "Two-factor confirmation", "weight": "MEDIUM", "score": 8, "notes": "3B active = fast inference for confirmation prompts."},
    {"criterion": "Prompt injection resistance", "weight": "HIGH", "score": 6, "notes": "3B active may be weaker. Needs red-team (#324)."},
]

# Deployment targets with recommended quantization and rough throughput.
HARDWARE_PROFILES = {
    "mac_m2_ultra_192gb": {"name": "Mac Studio M2 Ultra (192GB)", "mem_gb": 192, "fits_q4": True, "fits_q8": True, "rec": "Q6_K", "tok_sec": 40},
    "mac_m4_pro_48gb": {"name": "Mac Mini M4 Pro (48GB)", "mem_gb": 48, "fits_q4": True, "fits_q8": False, "rec": "Q4_K_M", "tok_sec": 30},
    "mac_m1_16gb": {"name": "Mac M1 (16GB)", "mem_gb": 16, "fits_q4": False, "fits_q8": False, "rec": None, "tok_sec": None},
    "rtx_4090_24gb": {"name": "NVIDIA RTX 4090 (24GB)", "mem_gb": 24, "fits_q4": True, "fits_q8": False, "rec": "Q5_K_M", "tok_sec": 50},
    "rtx_3090_24gb": {"name": "NVIDIA RTX 3090 (24GB)", "mem_gb": 24, "fits_q4": True, "fits_q8": False, "rec": "Q4_K_M", "tok_sec": 35},
    "runpod_l40s_48gb": {"name": "RunPod L40S (48GB)", "mem_gb": 48, "fits_q4": True, "fits_q8": True, "rec": "Q6_K", "tok_sec": 60},
}
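
# A rough sketch of one way to pick a quantization for a given memory budget,
# instead of hand-maintaining the "rec" fields above. The function name and
# the 2 GB headroom margin are illustrative assumptions, not part of the
# original evaluation; the hand-picked recommendations also weigh throughput
# and output quality, so they will not always match this helper.
def recommend_quant(mem_gb: int, headroom_gb: int = 2) -> Optional[str]:
    """Return the highest-quality quant that fits in mem_gb, or None."""
    table = ModelSpec().quantization_options
    # Walk from the largest footprint (highest quality) down to the smallest.
    for quant, vram in sorted(table.items(), key=lambda x: -x[1]):
        if vram + headroom_gb <= mem_gb:
            return quant
    return None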


def check_ollama_status() -> Dict[str, Any]:
    """Probe the local Ollama daemon and list the models it has installed."""
    import subprocess

    result: Dict[str, Any] = {"running": False, "models": [], "qwen35_available": False}
    try:
        r = subprocess.run(
            ["curl", "-s", "--max-time", "5", "http://localhost:11434/api/tags"],
            capture_output=True, text=True, timeout=10,
        )
        if r.returncode == 0:
            data = json.loads(r.stdout)
            result["running"] = True
            result["models"] = [m["name"] for m in data.get("models", [])]
            result["qwen35_available"] = any("qwen3.5" in m.lower() for m in result["models"])
    except Exception as e:
        result["error"] = str(e)
    return result
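
# A hedged throughput probe to sanity-check the tok_sec estimates in
# HARDWARE_PROFILES. It POSTs to Ollama's /api/generate endpoint with
# stream=false; the response's eval_count and eval_duration fields are the
# generated token count and generation wall time in nanoseconds. The function
# name and prompt are illustrative; run only after `ollama pull qwen3.5:35b`,
# since a cold pull would otherwise dominate the measurement.
def measure_tok_per_sec(tag: str = "qwen3.5:35b") -> float:
    import urllib.request

    payload = json.dumps({
        "model": tag,
        "prompt": "Summarize the Apache 2.0 license in one sentence.",
        "stream": False,
    }).encode("utf-8")
    req = urllib.request.Request(
        "http://localhost:11434/api/generate",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req, timeout=300) as resp:
        data = json.loads(resp.read())
    # eval_duration is in nanoseconds; guard against a zero denominator.
    return data["eval_count"] / max(data["eval_duration"], 1) * 1e9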


def generate_report() -> str:
    """Render the full evaluation report as plain text."""
    spec = ModelSpec()
    ollama = check_ollama_status()
    lines = [
        "=" * 72,
        "Qwen3.5:35B EVALUATION REPORT -- Issue #288",
        "Epic #281 -- Vitalik Secure LLM Architecture",
        "=" * 72,
    ]

    lines.append("\n## 1. Model Specification\n")
    lines.append(f"  Name: {spec.name} | Arch: {spec.architecture}")
    lines.append(f"  Params: {spec.total_params} total, {spec.active_params} | Context: {spec.context_length:,} tokens")
    lines.append(f"  License: {spec.license} | Tools: {spec.tool_use_support} | JSON: {spec.json_mode_support}")

    lines.append("\n## 2. VRAM\n")
    for q, vram in sorted(spec.quantization_options.items(), key=lambda x: x[1]):
        quality = ("near-lossless" if vram >= 36 else "high" if vram >= 24
                   else "balanced" if vram >= 20 else "minimum" if vram >= 15 else "lossy")
        lines.append(f"  {q:<10} {vram:>4}GB  {quality}")

    lines.append("\n## 3. Hardware\n")
    for hw in HARDWARE_PROFILES.values():
        lines.append(
            f"  {hw['name']}  {hw['mem_gb']}GB  Q4:{'YES' if hw['fits_q4'] else 'NO '}  "
            f"Rec:{hw['rec'] or 'N/A'}  ~{hw['tok_sec'] or 'N/A'} tok/s"
        )

    lines.append("\n## 4. Security (Vitalik Framework)\n")
    # Weighted average: CRITICAL counts 3x, HIGH 2x, MEDIUM 1x.
    wm = {"CRITICAL": 3, "HIGH": 2, "MEDIUM": 1}
    tw = sum(wm[c["weight"]] for c in SECURITY_CRITERIA)
    ws = sum(c["score"] * wm[c["weight"]] for c in SECURITY_CRITERIA)
    for c in SECURITY_CRITERIA:
        lines.append(f"  [{c['weight']:<8}] {c['criterion']}: {c['score']}/10 -- {c['notes']}")
    avg = ws / tw
    lines.append(f"\n  Weighted: {avg:.1f}/10  Verdict: {'STRONG' if avg >= 8 else 'ADEQUATE'}")

    lines.append("\n## 5. Fleet Comparison\n")
    for name, d in FLEET_MODELS.items():
        lines.append(f"  {name:<35} {d['params_total']:<6} {d['context']:<6} {'Local' if d['local'] else 'Cloud'}  {d['reasoning']}")

    lines.append("\n## 6. Ollama\n")
    lines.append(f"  Running: {'Yes' if ollama['running'] else 'No'} | Models: {', '.join(ollama['models']) or 'none'}")
    lines.append(f"  Qwen3.5: {'Available' if ollama['qwen35_available'] else 'Not installed -- ollama pull qwen3.5:35b'}")

    lines.append("\n## 7. Recommendation\n")
    lines.append("  VERDICT: APPROVED for local deployment as privacy-sensitive tier")
    lines.append("\n  + Perfect data sovereignty, 128K context, Apache 2.0, MoE speed")
    lines.append("  + Tool use + JSON mode, eliminates Privacy Filter for most queries")
    lines.append("  - 20GB VRAM at Q4, MoE less predictable, needs red-team testing")
    lines.append("\n  Deployment: ollama pull qwen3.5:35b -> config.yaml privacy_model")
    return "\n".join(lines)


if __name__ == "__main__":
    if "--check-ollama" in sys.argv:
        print(json.dumps(check_ollama_status(), indent=2))
    else:
        print(generate_report())
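
# Example invocations (filename illustrative):
#   python qwen35_eval.py                  # full evaluation report
#   python qwen35_eval.py --check-ollama   # JSON probe of the local Ollama daemon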