feat: standardize llama.cpp backend for sovereign local inference (#1123)

This commit is contained in:
2026-04-14 01:52:45 +00:00
committed by Alexander Whitestone
parent a3f1688cb7
commit bc48abd970

View File

@@ -2,7 +2,6 @@
"""llama_client.py — OpenAI-compatible client for llama.cpp HTTP API."""
import argparse, json, os, sys, time
from dataclasses import dataclass
from typing import Generator, Optional
import urllib.request, urllib.error
# Base URL of the llama.cpp HTTP server; overridable via LLAMA_ENDPOINT.
# NOTE(review): llama.cpp's bundled server listens on port 8080 by default,
# and 11434 is Ollama's well-known port — confirm 11435 is this project's
# deliberate choice and not a typo.
DEFAULT_ENDPOINT = os.environ.get("LLAMA_ENDPOINT", "http://localhost:11435")