test: make local llama.cpp the default runtime (#77)
This commit was merged in pull request #77.
This commit is contained in:
@@ -1,8 +1,8 @@
 model:
-  default: gpt-5.4
-  provider: openai-codex
+  default: hermes4:14b
+  provider: custom
+  context_length: 65536
-  base_url: https://chatgpt.com/backend-api/codex
+  base_url: http://localhost:8081/v1
 toolsets:
   - all
 agent:
|
||||
@@ -188,7 +188,7 @@ custom_providers:
   - name: Local llama.cpp
     base_url: http://localhost:8081/v1
     api_key: none
-    model: auto
+    model: hermes4:14b
   - name: Google Gemini
     base_url: https://generativelanguage.googleapis.com/v1beta/openai
    api_key_env: GEMINI_API_KEY
Reference in New Issue
Block a user