- Enhanced Codex model discovery by fetching available models from the API, with fallback to local cache and defaults.
- Updated the context compressor's summary target tokens to 2500 for improved performance.
- Added external credential detection for Codex CLI to streamline authentication.
- Refactored various components to ensure consistent handling of authentication and model selection across the application.
"""Codex model discovery from API, local cache, and config."""
|
|
|
|
from __future__ import annotations
|
|
|
|
import json
|
|
import logging
|
|
from pathlib import Path
|
|
from typing import List, Optional
|
|
|
|
from hermes_cli.auth import resolve_codex_home_path
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
DEFAULT_CODEX_MODELS: List[str] = [
|
|
"gpt-5.3-codex",
|
|
"gpt-5.2-codex",
|
|
"gpt-5.1-codex-max",
|
|
"gpt-5.1-codex-mini",
|
|
]
|
|
|
|
|
|
def _fetch_models_from_api(access_token: str) -> List[str]:
    """Fetch available models from the Codex API. Returns visible models sorted by priority."""
    try:
        import httpx

        resp = httpx.get(
            "https://chatgpt.com/backend-api/codex/models?client_version=1.0.0",
            headers={"Authorization": f"Bearer {access_token}"},
            timeout=10,
        )
        if resp.status_code != 200:
            return []
        data = resp.json()
        entries = data.get("models", []) if isinstance(data, dict) else []
    except Exception as exc:
        logger.debug("Failed to fetch Codex models from API: %s", exc)
        return []

    sortable = []
    for item in entries:
        if not isinstance(item, dict):
            continue
        slug = item.get("slug")
        if not isinstance(slug, str) or not slug.strip():
            continue
        slug = slug.strip()
        # Skip models the backend marks as unsupported or hidden.
        if item.get("supported_in_api") is False:
            continue
        visibility = item.get("visibility", "")
        if isinstance(visibility, str) and visibility.strip().lower() == "hide":
            continue
        # Missing or non-numeric priorities sort last.
        priority = item.get("priority")
        rank = int(priority) if isinstance(priority, (int, float)) else 10_000
        sortable.append((rank, slug))

    sortable.sort(key=lambda x: (x[0], x[1]))
    return [slug for _, slug in sortable]

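
# Illustrative sketch only (the endpoint's full schema is not documented here): the
# parsing above assumes a JSON payload shaped roughly like
#
#   {"models": [{"slug": "gpt-5.3-codex", "priority": 1,
#                "visibility": "show", "supported_in_api": true}, ...]}
#
# Only "slug", "priority", "visibility", and "supported_in_api" are read; entries
# with visibility "hide" or supported_in_api false are dropped, and a missing
# priority sorts last.
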
def _read_default_model(codex_home: Path) -> Optional[str]:
    """Return the default model set in <codex_home>/config.toml, or None if unset."""
    config_path = codex_home / "config.toml"
    if not config_path.exists():
        return None
    try:
        import tomllib
    except Exception:
        return None
    try:
        payload = tomllib.loads(config_path.read_text(encoding="utf-8"))
    except Exception:
        return None
    model = payload.get("model") if isinstance(payload, dict) else None
    if isinstance(model, str) and model.strip():
        return model.strip()
    return None

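
# For illustration (the value below is hypothetical): _read_default_model only
# consults the top-level "model" key of config.toml, e.g.
#
#   model = "gpt-5.1-codex-max"
#
# A missing file, missing tomllib, or parse error simply yields None.
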
def _read_cache_models(codex_home: Path) -> List[str]:
    """Return Codex model slugs from <codex_home>/models_cache.json, sorted by priority."""
    cache_path = codex_home / "models_cache.json"
    if not cache_path.exists():
        return []
    try:
        raw = json.loads(cache_path.read_text(encoding="utf-8"))
    except Exception:
        return []

    entries = raw.get("models") if isinstance(raw, dict) else None
    sortable = []
    if isinstance(entries, list):
        for item in entries:
            if not isinstance(item, dict):
                continue
            slug = item.get("slug")
            if not isinstance(slug, str) or not slug.strip():
                continue
            slug = slug.strip()
            # Only keep entries that look like Codex models.
            if "codex" not in slug.lower():
                continue
            if item.get("supported_in_api") is False:
                continue
            visibility = item.get("visibility")
            if isinstance(visibility, str) and visibility.strip().lower() == "hidden":
                continue
            priority = item.get("priority")
            rank = int(priority) if isinstance(priority, (int, float)) else 10_000
            sortable.append((rank, slug))

    sortable.sort(key=lambda item: (item[0], item[1]))
    # Preserve priority order while dropping duplicate slugs.
    deduped: List[str] = []
    for _, slug in sortable:
        if slug not in deduped:
            deduped.append(slug)
    return deduped

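
# Illustrative cache shape (only the fields read above matter): a models_cache.json like
#
#   {"models": [{"slug": "gpt-5.2-codex", "priority": 2}]}
#
# yields ["gpt-5.2-codex"]; entries whose slug lacks "codex", entries marked
# supported_in_api: false, and entries with visibility "hidden" are skipped.
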
def get_codex_model_ids(access_token: Optional[str] = None) -> List[str]:
    """Return available Codex model IDs, trying API first, then local sources.

    Resolution order: API (live, if token provided) > config.toml default >
    local cache > hardcoded defaults.
    """
    codex_home = resolve_codex_home_path()
    ordered: List[str] = []

    # Try live API if we have a token
    if access_token:
        api_models = _fetch_models_from_api(access_token)
        if api_models:
            return api_models

    # Fall back to local sources
    default_model = _read_default_model(codex_home)
    if default_model:
        ordered.append(default_model)

    for model_id in _read_cache_models(codex_home):
        if model_id not in ordered:
            ordered.append(model_id)

    for model_id in DEFAULT_CODEX_MODELS:
        if model_id not in ordered:
            ordered.append(model_id)

    return ordered
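

if __name__ == "__main__":  # pragma: no cover - illustrative usage sketch, not part of the CLI
    # Minimal sketch: with no access token, only local sources (config.toml,
    # models_cache.json, DEFAULT_CODEX_MODELS) are consulted; pass a ChatGPT access
    # token to query the live endpoint first.
    for model_id in get_codex_model_ids():
        print(model_id)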