From 9db75fcfc2254bb4801b6307bd0cc94c7c8bbe11 Mon Sep 17 00:00:00 2001 From: teknium1 Date: Tue, 17 Mar 2026 04:12:08 -0700 Subject: [PATCH] fix(metadata): fuzzy context length match prefers longest key The fuzzy match for model context lengths iterated in dict insertion order. Shorter model names (e.g. 'gpt-5') could match before more specific ones (e.g. 'gpt-5.4-pro'), returning the wrong context length. Sort by key length descending so more specific model names always match first. --- agent/model_metadata.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/agent/model_metadata.py b/agent/model_metadata.py index 2f9ea666c..c578acf50 100644 --- a/agent/model_metadata.py +++ b/agent/model_metadata.py @@ -266,8 +266,10 @@ def get_model_context_length(model: str, base_url: str = "") -> int: if model in metadata: return metadata[model].get("context_length", 128000) - # 3. Hardcoded defaults (fuzzy match) - for default_model, length in DEFAULT_CONTEXT_LENGTHS.items(): + # 3. Hardcoded defaults (fuzzy match — longest key first for specificity) + for default_model, length in sorted( + DEFAULT_CONTEXT_LENGTHS.items(), key=lambda x: len(x[0]), reverse=True + ): if default_model in model or model in default_model: return length