From 43fe83755dabbe01dbe6b6cbf29de1df06a2d4f2 Mon Sep 17 00:00:00 2001
From: Pedro Perez
Date: Mon, 2 Feb 2026 22:15:21 +0100
Subject: [PATCH] fix: Use correct 'zai/' prefix for Zhipu AI models in LiteLLM

LiteLLM expects the 'zai/' provider prefix for Zhipu AI (Z.ai) models,
not 'zhipu/'. This was causing 'LLM Provider NOT provided' errors when
users configured models like 'glm-4.7' without an explicit prefix.

According to LiteLLM docs, the correct format is:
- model='zai/glm-4.7' (correct)
- NOT model='zhipu/glm-4.7' (incorrect)

This fix ensures auto-prefixed models use the correct 'zai/' format.

Fixes: Error when using Zhipu AI models with shorthand names like 'glm-4.7'
---
 nanobot/providers/litellm_provider.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/nanobot/providers/litellm_provider.py b/nanobot/providers/litellm_provider.py
index 42b4bf5..a0927c4 100644
--- a/nanobot/providers/litellm_provider.py
+++ b/nanobot/providers/litellm_provider.py
@@ -86,13 +86,13 @@ class LiteLLMProvider(LLMProvider):
             model = f"openrouter/{model}"
 
         # For Zhipu/Z.ai, ensure prefix is present
-        # Handle cases like "glm-4.7-flash" -> "zhipu/glm-4.7-flash"
+        # Handle cases like "glm-4.7-flash" -> "zai/glm-4.7-flash"
         if ("glm" in model.lower() or "zhipu" in model.lower()) and not (
             model.startswith("zhipu/")
             or model.startswith("zai/")
             or model.startswith("openrouter/")
         ):
-            model = f"zhipu/{model}"
+            model = f"zai/{model}"
 
         # For vLLM, use hosted_vllm/ prefix per LiteLLM docs
         # Convert openai/ prefix to hosted_vllm/ if user specified it