Merge pull request #32 from pjperez/main

fix: Use correct 'zai/' prefix for Zhipu AI models in LiteLLM
This commit is contained in:
Xubin Ren 2026-02-04 17:10:13 +08:00 committed by GitHub
commit b1bce89d4b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -88,13 +88,13 @@ class LiteLLMProvider(LLMProvider):
             model = f"openrouter/{model}"
         # For Zhipu/Z.ai, ensure prefix is present
-        # Handle cases like "glm-4.7-flash" -> "zhipu/glm-4.7-flash"
+        # Handle cases like "glm-4.7-flash" -> "zai/glm-4.7-flash"
         if ("glm" in model.lower() or "zhipu" in model.lower()) and not (
             model.startswith("zhipu/") or
             model.startswith("zai/") or
             model.startswith("openrouter/")
         ):
-            model = f"zhipu/{model}"
+            model = f"zai/{model}"
         # For vLLM, use hosted_vllm/ prefix per LiteLLM docs
         # Convert openai/ prefix to hosted_vllm/ if user specified it