add dashscope support

ZJUCQR 2026-02-03 16:27:15 +08:00
parent ee6c13212c
commit 8499dbf132
2 changed files with 12 additions and 1 deletion


@@ -52,6 +52,7 @@ class ProvidersConfig(BaseModel):
openrouter: ProviderConfig = Field(default_factory=ProviderConfig)
groq: ProviderConfig = Field(default_factory=ProviderConfig)
zhipu: ProviderConfig = Field(default_factory=ProviderConfig)
dashscope: ProviderConfig = Field(default_factory=ProviderConfig)  # Alibaba Cloud Tongyi Qianwen (Qwen)
vllm: ProviderConfig = Field(default_factory=ProviderConfig)
gemini: ProviderConfig = Field(default_factory=ProviderConfig)
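
A minimal, self-contained sketch of how the new dashscope entry is exercised. The fields of ProviderConfig (api_key, api_base) are assumed here, since the hunk above only shows the class being used as a default factory:

    from pydantic import BaseModel, Field

    class ProviderConfig(BaseModel):
        # Assumed shape: the diff only shows ProviderConfig used via default_factory.
        api_key: str | None = None
        api_base: str | None = None

    class ProvidersConfig(BaseModel):
        zhipu: ProviderConfig = Field(default_factory=ProviderConfig)
        dashscope: ProviderConfig = Field(default_factory=ProviderConfig)  # new in this commit

    cfg = ProvidersConfig(dashscope=ProviderConfig(api_key="sk-demo"))
    print(cfg.dashscope.api_key)  # -> sk-demo; unset providers default to empty configs
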
@@ -92,13 +93,14 @@ class Config(BaseSettings):
return Path(self.agents.defaults.workspace).expanduser()
def get_api_key(self) -> str | None:
"""Get API key in priority order: OpenRouter > Anthropic > OpenAI > Gemini > Zhipu > Groq > vLLM."""
"""Get API key in priority order: OpenRouter > Anthropic > OpenAI > Gemini > Zhipu > DashScope > Groq > vLLM."""
return (
self.providers.openrouter.api_key or
self.providers.anthropic.api_key or
self.providers.openai.api_key or
self.providers.gemini.api_key or
self.providers.zhipu.api_key or
self.providers.dashscope.api_key or
self.providers.groq.api_key or
self.providers.vllm.api_key or
None
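
A standalone illustration of the fallback order above, with invented key values: the first provider that has a key wins, and dashscope is now consulted after zhipu but before groq.

    providers = {
        "openrouter": None,
        "anthropic": None,
        "openai": None,
        "gemini": None,
        "zhipu": None,
        "dashscope": "sk-dashscope-demo",  # invented value for the demo
        "groq": "gsk-demo",
        "vllm": None,
    }
    priority = ["openrouter", "anthropic", "openai", "gemini", "zhipu", "dashscope", "groq", "vllm"]
    api_key = next((providers[name] for name in priority if providers[name]), None)
    assert api_key == "sk-dashscope-demo"  # dashscope outranks groq in the chain
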


@@ -51,6 +51,8 @@ class LiteLLMProvider(LLMProvider):
os.environ.setdefault("GEMINI_API_KEY", api_key)
elif "zhipu" in default_model or "glm" in default_model or "zai" in default_model:
os.environ.setdefault("ZHIPUAI_API_KEY", api_key)
elif "dashscope" in default_model or "qwen" in default_model.lower():
os.environ.setdefault("DASHSCOPE_API_KEY", api_key)
elif "groq" in default_model:
os.environ.setdefault("GROQ_API_KEY", api_key)
@@ -95,6 +97,13 @@ class LiteLLMProvider(LLMProvider):
model.startswith("openrouter/")
):
model = f"zhipu/{model}"
# For DashScope/Qwen, ensure prefix is present
if ("qwen" in model.lower() or "dashscope" in model.lower()) and not (
model.startswith("dashscope/") or
model.startswith("openrouter/")
):
model = f"dashscope/{model}"
# For vLLM, use hosted_vllm/ prefix per LiteLLM docs
# Convert openai/ prefix to hosted_vllm/ if user specified it
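
The DashScope prefixing rule added above can be checked on its own; normalize_model and the model names below are illustrative only.

    def normalize_model(model: str) -> str:
        # Same condition as the added hunk: Qwen/DashScope names get the
        # dashscope/ routing prefix unless they are already routed elsewhere.
        if ("qwen" in model.lower() or "dashscope" in model.lower()) and not (
            model.startswith("dashscope/") or model.startswith("openrouter/")
        ):
            model = f"dashscope/{model}"
        return model

    assert normalize_model("qwen-plus") == "dashscope/qwen-plus"
    assert normalize_model("dashscope/qwen-max") == "dashscope/qwen-max"
    assert normalize_model("openrouter/qwen-2.5-72b-instruct") == "openrouter/qwen-2.5-72b-instruct"
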