diff --git a/README.md b/README.md index f31f646..bde285d 100644 --- a/README.md +++ b/README.md @@ -91,8 +91,7 @@ pip install nanobot-ai > [!TIP] > Set your API key in `~/.nanobot/config.json`. -> Get API keys: [OpenRouter](https://openrouter.ai/keys) (LLM) · [Brave Search](https://brave.com/search/api/) (optional, for web search) -> You can also change the model to `minimax/minimax-m2` for lower cost. +> Get API keys: [OpenRouter](https://openrouter.ai/keys) (Global) · [DashScope](https://dashscope.console.aliyun.com) (Qwen) · [Brave Search](https://brave.com/search/api/) (optional, for web search) **1. Initialize** @@ -102,6 +101,7 @@ nanobot onboard **2. Configure** (`~/.nanobot/config.json`) +For OpenRouter (recommended for global users): ```json { "providers": { @@ -113,18 +113,10 @@ nanobot onboard "defaults": { "model": "anthropic/claude-opus-4-5" } - }, - "tools": { - "web": { - "search": { - "apiKey": "BSA-xxx" - } - } } } ``` - **3. Chat** ```bash @@ -360,6 +352,7 @@ Config file: `~/.nanobot/config.json` | `deepseek` | LLM (DeepSeek direct) | [platform.deepseek.com](https://platform.deepseek.com) | | `groq` | LLM + **Voice transcription** (Whisper) | [console.groq.com](https://console.groq.com) | | `gemini` | LLM (Gemini direct) | [aistudio.google.com](https://aistudio.google.com) | +| `dashscope` | LLM (Qwen) | [dashscope.console.aliyun.com](https://dashscope.console.aliyun.com) | ### Security diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py index 590fd19..9af6ee2 100644 --- a/nanobot/config/schema.py +++ b/nanobot/config/schema.py @@ -75,6 +75,7 @@ class ProvidersConfig(BaseModel): deepseek: ProviderConfig = Field(default_factory=ProviderConfig) groq: ProviderConfig = Field(default_factory=ProviderConfig) zhipu: ProviderConfig = Field(default_factory=ProviderConfig) + dashscope: ProviderConfig = Field(default_factory=ProviderConfig) # Alibaba Cloud DashScope (Qwen / Tongyi Qianwen) vllm: ProviderConfig = Field(default_factory=ProviderConfig) gemini: ProviderConfig = 
Field(default_factory=ProviderConfig) moonshot: ProviderConfig = Field(default_factory=ProviderConfig) @@ -137,6 +138,8 @@ class Config(BaseSettings): "zhipu": self.providers.zhipu, "glm": self.providers.zhipu, "zai": self.providers.zhipu, + "dashscope": self.providers.dashscope, + "qwen": self.providers.dashscope, "groq": self.providers.groq, "moonshot": self.providers.moonshot, "kimi": self.providers.moonshot, @@ -158,8 +161,8 @@ class Config(BaseSettings): self.providers.openrouter, self.providers.deepseek, self.providers.anthropic, self.providers.openai, self.providers.gemini, self.providers.zhipu, - self.providers.moonshot, self.providers.vllm, - self.providers.groq, + self.providers.dashscope, self.providers.moonshot, + self.providers.vllm, self.providers.groq, ]: if provider.api_key: return provider.api_key diff --git a/nanobot/providers/litellm_provider.py b/nanobot/providers/litellm_provider.py index 4f57d62..a15f05e 100644 --- a/nanobot/providers/litellm_provider.py +++ b/nanobot/providers/litellm_provider.py @@ -53,6 +53,8 @@ class LiteLLMProvider(LLMProvider): os.environ.setdefault("GEMINI_API_KEY", api_key) elif "zhipu" in default_model or "glm" in default_model or "zai" in default_model: os.environ.setdefault("ZAI_API_KEY", api_key) + elif "dashscope" in default_model or "qwen" in default_model.lower(): + os.environ.setdefault("DASHSCOPE_API_KEY", api_key) elif "groq" in default_model: os.environ.setdefault("GROQ_API_KEY", api_key) elif "moonshot" in default_model or "kimi" in default_model: @@ -101,6 +103,13 @@ class LiteLLMProvider(LLMProvider): ): model = f"zai/{model}" + # For DashScope/Qwen, ensure dashscope/ prefix + if ("qwen" in model.lower() or "dashscope" in model.lower()) and not ( + model.startswith("dashscope/") or + model.startswith("openrouter/") + ): + model = f"dashscope/{model}" + # For Moonshot/Kimi, ensure moonshot/ prefix (before vLLM check) if ("moonshot" in model.lower() or "kimi" in model.lower()) and not ( 
model.startswith("moonshot/") or model.startswith("openrouter/") @@ -111,6 +120,7 @@ class LiteLLMProvider(LLMProvider): if "gemini" in model.lower() and not model.startswith("gemini/"): model = f"gemini/{model}" + # For vLLM, use hosted_vllm/ prefix per LiteLLM docs # Convert openai/ prefix to hosted_vllm/ if user specified it if self.is_vllm: