From 240db894b43ddf521c83850be57d8025cbc27562 Mon Sep 17 00:00:00 2001 From: w0x7ce Date: Sun, 8 Feb 2026 11:37:36 +0800 Subject: [PATCH 1/9] feat(channels): add DingTalk channel support and documentation --- README.md | 43 +++++++ nanobot/channels/dingtalk.py | 219 +++++++++++++++++++++++++++++++++++ nanobot/channels/manager.py | 11 ++ nanobot/config/schema.py | 9 ++ pyproject.toml | 1 + 5 files changed, 283 insertions(+) create mode 100644 nanobot/channels/dingtalk.py diff --git a/README.md b/README.md index a1ea905..95a5625 100644 --- a/README.md +++ b/README.md @@ -336,6 +336,49 @@ nanobot gateway +
+DingTalk (钉钉) + +Uses **Stream Mode** — no public IP required. + +```bash +pip install nanobot-ai[dingtalk] +``` + +**1. Create a DingTalk bot** +- Visit [DingTalk Open Platform](https://open-dev.dingtalk.com/) +- Create a new app -> Add **Robot** capability +- **Configuration**: + - Toggle **Stream Mode** ON +- **Permissions**: Add necessary permissions for sending messages +- Get **AppKey** (Client ID) and **AppSecret** (Client Secret) from "Credentials" +- Publish the app + +**2. Configure** + +```json +{ + "channels": { + "dingtalk": { + "enabled": true, + "clientId": "YOUR_APP_KEY", + "clientSecret": "YOUR_APP_SECRET", + "allowFrom": [] + } + } +} +``` + +> `allowFrom`: Leave empty to allow all users, or add `["staffId"]` to restrict access. + +**3. Run** + +```bash +nanobot gateway +``` + +
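+
+For example, to restrict access to two specific users (staff IDs below are placeholders):
+
+```json
+{
+  "channels": {
+    "dingtalk": {
+      "allowFrom": ["staff_id_1", "staff_id_2"]
+    }
+  }
+}
+```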
+ ## ⚙️ Configuration Config file: `~/.nanobot/config.json` diff --git a/nanobot/channels/dingtalk.py b/nanobot/channels/dingtalk.py new file mode 100644 index 0000000..897e5be --- /dev/null +++ b/nanobot/channels/dingtalk.py @@ -0,0 +1,219 @@ +"""DingTalk/DingDing channel implementation using Stream Mode.""" + +import asyncio +import json +import threading +import time +from typing import Any + +from loguru import logger +import httpx + +from nanobot.bus.events import OutboundMessage, InboundMessage +from nanobot.bus.queue import MessageBus +from nanobot.channels.base import BaseChannel +from nanobot.config.schema import DingTalkConfig + +try: + from dingtalk_stream import ( + DingTalkStreamClient, + Credential, + CallbackHandler, + CallbackMessage, + AckMessage + ) + from dingtalk_stream.chatbot import ChatbotMessage + DINGTALK_AVAILABLE = True +except ImportError: + DINGTALK_AVAILABLE = False + + +class NanobotDingTalkHandler(CallbackHandler): + """ + Standard DingTalk Stream SDK Callback Handler. + Parses incoming messages and forwards them to the Nanobot channel. + """ + def __init__(self, channel: "DingTalkChannel"): + super().__init__() + self.channel = channel + + async def process(self, message: CallbackMessage): + """Process incoming stream message.""" + try: + # Parse using SDK's ChatbotMessage for robust handling + chatbot_msg = ChatbotMessage.from_dict(message.data) + + # Extract content based on message type + content = "" + if chatbot_msg.text: + content = chatbot_msg.text.content.strip() + elif chatbot_msg.message_type == "text": + # Fallback manual extraction if object not populated + content = message.data.get("text", {}).get("content", "").strip() + + if not content: + logger.warning(f"Received empty or unsupported message type: {chatbot_msg.message_type}") + return AckMessage.STATUS_OK, "OK" + + sender_id = chatbot_msg.sender_staff_id or chatbot_msg.sender_id + sender_name = chatbot_msg.sender_nick or "Unknown" + + logger.info(f"Received DingTalk message from {sender_name} ({sender_id}): {content}") + + # Forward to Nanobot + # We use asyncio.create_task to avoid blocking the ACK return + asyncio.create_task( + self.channel._on_message(content, sender_id, sender_name) + ) + + return AckMessage.STATUS_OK, "OK" + + except Exception as e: + logger.error(f"Error processing DingTalk message: {e}") + # Return OK to avoid retry loop from DingTalk server if it's a parsing error + return AckMessage.STATUS_OK, "Error" + +class DingTalkChannel(BaseChannel): + """ + DingTalk channel using Stream Mode. + + Uses WebSocket to receive events via `dingtalk-stream` SDK. + Uses direct HTTP API to send messages (since SDK is mainly for receiving). + """ + + name = "dingtalk" + + def __init__(self, config: DingTalkConfig, bus: MessageBus): + super().__init__(config, bus) + self.config: DingTalkConfig = config + self._client: Any = None + self._loop: asyncio.AbstractEventLoop | None = None + + # Access Token management for sending messages + self._access_token: str | None = None + self._token_expiry: float = 0 + + async def start(self) -> None: + """Start the DingTalk bot with Stream Mode.""" + try: + if not DINGTALK_AVAILABLE: + logger.error("DingTalk Stream SDK not installed. 
Run: pip install dingtalk-stream") + return + + if not self.config.client_id or not self.config.client_secret: + logger.error("DingTalk client_id and client_secret not configured") + return + + self._running = True + self._loop = asyncio.get_running_loop() + + logger.info(f"Initializing DingTalk Stream Client with Client ID: {self.config.client_id}...") + credential = Credential(self.config.client_id, self.config.client_secret) + self._client = DingTalkStreamClient(credential) + + # Register standard handler + handler = NanobotDingTalkHandler(self) + + # Register using the chatbot topic standard for bots + self._client.register_callback_handler( + ChatbotMessage.TOPIC, + handler + ) + + logger.info("DingTalk bot started with Stream Mode") + + # The client.start() method is an async infinite loop that handles the websocket connection + await self._client.start() + + except Exception as e: + logger.exception(f"Failed to start DingTalk channel: {e}") + + async def stop(self) -> None: + """Stop the DingTalk bot.""" + self._running = False + # SDK doesn't expose a clean stop method that cancels loop immediately without private access + pass + + async def _get_access_token(self) -> str | None: + """Get or refresh Access Token.""" + if self._access_token and time.time() < self._token_expiry: + return self._access_token + + url = "https://api.dingtalk.com/v1.0/oauth2/accessToken" + data = { + "appKey": self.config.client_id, + "appSecret": self.config.client_secret + } + + try: + async with httpx.AsyncClient() as client: + resp = await client.post(url, json=data) + resp.raise_for_status() + res_data = resp.json() + self._access_token = res_data.get("accessToken") + # Expire 60s early to be safe + self._token_expiry = time.time() + int(res_data.get("expireIn", 7200)) - 60 + return self._access_token + except Exception as e: + logger.error(f"Failed to get DingTalk access token: {e}") + return None + + async def send(self, msg: OutboundMessage) -> None: + """Send a message through DingTalk.""" + token = await self._get_access_token() + if not token: + return + + # This endpoint is for sending to a single user in a bot chat + # https://open.dingtalk.com/document/orgapp/robot-batch-send-messages + url = "https://api.dingtalk.com/v1.0/robot/oToMessages/batchSend" + + headers = { + "x-acs-dingtalk-access-token": token + } + + # Convert markdown code blocks for basic compatibility if needed, + # but DingTalk supports markdown loosely. + + data = { + "robotCode": self.config.client_id, + "userIds": [msg.chat_id], # chat_id is the user's staffId/unionId + "msgKey": "sampleMarkdown", # Using markdown template + "msgParam": json.dumps({ + "text": msg.content, + "title": "Nanobot Reply" + }) + } + + try: + async with httpx.AsyncClient() as client: + resp = await client.post(url, json=data, headers=headers) + # Check 200 OK but also API error codes if any + if resp.status_code != 200: + logger.error(f"DingTalk send failed: {resp.text}") + else: + logger.debug(f"DingTalk message sent to {msg.chat_id}") + except Exception as e: + logger.error(f"Error sending DingTalk message: {e}") + + async def _on_message(self, content: str, sender_id: str, sender_name: str) -> None: + """Handle incoming message (called by NanobotDingTalkHandler).""" + try: + logger.info(f"DingTalk inbound: {content} from {sender_name}") + + # Correct InboundMessage usage based on events.py definition + # @dataclass class InboundMessage: + # channel: str, sender_id: str, chat_id: str, content: str, ... 
+ msg = InboundMessage( + channel=self.name, + sender_id=sender_id, + chat_id=sender_id, # For private stats, chat_id is sender_id + content=str(content), + metadata={ + "sender_name": sender_name, + "platform": "dingtalk" + } + ) + await self.bus.publish_inbound(msg) + except Exception as e: + logger.error(f"Error publishing DingTalk message: {e}") diff --git a/nanobot/channels/manager.py b/nanobot/channels/manager.py index 846ea70..c7ab7c3 100644 --- a/nanobot/channels/manager.py +++ b/nanobot/channels/manager.py @@ -77,6 +77,17 @@ class ChannelManager: logger.info("Feishu channel enabled") except ImportError as e: logger.warning(f"Feishu channel not available: {e}") + + # DingTalk channel + if self.config.channels.dingtalk.enabled: + try: + from nanobot.channels.dingtalk import DingTalkChannel + self.channels["dingtalk"] = DingTalkChannel( + self.config.channels.dingtalk, self.bus + ) + logger.info("DingTalk channel enabled") + except ImportError as e: + logger.warning(f"DingTalk channel not available: {e}") async def _start_channel(self, name: str, channel: BaseChannel) -> None: """Start a channel and log any exceptions.""" diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py index 7724288..e46b5df 100644 --- a/nanobot/config/schema.py +++ b/nanobot/config/schema.py @@ -30,6 +30,14 @@ class FeishuConfig(BaseModel): allow_from: list[str] = Field(default_factory=list) # Allowed user open_ids +class DingTalkConfig(BaseModel): + """DingTalk channel configuration using Stream mode.""" + enabled: bool = False + client_id: str = "" # AppKey + client_secret: str = "" # AppSecret + allow_from: list[str] = Field(default_factory=list) # Allowed staff_ids + + class DiscordConfig(BaseModel): """Discord channel configuration.""" enabled: bool = False @@ -45,6 +53,7 @@ class ChannelsConfig(BaseModel): telegram: TelegramConfig = Field(default_factory=TelegramConfig) discord: DiscordConfig = Field(default_factory=DiscordConfig) feishu: FeishuConfig = Field(default_factory=FeishuConfig) + dingtalk: DingTalkConfig = Field(default_factory=DingTalkConfig) class AgentDefaults(BaseModel): diff --git a/pyproject.toml b/pyproject.toml index 4093474..6fda084 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ dependencies = [ "readability-lxml>=0.8.0", "rich>=13.0.0", "croniter>=2.0.0", + "dingtalk-stream>=0.4.0", "python-telegram-bot[socks]>=21.0", "lark-oapi>=1.0.0", "socksio>=1.0.0", From 3b61ae4fff435a4dce9675ecd2bdabf9c097f414 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sun, 8 Feb 2026 04:29:51 +0000 Subject: [PATCH 2/9] fix: skip provider prefix rules for vLLM/OpenRouter/AiHubMix endpoints --- nanobot/providers/litellm_provider.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nanobot/providers/litellm_provider.py b/nanobot/providers/litellm_provider.py index 7a52e7c..415100c 100644 --- a/nanobot/providers/litellm_provider.py +++ b/nanobot/providers/litellm_provider.py @@ -107,11 +107,12 @@ class LiteLLMProvider(LLMProvider): (("moonshot", "kimi"), "moonshot", ("moonshot/", "openrouter/")), (("gemini",), "gemini", ("gemini/",)), ] - model_lower = model.lower() - for keywords, prefix, skip in _prefix_rules: - if any(kw in model_lower for kw in keywords) and not any(model.startswith(s) for s in skip): - model = f"{prefix}/{model}" - break + if not (self.is_vllm or self.is_openrouter or self.is_aihubmix): + model_lower = model.lower() + for keywords, prefix, skip in _prefix_rules: + if any(kw in model_lower for kw in keywords) and not 
any(model.startswith(s) for s in skip): + model = f"{prefix}/{model}" + break # Gateway/endpoint-specific prefixes (detected by api_base/api_key, not model name) if self.is_openrouter and not model.startswith("openrouter/"): From f7f812a1774ebe20ba8e46a7e71f0ac5f1de37b5 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sun, 8 Feb 2026 05:06:41 +0000 Subject: [PATCH 3/9] feat: add /reset and /help commands for Telegram bot --- README.md | 2 +- nanobot/agent/loop.py | 3 +- nanobot/channels/manager.py | 11 ++++- nanobot/channels/telegram.py | 81 ++++++++++++++++++++++++++++++++---- nanobot/cli/commands.py | 5 ++- 5 files changed, 88 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index a1ea905..ff827be 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ ⚡️ Delivers core agent functionality in just **~4,000** lines of code — **99% smaller** than Clawdbot's 430k+ lines. -📏 Real-time line count: **3,422 lines** (run `bash core_agent_lines.sh` to verify anytime) +📏 Real-time line count: **3,423 lines** (run `bash core_agent_lines.sh` to verify anytime) ## 📢 News diff --git a/nanobot/agent/loop.py b/nanobot/agent/loop.py index b13113f..a65f3a5 100644 --- a/nanobot/agent/loop.py +++ b/nanobot/agent/loop.py @@ -45,6 +45,7 @@ class AgentLoop: exec_config: "ExecToolConfig | None" = None, cron_service: "CronService | None" = None, restrict_to_workspace: bool = False, + session_manager: SessionManager | None = None, ): from nanobot.config.schema import ExecToolConfig from nanobot.cron.service import CronService @@ -59,7 +60,7 @@ class AgentLoop: self.restrict_to_workspace = restrict_to_workspace self.context = ContextBuilder(workspace) - self.sessions = SessionManager(workspace) + self.sessions = session_manager or SessionManager(workspace) self.tools = ToolRegistry() self.subagents = SubagentManager( provider=provider, diff --git a/nanobot/channels/manager.py b/nanobot/channels/manager.py index 846ea70..efb7db0 100644 --- a/nanobot/channels/manager.py +++ b/nanobot/channels/manager.py @@ -1,7 +1,9 @@ """Channel manager for coordinating chat channels.""" +from __future__ import annotations + import asyncio -from typing import Any +from typing import Any, TYPE_CHECKING from loguru import logger @@ -10,6 +12,9 @@ from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import Config +if TYPE_CHECKING: + from nanobot.session.manager import SessionManager + class ChannelManager: """ @@ -21,9 +26,10 @@ class ChannelManager: - Route outbound messages """ - def __init__(self, config: Config, bus: MessageBus): + def __init__(self, config: Config, bus: MessageBus, session_manager: "SessionManager | None" = None): self.config = config self.bus = bus + self.session_manager = session_manager self.channels: dict[str, BaseChannel] = {} self._dispatch_task: asyncio.Task | None = None @@ -40,6 +46,7 @@ class ChannelManager: self.config.channels.telegram, self.bus, groq_api_key=self.config.providers.groq.api_key, + session_manager=self.session_manager, ) logger.info("Telegram channel enabled") except ImportError as e: diff --git a/nanobot/channels/telegram.py b/nanobot/channels/telegram.py index f2b6d1f..4f62557 100644 --- a/nanobot/channels/telegram.py +++ b/nanobot/channels/telegram.py @@ -1,17 +1,23 @@ """Telegram channel implementation using python-telegram-bot.""" +from __future__ import annotations + import asyncio import re +from typing import TYPE_CHECKING from loguru import logger -from telegram import Update -from telegram.ext import 
Application, MessageHandler, filters, ContextTypes +from telegram import BotCommand, Update +from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import TelegramConfig +if TYPE_CHECKING: + from nanobot.session.manager import SessionManager + def _markdown_to_telegram_html(text: str) -> str: """ @@ -85,10 +91,24 @@ class TelegramChannel(BaseChannel): name = "telegram" - def __init__(self, config: TelegramConfig, bus: MessageBus, groq_api_key: str = ""): + # Commands registered with Telegram's command menu + BOT_COMMANDS = [ + BotCommand("start", "Start the bot"), + BotCommand("reset", "Reset conversation history"), + BotCommand("help", "Show available commands"), + ] + + def __init__( + self, + config: TelegramConfig, + bus: MessageBus, + groq_api_key: str = "", + session_manager: SessionManager | None = None, + ): super().__init__(config, bus) self.config: TelegramConfig = config self.groq_api_key = groq_api_key + self.session_manager = session_manager self._app: Application | None = None self._chat_ids: dict[str, int] = {} # Map sender_id to chat_id for replies @@ -106,6 +126,11 @@ class TelegramChannel(BaseChannel): builder = builder.proxy(self.config.proxy).get_updates_proxy(self.config.proxy) self._app = builder.build() + # Add command handlers + self._app.add_handler(CommandHandler("start", self._on_start)) + self._app.add_handler(CommandHandler("reset", self._on_reset)) + self._app.add_handler(CommandHandler("help", self._on_help)) + # Add message handler for text, photos, voice, documents self._app.add_handler( MessageHandler( @@ -115,20 +140,22 @@ class TelegramChannel(BaseChannel): ) ) - # Add /start command handler - from telegram.ext import CommandHandler - self._app.add_handler(CommandHandler("start", self._on_start)) - logger.info("Starting Telegram bot (polling mode)...") # Initialize and start polling await self._app.initialize() await self._app.start() - # Get bot info + # Get bot info and register command menu bot_info = await self._app.bot.get_me() logger.info(f"Telegram bot @{bot_info.username} connected") + try: + await self._app.bot.set_my_commands(self.BOT_COMMANDS) + logger.debug("Telegram bot commands registered") + except Exception as e: + logger.warning(f"Failed to register bot commands: {e}") + # Start polling (this runs until stopped) await self._app.updater.start_polling( allowed_updates=["message"], @@ -187,9 +214,45 @@ class TelegramChannel(BaseChannel): user = update.effective_user await update.message.reply_text( f"👋 Hi {user.first_name}! I'm nanobot.\n\n" - "Send me a message and I'll respond!" + "Send me a message and I'll respond!\n" + "Type /help to see available commands." 
) + async def _on_reset(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Handle /reset command — clear conversation history.""" + if not update.message or not update.effective_user: + return + + chat_id = str(update.message.chat_id) + session_key = f"{self.name}:{chat_id}" + + if self.session_manager is None: + logger.warning("/reset called but session_manager is not available") + await update.message.reply_text("⚠️ Session management is not available.") + return + + session = self.session_manager.get_or_create(session_key) + msg_count = len(session.messages) + session.clear() + self.session_manager.save(session) + + logger.info(f"Session reset for {session_key} (cleared {msg_count} messages)") + await update.message.reply_text("🔄 Conversation history cleared. Let's start fresh!") + + async def _on_help(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Handle /help command — show available commands.""" + if not update.message: + return + + help_text = ( + "🐈 nanobot commands\n\n" + "/start — Start the bot\n" + "/reset — Reset conversation history\n" + "/help — Show this help message\n\n" + "Just send me a text message to chat!" + ) + await update.message.reply_text(help_text, parse_mode="HTML") + async def _on_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """Handle incoming messages (text, photos, voice, documents).""" if not update.message or not update.effective_user: diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py index 19e62e9..bfb3b1d 100644 --- a/nanobot/cli/commands.py +++ b/nanobot/cli/commands.py @@ -179,6 +179,7 @@ def gateway( from nanobot.bus.queue import MessageBus from nanobot.agent.loop import AgentLoop from nanobot.channels.manager import ChannelManager + from nanobot.session.manager import SessionManager from nanobot.cron.service import CronService from nanobot.cron.types import CronJob from nanobot.heartbeat.service import HeartbeatService @@ -192,6 +193,7 @@ def gateway( config = load_config() bus = MessageBus() provider = _make_provider(config) + session_manager = SessionManager(config.workspace_path) # Create cron service first (callback set after agent creation) cron_store_path = get_data_dir() / "cron" / "jobs.json" @@ -208,6 +210,7 @@ def gateway( exec_config=config.tools.exec, cron_service=cron, restrict_to_workspace=config.tools.restrict_to_workspace, + session_manager=session_manager, ) # Set cron callback (needs agent) @@ -242,7 +245,7 @@ def gateway( ) # Create channel manager - channels = ChannelManager(config, bus) + channels = ChannelManager(config, bus, session_manager=session_manager) if channels.enabled_channels: console.print(f"[green]✓[/green] Channels enabled: {', '.join(channels.enabled_channels)}") From 00185f2beea1fbab70a2aa9e229d35a7aa54d6fa Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sun, 8 Feb 2026 05:44:06 +0000 Subject: [PATCH 4/9] feat: add Telegram typing indicator --- .gitignore | 1 + nanobot/channels/telegram.py | 38 +++++++++++++++++++++++++++++++++++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 316e214..55338f7 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,4 @@ docs/ __pycache__/ poetry.lock .pytest_cache/ +tests/ \ No newline at end of file diff --git a/nanobot/channels/telegram.py b/nanobot/channels/telegram.py index 4f62557..ff46c86 100644 --- a/nanobot/channels/telegram.py +++ b/nanobot/channels/telegram.py @@ -111,6 +111,7 @@ class TelegramChannel(BaseChannel): self.session_manager = 
session_manager self._app: Application | None = None self._chat_ids: dict[str, int] = {} # Map sender_id to chat_id for replies + self._typing_tasks: dict[str, asyncio.Task] = {} # chat_id -> typing loop task async def start(self) -> None: """Start the Telegram bot with long polling.""" @@ -170,6 +171,10 @@ class TelegramChannel(BaseChannel): """Stop the Telegram bot.""" self._running = False + # Cancel all typing indicators + for chat_id in list(self._typing_tasks): + self._stop_typing(chat_id) + if self._app: logger.info("Stopping Telegram bot...") await self._app.updater.stop() @@ -183,6 +188,9 @@ class TelegramChannel(BaseChannel): logger.warning("Telegram bot not running") return + # Stop typing indicator for this chat + self._stop_typing(msg.chat_id) + try: # chat_id should be the Telegram chat ID (integer) chat_id = int(msg.chat_id) @@ -335,10 +343,15 @@ class TelegramChannel(BaseChannel): logger.debug(f"Telegram message from {sender_id}: {content[:50]}...") + str_chat_id = str(chat_id) + + # Start typing indicator before processing + self._start_typing(str_chat_id) + # Forward to the message bus await self._handle_message( sender_id=sender_id, - chat_id=str(chat_id), + chat_id=str_chat_id, content=content, media=media_paths, metadata={ @@ -350,6 +363,29 @@ class TelegramChannel(BaseChannel): } ) + def _start_typing(self, chat_id: str) -> None: + """Start sending 'typing...' indicator for a chat.""" + # Cancel any existing typing task for this chat + self._stop_typing(chat_id) + self._typing_tasks[chat_id] = asyncio.create_task(self._typing_loop(chat_id)) + + def _stop_typing(self, chat_id: str) -> None: + """Stop the typing indicator for a chat.""" + task = self._typing_tasks.pop(chat_id, None) + if task and not task.done(): + task.cancel() + + async def _typing_loop(self, chat_id: str) -> None: + """Repeatedly send 'typing' action until cancelled.""" + try: + while self._app: + await self._app.bot.send_chat_action(chat_id=int(chat_id), action="typing") + await asyncio.sleep(4) + except asyncio.CancelledError: + pass + except Exception as e: + logger.debug(f"Typing indicator stopped for {chat_id}: {e}") + def _get_extension(self, media_type: str, mime_type: str | None) -> str: """Get file extension based on media type.""" if mime_type: From 299d8b33b31418bd6e4f0b38260a937f8789dca4 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sun, 8 Feb 2026 07:29:31 +0000 Subject: [PATCH 5/9] refactor: replace provider if-elif chains with declarative registry --- README.md | 48 ++++ nanobot/cli/commands.py | 33 ++- nanobot/config/schema.py | 47 ++-- nanobot/providers/litellm_provider.py | 133 ++++++----- nanobot/providers/registry.py | 323 ++++++++++++++++++++++++++ 5 files changed, 474 insertions(+), 110 deletions(-) create mode 100644 nanobot/providers/registry.py diff --git a/README.md b/README.md index ff827be..90ca9e3 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,7 @@ ## 📢 News +- **2026-02-08** 🔧 Refactored Providers — adding a new LLM provider only takes just 2 steps! Check [here](#providers). - **2026-02-07** 🚀 Released v0.1.3.post5 with Qwen support & several improvements! Check [here](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post5) for details. - **2026-02-06** ✨ Added Moonshot/Kimi provider, Discord integration, and enhanced security hardening! - **2026-02-05** ✨ Added Feishu channel, DeepSeek provider, and enhanced scheduled tasks support! 
@@ -355,6 +356,53 @@ Config file: `~/.nanobot/config.json` | `gemini` | LLM (Gemini direct) | [aistudio.google.com](https://aistudio.google.com) | | `aihubmix` | LLM (API gateway, access to all models) | [aihubmix.com](https://aihubmix.com) | | `dashscope` | LLM (Qwen) | [dashscope.console.aliyun.com](https://dashscope.console.aliyun.com) | +| `moonshot` | LLM (Moonshot/Kimi) | [platform.moonshot.cn](https://platform.moonshot.cn) | +| `zhipu` | LLM (Zhipu GLM) | [open.bigmodel.cn](https://open.bigmodel.cn) | +| `vllm` | LLM (local, any OpenAI-compatible server) | — | + +
+Adding a New Provider (Developer Guide) + +nanobot uses a **Provider Registry** (`nanobot/providers/registry.py`) as the single source of truth. +Adding a new provider only takes **2 steps** — no if-elif chains to touch. + +**Step 1.** Add a `ProviderSpec` entry to `PROVIDERS` in `nanobot/providers/registry.py`: + +```python +ProviderSpec( + name="myprovider", # config field name + keywords=("myprovider", "mymodel"), # model-name keywords for auto-matching + env_key="MYPROVIDER_API_KEY", # env var for LiteLLM + display_name="My Provider", # shown in `nanobot status` + litellm_prefix="myprovider", # auto-prefix: model → myprovider/model + skip_prefixes=("myprovider/",), # don't double-prefix +) +``` + +**Step 2.** Add a field to `ProvidersConfig` in `nanobot/config/schema.py`: + +```python +class ProvidersConfig(BaseModel): + ... + myprovider: ProviderConfig = ProviderConfig() +``` + +That's it! Environment variables, model prefixing, config matching, and `nanobot status` display will all work automatically. + +**Common `ProviderSpec` options:** + +| Field | Description | Example | +|-------|-------------|---------| +| `litellm_prefix` | Auto-prefix model names for LiteLLM | `"dashscope"` → `dashscope/qwen-max` | +| `skip_prefixes` | Don't prefix if model already starts with these | `("dashscope/", "openrouter/")` | +| `env_extras` | Additional env vars to set | `(("ZHIPUAI_API_KEY", "{api_key}"),)` | +| `model_overrides` | Per-model parameter overrides | `(("kimi-k2.5", {"temperature": 1.0}),)` | +| `is_gateway` | Can route any model (like OpenRouter) | `True` | +| `detect_by_key_prefix` | Detect gateway by API key prefix | `"sk-or-"` | +| `detect_by_base_keyword` | Detect gateway by API base URL | `"openrouter"` | +| `strip_model_prefix` | Strip existing prefix before re-prefixing | `True` (for AiHubMix) | + +
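+
+As a quick sanity check, the registry lookups can be exercised directly (behavior shown for the built-in specs above):
+
+```python
+from nanobot.providers.registry import find_by_model, find_gateway
+
+# Standard providers match by model-name keyword (substring, case-insensitive).
+spec = find_by_model("qwen-max")
+print(spec.name, spec.litellm_prefix)   # dashscope dashscope → "dashscope/qwen-max"
+
+# Gateways are detected from credentials, not model names.
+gateway = find_gateway(api_key="sk-or-abc123", api_base=None)
+print(gateway.name)                     # openrouter (matched by "sk-or-" key prefix)
+```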
### Security diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py index bfb3b1d..1dab818 100644 --- a/nanobot/cli/commands.py +++ b/nanobot/cli/commands.py @@ -635,25 +635,24 @@ def status(): console.print(f"Workspace: {workspace} {'[green]✓[/green]' if workspace.exists() else '[red]✗[/red]'}") if config_path.exists(): + from nanobot.providers.registry import PROVIDERS + console.print(f"Model: {config.agents.defaults.model}") - # Check API keys - has_openrouter = bool(config.providers.openrouter.api_key) - has_anthropic = bool(config.providers.anthropic.api_key) - has_openai = bool(config.providers.openai.api_key) - has_gemini = bool(config.providers.gemini.api_key) - has_zhipu = bool(config.providers.zhipu.api_key) - has_vllm = bool(config.providers.vllm.api_base) - has_aihubmix = bool(config.providers.aihubmix.api_key) - - console.print(f"OpenRouter API: {'[green]✓[/green]' if has_openrouter else '[dim]not set[/dim]'}") - console.print(f"Anthropic API: {'[green]✓[/green]' if has_anthropic else '[dim]not set[/dim]'}") - console.print(f"OpenAI API: {'[green]✓[/green]' if has_openai else '[dim]not set[/dim]'}") - console.print(f"Gemini API: {'[green]✓[/green]' if has_gemini else '[dim]not set[/dim]'}") - console.print(f"Zhipu AI API: {'[green]✓[/green]' if has_zhipu else '[dim]not set[/dim]'}") - console.print(f"AiHubMix API: {'[green]✓[/green]' if has_aihubmix else '[dim]not set[/dim]'}") - vllm_status = f"[green]✓ {config.providers.vllm.api_base}[/green]" if has_vllm else "[dim]not set[/dim]" - console.print(f"vLLM/Local: {vllm_status}") + # Check API keys from registry + for spec in PROVIDERS: + p = getattr(config.providers, spec.name, None) + if p is None: + continue + if spec.is_local: + # Local deployments show api_base instead of api_key + if p.api_base: + console.print(f"{spec.label}: [green]✓ {p.api_base}[/green]") + else: + console.print(f"{spec.label}: [dim]not set[/dim]") + else: + has_key = bool(p.api_key) + console.print(f"{spec.label}: {'[green]✓[/green]' if has_key else '[dim]not set[/dim]'}") if __name__ == "__main__": diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py index 7724288..ea8f8ba 100644 --- a/nanobot/config/schema.py +++ b/nanobot/config/schema.py @@ -125,29 +125,23 @@ class Config(BaseSettings): """Get expanded workspace path.""" return Path(self.agents.defaults.workspace).expanduser() - # Default base URLs for API gateways - _GATEWAY_DEFAULTS = {"openrouter": "https://openrouter.ai/api/v1", "aihubmix": "https://aihubmix.com/v1"} - def get_provider(self, model: str | None = None) -> ProviderConfig | None: """Get matched provider config (api_key, api_base, extra_headers). 
Falls back to first available.""" - model = (model or self.agents.defaults.model).lower() - p = self.providers - # Keyword → provider mapping (order matters: gateways first) - keyword_map = { - "aihubmix": p.aihubmix, "openrouter": p.openrouter, - "deepseek": p.deepseek, "anthropic": p.anthropic, "claude": p.anthropic, - "openai": p.openai, "gpt": p.openai, "gemini": p.gemini, - "zhipu": p.zhipu, "glm": p.zhipu, "zai": p.zhipu, - "dashscope": p.dashscope, "qwen": p.dashscope, - "groq": p.groq, "moonshot": p.moonshot, "kimi": p.moonshot, "vllm": p.vllm, - } - for kw, provider in keyword_map.items(): - if kw in model and provider.api_key: - return provider - # Fallback: gateways first (can serve any model), then specific providers - all_providers = [p.openrouter, p.aihubmix, p.anthropic, p.openai, p.deepseek, - p.gemini, p.zhipu, p.dashscope, p.moonshot, p.vllm, p.groq] - return next((pr for pr in all_providers if pr.api_key), None) + from nanobot.providers.registry import PROVIDERS + model_lower = (model or self.agents.defaults.model).lower() + + # Match by keyword (order follows PROVIDERS registry) + for spec in PROVIDERS: + p = getattr(self.providers, spec.name, None) + if p and any(kw in model_lower for kw in spec.keywords) and p.api_key: + return p + + # Fallback: gateways first, then others (follows registry order) + for spec in PROVIDERS: + p = getattr(self.providers, spec.name, None) + if p and p.api_key: + return p + return None def get_api_key(self, model: str | None = None) -> str | None: """Get API key for the given model. Falls back to first available key.""" @@ -156,13 +150,16 @@ class Config(BaseSettings): def get_api_base(self, model: str | None = None) -> str | None: """Get API base URL for the given model. Applies default URLs for known gateways.""" + from nanobot.providers.registry import PROVIDERS p = self.get_provider(model) if p and p.api_base: return p.api_base - # Default URLs for known gateways (openrouter, aihubmix) - for name, url in self._GATEWAY_DEFAULTS.items(): - if p == getattr(self.providers, name): - return url + # Only gateways get a default URL here. Standard providers (like Moonshot) + # handle their base URL via env vars in _setup_env, NOT via api_base — + # otherwise find_gateway() would misdetect them as local/vLLM. + for spec in PROVIDERS: + if spec.is_gateway and spec.default_api_base and p == getattr(self.providers, spec.name, None): + return spec.default_api_base return None class Config: diff --git a/nanobot/providers/litellm_provider.py b/nanobot/providers/litellm_provider.py index 415100c..5e9c22f 100644 --- a/nanobot/providers/litellm_provider.py +++ b/nanobot/providers/litellm_provider.py @@ -1,5 +1,6 @@ """LiteLLM provider implementation for multi-provider support.""" +import json import os from typing import Any @@ -7,6 +8,7 @@ import litellm from litellm import acompletion from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest +from nanobot.providers.registry import find_by_model, find_gateway class LiteLLMProvider(LLMProvider): @@ -14,7 +16,8 @@ class LiteLLMProvider(LLMProvider): LLM provider using LiteLLM for multi-provider support. Supports OpenRouter, Anthropic, OpenAI, Gemini, and many other providers through - a unified interface. + a unified interface. Provider-specific logic is driven by the registry + (see providers/registry.py) — no if-elif chains needed here. 
""" def __init__( @@ -28,47 +31,17 @@ class LiteLLMProvider(LLMProvider): self.default_model = default_model self.extra_headers = extra_headers or {} - # Detect OpenRouter by api_key prefix or explicit api_base - self.is_openrouter = ( - (api_key and api_key.startswith("sk-or-")) or - (api_base and "openrouter" in api_base) - ) + # Detect gateway / local deployment from api_key and api_base + self._gateway = find_gateway(api_key, api_base) - # Detect AiHubMix by api_base - self.is_aihubmix = bool(api_base and "aihubmix" in api_base) + # Backwards-compatible flags (used by tests and possibly external code) + self.is_openrouter = bool(self._gateway and self._gateway.name == "openrouter") + self.is_aihubmix = bool(self._gateway and self._gateway.name == "aihubmix") + self.is_vllm = bool(self._gateway and self._gateway.is_local) - # Track if using custom endpoint (vLLM, etc.) - self.is_vllm = bool(api_base) and not self.is_openrouter and not self.is_aihubmix - - # Configure LiteLLM based on provider + # Configure environment variables if api_key: - if self.is_openrouter: - # OpenRouter mode - set key - os.environ["OPENROUTER_API_KEY"] = api_key - elif self.is_aihubmix: - # AiHubMix gateway - OpenAI-compatible - os.environ["OPENAI_API_KEY"] = api_key - elif self.is_vllm: - # vLLM/custom endpoint - uses OpenAI-compatible API - os.environ["HOSTED_VLLM_API_KEY"] = api_key - elif "deepseek" in default_model: - os.environ.setdefault("DEEPSEEK_API_KEY", api_key) - elif "anthropic" in default_model: - os.environ.setdefault("ANTHROPIC_API_KEY", api_key) - elif "openai" in default_model or "gpt" in default_model: - os.environ.setdefault("OPENAI_API_KEY", api_key) - elif "gemini" in default_model.lower(): - os.environ.setdefault("GEMINI_API_KEY", api_key) - elif "zhipu" in default_model or "glm" in default_model or "zai" in default_model: - os.environ.setdefault("ZAI_API_KEY", api_key) - os.environ.setdefault("ZHIPUAI_API_KEY", api_key) - elif "dashscope" in default_model or "qwen" in default_model.lower(): - os.environ.setdefault("DASHSCOPE_API_KEY", api_key) - elif "groq" in default_model: - os.environ.setdefault("GROQ_API_KEY", api_key) - elif "moonshot" in default_model or "kimi" in default_model: - os.environ.setdefault("MOONSHOT_API_KEY", api_key) - os.environ.setdefault("MOONSHOT_API_BASE", api_base or "https://api.moonshot.cn/v1") + self._setup_env(api_key, api_base, default_model) if api_base: litellm.api_base = api_base @@ -76,6 +49,55 @@ class LiteLLMProvider(LLMProvider): # Disable LiteLLM logging noise litellm.suppress_debug_info = True + def _setup_env(self, api_key: str, api_base: str | None, model: str) -> None: + """Set environment variables based on detected provider.""" + if self._gateway: + # Gateway / local: direct set (not setdefault) + os.environ[self._gateway.env_key] = api_key + return + + # Standard provider: match by model name + spec = find_by_model(model) + if spec: + os.environ.setdefault(spec.env_key, api_key) + # Resolve env_extras placeholders: + # {api_key} → user's API key + # {api_base} → user's api_base, falling back to spec.default_api_base + effective_base = api_base or spec.default_api_base + for env_name, env_val in spec.env_extras: + resolved = env_val.replace("{api_key}", api_key) + resolved = resolved.replace("{api_base}", effective_base) + os.environ.setdefault(env_name, resolved) + + def _resolve_model(self, model: str) -> str: + """Resolve model name by applying provider/gateway prefixes.""" + if self._gateway: + # Gateway mode: apply gateway prefix, skip 
provider-specific prefixes + prefix = self._gateway.litellm_prefix + if self._gateway.strip_model_prefix: + model = model.split("/")[-1] + if prefix and not model.startswith(f"{prefix}/"): + model = f"{prefix}/{model}" + return model + + # Standard mode: auto-prefix for known providers + spec = find_by_model(model) + if spec and spec.litellm_prefix: + if not any(model.startswith(s) for s in spec.skip_prefixes): + model = f"{spec.litellm_prefix}/{model}" + + return model + + def _apply_model_overrides(self, model: str, kwargs: dict[str, Any]) -> None: + """Apply model-specific parameter overrides from the registry.""" + model_lower = model.lower() + spec = find_by_model(model) + if spec: + for pattern, overrides in spec.model_overrides: + if pattern in model_lower: + kwargs.update(overrides) + return + async def chat( self, messages: list[dict[str, Any]], @@ -97,35 +119,8 @@ class LiteLLMProvider(LLMProvider): Returns: LLMResponse with content and/or tool calls. """ - model = model or self.default_model + model = self._resolve_model(model or self.default_model) - # Auto-prefix model names for known providers - # (keywords, target_prefix, skip_if_starts_with) - _prefix_rules = [ - (("glm", "zhipu"), "zai", ("zhipu/", "zai/", "openrouter/", "hosted_vllm/")), - (("qwen", "dashscope"), "dashscope", ("dashscope/", "openrouter/")), - (("moonshot", "kimi"), "moonshot", ("moonshot/", "openrouter/")), - (("gemini",), "gemini", ("gemini/",)), - ] - if not (self.is_vllm or self.is_openrouter or self.is_aihubmix): - model_lower = model.lower() - for keywords, prefix, skip in _prefix_rules: - if any(kw in model_lower for kw in keywords) and not any(model.startswith(s) for s in skip): - model = f"{prefix}/{model}" - break - - # Gateway/endpoint-specific prefixes (detected by api_base/api_key, not model name) - if self.is_openrouter and not model.startswith("openrouter/"): - model = f"openrouter/{model}" - elif self.is_aihubmix: - model = f"openai/{model.split('/')[-1]}" - elif self.is_vllm: - model = f"hosted_vllm/{model}" - - # kimi-k2.5 only supports temperature=1.0 - if "kimi-k2.5" in model.lower(): - temperature = 1.0 - kwargs: dict[str, Any] = { "model": model, "messages": messages, @@ -133,6 +128,9 @@ class LiteLLMProvider(LLMProvider): "temperature": temperature, } + # Apply model-specific overrides (e.g. kimi-k2.5 temperature) + self._apply_model_overrides(model, kwargs) + # Pass api_base directly for custom endpoints (vLLM, etc.) if self.api_base: kwargs["api_base"] = self.api_base @@ -166,7 +164,6 @@ class LiteLLMProvider(LLMProvider): # Parse arguments from JSON string if needed args = tc.function.arguments if isinstance(args, str): - import json try: args = json.loads(args) except json.JSONDecodeError: diff --git a/nanobot/providers/registry.py b/nanobot/providers/registry.py new file mode 100644 index 0000000..aa4a76e --- /dev/null +++ b/nanobot/providers/registry.py @@ -0,0 +1,323 @@ +""" +Provider Registry — single source of truth for LLM provider metadata. + +Adding a new provider: + 1. Add a ProviderSpec to PROVIDERS below. + 2. Add a field to ProvidersConfig in config/schema.py. + Done. Env vars, prefixing, config matching, status display all derive from here. + +Order matters — it controls match priority and fallback. Gateways first. +Every entry writes out all fields so you can copy-paste as a template. 
+""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any + + +@dataclass(frozen=True) +class ProviderSpec: + """One LLM provider's metadata. See PROVIDERS below for real examples. + + Placeholders in env_extras values: + {api_key} — the user's API key + {api_base} — api_base from config, or this spec's default_api_base + """ + + # identity + name: str # config field name, e.g. "dashscope" + keywords: tuple[str, ...] # model-name keywords for matching (lowercase) + env_key: str # LiteLLM env var, e.g. "DASHSCOPE_API_KEY" + display_name: str = "" # shown in `nanobot status` + + # model prefixing + litellm_prefix: str = "" # "dashscope" → model becomes "dashscope/{model}" + skip_prefixes: tuple[str, ...] = () # don't prefix if model already starts with these + + # extra env vars, e.g. (("ZHIPUAI_API_KEY", "{api_key}"),) + env_extras: tuple[tuple[str, str], ...] = () + + # gateway / local detection + is_gateway: bool = False # routes any model (OpenRouter, AiHubMix) + is_local: bool = False # local deployment (vLLM, Ollama) + detect_by_key_prefix: str = "" # match api_key prefix, e.g. "sk-or-" + detect_by_base_keyword: str = "" # match substring in api_base URL + default_api_base: str = "" # fallback base URL + + # gateway behavior + strip_model_prefix: bool = False # strip "provider/" before re-prefixing + + # per-model param overrides, e.g. (("kimi-k2.5", {"temperature": 1.0}),) + model_overrides: tuple[tuple[str, dict[str, Any]], ...] = () + + @property + def label(self) -> str: + return self.display_name or self.name.title() + + +# --------------------------------------------------------------------------- +# PROVIDERS — the registry. Order = priority. Copy any entry as template. +# --------------------------------------------------------------------------- + +PROVIDERS: tuple[ProviderSpec, ...] = ( + + # === Gateways (detected by api_key / api_base, not model name) ========= + # Gateways can route any model, so they win in fallback. + + # OpenRouter: global gateway, keys start with "sk-or-" + ProviderSpec( + name="openrouter", + keywords=("openrouter",), + env_key="OPENROUTER_API_KEY", + display_name="OpenRouter", + litellm_prefix="openrouter", # claude-3 → openrouter/claude-3 + skip_prefixes=(), + env_extras=(), + is_gateway=True, + is_local=False, + detect_by_key_prefix="sk-or-", + detect_by_base_keyword="openrouter", + default_api_base="https://openrouter.ai/api/v1", + strip_model_prefix=False, + model_overrides=(), + ), + + # AiHubMix: global gateway, OpenAI-compatible interface. + # strip_model_prefix=True: it doesn't understand "anthropic/claude-3", + # so we strip to bare "claude-3" then re-prefix as "openai/claude-3". + ProviderSpec( + name="aihubmix", + keywords=("aihubmix",), + env_key="OPENAI_API_KEY", # OpenAI-compatible + display_name="AiHubMix", + litellm_prefix="openai", # → openai/{model} + skip_prefixes=(), + env_extras=(), + is_gateway=True, + is_local=False, + detect_by_key_prefix="", + detect_by_base_keyword="aihubmix", + default_api_base="https://aihubmix.com/v1", + strip_model_prefix=True, # anthropic/claude-3 → claude-3 → openai/claude-3 + model_overrides=(), + ), + + # === Standard providers (matched by model-name keywords) =============== + + # Anthropic: LiteLLM recognizes "claude-*" natively, no prefix needed. 
+ ProviderSpec( + name="anthropic", + keywords=("anthropic", "claude"), + env_key="ANTHROPIC_API_KEY", + display_name="Anthropic", + litellm_prefix="", + skip_prefixes=(), + env_extras=(), + is_gateway=False, + is_local=False, + detect_by_key_prefix="", + detect_by_base_keyword="", + default_api_base="", + strip_model_prefix=False, + model_overrides=(), + ), + + # OpenAI: LiteLLM recognizes "gpt-*" natively, no prefix needed. + ProviderSpec( + name="openai", + keywords=("openai", "gpt"), + env_key="OPENAI_API_KEY", + display_name="OpenAI", + litellm_prefix="", + skip_prefixes=(), + env_extras=(), + is_gateway=False, + is_local=False, + detect_by_key_prefix="", + detect_by_base_keyword="", + default_api_base="", + strip_model_prefix=False, + model_overrides=(), + ), + + # DeepSeek: needs "deepseek/" prefix for LiteLLM routing. + ProviderSpec( + name="deepseek", + keywords=("deepseek",), + env_key="DEEPSEEK_API_KEY", + display_name="DeepSeek", + litellm_prefix="deepseek", # deepseek-chat → deepseek/deepseek-chat + skip_prefixes=("deepseek/",), # avoid double-prefix + env_extras=(), + is_gateway=False, + is_local=False, + detect_by_key_prefix="", + detect_by_base_keyword="", + default_api_base="", + strip_model_prefix=False, + model_overrides=(), + ), + + # Gemini: needs "gemini/" prefix for LiteLLM. + ProviderSpec( + name="gemini", + keywords=("gemini",), + env_key="GEMINI_API_KEY", + display_name="Gemini", + litellm_prefix="gemini", # gemini-pro → gemini/gemini-pro + skip_prefixes=("gemini/",), # avoid double-prefix + env_extras=(), + is_gateway=False, + is_local=False, + detect_by_key_prefix="", + detect_by_base_keyword="", + default_api_base="", + strip_model_prefix=False, + model_overrides=(), + ), + + # Zhipu: LiteLLM uses "zai/" prefix. + # Also mirrors key to ZHIPUAI_API_KEY (some LiteLLM paths check that). + # skip_prefixes: don't add "zai/" when already routed via gateway. + ProviderSpec( + name="zhipu", + keywords=("zhipu", "glm", "zai"), + env_key="ZAI_API_KEY", + display_name="Zhipu AI", + litellm_prefix="zai", # glm-4 → zai/glm-4 + skip_prefixes=("zhipu/", "zai/", "openrouter/", "hosted_vllm/"), + env_extras=( + ("ZHIPUAI_API_KEY", "{api_key}"), + ), + is_gateway=False, + is_local=False, + detect_by_key_prefix="", + detect_by_base_keyword="", + default_api_base="", + strip_model_prefix=False, + model_overrides=(), + ), + + # DashScope: Qwen models, needs "dashscope/" prefix. + ProviderSpec( + name="dashscope", + keywords=("qwen", "dashscope"), + env_key="DASHSCOPE_API_KEY", + display_name="DashScope", + litellm_prefix="dashscope", # qwen-max → dashscope/qwen-max + skip_prefixes=("dashscope/", "openrouter/"), + env_extras=(), + is_gateway=False, + is_local=False, + detect_by_key_prefix="", + detect_by_base_keyword="", + default_api_base="", + strip_model_prefix=False, + model_overrides=(), + ), + + # Moonshot: Kimi models, needs "moonshot/" prefix. + # LiteLLM requires MOONSHOT_API_BASE env var to find the endpoint. + # Kimi K2.5 API enforces temperature >= 1.0. 
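+    # The {api_base} placeholder in env_extras resolves to the configured
+    # api_base, falling back to default_api_base (see LiteLLMProvider._setup_env).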
+ ProviderSpec( + name="moonshot", + keywords=("moonshot", "kimi"), + env_key="MOONSHOT_API_KEY", + display_name="Moonshot", + litellm_prefix="moonshot", # kimi-k2.5 → moonshot/kimi-k2.5 + skip_prefixes=("moonshot/", "openrouter/"), + env_extras=( + ("MOONSHOT_API_BASE", "{api_base}"), + ), + is_gateway=False, + is_local=False, + detect_by_key_prefix="", + detect_by_base_keyword="", + default_api_base="https://api.moonshot.ai/v1", # intl; use api.moonshot.cn for China + strip_model_prefix=False, + model_overrides=( + ("kimi-k2.5", {"temperature": 1.0}), + ), + ), + + # === Local deployment (fallback: unknown api_base → assume local) ====== + + # vLLM / any OpenAI-compatible local server. + # If api_base is set but doesn't match a known gateway, we land here. + # Placed before Groq so vLLM wins the fallback when both are configured. + ProviderSpec( + name="vllm", + keywords=("vllm",), + env_key="HOSTED_VLLM_API_KEY", + display_name="vLLM/Local", + litellm_prefix="hosted_vllm", # Llama-3-8B → hosted_vllm/Llama-3-8B + skip_prefixes=(), + env_extras=(), + is_gateway=False, + is_local=True, + detect_by_key_prefix="", + detect_by_base_keyword="", + default_api_base="", # user must provide in config + strip_model_prefix=False, + model_overrides=(), + ), + + # === Auxiliary (not a primary LLM provider) ============================ + + # Groq: mainly used for Whisper voice transcription, also usable for LLM. + # Needs "groq/" prefix for LiteLLM routing. Placed last — it rarely wins fallback. + ProviderSpec( + name="groq", + keywords=("groq",), + env_key="GROQ_API_KEY", + display_name="Groq", + litellm_prefix="groq", # llama3-8b-8192 → groq/llama3-8b-8192 + skip_prefixes=("groq/",), # avoid double-prefix + env_extras=(), + is_gateway=False, + is_local=False, + detect_by_key_prefix="", + detect_by_base_keyword="", + default_api_base="", + strip_model_prefix=False, + model_overrides=(), + ), +) + + +# --------------------------------------------------------------------------- +# Lookup helpers +# --------------------------------------------------------------------------- + +def find_by_model(model: str) -> ProviderSpec | None: + """Match a standard provider by model-name keyword (case-insensitive). + Skips gateways/local — those are matched by api_key/api_base instead.""" + model_lower = model.lower() + for spec in PROVIDERS: + if spec.is_gateway or spec.is_local: + continue + if any(kw in model_lower for kw in spec.keywords): + return spec + return None + + +def find_gateway(api_key: str | None, api_base: str | None) -> ProviderSpec | None: + """Detect gateway/local by api_key prefix or api_base substring. + Fallback: unknown api_base → treat as local (vLLM).""" + for spec in PROVIDERS: + if spec.detect_by_key_prefix and api_key and api_key.startswith(spec.detect_by_key_prefix): + return spec + if spec.detect_by_base_keyword and api_base and spec.detect_by_base_keyword in api_base: + return spec + if api_base: + return next((s for s in PROVIDERS if s.is_local), None) + return None + + +def find_by_name(name: str) -> ProviderSpec | None: + """Find a provider spec by config field name, e.g. 
"dashscope".""" + for spec in PROVIDERS: + if spec.name == name: + return spec + return None From f49c639b74ced46df483ad12523580cd5e51da81 Mon Sep 17 00:00:00 2001 From: chaohuang-ai Date: Sun, 8 Feb 2026 18:02:48 +0800 Subject: [PATCH 6/9] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 90ca9e3..8824570 100644 --- a/README.md +++ b/README.md @@ -20,8 +20,8 @@ ## 📢 News -- **2026-02-08** 🔧 Refactored Providers — adding a new LLM provider only takes just 2 steps! Check [here](#providers). -- **2026-02-07** 🚀 Released v0.1.3.post5 with Qwen support & several improvements! Check [here](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post5) for details. +- **2026-02-08** 🔧 Refactored Providers — adding a new LLM provider now takes just 2 simple steps! Check [here](#providers). +- **2026-02-07** 🚀 🚀 Released v0.1.3.post5 with Qwen support & several key improvements! Check [here](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post5) for details. - **2026-02-06** ✨ Added Moonshot/Kimi provider, Discord integration, and enhanced security hardening! - **2026-02-05** ✨ Added Feishu channel, DeepSeek provider, and enhanced scheduled tasks support! - **2026-02-04** 🚀 Released v0.1.3.post4 with multi-provider & Docker support! Check [here](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post4) for details. From 9e3823ae034e16287cebbe1b36e0c486e99139b5 Mon Sep 17 00:00:00 2001 From: chaohuang-ai Date: Sun, 8 Feb 2026 18:03:00 +0800 Subject: [PATCH 7/9] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8824570..d1ae7ce 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ ## 📢 News - **2026-02-08** 🔧 Refactored Providers — adding a new LLM provider now takes just 2 simple steps! Check [here](#providers). -- **2026-02-07** 🚀 🚀 Released v0.1.3.post5 with Qwen support & several key improvements! Check [here](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post5) for details. +- **2026-02-07** 🚀 Released v0.1.3.post5 with Qwen support & several key improvements! Check [here](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post5) for details. - **2026-02-06** ✨ Added Moonshot/Kimi provider, Discord integration, and enhanced security hardening! - **2026-02-05** ✨ Added Feishu channel, DeepSeek provider, and enhanced scheduled tasks support! - **2026-02-04** 🚀 Released v0.1.3.post4 with multi-provider & Docker support! Check [here](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post4) for details. From 3675758a44d2c4d49dd867e776c18a764014975e Mon Sep 17 00:00:00 2001 From: chaohuang-ai Date: Sun, 8 Feb 2026 18:10:24 +0800 Subject: [PATCH 8/9] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d1ae7ce..a833dbe 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ ## 📢 News -- **2026-02-08** 🔧 Refactored Providers — adding a new LLM provider now takes just 2 simple steps! Check [here](#providers). +- **2026-02-08** 🔧 Refactored Providers—adding a new LLM provider now takes just 2 simple steps! Check [here](#providers). - **2026-02-07** 🚀 Released v0.1.3.post5 with Qwen support & several key improvements! Check [here](https://github.com/HKUDS/nanobot/releases/tag/v0.1.3.post5) for details. - **2026-02-06** ✨ Added Moonshot/Kimi provider, Discord integration, and enhanced security hardening! 
- **2026-02-05** ✨ Added Feishu channel, DeepSeek provider, and enhanced scheduled tasks support! From b6ec6a8a7686b8d3239bd9f363fa55490f9f9217 Mon Sep 17 00:00:00 2001 From: Re-bin Date: Sun, 8 Feb 2026 18:06:07 +0000 Subject: [PATCH 9/9] fix(dingtalk): security and resource fixes for DingTalk channel --- README.md | 10 +- nanobot/channels/dingtalk.py | 195 +++++++++++++++++++---------------- 2 files changed, 108 insertions(+), 97 deletions(-) diff --git a/README.md b/README.md index 8c5c387..326f253 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ ⚡️ Delivers core agent functionality in just **~4,000** lines of code — **99% smaller** than Clawdbot's 430k+ lines. -📏 Real-time line count: **3,423 lines** (run `bash core_agent_lines.sh` to verify anytime) +📏 Real-time line count: **3,429 lines** (run `bash core_agent_lines.sh` to verify anytime) ## 📢 News @@ -293,10 +293,6 @@ nanobot gateway Uses **WebSocket** long connection — no public IP required. -```bash -pip install nanobot-ai[feishu] -``` - **1. Create a Feishu bot** - Visit [Feishu Open Platform](https://open.feishu.cn/app) - Create a new app → Enable **Bot** capability @@ -342,10 +338,6 @@ nanobot gateway Uses **Stream Mode** — no public IP required. -```bash -pip install nanobot-ai[dingtalk] -``` - **1. Create a DingTalk bot** - Visit [DingTalk Open Platform](https://open-dev.dingtalk.com/) - Create a new app -> Add **Robot** capability diff --git a/nanobot/channels/dingtalk.py b/nanobot/channels/dingtalk.py index 897e5be..72d3afd 100644 --- a/nanobot/channels/dingtalk.py +++ b/nanobot/channels/dingtalk.py @@ -2,30 +2,35 @@ import asyncio import json -import threading import time from typing import Any from loguru import logger import httpx -from nanobot.bus.events import OutboundMessage, InboundMessage +from nanobot.bus.events import OutboundMessage from nanobot.bus.queue import MessageBus from nanobot.channels.base import BaseChannel from nanobot.config.schema import DingTalkConfig try: from dingtalk_stream import ( - DingTalkStreamClient, + DingTalkStreamClient, Credential, CallbackHandler, CallbackMessage, - AckMessage + AckMessage, ) from dingtalk_stream.chatbot import ChatbotMessage + DINGTALK_AVAILABLE = True except ImportError: DINGTALK_AVAILABLE = False + # Fallback so class definitions don't crash at module level + CallbackHandler = object # type: ignore[assignment,misc] + CallbackMessage = None # type: ignore[assignment,misc] + AckMessage = None # type: ignore[assignment,misc] + ChatbotMessage = None # type: ignore[assignment,misc] class NanobotDingTalkHandler(CallbackHandler): @@ -33,127 +38,146 @@ class NanobotDingTalkHandler(CallbackHandler): Standard DingTalk Stream SDK Callback Handler. Parses incoming messages and forwards them to the Nanobot channel. 
""" + def __init__(self, channel: "DingTalkChannel"): super().__init__() self.channel = channel - + async def process(self, message: CallbackMessage): """Process incoming stream message.""" try: # Parse using SDK's ChatbotMessage for robust handling chatbot_msg = ChatbotMessage.from_dict(message.data) - - # Extract content based on message type + + # Extract text content; fall back to raw dict if SDK object is empty content = "" if chatbot_msg.text: content = chatbot_msg.text.content.strip() - elif chatbot_msg.message_type == "text": - # Fallback manual extraction if object not populated - content = message.data.get("text", {}).get("content", "").strip() - if not content: - logger.warning(f"Received empty or unsupported message type: {chatbot_msg.message_type}") + content = message.data.get("text", {}).get("content", "").strip() + + if not content: + logger.warning( + f"Received empty or unsupported message type: {chatbot_msg.message_type}" + ) return AckMessage.STATUS_OK, "OK" sender_id = chatbot_msg.sender_staff_id or chatbot_msg.sender_id sender_name = chatbot_msg.sender_nick or "Unknown" - + logger.info(f"Received DingTalk message from {sender_name} ({sender_id}): {content}") - # Forward to Nanobot - # We use asyncio.create_task to avoid blocking the ACK return - asyncio.create_task( + # Forward to Nanobot via _on_message (non-blocking). + # Store reference to prevent GC before task completes. + task = asyncio.create_task( self.channel._on_message(content, sender_id, sender_name) ) + self.channel._background_tasks.add(task) + task.add_done_callback(self.channel._background_tasks.discard) return AckMessage.STATUS_OK, "OK" - + except Exception as e: logger.error(f"Error processing DingTalk message: {e}") - # Return OK to avoid retry loop from DingTalk server if it's a parsing error + # Return OK to avoid retry loop from DingTalk server return AckMessage.STATUS_OK, "Error" + class DingTalkChannel(BaseChannel): """ DingTalk channel using Stream Mode. - + Uses WebSocket to receive events via `dingtalk-stream` SDK. - Uses direct HTTP API to send messages (since SDK is mainly for receiving). + Uses direct HTTP API to send messages (SDK is mainly for receiving). + + Note: Currently only supports private (1:1) chat. Group messages are + received but replies are sent back as private messages to the sender. """ - + name = "dingtalk" - + def __init__(self, config: DingTalkConfig, bus: MessageBus): super().__init__(config, bus) self.config: DingTalkConfig = config self._client: Any = None - self._loop: asyncio.AbstractEventLoop | None = None - + self._http: httpx.AsyncClient | None = None + # Access Token management for sending messages self._access_token: str | None = None self._token_expiry: float = 0 - + + # Hold references to background tasks to prevent GC + self._background_tasks: set[asyncio.Task] = set() + async def start(self) -> None: """Start the DingTalk bot with Stream Mode.""" try: if not DINGTALK_AVAILABLE: - logger.error("DingTalk Stream SDK not installed. Run: pip install dingtalk-stream") + logger.error( + "DingTalk Stream SDK not installed. 
+                )
                 return
-            
+
             if not self.config.client_id or not self.config.client_secret:
                 logger.error("DingTalk client_id and client_secret not configured")
                 return
-            
             self._running = True
-            self._loop = asyncio.get_running_loop()
-            
-            logger.info(f"Initializing DingTalk Stream Client with Client ID: {self.config.client_id}...")
+            self._http = httpx.AsyncClient()
+
+            logger.info(
+                f"Initializing DingTalk Stream Client with Client ID: {self.config.client_id}..."
+            )
             credential = Credential(self.config.client_id, self.config.client_secret)
             self._client = DingTalkStreamClient(credential)
-            
+
             # Register standard handler
             handler = NanobotDingTalkHandler(self)
-            
-            # Register using the chatbot topic standard for bots
-            self._client.register_callback_handler(
-                ChatbotMessage.TOPIC, 
-                handler
-            )
-            
+            self._client.register_callback_handler(ChatbotMessage.TOPIC, handler)
+
             logger.info("DingTalk bot started with Stream Mode")
-            
-            # The client.start() method is an async infinite loop that handles the websocket connection
+
+            # client.start() is an async infinite loop handling the websocket connection
             await self._client.start()
 
         except Exception as e:
             logger.exception(f"Failed to start DingTalk channel: {e}")
-    
+
     async def stop(self) -> None:
         """Stop the DingTalk bot."""
         self._running = False
-        # SDK doesn't expose a clean stop method that cancels loop immediately without private access
-        pass
+        # Close the shared HTTP client
+        if self._http:
+            await self._http.aclose()
+            self._http = None
+        # Cancel outstanding background tasks
+        for task in self._background_tasks:
+            task.cancel()
+        self._background_tasks.clear()
 
     async def _get_access_token(self) -> str | None:
         """Get or refresh Access Token."""
         if self._access_token and time.time() < self._token_expiry:
             return self._access_token
-        
+
         url = "https://api.dingtalk.com/v1.0/oauth2/accessToken"
         data = {
             "appKey": self.config.client_id,
-            "appSecret": self.config.client_secret
+            "appSecret": self.config.client_secret,
         }
-        
+
+        if not self._http:
+            logger.warning("DingTalk HTTP client not initialized, cannot refresh token")
+            return None
+
         try:
-            async with httpx.AsyncClient() as client:
-                resp = await client.post(url, json=data)
-                resp.raise_for_status()
-                res_data = resp.json()
-                self._access_token = res_data.get("accessToken")
-                # Expire 60s early to be safe
-                self._token_expiry = time.time() + int(res_data.get("expireIn", 7200)) - 60
-                return self._access_token
+            resp = await self._http.post(url, json=data)
+            resp.raise_for_status()
+            res_data = resp.json()
+            self._access_token = res_data.get("accessToken")
+            # Expire 60s early to be safe
+            self._token_expiry = time.time() + int(res_data.get("expireIn", 7200)) - 60
+            return self._access_token
         except Exception as e:
             logger.error(f"Failed to get DingTalk access token: {e}")
             return None
@@ -163,57 +187,52 @@ class DingTalkChannel(BaseChannel):
         token = await self._get_access_token()
         if not token:
             return
-        
-        # This endpoint is for sending to a single user in a bot chat
+
+        # oToMessages/batchSend: sends to individual users (private chat)
         # https://open.dingtalk.com/document/orgapp/robot-batch-send-messages
         url = "https://api.dingtalk.com/v1.0/robot/oToMessages/batchSend"
-        
-        headers = {
-            "x-acs-dingtalk-access-token": token
-        }
-        
-        # Convert markdown code blocks for basic compatibility if needed,
-        # but DingTalk supports markdown loosely.
- + + headers = {"x-acs-dingtalk-access-token": token} + data = { "robotCode": self.config.client_id, - "userIds": [msg.chat_id], # chat_id is the user's staffId/unionId - "msgKey": "sampleMarkdown", # Using markdown template + "userIds": [msg.chat_id], # chat_id is the user's staffId + "msgKey": "sampleMarkdown", "msgParam": json.dumps({ "text": msg.content, - "title": "Nanobot Reply" - }) + "title": "Nanobot Reply", + }), } - + + if not self._http: + logger.warning("DingTalk HTTP client not initialized, cannot send") + return + try: - async with httpx.AsyncClient() as client: - resp = await client.post(url, json=data, headers=headers) - # Check 200 OK but also API error codes if any - if resp.status_code != 200: - logger.error(f"DingTalk send failed: {resp.text}") - else: - logger.debug(f"DingTalk message sent to {msg.chat_id}") + resp = await self._http.post(url, json=data, headers=headers) + if resp.status_code != 200: + logger.error(f"DingTalk send failed: {resp.text}") + else: + logger.debug(f"DingTalk message sent to {msg.chat_id}") except Exception as e: logger.error(f"Error sending DingTalk message: {e}") async def _on_message(self, content: str, sender_id: str, sender_name: str) -> None: - """Handle incoming message (called by NanobotDingTalkHandler).""" + """Handle incoming message (called by NanobotDingTalkHandler). + + Delegates to BaseChannel._handle_message() which enforces allow_from + permission checks before publishing to the bus. + """ try: logger.info(f"DingTalk inbound: {content} from {sender_name}") - - # Correct InboundMessage usage based on events.py definition - # @dataclass class InboundMessage: - # channel: str, sender_id: str, chat_id: str, content: str, ... - msg = InboundMessage( - channel=self.name, + await self._handle_message( sender_id=sender_id, - chat_id=sender_id, # For private stats, chat_id is sender_id + chat_id=sender_id, # For private chat, chat_id == sender_id content=str(content), metadata={ "sender_name": sender_name, - "platform": "dingtalk" - } + "platform": "dingtalk", + }, ) - await self.bus.publish_inbound(msg) except Exception as e: logger.error(f"Error publishing DingTalk message: {e}")
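
Reviewer note on the `_background_tasks` bookkeeping above: asyncio's event loop keeps only weak references to scheduled tasks, so a fire-and-forget `asyncio.create_task()` whose result is discarded can be garbage-collected before it finishes. The minimal, self-contained sketch below shows the same retention pattern in isolation; the names `dispatch` and `handle_message` are illustrative only, not from the nanobot codebase.

```python
import asyncio

# Strong references keep in-flight tasks alive; the done-callback removes
# each finished task so the set does not grow without bound.
background_tasks: set[asyncio.Task] = set()

async def handle_message(text: str) -> None:
    await asyncio.sleep(0.1)  # stand-in for real async work
    print(f"handled: {text}")

def dispatch(text: str) -> None:
    task = asyncio.create_task(handle_message(text))
    background_tasks.add(task)
    task.add_done_callback(background_tasks.discard)

async def main() -> None:
    dispatch("hello")
    await asyncio.sleep(0.2)  # give the background task time to finish

asyncio.run(main())
```

This mirrors `task.add_done_callback(self.channel._background_tasks.discard)` in the handler, and cancelling the set in `stop()` covers the shutdown path.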