Date: Tue, 3 Feb 2026 12:44:55 +0000
Subject: [PATCH 25/35] docs: optimize the structure
---
README.md | 2 --
1 file changed, 2 deletions(-)
diff --git a/README.md b/README.md
index 93789d5..f4b1df2 100644
--- a/README.md
+++ b/README.md
@@ -391,7 +391,6 @@ PRs welcome! The codebase is intentionally small and readable. 🤝
----
## ⭐ Star History
@@ -410,7 +409,6 @@ PRs welcome! The codebase is intentionally small and readable. 🤝
----
nanobot is for educational, research, and technical exchange purposes only
From 1a784fca1e8df195d0f1cb8ee3364bfdce9ac263 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Tue, 3 Feb 2026 17:13:30 +0000
Subject: [PATCH 26/35] refactor: simplify _validate_url function
---
nanobot/agent/tools/web.py | 28 +++++++---------------------
1 file changed, 7 insertions(+), 21 deletions(-)
diff --git a/nanobot/agent/tools/web.py b/nanobot/agent/tools/web.py
index ad72604..9de1d3c 100644
--- a/nanobot/agent/tools/web.py
+++ b/nanobot/agent/tools/web.py
@@ -31,30 +31,16 @@ def _normalize(text: str) -> str:
def _validate_url(url: str) -> tuple[bool, str]:
- """
- Validate URL for security.
-
- Returns:
- (is_valid, error_message): Tuple of validation result and error message if invalid.
- """
+ """Validate URL: must be http(s) with valid domain."""
try:
- parsed = urlparse(url)
-
- # Check if scheme exists
- if not parsed.scheme:
- return False, "URL must include a scheme (http:// or https://)"
-
- # Only allow http and https schemes
- if parsed.scheme.lower() not in ('http', 'https'):
- return False, f"Invalid URL scheme '{parsed.scheme}'. Only http:// and https:// are allowed"
-
- # Check if netloc (domain) exists
- if not parsed.netloc:
- return False, "URL must include a valid domain"
-
+ p = urlparse(url)
+ if p.scheme not in ('http', 'https'):
+ return False, f"Only http/https allowed, got '{p.scheme or 'none'}'"
+ if not p.netloc:
+ return False, "Missing domain"
return True, ""
except Exception as e:
- return False, f"Invalid URL format: {str(e)}"
+ return False, str(e)
class WebSearchTool(Tool):
From a20d887f9e8eae6bf34bb73480027a5140710df3 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Wed, 4 Feb 2026 03:45:26 +0000
Subject: [PATCH 27/35] feat: add parameter validation and safety guard for
exec tool
---
nanobot/agent/loop.py | 12 ++++++--
nanobot/agent/subagent.py | 9 +++++-
nanobot/agent/tools/base.py | 57 +++++++++++++++---------------------
nanobot/agent/tools/shell.py | 31 ++++++++------------
nanobot/cli/commands.py | 6 ++--
nanobot/config/schema.py | 7 +++++
6 files changed, 64 insertions(+), 58 deletions(-)
diff --git a/nanobot/agent/loop.py b/nanobot/agent/loop.py
index 4a96b84..bfe6e89 100644
--- a/nanobot/agent/loop.py
+++ b/nanobot/agent/loop.py
@@ -40,14 +40,17 @@ class AgentLoop:
workspace: Path,
model: str | None = None,
max_iterations: int = 20,
- brave_api_key: str | None = None
+ brave_api_key: str | None = None,
+ exec_config: "ExecToolConfig | None" = None,
):
+ from nanobot.config.schema import ExecToolConfig
self.bus = bus
self.provider = provider
self.workspace = workspace
self.model = model or provider.get_default_model()
self.max_iterations = max_iterations
self.brave_api_key = brave_api_key
+ self.exec_config = exec_config or ExecToolConfig()
self.context = ContextBuilder(workspace)
self.sessions = SessionManager(workspace)
@@ -58,6 +61,7 @@ class AgentLoop:
bus=bus,
model=self.model,
brave_api_key=brave_api_key,
+ exec_config=self.exec_config,
)
self._running = False
@@ -72,7 +76,11 @@ class AgentLoop:
self.tools.register(ListDirTool())
# Shell tool
- self.tools.register(ExecTool(working_dir=str(self.workspace)))
+ self.tools.register(ExecTool(
+ working_dir=str(self.workspace),
+ timeout=self.exec_config.timeout,
+ restrict_to_workspace=self.exec_config.restrict_to_workspace,
+ ))
# Web tools
self.tools.register(WebSearchTool(api_key=self.brave_api_key))
diff --git a/nanobot/agent/subagent.py b/nanobot/agent/subagent.py
index d3b320c..05ffbb8 100644
--- a/nanobot/agent/subagent.py
+++ b/nanobot/agent/subagent.py
@@ -33,12 +33,15 @@ class SubagentManager:
bus: MessageBus,
model: str | None = None,
brave_api_key: str | None = None,
+ exec_config: "ExecToolConfig | None" = None,
):
+ from nanobot.config.schema import ExecToolConfig
self.provider = provider
self.workspace = workspace
self.bus = bus
self.model = model or provider.get_default_model()
self.brave_api_key = brave_api_key
+ self.exec_config = exec_config or ExecToolConfig()
self._running_tasks: dict[str, asyncio.Task[None]] = {}
async def spawn(
@@ -96,7 +99,11 @@ class SubagentManager:
tools.register(ReadFileTool())
tools.register(WriteFileTool())
tools.register(ListDirTool())
- tools.register(ExecTool(working_dir=str(self.workspace)))
+ tools.register(ExecTool(
+ working_dir=str(self.workspace),
+ timeout=self.exec_config.timeout,
+ restrict_to_workspace=self.exec_config.restrict_to_workspace,
+ ))
tools.register(WebSearchTool(api_key=self.brave_api_key))
tools.register(WebFetchTool())
diff --git a/nanobot/agent/tools/base.py b/nanobot/agent/tools/base.py
index 5888a77..cbaadbd 100644
--- a/nanobot/agent/tools/base.py
+++ b/nanobot/agent/tools/base.py
@@ -12,6 +12,15 @@ class Tool(ABC):
the environment, such as reading files, executing commands, etc.
"""
+ _TYPE_MAP = {
+ "string": str,
+ "integer": int,
+ "number": (int, float),
+ "boolean": bool,
+ "array": list,
+ "object": dict,
+ }
+
@property
@abstractmethod
def name(self) -> str:
@@ -65,60 +74,40 @@ class Tool(ABC):
def _validate_schema(self, value: Any, schema: dict[str, Any], path: str) -> list[str]:
errors: list[str] = []
expected_type = schema.get("type")
+ label = path or "parameter"
- type_map = {
- "string": str,
- "integer": int,
- "number": (int, float),
- "boolean": bool,
- "array": list,
- "object": dict,
- }
-
- def label(p: str) -> str:
- return p or "parameter"
-
- if expected_type in type_map and not isinstance(value, type_map[expected_type]):
- errors.append(f"{label(path)} should be {expected_type}")
- return errors
+ if expected_type in self._TYPE_MAP and not isinstance(value, self._TYPE_MAP[expected_type]):
+ return [f"{label} should be {expected_type}"]
if "enum" in schema and value not in schema["enum"]:
- errors.append(f"{label(path)} must be one of {schema['enum']}")
+ errors.append(f"{label} must be one of {schema['enum']}")
if expected_type in ("integer", "number"):
if "minimum" in schema and value < schema["minimum"]:
- errors.append(f"{label(path)} must be >= {schema['minimum']}")
+ errors.append(f"{label} must be >= {schema['minimum']}")
if "maximum" in schema and value > schema["maximum"]:
- errors.append(f"{label(path)} must be <= {schema['maximum']}")
+ errors.append(f"{label} must be <= {schema['maximum']}")
if expected_type == "string":
if "minLength" in schema and len(value) < schema["minLength"]:
- errors.append(f"{label(path)} must be at least {schema['minLength']} chars")
+ errors.append(f"{label} must be at least {schema['minLength']} chars")
if "maxLength" in schema and len(value) > schema["maxLength"]:
- errors.append(f"{label(path)} must be at most {schema['maxLength']} chars")
+ errors.append(f"{label} must be at most {schema['maxLength']} chars")
if expected_type == "object":
- properties = schema.get("properties", {}) or {}
- required = set(schema.get("required", []) or [])
-
- for key in required:
+ properties = schema.get("properties", {})
+ for key in schema.get("required", []):
if key not in value:
- p = f"{path}.{key}" if path else key
- errors.append(f"missing required {p}")
-
+ errors.append(f"missing required {path}.{key}" if path else f"missing required {key}")
for key, item in value.items():
- prop_schema = properties.get(key)
- if not prop_schema:
- continue # ignore unknown fields
- p = f"{path}.{key}" if path else key
- errors.extend(self._validate_schema(item, prop_schema, p))
+ if key in properties:
+ errors.extend(self._validate_schema(item, properties[key], f"{path}.{key}" if path else key))
if expected_type == "array":
items_schema = schema.get("items")
if items_schema:
for idx, item in enumerate(value):
- p = f"{path}[{idx}]" if path else f"[{idx}]"
- errors.extend(self._validate_schema(item, items_schema, p))
+ errors.extend(self._validate_schema(item, items_schema, f"{path}[{idx}]" if path else f"[{idx}]"))
return errors
diff --git a/nanobot/agent/tools/shell.py b/nanobot/agent/tools/shell.py
index ce00bca..143d187 100644
--- a/nanobot/agent/tools/shell.py
+++ b/nanobot/agent/tools/shell.py
@@ -18,29 +18,22 @@ class ExecTool(Tool):
working_dir: str | None = None,
deny_patterns: list[str] | None = None,
allow_patterns: list[str] | None = None,
- restrict_to_working_dir: bool = False,
+ restrict_to_workspace: bool = False,
):
self.timeout = timeout
self.working_dir = working_dir
self.deny_patterns = deny_patterns or [
- r"\brm\s+-rf\b",
- r"\brm\s+-fr\b",
- r"\brm\s+-r\b",
- r"\bdel\s+/f\b",
- r"\bdel\s+/q\b",
- r"\brmdir\s+/s\b",
- r"\bformat\b",
- r"\bmkfs\b",
- r"\bdd\s+if=",
- r">\s*/dev/sd",
- r"\bdiskpart\b",
- r"\bshutdown\b",
- r"\breboot\b",
- r"\bpoweroff\b",
- r":\(\)\s*\{\s*:\s*\|\s*:\s*&\s*\};\s*:",
+ r"\brm\s+-[rf]{1,2}\b", # rm -r, rm -rf, rm -fr
+ r"\bdel\s+/[fq]\b", # del /f, del /q
+ r"\brmdir\s+/s\b", # rmdir /s
+ r"\b(format|mkfs|diskpart)\b", # disk operations
+ r"\bdd\s+if=", # dd
+ r">\s*/dev/sd", # write to disk
+ r"\b(shutdown|reboot|poweroff)\b", # system power
+ r":\(\)\s*\{.*\};\s*:", # fork bomb
]
self.allow_patterns = allow_patterns or []
- self.restrict_to_working_dir = restrict_to_working_dir
+ self.restrict_to_workspace = restrict_to_workspace
@property
def name(self) -> str:
@@ -128,14 +121,14 @@ class ExecTool(Tool):
if not any(re.search(p, lower) for p in self.allow_patterns):
return "Error: Command blocked by safety guard (not in allowlist)"
- if self.restrict_to_working_dir:
+ if self.restrict_to_workspace:
if "..\\" in cmd or "../" in cmd:
return "Error: Command blocked by safety guard (path traversal detected)"
cwd_path = Path(cwd).resolve()
win_paths = re.findall(r"[A-Za-z]:\\[^\\\"']+", cmd)
- posix_paths = re.findall(r"/[^\\s\"']+", cmd)
+ posix_paths = re.findall(r"/[^\s\"']+", cmd)
for raw in win_paths + posix_paths:
try:
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index 5ecc31b..6b95667 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -202,7 +202,8 @@ def gateway(
workspace=config.workspace_path,
model=config.agents.defaults.model,
max_iterations=config.agents.defaults.max_tool_iterations,
- brave_api_key=config.tools.web.search.api_key or None
+ brave_api_key=config.tools.web.search.api_key or None,
+ exec_config=config.tools.exec,
)
# Create cron service
@@ -309,7 +310,8 @@ def agent(
bus=bus,
provider=provider,
workspace=config.workspace_path,
- brave_api_key=config.tools.web.search.api_key or None
+ brave_api_key=config.tools.web.search.api_key or None,
+ exec_config=config.tools.exec,
)
if message:
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 71e3361..4c34834 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -73,9 +73,16 @@ class WebToolsConfig(BaseModel):
search: WebSearchConfig = Field(default_factory=WebSearchConfig)
+class ExecToolConfig(BaseModel):
+ """Shell exec tool configuration."""
+ timeout: int = 60
+ restrict_to_workspace: bool = False # If true, block commands accessing paths outside workspace
+
+
class ToolsConfig(BaseModel):
"""Tools configuration."""
web: WebToolsConfig = Field(default_factory=WebToolsConfig)
+ exec: ExecToolConfig = Field(default_factory=ExecToolConfig)
class Config(BaseSettings):
From 9a0f8fcc73d49f0292d0caf86a79007068f01549 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Wed, 4 Feb 2026 03:50:39 +0000
Subject: [PATCH 28/35] refactor: simplify parameter validation logic
---
nanobot/agent/tools/base.py | 77 ++++++++++++++-----------------------
1 file changed, 28 insertions(+), 49 deletions(-)
diff --git a/nanobot/agent/tools/base.py b/nanobot/agent/tools/base.py
index cbaadbd..ca9bcc2 100644
--- a/nanobot/agent/tools/base.py
+++ b/nanobot/agent/tools/base.py
@@ -53,62 +53,41 @@ class Tool(ABC):
pass
def validate_params(self, params: dict[str, Any]) -> list[str]:
- """
- Lightweight JSON schema validation for tool parameters.
-
- Returns a list of error strings (empty if valid).
- Unknown params are ignored.
- """
+ """Validate tool parameters against JSON schema. Returns error list (empty if valid)."""
schema = self.parameters or {}
+ if schema.get("type", "object") != "object":
+ raise ValueError(f"Schema must be object type, got {schema.get('type')!r}")
+ return self._validate(params, {**schema, "type": "object"}, "")
- # Default to an object schema if type is missing, and fail fast on unsupported top-level types.
- if "type" not in schema:
- schema = {"type": "object", **schema}
- elif schema.get("type") != "object":
- raise ValueError(
- f"Tool parameter schemas must have top-level type 'object'; got {schema.get('type')!r}"
- )
-
- return self._validate_schema(params, schema, path="")
-
- def _validate_schema(self, value: Any, schema: dict[str, Any], path: str) -> list[str]:
- errors: list[str] = []
- expected_type = schema.get("type")
- label = path or "parameter"
-
- if expected_type in self._TYPE_MAP and not isinstance(value, self._TYPE_MAP[expected_type]):
- return [f"{label} should be {expected_type}"]
-
- if "enum" in schema and value not in schema["enum"]:
+ def _validate(self, val: Any, schema: dict[str, Any], path: str) -> list[str]:
+ t, label = schema.get("type"), path or "parameter"
+ if t in self._TYPE_MAP and not isinstance(val, self._TYPE_MAP[t]):
+ return [f"{label} should be {t}"]
+
+ errors = []
+ if "enum" in schema and val not in schema["enum"]:
errors.append(f"{label} must be one of {schema['enum']}")
-
- if expected_type in ("integer", "number"):
- if "minimum" in schema and value < schema["minimum"]:
+ if t in ("integer", "number"):
+ if "minimum" in schema and val < schema["minimum"]:
errors.append(f"{label} must be >= {schema['minimum']}")
- if "maximum" in schema and value > schema["maximum"]:
+ if "maximum" in schema and val > schema["maximum"]:
errors.append(f"{label} must be <= {schema['maximum']}")
-
- if expected_type == "string":
- if "minLength" in schema and len(value) < schema["minLength"]:
+ if t == "string":
+ if "minLength" in schema and len(val) < schema["minLength"]:
errors.append(f"{label} must be at least {schema['minLength']} chars")
- if "maxLength" in schema and len(value) > schema["maxLength"]:
+ if "maxLength" in schema and len(val) > schema["maxLength"]:
errors.append(f"{label} must be at most {schema['maxLength']} chars")
-
- if expected_type == "object":
- properties = schema.get("properties", {})
- for key in schema.get("required", []):
- if key not in value:
- errors.append(f"missing required {path}.{key}" if path else f"missing required {key}")
- for key, item in value.items():
- if key in properties:
- errors.extend(self._validate_schema(item, properties[key], f"{path}.{key}" if path else key))
-
- if expected_type == "array":
- items_schema = schema.get("items")
- if items_schema:
- for idx, item in enumerate(value):
- errors.extend(self._validate_schema(item, items_schema, f"{path}[{idx}]" if path else f"[{idx}]"))
-
+ if t == "object":
+ props = schema.get("properties", {})
+ for k in schema.get("required", []):
+ if k not in val:
+ errors.append(f"missing required {path + '.' + k if path else k}")
+ for k, v in val.items():
+ if k in props:
+ errors.extend(self._validate(v, props[k], path + '.' + k if path else k))
+ if t == "array" and "items" in schema:
+ for i, item in enumerate(val):
+ errors.extend(self._validate(item, schema["items"], f"{path}[{i}]" if path else f"[{i}]"))
return errors
def to_schema(self) -> dict[str, Any]:
From 50fa024ab4d44c074157c9b5278717d769445e3a Mon Sep 17 00:00:00 2001
From: "tao.jun" <61566027@163.com>
Date: Wed, 4 Feb 2026 14:07:45 +0800
Subject: [PATCH 29/35] feishu support
---
README.md | 61 +++++++-
nanobot/channels/feishu.py | 281 ++++++++++++++++++++++++++++++++++++
nanobot/channels/manager.py | 11 ++
nanobot/config/schema.py | 12 ++
pyproject.toml | 3 +
5 files changed, 367 insertions(+), 1 deletion(-)
create mode 100644 nanobot/channels/feishu.py
diff --git a/README.md b/README.md
index f4b1df2..efa6821 100644
--- a/README.md
+++ b/README.md
@@ -162,12 +162,13 @@ nanobot agent -m "Hello from my local LLM!"
## 💬 Chat Apps
-Talk to your nanobot through Telegram or WhatsApp — anytime, anywhere.
+Talk to your nanobot through Telegram, WhatsApp, or Feishu — anytime, anywhere.
| Channel | Setup |
|---------|-------|
| **Telegram** | Easy (just a token) |
| **WhatsApp** | Medium (scan QR) |
+| **Feishu** | Medium (app credentials) |
Telegram (Recommended)
@@ -238,6 +239,56 @@ nanobot gateway
+
+Feishu (飞书)
+
+Uses **WebSocket** long connection — no public IP required.
+
+Requires **lark-oapi** SDK:
+
+```bash
+pip install lark-oapi
+```
+
+**1. Create a Feishu bot**
+- Visit [Feishu Open Platform](https://open.feishu.cn/app)
+- Create a new app (Custom App)
+- Enable bot capability
+- Add event subscription: `im.message.receive_v1`
+- Get credentials:
+ - **App ID** and **App Secret** from "Credentials & Basic Info"
+ - **Verification Token** and **Encrypt Key** from "Event Subscriptions"
+
+**2. Configure**
+
+```json
+{
+ "channels": {
+ "feishu": {
+ "enabled": true,
+ "appId": "cli_xxx",
+ "appSecret": "xxx",
+ "verificationToken": "xxx",
+ "encryptKey": "xxx",
+ "allowFrom": ["ou_xxx"]
+ }
+ }
+}
+```
+
+> Get your Open ID by sending a message to the bot, or from Feishu admin console.
+
+**3. Run**
+
+```bash
+nanobot gateway
+```
+
+> [!TIP]
+> Feishu uses WebSocket to receive messages — no webhook or public IP needed!
+
+
+
## ⚙️ Configuration
Config file: `~/.nanobot/config.json`
@@ -282,6 +333,14 @@ Config file: `~/.nanobot/config.json`
},
"whatsapp": {
"enabled": false
+ },
+ "feishu": {
+ "enabled": false,
+ "appId": "cli_xxx",
+ "appSecret": "xxx",
+ "verificationToken": "xxx",
+ "encryptKey": "xxx",
+ "allowFrom": ["ou_xxx"]
}
},
"tools": {
diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py
new file mode 100644
index 0000000..4326cf0
--- /dev/null
+++ b/nanobot/channels/feishu.py
@@ -0,0 +1,281 @@
+"""Feishu/Lark channel implementation using lark-oapi SDK with WebSocket long connection."""
+
+import asyncio
+import json
+import threading
+from typing import Any
+
+from loguru import logger
+
+from nanobot.bus.events import OutboundMessage
+from nanobot.bus.queue import MessageBus
+from nanobot.channels.base import BaseChannel
+from nanobot.config.schema import FeishuConfig
+
+try:
+ import lark_oapi as lark
+ from lark_oapi.api.im.v1 import (
+ CreateMessageRequest,
+ CreateMessageRequestBody,
+ CreateMessageReactionRequest,
+ CreateMessageReactionRequestBody,
+ P2ImMessageReceiveV1,
+ )
+ FEISHU_AVAILABLE = True
+except ImportError:
+ FEISHU_AVAILABLE = False
+ lark = None
+
+
+class FeishuChannel(BaseChannel):
+ """
+ Feishu/Lark channel using WebSocket long connection.
+
+ Uses WebSocket to receive events - no public IP or webhook required.
+
+ Requires:
+ - App ID and App Secret from Feishu Open Platform
+ - Bot capability enabled
+ - Event subscription enabled (im.message.receive_v1)
+ """
+
+ name = "feishu"
+
+ def __init__(self, config: FeishuConfig, bus: MessageBus):
+ super().__init__(config, bus)
+ self.config: FeishuConfig = config
+ self._client: Any = None
+ self._ws_client: Any = None
+ self._ws_thread: threading.Thread | None = None
+ self._processed_message_ids: set[str] = set() # Dedup message IDs
+ self._loop: asyncio.AbstractEventLoop | None = None
+
+ async def start(self) -> None:
+ """Start the Feishu bot with WebSocket long connection."""
+ if not FEISHU_AVAILABLE:
+ logger.error("Feishu SDK not installed. Run: pip install lark-oapi")
+ return
+
+ if not self.config.app_id or not self.config.app_secret:
+ logger.error("Feishu app_id and app_secret not configured")
+ return
+
+ self._running = True
+ self._loop = asyncio.get_event_loop()
+
+ # Create Lark client for sending messages
+ self._client = lark.Client.builder() \
+ .app_id(self.config.app_id) \
+ .app_secret(self.config.app_secret) \
+ .log_level(lark.LogLevel.INFO) \
+ .build()
+
+ # Create event handler (only register message receive, ignore other events)
+ event_handler = lark.EventDispatcherHandler.builder(
+ self.config.encrypt_key or "",
+ self.config.verification_token or "",
+ ).register_p2_im_message_receive_v1(
+ self._on_message_sync
+ ).build()
+
+ # Create WebSocket client for long connection
+ self._ws_client = lark.ws.Client(
+ self.config.app_id,
+ self.config.app_secret,
+ event_handler=event_handler,
+ log_level=lark.LogLevel.INFO
+ )
+
+ # Start WebSocket client in a separate thread
+ def run_ws():
+ try:
+ self._ws_client.start()
+ except Exception as e:
+ logger.error(f"Feishu WebSocket error: {e}")
+
+ self._ws_thread = threading.Thread(target=run_ws, daemon=True)
+ self._ws_thread.start()
+
+ logger.info("Feishu bot started with WebSocket long connection")
+ logger.info("No public IP required - using WebSocket to receive events")
+
+ # Keep running until stopped
+ while self._running:
+ await asyncio.sleep(1)
+
+ async def stop(self) -> None:
+ """Stop the Feishu bot."""
+ self._running = False
+ logger.info("Feishu bot stopped")
+
+ def _add_reaction(self, message_id: str, emoji_type: str = "SMILE") -> None:
+ """
+ Add a reaction emoji to a message.
+
+ Common emoji types: THUMBSUP, OK, EYES, DONE, OnIt, HEART
+ """
+ if not self._client:
+ logger.warning("Cannot add reaction: client not initialized")
+ return
+
+ try:
+ from lark_oapi.api.im.v1 import Emoji
+
+ request = CreateMessageReactionRequest.builder() \
+ .message_id(message_id) \
+ .request_body(
+ CreateMessageReactionRequestBody.builder()
+ .reaction_type(Emoji.builder().emoji_type(emoji_type).build())
+ .build()
+ ).build()
+
+ response = self._client.im.v1.message_reaction.create(request)
+
+ if not response.success():
+ logger.warning(f"Failed to add reaction: code={response.code}, msg={response.msg}")
+ else:
+ logger.info(f"Added {emoji_type} reaction to message {message_id}")
+ except Exception as e:
+ logger.warning(f"Error adding reaction: {e}")
+
+ async def send(self, msg: OutboundMessage) -> None:
+ """Send a message through Feishu."""
+ if not self._client:
+ logger.warning("Feishu client not initialized")
+ return
+
+ try:
+ # Determine receive_id_type based on chat_id format
+ # open_id starts with "ou_", chat_id starts with "oc_"
+ if msg.chat_id.startswith("oc_"):
+ receive_id_type = "chat_id"
+ else:
+ receive_id_type = "open_id"
+
+ # Build text message content
+ content = json.dumps({"text": msg.content})
+
+ request = CreateMessageRequest.builder() \
+ .receive_id_type(receive_id_type) \
+ .request_body(
+ CreateMessageRequestBody.builder()
+ .receive_id(msg.chat_id)
+ .msg_type("text")
+ .content(content)
+ .build()
+ ).build()
+
+ response = self._client.im.v1.message.create(request)
+
+ if not response.success():
+ logger.error(
+ f"Failed to send Feishu message: code={response.code}, "
+ f"msg={response.msg}, log_id={response.get_log_id()}"
+ )
+ else:
+ logger.debug(f"Feishu message sent to {msg.chat_id}")
+
+ except Exception as e:
+ logger.error(f"Error sending Feishu message: {e}")
+
+ def _on_message_sync(self, data: "P2ImMessageReceiveV1") -> None:
+ """
+ Sync handler for incoming messages (called from WebSocket thread).
+ Schedules async handling in the main event loop.
+ """
+ try:
+ if self._loop and self._loop.is_running():
+ # Schedule the async handler in the main event loop
+ asyncio.run_coroutine_threadsafe(
+ self._on_message(data),
+ self._loop
+ )
+ else:
+ # Fallback: run in new event loop
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ loop.run_until_complete(self._on_message(data))
+ finally:
+ loop.close()
+ except Exception as e:
+ logger.error(f"Error handling Feishu message: {e}")
+
+ async def _on_message(self, data: "P2ImMessageReceiveV1") -> None:
+ """Handle incoming message from Feishu."""
+ try:
+ event = data.event
+ message = event.message
+ sender = event.sender
+
+ # Get message ID for deduplication
+ message_id = message.message_id
+ if message_id in self._processed_message_ids:
+ logger.debug(f"Skipping duplicate message: {message_id}")
+ return
+ self._processed_message_ids.add(message_id)
+
+ # Limit dedup cache size
+ if len(self._processed_message_ids) > 1000:
+ self._processed_message_ids = set(list(self._processed_message_ids)[-500:])
+
+ # Extract sender info
+ sender_id = sender.sender_id.open_id if sender.sender_id else "unknown"
+ sender_type = sender.sender_type # "user" or "bot"
+
+ # Skip bot messages
+ if sender_type == "bot":
+ return
+
+            # Add reaction to user's message to indicate "seen" (👍 THUMBSUP)
+ self._add_reaction(message_id, "THUMBSUP")
+
+ # Get chat_id for replies
+ chat_id = message.chat_id
+ chat_type = message.chat_type # "p2p" or "group"
+
+ # Parse message content
+ content = ""
+ msg_type = message.message_type
+
+ if msg_type == "text":
+ # Text message: {"text": "hello"}
+ try:
+ content_obj = json.loads(message.content)
+ content = content_obj.get("text", "")
+ except json.JSONDecodeError:
+ content = message.content or ""
+ elif msg_type == "image":
+ content = "[image]"
+ elif msg_type == "audio":
+ content = "[audio]"
+ elif msg_type == "file":
+ content = "[file]"
+ elif msg_type == "sticker":
+ content = "[sticker]"
+ else:
+ content = f"[{msg_type}]"
+
+ if not content:
+ return
+
+ logger.debug(f"Feishu message from {sender_id} in {chat_id}: {content[:50]}...")
+
+ # Forward to message bus
+ # Use chat_id for group chats, sender's open_id for p2p
+ reply_to = chat_id if chat_type == "group" else sender_id
+
+ await self._handle_message(
+ sender_id=sender_id,
+ chat_id=reply_to,
+ content=content,
+ metadata={
+ "message_id": message_id,
+ "chat_type": chat_type,
+ "msg_type": msg_type,
+ "sender_type": sender_type,
+ }
+ )
+
+ except Exception as e:
+ logger.error(f"Error processing Feishu message: {e}")
diff --git a/nanobot/channels/manager.py b/nanobot/channels/manager.py
index 73c3334..979d01e 100644
--- a/nanobot/channels/manager.py
+++ b/nanobot/channels/manager.py
@@ -55,6 +55,17 @@ class ChannelManager:
logger.info("WhatsApp channel enabled")
except ImportError as e:
logger.warning(f"WhatsApp channel not available: {e}")
+
+ # Feishu channel
+ if self.config.channels.feishu.enabled:
+ try:
+ from nanobot.channels.feishu import FeishuChannel
+ self.channels["feishu"] = FeishuChannel(
+ self.config.channels.feishu, self.bus
+ )
+ logger.info("Feishu channel enabled")
+ except ImportError as e:
+ logger.warning(f"Feishu channel not available: {e}")
async def start_all(self) -> None:
"""Start WhatsApp channel and the outbound dispatcher."""
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 4c34834..4492096 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -17,12 +17,24 @@ class TelegramConfig(BaseModel):
enabled: bool = False
token: str = "" # Bot token from @BotFather
allow_from: list[str] = Field(default_factory=list) # Allowed user IDs or usernames
+ proxy: str | None = None # HTTP/SOCKS5 proxy URL, e.g. "http://127.0.0.1:7890" or "socks5://127.0.0.1:1080"
+
+
+class FeishuConfig(BaseModel):
+ """Feishu/Lark channel configuration using WebSocket long connection."""
+ enabled: bool = False
+ app_id: str = "" # App ID from Feishu Open Platform
+ app_secret: str = "" # App Secret from Feishu Open Platform
+ encrypt_key: str = "" # Encrypt Key for event subscription (optional)
+ verification_token: str = "" # Verification Token for event subscription (optional)
+ allow_from: list[str] = Field(default_factory=list) # Allowed user open_ids
class ChannelsConfig(BaseModel):
"""Configuration for chat channels."""
whatsapp: WhatsAppConfig = Field(default_factory=WhatsAppConfig)
telegram: TelegramConfig = Field(default_factory=TelegramConfig)
+ feishu: FeishuConfig = Field(default_factory=FeishuConfig)
class AgentDefaults(BaseModel):
diff --git a/pyproject.toml b/pyproject.toml
index d081dd7..e027097 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,6 +32,9 @@ dependencies = [
]
[project.optional-dependencies]
+feishu = [
+ "lark-oapi>=1.0.0",
+]
dev = [
"pytest>=7.0.0",
"pytest-asyncio>=0.21.0",
From bf1dc7c0d3eafa6dd4e4f7c329b55bc6c9c7b226 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Wed, 4 Feb 2026 06:45:53 +0000
Subject: [PATCH 30/35] docs: fix incorrect references and add missing tool
docs
---
README.md | 8 +++--
nanobot/skills/skill-creator/SKILL.md | 44 +++++++++++++--------------
workspace/AGENTS.md | 1 +
workspace/TOOLS.md | 28 ++++++++++++-----
4 files changed, 49 insertions(+), 32 deletions(-)
diff --git a/README.md b/README.md
index f4b1df2..e54bb8f 100644
--- a/README.md
+++ b/README.md
@@ -108,8 +108,12 @@ nanobot onboard
"model": "anthropic/claude-opus-4-5"
}
},
- "webSearch": {
- "apiKey": "BSA-xxx"
+ "tools": {
+ "web": {
+ "search": {
+ "apiKey": "BSA-xxx"
+ }
+ }
}
}
```
diff --git a/nanobot/skills/skill-creator/SKILL.md b/nanobot/skills/skill-creator/SKILL.md
index 4680d5e..9b5eb6f 100644
--- a/nanobot/skills/skill-creator/SKILL.md
+++ b/nanobot/skills/skill-creator/SKILL.md
@@ -9,9 +9,9 @@ This skill provides guidance for creating effective skills.
## About Skills
-Skills are modular, self-contained packages that extend Codex's capabilities by providing
+Skills are modular, self-contained packages that extend the agent's capabilities by providing
specialized knowledge, workflows, and tools. Think of them as "onboarding guides" for specific
domains or tasks—they transform Codex from a general-purpose agent into a specialized agent
domains or tasks—they transform the agent from a general-purpose agent
equipped with procedural knowledge that no model can fully possess.
### What Skills Provide
@@ -25,9 +25,9 @@ equipped with procedural knowledge that no model can fully possess.
### Concise is Key
-The context window is a public good. Skills share the context window with everything else Codex needs: system prompt, conversation history, other Skills' metadata, and the actual user request.
+The context window is a public good. Skills share the context window with everything else the agent needs: system prompt, conversation history, other Skills' metadata, and the actual user request.
-**Default assumption: Codex is already very smart.** Only add context Codex doesn't already have. Challenge each piece of information: "Does Codex really need this explanation?" and "Does this paragraph justify its token cost?"
+**Default assumption: the agent is already very smart.** Only add context the agent doesn't already have. Challenge each piece of information: "Does the agent really need this explanation?" and "Does this paragraph justify its token cost?"
Prefer concise examples over verbose explanations.
@@ -41,7 +41,7 @@ Match the level of specificity to the task's fragility and variability:
**Low freedom (specific scripts, few parameters)**: Use when operations are fragile and error-prone, consistency is critical, or a specific sequence must be followed.
-Think of Codex as exploring a path: a narrow bridge with cliffs needs specific guardrails (low freedom), while an open field allows many routes (high freedom).
+Think of the agent as exploring a path: a narrow bridge with cliffs needs specific guardrails (low freedom), while an open field allows many routes (high freedom).
### Anatomy of a Skill
@@ -64,7 +64,7 @@ skill-name/
Every SKILL.md consists of:
-- **Frontmatter** (YAML): Contains `name` and `description` fields. These are the only fields that Codex reads to determine when the skill gets used, thus it is very important to be clear and comprehensive in describing what the skill is, and when it should be used.
+- **Frontmatter** (YAML): Contains `name` and `description` fields. These are the only fields that the agent reads to determine when the skill gets used, thus it is very important to be clear and comprehensive in describing what the skill is, and when it should be used.
- **Body** (Markdown): Instructions and guidance for using the skill. Only loaded AFTER the skill triggers (if at all).
#### Bundled Resources (optional)
@@ -76,27 +76,27 @@ Executable code (Python/Bash/etc.) for tasks that require deterministic reliabil
- **When to include**: When the same code is being rewritten repeatedly or deterministic reliability is needed
- **Example**: `scripts/rotate_pdf.py` for PDF rotation tasks
- **Benefits**: Token efficient, deterministic, may be executed without loading into context
-- **Note**: Scripts may still need to be read by Codex for patching or environment-specific adjustments
+- **Note**: Scripts may still need to be read by the agent for patching or environment-specific adjustments
##### References (`references/`)
-Documentation and reference material intended to be loaded as needed into context to inform Codex's process and thinking.
+Documentation and reference material intended to be loaded as needed into context to inform the agent's process and thinking.
-- **When to include**: For documentation that Codex should reference while working
+- **When to include**: For documentation that the agent should reference while working
- **Examples**: `references/finance.md` for financial schemas, `references/mnda.md` for company NDA template, `references/policies.md` for company policies, `references/api_docs.md` for API specifications
- **Use cases**: Database schemas, API documentation, domain knowledge, company policies, detailed workflow guides
-- **Benefits**: Keeps SKILL.md lean, loaded only when Codex determines it's needed
+- **Benefits**: Keeps SKILL.md lean, loaded only when the agent determines it's needed
- **Best practice**: If files are large (>10k words), include grep search patterns in SKILL.md
- **Avoid duplication**: Information should live in either SKILL.md or references files, not both. Prefer references files for detailed information unless it's truly core to the skill—this keeps SKILL.md lean while making information discoverable without hogging the context window. Keep only essential procedural instructions and workflow guidance in SKILL.md; move detailed reference material, schemas, and examples to references files.
##### Assets (`assets/`)
-Files not intended to be loaded into context, but rather used within the output Codex produces.
+Files not intended to be loaded into context, but rather used within the output the agent produces.
- **When to include**: When the skill needs files that will be used in the final output
- **Examples**: `assets/logo.png` for brand assets, `assets/slides.pptx` for PowerPoint templates, `assets/frontend-template/` for HTML/React boilerplate, `assets/font.ttf` for typography
- **Use cases**: Templates, images, icons, boilerplate code, fonts, sample documents that get copied or modified
-- **Benefits**: Separates output resources from documentation, enables Codex to use files without loading them into context
+- **Benefits**: Separates output resources from documentation, enables the agent to use files without loading them into context
#### What to Not Include in a Skill
@@ -116,7 +116,7 @@ Skills use a three-level loading system to manage context efficiently:
1. **Metadata (name + description)** - Always in context (~100 words)
2. **SKILL.md body** - When skill triggers (<5k words)
-3. **Bundled resources** - As needed by Codex (Unlimited because scripts can be executed without reading into context window)
+3. **Bundled resources** - As needed by the agent (Unlimited because scripts can be executed without reading into context window)
#### Progressive Disclosure Patterns
@@ -141,7 +141,7 @@ Extract text with pdfplumber:
- **Examples**: See [EXAMPLES.md](EXAMPLES.md) for common patterns
```
-Codex loads FORMS.md, REFERENCE.md, or EXAMPLES.md only when needed.
+The agent loads FORMS.md, REFERENCE.md, or EXAMPLES.md only when needed.
**Pattern 2: Domain-specific organization**
@@ -157,7 +157,7 @@ bigquery-skill/
└── marketing.md (campaigns, attribution)
```
-When a user asks about sales metrics, Codex only reads sales.md.
+When a user asks about sales metrics, the agent only reads sales.md.
Similarly, for skills supporting multiple frameworks or variants, organize by variant:
@@ -170,7 +170,7 @@ cloud-deploy/
└── azure.md (Azure deployment patterns)
```
-When the user chooses AWS, Codex only reads aws.md.
+When the user chooses AWS, the agent only reads aws.md.
**Pattern 3: Conditional details**
@@ -191,12 +191,12 @@ For simple edits, modify the XML directly.
**For OOXML details**: See [OOXML.md](OOXML.md)
```
-Codex reads REDLINING.md or OOXML.md only when the user needs those features.
+The agent reads REDLINING.md or OOXML.md only when the user needs those features.
**Important guidelines:**
- **Avoid deeply nested references** - Keep references one level deep from SKILL.md. All reference files should link directly from SKILL.md.
-- **Structure longer reference files** - For files longer than 100 lines, include a table of contents at the top so Codex can see the full scope when previewing.
+- **Structure longer reference files** - For files longer than 100 lines, include a table of contents at the top so the agent can see the full scope when previewing.
## Skill Creation Process
@@ -293,7 +293,7 @@ After initialization, customize the SKILL.md and add resources as needed. If you
### Step 4: Edit the Skill
-When editing the (newly-generated or existing) skill, remember that the skill is being created for another instance of Codex to use. Include information that would be beneficial and non-obvious to Codex. Consider what procedural knowledge, domain-specific details, or reusable assets would help another Codex instance execute these tasks more effectively.
+When editing the (newly-generated or existing) skill, remember that the skill is being created for another instance of the agent to use. Include information that would be beneficial and non-obvious to the agent. Consider what procedural knowledge, domain-specific details, or reusable assets would help another agent instance execute these tasks more effectively.
#### Learn Proven Design Patterns
@@ -321,10 +321,10 @@ If you used `--examples`, delete any placeholder files that are not needed for t
Write the YAML frontmatter with `name` and `description`:
- `name`: The skill name
-- `description`: This is the primary triggering mechanism for your skill, and helps Codex understand when to use the skill.
+- `description`: This is the primary triggering mechanism for your skill, and helps the agent understand when to use the skill.
- Include both what the Skill does and specific triggers/contexts for when to use it.
- - Include all "when to use" information here - Not in the body. The body is only loaded after triggering, so "When to Use This Skill" sections in the body are not helpful to Codex.
- - Example description for a `docx` skill: "Comprehensive document creation, editing, and analysis with support for tracked changes, comments, formatting preservation, and text extraction. Use when Codex needs to work with professional documents (.docx files) for: (1) Creating new documents, (2) Modifying or editing content, (3) Working with tracked changes, (4) Adding comments, or any other document tasks"
+ - Include all "when to use" information here - Not in the body. The body is only loaded after triggering, so "When to Use This Skill" sections in the body are not helpful to the agent.
+ - Example description for a `docx` skill: "Comprehensive document creation, editing, and analysis with support for tracked changes, comments, formatting preservation, and text extraction. Use when the agent needs to work with professional documents (.docx files) for: (1) Creating new documents, (2) Modifying or editing content, (3) Working with tracked changes, (4) Adding comments, or any other document tasks"
Do not include any other fields in YAML frontmatter.
diff --git a/workspace/AGENTS.md b/workspace/AGENTS.md
index a99a7b4..b4e5b5f 100644
--- a/workspace/AGENTS.md
+++ b/workspace/AGENTS.md
@@ -16,6 +16,7 @@ You have access to:
- Shell commands (exec)
- Web access (search, fetch)
- Messaging (message)
+- Background tasks (spawn)
## Memory
diff --git a/workspace/TOOLS.md b/workspace/TOOLS.md
index 9915561..0134a64 100644
--- a/workspace/TOOLS.md
+++ b/workspace/TOOLS.md
@@ -37,29 +37,31 @@ exec(command: str, working_dir: str = None) -> str
```
**Safety Notes:**
-- Commands have a 60-second timeout
+- Commands have a configurable timeout (default 60s)
+- Dangerous commands are blocked (rm -rf, format, dd, shutdown, etc.)
- Output is truncated at 10,000 characters
-- Use with caution for destructive operations
+- Optional `restrictToWorkspace` config to limit paths
## Web Access
### web_search
-Search the web using DuckDuckGo.
+Search the web using Brave Search API.
```
-web_search(query: str) -> str
+web_search(query: str, count: int = 5) -> str
```
-Returns top 5 search results with titles, URLs, and snippets.
+Returns search results with titles, URLs, and snippets. Requires `tools.web.search.apiKey` in config.
### web_fetch
Fetch and extract main content from a URL.
```
-web_fetch(url: str) -> str
+web_fetch(url: str, extractMode: str = "markdown", maxChars: int = 50000) -> str
```
**Notes:**
-- Content is extracted using trafilatura
-- Output is truncated at 8,000 characters
+- Content is extracted using readability
+- Supports markdown or plain text extraction
+- Output is truncated at 50,000 characters by default
## Communication
@@ -69,6 +71,16 @@ Send a message to the user (used internally).
message(content: str, channel: str = None, chat_id: str = None) -> str
```
+## Background Tasks
+
+### spawn
+Spawn a subagent to handle a task in the background.
+```
+spawn(task: str, label: str = None) -> str
+```
+
+Use for complex or time-consuming tasks that can run independently. The subagent will complete the task and report back when done.
+
## Scheduled Reminders (Cron)
Use the `exec` tool to create scheduled reminders with `nanobot cron add`:
From be8772355c8e563b2186e9c8b137422a0061973b Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Wed, 4 Feb 2026 09:21:53 +0000
Subject: [PATCH 31/35] fix: correct bridge path for pip-installed package
---
nanobot/cli/commands.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index 6b95667..c2241fb 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -400,7 +400,7 @@ def _get_bridge_dir() -> Path:
raise typer.Exit(1)
# Find source bridge: first check package data, then source dir
- pkg_bridge = Path(__file__).parent / "bridge" # nanobot/bridge (installed)
+ pkg_bridge = Path(__file__).parent.parent / "bridge" # nanobot/bridge (installed)
src_bridge = Path(__file__).parent.parent.parent / "bridge" # repo root/bridge (dev)
source = None
From 795f8105a0a76e71a27c0ea37fb07a87f02fbe23 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Wed, 4 Feb 2026 09:27:37 +0000
Subject: [PATCH 32/35] bump version to 0.1.3.post4
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index d081dd7..d578a08 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "nanobot-ai"
-version = "0.1.3.post3"
+version = "0.1.3.post4"
description = "A lightweight personal AI assistant framework"
requires-python = ">=3.11"
license = {text = "MIT"}
From 50a4c4ca1ab9104a945a552396ee42c8d6337e7d Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Feb 2026 06:01:02 +0000
Subject: [PATCH 33/35] refactor: improve feishu channel implementation
---
nanobot/channels/feishu.py | 114 ++++++++++++++++---------------------
1 file changed, 48 insertions(+), 66 deletions(-)
diff --git a/nanobot/channels/feishu.py b/nanobot/channels/feishu.py
index 4326cf0..01b808e 100644
--- a/nanobot/channels/feishu.py
+++ b/nanobot/channels/feishu.py
@@ -3,6 +3,7 @@
import asyncio
import json
import threading
+from collections import OrderedDict
from typing import Any
from loguru import logger
@@ -19,12 +20,22 @@ try:
CreateMessageRequestBody,
CreateMessageReactionRequest,
CreateMessageReactionRequestBody,
+ Emoji,
P2ImMessageReceiveV1,
)
FEISHU_AVAILABLE = True
except ImportError:
FEISHU_AVAILABLE = False
lark = None
+ Emoji = None
+
+# Message type display mapping
+MSG_TYPE_MAP = {
+ "image": "[image]",
+ "audio": "[audio]",
+ "file": "[file]",
+ "sticker": "[sticker]",
+}
class FeishuChannel(BaseChannel):
@@ -47,7 +58,7 @@ class FeishuChannel(BaseChannel):
self._client: Any = None
self._ws_client: Any = None
self._ws_thread: threading.Thread | None = None
- self._processed_message_ids: set[str] = set() # Dedup message IDs
+ self._processed_message_ids: OrderedDict[str, None] = OrderedDict() # Ordered dedup cache
self._loop: asyncio.AbstractEventLoop | None = None
async def start(self) -> None:
@@ -61,7 +72,7 @@ class FeishuChannel(BaseChannel):
return
self._running = True
- self._loop = asyncio.get_event_loop()
+ self._loop = asyncio.get_running_loop()
# Create Lark client for sending messages
self._client = lark.Client.builder() \
@@ -106,21 +117,16 @@ class FeishuChannel(BaseChannel):
async def stop(self) -> None:
"""Stop the Feishu bot."""
self._running = False
+ if self._ws_client:
+ try:
+ self._ws_client.stop()
+ except Exception as e:
+ logger.warning(f"Error stopping WebSocket client: {e}")
logger.info("Feishu bot stopped")
- def _add_reaction(self, message_id: str, emoji_type: str = "SMILE") -> None:
- """
- Add a reaction emoji to a message.
-
- Common emoji types: THUMBSUP, OK, EYES, DONE, OnIt, HEART
- """
- if not self._client:
- logger.warning("Cannot add reaction: client not initialized")
- return
-
+ def _add_reaction_sync(self, message_id: str, emoji_type: str) -> None:
+ """Sync helper for adding reaction (runs in thread pool)."""
try:
- from lark_oapi.api.im.v1 import Emoji
-
request = CreateMessageReactionRequest.builder() \
.message_id(message_id) \
.request_body(
@@ -134,9 +140,21 @@ class FeishuChannel(BaseChannel):
if not response.success():
logger.warning(f"Failed to add reaction: code={response.code}, msg={response.msg}")
else:
- logger.info(f"Added {emoji_type} reaction to message {message_id}")
+ logger.debug(f"Added {emoji_type} reaction to message {message_id}")
except Exception as e:
logger.warning(f"Error adding reaction: {e}")
+
+ async def _add_reaction(self, message_id: str, emoji_type: str = "THUMBSUP") -> None:
+ """
+ Add a reaction emoji to a message (non-blocking).
+
+ Common emoji types: THUMBSUP, OK, EYES, DONE, OnIt, HEART
+ """
+ if not self._client or not Emoji:
+ return
+
+ loop = asyncio.get_running_loop()
+ await loop.run_in_executor(None, self._add_reaction_sync, message_id, emoji_type)
async def send(self, msg: OutboundMessage) -> None:
"""Send a message through Feishu."""
@@ -183,23 +201,8 @@ class FeishuChannel(BaseChannel):
Sync handler for incoming messages (called from WebSocket thread).
Schedules async handling in the main event loop.
"""
- try:
- if self._loop and self._loop.is_running():
- # Schedule the async handler in the main event loop
- asyncio.run_coroutine_threadsafe(
- self._on_message(data),
- self._loop
- )
- else:
- # Fallback: run in new event loop
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- try:
- loop.run_until_complete(self._on_message(data))
- finally:
- loop.close()
- except Exception as e:
- logger.error(f"Error handling Feishu message: {e}")
+ if self._loop and self._loop.is_running():
+ asyncio.run_coroutine_threadsafe(self._on_message(data), self._loop)
async def _on_message(self, data: "P2ImMessageReceiveV1") -> None:
"""Handle incoming message from Feishu."""
@@ -208,63 +211,43 @@ class FeishuChannel(BaseChannel):
message = event.message
sender = event.sender
- # Get message ID for deduplication
+ # Deduplication check
message_id = message.message_id
if message_id in self._processed_message_ids:
- logger.debug(f"Skipping duplicate message: {message_id}")
return
- self._processed_message_ids.add(message_id)
+ self._processed_message_ids[message_id] = None
- # Limit dedup cache size
- if len(self._processed_message_ids) > 1000:
- self._processed_message_ids = set(list(self._processed_message_ids)[-500:])
-
- # Extract sender info
- sender_id = sender.sender_id.open_id if sender.sender_id else "unknown"
- sender_type = sender.sender_type # "user" or "bot"
+ # Trim cache: keep most recent 500 when exceeds 1000
+ while len(self._processed_message_ids) > 1000:
+ self._processed_message_ids.popitem(last=False)
# Skip bot messages
+ sender_type = sender.sender_type
if sender_type == "bot":
return
-    # Add reaction to user's message to indicate "seen" (👍 THUMBSUP)
- self._add_reaction(message_id, "THUMBSUP")
-
- # Get chat_id for replies
+ sender_id = sender.sender_id.open_id if sender.sender_id else "unknown"
chat_id = message.chat_id
chat_type = message.chat_type # "p2p" or "group"
-
- # Parse message content
- content = ""
msg_type = message.message_type
+ # Add reaction to indicate "seen"
+ await self._add_reaction(message_id, "THUMBSUP")
+
+ # Parse message content
if msg_type == "text":
- # Text message: {"text": "hello"}
try:
- content_obj = json.loads(message.content)
- content = content_obj.get("text", "")
+ content = json.loads(message.content).get("text", "")
except json.JSONDecodeError:
content = message.content or ""
- elif msg_type == "image":
- content = "[image]"
- elif msg_type == "audio":
- content = "[audio]"
- elif msg_type == "file":
- content = "[file]"
- elif msg_type == "sticker":
- content = "[sticker]"
else:
- content = f"[{msg_type}]"
+ content = MSG_TYPE_MAP.get(msg_type, f"[{msg_type}]")
if not content:
return
- logger.debug(f"Feishu message from {sender_id} in {chat_id}: {content[:50]}...")
-
# Forward to message bus
- # Use chat_id for group chats, sender's open_id for p2p
reply_to = chat_id if chat_type == "group" else sender_id
-
await self._handle_message(
sender_id=sender_id,
chat_id=reply_to,
@@ -273,7 +256,6 @@ class FeishuChannel(BaseChannel):
"message_id": message_id,
"chat_type": chat_type,
"msg_type": msg_type,
- "sender_type": sender_type,
}
)
From f341de075de7120019ab9033322dd50a101beca7 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Feb 2026 06:05:09 +0000
Subject: [PATCH 34/35] docs: simplify Feishu configuration guide
---
README.md | 31 +++++++++++++++----------------
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/README.md b/README.md
index 5832380..467f8e3 100644
--- a/README.md
+++ b/README.md
@@ -248,20 +248,18 @@ nanobot gateway
Uses **WebSocket** long connection β no public IP required.
-Requires **lark-oapi** SDK:
-
```bash
-pip install lark-oapi
+pip install nanobot-ai[feishu]
```
**1. Create a Feishu bot**
- Visit [Feishu Open Platform](https://open.feishu.cn/app)
-- Create a new app (Custom App)
-- Enable bot capability
-- Add event subscription: `im.message.receive_v1`
-- Get credentials:
- - **App ID** and **App Secret** from "Credentials & Basic Info"
- - **Verification Token** and **Encrypt Key** from "Event Subscriptions"
+- Create a new app → Enable **Bot** capability
+- **Permissions**: Add `im:message` (send messages)
+- **Events**: Add `im.message.receive_v1` (receive messages)
+ - Select **Long Connection** mode (requires running nanobot first to establish connection)
+- Get **App ID** and **App Secret** from "Credentials & Basic Info"
+- Publish the app
**2. Configure**
@@ -272,15 +270,16 @@ pip install lark-oapi
"enabled": true,
"appId": "cli_xxx",
"appSecret": "xxx",
- "verificationToken": "xxx",
- "encryptKey": "xxx",
- "allowFrom": ["ou_xxx"]
+ "encryptKey": "",
+ "verificationToken": "",
+ "allowFrom": []
}
}
}
```
-> Get your Open ID by sending a message to the bot, or from Feishu admin console.
+> `encryptKey` and `verificationToken` are optional for Long Connection mode.
+> `allowFrom`: Leave empty to allow all users, or add `["ou_xxx"]` to restrict access.
**3. Run**
@@ -342,9 +341,9 @@ Config file: `~/.nanobot/config.json`
"enabled": false,
"appId": "cli_xxx",
"appSecret": "xxx",
- "verificationToken": "xxx",
- "encryptKey": "xxx",
- "allowFrom": ["ou_xxx"]
+ "encryptKey": "",
+ "verificationToken": "",
+ "allowFrom": []
}
},
"tools": {
From 1d74dd24d6b54f447ae8aab1ecf61ce137210719 Mon Sep 17 00:00:00 2001
From: Re-bin
Date: Thu, 5 Feb 2026 06:09:37 +0000
Subject: [PATCH 35/35] docs: update contributors image
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 467f8e3..47f9315 100644
--- a/README.md
+++ b/README.md
PRs welcome! The codebase is intentionally small and readable. 🤖
### Contributors
-
+