Merge branch 'main' into pr-625

Re-bin 2026-02-15 05:27:16 +00:00
commit fd480bb6f5
2 changed files with 57 additions and 0 deletions


@@ -39,6 +39,53 @@ MSG_TYPE_MAP = {
}

def _extract_post_text(content_json: dict) -> str:
    """Extract plain text from Feishu post (rich text) message content.

    Supports two formats:
    1. Direct format: {"title": "...", "content": [...]}
    2. Localized format: {"zh_cn": {"title": "...", "content": [...]}}
    """

    def extract_from_lang(lang_content: dict) -> str | None:
        if not isinstance(lang_content, dict):
            return None
        title = lang_content.get("title", "")
        content_blocks = lang_content.get("content", [])
        if not isinstance(content_blocks, list):
            return None
        text_parts = []
        if title:
            text_parts.append(title)
        for block in content_blocks:
            if not isinstance(block, list):
                continue
            for element in block:
                if isinstance(element, dict):
                    tag = element.get("tag")
                    if tag == "text":
                        text_parts.append(element.get("text", ""))
                    elif tag == "a":
                        text_parts.append(element.get("text", ""))
                    elif tag == "at":
                        text_parts.append(f"@{element.get('user_name', 'user')}")
        return " ".join(text_parts).strip() if text_parts else None

    # Try direct format first
    if "content" in content_json:
        result = extract_from_lang(content_json)
        if result:
            return result

    # Try localized format
    for lang_key in ("zh_cn", "en_us", "ja_jp"):
        lang_content = content_json.get(lang_key)
        result = extract_from_lang(lang_content)
        if result:
            return result

    return ""


class FeishuChannel(BaseChannel):
    """
    Feishu/Lark channel using WebSocket long connection.
@@ -326,6 +373,12 @@ class FeishuChannel(BaseChannel):
content = json.loads(message.content).get("text", "")
except json.JSONDecodeError:
content = message.content or ""
elif msg_type == "post":
try:
content_json = json.loads(message.content)
content = _extract_post_text(content_json)
except (json.JSONDecodeError, TypeError):
content = message.content or ""
else:
content = MSG_TYPE_MAP.get(msg_type, f"[{msg_type}]")


@@ -122,6 +122,10 @@ class LiteLLMProvider(LLMProvider):
        """
        model = self._resolve_model(model or self.default_model)

        # Clamp max_tokens to at least 1: negative or zero values cause
        # LiteLLM to reject the request with "max_tokens must be at least 1".
        max_tokens = max(1, max_tokens)

        kwargs: dict[str, Any] = {
            "model": model,
            "messages": messages,