fix: preserve reasoning_content in conversation history for thinking models

Re-bin 2026-02-08 18:37:41 +00:00
parent b4217b2690
commit 2931694eb8
5 changed files with 16 additions and 4 deletions

View File

@@ -16,7 +16,7 @@
 ⚡️ Delivers core agent functionality in just **~4,000** lines of code — **99% smaller** than Clawdbot's 430k+ lines.
-📏 Real-time line count: **3,429 lines** (run `bash core_agent_lines.sh` to verify anytime)
+📏 Real-time line count: **3,437 lines** (run `bash core_agent_lines.sh` to verify anytime)

 ## 📢 News

View File

@@ -207,7 +207,8 @@ When remembering something, write to {workspace_path}/memory/MEMORY.md"""
         self,
         messages: list[dict[str, Any]],
         content: str | None,
-        tool_calls: list[dict[str, Any]] | None = None
+        tool_calls: list[dict[str, Any]] | None = None,
+        reasoning_content: str | None = None,
     ) -> list[dict[str, Any]]:
         """
         Add an assistant message to the message list.
@@ -216,6 +217,7 @@
             messages: Current message list.
             content: Message content.
             tool_calls: Optional tool calls.
+            reasoning_content: Thinking output (Kimi, DeepSeek-R1, etc.).

         Returns:
             Updated message list.
@@ -225,5 +227,9 @@
         if tool_calls:
             msg["tool_calls"] = tool_calls
+
+        # Thinking models reject history without this
+        if reasoning_content:
+            msg["reasoning_content"] = reasoning_content
         messages.append(msg)
         return messages
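For context, a standalone sketch of what the patched method now stores. The body is lifted from the hunk above; the surrounding class, the construction of `msg`, and the tool-call payload are assumptions, not shown in the diff:

```python
from typing import Any

def add_assistant_message(
    messages: list[dict[str, Any]],
    content: str | None,
    tool_calls: list[dict[str, Any]] | None = None,
    reasoning_content: str | None = None,
) -> list[dict[str, Any]]:
    # Assumed: the hunk only shows the lines after msg is built.
    msg: dict[str, Any] = {"role": "assistant", "content": content}
    if tool_calls:
        msg["tool_calls"] = tool_calls
    # Thinking models reject history without this
    if reasoning_content:
        msg["reasoning_content"] = reasoning_content
    messages.append(msg)
    return messages

# Illustrative turn: the stored assistant message now round-trips the
# model's thinking output alongside its tool calls.
history = add_assistant_message(
    [],
    "Checking the weather now.",
    tool_calls=[{
        "id": "call_1",
        "type": "function",
        "function": {"name": "get_weather", "arguments": '{"city": "Tokyo"}'},
    }],
    reasoning_content="Need live data, so call the weather tool first.",
)
assert "reasoning_content" in history[-1]
```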

View File

@@ -213,7 +213,8 @@ class AgentLoop:
                 for tc in response.tool_calls
             ]
             messages = self.context.add_assistant_message(
-                messages, response.content, tool_call_dicts
+                messages, response.content, tool_call_dicts,
+                reasoning_content=response.reasoning_content,
             )

             # Execute tools
@@ -317,7 +318,8 @@ class AgentLoop:
                 for tc in response.tool_calls
             ]
             messages = self.context.add_assistant_message(
-                messages, response.content, tool_call_dicts
+                messages, response.content, tool_call_dicts,
+                reasoning_content=response.reasoning_content,
             )

             for tool_call in response.tool_calls:
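Both call sites make the same one-line change. A condensed sketch of one loop iteration, assuming names from the hunks; `provider.complete`, the dict conversion, and tool execution are not shown in the diff and are illustrative only:

```python
from typing import Any

def run_tool_turn(context: Any, provider: Any,
                  messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
    response = provider.complete(messages)  # assumed provider call
    if response.has_tool_calls:
        tool_call_dicts = [vars(tc) for tc in response.tool_calls]  # simplified
        # The fix: forward reasoning_content so the replayed history still
        # carries the thinking output the API originally produced.
        messages = context.add_assistant_message(
            messages, response.content, tool_call_dicts,
            reasoning_content=response.reasoning_content,
        )
        # ...execute each tool call and append a "tool" result message
    return messages
```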

View File

@@ -20,6 +20,7 @@ class LLMResponse:
     tool_calls: list[ToolCallRequest] = field(default_factory=list)
     finish_reason: str = "stop"
     usage: dict[str, int] = field(default_factory=dict)
+    reasoning_content: str | None = None  # Kimi, DeepSeek-R1 etc.

     @property
     def has_tool_calls(self) -> bool:
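A trimmed sketch of the extended dataclass, with tool-call typing simplified to plain dicts (the real class uses `ToolCallRequest`); values are illustrative:

```python
from dataclasses import dataclass, field
from typing import Any

@dataclass
class LLMResponse:
    content: str | None = None
    tool_calls: list[dict[str, Any]] = field(default_factory=list)
    finish_reason: str = "stop"
    usage: dict[str, int] = field(default_factory=dict)
    reasoning_content: str | None = None  # None for non-thinking models

    @property
    def has_tool_calls(self) -> bool:
        return bool(self.tool_calls)

resp = LLMResponse(content="Sunny in Tokyo.",
                   reasoning_content="The tool result says sunny; summarize it.")
print(resp.has_tool_calls)  # False: a plain text turn
```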

View File

@@ -183,11 +183,14 @@ class LiteLLMProvider(LLMProvider):
             "total_tokens": response.usage.total_tokens,
         }

+        reasoning_content = getattr(message, "reasoning_content", None)
+
         return LLMResponse(
             content=message.content,
             tool_calls=tool_calls,
             finish_reason=choice.finish_reason or "stop",
             usage=usage,
+            reasoning_content=reasoning_content,
         )

     def get_default_model(self) -> str:
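The `getattr(..., None)` default is what keeps non-thinking providers working unchanged: if the upstream message has no `reasoning_content` attribute, the field stays `None` and the history code above skips it. A quick illustration, with `SimpleNamespace` standing in for whatever message object the provider returns:

```python
from types import SimpleNamespace

# Stand-ins for provider messages: only the thinking model sets the field.
thinking = SimpleNamespace(content="hi", reasoning_content="chain of thought...")
plain = SimpleNamespace(content="hi")

print(getattr(thinking, "reasoning_content", None))  # "chain of thought..."
print(getattr(plain, "reasoning_content", None))     # None -> omitted from history
```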