diff --git a/README.md b/README.md
index f6362bb..6a3ec3e 100644
--- a/README.md
+++ b/README.md
@@ -585,6 +585,37 @@ Config file: `~/.nanobot/config.json`
| `moonshot` | LLM (Moonshot/Kimi) | [platform.moonshot.cn](https://platform.moonshot.cn) |
| `zhipu` | LLM (Zhipu GLM) | [open.bigmodel.cn](https://open.bigmodel.cn) |
| `vllm` | LLM (local, any OpenAI-compatible server) | — |
+| `openai_codex` | LLM (Codex, OAuth) | `nanobot provider login openai-codex` |
+
+
+OpenAI Codex (OAuth)
+
+Codex authenticates via OAuth rather than an API key and requires a ChatGPT Plus or Pro account.
+
+**1. Login:**
+```bash
+nanobot provider login openai-codex
+```
+
+**2. Set model** (merge into `~/.nanobot/config.json`):
+```json
+{
+  "agents": {
+    "defaults": {
+      "model": "openai-codex/gpt-5.1-codex"
+    }
+  }
+}
+```
+
+**3. Chat:**
+```bash
+nanobot agent -m "Hello!"
+```
+
+> Docker users: use `docker run -it` for interactive OAuth login.
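+>
+> A sketch, assuming the image is tagged `nanobot`, its entrypoint is the `nanobot` CLI, and config is persisted at `~/.nanobot`:
+>
+> ```bash
+> docker run -it -v ~/.nanobot:/root/.nanobot nanobot provider login openai-codex
+> ```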
+
+
Custom Provider (Any OpenAI-compatible API)
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index b2d3f5a..235bfdc 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -290,10 +290,7 @@ def _make_provider(config: Config):
# OpenAI Codex (OAuth): don't route via LiteLLM; use the dedicated implementation.
if provider_name == "openai_codex" or model.startswith("openai-codex/"):
- return OpenAICodexProvider(
- default_model=model,
- api_base=p.api_base if p else None,
- )
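+        # The provider fetches its own OAuth token, so no api_key/api_base plumbing is needed.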
+ return OpenAICodexProvider(default_model=model)
if not model.startswith("bedrock/") and not (p and p.api_key):
console.print("[red]Error: No API key configured.[/red]")
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 9d648be..15b6bb2 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -192,7 +192,7 @@ class ProvidersConfig(BaseModel):
moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
minimax: ProviderConfig = Field(default_factory=ProviderConfig)
aihubmix: ProviderConfig = Field(default_factory=ProviderConfig) # AiHubMix API gateway
- openai_codex: ProviderConfig = Field(default_factory=ProviderConfig) # OpenAI Codex (OAuth) # AiHubMix API gateway
+ openai_codex: ProviderConfig = Field(default_factory=ProviderConfig) # OpenAI Codex (OAuth)
class GatewayConfig(BaseModel):
@@ -252,19 +252,19 @@ class Config(BaseSettings):
model_lower = (model or self.agents.defaults.model).lower()
# Match by keyword (order follows PROVIDERS registry)
- # Note: OAuth providers don't require api_key, so we check is_oauth flag
for spec in PROVIDERS:
p = getattr(self.providers, spec.name, None)
if p and any(kw in model_lower for kw in spec.keywords):
- # OAuth providers don't need api_key
if spec.is_oauth or p.api_key:
return p, spec.name
# Fallback: gateways first, then others (follows registry order)
- # OAuth providers are also valid fallbacks
+ # OAuth providers are NOT valid fallbacks — they require explicit model selection
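+        # (an unmatched model resolves to (None, None) rather than silently routing via OAuth)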
for spec in PROVIDERS:
+ if spec.is_oauth:
+ continue
p = getattr(self.providers, spec.name, None)
- if p and (spec.is_oauth or p.api_key):
+ if p and p.api_key:
return p, spec.name
return None, None
diff --git a/nanobot/providers/openai_codex_provider.py b/nanobot/providers/openai_codex_provider.py
index f6d56aa..5067438 100644
--- a/nanobot/providers/openai_codex_provider.py
+++ b/nanobot/providers/openai_codex_provider.py
@@ -8,6 +8,7 @@ import json
from typing import Any, AsyncGenerator
import httpx
+from loguru import logger
from oauth_cli_kit import get_token as get_codex_token
from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest
@@ -59,9 +60,9 @@ class OpenAICodexProvider(LLMProvider):
try:
content, tool_calls, finish_reason = await _request_codex(url, headers, body, verify=True)
except Exception as e:
- # Certificate verification failed, downgrade to disable verification (security risk)
if "CERTIFICATE_VERIFY_FAILED" not in str(e):
raise
+            logger.warning("SSL certificate verification failed for Codex API; retrying with verify=False (insecure)")
content, tool_calls, finish_reason = await _request_codex(url, headers, body, verify=False)
return LLMResponse(
content=content,
@@ -77,6 +78,7 @@ class OpenAICodexProvider(LLMProvider):
def get_default_model(self) -> str:
return self.default_model
+
def _strip_model_prefix(model: str) -> str:
if model.startswith("openai-codex/"):
return model.split("/", 1)[1]
@@ -94,6 +96,7 @@ def _build_headers(account_id: str, token: str) -> dict[str, str]:
"content-type": "application/json",
}
+
async def _request_codex(
url: str,
headers: dict[str, str],
@@ -107,36 +110,25 @@ async def _request_codex(
raise RuntimeError(_friendly_error(response.status_code, text.decode("utf-8", "ignore")))
return await _consume_sse(response)
+
def _convert_tools(tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
- # Nanobot tool definitions already use the OpenAI function schema.
+ """Convert OpenAI function-calling schema to Codex flat format."""
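+    # Handles both nested {"type": "function", "function": {...}} and flat tool
+    # dicts; unnamed entries are skipped so Codex doesn't reject the request.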
converted: list[dict[str, Any]] = []
for tool in tools:
- fn = tool.get("function") if isinstance(tool, dict) and tool.get("type") == "function" else None
- if fn and isinstance(fn, dict):
- name = fn.get("name")
- desc = fn.get("description")
- params = fn.get("parameters")
- else:
- name = tool.get("name")
- desc = tool.get("description")
- params = tool.get("parameters")
- if not isinstance(name, str) or not name:
- # Skip invalid tools to avoid Codex rejection.
+ fn = (tool.get("function") or {}) if tool.get("type") == "function" else tool
+ name = fn.get("name")
+ if not name:
continue
- params = params or {}
- if not isinstance(params, dict):
- # Parameters must be a JSON Schema object.
- params = {}
- converted.append(
- {
- "type": "function",
- "name": name,
- "description": desc or "",
- "parameters": params,
- }
- )
+ params = fn.get("parameters") or {}
+ converted.append({
+ "type": "function",
+ "name": name,
+ "description": fn.get("description") or "",
+ "parameters": params if isinstance(params, dict) else {},
+ })
return converted
+
def _convert_messages(messages: list[dict[str, Any]]) -> tuple[str, list[dict[str, Any]]]:
system_prompt = ""
input_items: list[dict[str, Any]] = []
@@ -183,7 +175,7 @@ def _convert_messages(messages: list[dict[str, Any]]) -> tuple[str, list[dict[st
continue
if role == "tool":
- call_id = _extract_call_id(msg.get("tool_call_id"))
+ call_id, _ = _split_tool_call_id(msg.get("tool_call_id"))
output_text = content if isinstance(content, str) else json.dumps(content)
input_items.append(
{
@@ -196,6 +188,7 @@ def _convert_messages(messages: list[dict[str, Any]]) -> tuple[str, list[dict[st
return system_prompt, input_items
+
def _convert_user_message(content: Any) -> dict[str, Any]:
if isinstance(content, str):
return {"role": "user", "content": [{"type": "input_text", "text": content}]}
@@ -215,12 +208,6 @@ def _convert_user_message(content: Any) -> dict[str, Any]:
return {"role": "user", "content": [{"type": "input_text", "text": ""}]}
-def _extract_call_id(tool_call_id: Any) -> str:
- if isinstance(tool_call_id, str) and tool_call_id:
- return tool_call_id.split("|", 1)[0]
- return "call_0"
-
-
def _split_tool_call_id(tool_call_id: Any) -> tuple[str, str | None]:
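+    # e.g. "call_1|fc_123" -> ("call_1", "fc_123"); plain "call_1" -> ("call_1", None)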
if isinstance(tool_call_id, str) and tool_call_id:
if "|" in tool_call_id:
@@ -229,10 +216,12 @@ def _split_tool_call_id(tool_call_id: Any) -> tuple[str, str | None]:
return tool_call_id, None
return "call_0", None
+
def _prompt_cache_key(messages: list[dict[str, Any]]) -> str:
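+    # Canonical JSON (sorted keys, ASCII-only) keeps the key stable for identical histories.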
raw = json.dumps(messages, ensure_ascii=True, sort_keys=True)
return hashlib.sha256(raw.encode("utf-8")).hexdigest()
+
async def _iter_sse(response: httpx.Response) -> AsyncGenerator[dict[str, Any], None]:
buffer: list[str] = []
async for line in response.aiter_lines():
@@ -252,6 +241,7 @@ async def _iter_sse(response: httpx.Response) -> AsyncGenerator[dict[str, Any],
continue
buffer.append(line)
+
async def _consume_sse(response: httpx.Response) -> tuple[str, list[ToolCallRequest], str]:
content = ""
tool_calls: list[ToolCallRequest] = []
@@ -308,16 +298,13 @@ async def _consume_sse(response: httpx.Response) -> tuple[str, list[ToolCallRequ
return content, tool_calls, finish_reason
+
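+# Codex response statuses mapped to OpenAI-style finish reasons (unknown -> "stop").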
+_FINISH_REASON_MAP = {"completed": "stop", "incomplete": "length", "failed": "error", "cancelled": "error"}
+
+
def _map_finish_reason(status: str | None) -> str:
- if not status:
- return "stop"
- if status == "completed":
- return "stop"
- if status == "incomplete":
- return "length"
- if status in {"failed", "cancelled"}:
- return "error"
- return "stop"
+ return _FINISH_REASON_MAP.get(status or "completed", "stop")
+
def _friendly_error(status_code: int, raw: str) -> str:
if status_code == 429:
diff --git a/nanobot/providers/registry.py b/nanobot/providers/registry.py
index 1b4a776..59af5e1 100644
--- a/nanobot/providers/registry.py
+++ b/nanobot/providers/registry.py
@@ -53,7 +53,6 @@ class ProviderSpec:
# OAuth-based providers (e.g., OpenAI Codex) don't use API keys
is_oauth: bool = False # if True, uses OAuth flow instead of API key
- oauth_provider: str = "" # OAuth provider name for token retrieval
@property
def label(self) -> str:
@@ -176,7 +175,6 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
strip_model_prefix=False,
model_overrides=(),
is_oauth=True, # OAuth-based authentication
- oauth_provider="openai-codex", # OAuth provider identifier
),
# DeepSeek: needs "deepseek/" prefix for LiteLLM routing.