diff --git a/README.md b/README.md
index c1b7e46..6a3ec3e 100644
--- a/README.md
+++ b/README.md
@@ -585,6 +585,37 @@ Config file: `~/.nanobot/config.json`
| `moonshot` | LLM (Moonshot/Kimi) | [platform.moonshot.cn](https://platform.moonshot.cn) |
| `zhipu` | LLM (Zhipu GLM) | [open.bigmodel.cn](https://open.bigmodel.cn) |
| `vllm` | LLM (local, any OpenAI-compatible server) | — |
+| `openai_codex` | LLM (Codex, OAuth) | `nanobot provider login openai-codex` |
+
+
+OpenAI Codex (OAuth)
+
+Codex uses OAuth instead of API keys, and requires a ChatGPT Plus or Pro account.
+
+**1. Login:**
+```bash
+nanobot provider login openai-codex
+```
+
+**2. Set model** (merge into `~/.nanobot/config.json`):
+```json
+{
+ "agents": {
+ "defaults": {
+ "model": "openai-codex/gpt-5.1-codex"
+ }
+ }
+}
+```
+
+**3. Chat:**
+```bash
+nanobot agent -m "Hello!"
+```
+
+> Docker users: use `docker run -it` for interactive OAuth login.
+
+
Custom Provider (Any OpenAI-compatible API)
@@ -749,6 +780,7 @@ MCP tools are automatically discovered and registered on startup. The LLM can us
| `nanobot agent --logs` | Show runtime logs during chat |
| `nanobot gateway` | Start the gateway |
| `nanobot status` | Show status |
+| `nanobot provider login openai-codex` | OAuth login for a provider (currently: openai-codex) |
| `nanobot channels login` | Link WhatsApp (scan QR) |
| `nanobot channels status` | Show channel status |
diff --git a/nanobot/channels/slack.py b/nanobot/channels/slack.py
index be95dd2..dd18e79 100644
--- a/nanobot/channels/slack.py
+++ b/nanobot/channels/slack.py
@@ -84,7 +84,7 @@ class SlackChannel(BaseChannel):
use_thread = thread_ts and channel_type != "im"
await self._web_client.chat_postMessage(
channel=msg.chat_id,
- text=msg.content or "",
+ text=self._convert_markdown(msg.content) or "",
thread_ts=thread_ts if use_thread else None,
)
except Exception as e:
@@ -203,3 +203,47 @@ class SlackChannel(BaseChannel):
if not text or not self._bot_user_id:
return text
return re.sub(rf"<@{re.escape(self._bot_user_id)}>\s*", "", text).strip()
+
+ # Markdown → Slack mrkdwn formatting rules (order matters: longest markers first)
+ _MD_TO_SLACK = (
+ (r'(?m)(^|[^\*])\*\*\*(.+?)\*\*\*([^\*]|$)', r'\1*_\2_*\3'), # ***bold italic***
+ (r'(?m)(^|[^_])___(.+?)___([^_]|$)', r'\1*_\2_*\3'), # ___bold italic___
+ (r'(?m)(^|[^\*])\*\*(.+?)\*\*([^\*]|$)', r'\1*\2*\3'), # **bold**
+ (r'(?m)(^|[^_])__(.+?)__([^_]|$)', r'\1*\2*\3'), # __bold__
+ (r'(?m)(^|[^\*])\*(.+?)\*([^\*]|$)', r'\1_\2_\3'), # *italic*
+ (r'(?m)(^|[^~])~~(.+?)~~([^~]|$)', r'\1~\2~\3'), # ~~strike~~
+ (r'(?m)(^|[^!])\[(.+?)\]\((http.+?)\)', r'\1<\3|\2>'), # [text](url)
+ (r'!\[.+?\]\((http.+?)(?:\s".*?")?\)', r'<\1>'), # 
+ )
+ _TABLE_RE = re.compile(r'(?m)^\|.*?\|$(?:\n(?:\|\:?-{3,}\:?)*?\|$)(?:\n\|.*?\|$)*')
+
+ def _convert_markdown(self, text: str) -> str:
+ """Convert standard Markdown to Slack mrkdwn format."""
+ if not text:
+ return text
+ for pattern, repl in self._MD_TO_SLACK:
+ text = re.sub(pattern, repl, text)
+ return self._TABLE_RE.sub(self._convert_table, text)
+
+ @staticmethod
+ def _convert_table(match: re.Match) -> str:
+ """Convert Markdown table to Slack quote + bullet format."""
+ lines = [l.strip() for l in match.group(0).strip().split('\n') if l.strip()]
+ if len(lines) < 2:
+ return match.group(0)
+
+ headers = [h.strip() for h in lines[0].strip('|').split('|')]
+ start = 2 if not re.search(r'[^|\-\s:]', lines[1]) else 1
+
+ result: list[str] = []
+ for line in lines[start:]:
+ cells = [c.strip() for c in line.strip('|').split('|')]
+ cells = (cells + [''] * len(headers))[:len(headers)]
+ if not any(cells):
+ continue
+ result.append(f"> *{headers[0]}*: {cells[0] or '--'}")
+ for i, cell in enumerate(cells[1:], 1):
+ if cell and i < len(headers):
+ result.append(f" \u2022 *{headers[i]}*: {cell}")
+ result.append("")
+ return '\n'.join(result).rstrip()
diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index 6a9c92f..235bfdc 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -19,6 +19,7 @@ from prompt_toolkit.history import FileHistory
from prompt_toolkit.patch_stdout import patch_stdout
from nanobot import __version__, __logo__
+from nanobot.config.schema import Config
app = typer.Typer(
name="nanobot",
@@ -278,21 +279,30 @@ This file stores important information that should persist across sessions.
skills_dir.mkdir(exist_ok=True)
-def _make_provider(config):
+def _make_provider(config: Config):
"""Create LiteLLMProvider from config. Exits if no API key found."""
from nanobot.providers.litellm_provider import LiteLLMProvider
- p = config.get_provider()
+ from nanobot.providers.openai_codex_provider import OpenAICodexProvider
+
model = config.agents.defaults.model
- if not (p and p.api_key) and not model.startswith("bedrock/"):
+ provider_name = config.get_provider_name(model)
+ p = config.get_provider(model)
+
+ # OpenAI Codex (OAuth): don't route via LiteLLM; use the dedicated implementation.
+ if provider_name == "openai_codex" or model.startswith("openai-codex/"):
+ return OpenAICodexProvider(default_model=model)
+
+ if not model.startswith("bedrock/") and not (p and p.api_key):
console.print("[red]Error: No API key configured.[/red]")
console.print("Set one in ~/.nanobot/config.json under providers section")
raise typer.Exit(1)
+
return LiteLLMProvider(
api_key=p.api_key if p else None,
- api_base=config.get_api_base(),
+ api_base=config.get_api_base(model),
default_model=model,
extra_headers=p.extra_headers if p else None,
- provider_name=config.get_provider_name(),
+ provider_name=provider_name,
)
@@ -874,5 +884,52 @@ def status():
console.print(f"{spec.label}: {'[green]✓[/green]' if has_key else '[dim]not set[/dim]'}")
+# ============================================================================
+# OAuth Login
+# ============================================================================
+
+provider_app = typer.Typer(help="Manage providers")
+app.add_typer(provider_app, name="provider")
+
+
+@provider_app.command("login")
+def provider_login(
+ provider: str = typer.Argument(..., help="OAuth provider to authenticate with (e.g., 'openai-codex')"),
+):
+ """Authenticate with an OAuth provider."""
+ console.print(f"{__logo__} OAuth Login - {provider}\n")
+
+ if provider == "openai-codex":
+ try:
+ from oauth_cli_kit import get_token, login_oauth_interactive
+ token = None
+ try:
+ token = get_token()
+ except Exception:
+ token = None
+ if not (token and token.access):
+ console.print("[cyan]No valid token found. Starting interactive OAuth login...[/cyan]")
+ console.print("A browser window may open for you to authenticate.\n")
+ token = login_oauth_interactive(
+ print_fn=lambda s: console.print(s),
+ prompt_fn=lambda s: typer.prompt(s),
+ )
+ if not (token and token.access):
+ console.print("[red]✗ Authentication failed[/red]")
+ raise typer.Exit(1)
+ console.print("[green]✓ Successfully authenticated with OpenAI Codex![/green]")
+ console.print(f"[dim]Account ID: {token.account_id}[/dim]")
+ except ImportError:
+ console.print("[red]oauth_cli_kit not installed. Run: pip install oauth-cli-kit[/red]")
+ raise typer.Exit(1)
+ except Exception as e:
+ console.print(f"[red]Authentication error: {e}[/red]")
+ raise typer.Exit(1)
+ else:
+ console.print(f"[red]Unknown OAuth provider: {provider}[/red]")
+ console.print("[yellow]Supported providers: openai-codex[/yellow]")
+ raise typer.Exit(1)
+
+
if __name__ == "__main__":
app()
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index 0934aac..15b6bb2 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -192,6 +192,7 @@ class ProvidersConfig(BaseModel):
moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
minimax: ProviderConfig = Field(default_factory=ProviderConfig)
aihubmix: ProviderConfig = Field(default_factory=ProviderConfig) # AiHubMix API gateway
+ openai_codex: ProviderConfig = Field(default_factory=ProviderConfig) # OpenAI Codex (OAuth)
class GatewayConfig(BaseModel):
@@ -253,11 +254,15 @@ class Config(BaseSettings):
# Match by keyword (order follows PROVIDERS registry)
for spec in PROVIDERS:
p = getattr(self.providers, spec.name, None)
- if p and any(kw in model_lower for kw in spec.keywords) and p.api_key:
- return p, spec.name
+ if p and any(kw in model_lower for kw in spec.keywords):
+ if spec.is_oauth or p.api_key:
+ return p, spec.name
# Fallback: gateways first, then others (follows registry order)
+ # OAuth providers are NOT valid fallbacks — they require explicit model selection
for spec in PROVIDERS:
+ if spec.is_oauth:
+ continue
p = getattr(self.providers, spec.name, None)
if p and p.api_key:
return p, spec.name
diff --git a/nanobot/providers/__init__.py b/nanobot/providers/__init__.py
index ceff8fa..b2bb2b9 100644
--- a/nanobot/providers/__init__.py
+++ b/nanobot/providers/__init__.py
@@ -2,5 +2,6 @@
from nanobot.providers.base import LLMProvider, LLMResponse
from nanobot.providers.litellm_provider import LiteLLMProvider
+from nanobot.providers.openai_codex_provider import OpenAICodexProvider
-__all__ = ["LLMProvider", "LLMResponse", "LiteLLMProvider"]
+__all__ = ["LLMProvider", "LLMResponse", "LiteLLMProvider", "OpenAICodexProvider"]
diff --git a/nanobot/providers/litellm_provider.py b/nanobot/providers/litellm_provider.py
index ed4cf49..8cc4e35 100644
--- a/nanobot/providers/litellm_provider.py
+++ b/nanobot/providers/litellm_provider.py
@@ -55,6 +55,9 @@ class LiteLLMProvider(LLMProvider):
spec = self._gateway or find_by_model(model)
if not spec:
return
+ if not spec.env_key:
+ # OAuth/provider-only specs (for example: openai_codex)
+ return
# Gateway/local overrides existing env; standard provider doesn't
if self._gateway:
diff --git a/nanobot/providers/openai_codex_provider.py b/nanobot/providers/openai_codex_provider.py
new file mode 100644
index 0000000..5067438
--- /dev/null
+++ b/nanobot/providers/openai_codex_provider.py
@@ -0,0 +1,312 @@
+"""OpenAI Codex Responses Provider."""
+
+from __future__ import annotations
+
+import asyncio
+import hashlib
+import json
+from typing import Any, AsyncGenerator
+
+import httpx
+from loguru import logger
+
+from oauth_cli_kit import get_token as get_codex_token
+from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest
+
+DEFAULT_CODEX_URL = "https://chatgpt.com/backend-api/codex/responses"
+DEFAULT_ORIGINATOR = "nanobot"
+
+
+class OpenAICodexProvider(LLMProvider):
+ """Use Codex OAuth to call the Responses API."""
+
+ def __init__(self, default_model: str = "openai-codex/gpt-5.1-codex"):
+ super().__init__(api_key=None, api_base=None)
+ self.default_model = default_model
+
+ async def chat(
+ self,
+ messages: list[dict[str, Any]],
+ tools: list[dict[str, Any]] | None = None,
+ model: str | None = None,
+ max_tokens: int = 4096,
+ temperature: float = 0.7,
+ ) -> LLMResponse:
+ model = model or self.default_model
+ system_prompt, input_items = _convert_messages(messages)
+
+ token = await asyncio.to_thread(get_codex_token)
+ headers = _build_headers(token.account_id, token.access)
+
+ body: dict[str, Any] = {
+ "model": _strip_model_prefix(model),
+ "store": False,
+ "stream": True,
+ "instructions": system_prompt,
+ "input": input_items,
+ "text": {"verbosity": "medium"},
+ "include": ["reasoning.encrypted_content"],
+ "prompt_cache_key": _prompt_cache_key(messages),
+ "tool_choice": "auto",
+ "parallel_tool_calls": True,
+ }
+
+ if tools:
+ body["tools"] = _convert_tools(tools)
+
+ url = DEFAULT_CODEX_URL
+
+ try:
+ try:
+ content, tool_calls, finish_reason = await _request_codex(url, headers, body, verify=True)
+ except Exception as e:
+ if "CERTIFICATE_VERIFY_FAILED" not in str(e):
+ raise
+ logger.warning("SSL certificate verification failed for Codex API; retrying with verify=False")
+ content, tool_calls, finish_reason = await _request_codex(url, headers, body, verify=False)
+ return LLMResponse(
+ content=content,
+ tool_calls=tool_calls,
+ finish_reason=finish_reason,
+ )
+ except Exception as e:
+ return LLMResponse(
+ content=f"Error calling Codex: {str(e)}",
+ finish_reason="error",
+ )
+
+ def get_default_model(self) -> str:
+ return self.default_model
+
+
+def _strip_model_prefix(model: str) -> str:
+ if model.startswith("openai-codex/"):
+ return model.split("/", 1)[1]
+ return model
+
+
+def _build_headers(account_id: str, token: str) -> dict[str, str]:
+ return {
+ "Authorization": f"Bearer {token}",
+ "chatgpt-account-id": account_id,
+ "OpenAI-Beta": "responses=experimental",
+ "originator": DEFAULT_ORIGINATOR,
+ "User-Agent": "nanobot (python)",
+ "accept": "text/event-stream",
+ "content-type": "application/json",
+ }
+
+
+async def _request_codex(
+ url: str,
+ headers: dict[str, str],
+ body: dict[str, Any],
+ verify: bool,
+) -> tuple[str, list[ToolCallRequest], str]:
+ async with httpx.AsyncClient(timeout=60.0, verify=verify) as client:
+ async with client.stream("POST", url, headers=headers, json=body) as response:
+ if response.status_code != 200:
+ text = await response.aread()
+ raise RuntimeError(_friendly_error(response.status_code, text.decode("utf-8", "ignore")))
+ return await _consume_sse(response)
+
+
+def _convert_tools(tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
+ """Convert OpenAI function-calling schema to Codex flat format."""
+ converted: list[dict[str, Any]] = []
+ for tool in tools:
+ fn = (tool.get("function") or {}) if tool.get("type") == "function" else tool
+ name = fn.get("name")
+ if not name:
+ continue
+ params = fn.get("parameters") or {}
+ converted.append({
+ "type": "function",
+ "name": name,
+ "description": fn.get("description") or "",
+ "parameters": params if isinstance(params, dict) else {},
+ })
+ return converted
+
+
+def _convert_messages(messages: list[dict[str, Any]]) -> tuple[str, list[dict[str, Any]]]:
+ system_prompt = ""
+ input_items: list[dict[str, Any]] = []
+
+ for idx, msg in enumerate(messages):
+ role = msg.get("role")
+ content = msg.get("content")
+
+ if role == "system":
+ system_prompt = content if isinstance(content, str) else ""
+ continue
+
+ if role == "user":
+ input_items.append(_convert_user_message(content))
+ continue
+
+ if role == "assistant":
+ # Handle text first.
+ if isinstance(content, str) and content:
+ input_items.append(
+ {
+ "type": "message",
+ "role": "assistant",
+ "content": [{"type": "output_text", "text": content}],
+ "status": "completed",
+ "id": f"msg_{idx}",
+ }
+ )
+ # Then handle tool calls.
+ for tool_call in msg.get("tool_calls", []) or []:
+ fn = tool_call.get("function") or {}
+ call_id, item_id = _split_tool_call_id(tool_call.get("id"))
+ call_id = call_id or f"call_{idx}"
+ item_id = item_id or f"fc_{idx}"
+ input_items.append(
+ {
+ "type": "function_call",
+ "id": item_id,
+ "call_id": call_id,
+ "name": fn.get("name"),
+ "arguments": fn.get("arguments") or "{}",
+ }
+ )
+ continue
+
+ if role == "tool":
+ call_id, _ = _split_tool_call_id(msg.get("tool_call_id"))
+ output_text = content if isinstance(content, str) else json.dumps(content)
+ input_items.append(
+ {
+ "type": "function_call_output",
+ "call_id": call_id,
+ "output": output_text,
+ }
+ )
+ continue
+
+ return system_prompt, input_items
+
+
+def _convert_user_message(content: Any) -> dict[str, Any]:
+ if isinstance(content, str):
+ return {"role": "user", "content": [{"type": "input_text", "text": content}]}
+ if isinstance(content, list):
+ converted: list[dict[str, Any]] = []
+ for item in content:
+ if not isinstance(item, dict):
+ continue
+ if item.get("type") == "text":
+ converted.append({"type": "input_text", "text": item.get("text", "")})
+ elif item.get("type") == "image_url":
+ url = (item.get("image_url") or {}).get("url")
+ if url:
+ converted.append({"type": "input_image", "image_url": url, "detail": "auto"})
+ if converted:
+ return {"role": "user", "content": converted}
+ return {"role": "user", "content": [{"type": "input_text", "text": ""}]}
+
+
+def _split_tool_call_id(tool_call_id: Any) -> tuple[str, str | None]:
+ if isinstance(tool_call_id, str) and tool_call_id:
+ if "|" in tool_call_id:
+ call_id, item_id = tool_call_id.split("|", 1)
+ return call_id, item_id or None
+ return tool_call_id, None
+ return "call_0", None
+
+
+def _prompt_cache_key(messages: list[dict[str, Any]]) -> str:
+ raw = json.dumps(messages, ensure_ascii=True, sort_keys=True)
+ return hashlib.sha256(raw.encode("utf-8")).hexdigest()
+
+
+async def _iter_sse(response: httpx.Response) -> AsyncGenerator[dict[str, Any], None]:
+ buffer: list[str] = []
+ async for line in response.aiter_lines():
+ if line == "":
+ if buffer:
+ data_lines = [l[5:].strip() for l in buffer if l.startswith("data:")]
+ buffer = []
+ if not data_lines:
+ continue
+ data = "\n".join(data_lines).strip()
+ if not data or data == "[DONE]":
+ continue
+ try:
+ yield json.loads(data)
+ except Exception:
+ continue
+ continue
+ buffer.append(line)
+
+
+async def _consume_sse(response: httpx.Response) -> tuple[str, list[ToolCallRequest], str]:
+ content = ""
+ tool_calls: list[ToolCallRequest] = []
+ tool_call_buffers: dict[str, dict[str, Any]] = {}
+ finish_reason = "stop"
+
+ async for event in _iter_sse(response):
+ event_type = event.get("type")
+ if event_type == "response.output_item.added":
+ item = event.get("item") or {}
+ if item.get("type") == "function_call":
+ call_id = item.get("call_id")
+ if not call_id:
+ continue
+ tool_call_buffers[call_id] = {
+ "id": item.get("id") or "fc_0",
+ "name": item.get("name"),
+ "arguments": item.get("arguments") or "",
+ }
+ elif event_type == "response.output_text.delta":
+ content += event.get("delta") or ""
+ elif event_type == "response.function_call_arguments.delta":
+ call_id = event.get("call_id")
+ if call_id and call_id in tool_call_buffers:
+ tool_call_buffers[call_id]["arguments"] += event.get("delta") or ""
+ elif event_type == "response.function_call_arguments.done":
+ call_id = event.get("call_id")
+ if call_id and call_id in tool_call_buffers:
+ tool_call_buffers[call_id]["arguments"] = event.get("arguments") or ""
+ elif event_type == "response.output_item.done":
+ item = event.get("item") or {}
+ if item.get("type") == "function_call":
+ call_id = item.get("call_id")
+ if not call_id:
+ continue
+ buf = tool_call_buffers.get(call_id) or {}
+ args_raw = buf.get("arguments") or item.get("arguments") or "{}"
+ try:
+ args = json.loads(args_raw)
+ except Exception:
+ args = {"raw": args_raw}
+ tool_calls.append(
+ ToolCallRequest(
+ id=f"{call_id}|{buf.get('id') or item.get('id') or 'fc_0'}",
+ name=buf.get("name") or item.get("name"),
+ arguments=args,
+ )
+ )
+ elif event_type == "response.completed":
+ status = (event.get("response") or {}).get("status")
+ finish_reason = _map_finish_reason(status)
+ elif event_type in {"error", "response.failed"}:
+ raise RuntimeError("Codex response failed")
+
+ return content, tool_calls, finish_reason
+
+
+_FINISH_REASON_MAP = {"completed": "stop", "incomplete": "length", "failed": "error", "cancelled": "error"}
+
+
+def _map_finish_reason(status: str | None) -> str:
+ return _FINISH_REASON_MAP.get(status or "completed", "stop")
+
+
+def _friendly_error(status_code: int, raw: str) -> str:
+ if status_code == 429:
+ return "ChatGPT usage quota exceeded or rate limit triggered. Please try again later."
+ return f"HTTP {status_code}: {raw}"
diff --git a/nanobot/providers/registry.py b/nanobot/providers/registry.py
index b9071a0..59af5e1 100644
--- a/nanobot/providers/registry.py
+++ b/nanobot/providers/registry.py
@@ -51,6 +51,9 @@ class ProviderSpec:
# per-model param overrides, e.g. (("kimi-k2.5", {"temperature": 1.0}),)
model_overrides: tuple[tuple[str, dict[str, Any]], ...] = ()
+ # OAuth-based providers (e.g., OpenAI Codex) don't use API keys
+ is_oauth: bool = False # if True, uses OAuth flow instead of API key
+
@property
def label(self) -> str:
return self.display_name or self.name.title()
@@ -155,6 +158,25 @@ PROVIDERS: tuple[ProviderSpec, ...] = (
model_overrides=(),
),
+ # OpenAI Codex: uses OAuth, not API key.
+ ProviderSpec(
+ name="openai_codex",
+ keywords=("openai-codex", "codex"),
+ env_key="", # OAuth-based, no API key
+ display_name="OpenAI Codex",
+ litellm_prefix="", # Not routed through LiteLLM
+ skip_prefixes=(),
+ env_extras=(),
+ is_gateway=False,
+ is_local=False,
+ detect_by_key_prefix="",
+ detect_by_base_keyword="codex",
+ default_api_base="https://chatgpt.com/backend-api",
+ strip_model_prefix=False,
+ model_overrides=(),
+ is_oauth=True, # OAuth-based authentication
+ ),
+
# DeepSeek: needs "deepseek/" prefix for LiteLLM routing.
ProviderSpec(
name="deepseek",
diff --git a/pyproject.toml b/pyproject.toml
index 147e799..f5fd60c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -23,7 +23,8 @@ dependencies = [
"pydantic-settings>=2.0.0",
"websockets>=12.0",
"websocket-client>=1.6.0",
- "httpx[socks]>=0.25.0",
+ "httpx>=0.25.0",
+ "oauth-cli-kit>=0.1.1",
"loguru>=0.7.0",
"readability-lxml>=0.8.0",
"rich>=13.0.0",