Merge branch 'main' of https://github.com/HKUDS/nanobot
This commit is contained in:
commit
74bec26698
@ -574,7 +574,7 @@ Config file: `~/.nanobot/config.json`
|
|||||||
|
|
||||||
| Provider | Purpose | Get API Key |
|
| Provider | Purpose | Get API Key |
|
||||||
|----------|---------|-------------|
|
|----------|---------|-------------|
|
||||||
| `custom` | Any OpenAI-compatible endpoint | — |
|
| `custom` | Any OpenAI-compatible endpoint (direct, no LiteLLM) | — |
|
||||||
| `openrouter` | LLM (recommended, access to all models) | [openrouter.ai](https://openrouter.ai) |
|
| `openrouter` | LLM (recommended, access to all models) | [openrouter.ai](https://openrouter.ai) |
|
||||||
| `anthropic` | LLM (Claude direct) | [console.anthropic.com](https://console.anthropic.com) |
|
| `anthropic` | LLM (Claude direct) | [console.anthropic.com](https://console.anthropic.com) |
|
||||||
| `openai` | LLM (GPT direct) | [platform.openai.com](https://platform.openai.com) |
|
| `openai` | LLM (GPT direct) | [platform.openai.com](https://platform.openai.com) |
|
||||||
@ -623,7 +623,7 @@ nanobot agent -m "Hello!"
|
|||||||
<details>
|
<details>
|
||||||
<summary><b>Custom Provider (Any OpenAI-compatible API)</b></summary>
|
<summary><b>Custom Provider (Any OpenAI-compatible API)</b></summary>
|
||||||
|
|
||||||
If your provider is not listed above but exposes an **OpenAI-compatible API** (e.g. Together AI, Fireworks, Azure OpenAI, self-hosted endpoints), use the `custom` provider:
|
Connects directly to any OpenAI-compatible endpoint — LM Studio, llama.cpp, Together AI, Fireworks, Azure OpenAI, or any self-hosted server. Bypasses LiteLLM; model name is passed as-is.
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
@ -641,7 +641,7 @@ If your provider is not listed above but exposes an **OpenAI-compatible API** (e
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
> The `custom` provider routes through LiteLLM's OpenAI-compatible path. It works with any endpoint that follows the OpenAI chat completions API format. The model name is passed directly to the endpoint without any prefix.
|
> For local servers that don't require a key, set `apiKey` to any non-empty string (e.g. `"no-key"`).
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
|
|||||||
@ -280,18 +280,27 @@ This file stores important information that should persist across sessions.
|
|||||||
|
|
||||||
|
|
||||||
def _make_provider(config: Config):
|
def _make_provider(config: Config):
|
||||||
"""Create LiteLLMProvider from config. Exits if no API key found."""
|
"""Create the appropriate LLM provider from config."""
|
||||||
from nanobot.providers.litellm_provider import LiteLLMProvider
|
from nanobot.providers.litellm_provider import LiteLLMProvider
|
||||||
from nanobot.providers.openai_codex_provider import OpenAICodexProvider
|
from nanobot.providers.openai_codex_provider import OpenAICodexProvider
|
||||||
|
from nanobot.providers.custom_provider import CustomProvider
|
||||||
|
|
||||||
model = config.agents.defaults.model
|
model = config.agents.defaults.model
|
||||||
provider_name = config.get_provider_name(model)
|
provider_name = config.get_provider_name(model)
|
||||||
p = config.get_provider(model)
|
p = config.get_provider(model)
|
||||||
|
|
||||||
# OpenAI Codex (OAuth): don't route via LiteLLM; use the dedicated implementation.
|
# OpenAI Codex (OAuth)
|
||||||
if provider_name == "openai_codex" or model.startswith("openai-codex/"):
|
if provider_name == "openai_codex" or model.startswith("openai-codex/"):
|
||||||
return OpenAICodexProvider(default_model=model)
|
return OpenAICodexProvider(default_model=model)
|
||||||
|
|
||||||
|
# Custom: direct OpenAI-compatible endpoint, bypasses LiteLLM
|
||||||
|
if provider_name == "custom":
|
||||||
|
return CustomProvider(
|
||||||
|
api_key=p.api_key if p else "no-key",
|
||||||
|
api_base=config.get_api_base(model) or "http://localhost:8000/v1",
|
||||||
|
default_model=model,
|
||||||
|
)
|
||||||
|
|
||||||
from nanobot.providers.registry import find_by_name
|
from nanobot.providers.registry import find_by_name
|
||||||
spec = find_by_name(provider_name)
|
spec = find_by_name(provider_name)
|
||||||
if not model.startswith("bedrock/") and not (p and p.api_key) and not (spec and spec.is_oauth):
|
if not model.startswith("bedrock/") and not (p and p.api_key) and not (spec and spec.is_oauth):
|
||||||
|
|||||||
47
nanobot/providers/custom_provider.py
Normal file
47
nanobot/providers/custom_provider.py
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
"""Direct OpenAI-compatible provider — bypasses LiteLLM."""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import json_repair
|
||||||
|
from openai import AsyncOpenAI
|
||||||
|
|
||||||
|
from nanobot.providers.base import LLMProvider, LLMResponse, ToolCallRequest
|
||||||
|
|
||||||
|
|
||||||
|
class CustomProvider(LLMProvider):
    """Direct client for any OpenAI-compatible chat-completions endpoint.

    Unlike the LiteLLM-backed providers, the model name is forwarded to the
    server unmodified and requests go through the ``openai`` SDK pointed at
    a user-supplied base URL (LM Studio, llama.cpp, vLLM, Azure OpenAI,
    self-hosted servers, ...).
    """

    def __init__(self, api_key: str = "no-key", api_base: str = "http://localhost:8000/v1", default_model: str = "default"):
        """Store connection settings and build the async client.

        ``api_key`` may be any non-empty placeholder for local servers that
        do not check authentication.
        """
        super().__init__(api_key, api_base)
        self.default_model = default_model
        # Point the SDK at the configured endpoint instead of api.openai.com.
        self._client = AsyncOpenAI(api_key=api_key, base_url=api_base)

    async def chat(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]] | None = None,
                   model: str | None = None, max_tokens: int = 4096, temperature: float = 0.7) -> LLMResponse:
        """Send one chat-completions request and normalize the reply.

        Any failure (transport, API error, unparseable response) is returned
        as an error-flavored ``LLMResponse`` rather than raised to the caller.
        """
        request: dict[str, Any] = {
            "model": model or self.default_model,
            "messages": messages,
            # Clamp to at least 1 so a zero/negative budget can't be sent.
            "max_tokens": max(1, max_tokens),
            "temperature": temperature,
        }
        if tools:
            request["tools"] = tools
            request["tool_choice"] = "auto"
        try:
            completion = await self._client.chat.completions.create(**request)
            return self._parse(completion)
        except Exception as e:
            # Best-effort contract: surface problems as a response object.
            return LLMResponse(content=f"Error: {e}", finish_reason="error")

    def _parse(self, response: Any) -> LLMResponse:
        """Map an SDK completion object onto nanobot's ``LLMResponse``."""
        first = response.choices[0]
        message = first.message

        calls: list[ToolCallRequest] = []
        for tc in message.tool_calls or []:
            args = tc.function.arguments
            if isinstance(args, str):
                # Some servers emit slightly malformed JSON arguments;
                # json_repair tolerantly parses them.
                args = json_repair.loads(args)
            calls.append(ToolCallRequest(id=tc.id, name=tc.function.name, arguments=args))

        usage_info = response.usage
        usage_dict: dict[str, Any] = {}
        if usage_info:
            usage_dict = {
                "prompt_tokens": usage_info.prompt_tokens,
                "completion_tokens": usage_info.completion_tokens,
                "total_tokens": usage_info.total_tokens,
            }

        return LLMResponse(
            content=message.content,
            tool_calls=calls,
            finish_reason=first.finish_reason or "stop",
            usage=usage_dict,
            # Present on some reasoning-capable servers; absent elsewhere.
            reasoning_content=getattr(message, "reasoning_content", None),
        )

    def get_default_model(self) -> str:
        """Return the model used when a call does not name one."""
        return self.default_model
|
||||||
@ -54,6 +54,9 @@ class ProviderSpec:
|
|||||||
# OAuth-based providers (e.g., OpenAI Codex) don't use API keys
|
# OAuth-based providers (e.g., OpenAI Codex) don't use API keys
|
||||||
is_oauth: bool = False # if True, uses OAuth flow instead of API key
|
is_oauth: bool = False # if True, uses OAuth flow instead of API key
|
||||||
|
|
||||||
|
# Direct providers bypass LiteLLM entirely (e.g., CustomProvider)
|
||||||
|
is_direct: bool = False
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def label(self) -> str:
|
def label(self) -> str:
|
||||||
return self.display_name or self.name.title()
|
return self.display_name or self.name.title()
|
||||||
@ -65,18 +68,14 @@ class ProviderSpec:
|
|||||||
|
|
||||||
PROVIDERS: tuple[ProviderSpec, ...] = (
|
PROVIDERS: tuple[ProviderSpec, ...] = (
|
||||||
|
|
||||||
# === Custom (user-provided OpenAI-compatible endpoint) =================
|
# === Custom (direct OpenAI-compatible endpoint, bypasses LiteLLM) ======
|
||||||
# No auto-detection — only activates when user explicitly configures "custom".
|
|
||||||
|
|
||||||
ProviderSpec(
|
ProviderSpec(
|
||||||
name="custom",
|
name="custom",
|
||||||
keywords=(),
|
keywords=(),
|
||||||
env_key="OPENAI_API_KEY",
|
env_key="",
|
||||||
display_name="Custom",
|
display_name="Custom",
|
||||||
litellm_prefix="openai",
|
litellm_prefix="",
|
||||||
skip_prefixes=("openai/",),
|
is_direct=True,
|
||||||
is_gateway=True,
|
|
||||||
strip_model_prefix=True,
|
|
||||||
),
|
),
|
||||||
|
|
||||||
# === Gateways (detected by api_key / api_base, not model name) =========
|
# === Gateways (detected by api_key / api_base, not model name) =========
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user