diff --git a/nanobot/cli/commands.py b/nanobot/cli/commands.py
index e48865f..18c23b1 100644
--- a/nanobot/cli/commands.py
+++ b/nanobot/cli/commands.py
@@ -155,20 +155,18 @@ def main(
 @app.command()
 def onboard():
     """Initialize nanobot configuration and workspace."""
-    from nanobot.config.loader import get_config_path, save_config
+    from nanobot.config.loader import get_config_path, load_config, save_config
     from nanobot.config.schema import Config
     from nanobot.utils.helpers import get_workspace_path
 
     config_path = get_config_path()
 
     if config_path.exists():
-        console.print(f"[yellow]Config already exists at {config_path}[/yellow]")
-        if not typer.confirm("Overwrite?"):
-            console.print("[dim]Skipping config creation[/dim]")
-            config_path = None  # Sentinel to skip creation
-
-    if config_path:
-        # Create default config
+        # Load existing config — Pydantic fills in defaults for any new fields
+        config = load_config()
+        save_config(config)
+        console.print(f"[green]✓[/green] Config refreshed at {config_path} (existing values preserved)")
+    else:
         config = Config()
         save_config(config)
         console.print(f"[green]✓[/green] Created config at {config_path}")
diff --git a/nanobot/config/schema.py b/nanobot/config/schema.py
index ef999b7..60bbc69 100644
--- a/nanobot/config/schema.py
+++ b/nanobot/config/schema.py
@@ -179,6 +179,7 @@ class ProviderConfig(BaseModel):
 class ProvidersConfig(BaseModel):
     """Configuration for LLM providers."""
 
+    custom: ProviderConfig = Field(default_factory=ProviderConfig)  # Any OpenAI-compatible endpoint
     anthropic: ProviderConfig = Field(default_factory=ProviderConfig)
     openai: ProviderConfig = Field(default_factory=ProviderConfig)
     openrouter: ProviderConfig = Field(default_factory=ProviderConfig)
diff --git a/nanobot/providers/registry.py b/nanobot/providers/registry.py
index fdd036e..b9071a0 100644
--- a/nanobot/providers/registry.py
+++ b/nanobot/providers/registry.py
@@ -62,6 +62,20 @@ class ProviderSpec:
 PROVIDERS: tuple[ProviderSpec, ...] = (
+    # === Custom (user-provided OpenAI-compatible endpoint) =================
+    # No auto-detection — only activates when user explicitly configures "custom".
+
+    ProviderSpec(
+        name="custom",
+        keywords=(),
+        env_key="OPENAI_API_KEY",
+        display_name="Custom",
+        litellm_prefix="openai",
+        skip_prefixes=("openai/",),
+        is_gateway=True,
+        strip_model_prefix=True,
+    ),
+
     # === Gateways (detected by api_key / api_base, not model name) =========
     # Gateways can route any model, so they win in fallback.