- Merged latest 166 commits from origin/main
- Resolved conflicts in .gitignore, commands.py, schema.py, providers/__init__.py, and registry.py
- Kept both local providers (Ollama, AirLLM) and new providers from main
- Preserved transformers 4.39.3 compatibility fixes
- Combined error handling improvements with new features
197 lines
6.9 KiB
Python
197 lines
6.9 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
Setup script to configure llama3.2 with AirLLM using local model path (no tokens after initial download).
|
|
|
|
This script will:
|
|
1. Download llama3.2 to a local directory (one-time token needed)
|
|
2. Configure nanobot to use the local path (no tokens needed after)
|
|
"""
|
|
|
|
import json
|
|
import os
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
# Where nanobot reads/writes its JSON configuration.
CONFIG_PATH = Path.home() / ".nanobot" / "config.json"
# Local directory that will hold the downloaded model snapshot.
MODEL_DIR = Path.home() / ".local" / "models" / "llama3.2-3b-instruct"
# Hugging Face repo id to download (gated: requires accepting the Llama license).
MODEL_NAME = "meta-llama/Llama-3.2-3B-Instruct"
|
|
|
|
def load_existing_config(path=None):
    """Load the existing nanobot config, or return an empty dict.

    Args:
        path: Optional config file location; defaults to CONFIG_PATH.
            (Parameter added for testability; existing no-arg callers
            are unaffected.)

    Returns:
        dict: The parsed JSON config, or {} when the file is missing or
        unreadable (a warning is printed in the unreadable case).
    """
    path = CONFIG_PATH if path is None else Path(path)
    if path.exists():
        try:
            with open(path) as f:
                return json.load(f)
        except (OSError, ValueError) as e:
            # OSError: file unreadable; ValueError: malformed JSON
            # (json.JSONDecodeError is a ValueError subclass). Narrowed
            # from a bare `except Exception` so real bugs still surface.
            print(f"Warning: Could not read existing config: {e}")
            return {}
    return {}
|
|
|
|
def download_model_with_token():
    """Interactively download MODEL_NAME into MODEL_DIR using an HF token.

    Prompts the user for a Hugging Face access token (printing step-by-step
    instructions for creating one and accepting the gated Llama license if
    they don't have one yet), then fetches the full model snapshot with
    huggingface_hub.snapshot_download.

    Returns:
        bool: True when the snapshot downloaded successfully; False when
        the user has no token, the token doesn't look valid, the
        huggingface_hub package is missing, or the download failed.
    """
    print("\n" + "="*70)
    print("DOWNLOADING LLAMA3.2 MODEL")
    print("="*70)
    print(f"\nThis will download {MODEL_NAME} to:")
    print(f" {MODEL_DIR}")
    print("\nYou'll need a Hugging Face token (one-time only).")
    print("After download, no tokens will be needed!\n")

    has_token = input("Do you have a Hugging Face token? (y/n): ").strip().lower()

    if has_token != 'y':
        # Walk the user through creating a token and accepting the gated
        # license, then bail out so they can re-run with a token in hand.
        print("\n" + "="*70)
        print("GETTING A HUGGING FACE TOKEN")
        print("="*70)
        print("\n1. Go to: https://huggingface.co/settings/tokens")
        print("2. Click 'New token'")
        print("3. Give it a name (e.g., 'nanobot')")
        print("4. Select 'Read' permission")
        print("5. Click 'Generate token'")
        print("6. Copy the token (starts with 'hf_...')")
        print("\nThen accept the Llama license:")
        print(f"1. Go to: https://huggingface.co/{MODEL_NAME}")
        print("2. Click 'Agree and access repository'")
        print("3. Accept the license terms")
        print("\nRun this script again after getting your token.")
        return False

    hf_token = input("\nEnter your Hugging Face token (starts with 'hf_'): ").strip()
    if not hf_token or not hf_token.startswith('hf_'):
        print("⚠ Error: Token must start with 'hf_'")
        return False

    print(f"\nDownloading {MODEL_NAME}...")
    print("This may take a while depending on your internet connection...")

    try:
        # Imported lazily so the script still runs (and can print install
        # instructions) when huggingface_hub is not installed. The
        # redundant inner `import os` was removed: os is imported at the
        # top of the module.
        from huggingface_hub import snapshot_download

        # Export the token so any downstream hub calls can authenticate too.
        os.environ['HF_TOKEN'] = hf_token

        # Ensure the parent of the target directory exists;
        # snapshot_download creates MODEL_DIR itself.
        MODEL_DIR.parent.mkdir(parents=True, exist_ok=True)

        snapshot_download(
            repo_id=MODEL_NAME,
            local_dir=str(MODEL_DIR),
            token=hf_token,
            # NOTE(review): deprecated and ignored by recent
            # huggingface_hub releases; kept for older versions where it
            # forces real files rather than symlinks into the cache.
            local_dir_use_symlinks=False
        )

        print(f"\n✓ Model downloaded successfully to: {MODEL_DIR}")
        return True

    except ImportError:
        print("\n⚠ Error: huggingface_hub not installed.")
        print("Install it with: pip install huggingface_hub")
        return False
    except Exception as e:
        # Broad catch is deliberate: network/auth/disk failures should all
        # produce the manual-download fallback, not a traceback.
        print(f"\n⚠ Error downloading model: {e}")
        print("\nYou can try downloading manually:")
        # Only the first 10 characters of the token are echoed.
        print(f" huggingface-cli download {MODEL_NAME} --local-dir {MODEL_DIR} --token {hf_token[:10]}...")
        return False
|
|
|
|
def check_model_exists(model_dir=None):
    """Check if the model is already downloaded locally.

    Args:
        model_dir: Optional directory to check; defaults to MODEL_DIR.
            (Parameter added for testability; existing no-arg callers
            are unaffected.)

    Returns:
        bool: True when the directory exists and contains the marker
        files a complete snapshot would have.
    """
    model_dir = MODEL_DIR if model_dir is None else Path(model_dir)
    # Check for specific model files, not just the directory, so an empty
    # or incomplete directory is not mistaken for a finished download.
    required_files = ('config.json', 'tokenizer.json')
    if model_dir.exists() and all((model_dir / f).exists() for f in required_files):
        print(f"✓ Found existing model at: {model_dir}")
        return True
    return False
|
|
|
|
def configure_for_local_path(config, model_dir=None):
    """Point nanobot's config at a local model directory.

    Args:
        config: Existing config dict; mutated in place and also returned.
        model_dir: Optional model location override; defaults to
            MODEL_DIR. (Parameter added for testability; existing
            single-arg callers are unaffected.)

    Returns:
        dict: The same config object, with the "airllm" provider and the
        default agent model both set to the local path, so no HF token is
        needed at runtime.
    """
    model_path = str(MODEL_DIR if model_dir is None else model_dir)

    # Ensure the sections we write into exist without clobbering anything
    # already present.
    config.setdefault("providers", {})
    config.setdefault("agents", {}).setdefault("defaults", {})

    # Set up AirLLM provider with local path
    config["providers"]["airllm"] = {
        "apiKey": model_path,  # Local path - no tokens needed!
        "apiBase": None,
        "extraHeaders": {}  # No hf_token needed for local paths
    }

    # Set default model to local path
    config["agents"]["defaults"]["model"] = model_path

    return config
|
|
|
|
def save_config(config, path=None):
    """Write *config* as pretty-printed JSON with owner-only permissions.

    Args:
        config: Config dict to serialize.
        path: Optional destination file; defaults to CONFIG_PATH.
            (Parameter added for testability; existing single-arg
            callers are unaffected.)
    """
    path = CONFIG_PATH if path is None else Path(path)
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, 'w') as f:
        json.dump(config, f, indent=2)

    # Set secure permissions: the config can hold credentials (the
    # provider "apiKey" field), so restrict access to the owner.
    os.chmod(path, 0o600)
    print(f"\n✓ Configuration saved to: {path}")
|
|
|
|
def main():
    """Main setup function.

    Interactive flow:
      1. If the model already exists locally, just (re)write the config.
      2. Otherwise offer to download it (one-time Hugging Face token).
      3. If the user declines the download, write the config anyway and
         print instructions for fetching the model manually.
    """
    print("\n" + "="*70)
    print("LLAMA3.2 + AIRLLM LOCAL SETUP (NO TOKENS AFTER DOWNLOAD)")
    print("="*70)

    # Check if model already exists
    if check_model_exists():
        # Fast path: model files are in place, only the config is needed.
        print("\nModel already downloaded! Configuring...")
        config = load_existing_config()
        config = configure_for_local_path(config)
        save_config(config)
        print("\n✓ Configuration complete!")
        print(f" Model path: {MODEL_DIR}")
        print(" No tokens needed - using local model!")
        return

    # Check if user wants to download
    print(f"\nModel not found at: {MODEL_DIR}")
    download = input("\nDownload model now? (y/n): ").strip().lower()

    if download == 'y':
        if download_model_with_token():
            # Configure after successful download
            config = load_existing_config()
            config = configure_for_local_path(config)
            save_config(config)
            print("\n" + "="*70)
            print("SETUP COMPLETE!")
            print("="*70)
            print(f"\n✓ Model downloaded to: {MODEL_DIR}")
            print(f"✓ Configuration updated to use local path")
            print("\n🎉 No tokens needed anymore - using local model!")
            print("\nTest it with:")
            print(" nanobot agent -m 'Hello, what is 2+5?'")
        else:
            # download_model_with_token() already printed the failure detail.
            print("\n⚠ Download failed. You can:")
            print(" 1. Run this script again")
            print(" 2. Download manually and point config to the path")
    else:
        # Just configure for local path (user will provide model)
        print(f"\nConfiguring for local path: {MODEL_DIR}")
        print("Make sure the model is downloaded to this location.")
        config = load_existing_config()
        config = configure_for_local_path(config)
        save_config(config)
        print("\n✓ Configuration saved!")
        print(f"\nTo download the model manually:")
        print(f" huggingface-cli download {MODEL_NAME} --local-dir {MODEL_DIR}")
        print("\nOr place your model files in:")
        print(f" {MODEL_DIR}")


if __name__ == "__main__":
    main()
|
|
|