# Merge note: merged latest 166 commits from origin/main. Resolved conflicts in
# .gitignore, commands.py, schema.py, providers/__init__.py, and registry.py.
# Kept both local providers (Ollama, AirLLM) and new providers from main.
# Preserved transformers 4.39.3 compatibility fixes and combined error-handling
# improvements with new features.
#!/usr/bin/env python3
"""Download llama3.2 using Hugging Face token - easier to use than shell script."""
import os
import subprocess
import sys
from pathlib import Path
# Hugging Face repo to fetch, and the local directory the snapshot lands in.
MODEL_NAME = "meta-llama/Llama-3.2-3B-Instruct"
MODEL_DIR = Path.home() / ".local" / "models" / "llama3.2-3b-instruct"
def main():
    """Download MODEL_NAME into MODEL_DIR using a Hugging Face access token.

    Returns a process exit code: 0 on success (or when the model is already
    present), 1 on any failure — so ``sys.exit(main())`` reports status to
    the shell correctly.
    """
    print("=" * 70)
    print("DOWNLOADING LLAMA3.2 FOR AIRLLM")
    print("=" * 70)
    print()
    print(f"This will download {MODEL_NAME} to:")
    print(f" {MODEL_DIR}")
    print()
    print("After download, no tokens will be needed!")
    print()

    # Already downloaded? config.json is the marker that a snapshot completed.
    if MODEL_DIR.exists() and (MODEL_DIR / "config.json").exists():
        print(f"✓ Model already exists at: {MODEL_DIR}")
        print(" You're all set! No download needed.")
        return 0

    snapshot_download = _import_snapshot_download()
    if snapshot_download is None:
        # BUG FIX: was a bare `return` (None), which made sys.exit(main())
        # report success (exit code 0) on this failure path.
        return 1

    hf_token = _resolve_token()
    if hf_token is None:
        return 1  # BUG FIX: was a bare `return` — exited 0 on failure.

    print()
    print("Downloading model (this may take a while depending on your connection)...")
    print("Model size: ~2GB")
    print()

    # Create the parent directory; snapshot_download creates MODEL_DIR itself.
    MODEL_DIR.parent.mkdir(parents=True, exist_ok=True)

    try:
        snapshot_download(
            repo_id=MODEL_NAME,
            local_dir=str(MODEL_DIR),
            token=hf_token,
            # Deprecated/ignored on recent huggingface_hub, but harmless and
            # forces real files (not symlinks) on older versions.
            local_dir_use_symlinks=False,
        )
    except Exception as e:
        print()
        print("⚠ Download failed!")
        print(f"Error: {e}")
        print()
        print("Common issues:")
        print(" 1. Make sure you accepted the Llama license:")
        print(f" https://huggingface.co/{MODEL_NAME}")
        print(" 2. Check your token is valid")
        print(" 3. Check your internet connection")
        print()
        print("Try again with:")
        print(" python3 download_llama3.2.py YOUR_TOKEN")
        return 1

    print()
    print("=" * 70)
    print("✓ DOWNLOAD COMPLETE!")
    print("=" * 70)
    print()
    print(f"Model downloaded to: {MODEL_DIR}")
    print()
    print("🎉 No tokens needed anymore - using local model!")
    print()
    print("Your config is already set up. Test it with:")
    print(" nanobot agent -m 'Hello, what is 2+5?'")
    print()
    print("You can now delete your Hugging Face token from the config")
    print("since the model is stored locally.")
    return 0


def _import_snapshot_download():
    """Return ``huggingface_hub.snapshot_download``, installing on demand.

    Returns None (after printing guidance) if the dependency cannot be
    imported even after the install attempt.
    """
    try:
        from huggingface_hub import snapshot_download
        return snapshot_download
    except ImportError:
        pass

    print("Installing huggingface_hub...")
    # Use the running interpreter's pip (not os.system("pip ...")) so the
    # package is installed into the same environment this script runs in.
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "-q", "huggingface_hub"],
        check=False,
    )
    try:
        from huggingface_hub import snapshot_download
        return snapshot_download
    except ImportError:
        print("⚠ Error: Could not install huggingface_hub")
        print("Try: pip install huggingface_hub")
        return None


def _resolve_token():
    """Resolve the HF token: argv[1], then $HF_TOKEN, then interactive prompt.

    Returns the token string, or None when no token was supplied or the user
    declined to continue with a suspicious-looking one.
    """
    # Method 1: command line argument.
    if len(sys.argv) > 1:
        hf_token = sys.argv[1]
        print("Using token from command line argument")
    # Method 2: environment variable.
    elif os.environ.get("HF_TOKEN"):
        hf_token = os.environ.get("HF_TOKEN")
        print("Using token from HF_TOKEN environment variable")
    # Method 3: interactive input.
    else:
        print("Enter your Hugging Face token (starts with 'hf_'):")
        print("(You can also pass it as: python3 download_llama3.2.py YOUR_TOKEN)")
        print("(Or set environment variable: export HF_TOKEN=YOUR_TOKEN)")
        print()
        hf_token = input("Token: ").strip()

    if not hf_token:
        print("⚠ Error: Token is required")
        return None

    # HF tokens conventionally start with "hf_"; warn but allow override.
    if not hf_token.startswith("hf_"):
        print("⚠ Warning: Token should start with 'hf_'")
        confirm = input("Continue anyway? (y/n): ").strip().lower()
        if confirm != 'y':
            return None

    return hf_token
# Script entry point: propagate main()'s exit status to the shell.
# (raise SystemExit is exactly what sys.exit() does internally.)
if __name__ == "__main__":
    raise SystemExit(main())