# Merge note: merged latest 166 commits from origin/main; resolved conflicts
# in .gitignore, commands.py, schema.py, providers/__init__.py, and
# registry.py; kept both local providers (Ollama, AirLLM) and the new
# providers from main; preserved the transformers 4.39.3 compatibility fixes;
# combined the error-handling improvements with the new features.
#!/bin/bash
#
# Download meta-llama/Llama-3.2-3B-Instruct to a local directory so AirLLM
# can load it without needing a Hugging Face token at runtime.
#
# Prompts interactively for a Hugging Face token (required once, because the
# Llama models are gated). Exits 0 immediately if the model already exists.

# Fail fast: abort on errors, unset variables, and pipeline failures.
set -euo pipefail

readonly MODEL_NAME="meta-llama/Llama-3.2-3B-Instruct"
readonly MODEL_DIR="$HOME/.local/models/llama3.2-3b-instruct"

echo "======================================================================"
echo "DOWNLOADING LLAMA3.2 FOR AIRLLM"
echo "======================================================================"
echo ""
echo "This will download $MODEL_NAME to:"
echo " $MODEL_DIR"
echo ""
echo "After download, no tokens will be needed!"
echo ""

# Skip the download if the model is already in place; config.json serves as
# the marker that a previous download completed.
if [ -d "$MODEL_DIR" ] && [ -f "$MODEL_DIR/config.json" ]; then
    echo "✓ Model already exists at: $MODEL_DIR"
    echo " You're all set! No download needed."
    exit 0
fi

# Install huggingface_hub (which provides huggingface-cli) if it is missing,
# and fail loudly if the install itself fails.
if ! command -v huggingface-cli &> /dev/null; then
    echo "Installing huggingface_hub..."
    if ! pip install -q huggingface_hub; then
        echo "⚠ Error: failed to install huggingface_hub" >&2
        exit 1
    fi
fi

# Prompt for the token: -s keeps it off the screen, -r preserves backslashes.
# '|| true' so an EOF (Ctrl-D) falls through to the empty-token check below
# instead of tripping 'set -e'.
echo "Enter your Hugging Face token (starts with 'hf_'):"
read -rs HF_TOKEN || true
echo ""

if [ -z "$HF_TOKEN" ]; then
    echo "⚠ Error: Token is required"
    exit 1
fi

# Hugging Face tokens start with 'hf_'; warn but allow an explicit override.
if [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then
    echo "⚠ Warning: Token should start with 'hf_'"
    read -rp "Continue anyway? (y/n): " confirm
    if [ "$confirm" != "y" ]; then
        exit 1
    fi
fi

echo ""
echo "Downloading model (this may take a while depending on your connection)..."
echo "Model size: ~2GB"
echo ""

mkdir -p "$MODEL_DIR"

# Pass the token through the environment (huggingface-cli honors HF_TOKEN)
# rather than on the command line, so it is not visible to other users in
# `ps` output. Test the command directly instead of inspecting $?, which
# also keeps 'set -e' from killing the script before we can report failure.
if HF_TOKEN="$HF_TOKEN" huggingface-cli download "$MODEL_NAME" \
    --local-dir "$MODEL_DIR" \
    --local-dir-use-symlinks False; then
    echo ""
    echo "======================================================================"
    echo "✓ DOWNLOAD COMPLETE!"
    echo "======================================================================"
    echo ""
    echo "Model downloaded to: $MODEL_DIR"
    echo ""
    echo "🎉 No tokens needed anymore - using local model!"
    echo ""
    echo "Your config is already set up. Test it with:"
    echo " nanobot agent -m 'Hello, what is 2+5?'"
    echo ""
    echo "You can now delete your Hugging Face token from the config"
    echo "since the model is stored locally."
else
    echo ""
    echo "⚠ Download failed. Common issues:"
    echo " 1. Make sure you accepted the Llama license:"
    echo " https://huggingface.co/$MODEL_NAME"
    echo " 2. Check your token is valid"
    echo " 3. Check your internet connection"
    echo ""
    echo "Try again with:"
    echo " huggingface-cli download $MODEL_NAME --local-dir $MODEL_DIR --token YOUR_TOKEN"
    # Propagate the failure to callers instead of exiting 0.
    exit 1
fi