- Merged latest 166 commits from origin/main - Resolved conflicts in .gitignore, commands.py, schema.py, providers/__init__.py, and registry.py - Kept both local providers (Ollama, AirLLM) and new providers from main - Preserved transformers 4.39.3 compatibility fixes - Combined error handling improvements with new features
103 lines
2.9 KiB
Bash
103 lines
2.9 KiB
Bash
#!/bin/bash
|
|
# Configure llama3.2 with AirLLM using local path (no tokens after download)
|
|
|
|
CONFIG_FILE="$HOME/.nanobot/config.json"
|
|
MODEL_DIR="$HOME/.local/models/llama3.2-3b-instruct"
|
|
MODEL_NAME="meta-llama/Llama-3.2-3B-Instruct"
|
|
|
|
echo "======================================================================"
|
|
echo "LLAMA3.2 + AIRLLM LOCAL SETUP (NO TOKENS AFTER DOWNLOAD)"
|
|
echo "======================================================================"
|
|
echo ""
|
|
|
|
# Create config directory if it doesn't exist
|
|
mkdir -p "$(dirname "$CONFIG_FILE")"
|
|
|
|
# Load existing config or create new one
|
|
if [ -f "$CONFIG_FILE" ]; then
|
|
echo "Found existing config at: $CONFIG_FILE"
|
|
# Create backup
|
|
cp "$CONFIG_FILE" "$CONFIG_FILE.backup"
|
|
echo "✓ Backup created: $CONFIG_FILE.backup"
|
|
CONFIG=$(cat "$CONFIG_FILE")
|
|
else
|
|
CONFIG="{}"
|
|
echo "Creating new config at: $CONFIG_FILE"
|
|
fi
|
|
|
|
# Use Python to update JSON config
|
|
python3 << EOF
|
|
import json
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
config_file = Path("$CONFIG_FILE")
|
|
model_dir = "$MODEL_DIR"
|
|
|
|
# Load config
|
|
try:
|
|
with open(config_file) as f:
|
|
config = json.load(f)
|
|
except:
|
|
config = {}
|
|
|
|
# Ensure structure
|
|
if "providers" not in config:
|
|
config["providers"] = {}
|
|
if "agents" not in config:
|
|
config["agents"] = {}
|
|
if "defaults" not in config["agents"]:
|
|
config["agents"]["defaults"] = {}
|
|
|
|
# Configure airllm with local path
|
|
config["providers"]["airllm"] = {
|
|
"apiKey": model_dir, # Local path - no tokens needed!
|
|
"apiBase": None,
|
|
"extraHeaders": {}
|
|
}
|
|
|
|
# Set default model to local path
|
|
config["agents"]["defaults"]["model"] = model_dir
|
|
|
|
# Save config
|
|
config_file.parent.mkdir(parents=True, exist_ok=True)
|
|
with open(config_file, 'w') as f:
|
|
json.dump(config, f, indent=2)
|
|
|
|
import os
|
|
os.chmod(config_file, 0o600)
|
|
|
|
print(f"✓ Configuration updated!")
|
|
print(f" Model path: {model_dir}")
|
|
print(f" Config file: {config_file}")
|
|
EOF
|
|
|
|
echo ""
|
|
echo "======================================================================"
|
|
echo "CONFIGURATION COMPLETE!"
|
|
echo "======================================================================"
|
|
echo ""
|
|
echo "✓ Config updated to use local model path: $MODEL_DIR"
|
|
echo "✓ No tokens needed - will use local model!"
|
|
echo ""
|
|
|
|
# Check if model exists
|
|
if [ -d "$MODEL_DIR" ] && [ -f "$MODEL_DIR/config.json" ]; then
|
|
echo "✓ Model found at: $MODEL_DIR"
|
|
echo ""
|
|
echo "You're all set! Test it with:"
|
|
echo " nanobot agent -m 'Hello, what is 2+5?'"
|
|
else
|
|
echo "⚠ Model not found at: $MODEL_DIR"
|
|
echo ""
|
|
echo "To download the model (one-time, requires HF token):"
|
|
echo " 1. Get a Hugging Face token: https://huggingface.co/settings/tokens"
|
|
echo " 2. Accept Llama license: https://huggingface.co/$MODEL_NAME"
|
|
echo " 3. Download model:"
|
|
echo " huggingface-cli download $MODEL_NAME --local-dir $MODEL_DIR"
|
|
echo ""
|
|
echo "After download, no tokens will be needed!"
|
|
fi
|
|
echo ""
|
|
|