- Merged latest 166 commits from origin/main
- Resolved conflicts in .gitignore, commands.py, schema.py, providers/__init__.py, and registry.py
- Kept both local providers (Ollama, AirLLM) and the new providers from main
- Preserved transformers 4.39.3 compatibility fixes
- Combined error-handling improvements with the new features
78 lines · 2.6 KiB · Python
#!/usr/bin/env python3
|
|
"""
|
|
Check if we can find llama3.2 model files that could work with AirLLM.
|
|
Looks in various locations where models might be stored.
|
|
"""
|
|
|
|
import os
|
|
from pathlib import Path
|
|
|
|
def check_directory(path, description):
    """Check whether *path* is a directory that contains model files.

    Args:
        path: Filesystem path to inspect (tilde already expanded by caller).
        description: Human-readable label prefixed to the returned message.

    Returns:
        A ``(found, message)`` tuple. ``found`` is True when model files —
        or subdirectories that might contain them — were found; ``message``
        is a display-ready summary.
    """
    path_obj = Path(path)
    if not path_obj.exists():
        return False, f"{description}: Not found"
    # Guard against the path being a regular file: the original code would
    # crash with NotADirectoryError on iterdir() below.
    if not path_obj.is_dir():
        return False, f"{description}: Not a directory"

    # Look for common model files
    model_files = ['config.json', 'tokenizer.json', 'model.safetensors', 'pytorch_model.bin']
    found_files = [f for f in model_files if (path_obj / f).exists()]

    if found_files:
        return True, f"{description}: Found {len(found_files)} model files: {', '.join(found_files)}"

    # No top-level model files; check subdirectories. Listing may fail with
    # PermissionError on system paths (e.g. /usr/local/share/ollama), so
    # treat an unreadable directory as "nothing found" rather than crashing.
    try:
        subdirs = [d for d in path_obj.iterdir() if d.is_dir()]
    except OSError:
        subdirs = []
    if subdirs:
        return True, f"{description}: Found {len(subdirs)} subdirectories (might contain model files)"
    return False, f"{description}: No model files found"
|
|
|
|
print("="*70)
|
|
print("SEARCHING FOR LLAMA3.2 MODEL FILES")
|
|
print("="*70)
|
|
print()
|
|
|
|
# Check common locations
|
|
locations = [
|
|
("~/.ollama/models", "Ollama models directory"),
|
|
("~/.cache/huggingface/hub", "Hugging Face cache"),
|
|
("~/.local/share/ollama", "Ollama data directory"),
|
|
("~/models", "User models directory"),
|
|
("/usr/local/share/ollama", "System Ollama directory"),
|
|
]
|
|
|
|
found_any = False
|
|
for path, desc in locations:
|
|
expanded = os.path.expanduser(path)
|
|
exists, message = check_directory(expanded, desc)
|
|
print(f" {message}")
|
|
if exists:
|
|
found_any = True
|
|
print(f" Path: {expanded}")
|
|
|
|
# --- Summarize findings and suggest next steps ---------------------------
print()
print("=" * 70)
if found_any:
    # Something turned up: explain how each kind of hit can be used.
    guidance = [
        "OPTIONS:",
        "=" * 70,
        "",
        "1. If you found model files in Hugging Face format:",
        " - Use that path directly in your config (no token needed!)",
        "",
        "2. If you only have Ollama format:",
        " - Ollama uses a different format, can't be used directly",
        " - You'd need to get the model in Hugging Face format",
        "",
        "3. Alternative: Get model files from someone else",
        " - If someone has downloaded llama3.2 in HF format,",
        " you can copy their files and use them directly",
        "",
    ]
else:
    # Nothing found: point at the manual-placement workflow.
    guidance = [
        "No model files found in common locations.",
        "",
        "To use AirLLM with llama3.2 without a Hugging Face account:",
        " 1. Get the model files from someone else (in HF format)",
        " 2. Place them in: ~/.local/models/llama3.2-3b-instruct",
        " 3. Your config is already set to use that path!",
        "",
    ]
for line in guidance:
    print(line)