#!/bin/bash
# Find and verify Ollama models on GPU VM
# Run this ON THE GPU VM

echo "=== Finding Ollama Models ==="
echo ""

echo "1. Check what Ollama API reports:"
echo "   Running: curl http://localhost:11434/api/tags"
# Pretty-print with python3 if available, otherwise fall back to raw JSON
curl -s http://localhost:11434/api/tags | python3 -m json.tool 2>/dev/null || curl -s http://localhost:11434/api/tags
echo ""
echo ""

echo "2. Find Ollama data directory:"
echo "   Checking common locations..."

# Check for OLLAMA_MODELS env var
if [ -n "$OLLAMA_MODELS" ]; then
  echo "   OLLAMA_MODELS env var: $OLLAMA_MODELS"
  if [ -d "$OLLAMA_MODELS" ]; then
    echo "   ✓ Found! Size: $(du -sh "$OLLAMA_MODELS" 2>/dev/null | cut -f1)"
    echo "   Models:"
    ls -lh "$OLLAMA_MODELS" | head -10
  fi
fi

# Check common locations
for dir in ~/.ollama/models ~/.ollama /usr/share/ollama/models /usr/share/ollama /var/lib/ollama/models /var/lib/ollama; do
  if [ -d "$dir" ]; then
    echo "   Found: $dir"
    echo "   Size: $(du -sh "$dir" 2>/dev/null | cut -f1)"
    if [ -d "$dir/models" ]; then
      echo "   Models in subdirectory:"
      ls -lh "$dir/models" 2>/dev/null | head -5
    fi
    find "$dir" \( -name "*.gguf" -o -name "*.bin" \) 2>/dev/null | head -5
  fi
done
echo ""

echo "3. Check Ollama process environment:"
sudo cat "/proc/$(pgrep -f ollama | head -1)/environ" 2>/dev/null | tr '\0' '\n' | grep -i model || echo "   No OLLAMA_MODELS in process env"
echo ""

echo "4. Check systemd service environment:"
systemctl show ollama | grep -i environment
echo ""

echo "=== If models are missing ==="
echo "They might be in a different location. Ollama stores models in:"
echo "  - Default: ~/.ollama/models (or /usr/share/ollama/models)"
echo "  - Or wherever OLLAMA_MODELS env var points"
echo ""
echo "To re-download models:"
echo "  ollama pull qwen2:latest"
echo "  ollama pull qwen2.5:14b"
echo "  ollama pull llama3.1:8b"
echo "  ollama pull qwen2.5:7b"
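
echo ""
echo "5. Optional cross-check with the Ollama CLI:"
# Optional extra check (a small sketch, assuming the ollama CLI is installed on this VM):
# 'ollama list' prints the locally available models and should mirror the API
# output from step 1. Skips gracefully if the CLI is not on PATH.
if command -v ollama >/dev/null 2>&1; then
  ollama list
else
  echo "   ollama CLI not found in PATH"
fi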