#!/usr/bin/env python3
"""Quick test script to check GPU VM connection."""
import asyncio
import sys

# Allow running from the repo root without installing the package.
sys.path.insert(0, '.')

from backend.llm_client import list_models
from backend.config import OPENAI_COMPAT_BASE_URL, COUNCIL_MODELS, CHAIRMAN_MODEL


async def test_connection():
    """Probe the configured OpenAI-compatible endpoint and report model status.

    Prints the target base URL and configured models, then asks the server
    for its model list via ``list_models()``. Three outcomes are reported:

    - ``None`` result: connection/timeout/incompatible endpoint, with
      suggested ``curl`` checks against the Ollama and OpenAI-style routes.
    - Non-empty list: each model is printed, marking configured council
      models and the chairman model, followed by a warning for any
      configured model the server does not expose.
    - Empty list: connected but no models — unusual for Ollama, so the
      user is told to re-check the URL/port.

    Any unexpected exception is summarized and its traceback printed; this
    is a diagnostic script, so nothing is re-raised.
    """
    print(f"Testing connection to: {OPENAI_COMPAT_BASE_URL}")
    print(f"Configured council models: {COUNCIL_MODELS}")
    print(f"Chairman model: {CHAIRMAN_MODEL}")
    print("-" * 60)

    try:
        models = await list_models()

        # None signals a transport-level failure (as opposed to an empty
        # but successful response, handled below).
        if models is None:
            print("✗ Unable to list models (connection error, timeout, or incompatible endpoint).")
            print("")
            print("Next checks:")
            print(f"  - curl {OPENAI_COMPAT_BASE_URL.rstrip('/')}/api/tags")
            print(f"  - curl {OPENAI_COMPAT_BASE_URL.rstrip('/')}/v1/models")
            print("")
            print("If you're using Ollama remotely, the port is usually 11434.")
            return

        if models:
            print("✓ Connection successful!")
            print(f"Found {len(models)} available models:\n")
            for model in models:
                # Mark models that appear in the configured council, and
                # tag the chairman separately.
                marker = "✓" if model in COUNCIL_MODELS else " "
                chairman_marker = " (CHAIRMAN)" if model == CHAIRMAN_MODEL else ""
                print(f"  {marker} {model}{chairman_marker}")
            print("\n" + "-" * 60)

            # Warn about configured council models the server doesn't serve.
            missing = [m for m in COUNCIL_MODELS if m not in models]
            if missing:
                print(f"⚠ Warning: {len(missing)} configured models not found:")
                for m in missing:
                    print(f"  - {m}")
            else:
                print("✓ All configured council models are available!")
        else:
            print("✗ Connected, but the server returned an empty model list.")
            print("  This is unusual for Ollama; double-check the base URL/port and server.")

    except Exception as e:
        # Diagnostic tool: summarize and dump the traceback instead of
        # crashing, so the user still sees the earlier context lines.
        print(f"✗ Connection failed: {type(e).__name__}: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    asyncio.run(test_connection())