AI-Life-Coach-Streamlit2 / diagnose_ollama.py
#!/usr/bin/env python3
"""
Diagnostic script to test Ollama connectivity
"""
import sys

import requests

from utils.config import config


def test_ollama_connectivity(custom_ngrok_url=None):
"""Test if Ollama is reachable from the current environment"""
test_url = custom_ngrok_url if custom_ngrok_url else config.ollama_host
print("=== Ollama Connectivity Diagnostic ===")
print(f"Testing Ollama Host: {test_url}")
print(f"Configured Model: {config.local_model_name}")
print()
# Headers to skip ngrok browser warning
headers = {
"ngrok-skip-browser-warning": "true",
"User-Agent": "AI-Life-Coach-Diagnostic"
}
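    # For reference (not exhaustive): /api/tags normally answers with JSON
    # roughly shaped like {"models": [{"name": "mistral:latest", ...}]}.
    # Test 1 below relies only on the "models" list and each entry's "name".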
    # Test 1: Check if we can reach the Ollama host
    print("Test 1: Checking Ollama host connectivity...")
    try:
        response = requests.get(
            f"{test_url}/api/tags",
            headers=headers,
            timeout=10
        )
        print(f" Status Code: {response.status_code}")
        if response.status_code == 200:
            print(" ✓ Successfully connected to Ollama host")
            try:
                data = response.json()
                models = data.get("models", [])
                print(f" Available Models: {len(models)}")
                for model in models:
                    print(f" - {model.get('name', 'Unknown model')}")

                # Check if our configured model is available
                model_names = [m.get('name') for m in models]
                if config.local_model_name in model_names:
                    print(f" ✓ Configured model '{config.local_model_name}' is available")
                else:
                    print(f" ⚠ Configured model '{config.local_model_name}' not found")
                    if models:
                        print(f" Available models: {', '.join(model_names)}")
                    else:
                        print(f" No models found. Try running: ollama pull {config.local_model_name}")
            except Exception as e:
                print(f" Error parsing response: {e}")
                print(f" Response text: {response.text[:200]}...")
        else:
            print(f" ✗ Unexpected status code: {response.status_code}")
            print(f" Response: {response.text[:200]}...")
    except requests.exceptions.Timeout:
        print(" ✗ Request timed out (took more than 10 seconds)")
        print(" This may indicate network issues or an unresponsive Ollama server")
    except requests.exceptions.ConnectionError as e:
        print(f" ✗ Connection error: {e}")
        print(" This may indicate that the Ollama server is not running or the URL is incorrect")
    except Exception as e:
        print(f" ✗ Unexpected error: {e}")

    print()
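    # For reference: with "stream": False, /api/generate is expected to return
    # a single JSON object along the lines of {"model": "...", "response": "...",
    # "done": true}; Test 2 below reads only the "response" field.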
    # Test 2: Test model generation if we can connect
    print("Test 2: Testing model generation...")
    try:
        # First verify we can connect
        response = requests.get(
            f"{test_url}/api/tags",
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            data = response.json()
            models = data.get("models", [])
            model_names = [m.get('name') for m in models]
            if config.local_model_name in model_names:
                print(f" Testing generation with model: {config.local_model_name}")
                # Test generation
                generate_payload = {
                    "model": config.local_model_name,
                    "prompt": "Hello, please respond with just the word 'Success' in uppercase.",
                    "stream": False
                }
                generate_response = requests.post(
                    f"{test_url}/api/generate",
                    json=generate_payload,
                    headers=headers,
                    timeout=30
                )
                if generate_response.status_code == 200:
                    generate_data = generate_response.json()
                    response_text = generate_data.get("response", "").strip()
                    print(f" ✓ Model generation successful")
                    print(f" Response: {response_text}")
                else:
                    print(f" ✗ Model generation failed with status {generate_response.status_code}")
                    print(f" Response: {generate_response.text[:200]}...")
            else:
                print(f" ⚠ Skipping generation test - model '{config.local_model_name}' not available")
                if not models:
                    print(f" No models found. Try running: ollama pull {config.local_model_name}")
        else:
            print(f" ⚠ Skipping generation test - cannot connect to Ollama host")
    except requests.exceptions.Timeout:
        print(" ✗ Generation test timed out (took more than 30 seconds)")
        print(" This may indicate the model is still loading or the server is under heavy load")
    except requests.exceptions.ConnectionError as e:
        print(f" ✗ Generation test connection error: {e}")
    except Exception as e:
        print(f" ✗ Generation test error: {e}")

    print()
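    # Note: "localhost" below refers to the machine running this script, so the
    # fallback is only meaningful when the script runs on the same host as
    # Ollama (local development rather than a remote or hosted environment).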
    # Test 3: Check localhost as fallback
    print("Test 3: Checking localhost fallback (if different from configured host)...")
    if test_url != "http://localhost:11434":
        try:
            local_response = requests.get(
                "http://localhost:11434/api/tags",
                headers=headers,
                timeout=5
            )
            print(f" Local Status Code: {local_response.status_code}")
            if local_response.status_code == 200:
                print(" ✓ Successfully connected to localhost Ollama")
                try:
                    local_data = local_response.json()
                    local_models = local_data.get("models", [])
                    print(f" Local Available Models: {len(local_models)}")
                except Exception as e:
                    print(f" Error parsing local response: {e}")
            else:
                print(f" ✗ Local connection failed with status {local_response.status_code}")
        except Exception as e:
            print(f" ✗ Local connection error: {e}")
    else:
        print(" Skipping (configured host is already localhost)")

    print()
    print("=== Diagnostic Complete ===")
    print()
    print("Troubleshooting Tips:")
    print("1. If connection fails, verify your Ollama server is running: ollama serve")
    print("2. If no models found, pull a model: ollama pull mistral")
    print("3. If ngrok issues, verify your tunnel is active and URL is correct")
    print("4. If timeout issues, check firewall settings and network connectivity")


if __name__ == "__main__":
    # Check if user provided a custom ngrok URL
    custom_url = None
    if len(sys.argv) > 1:
        custom_url = sys.argv[1]
        print(f"Using custom ngrok URL: {custom_url}")

    test_ollama_connectivity(custom_url)