File size: 1,203 Bytes
11f438b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import requests
from utils.config import config

def check_ollama_status():
    """
    Checks if Ollama is running and which model is loaded.

    Probes the local endpoint (from .env) first, then the public ngrok
    tunnel, by hitting each server's ``/api/tags`` endpoint. The first
    model name found wins ("local or remote").

    Returns:
        dict: {
            "running": True/False,
            "model_loaded": "mistral-7b" or None,
            "ngrok_url": "https://a877ef1aa487.ngrok-free.app/",
            "local_url": "http://localhost:11434/"
        }
    """
    # NOTE(review): the ngrok URL is hard-coded and ephemeral — consider
    # moving it into config alongside ollama_host.
    ngrok_url = "https://a877ef1aa487.ngrok-free.app/"
    local_url = config.ollama_host  # From .env

    def _get_model_from_url(base_url):
        # Query one Ollama server; return the first loaded model's name,
        # or None if the server is unreachable, errors, or has no models.
        try:
            # rstrip("/") avoids a double slash ("...//api/tags") when the
            # base URL ends with "/" — both configured URLs do.
            response = requests.get(f"{base_url.rstrip('/')}/api/tags", timeout=3)
            if response.status_code == 200:
                models = response.json().get("models", [])
                if models:
                    return models[0].get("name")
        except (requests.RequestException, ValueError):
            # Transport failure (refused/timeout/DNS) or malformed JSON
            # body — either way, treat this endpoint as "not running".
            return None
        return None

    local_model = _get_model_from_url(local_url)
    remote_model = _get_model_from_url(ngrok_url)

    # Running iff at least one endpoint reported a loaded model.
    model_loaded = local_model or remote_model
    running = bool(model_loaded)

    return {
        "running": running,
        "model_loaded": model_loaded,
        "ngrok_url": ngrok_url,
        "local_url": local_url,
    }