# services/ollama_monitor.py
# Ollama status monitor for the AI-Life-Coach-Streamlit2 app.
import requests

from utils.config import config


def check_ollama_status():
    """
    Checks if Ollama is running and which model is loaded.

    Returns:
        dict: {
            "running": True/False,
            "model_loaded": "mistral-7b" or None,
            "ngrok_url": "https://a877ef1aa487.ngrok-free.app/",
            "local_url": "http://localhost:11434/"
        }
    """
    ngrok_url = "https://a877ef1aa487.ngrok-free.app/"
    local_url = config.ollama_host  # From .env

    def _get_model_from_url(base_url):
        # Query Ollama's /api/tags endpoint and return the first model name, if any.
        try:
            # Strip any trailing slash so the request path does not contain "//".
            response = requests.get(f"{base_url.rstrip('/')}/api/tags", timeout=3)
            if response.status_code == 200:
                models = response.json().get("models", [])
                if models:
                    return models[0].get("name")
        except Exception:
            return None
        return None

    # Prefer the local instance; fall back to the ngrok tunnel.
    local_model = _get_model_from_url(local_url)
    remote_model = _get_model_from_url(ngrok_url)
    model_loaded = local_model or remote_model
    running = bool(model_loaded)

    return {
        "running": running,
        "model_loaded": model_loaded,
        "ngrok_url": ngrok_url,
        "local_url": local_url,
    }
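

# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal example of how check_ollama_status() might be called, e.g. for a
# quick command-line check before starting the Streamlit app. The __main__
# guard and the printed messages are assumptions for illustration only.
if __name__ == "__main__":
    status = check_ollama_status()
    if status["running"]:
        print(f"Ollama is running with model: {status['model_loaded']}")
    else:
        print(f"Ollama is not reachable at {status['local_url']} or {status['ngrok_url']}")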