# Force redeploy trigger - version 1.9
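"""Streamlit front end for the AI Life Coach.

Talks to a local Ollama server exposed through an ngrok tunnel, with a
Hugging Face fallback when Ollama is unreachable. Conversation history is
loaded from Redis via core.memory. Run with: streamlit run <this file>.
"""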
import streamlit as st
from utils.config import config
import requests
import json
import os
from core.memory import load_user_state, check_redis_health

# Set page config
st.set_page_config(page_title="AI Life Coach", page_icon="🧘", layout="centered")

# Initialize session state
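# st.session_state persists across Streamlit's script reruns, so these
# defaults are only applied on the first run of a browser session.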
if 'ngrok_url' not in st.session_state:
    st.session_state.ngrok_url = config.ollama_host
if 'model_status' not in st.session_state:
    st.session_state.model_status = "checking"
if 'available_models' not in st.session_state:
    st.session_state.available_models = []
if 'selected_model' not in st.session_state:
    st.session_state.selected_model = config.local_model_name

# Sidebar for user selection
st.sidebar.title("🧘 AI Life Coach")
user = st.sidebar.selectbox("Select User", ["Rob", "Sarah"])

# Ngrok URL input in sidebar
st.sidebar.markdown("---")
st.sidebar.subheader("Ollama Connection")
ngrok_input = st.sidebar.text_input("Ngrok URL", value=st.session_state.ngrok_url)
if st.sidebar.button("Update Ngrok URL"):
    st.session_state.ngrok_url = ngrok_input
    st.session_state.model_status = "checking"
    st.session_state.available_models = []
    st.sidebar.success("Ngrok URL updated!")
    st.rerun()  # st.experimental_rerun() is deprecated in recent Streamlit releases

# Headers to skip ngrok browser warning
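# Free-tier ngrok endpoints serve an HTML interstitial to browser-like
# clients unless this header is present, which would break JSON parsing.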
NGROK_HEADERS = {
    "ngrok-skip-browser-warning": "true",
    "User-Agent": "AI-Life-Coach-App"
}

# Fetch available models
def fetch_available_models(ngrok_url):
    try:
        response = requests.get(
            f"{ngrok_url}/api/tags",
            headers=NGROK_HEADERS,
            timeout=5
        )
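        # /api/tags returns JSON shaped like {"models": [{"name": ...}, ...]};
        # only the model names are kept here.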
        if response.status_code == 200:
            models_data = response.json().get("models", [])
            return [m.get("name") for m in models_data]
    except Exception:
        pass
    return []

# Update available models
if st.session_state.ngrok_url and st.session_state.model_status != "unreachable":
    model_names = fetch_available_models(st.session_state.ngrok_url)
    if model_names:
        st.session_state.available_models = model_names
        # If current selected model not in list, select the first one
        if st.session_state.selected_model not in model_names:
            st.session_state.selected_model = model_names[0]

# Model selector dropdown
st.sidebar.markdown("---")
st.sidebar.subheader("Model Selection")
if st.session_state.available_models:
    selected_model = st.sidebar.selectbox(
        "Select Model",
        st.session_state.available_models,
        index=st.session_state.available_models.index(st.session_state.selected_model)
        if st.session_state.selected_model in st.session_state.available_models
        else 0
    )
    st.session_state.selected_model = selected_model
else:
    st.sidebar.warning("No models available - check Ollama connection")
    model_input = st.sidebar.text_input("Or enter model name", value=st.session_state.selected_model)
    st.session_state.selected_model = model_input

st.sidebar.markdown("---")

# Get environment info
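# Hugging Face Spaces sets SPACE_ID (a "user/space" repo id, not a URL);
# its presence is used here purely as an "are we running on HF" flag.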
BASE_URL = os.environ.get("SPACE_ID", "")
IS_HF_SPACE = bool(BASE_URL)

# Fetch Ollama status
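# Note: besides returning a status dict, this also updates
# st.session_state.available_models and model_status as side effects.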
def get_ollama_status(ngrok_url):
    try:
        response = requests.get(
            f"{ngrok_url}/api/tags",
            headers=NGROK_HEADERS,
            timeout=10
        )
        if response.status_code == 200:
            models = response.json().get("models", [])
            model_names = [m.get("name") for m in models]
            st.session_state.available_models = model_names
            
            if models:
                selected_model_available = st.session_state.selected_model in model_names
                return {
                    "running": True,
                    "model_loaded": st.session_state.selected_model if selected_model_available else model_names[0],
                    "remote_host": ngrok_url,
                    "available_models": model_names,
                    "selected_model_available": selected_model_available
                }
            else:
                st.session_state.model_status = "no_models"
                return {
                    "running": False,
                    "model_loaded": None,
                    "remote_host": ngrok_url,
                    "message": "Connected to Ollama but no models found"
                }
        else:
            st.session_state.model_status = "unreachable"
            return {
                "running": False,
                "model_loaded": None,
                "error": f"HTTP {response.status_code}",
                "remote_host": ngrok_url
            }
    except Exception as e:
        st.session_state.model_status = "unreachable"
        return {
            "running": False,
            "model_loaded": None,
            "error": str(e),
            "remote_host": ngrok_url
        }

# Load conversation history
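# History is stored in Redis as a JSON-encoded list of
# {"role": ..., "content": ...} dicts under the "conversation" key.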
def get_conversation_history(user_id):
    try:
        user_state = load_user_state(user_id)
        if user_state and "conversation" in user_state:
            return json.loads(user_state["conversation"])
    except Exception as e:
        st.warning(f"Could not load conversation history: {e}")
    return []

# Get Ollama status with null safety
ollama_status = get_ollama_status(st.session_state.ngrok_url)

# Add null safety check
if ollama_status is None:
    ollama_status = {
        "running": False,
        "model_loaded": None,
        "error": "Failed to get Ollama status",
        "remote_host": st.session_state.ngrok_url
    }

# Update model status
if ollama_status.get("running", False):
    if ollama_status.get("available_models"):
        st.session_state.model_status = "ready"
    else:
        st.session_state.model_status = "no_models"
else:
    st.session_state.model_status = "unreachable"

# Determine if we should use fallback
use_fallback = not ollama_status.get("running", False) or config.use_fallback

# Display Ollama status
if use_fallback:
    st.sidebar.warning("🌐 Using Hugging Face fallback (Ollama not available)")
    if "error" in ollama_status:
        st.sidebar.caption(f"Error: {ollama_status['error'][:50]}...")
else:
    model_status_msg = ollama_status.get('model_loaded', 'Unknown')
    if ollama_status.get('selected_model_available', True):
        st.sidebar.success(f"🧠 Ollama Model: {model_status_msg}")
    else:
        st.sidebar.warning(f"🧠 Ollama Model: {model_status_msg} (selected model not available)")
    st.sidebar.info(f"Connected to: {ollama_status['remote_host']}")

# Status indicators
model_status_container = st.sidebar.empty()
if st.session_state.model_status == "ready":
    model_status_container.success("✅ Model Ready")
elif st.session_state.model_status == "checking":
    model_status_container.info("🔍 Checking model...")
elif st.session_state.model_status == "no_models":
    model_status_container.warning("⚠️ No models found")
else:
    model_status_container.error("❌ Ollama unreachable")

redis_status_container = st.sidebar.empty()
if check_redis_health():
    redis_status_container.success("✅ Redis Connected")
else:
    redis_status_container.warning("⚠️ Redis Not Available")

# Main chat interface
st.title("🧘 AI Life Coach")
st.markdown("Talk to your personal development assistant.")

# Show detailed status
with st.expander("🔍 Connection Status"):
    st.write("Ollama Status:", ollama_status)
    st.write("Model Status:", st.session_state.model_status)
    st.write("Selected Model:", st.session_state.selected_model)
    st.write("Available Models:", st.session_state.available_models)
    st.write("Environment Info:")
    st.write("- Is HF Space:", IS_HF_SPACE)
    st.write("- Base URL:", BASE_URL or "Not in HF Space")
    st.write("- Current Ngrok URL:", st.session_state.ngrok_url)
    st.write("- Using Fallback:", use_fallback)
    st.write("- Redis Health:", check_redis_health())

# Function to send message to Ollama
def send_to_ollama(user_input, conversation_history, ngrok_url, model_name):
    try:
        # Use the correct chat endpoint with proper payload
        payload = {
            "model": model_name,
            "messages": conversation_history,
            "stream": False,
            "options": {
                "temperature": 0.7,
                "top_p": 0.9
            }
        }
        response = requests.post(
            f"{ngrok_url}/api/chat",
            json=payload,
            headers=NGROK_HEADERS,
            timeout=60
        )
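        # A successful /api/chat response with stream=False looks roughly like
        # {"message": {"role": "assistant", "content": "..."}, "done": true}.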
        if response.status_code == 200:
            response_data = response.json()
            return response_data.get("message", {}).get("content", "")
        else:
            st.error(f"Ollama API error: {response.status_code}")
            st.error(response.text[:200])
            return None
    except Exception as e:
        st.error(f"Connection error: {e}")
        return None

# Function to send message to Hugging Face (fallback)
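# The history is flattened into a "Human:/Assistant:" completion-style prompt;
# LLMClient (core.llm) is assumed here to accept a plain prompt string.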
def send_to_hf(user_input, conversation_history):
    try:
        from core.llm import LLMClient
        llm_client = LLMClient(provider="huggingface")
        # Format for HF
        prompt = "You are a helpful life coach. "
        for msg in conversation_history:
            if msg["role"] == "user":
                prompt += f"Human: {msg['content']} "
            elif msg["role"] == "assistant":
                prompt += f"Assistant: {msg['content']} "
        prompt += "Assistant:"
        response = llm_client.generate(prompt, max_tokens=500, stream=False)
        return response
    except Exception as e:
        st.error(f"Hugging Face API error: {e}")
        return None

# Display conversation history
conversation = get_conversation_history(user)
for msg in conversation:
    role = msg["role"].capitalize()
    content = msg["content"]
    st.markdown(f"**{role}:** {content}")

# Chat input
user_input = st.text_input("Your message...", key="input")
if st.button("Send"):
    if not user_input.strip():
        st.warning("Please enter a message.")
    else:
        # Display user message
        st.markdown(f"**You:** {user_input}")
        
        # Prepare conversation history
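        # Only the last five stored turns are sent as context to keep the
        # prompt small; widen the slice if the model's context window allows.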
        conversation_history = [{"role": msg["role"], "content": msg["content"]} for msg in conversation[-5:]]
        conversation_history.append({"role": "user", "content": user_input})
        
        # Send to appropriate backend
        with st.spinner("AI Coach is thinking..."):
            if use_fallback:
                ai_response = send_to_hf(user_input, conversation_history)
                backend_used = "Hugging Face"
            else:
                ai_response = send_to_ollama(
                    user_input,
                    conversation_history,
                    st.session_state.ngrok_url,
                    st.session_state.selected_model
                )
                backend_used = "Ollama"
            
            if ai_response:
                st.markdown(f"**AI Coach ({backend_used}):** {ai_response}")
            else:
                st.error(f"Failed to get response from {backend_used}.")