|
|
import streamlit as st |
|
|
import time |
|
|
import os |
|
|
import sys |
|
|
from datetime import datetime |
|
|
from pathlib import Path |
|
|
sys.path.append(str(Path(__file__).parent)) |
|
|
|
|
|
from utils.config import config |
|
|
from core.llm import send_to_ollama, send_to_hf |
|
|
from core.session import session_manager |
|
|
from core.memory import check_redis_health |
|
|
import logging |
|
|
|
|
|
|
|
|
# Module-wide logging: INFO level, module-named logger for this entry point.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Page chrome — st.set_page_config must run before any other st.* call.
st.set_page_config(
    page_title="AI Life Coach",
    page_icon="🧠",
    layout="wide",
)
|
|
|
|
|
|
|
|
# One-time defaults for every session_state key this page reads/writes.
# A single dict + setdefault loop replaces the repetitive
# `if key not in st.session_state:` ladder, so new keys get added in
# exactly one place.
_SESSION_DEFAULTS = {
    "messages": [],                    # chat transcript rendered in the main area
    "last_error": "",                  # last provider failure message (debug panel)
    "last_ollama_call_success": None,  # tri-state: None = never called yet
    "last_ollama_call_time": "",
    "last_ollama_response_preview": "",
    "last_hf_call_success": None,      # tri-state: None = never called yet
    "last_hf_call_time": "",
    "last_hf_response_preview": "",
}
for _key, _default in _SESSION_DEFAULTS.items():
    # setdefault only inserts when the key is missing, matching the
    # original `if key not in st.session_state` guards exactly.
    st.session_state.setdefault(_key, _default)
|
|
|
|
|
|
|
|
# ---- Sidebar: model selection, server URL, history controls, debug panel ----
with st.sidebar:
    st.title("AI Life Coach")
    st.markdown("Your personal AI-powered life development assistant")

    # Model picker: display label -> Ollama model tag.
    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest",
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0,
    )
    st.session_state.selected_model = model_options[selected_model_name]

    # Ollama endpoint, persisted across reruns via session_state.
    st.session_state.ngrok_url = st.text_input(
        "Ollama Server URL",
        value=st.session_state.get("ngrok_url", "http://localhost:11434"),
        help="Enter the URL to your Ollama server",
    )

    st.subheader("Conversation History")
    if st.button("Clear History"):
        st.session_state.messages = []
        st.success("History cleared!")

    with st.expander("🔍 Advanced Debug", expanded=False):
        st.subheader("System Controls")

        # NOTE(review): these three checkboxes only *display* current config;
        # their returned values are never read back anywhere in this file,
        # so toggling them has no effect — confirm whether that is intended.
        fallback_mode = st.checkbox(
            "Enable Fallback Mode",
            value=config.use_fallback,
            help="Enable automatic fallback between providers",
        )
        hf_enabled = st.checkbox(
            "Enable HF Deep Analysis",
            value=bool(config.hf_token),
            help="Enable Hugging Face endpoint coordination",
        )
        web_search_enabled = st.checkbox(
            "Enable Web Search",
            value=bool(os.getenv("TAVILY_API_KEY")),
            help="Enable Tavily/DDG web search integration",
        )

        st.subheader("Provider Status")

        # Ollama liveness check; import is deferred so a missing service
        # module degrades to a warning instead of breaking the whole page.
        try:
            from services.ollama_monitor import check_ollama_status
            ollama_status = check_ollama_status()
            if ollama_status.get("running"):
                st.success(f"🦙 Ollama: Running ({ollama_status.get('model_loaded', 'Unknown')})")
            else:
                st.error("🦙 Ollama: Unavailable")
        except Exception:
            # was `except Exception as e` with `e` unused, and a
            # placeholder-free f-string — same rendered text, cleaner code
            st.warning("🦙 Ollama: Status check failed")

        # HF endpoint status; the monitor encodes health as an emoji prefix.
        try:
            from services.hf_endpoint_monitor import hf_monitor
            hf_status = hf_monitor.get_status_summary()
            if "🟢" in hf_status:
                st.success(f"🤗 HF Endpoint: {hf_status.replace('🟢 ', '')}")
            elif "🟡" in hf_status:
                st.warning(f"🤗 HF Endpoint: {hf_status.replace('🟡 ', '')}")
            else:
                st.error(f"🤗 HF Endpoint: {hf_status.replace('🔴 ', '')}")
        except Exception:
            st.warning("🤗 HF Endpoint: Status check failed")

        # Redis-backed session store health.
        redis_healthy = check_redis_health()
        if redis_healthy:
            st.success("💾 Redis: Connected")
        else:
            st.error("💾 Redis: Disconnected")

        st.subheader("External Services")

        if os.getenv("TAVILY_API_KEY"):
            st.success("🔍 Web Search: Tavily API Active")
        else:
            st.info("🔍 Web Search: Not configured")

        if config.openweather_api_key:
            st.success("🌤️ Weather: API Active")
        else:
            st.info("🌤️ Weather: Not configured")

        # Session stats for the hard-coded demo user.
        try:
            user_session = session_manager.get_session("default_user")
            conversation_length = len(user_session.get("conversation", []))
            st.info(f"💬 Conversation Length: {conversation_length} messages")
        except Exception:
            # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed
            st.info("💬 Session: Not initialized")

        st.subheader("Web Search Activity")

        if 'recent_searches' in st.session_state:
            for search in st.session_state.recent_searches[-3:]:
                st.caption(f"🔍 {search['query'][:30]}... ({search['timestamp']})")
        else:
            st.info("No recent searches")

        # One-shot connectivity probe against the Tavily API.
        if st.button("🧪 Test Web Search"):
            try:
                from tavily import TavilyClient
                if os.getenv("TAVILY_API_KEY"):
                    tavily = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
                    test_result = tavily.search("AI life coach benefits", max_results=1)
                    st.success("✅ Web search working")
                    if test_result.get('results'):
                        st.caption(f"Sample: {test_result['results'][0].get('title', 'No title')}")
                else:
                    st.warning("Web API key not configured")
            except Exception as e:
                st.error(f"❌ Web search test failed: {e}")

        st.subheader("Configuration Details")

        st.caption(f"**Primary Provider**: Ollama ({config.local_model_name})")
        if config.hf_token:
            # was a placeholder-free f-string; rendered text unchanged
            st.caption("**Secondary Provider**: Hugging Face")
            st.caption(f"**HF Endpoint**: {config.hf_api_url}")

        env_type = "☁️ HF Space" if config.is_hf_space else "🏠 Local"
        st.caption(f"**Environment**: {env_type}")

        # Summarize which optional integrations are currently active.
        features = []
        if config.use_fallback:
            features.append("Fallback Mode")
        if os.getenv("TAVILY_API_KEY"):
            features.append("Web Search")
        if config.openweather_api_key:
            features.append("Weather Data")
        if config.hf_token:
            features.append("Deep Analysis")

        if features:
            st.caption(f"**Active Features**: {', '.join(features)}")
        else:
            st.caption("**Active Features**: None")
|
|
|
|
|
|
|
|
# ---- Main chat area ---------------------------------------------------------
st.title("🧠 AI Life Coach")
st.markdown("Ask me anything about personal development, goal setting, or life advice!")

# Streamlit reruns the whole script on every interaction, so the transcript
# kept in session_state.messages is re-rendered from scratch each time.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])
|
|
|
|
|
|
|
|
# Message composer: a wide text box next to a narrow Send button.
input_col, button_col = st.columns([4, 1])
with input_col:
    user_input = st.text_input(
        "Your message...",
        key="user_message_input",
        placeholder="Type your message here...",
        label_visibility="collapsed",
    )
with button_col:
    send_button = st.button("Send", key="send_message_button", use_container_width=True)
|
|
|
|
|
# ---- Send handler: call Ollama, fall through to HF, persist the turn --------
if send_button and user_input.strip():
    # Echo the user's message immediately and append it to the transcript.
    with st.chat_message("user"):
        st.markdown(user_input)

    st.session_state.messages.append({"role": "user", "content": user_input})

    # Clear any stale error from a previous attempt.
    st.session_state.last_error = ""

    # Build a short rolling context window (last 5 stored turns + this one)
    # from the server-side session for the hard-coded demo user.
    user_session = session_manager.get_session("default_user")
    conversation = user_session.get("conversation", [])
    conversation_history = conversation[-5:]
    conversation_history.append({"role": "user", "content": user_input})

    def _record_call(provider: str, success: bool, preview: str) -> None:
        """Record debug-panel bookkeeping for one provider call.

        provider is "ollama" or "hf"; stores a success flag, a UTC
        timestamp string, and a 200-char preview of the response (or the
        error text on failure).  Replaces four copy-pasted triples.
        """
        st.session_state[f"last_{provider}_call_success"] = success
        st.session_state[f"last_{provider}_call_time"] = str(datetime.utcnow())
        st.session_state[f"last_{provider}_response_preview"] = (preview or "")[:200]

    with st.chat_message("assistant"):
        with st.spinner("AI Coach is thinking..."):
            ai_response = None
            backend_used = ""
            error_msg = ""

            # Primary provider: Ollama.
            # NOTE(review): Ollama is only attempted when use_fallback is
            # FALSE. If use_fallback means "allow falling back to HF", this
            # condition looks inverted — confirm against config semantics
            # before changing; behavior preserved here.
            if not config.use_fallback:
                try:
                    ai_response = send_to_ollama(
                        user_input,
                        conversation_history,
                        st.session_state.ngrok_url,
                        st.session_state.selected_model,
                    )
                    backend_used = "Ollama"
                    _record_call("ollama", True, ai_response)
                except Exception as e:
                    error_msg = f"Ollama error: {str(e)}"
                    _record_call("ollama", False, str(e))

            # Secondary provider: Hugging Face, only when Ollama produced
            # nothing and an HF token is configured.
            if not ai_response and config.hf_token:
                try:
                    ai_response = send_to_hf(user_input, conversation_history)
                    backend_used = "Hugging Face"
                    _record_call("hf", True, ai_response)
                except Exception as e:
                    error_msg = f"Hugging Face error: {str(e)}"
                    _record_call("hf", False, str(e))

            if ai_response:
                st.markdown(f"{ai_response}")

                # Persist both turns in the server-side session store.
                conversation.append({"role": "user", "content": user_input})
                conversation.append({"role": "assistant", "content": ai_response})
                user_session["conversation"] = conversation
                session_manager.update_session("default_user", user_session)

                st.session_state.messages.append({"role": "assistant", "content": ai_response})
            else:
                st.error("Failed to get response from both providers.")
                st.session_state.last_error = error_msg or "No response from either provider"

    # Rerun so the input box clears and the transcript re-renders.
    st.experimental_rerun()
|
|
|