"""AI Life Coach — Streamlit chat front-end.

Single-page app that routes user messages to a local Ollama server first
and falls back to a Hugging Face endpoint, with a sidebar debug panel for
provider/service health and configuration visibility.
"""

import streamlit as st
import time
import os
import sys
from datetime import datetime, timezone
from pathlib import Path

# Make sibling packages (utils, core, services) importable when run as a script.
sys.path.append(str(Path(__file__).parent))

from utils.config import config
from core.llm import send_to_ollama, send_to_hf
from core.session import session_manager
from core.memory import check_redis_health
import logging

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

st.set_page_config(page_title="AI Life Coach", page_icon="๐Ÿง ", layout="wide")

# ---------------------------------------------------------------------------
# Session-state initialization: chat transcript plus last-call telemetry for
# each provider (success flag, timestamp, response preview) used for debugging.
# ---------------------------------------------------------------------------
if "messages" not in st.session_state:
    st.session_state.messages = []
if "last_error" not in st.session_state:
    st.session_state.last_error = ""
if "last_ollama_call_success" not in st.session_state:
    st.session_state.last_ollama_call_success = None
if "last_ollama_call_time" not in st.session_state:
    st.session_state.last_ollama_call_time = ""
if "last_ollama_response_preview" not in st.session_state:
    st.session_state.last_ollama_response_preview = ""
if "last_hf_call_success" not in st.session_state:
    st.session_state.last_hf_call_success = None
if "last_hf_call_time" not in st.session_state:
    st.session_state.last_hf_call_time = ""
if "last_hf_response_preview" not in st.session_state:
    st.session_state.last_hf_response_preview = ""

# ---------------------------------------------------------------------------
# Sidebar: model picker, server URL, history controls, and the debug panel.
# ---------------------------------------------------------------------------
with st.sidebar:
    st.title("AI Life Coach")
    st.markdown("Your personal AI-powered life development assistant")

    # Model selection (display name -> Ollama model tag)
    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest"
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0
    )
    st.session_state.selected_model = model_options[selected_model_name]

    # Ollama URL input (persisted in session state across reruns)
    st.session_state.ngrok_url = st.text_input(
        "Ollama Server URL",
        value=st.session_state.get("ngrok_url", "http://localhost:11434"),
        help="Enter the URL to your Ollama server"
    )

    # Conversation history
    st.subheader("Conversation History")
    if st.button("Clear History"):
        st.session_state.messages = []
        st.success("History cleared!")

    # Enhanced Debug Panel
    with st.expander("๐Ÿ” Advanced Debug", expanded=False):
        st.subheader("System Controls")

        # NOTE(review): these toggles are rendered but their values are never
        # read elsewhere in this script — presumably display-only for now.
        fallback_mode = st.checkbox(
            "Enable Fallback Mode",
            value=config.use_fallback,
            help="Enable automatic fallback between providers"
        )
        hf_enabled = st.checkbox(
            "Enable HF Deep Analysis",
            value=bool(config.hf_token),
            help="Enable Hugging Face endpoint coordination"
        )
        web_search_enabled = st.checkbox(
            "Enable Web Search",
            value=bool(os.getenv("TAVILY_API_KEY")),
            help="Enable Tavily/DDG web search integration"
        )

        st.subheader("Provider Status")

        # Ollama status (import kept local so a missing service module only
        # degrades this panel instead of breaking the whole app)
        try:
            from services.ollama_monitor import check_ollama_status
            ollama_status = check_ollama_status()
            if ollama_status.get("running"):
                st.success(f"๐Ÿฆ™ Ollama: Running ({ollama_status.get('model_loaded', 'Unknown')})")
            else:
                st.error("๐Ÿฆ™ Ollama: Unavailable")
        except Exception as e:
            logger.warning("Ollama status check failed: %s", e)
            st.warning("๐Ÿฆ™ Ollama: Status check failed")

        # HF endpoint status; the summary string embeds a colored-dot marker
        # that we match on to pick the severity widget.
        try:
            from services.hf_endpoint_monitor import hf_monitor
            hf_status = hf_monitor.get_status_summary()
            if "๐ŸŸข" in hf_status:
                st.success(f"๐Ÿค— HF Endpoint: {hf_status.replace('๐ŸŸข ', '')}")
            elif "๐ŸŸก" in hf_status:
                st.warning(f"๐Ÿค— HF Endpoint: {hf_status.replace('๐ŸŸก ', '')}")
            else:
                st.error(f"๐Ÿค— HF Endpoint: {hf_status.replace('๐Ÿ”ด ', '')}")
        except Exception as e:
            logger.warning("HF endpoint status check failed: %s", e)
            st.warning("๐Ÿค— HF Endpoint: Status check failed")

        # Redis status
        redis_healthy = check_redis_health()
        if redis_healthy:
            st.success("๐Ÿ’พ Redis: Connected")
        else:
            st.error("๐Ÿ’พ Redis: Disconnected")

        st.subheader("External Services")

        # Web search status
        if os.getenv("TAVILY_API_KEY"):
            st.success("๐Ÿ” Web Search: Tavily API Active")
        else:
            st.info("๐Ÿ” Web Search: Not configured")

        # Weather service
        if config.openweather_api_key:
            st.success("๐ŸŒค๏ธ Weather: API Active")
        else:
            st.info("๐ŸŒค๏ธ Weather: Not configured")

        # Session stats — narrowed from a bare `except:` so Ctrl-C and
        # SystemExit are no longer swallowed.
        try:
            user_session = session_manager.get_session("default_user")
            conversation_length = len(user_session.get("conversation", []))
            st.info(f"๐Ÿ’ฌ Conversation Length: {conversation_length} messages")
        except Exception:
            st.info("๐Ÿ’ฌ Session: Not initialized")

        # Real-time web search activity
        st.subheader("Web Search Activity")
        if 'recent_searches' in st.session_state:
            for search in st.session_state.recent_searches[-3:]:  # Last 3 searches
                st.caption(f"๐Ÿ” {search['query'][:30]}... ({search['timestamp']})")
        else:
            st.info("No recent searches")

        # One-shot connectivity test against the Tavily API
        if st.button("๐Ÿงช Test Web Search"):
            try:
                from tavily import TavilyClient
                if os.getenv("TAVILY_API_KEY"):
                    tavily = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
                    test_result = tavily.search("AI life coach benefits", max_results=1)
                    st.success("โœ… Web search working")
                    if test_result.get('results'):
                        st.caption(f"Sample: {test_result['results'][0].get('title', 'No title')}")
                else:
                    st.warning("Web API key not configured")
            except Exception as e:
                st.error(f"โŒ Web search test failed: {e}")

        # Configuration display
        st.subheader("Configuration Details")
        st.caption(f"**Primary Provider**: Ollama ({config.local_model_name})")
        if config.hf_token:
            st.caption("**Secondary Provider**: Hugging Face")
            st.caption(f"**HF Endpoint**: {config.hf_api_url}")

        # Environment detection
        env_type = "โ˜๏ธ HF Space" if config.is_hf_space else "๐Ÿ  Local"
        st.caption(f"**Environment**: {env_type}")

        # Feature flags
        features = []
        if config.use_fallback:
            features.append("Fallback Mode")
        if os.getenv("TAVILY_API_KEY"):
            features.append("Web Search")
        if config.openweather_api_key:
            features.append("Weather Data")
        if config.hf_token:
            features.append("Deep Analysis")
        if features:
            st.caption(f"**Active Features**: {', '.join(features)}")
        else:
            st.caption("**Active Features**: None")

# ---------------------------------------------------------------------------
# Main chat interface
# ---------------------------------------------------------------------------
st.title("๐Ÿง  AI Life Coach")
st.markdown("Ask me anything about personal development, goal setting, or life advice!")

# Replay the stored transcript
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Chat input and send button
col1, col2 = st.columns([4, 1])
with col1:
    user_input = st.text_input(
        "Your message...",
        key="user_message_input",
        placeholder="Type your message here...",
        label_visibility="collapsed"
    )
with col2:
    send_button = st.button("Send", key="send_message_button", use_container_width=True)

if send_button and user_input.strip():
    # Echo the user message and record it
    with st.chat_message("user"):
        st.markdown(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})
    st.session_state.last_error = ""

    # Build the prompt context: last 5 stored turns plus the new message
    user_session = session_manager.get_session("default_user")
    conversation = user_session.get("conversation", [])
    conversation_history = conversation[-5:]  # Last 5 messages
    conversation_history.append({"role": "user", "content": user_input})

    with st.chat_message("assistant"):
        with st.spinner("AI Coach is thinking..."):
            ai_response = None
            backend_used = ""
            error_msg = ""

            # Try Ollama first unless fallback mode is active.
            # NOTE(review): skipping Ollama when use_fallback is True looks
            # inverted relative to the toggle's help text — confirm intent.
            if not config.use_fallback:
                try:
                    ai_response = send_to_ollama(
                        user_input,
                        conversation_history,
                        st.session_state.ngrok_url,
                        st.session_state.selected_model
                    )
                    backend_used = "Ollama"
                    # Capture success telemetry (timezone-aware; utcnow() is
                    # deprecated since Python 3.12)
                    st.session_state.last_ollama_call_success = True
                    st.session_state.last_ollama_call_time = str(datetime.now(timezone.utc))
                    st.session_state.last_ollama_response_preview = ai_response[:200] if ai_response else ""
                except Exception as e:
                    error_msg = f"Ollama error: {str(e)}"
                    # Capture failure telemetry
                    st.session_state.last_ollama_call_success = False
                    st.session_state.last_ollama_call_time = str(datetime.now(timezone.utc))
                    st.session_state.last_ollama_response_preview = str(e)[:200]

            # Fallback to Hugging Face when Ollama produced nothing
            if not ai_response and config.hf_token:
                try:
                    ai_response = send_to_hf(user_input, conversation_history)
                    backend_used = "Hugging Face"
                    st.session_state.last_hf_call_success = True
                    st.session_state.last_hf_call_time = str(datetime.now(timezone.utc))
                    st.session_state.last_hf_response_preview = ai_response[:200] if ai_response else ""
                except Exception as e:
                    error_msg = f"Hugging Face error: {str(e)}"
                    st.session_state.last_hf_call_success = False
                    st.session_state.last_hf_call_time = str(datetime.now(timezone.utc))
                    st.session_state.last_hf_response_preview = str(e)[:200]

            if ai_response:
                st.markdown(ai_response)
                # Persist both turns to the server-side session
                conversation.append({"role": "user", "content": user_input})
                conversation.append({"role": "assistant", "content": ai_response})
                user_session["conversation"] = conversation
                session_manager.update_session("default_user", user_session)
                # Mirror the assistant turn into the on-screen transcript
                st.session_state.messages.append({"role": "assistant", "content": ai_response})
            else:
                st.error("Failed to get response from both providers.")
                st.session_state.last_error = error_msg or "No response from either provider"

    # Rerun so the transcript redraws. st.experimental_rerun() was removed in
    # Streamlit 1.30+; st.rerun() is the supported replacement.
    # NOTE(review): a rerun does not clear a keyed text_input — the typed text
    # will persist in the box; switching to st.chat_input would fix that.
    st.rerun()