Remove Performance Reports tab and keep only About tab
app.py CHANGED
@@ -467,125 +467,12 @@ if user_input and not st.session_state.is_processing:
     time.sleep(0.5)  # Brief pause
     st.experimental_rerun()
 
-# Add evaluation dashboard tab (separate from chat interface) -
+# Add evaluation dashboard tab (separate from chat interface) - ONLY ABOUT TAB NOW
 st.divider()
-#
-tab1,
+# Only one tab now - About
+tab1, = st.tabs(["ℹ️ About"])
 
-# Changed with tab2: to with tab1:
 with tab1:
-    st.header("📊 Performance Reports")
-    st.markdown("System performance metrics and usage analytics.")
-
-    # System status
-    st.subheader("System Status")
-    col1, col2, col3 = st.columns(3)
-    with col1:
-        try:
-            from services.ollama_monitor import check_ollama_status
-            ollama_status = check_ollama_status()
-            if ollama_status.get("running"):
-                st.success("🦙 Ollama: Running")
-            else:
-                st.warning("🦙 Ollama: Not running")
-        except:
-            st.info("🦙 Ollama: Unknown")
-    with col2:
-        try:
-            hf_status = hf_monitor.check_endpoint_status()
-            if hf_status['available']:
-                if hf_status.get('initialized', False):
-                    st.success("🤗 HF: Available & Initialized")
-                else:
-                    st.warning("⚡ HF: Initializing...")
-            else:
-                # Cat-themed unavailable message
-                st.info("😴 Kitty Napping")
-        except:
-            st.info("🤗 HF: Unknown")
-    with col3:
-        if check_redis_health():
-            st.success("💾 Redis: Connected")
-        else:
-            st.error("💾 Redis: Disconnected")
-
-    # Session statistics
-    st.subheader("Session Statistics")
-    try:
-        user_session = session_manager.get_session("default_user")
-        conversation = user_session.get("conversation", [])
-        st.metric("Total Messages", len(conversation))
-
-        coord_stats = user_session.get('ai_coordination', {})
-        if coord_stats:
-            st.metric("AI Requests Processed", coord_stats.get('requests_processed', 0))
-            st.metric("Ollama Responses", coord_stats.get('ollama_responses', 0))
-            st.metric("HF Responses", coord_stats.get('hf_responses', 0))
-        else:
-            st.info("No coordination statistics available yet.")
-    except Exception as e:
-        st.warning(f"Could not load session statistics: {translate_error(e)}")
-
-    # Recent activity
-    st.subheader("Recent Activity")
-    try:
-        recent_activities = coordinator.get_recent_activities("default_user")
-        if recent_activities and recent_activities.get('last_request'):
-            st.markdown(f"**Last Request:** {recent_activities['last_request']}")
-            st.markdown(f"**Requests Processed:** {recent_activities['requests_processed']}")
-            st.markdown(f"**Ollama Responses:** {recent_activities['ollama_responses']}")
-            st.markdown(f"**HF Responses:** {recent_activities['hf_responses']}")
-        else:
-            st.info("No recent activity recorded.")
-    except Exception as e:
-        st.warning(f"Could not load recent activity: {translate_error(e)}")
-
-    # Configuration summary
-    st.subheader("Configuration Summary")
-    st.markdown(f"**Environment:** {'HF Space' if config.is_hf_space else 'Local'}")
-    st.markdown(f"**Primary Model:** {config.local_model_name or 'Not set'}")
-    st.markdown(f"**Ollama Host:** {config.ollama_host or 'Not configured'}")
-    st.markdown(f"**Cosmic Mode:** {'Enabled' if st.session_state.cosmic_mode else 'Disabled'}")
-
-    features = []
-    if config.use_fallback:
-        features.append("Fallback Mode")
-    if os.getenv("TAVILY_API_KEY"):
-        features.append("Web Search")
-    if config.openweather_api_key:
-        features.append("Weather Data")
-    st.markdown(f"**Active Features:** {', '.join(features) if features else 'None'}")
-
-    # Conversation Analytics
-    st.subheader("📊 Conversation Analytics")
-    try:
-        user_session = session_manager.get_session("default_user")
-        conversation = user_session.get("conversation", [])
-        if conversation:
-            # Analyze conversation patterns
-            user_messages = [msg for msg in conversation if msg["role"] == "user"]
-            ai_messages = [msg for msg in conversation if msg["role"] == "assistant"]
-
-            col1, col2, col3 = st.columns(3)
-            col1.metric("Total Exchanges", len(user_messages))
-            col2.metric("Avg Response Length",
-                        round(sum(len(msg.get("content", "")) for msg in ai_messages) / len(ai_messages)) if ai_messages else 0)
-            col3.metric("Topics Discussed",
-                        len(set(["life", "goal", "health", "career"]) & set(" ".join([msg.get("content", "") for msg in conversation]).lower().split())))
-
-            # Show most common words/topics
-            all_text = " ".join([msg.get("content", "") for msg in conversation]).lower()
-            common_words = ["life", "goal", "health", "career", "productivity", "mindfulness"]
-            relevant_topics = [word for word in common_words if word in all_text]
-            if relevant_topics:
-                st.markdown(f"**Detected Topics:** {', '.join(relevant_topics)}")
-        else:
-            st.info("No conversation data available yet.")
-    except Exception as e:
-        st.warning(f"Could not analyze conversation: {translate_error(e)}")
-
-# Changed with tab3: to with tab2:
-with tab2:
     st.header("ℹ️ About CosmicCat AI Assistant")
     st.markdown("""
     The CosmicCat AI Assistant is a sophisticated conversational AI system with the following capabilities: