danulr05 committed
Commit 7b576cd (verified) · 1 Parent(s): 35dde42

Update app.py

Files changed (1):
  1. app.py +1298 -40

app.py CHANGED
@@ -326,32 +326,23 @@ def search_budget_proposals(query: str) -> str:
         context_parts = []
         language_specific_matches = []
 
-        # Filter matches by language - prioritize documents in the user's language
+        # Filter matches to only include English documents
+        english_matches = []
         for match in matches:
             metadata = match.get('metadata', {})
             file_path = metadata.get('file_path', '')
 
-            # Check if this document has content in the detected language
-            is_language_match = False
-            if detected_language == 'en':
-                # For English, prefer documents without language suffixes
-                is_language_match = not any(lang in file_path.lower() for lang in ['_sin_', '_tam_', '-sin', '-tam', 'sinhala', 'tamil'])
-            elif detected_language == 'si':
-                # For Sinhala, prefer documents with Sinhala indicators
-                is_language_match = any(indicator in file_path.lower() for indicator in ['_sin_', '-sin', 'sinhala', 'si/', '- sinhala'])
-            elif detected_language == 'ta':
-                # For Tamil, prefer documents with Tamil indicators
-                is_language_match = any(indicator in file_path.lower() for indicator in ['_tam_', '-tam', 'tamil', 'ta/'])
+            # Only include English documents (no language suffixes)
+            is_english_document = not any(lang in file_path.lower() for lang in ['_sin_', '_tam_', '-sin', '-tam', 'sinhala', 'tamil', 'si/', 'ta/'])
 
-            if is_language_match:
-                language_specific_matches.append(match)
+            if is_english_document:
+                english_matches.append(match)
 
-        # If no language-specific matches found, use all matches (fallback)
-        if not language_specific_matches:
-            language_specific_matches = matches[:1]  # Take only the most relevant fallback
+        # Use English matches only, or fallback to top match if no English documents found
+        if english_matches:
+            language_specific_matches = english_matches[:1]  # Take only the most relevant English document
         else:
-            # Always return only the most relevant document in the user's language
-            language_specific_matches = language_specific_matches[:1]
+            language_specific_matches = matches[:1]  # Fallback to any document if no English found
 
         logger.info(f"Returning {len(language_specific_matches)} most relevant document(s) for {detected_language}")
@@ -670,47 +661,1314 @@ def extract_sources_from_search_context(search_context: str, user_language: str
         if pdf in search_context:
             found_files.add(pdf)
 
-    # Filter by user language preference and prioritize by relevance
+    # Filter to return sources in the user's language, but prioritize by relevance
     language_filtered_files = []
 
-    # First, collect all language-appropriate documents
+    # First, try to find documents in the user's language
     for pdf in found_files:
-        # Determine document language from filename
        doc_language = get_document_language(pdf)
 
-        # Language matching logic
+        # Language matching logic - return sources in user's language
         should_include = False
         if user_language == 'en' or user_language == 'singlish':
-            # English users can see all documents, but prefer English versions
-            if doc_language in ['en', 'english'] or user_language == 'singlish':
+            # English users get English documents
+            if doc_language in ['en', 'english']:
                 should_include = True
-            elif not any(f for f in found_files if get_document_language(f) in ['en', 'english']):
-                # If no English version available, show other languages
-                should_include = True
         elif user_language == 'si' or user_language == 'sinhala':
-            # Sinhala users prefer Sinhala documents
+            # Sinhala users get Sinhala documents
             if doc_language in ['si', 'sinhala']:
                 should_include = True
-            elif not any(f for f in found_files if get_document_language(f) in ['si', 'sinhala']):
-                # If no Sinhala version available, show other languages
-                should_include = True
         elif user_language == 'ta' or user_language == 'tamil':
-            # Tamil users prefer Tamil documents
+            # Tamil users get Tamil documents
             if doc_language in ['ta', 'tamil']:
                 should_include = True
-            elif not any(f for f in found_files if get_document_language(f) in ['ta', 'tamil']):
-                # If no Tamil version available, show other languages
-                should_include = True
         else:
-            # Default: show all documents
-            should_include = True
+            # Default: show English documents
+            if doc_language in ['en', 'english']:
+                should_include = True
 
         if should_include:
             language_filtered_files.append(pdf)
 
-    # Always return only the most relevant document in the user's language
-    if language_filtered_files:
-        language_filtered_files = [language_filtered_files[0]]  # Single most relevant document
+    # If no language-specific documents found, fallback to English
+    if not language_filtered_files:
+        for pdf in found_files:
+            doc_language = get_document_language(pdf)
+            if doc_language in ['en', 'english']:
+                language_filtered_files.append(pdf)
+
+    # If still no documents, use any available document
+    if not language_filtered_files and found_files:
+        language_filtered_files = [list(found_files)[0]]
+
+    # Return only the most relevant document in the user's language
+    if language_filtered_files:
+        language_filtered_files = [language_filtered_files[0]]
 
     # Convert to list with short names and correct URLs
     for pdf in language_filtered_files:
+        sources.append({
+            "filename": pdf,
+            "short_name": get_short_document_name(pdf),
+            "pdf_url": get_correct_pdf_url(pdf)
+        })
+
+    return sources
+
+def get_document_language(filename: str) -> str:
+    """Determine the language of a document from its filename"""
+    filename_lower = filename.lower()
+
+    if any(indicator in filename_lower for indicator in ['_sin_', '-sin', 'sinhala', 'si/', '- sinhala']):
+        return 'si'
+    elif any(indicator in filename_lower for indicator in ['_tam_', '-tam', 'tamil', 'ta/']):
+        return 'ta'
+    elif '_raj_' in filename_lower:
+        return 'en'  # Treat Raj as English/default
+    elif '_en_' in filename_lower or '_english_' in filename_lower:
+        return 'en'
+    else:
+        # Default to English if no language indicator found
+        return 'en'
+
+def get_correct_pdf_url(filename: str) -> str:
+    """Get the correct PDF URL based on document language"""
+    doc_language = get_document_language(filename)
+
+    # Map language to directory
+    if doc_language == 'si':
+        return f"../si/assets/pdfs/{filename}"
+    elif doc_language == 'ta':
+        return f"../ta/assets/pdfs/{filename}"
+    else:
+        # English documents
+        return f"assets/pdfs/{filename}"
+
+def extract_sources_from_response(response: str) -> List[Dict[str, str]]:
+    """Extract source documents mentioned in the response with short names (fallback method)"""
+    sources = []
+
+    # Get dynamically available PDF files
+    available_pdfs = get_available_pdfs()
+
+    # Look for source patterns like "(Source: filename.pdf)" or "(Sources: file1.pdf, file2.pdf)"
+    # Also look for partial matches for the new budget proposal files
+    found_files = set()
+    for pdf in available_pdfs:
+        if pdf in response:
+            found_files.add(pdf)
+        # Also check for partial matches (e.g., "MaternityLeaveBenefit" matches the full filename)
+        elif any(keyword in response for keyword in pdf.split('_') if len(keyword) > 5):
+            found_files.add(pdf)
+
+    # Convert to list with short names and correct URLs
+    for pdf in found_files:
+        sources.append({
+            "filename": pdf,
+            "short_name": get_short_document_name(pdf),
+            "pdf_url": get_correct_pdf_url(pdf)
+        })
+
+    return sources
+
+def generate_response_with_rag(user_message: str, session_id: str) -> Dict[str, Any]:
+    """Generate response using RAG with memory and multilingual support"""
+    try:
+        # Process multilingual input
+        processed_message, original_language, needs_translation, transliteration_used, ai_detection_used, confidence = simple_process_input(user_message)
+        logger.info(f"Input processing: original='{user_message}', processed='{processed_message}', lang='{original_language}', transliteration='{transliteration_used}', ai_detection='{ai_detection_used}', confidence='{confidence:.2f}'")
+
+        # Get or create memory for this session
+        memory = get_or_create_memory(session_id)
+
+        # Let Gemini handle both specific and general questions intelligently
+        # Always search with the user's actual query - Gemini will handle vague questions
+        search_context = search_budget_proposals(processed_message)
+
+        # Get conversation history for context
+        chat_history = memory.chat_memory.messages
+        conversation_context = ""
+        if chat_history:
+            # Get last few messages for context
+            recent_messages = chat_history[-6:]  # Last 3 exchanges
+            conversation_parts = []
+            for msg in recent_messages:
+                if isinstance(msg, HumanMessage):
+                    conversation_parts.append(f"User: {msg.content}")
+                elif isinstance(msg, AIMessage):
+                    conversation_parts.append(f"Assistant: {msg.content}")
+            conversation_context = "\n".join(conversation_parts)
+
+        # Create a prompt with conversation history and retrieved context
+        language_instruction = ""
+        if original_language == 'si':
+            language_instruction = "\n\nIMPORTANT: The user asked in Sinhala. Please respond in the same language (Sinhala) using proper Sinhala script and formal language appropriate for policy discussions. The question was: '{}'".format(user_message)
+        elif original_language == 'ta':
+            language_instruction = "\n\nIMPORTANT: The user asked in Tamil. Please respond in the same language (Tamil) using proper Tamil script and formal language appropriate for policy discussions. Use Sri Lankan Tamil terminology and context. The question was: '{}'".format(user_message)
+        elif original_language == 'singlish':
+            language_instruction = "\n\nIMPORTANT: The user asked in Singlish (Romanized Sinhala - Sinhala words written in English letters). Please respond in proper Sinhala script using formal language appropriate for policy discussions. Translate their question and provide a comprehensive answer in Sinhala. The original question was: '{}'".format(user_message)
+        elif original_language == 'romanized_tamil':
+            language_instruction = "\n\nIMPORTANT: The user asked in Romanized Tamil (Tamil words written in English letters). Please respond in proper Tamil script using formal language appropriate for policy discussions. Use Sri Lankan Tamil terminology and context. Translate their question and provide a comprehensive answer in Tamil. The original question was: '{}'".format(user_message)
+
+        prompt = f"""You are a helpful assistant for budget proposals in Sri Lanka. You can communicate in English, Sinhala, Tamil (Sri Lankan Tamil), and understand Singlish and Romanized Tamil.
+
+FORMATTING RULES:
+- DO NOT use asterisks (*) for formatting or emphasis
+- DO NOT use markdown formatting like **bold** or *italic*
+- Use plain text without any special formatting characters
+- Keep responses clean and readable without formatting symbols
+
+IMPORTANT: This website contains various budget proposals for Sri Lanka including:
+- Maternity leave benefits proposals (multiple language versions)
+- EPF (Employee Provident Fund) taxation removal proposals
+- Industrial land expansion proposals
+- Cigarette tax reform proposals
+- Electricity tariff reforms
+- Tax policy changes
+- Economic growth initiatives
+- Social protection measures
+- Budget 2025 and 2026 proposals
+
+Based on the following information from the budget proposals database:
+
+{search_context}
+
+{conversation_context}
+
+Current user question: {processed_message}
+Original user input: {user_message}
+{language_instruction}
+
+Guidelines:
+- For general questions like "monada meh" (what is this), "help", or vague inquiries, provide a helpful overview of available budget proposals
+- Never say "I couldn't process your request" - always provide useful information about budget proposals
+- Be professional but approachable in any language
+- Include specific details from the retrieved information when available
+- When mentioning proposals, refer to them by topic (e.g., "maternity leave benefits proposal", "EPF tax removal proposal") - DO NOT include long document filenames
+- If the search doesn't return relevant results, provide an overview of available proposals with examples
+- For vague questions, proactively explain what's available and guide users to specific topics (EPF, electricity, maternity leave, cigarette taxes, etc.)
+- Keep responses clear and informative
+- Reference previous conversation context when relevant
+- Maintain conversation continuity
+- Be culturally sensitive when discussing Sri Lankan policies
+- When responding in Sinhala, use appropriate formal language for policy discussions
+- When responding in Tamil, use Sri Lankan Tamil dialect and formal language appropriate for policy discussions
+- Always be helpful - turn any question into an opportunity to inform about budget proposals
+
+Please provide a helpful response:"""
+
+        # Generate response using the LLM directly
+        response = llm.invoke(prompt)
+        response_text = response.content.strip()
+
+        # No need to translate response - Gemini handles language matching automatically
+
+        # Extract sources from search context (where the actual filenames are)
+        sources = extract_sources_from_search_context(search_context, original_language)
+
+        # Add messages to memory (store original user message for context)
+        memory.chat_memory.add_user_message(user_message)
+        memory.chat_memory.add_ai_message(response_text)
+
+        # Get updated conversation history for context
+        chat_history = memory.chat_memory.messages
+
+        return {
+            "response": response_text,
+            "confidence": "high",
+            "session_id": session_id,
+            "conversation_length": len(chat_history),
+            "memory_used": True,
+            "rag_used": True,
+            "sources": sources,
+            "language_detected": original_language,
+            "translation_used": needs_translation,
+            "transliteration_used": transliteration_used,
+            "ai_detection_used": ai_detection_used,
+            "detection_confidence": confidence
+        }
+
+    except Exception as e:
+        logger.error(f"Error generating response with RAG: {e}")
+        # Provide error message in appropriate language
+        error_message = "I'm sorry, I'm having trouble processing your request right now. Please try again later."
+
+        return {
+            "response": error_message,
+            "confidence": "error",
+            "session_id": session_id,
+            "memory_used": False,
+            "rag_used": False,
+            "sources": [],
+            "language_detected": original_language if 'original_language' in locals() else 'en',
+            "translation_used": False,
+            "transliteration_used": False,
+            "ai_detection_used": False,
+            "detection_confidence": 0.0
+        }
+
+def clear_session_memory(session_id: str) -> bool:
+    """Clear memory for a specific session"""
+    try:
+        if session_id in conversation_memories:
+            del conversation_memories[session_id]
+            logger.info(f"Cleared memory for session: {session_id}")
+            return True
+        return False
+    except Exception as e:
+        logger.error(f"Error clearing memory: {e}")
+        return False
+
+@app.route('/api/chat', methods=['POST'])
+def chat():
+    """Enhanced chat endpoint with memory"""
+    try:
+        data = request.get_json()
+        user_message = data.get('message', '').strip()
+        session_id = data.get('session_id', 'default')
+
+        if not user_message:
+            return jsonify({
+                "error": "Message is required"
+            }), 400
+
+        # Generate response with memory
+        result = generate_response_with_rag(user_message, session_id)
+
+        return jsonify({
+            "response": result["response"],
+            "confidence": result["confidence"],
+            "session_id": session_id,
+            "conversation_length": result.get("conversation_length", 0),
+            "memory_used": result.get("memory_used", False),
+            "rag_used": result.get("rag_used", False),
+            "sources": result.get("sources", []),
+            "user_message": user_message,
+            "language_detected": result.get("language_detected", "en"),
+            "translation_used": result.get("translation_used", False),
+            "transliteration_used": result.get("transliteration_used", False),
+            "ai_detection_used": result.get("ai_detection_used", False),
+            "detection_confidence": result.get("detection_confidence", 0.0)
+        })
+
+    except Exception as e:
+        logger.error(f"Chat API error: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/chat/clear', methods=['POST'])
+def clear_chat():
+    """Clear chat memory for a session"""
+    try:
+        data = request.get_json()
+        session_id = data.get('session_id', 'default')
+
+        success = clear_session_memory(session_id)
+
+        return jsonify({
+            "success": success,
+            "session_id": session_id,
+            "message": "Chat memory cleared successfully" if success else "Session not found"
+        })
+
+    except Exception as e:
+        logger.error(f"Clear chat error: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/chat/sessions', methods=['GET'])
+def list_sessions():
+    """List all active chat sessions"""
+    try:
+        sessions = []
+        for session_id, memory in conversation_memories.items():
+            messages = memory.chat_memory.messages
+            sessions.append({
+                "session_id": session_id,
+                "message_count": len(messages),
+                "last_activity": datetime.now().isoformat()  # Simplified for now
+            })
+
+        return jsonify({
+            "sessions": sessions,
+            "total_sessions": len(sessions)
+        })
+
+    except Exception as e:
+        logger.error(f"List sessions error: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/chat/history/<session_id>', methods=['GET'])
+def get_chat_history(session_id: str):
+    """Get chat history for a specific session"""
+    try:
+        if session_id not in conversation_memories:
+            return jsonify({
+                "session_id": session_id,
+                "history": [],
+                "message_count": 0
+            })
+
+        memory = conversation_memories[session_id]
+        messages = memory.chat_memory.messages
+
+        history = []
+        for msg in messages:
+            if isinstance(msg, HumanMessage):
+                history.append({
+                    "type": "human",
+                    "content": msg.content,
+                    "timestamp": datetime.now().isoformat()
+                })
+            elif isinstance(msg, AIMessage):
+                history.append({
+                    "type": "ai",
+                    "content": msg.content,
+                    "timestamp": datetime.now().isoformat()
+                })
+
+        return jsonify({
+            "session_id": session_id,
+            "history": history,
+            "message_count": len(history)
+        })
+
+    except Exception as e:
+        logger.error(f"Get chat history error: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/chat/health', methods=['GET'])
+def chat_health():
+    """Health check for the enhanced chatbot"""
+    try:
+        # Test LangChain connection and vector database
+        test_agent = create_agent("health_check")
+        test_response = test_agent.invoke({"input": "Hello"})
+
+        # Test vector database connection
+        pc_index = get_pinecone_index()
+        vector_db_status = "connected" if pc_index else "disconnected"
+
+        return jsonify({
+            "status": "healthy",
+            "message": "Enhanced budget proposals chatbot with RAG is running",
+            "langchain_status": "connected" if test_response else "disconnected",
+            "vector_db_status": vector_db_status,
+            "rag_enabled": True,
+            "active_sessions": len(conversation_memories),
+            "memory_enabled": True
+        })
+    except Exception as e:
+        return jsonify({
+            "status": "unhealthy",
+            "message": f"Error: {str(e)}"
+        }), 500
+
+@app.route('/api/chat/debug/<session_id>', methods=['GET'])
+def debug_session(session_id: str):
+    """Debug endpoint to check session memory"""
+    try:
+        memory_exists = session_id in conversation_memories
+        memory_info = {
+            "session_id": session_id,
+            "memory_exists": memory_exists,
+            "total_sessions": len(conversation_memories),
+            "session_keys": list(conversation_memories.keys())
+        }
+
+        if memory_exists:
+            memory = conversation_memories[session_id]
+            messages = memory.chat_memory.messages
+            memory_info.update({
+                "message_count": len(messages),
+                "messages": [
+                    {
+                        "type": getattr(msg, 'type', 'unknown'),
+                        "content": getattr(msg, 'content', '')[:100] + "..." if len(getattr(msg, 'content', '')) > 100 else getattr(msg, 'content', '')
+                    }
+                    for msg in messages
+                ]
+            })
+
+        return jsonify(memory_info)
+
+    except Exception as e:
+        logger.error(f"Debug session error: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/chat/suggestions', methods=['GET'])
+def get_chat_suggestions():
+    """Get suggested questions for the chatbot with multilingual support"""
+    suggestions = [
+        "What are the maternity leave benefits proposed? 🤱",
+        "What are the industrial land expansion proposals? 🏭",
+        "How do the cigarette tax proposals work? 💰",
+        "What changes are proposed for electricity tariffs? ⚡",
+        "Tell me about the EPF taxation removal proposals 💰",
+        "What tax reforms are being suggested? 🏛️",
+        "How will these proposals affect the economy? 📈",
+        "What is the cost of implementing these proposals? 💵",
+        "Can you compare the costs of different proposals? ⚖️",
+        "What are the main benefits of these proposals? ✨",
+        "Budget proposals gana kiyanna 📋",
+        "EPF eka gana mokadda thiyenne? 💰",
+        "Industrial land expansion kiyannako 🏭",
+        "Electricity bill eka wenas wenawada? ⚡",
+        "Maternity leave benefits kiyannako 🤱",
+        "මේ budget proposals වල cost එක කීයද? 💵",
+        "රජයේ ආර්ථික ප්‍රතිපත්ති ගැන කියන්න 🏛️"
+    ]
+
+    return jsonify({
+        "suggestions": suggestions,
+        "supported_languages": ["English", "Sinhala", "Singlish"]
+    })
+
+@app.route('/api/chat/available-pdfs', methods=['GET'])
+def get_available_pdfs_endpoint():
+    """Get list of available PDF files with short names for UI display"""
+    try:
+        available_pdfs = get_available_pdfs()
+
+        # Create list with both full names and short names
+        pdf_list = []
+        short_names = []
+        for pdf in available_pdfs:
+            short_name = get_short_document_name(pdf)
+            pdf_list.append({
+                "filename": pdf,
+                "short_name": short_name,
+                "type": "PDF" if pdf.endswith('.pdf') else "DOCX"
+            })
+            short_names.append(short_name)
+
+        return jsonify({
+            "available_pdfs": available_pdfs,
+            "pdf_list": pdf_list,
+            "short_names": short_names,  # Simple array for easy frontend use
+            "count": len(available_pdfs),
+            "pdf_directory": "Budget_Proposals copy-2/assets/pdfs"
+        })
+    except Exception as e:
+        logger.error(f"Error getting available PDFs: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/chat/document-names', methods=['GET'])
+def get_document_names():
+    """Get document names with short names for UI display"""
+    try:
+        available_pdfs = get_available_pdfs()
+
+        # Create mapping of full names to short names
+        document_mapping = {}
+        for pdf in available_pdfs:
+            document_mapping[pdf] = get_short_document_name(pdf)
+
+        return jsonify({
+            "document_mapping": document_mapping,
+            "count": len(available_pdfs)
+        })
+    except Exception as e:
+        logger.error(f"Error getting document names: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/chat/short-document-names', methods=['GET'])
+def get_short_document_names():
+    """Get just the short document names as a simple array for frontend display"""
+    try:
+        available_pdfs = get_available_pdfs()
+
+        # Create simple array of short names
+        short_names = []
+        for pdf in available_pdfs:
+            short_names.append(get_short_document_name(pdf))
+
+        return jsonify({
+            "short_names": short_names,
+            "count": len(short_names)
+        })
+    except Exception as e:
+        logger.error(f"Error getting short document names: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/chat/document-buttons', methods=['GET'])
+def get_document_buttons():
+    """Get document names formatted specifically for UI buttons (simple strings only)"""
+    try:
+        available_pdfs = get_available_pdfs()
+
+        # Create simple array of just the short names as strings
+        button_names = []
+        for pdf in available_pdfs:
+            button_names.append(get_short_document_name(pdf))
+
+        # Return just the array of strings - no objects
+        return jsonify(button_names)
+    except Exception as e:
+        logger.error(f"Error getting document buttons: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/chat/detect-language', methods=['POST'])
+def detect_language():
+    """Test language detection functionality"""
+    try:
+        data = request.get_json()
+        text = data.get('text', '').strip()
+
+        if not text:
+            return jsonify({
+                "error": "Text is required"
+            }), 400
+
+        processed_message, original_language, needs_translation, transliteration_used, ai_detection_used, confidence = simple_process_input(text)
+
+        return jsonify({
+            "original_text": text,
+            "processed_text": processed_message,
+            "language_detected": original_language,
+            "translation_needed": needs_translation,
+            "transliteration_used": transliteration_used,
+            "ai_detection_used": ai_detection_used,
+            "detection_confidence": confidence,
+            "contains_sinhala": detect_sinhala_content(text),
+            "is_singlish": detect_singlish(text)
+        })
+
+    except Exception as e:
+        logger.error(f"Language detection error: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/', methods=['GET'])
+def home():
+    """Home endpoint with API documentation"""
+    return jsonify({
+        "message": "Multilingual Budget Proposals Chatbot API with Swabhasha Pipeline",
+        "version": "2.1.0",
+        "supported_languages": ["English", "Sinhala", "Tamil (Sri Lankan)", "Romanized Sinhala (Singlish)", "Romanized Tamil"],
+        "features": ["RAG", "Memory", "Swabhasha Transliteration", "Google Translation", "FAISS Vector Store"],
+        "pipeline": "Romanized Sinhala → Swabhasha → Sinhala Script → Google Translate → English → LLM → Response",
+        "endpoints": {
+            "POST /api/chat": "Chat with memory, RAG, and multilingual support",
+            "POST /api/chat/clear": "Clear chat memory",
+            "GET /api/chat/sessions": "List active sessions",
+            "GET /api/chat/history/<session_id>": "Get chat history",
+            "GET /api/chat/health": "Health check",
+            "GET /api/chat/suggestions": "Get suggested questions (multilingual)",
+            "GET /api/chat/available-pdfs": "Get available PDF files with short names",
+            "GET /api/chat/document-names": "Get document name mapping (full to short names)",
+            "GET /api/chat/short-document-names": "Get simple array of short document names",
+            "GET /api/chat/document-buttons": "Get document names as simple string array for UI buttons",
+            "POST /api/chat/detect-language": "Test language detection"
+        },
+        "status": "running"
+    })
+
+if __name__ == '__main__':
+    app.run(debug=False, host='0.0.0.0', port=7860)
+#!/usr/bin/env python3
+"""
+Enhanced Budget Proposals Chatbot API using LangChain with Memory and Agentic RAG
+"""
+
+from flask import Flask, request, jsonify
+from flask_cors import CORS
+import os
+import logging
+import json
+from datetime import datetime
+from typing import Dict, List, Any
+
+# LangChain imports
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain.memory import ConversationBufferWindowMemory
+from langchain.schema import HumanMessage, AIMessage
+from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain.chains import LLMChain
+from langchain_community.chat_message_histories import RedisChatMessageHistory
+from langchain.tools import Tool
+from langchain.agents import AgentExecutor, create_openai_functions_agent
+from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
+from langchain.schema import BaseMessage
+
+# Vector database imports
+from pinecone import Pinecone
+from sentence_transformers import SentenceTransformer
+
+# Language detection imports
+import re
+import requests
+import json
+
+app = Flask(__name__)
+CORS(app)
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Configure Gemini
+GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
+if not GEMINI_API_KEY:
+    logger.error("GEMINI_API_KEY not found in environment variables")
+    raise ValueError("Please set GEMINI_API_KEY in your .env file")
+
+# Configure Pinecone
+PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
+if not PINECONE_API_KEY:
+    logger.error("PINECONE_API_KEY not found in environment variables")
+    raise ValueError("Please set PINECONE_API_KEY in your .env file")
+
+# Configure Hugging Face (optional - needed for some models)
+HF_TOKEN = os.getenv('HUGGINGFACE_TOKEN')
+if HF_TOKEN:
+    logger.info("Hugging Face token found - will use for model downloads")
+else:
+    logger.warning("HUGGINGFACE_TOKEN not found - some models may not work")
+
+# Initialize Pinecone and embedding model - Using all-MiniLM model only
+pc = Pinecone(api_key=PINECONE_API_KEY)
+BUDGET_INDEX_NAME = "budget-proposals-optimized"  # Index for all-MiniLM model
+
+# Initialize all-MiniLM model (no HF token needed)
+embed_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+logger.info("✅ all-MiniLM model loaded successfully")
+
+# Initialize LangChain components
+llm = ChatGoogleGenerativeAI(
+    model="gemini-2.5-flash",
+    google_api_key=GEMINI_API_KEY,
+    temperature=0.7,
+    max_tokens=2000  # Increased for longer Sinhala responses
+)
+
+# Simplified initialization - Let Gemini handle everything
+logger.info("Using Gemini for all language processing (transliteration, translation, responses)")
+
+def detect_sinhala_content(text: str) -> bool:
+    """Detect if text contains Sinhala characters"""
+    # Sinhala Unicode range: U+0D80 to U+0DFF
+    sinhala_pattern = re.compile(r'[\u0D80-\u0DFF]')
+    return bool(sinhala_pattern.search(text))
+
+def detect_tamil_content(text: str) -> bool:
+    """Detect if text contains Tamil characters"""
+    # Tamil Unicode range: U+0B80 to U+0BFF
+    tamil_pattern = re.compile(r'[\u0B80-\u0BFF]')
+    return bool(tamil_pattern.search(text))
+
+def simple_detect_language(text: str) -> Dict[str, Any]:
+    """Simplified language detection with Tamil support - let Gemini handle the complexity"""
+    try:
+        # Check for Sinhala Unicode first (most reliable)
+        has_sinhala_unicode = detect_sinhala_content(text)
+        if has_sinhala_unicode:
+            return {
+                'language': 'si',
+                'confidence': 0.95,
+                'is_sinhala_unicode': True,
+                'is_tamil_unicode': False,
+                'is_romanized_sinhala': False,
+                'is_english': False,
+                'detection_method': 'unicode_detection'
+            }
+
+        # Check for Tamil Unicode
+        has_tamil_unicode = detect_tamil_content(text)
+        if has_tamil_unicode:
+            return {
+                'language': 'ta',
+                'confidence': 0.95,
+                'is_sinhala_unicode': False,
+                'is_tamil_unicode': True,
+                'is_romanized_sinhala': False,
+                'is_english': False,
+                'detection_method': 'unicode_detection'
+            }
+
+        # Use enhanced rule-based detection for Singlish
+        return enhanced_rule_based_detection(text)
+
+    except Exception as e:
+        logger.error(f"Language detection failed: {e}")
+        return rule_based_language_detection(text)
+
+def enhanced_rule_based_detection(text: str) -> Dict[str, Any]:
+    """Enhanced rule-based detection with Singlish and Romanized Tamil recognition"""
+    has_sinhala_unicode = detect_sinhala_content(text)
+    has_tamil_unicode = detect_tamil_content(text)
+    is_romanized_sinhala = detect_singlish(text) and not has_sinhala_unicode and not has_tamil_unicode
+    is_romanized_tamil = detect_romanized_tamil(text) and not has_sinhala_unicode and not has_tamil_unicode and not is_romanized_sinhala
+
+    # More sophisticated Singlish detection
+    if not has_sinhala_unicode and not is_romanized_sinhala:
+        # Check for common Sinhala sentence patterns in English letters
+        sinhala_patterns = [
+            r'\b(mokadda|kohomada|api|oya|mama)\b',
+            r'\b(eka|meka|thiyenne|kiyala)\b',
+            r'\b(gana|genna|danna|karanna)\b',
+            r'\b(budget|proposal).*\b(gana|eka)\b'
+        ]
+
+        text_lower = text.lower()
+        pattern_matches = sum(1 for pattern in sinhala_patterns if re.search(pattern, text_lower))
+
+        if pattern_matches >= 2:  # More conservative threshold to avoid false positives
+            is_romanized_sinhala = True
+
+    if has_sinhala_unicode:
+        language_code = 'si'
+        confidence = 0.9
+    elif has_tamil_unicode:
+        language_code = 'ta'
+        confidence = 0.9
+    elif is_romanized_sinhala:
+        language_code = 'singlish'
+        confidence = 0.8
+    elif is_romanized_tamil:
+        language_code = 'romanized_tamil'
+        confidence = 0.8
+    else:
+        language_code = 'en'
+        confidence = 0.7
+
+    return {
+        'language': language_code,
+        'confidence': confidence,
+        'is_sinhala_unicode': has_sinhala_unicode,
+        'is_tamil_unicode': has_tamil_unicode,
+        'is_romanized_sinhala': is_romanized_sinhala,
+        'is_romanized_tamil': is_romanized_tamil,
+        'is_english': language_code == 'en',
+        'detection_method': 'enhanced_rule_based'
+    }
+
+def rule_based_language_detection(text: str) -> Dict[str, Any]:
+    """Fallback rule-based language detection with Tamil and Romanized Tamil support"""
+    has_sinhala_unicode = detect_sinhala_content(text)
+    has_tamil_unicode = detect_tamil_content(text)
+    is_romanized_sinhala = detect_singlish(text) and not has_sinhala_unicode and not has_tamil_unicode
+    is_romanized_tamil = detect_romanized_tamil(text) and not has_sinhala_unicode and not has_tamil_unicode and not is_romanized_sinhala
+    is_english = not has_sinhala_unicode and not has_tamil_unicode and not is_romanized_sinhala and not is_romanized_tamil
+
+    if has_sinhala_unicode:
+        language_code = 'si'
+    elif has_tamil_unicode:
+        language_code = 'ta'
+    elif is_romanized_sinhala:
+        language_code = 'singlish'
+    elif is_romanized_tamil:
+        language_code = 'romanized_tamil'
+    else:
+        language_code = 'en'
+
+    return {
+        'language': language_code,
+        'confidence': 0.8,  # Default confidence for rule-based
+        'is_sinhala_unicode': has_sinhala_unicode,
+        'is_tamil_unicode': has_tamil_unicode,
+        'is_romanized_sinhala': is_romanized_sinhala,
+        'is_romanized_tamil': is_romanized_tamil,
+        'is_english': is_english,
+        'detection_method': 'rule_based'
+    }
+
+def detect_singlish(text: str) -> bool:
+    """Detect common Singlish patterns and words"""
+    singlish_words = [
+        'mokadda', 'kohomada', 'api', 'oya', 'mama', 'eka', 'meka', 'oya', 'dan', 'kiyala',
+        'karan', 'karanna', 'gana', 'genna', 'danna', 'ahala', 'denna',
+        'mata', 'ape', 'wage', 'wenas', 'thiyenne', 'kiyanawa', 'balanawa', 'pennanna',
+        'sampura', 'mudal', 'pasal', 'vyaparayak', 'rajaye', 'arthikaya', 'sammandala',
+        'kara', 'karanna', 'giya', 'yanawa', 'enawa', 'gihin', 'awe', 'nane', 'inne',
+        'danna', 'kiyanna', 'balanna', 'ganna', 'denna', 'yanna', 'enna'
+    ]
+
+    # Convert to lowercase and check for common Singlish words
+    text_lower = text.lower()
+    singlish_word_count = sum(1 for word in singlish_words if word in text_lower)
+
+    # Consider it Singlish if it has 3 or more Singlish words (more conservative)
+    return singlish_word_count >= 3
+
+def detect_romanized_tamil(text: str) -> bool:
+    """Detect common Romanized Tamil patterns and words (Tamil written in English letters)"""
+    romanized_tamil_words = [
+        # Common Tamil words in Roman script
+        'enna', 'epdi', 'enga', 'yaar', 'naa', 'nee', 'avar', 'ivan', 'ival', 'ithu', 'athu',
+        'vandhu', 'ponga', 'vanga', 'sollu', 'kelu', 'paaru', 'irukku', 'irukkanga', 'irundhu',
+        'seiya', 'panna', 'mudiyum', 'mudiyathu', 'venum', 'vendam', 'puriyuthu', 'puriyala',
+        'nalla', 'ketta', 'romba', 'konjam', 'neraya', 'kammi', 'adhikam', 'thaan', 'daan',
+        # Budget/government related Tamil terms (excluding common English words)
+        'sarkar', 'arasaangam', 'vyavasai', 'panam', 'kaasu', 'thogai',
+        'nilai', 'mari', 'maatram', 'thiruththam', 'yojana', 'thittam', 'mudhal', 'selavu',
+        'varumanam', 'aayam', 'viduli'
+    ]
+
+    # Convert to lowercase and check for common Romanized Tamil words
+    text_lower = text.lower()
+    tamil_word_count = sum(1 for word in romanized_tamil_words if word in text_lower)
+
+    # Consider it Romanized Tamil if it has 3 or more Tamil words (more conservative)
+    return tamil_word_count >= 3
+
+# Removed: AI transliteration and Google Translate functions
+# Gemini will handle all transliteration and translation needs
+
+def simple_process_input(user_message: str) -> tuple:
+    """
+    Simplified input processing - let Gemini handle everything
+    """
+    # Step 1: Simple language detection
+    language_info = simple_detect_language(user_message)
+    original_language = language_info['language']
+    confidence = language_info['confidence']
+    detection_method = language_info['detection_method']
+
+    logger.info(f"Language detection: {original_language} (confidence: {confidence:.2f}, method: {detection_method})")
+
+    # Use original message for all processing - Gemini will handle the rest
+    processed_message = user_message
+    needs_translation = False  # Gemini handles translation internally
+    transliteration_used = False  # Gemini handles transliteration internally
+    ai_detection_used = detection_method == 'ai'
+
+    logger.info(f"Input processing: keeping original '{user_message}' for Gemini to handle")
+
+    return processed_message, original_language, needs_translation, transliteration_used, ai_detection_used, confidence
+
+# Removed: translate_response_if_needed function
+# Gemini handles all language responses automatically
+
+def get_pinecone_index():
+    """Get the Pinecone index - single index for all languages"""
+    try:
+        return pc.Index(BUDGET_INDEX_NAME)
+    except Exception as e:
+        logger.error(f"Error accessing Pinecone index: {e}")
+        return None
+
+def get_embedding_model():
+    """Get the embedding model - single model for all languages"""
+    return embed_model
+
+def search_budget_proposals(query: str) -> str:
+    """Search budget proposals using all-MiniLM model for all languages"""
+    try:
+        # Detect language for logging and result filtering
+        language_info = simple_detect_language(query)
+        detected_language = language_info.get('language', 'en')
+
+        logger.info(f"Detected language: {detected_language} for query: {query[:50]}...")
+
+        # Get index and model (single model for all languages)
+        index = get_pinecone_index()
+        if not index:
+            return "Error: Could not access vector database."
+
+        # Use all-MiniLM model for all languages
+        model = get_embedding_model()
+        query_embedding = model.encode(query).tolist()
+
+        logger.info(f"Using all-MiniLM model for {detected_language}")
+
+        # Query the vector database directly
+        search_results = index.query(
+            vector=query_embedding,
+            top_k=5,
+            include_metadata=True
+        )
+
+        matches = search_results.get('matches', [])
+
+        # Debug: Log what we're getting from the vector database
+        logger.info(f"Vector DB returned {len(matches)} results")
+        if matches:
+            sample_match = matches[0]
+            logger.info(f"Sample match metadata keys: {list(sample_match.get('metadata', {}).keys())}")
+
+        if not matches:
+            return "No relevant budget proposals found in the database."
+
+        # Build context from vector database results, filtering by language
+        context_parts = []
+        language_specific_matches = []
+
+        # Filter matches to only include English documents
+        english_matches = []
+        for match in matches:
+            metadata = match.get('metadata', {})
+            file_path = metadata.get('file_path', '')
+
+            # Only include English documents (no language suffixes)
+            is_english_document = not any(lang in file_path.lower() for lang in ['_sin_', '_tam_', '-sin', '-tam', 'sinhala', 'tamil', 'si/', 'ta/'])
+
+            if is_english_document:
+                english_matches.append(match)
+
+        # Use English matches only, or fallback to top match if no English documents found
+        if english_matches:
+            language_specific_matches = english_matches[:1]  # Take only the most relevant English document
+        else:
+            language_specific_matches = matches[:1]  # Fallback to any document if no English found
+
+        logger.info(f"Returning {len(language_specific_matches)} most relevant document(s) for {detected_language}")
+
+        for match in language_specific_matches:
+            metadata = match.get('metadata', {})
+            score = match.get('score', 0)
+
+            file_path = metadata.get('file_path', '')
+            category = metadata.get('category', '')
+            title = metadata.get('title', '')
+            content = metadata.get('content', '')
+            summary = metadata.get('summary', '')
+            cost = metadata.get('costLKR', '')
+
+            # Include relevance score for debugging
+            context_parts.append(f"From {file_path} ({category}) [Relevance: {score:.3f}]: {title}")
+
+            # Prioritize content over summary, but include both if available
+            if content and len(content.strip()) > 50:  # Only use substantial content
+                context_parts.append(f"Content: {content}")
+            elif summary:
+                context_parts.append(f"Summary: {summary}")
+
+            # Always include cost information if available
+            if cost and cost != "No Costing Available":
+                context_parts.append(f"Cost: {cost}")
+
+            # Add any additional relevant fields from metadata
+            if metadata.get('implementation_period'):
+                context_parts.append(f"Implementation Period: {metadata.get('implementation_period')}")
+            if metadata.get('beneficiaries'):
+                context_parts.append(f"Beneficiaries: {metadata.get('beneficiaries')}")
+            if metadata.get('revenue_impact'):
+                context_parts.append(f"Revenue Impact: {metadata.get('revenue_impact')}")
+            if metadata.get('proposal_type'):
+                context_parts.append(f"Proposal Type: {metadata.get('proposal_type')}")
+            if metadata.get('sector'):
+                context_parts.append(f"Sector: {metadata.get('sector')}")
+
+        return "\n\n".join(context_parts)
+
+    except Exception as e:
+        logger.error(f"Error searching vector database: {e}")
+        return f"Error searching database: {str(e)}"
+
+# Create the RAG tool
+search_tool = Tool(
+    name="search_budget_proposals",
+    description="Search for relevant budget proposals in the vector database. Use this when you need specific information about budget proposals, costs, policies, or implementation details.",
+    func=search_budget_proposals
+)
+
+# Create the prompt template for the agent
+agent_prompt = ChatPromptTemplate.from_messages([
+    ("system", """You are a helpful assistant for budget proposals in Sri Lanka. You have access to a vector database containing detailed information about various budget proposals. You can communicate in English, Sinhala, and understand Singlish (Sinhala written in English letters).
+
+When a user asks about budget proposals, you should:
+1. Use the search_budget_proposals tool to find relevant information
+2. Provide accurate, detailed responses based on the retrieved information
+3. Reference proposals by their content/topic, not by filename
+4. Be professional but approachable in any language
+5. If the search doesn't return relevant results, acknowledge this and provide general guidance
+6. Respond in the same language or style as the user's question when possible
+
+Guidelines:
+- Always use the search tool for specific questions about budget proposals
+- When mentioning proposals, refer to them by topic (e.g., "maternity leave benefits proposal", "EPF tax removal proposal") rather than document filenames
+- Keep responses clear and informative in any language
+- Use a balanced tone - helpful but not overly casual
+- If asked about topics not covered, redirect to relevant topics professionally
+- Be culturally sensitive when discussing Sri Lankan policies and economic matters
+- When responding in Sinhala, use appropriate formal language for policy discussions
+- DO NOT include long document filenames in your responses - refer to proposals by their topic instead"""),
+    MessagesPlaceholder(variable_name="chat_history"),
+    ("human", "{input}"),
+    MessagesPlaceholder(variable_name="agent_scratchpad")
+])
+
+# Store conversation memories for different sessions
+conversation_memories: Dict[str, ConversationBufferWindowMemory] = {}
+
+def get_or_create_memory(session_id: str) -> ConversationBufferWindowMemory:
+    """Get or create a memory instance for a session"""
+    if session_id not in conversation_memories:
+        # Create new memory with window of 10 messages (5 exchanges)
+        conversation_memories[session_id] = ConversationBufferWindowMemory(
+            k=10,  # Remember last 10 messages
+            return_messages=True,
+            memory_key="chat_history"
+        )
+        logger.info(f"Created new memory for session: {session_id}")
+
+    return conversation_memories[session_id]
+
+def create_agent(session_id: str) -> AgentExecutor:
+    """Create a LangChain agent with memory and RAG capabilities"""
+    memory = get_or_create_memory(session_id)
+
+    # Create the agent
+    agent = create_openai_functions_agent(
+        llm=llm,
+        tools=[search_tool],
+        prompt=agent_prompt
+    )
+
+    # Create agent executor with memory
+    agent_executor = AgentExecutor(
+        agent=agent,
+        tools=[search_tool],
+        memory=memory,
+        verbose=False,
+        handle_parsing_errors=True
+    )
+
+    return agent_executor
1726
+
1727
+ def get_short_document_name(filename: str) -> str:
1728
+ """
1729
+ Convert long document names to shorter, user-friendly names automatically
1730
+
1731
+ SHORT NAME GENERATION GUIDE:
1732
+ ===========================
1733
+
1734
+ 1. MANUAL MAPPING (Priority 1):
1735
+ - Add entries to the 'short_names' dictionary for specific files
1736
+ - Format: 'full_filename_without_extension': 'Short Display Name'
1737
+ - Example: '20250813_Budget2026Proposal_MaternityLeaveBenefit_Raj_D01': 'Maternity Leave Benefits'
1738
+
1739
+ 2. AUTOMATIC PATTERN MATCHING (Priority 2):
1740
+ - System automatically detects proposal types and languages
1741
+ - Proposal Types Detected:
1742
+ * MaternityLeaveBenefit/MaternityLeave → "Maternity Leave Benefits"
1743
+ * RemovalOfTaxationOnEPF/EPF → "EPF Tax Removal"
1744
+ * ExpandingIndustrialLand/IndustrialLand → "Industrial Land Expansion"
1745
+ * Budget2025/Budget2026 → "Budget 2025/2026 Proposals"
1746
+ * Template → "Budget Template"
1747
+ * OnePagers → "Budget YYYY One-Pagers"
1748
+
1749
+ - Language Detection:
1750
+ * _Sin_/_Sinhala_ → "(Sinhala)"
1751
+ * _Tam_/_Tamil_ ��� "(Tamil)"
1752
+ * _En_/_English_ → "(EN)"
1753
+ * _Raj_ → No language suffix (treated as default/English)
1754
+ * No language indicator → No language suffix
1755
+
1756
+ 3. GENERIC FALLBACK (Priority 3):
1757
+ - Removes date prefixes: 20250813_ → ""
1758
+ - Removes language suffixes: _Sin_, _Tam_, _Raj_, _En_, _F_, _Final_, _D01
1759
+ - Removes budget prefixes: Budget2026Proposal_ → ""
1760
+ - Converts underscores to spaces: _ → " "
1761
+ - Capitalizes words: "maternity leave" → "Maternity Leave"
1762
+ - Limits length: Truncates to 37 chars + "..." if longer than 40
1763
+
1764
+ EXAMPLES:
1765
+ =========
1766
+ Input: "20250813_Budget2026Proposal_MaternityLeaveBenefit_Sin_F.pdf"
1767
+ Output: "Maternity Leave Benefits (Sinhala)"
1768
+
1769
+ Input: "20250825_Budget2026Proposal_RemovalOfTaxationOnEPF_Tam_F.pdf"
1770
+ Output: "EPF Tax Removal (Tamil)"
1771
+
1772
+ Input: "20250813_Budget2026_Proposal_ExpandingIndustrialLand_En_F.pdf"
1773
+ Output: "Industrial Land Expansion (EN)"
1774
+
1775
+ Input: "20250813_Budget2026Proposal_MaternityLeaveBenefit_Raj_D01.pdf"
1776
+ Output: "Maternity Leave Benefits" (no language suffix)
1777
+
1778
+ HOW TO ADD NEW DOCUMENTS:
1779
+ =========================
1780
+ 1. Drop the PDF/DOCX file in the assets/pdfs/ folder
1781
+ 2. The system will automatically generate a short name using pattern matching
1782
+ 3. If you want a custom name, add it to the 'short_names' dictionary
1783
+ 4. No code changes needed for automatic naming!
1784
+ """
1785
+ # Remove file extension
1786
+ name = filename.replace('.pdf', '').replace('.docx', '')
1787
+
1788
+ # Create mapping for common document types (can be updated manually for special cases)
1789
+ short_names = {
1790
+ '20241211_Econ_VRProposals_Budget2025_OnePagers': 'Budget 2025 One-Pagers',
1791
+ '20250813_Budget2026_Proposal_ExpandingIndustrialLand_En_F': 'Industrial Land Expansion (EN)',
1792
+ '20250813_Budget2026Proposal_ExpandingIndustrialLand_F': 'Industrial Land Expansion (English)',
1793
+ '20250813_Budget2026Proposal_ExpandingIndustrialLand_F - Sinhala': 'Industrial Land Expansion (Sinhala)',
1794
+ '20250813_Budget2026Proposal_MaternityLeaveBenefit_Raj_D01': 'Maternity Leave Benefits',
1795
+ '20250813_Budget2026Proposal_RemovalOfTaxationOnEPF_Raj_F': 'EPF Tax Removal',
1796
+ '20250825_Budget2026Proposal_MaternityLeaveBenefit_Sin_F': 'Maternity Leave Benefits (Sinhala)',
1797
+ '20250825_Budget2026Proposal_MaternityLeaveBenefit_Tam_F': 'Maternity Leave Benefits (Tamil)',
1798
+ '20250825_Budget2026Proposal_RemovalOfTaxationOnEPF_Sin_Final': 'EPF Tax Removal (Sinhala)',
1799
+ '20250825_Budget2026Proposal_RemovalOfTaxationOnEPF_Tam_F': 'EPF Tax Removal (Tamil)',
1800
+ '20250908_Budget2026Proposal_Template': 'Budget 2026 Template'
1801
+ }
+
+    # Return short name if found in manual mapping
+    if name in short_names:
+        return short_names[name]
+
+    # Automatic pattern-based naming (works for new files without manual updates)
+    # Extract year
+    year_match = re.search(r'20\d{2}', name)
+    year = year_match.group() if year_match else ''
+
+    # Extract language indicators
+    language = ''
+    if '_Sin_' in name or '_Sinhala_' in name:
+        language = ' (Sinhala)'
+    elif '_Tam_' in name or '_Tamil_' in name:
+        language = ' (Tamil)'
+    elif '_Raj_' in name:
+        language = ''  # '_Raj_' is an author tag, not a language, so no suffix (see docstring)
+    elif '_En_' in name or '_English_' in name:
+        language = ' (EN)'
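+    # e.g. (illustrative names): '..._Sin_F' -> ' (Sinhala)'; '..._Raj_D01' -> '' (no suffix)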
+
+    # Extract proposal type (specific patterns first, then the generic Budget rule,
+    # so Template/OnePagers files are not swallowed by the 'Budget' check)
+    if 'MaternityLeaveBenefit' in name or 'MaternityLeave' in name:
+        return f'Maternity Leave Benefits{language}'
+    elif 'RemovalOfTaxationOnEPF' in name or 'EPF' in name:
+        return f'EPF Tax Removal{language}'
+    elif 'ExpandingIndustrialLand' in name or 'IndustrialLand' in name:
+        return f'Industrial Land Expansion{language}'
+    elif 'Template' in name:
+        return f'Budget Template{language}'
+    elif 'OnePagers' in name:
+        return f'Budget {year} One-Pagers'
+    elif 'Budget' in name and year:
+        return f'Budget {year} Proposals{language}'
+    else:
+        # Generic fallback - clean up the name
+        # Remove date prefixes and common suffixes
+        clean_name = re.sub(r'^\d{8}_', '', name)  # Remove date prefix
+        clean_name = re.sub(r'(?:_(?:En|Sin|Tam|Raj|F|Final|D01))+$', '', clean_name)  # Strip repeated language/version suffixes, e.g. '_Raj_D01'
+        clean_name = re.sub(r'Budget\d{4}Proposal_?', '', clean_name)  # Remove budget proposal prefix
+        clean_name = re.sub(r'_', ' ', clean_name)  # Replace underscores with spaces
+
+        # Capitalize words
+        clean_name = ' '.join(word.capitalize() for word in clean_name.split())
+
+        # Limit length
+        if len(clean_name) > 40:
+            clean_name = clean_name[:37] + '...'
+
+        return clean_name + language
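+        # Worked example of this fallback (hypothetical filename, for illustration):
+        #   '20250901_RuralRoadUpgrade_Sin_F' -> 'RuralRoadUpgrade_Sin_F' (date stripped)
+        #   -> 'RuralRoadUpgrade' (suffixes stripped) -> 'Ruralroadupgrade'
+        #   (str.capitalize() lowercases interior letters) -> 'Ruralroadupgrade (Sinhala)'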
+
+ def get_available_pdfs() -> List[str]:
+     """Dynamically get list of available PDF files from all language directories"""
+     import os
+
+     # Known documents, used as a fallback when nothing is found on disk
+     fallback_pdfs = [
+         '20250813_Budget2026Proposal_ExpandingIndustrialLand_F.pdf',
+         '20250813_Budget2026Proposal_ExpandingIndustrialLand_F - Sinhala.pdf',
+         '20250813_Budget2026Proposal_ExpandingIndustrialLand_F - TamilReviewed.pdf',
+         '20250813_Budget2026Proposal_MaternityLeaveBenefit_Raj_D01.pdf',
+         '20250813_Budget2026Proposal_RemovalOfTaxationOnEPF_Raj_F.pdf',
+         '20250825_Budget2026Proposal_MaternityLeaveBenefit_Sin_F.pdf',
+         '20250825_Budget2026Proposal_MaternityLeaveBenefit_Tam_F.pdf',
+         '20250825_Budget2026Proposal_RemovalOfTaxationOnEPF_Sin_Final.pdf',
+         '20250825_Budget2026Proposal_RemovalOfTaxationOnEPF_Tam_F.pdf'
+     ]
+
+     try:
+         # Search in all language directories
+         pdf_dirs = [
+             "Budget_Proposals copy-2/en/assets/pdfs/",
+             "Budget_Proposals copy-2/si/assets/pdfs/",
+             "Budget_Proposals copy-2/ta/assets/pdfs/",
+             "Budget_Proposals copy-2/assets/pdfs/"
+         ]
+
+         pdf_files = set()
+         for pdf_dir in pdf_dirs:
+             if os.path.exists(pdf_dir):
+                 files = [f for f in os.listdir(pdf_dir) if f.lower().endswith(('.pdf', '.docx'))]
+                 pdf_files.update(files)
+
+         # Fall back to the known list if no directories exist or they are empty
+         return list(pdf_files) if pdf_files else fallback_pdfs
+     except Exception as e:
+         logger.error(f"Error getting available PDFs: {e}")
+         return fallback_pdfs
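+ # Usage note (illustrative): get_available_pdfs() merges files from all language
+ # folders into a set, so its result is de-duplicated but unordered; callers should
+ # not rely on its ordering.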
+
+ def extract_sources_from_search_context(search_context: str, user_language: str = 'en') -> List[Dict[str, str]]:
+     """Extract source documents from search context with short names, filtered by user language"""
+     sources = []
+
+     # Get dynamically available PDF files
+     available_pdfs = get_available_pdfs()
+
+     # Look for the specific pattern "From {filename} ({category}):" in the search context
+     import re
+     found_files = set()
+
+     # Pattern to match "From filename.pdf (category):" or "From filename.docx (category):"
+     # Handles the assets/pdfs/ prefix and empty parentheses; [^:]+ keeps the capture
+     # from crossing the trailing colon
+     from_pattern = r'From\s+assets/pdfs/([^:]+\.(?:pdf|docx))\s*\([^)]*\)'
+     matches = re.findall(from_pattern, search_context)
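+     # Example (illustrative category text): the line
+     #   "From assets/pdfs/20250825_Budget2026Proposal_MaternityLeaveBenefit_Tam_F.pdf (proposal):"
+     # yields "20250825_Budget2026Proposal_MaternityLeaveBenefit_Tam_F.pdf"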
+
+     for match in matches:
+         if match in available_pdfs:
+             found_files.add(match)
+
+     # Fallback: if no "From" pattern found, look for direct filename mentions
+     if not found_files:
+         for pdf in available_pdfs:
+             if pdf in search_context:
+                 found_files.add(pdf)
+
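+     # Illustrative outcomes (assumed inputs): with user_language='si' and both a Sinhala
+     # and an English match, only the Sinhala file passes the filter below; with 'si' and
+     # only English matches, the English fallback further down keeps one file instead.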
+     # Filter sources to the user's language
+     language_filtered_files = []
+
+     # First, try to find documents in the user's language
+     for pdf in found_files:
+         doc_language = get_document_language(pdf)
+
+         # Language matching logic - return sources in the user's language
+         should_include = False
+         if user_language == 'en' or user_language == 'singlish':
+             # English users get English documents
+             if doc_language in ['en', 'english']:
                  should_include = True
          elif user_language == 'si' or user_language == 'sinhala':
+             # Sinhala users get Sinhala documents
              if doc_language in ['si', 'sinhala']:
                  should_include = True
          elif user_language == 'ta' or user_language == 'tamil':
+             # Tamil users get Tamil documents
              if doc_language in ['ta', 'tamil']:
                  should_include = True
          else:
+             # Default: show English documents
+             if doc_language in ['en', 'english']:
+                 should_include = True

          if should_include:
              language_filtered_files.append(pdf)

+     # If no language-specific documents found, fall back to English
+     if not language_filtered_files:
+         for pdf in found_files:
+             doc_language = get_document_language(pdf)
+             if doc_language in ['en', 'english']:
+                 language_filtered_files.append(pdf)
+
+     # If still no documents, use any available document
+     if not language_filtered_files and found_files:
+         language_filtered_files = [list(found_files)[0]]
+
+     # Keep a single source: the first document found in the user's language
+     if language_filtered_files:
+         language_filtered_files = [language_filtered_files[0]]

      # Convert to list with short names and correct URLs
      for pdf in language_filtered_files: