#!/usr/bin/env python3
"""
Enhanced Budget Proposals Chatbot API using LangChain with Memory and Agentic RAG
"""
from flask import Flask, request, jsonify
from flask_cors import CORS
import os
import logging
import json
from datetime import datetime
from typing import Dict, List, Any

# LangChain imports
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.memory import ConversationBufferWindowMemory
from langchain.schema import HumanMessage, AIMessage
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.tools import Tool
from langchain.agents import AgentExecutor, create_openai_functions_agent

# Vector database imports
from pinecone import Pinecone
from sentence_transformers import SentenceTransformer

# Language detection imports
import re

app = Flask(__name__)
CORS(app)

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Configure Gemini
GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
if not GEMINI_API_KEY:
    logger.error("GEMINI_API_KEY not found in environment variables")
    raise ValueError("Please set GEMINI_API_KEY in your .env file")

# Configure Pinecone
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
if not PINECONE_API_KEY:
    logger.error("PINECONE_API_KEY not found in environment variables")
    raise ValueError("Please set PINECONE_API_KEY in your .env file")

# Configure Hugging Face (optional - needed for some models)
HF_TOKEN = os.getenv('HUGGINGFACE_TOKEN')
if HF_TOKEN:
    logger.info("Hugging Face token found - will use for model downloads")
else:
    logger.warning("HUGGINGFACE_TOKEN not found - some models may not work")

# Initialize Pinecone and embedding model - using the all-MiniLM model only
pc = Pinecone(api_key=PINECONE_API_KEY)
BUDGET_INDEX_NAME = "budget-proposals-optimized"  # Index for the all-MiniLM model

# Initialize the all-MiniLM model (no HF token needed)
embed_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
logger.info("✅ all-MiniLM model loaded successfully")

# Initialize LangChain components
llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",
    google_api_key=GEMINI_API_KEY,
    temperature=0.7,
    max_tokens=2000  # Increased for longer Sinhala responses
)

# Simplified initialization - let Gemini handle everything
logger.info("Using Gemini for all language processing (transliteration, translation, responses)")


def detect_sinhala_content(text: str) -> bool:
    """Detect if text contains Sinhala characters"""
    # Sinhala Unicode range: U+0D80 to U+0DFF
    sinhala_pattern = re.compile(r'[\u0D80-\u0DFF]')
    return bool(sinhala_pattern.search(text))


def detect_tamil_content(text: str) -> bool:
    """Detect if text contains Tamil characters"""
    # Tamil Unicode range: U+0B80 to U+0BFF
    tamil_pattern = re.compile(r'[\u0B80-\u0BFF]')
    return bool(tamil_pattern.search(text))


def simple_detect_language(text: str) -> Dict[str, Any]:
    """Simplified language detection with Tamil support - let Gemini handle the complexity"""
    try:
        # Check for Sinhala Unicode first (most reliable)
        has_sinhala_unicode = detect_sinhala_content(text)
        if has_sinhala_unicode:
            return {
                'language': 'si',
                'confidence': 0.95,
                'is_sinhala_unicode': True,
                'is_tamil_unicode': False,
                'is_romanized_sinhala': False,
                'is_english': False,
                'detection_method': 'unicode_detection'
            }

        # Check for Tamil Unicode
        has_tamil_unicode = detect_tamil_content(text)
        if has_tamil_unicode:
            return {
                'language': 'ta',
                'confidence': 0.95,
                'is_sinhala_unicode': False,
                'is_tamil_unicode': True,
                'is_romanized_sinhala': False,
                'is_english': False,
                'detection_method': 'unicode_detection'
            }

        # Use enhanced rule-based detection for Singlish
        return enhanced_rule_based_detection(text)
    except Exception as e:
        logger.error(f"Language detection failed: {e}")
        return rule_based_language_detection(text)
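
# Illustrative behaviour of simple_detect_language, derived from the rules
# above (example inputs, not an exhaustive test):
#   simple_detect_language("අයවැය යෝජනා මොනවාද?")            -> language 'si', confidence 0.95
#   simple_detect_language("EPF eka gana mokadda thiyenne?")  -> language 'singlish' (rule-based)
#   simple_detect_language("What is the EPF proposal?")       -> language 'en'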


def enhanced_rule_based_detection(text: str) -> Dict[str, Any]:
    """Enhanced rule-based detection with Singlish and Romanized Tamil recognition"""
    has_sinhala_unicode = detect_sinhala_content(text)
    has_tamil_unicode = detect_tamil_content(text)
    is_romanized_sinhala = detect_singlish(text) and not has_sinhala_unicode and not has_tamil_unicode
    is_romanized_tamil = (detect_romanized_tamil(text) and not has_sinhala_unicode
                          and not has_tamil_unicode and not is_romanized_sinhala)

    # More sophisticated Singlish detection
    if not has_sinhala_unicode and not is_romanized_sinhala:
        # Check for common Sinhala sentence patterns in English letters
        sinhala_patterns = [
            r'\b(mokadda|kohomada|api|oya|mama)\b',
            r'\b(eka|meka|thiyenne|kiyala)\b',
            r'\b(gana|genna|danna|karanna)\b',
            r'\b(budget|proposal).*\b(gana|eka)\b'
        ]
        text_lower = text.lower()
        pattern_matches = sum(1 for pattern in sinhala_patterns if re.search(pattern, text_lower))
        if pattern_matches >= 2:  # More conservative threshold to avoid false positives
            is_romanized_sinhala = True

    if has_sinhala_unicode:
        language_code = 'si'
        confidence = 0.9
    elif has_tamil_unicode:
        language_code = 'ta'
        confidence = 0.9
    elif is_romanized_sinhala:
        language_code = 'singlish'
        confidence = 0.8
    elif is_romanized_tamil:
        language_code = 'romanized_tamil'
        confidence = 0.8
    else:
        language_code = 'en'
        confidence = 0.7

    return {
        'language': language_code,
        'confidence': confidence,
        'is_sinhala_unicode': has_sinhala_unicode,
        'is_tamil_unicode': has_tamil_unicode,
        'is_romanized_sinhala': is_romanized_sinhala,
        'is_romanized_tamil': is_romanized_tamil,
        'is_english': language_code == 'en',
        'detection_method': 'enhanced_rule_based'
    }


def rule_based_language_detection(text: str) -> Dict[str, Any]:
    """Fallback rule-based language detection with Tamil and Romanized Tamil support"""
    has_sinhala_unicode = detect_sinhala_content(text)
    has_tamil_unicode = detect_tamil_content(text)
    is_romanized_sinhala = detect_singlish(text) and not has_sinhala_unicode and not has_tamil_unicode
    is_romanized_tamil = (detect_romanized_tamil(text) and not has_sinhala_unicode
                          and not has_tamil_unicode and not is_romanized_sinhala)
    is_english = (not has_sinhala_unicode and not has_tamil_unicode
                  and not is_romanized_sinhala and not is_romanized_tamil)

    if has_sinhala_unicode:
        language_code = 'si'
    elif has_tamil_unicode:
        language_code = 'ta'
    elif is_romanized_sinhala:
        language_code = 'singlish'
    elif is_romanized_tamil:
        language_code = 'romanized_tamil'
    else:
        language_code = 'en'

    return {
        'language': language_code,
        'confidence': 0.8,  # Default confidence for rule-based
        'is_sinhala_unicode': has_sinhala_unicode,
        'is_tamil_unicode': has_tamil_unicode,
        'is_romanized_sinhala': is_romanized_sinhala,
        'is_romanized_tamil': is_romanized_tamil,
        'is_english': is_english,
        'detection_method': 'rule_based'
    }


def detect_singlish(text: str) -> bool:
    """Detect common Singlish patterns and words"""
    singlish_words = [
        'mokadda', 'kohomada', 'api', 'oya', 'mama', 'eka', 'meka', 'dan', 'kiyala',
        'karan', 'karanna', 'gana', 'genna', 'danna', 'ahala', 'denna',
        'mata', 'ape', 'wage', 'wenas', 'thiyenne', 'kiyanawa', 'balanawa', 'pennanna',
        'sampura', 'mudal', 'pasal', 'vyaparayak', 'rajaye', 'arthikaya', 'sammandala',
        'kara', 'giya', 'yanawa', 'enawa', 'gihin', 'awe', 'nane', 'inne',
        'kiyanna', 'balanna', 'ganna', 'yanna', 'enna'
    ]
    # Convert to lowercase and check for common Singlish words
    text_lower = text.lower()
    singlish_word_count = sum(1 for word in singlish_words if word in text_lower)
    # Consider it Singlish if it has 3 or more Singlish words (more conservative)
    return singlish_word_count >= 3
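
# Example: "mokadda oya kiyanne api gana" matches 'mokadda', 'oya', 'api' and
# 'gana' (4 matches >= 3), so detect_singlish returns True, while "budget eka"
# matches only 'eka' and stays below the threshold.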


def detect_romanized_tamil(text: str) -> bool:
    """Detect common Romanized Tamil patterns and words (Tamil written in English letters)"""
    romanized_tamil_words = [
        # Common Tamil words in Roman script
        'enna', 'epdi', 'enga', 'yaar', 'naa', 'nee', 'avar', 'ivan', 'ival', 'ithu', 'athu',
        'vandhu', 'ponga', 'vanga', 'sollu', 'kelu', 'paaru', 'irukku', 'irukkanga', 'irundhu',
        'seiya', 'panna', 'mudiyum', 'mudiyathu', 'venum', 'vendam', 'puriyuthu', 'puriyala',
        'nalla', 'ketta', 'romba', 'konjam', 'neraya', 'kammi', 'adhikam', 'thaan', 'daan',
        # Budget/government related Tamil terms (excluding common English words)
        'sarkar', 'arasaangam', 'vyavasai', 'panam', 'kaasu', 'thogai',
        'nilai', 'mari', 'maatram', 'thiruththam', 'yojana', 'thittam', 'mudhal', 'selavu',
        'varumanam', 'aayam', 'viduli'
    ]
    # Convert to lowercase and check for common Romanized Tamil words
    text_lower = text.lower()
    tamil_word_count = sum(1 for word in romanized_tamil_words if word in text_lower)
    # Consider it Romanized Tamil if it has 3 or more Tamil words (more conservative)
    return tamil_word_count >= 3
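
# Example: "enna thittam irukku sarkar panam" matches 'enna', 'thittam',
# 'irukku', 'sarkar' and 'panam' (5 matches >= 3), so detect_romanized_tamil
# returns True.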

# Removed: AI transliteration and Google Translate functions
# Gemini will handle all transliteration and translation needs


def simple_process_input(user_message: str) -> tuple:
    """
    Simplified input processing - let Gemini handle everything
    """
    # Step 1: Simple language detection
    language_info = simple_detect_language(user_message)
    original_language = language_info['language']
    confidence = language_info['confidence']
    detection_method = language_info['detection_method']
    logger.info(f"Language detection: {original_language} (confidence: {confidence:.2f}, method: {detection_method})")

    # Use original message for all processing - Gemini will handle the rest
    processed_message = user_message
    needs_translation = False  # Gemini handles translation internally
    transliteration_used = False  # Gemini handles transliteration internally
    ai_detection_used = detection_method == 'ai'
    logger.info(f"Input processing: keeping original '{user_message}' for Gemini to handle")
    return processed_message, original_language, needs_translation, transliteration_used, ai_detection_used, confidence
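
# Illustrative return value (deterministic given the detection rules above):
#   simple_process_input("EPF eka gana mokadda thiyenne?")
#   -> ("EPF eka gana mokadda thiyenne?", 'singlish', False, False, False, 0.8)
# The message itself is always passed through unchanged; only the detected
# language and confidence vary.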

# Removed: translate_response_if_needed function
# Gemini handles all language responses automatically


def get_pinecone_index():
    """Get the Pinecone index - single index for all languages"""
    try:
        return pc.Index(BUDGET_INDEX_NAME)
    except Exception as e:
        logger.error(f"Error accessing Pinecone index: {e}")
        return None


def get_embedding_model():
    """Get the embedding model - single model for all languages"""
    return embed_model


def search_budget_proposals(query: str) -> str:
    """Search budget proposals using the all-MiniLM model for all languages"""
    try:
        # Detect language for logging and result filtering
        language_info = simple_detect_language(query)
        detected_language = language_info.get('language', 'en')
        logger.info(f"Detected language: {detected_language} for query: {query[:50]}...")

        # Get index and model (single model for all languages)
        index = get_pinecone_index()
        if not index:
            return "Error: Could not access vector database."

        # Use the all-MiniLM model for all languages
        model = get_embedding_model()
        query_embedding = model.encode(query).tolist()
        logger.info(f"Using all-MiniLM model for {detected_language}")

        # Query the vector database directly
        search_results = index.query(
            vector=query_embedding,
            top_k=5,
            include_metadata=True
        )
        matches = search_results.get('matches', [])

        # Debug: log what we're getting from the vector database
        logger.info(f"Vector DB returned {len(matches)} results")
        if matches:
            sample_match = matches[0]
            logger.info(f"Sample match metadata keys: {list(sample_match.get('metadata', {}).keys())}")
        if not matches:
            return "No relevant budget proposals found in the database."

        # Build context from vector database results, filtering by language
        context_parts = []

        # Filter matches to only include English documents
        english_matches = []
        for match in matches:
            metadata = match.get('metadata', {})
            file_path = metadata.get('file_path', '')
            # Only include English documents (no language suffixes)
            is_english_document = not any(
                lang in file_path.lower()
                for lang in ['_sin_', '_tam_', '-sin', '-tam', 'sinhala', 'tamil', 'si/', 'ta/']
            )
            if is_english_document:
                english_matches.append(match)

        # Use English matches only, or fall back to the top match if no English documents were found
        if english_matches:
            language_specific_matches = english_matches[:1]  # Take only the most relevant English document
        else:
            language_specific_matches = matches[:1]  # Fall back to any document if no English found
        logger.info(f"Returning {len(language_specific_matches)} most relevant document(s) for {detected_language}")

        for match in language_specific_matches:
            metadata = match.get('metadata', {})
            score = match.get('score', 0)
            file_path = metadata.get('file_path', '')
            category = metadata.get('category', '')
            title = metadata.get('title', '')
            content = metadata.get('content', '')
            summary = metadata.get('summary', '')
            cost = metadata.get('costLKR', '')

            # Include relevance score for debugging
            context_parts.append(f"From {file_path} ({category}) [Relevance: {score:.3f}]: {title}")

            # Prioritize content over summary, but include both if available
            if content and len(content.strip()) > 50:  # Only use substantial content
                context_parts.append(f"Content: {content}")
            elif summary:
                context_parts.append(f"Summary: {summary}")

            # Always include cost information if available
            if cost and cost != "No Costing Available":
                context_parts.append(f"Cost: {cost}")

            # Add any additional relevant fields from metadata
            if metadata.get('implementation_period'):
                context_parts.append(f"Implementation Period: {metadata.get('implementation_period')}")
            if metadata.get('beneficiaries'):
                context_parts.append(f"Beneficiaries: {metadata.get('beneficiaries')}")
            if metadata.get('revenue_impact'):
                context_parts.append(f"Revenue Impact: {metadata.get('revenue_impact')}")
            if metadata.get('proposal_type'):
                context_parts.append(f"Proposal Type: {metadata.get('proposal_type')}")
            if metadata.get('sector'):
                context_parts.append(f"Sector: {metadata.get('sector')}")

        return "\n\n".join(context_parts)
    except Exception as e:
        logger.error(f"Error searching vector database: {e}")
        return f"Error searching database: {str(e)}"

# Create the RAG tool
search_tool = Tool(
    name="search_budget_proposals",
    description="Search for relevant budget proposals in the vector database. Use this when you need specific information about budget proposals, costs, policies, or implementation details.",
    func=search_budget_proposals
)
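
# The tool can also be exercised directly during debugging, e.g.:
#   search_tool.run("maternity leave benefits cost")
# which returns the formatted context string from search_budget_proposals.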

# Create the prompt template for the agent
agent_prompt = ChatPromptTemplate.from_messages([
    ("system", """You are a helpful assistant for budget proposals in Sri Lanka. You have access to a vector database containing detailed information about various budget proposals. You can communicate in English, Sinhala, and understand Singlish (Sinhala written in English letters).

When a user asks about budget proposals, you should:
1. Use the search_budget_proposals tool to find relevant information
2. Provide accurate, detailed responses based on the retrieved information
3. Reference proposals by their content/topic, not by filename
4. Be professional but approachable in any language
5. If the search doesn't return relevant results, acknowledge this and provide general guidance
6. Respond in the same language or style as the user's question when possible

Guidelines:
- Always use the search tool for specific questions about budget proposals
- When mentioning proposals, refer to them by topic (e.g., "maternity leave benefits proposal", "EPF tax removal proposal") rather than document filenames
- Keep responses clear and informative in any language
- Use a balanced tone - helpful but not overly casual
- If asked about topics not covered, redirect to relevant topics professionally
- Be culturally sensitive when discussing Sri Lankan policies and economic matters
- When responding in Sinhala, use appropriate formal language for policy discussions
- DO NOT include long document filenames in your responses - refer to proposals by their topic instead"""),
    MessagesPlaceholder(variable_name="chat_history"),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad")
])

# Store conversation memories for different sessions
conversation_memories: Dict[str, ConversationBufferWindowMemory] = {}


def get_or_create_memory(session_id: str) -> ConversationBufferWindowMemory:
    """Get or create a memory instance for a session"""
    if session_id not in conversation_memories:
        # Create new memory with a window of the last 10 exchanges
        # (ConversationBufferWindowMemory's k counts human/AI exchange pairs)
        conversation_memories[session_id] = ConversationBufferWindowMemory(
            k=10,  # Remember the last 10 exchanges (20 messages)
            return_messages=True,
            memory_key="chat_history"
        )
        logger.info(f"Created new memory for session: {session_id}")
    return conversation_memories[session_id]
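
# Sketch of the expected windowing behaviour (assuming LangChain's
# ConversationBufferWindowMemory keeps the last k exchange pairs):
#   memory = get_or_create_memory("demo")
#   memory.chat_memory.add_user_message("hi")
#   memory.chat_memory.add_ai_message("hello")
#   # After more than 10 exchanges, memory.load_memory_variables({}) only
#   # returns the chat_history for the 10 most recent exchanges.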


def create_agent(session_id: str) -> AgentExecutor:
    """Create a LangChain agent with memory and RAG capabilities"""
    memory = get_or_create_memory(session_id)

    # Create the agent
    agent = create_openai_functions_agent(
        llm=llm,
        tools=[search_tool],
        prompt=agent_prompt
    )

    # Create agent executor with memory
    agent_executor = AgentExecutor(
        agent=agent,
        tools=[search_tool],
        memory=memory,
        verbose=False,
        handle_parsing_errors=True
    )
    return agent_executor


def get_short_document_name(filename: str) -> str:
    """
    Convert long document names to shorter, user-friendly names automatically.

    SHORT NAME GENERATION GUIDE:
    ===========================
    1. MANUAL MAPPING (Priority 1):
       - Add entries to the 'short_names' dictionary for specific files
       - Format: 'full_filename_without_extension': 'Short Display Name'
       - Example: '20250813_Budget2026Proposal_MaternityLeaveBenefit_Raj_D01': 'Maternity Leave Benefits'

    2. AUTOMATIC PATTERN MATCHING (Priority 2):
       - System automatically detects proposal types and languages
       - Proposal types detected:
         * MaternityLeaveBenefit/MaternityLeave → "Maternity Leave Benefits"
         * RemovalOfTaxationOnEPF/EPF → "EPF Tax Removal"
         * ExpandingIndustrialLand/IndustrialLand → "Industrial Land Expansion"
         * Budget2025/Budget2026 → "Budget 2025/2026 Proposals"
         * Template → "Budget Template"
         * OnePagers → "Budget YYYY One-Pagers"
       - Language detection:
         * _Sin_/_Sinhala_ → "(Sinhala)"
         * _Tam_/_Tamil_ → "(Tamil)"
         * _En_/_English_ → "(EN)"
         * _Raj_ → No language suffix (treated as default/English)
         * No language indicator → No language suffix

    3. GENERIC FALLBACK (Priority 3):
       - Removes date prefixes: 20250813_ → ""
       - Removes language/version suffixes: _Sin, _Tam, _Raj, _En, _F, _Final, _D01
       - Removes budget prefixes: Budget2026Proposal_ → ""
       - Converts underscores to spaces: _ → " "
       - Capitalizes words: "maternity leave" → "Maternity Leave"
       - Limits length: truncates to 37 chars + "..." if longer than 40

    EXAMPLES:
    =========
    Input: "20250813_Budget2026Proposal_MaternityLeaveBenefit_Sin_F.pdf"
    Output: "Maternity Leave Benefits (Sinhala)"

    Input: "20250825_Budget2026Proposal_RemovalOfTaxationOnEPF_Tam_F.pdf"
    Output: "EPF Tax Removal (Tamil)"

    Input: "20250813_Budget2026_Proposal_ExpandingIndustrialLand_En_F.pdf"
    Output: "Industrial Land Expansion (EN)"

    Input: "20250813_Budget2026Proposal_MaternityLeaveBenefit_Raj_D01.pdf"
    Output: "Maternity Leave Benefits" (no language suffix)

    HOW TO ADD NEW DOCUMENTS:
    =========================
    1. Drop the PDF/DOCX file in the assets/pdfs/ folder
    2. The system will automatically generate a short name using pattern matching
    3. If you want a custom name, add it to the 'short_names' dictionary
    4. No code changes needed for automatic naming!
    """
    # Remove file extension
    name = filename.replace('.pdf', '').replace('.docx', '')

    # Mapping for common document types (can be updated manually for special cases)
    short_names = {
        '20241211_Econ_VRProposals_Budget2025_OnePagers': 'Budget 2025 One-Pagers',
        '20250813_Budget2026_Proposal_ExpandingIndustrialLand_En_F': 'Industrial Land Expansion (EN)',
        '20250813_Budget2026Proposal_ExpandingIndustrialLand_F': 'Industrial Land Expansion (English)',
        '20250813_Budget2026Proposal_ExpandingIndustrialLand_F - Sinhala': 'Industrial Land Expansion (Sinhala)',
        '20250813_Budget2026Proposal_MaternityLeaveBenefit_Raj_D01': 'Maternity Leave Benefits',
        '20250813_Budget2026Proposal_RemovalOfTaxationOnEPF_Raj_F': 'EPF Tax Removal',
        '20250825_Budget2026Proposal_MaternityLeaveBenefit_Sin_F': 'Maternity Leave Benefits (Sinhala)',
        '20250825_Budget2026Proposal_MaternityLeaveBenefit_Tam_F': 'Maternity Leave Benefits (Tamil)',
        '20250825_Budget2026Proposal_RemovalOfTaxationOnEPF_Sin_Final': 'EPF Tax Removal (Sinhala)',
        '20250825_Budget2026Proposal_RemovalOfTaxationOnEPF_Tam_F': 'EPF Tax Removal (Tamil)',
        '20250908_Budget2026Proposal_Template': 'Budget 2026 Template'
    }

    # Return the short name if found in the manual mapping
    if name in short_names:
        return short_names[name]

    # Automatic pattern-based naming (works for new files without manual updates)
    # Extract year
    year_match = re.search(r'20\d{2}', name)
    year = year_match.group() if year_match else ''

    # Extract language indicators
    language = ''
    if '_Sin_' in name or '_Sinhala_' in name:
        language = ' (Sinhala)'
    elif '_Tam_' in name or '_Tamil_' in name:
        language = ' (Tamil)'
    elif '_Raj_' in name:
        language = ''  # Raj drafts are treated as default/English (no suffix)
    elif '_En_' in name or '_English_' in name:
        language = ' (EN)'

    # Extract proposal type
    if 'MaternityLeaveBenefit' in name or 'MaternityLeave' in name:
        return f'Maternity Leave Benefits{language}'
    elif 'RemovalOfTaxationOnEPF' in name or 'EPF' in name:
        return f'EPF Tax Removal{language}'
    elif 'ExpandingIndustrialLand' in name or 'IndustrialLand' in name:
        return f'Industrial Land Expansion{language}'
    elif 'Budget' in name and year:
        return f'Budget {year} Proposals{language}'
    elif 'Template' in name:
        return f'Budget Template{language}'
    elif 'OnePagers' in name:
        return f'Budget {year} One-Pagers'
    else:
        # Generic fallback - clean up the name
        # Remove date prefixes and common suffixes (repeatedly, e.g. "_Raj_D01")
        clean_name = re.sub(r'^\d{8}_', '', name)  # Remove date prefix
        clean_name = re.sub(r'(_(En|Sin|Tam|Raj|F|Final|D01))+$', '', clean_name)  # Remove language/version suffixes
        clean_name = re.sub(r'Budget\d{4}Proposal_?', '', clean_name)  # Remove budget proposal prefix
        clean_name = re.sub(r'_', ' ', clean_name)  # Replace underscores with spaces
        # Capitalize words
        clean_name = ' '.join(word.capitalize() for word in clean_name.split())
        # Limit length
        if len(clean_name) > 40:
            clean_name = clean_name[:37] + '...'
        return clean_name + language


# Known PDFs used as a fallback when no asset directories are present
FALLBACK_PDFS = [
    '20250813_Budget2026Proposal_ExpandingIndustrialLand_F.pdf',
    '20250813_Budget2026Proposal_ExpandingIndustrialLand_F - Sinhala.pdf',
    '20250813_Budget2026Proposal_ExpandingIndustrialLand_F - TamilReviewed.pdf',
    '20250813_Budget2026Proposal_MaternityLeaveBenefit_Raj_D01.pdf',
    '20250813_Budget2026Proposal_RemovalOfTaxationOnEPF_Raj_F.pdf',
    '20250825_Budget2026Proposal_MaternityLeaveBenefit_Sin_F.pdf',
    '20250825_Budget2026Proposal_MaternityLeaveBenefit_Tam_F.pdf',
    '20250825_Budget2026Proposal_RemovalOfTaxationOnEPF_Sin_Final.pdf',
    '20250825_Budget2026Proposal_RemovalOfTaxationOnEPF_Tam_F.pdf'
]


def get_available_pdfs() -> List[str]:
    """Dynamically get the list of available PDF files from all language directories"""
    try:
        # Search in all language directories
        pdf_dirs = [
            "Budget_Proposals copy-2/en/assets/pdfs/",
            "Budget_Proposals copy-2/si/assets/pdfs/",
            "Budget_Proposals copy-2/ta/assets/pdfs/",
            "Budget_Proposals copy-2/assets/pdfs/"
        ]
        pdf_files = set()
        for pdf_dir in pdf_dirs:
            if os.path.exists(pdf_dir):
                files = [f for f in os.listdir(pdf_dir) if f.lower().endswith(('.pdf', '.docx'))]
                pdf_files.update(files)
        if pdf_files:
            return list(pdf_files)
        # Fallback to known PDFs if no directories exist
        return list(FALLBACK_PDFS)
    except Exception as e:
        logger.error(f"Error getting available PDFs: {e}")
        # Fallback to known PDFs
        return list(FALLBACK_PDFS)


# DISABLED - Source extraction removed
def extract_sources_from_search_context_DISABLED(search_context: str, user_language: str = 'en') -> List[Dict[str, str]]:
    """Extract source documents from search context with short names, filtered by user language"""
    sources = []

    # Get dynamically available PDF files
    available_pdfs = get_available_pdfs()

    # Look for the specific pattern "From {filename} ({category}):" in the search context
    found_files = set()
    # Pattern to match "From filename.pdf (category):" or "From filename.docx (category):",
    # updated to handle the assets/pdfs/ prefix and empty parentheses, and to stop at the colon
    from_pattern = r'From\s+assets/pdfs/([^:]+\.(?:pdf|docx))\s*\([^)]*\)'
    matches = re.findall(from_pattern, search_context)
    for match in matches:
        if match in available_pdfs:
            found_files.add(match)

    # Fallback: if no "From" pattern found, look for direct filename mentions
    if not found_files:
        for pdf in available_pdfs:
            if pdf in search_context:
                found_files.add(pdf)

    # Filter to return sources in the user's language, prioritized by relevance
    language_filtered_files = []

    # First, try to find documents in the user's language
    for pdf in found_files:
        doc_language = get_document_language(pdf)
        # Language matching logic - return sources in the user's language
        should_include = False
        if user_language == 'en' or user_language == 'singlish':
            # English users get English documents
            if doc_language in ['en', 'english']:
                should_include = True
        elif user_language == 'si' or user_language == 'sinhala':
            # Sinhala users get Sinhala documents
            if doc_language in ['si', 'sinhala']:
                should_include = True
        elif user_language == 'ta' or user_language == 'tamil':
            # Tamil users get Tamil documents
            if doc_language in ['ta', 'tamil']:
                should_include = True
        else:
            # Default: show English documents
            if doc_language in ['en', 'english']:
                should_include = True
        if should_include:
            language_filtered_files.append(pdf)

    # If no language-specific documents found, fall back to English
    if not language_filtered_files:
        for pdf in found_files:
            doc_language = get_document_language(pdf)
            if doc_language in ['en', 'english']:
                language_filtered_files.append(pdf)

    # If still no documents, use any available document
    if not language_filtered_files and found_files:
        language_filtered_files = [list(found_files)[0]]

    # Return only the most relevant document in the user's language
    if language_filtered_files:
        language_filtered_files = [language_filtered_files[0]]

    # Convert to a list with short names and correct URLs
    for pdf in language_filtered_files:
        sources.append({
            "filename": pdf,
            "short_name": get_short_document_name(pdf),
            "pdf_url": get_correct_pdf_url(pdf)
        })
    return sources


def get_document_language(filename: str) -> str:
    """Determine the language of a document from its filename"""
    filename_lower = filename.lower()
    if any(indicator in filename_lower for indicator in ['_sin_', '-sin', 'sinhala', 'si/', '- sinhala']):
        return 'si'
    elif any(indicator in filename_lower for indicator in ['_tam_', '-tam', 'tamil', 'ta/']):
        return 'ta'
    elif '_raj_' in filename_lower:
        return 'en'  # Treat Raj as English/default
    elif '_en_' in filename_lower or '_english_' in filename_lower:
        return 'en'
    else:
        # Default to English if no language indicator found
        return 'en'


def get_correct_pdf_url(filename: str) -> str:
    """Get the correct PDF URL based on document language"""
    doc_language = get_document_language(filename)
    # Map language to directory
    if doc_language == 'si':
        return f"../si/assets/pdfs/{filename}"
    elif doc_language == 'ta':
        return f"../ta/assets/pdfs/{filename}"
    else:
        # English documents
        return f"assets/pdfs/{filename}"


# DISABLED - Source extraction removed
def extract_sources_from_response_DISABLED(response: str) -> List[Dict[str, str]]:
    """Extract source documents mentioned in the response with short names (fallback method)"""
    sources = []

    # Get dynamically available PDF files
    available_pdfs = get_available_pdfs()

    # Look for source patterns like "(Source: filename.pdf)" or "(Sources: file1.pdf, file2.pdf)".
    # Also look for partial matches for the new budget proposal files.
    found_files = set()
    for pdf in available_pdfs:
        if pdf in response:
            found_files.add(pdf)
        # Also check for partial matches (e.g., "MaternityLeaveBenefit" matches the full filename)
        elif any(keyword in response for keyword in pdf.split('_') if len(keyword) > 5):
            found_files.add(pdf)

    # Convert to a list with short names and correct URLs
    for pdf in found_files:
        sources.append({
            "filename": pdf,
            "short_name": get_short_document_name(pdf),
            "pdf_url": get_correct_pdf_url(pdf)
        })
    return sources


def generate_response_with_rag(user_message: str, session_id: str) -> Dict[str, Any]:
    """Generate response using RAG with memory and multilingual support"""
    try:
        # Process multilingual input
        processed_message, original_language, needs_translation, transliteration_used, ai_detection_used, confidence = simple_process_input(user_message)
        logger.info(f"Input processing: original='{user_message}', processed='{processed_message}', lang='{original_language}', transliteration='{transliteration_used}', ai_detection='{ai_detection_used}', confidence='{confidence:.2f}'")

        # Get or create memory for this session
        memory = get_or_create_memory(session_id)

        # Let Gemini handle both specific and general questions intelligently.
        # Always search with the user's actual query - Gemini will handle vague questions.
        search_context = search_budget_proposals(processed_message)

        # Get conversation history for context
        chat_history = memory.chat_memory.messages
        conversation_context = ""
        if chat_history:
            # Get last few messages for context
            recent_messages = chat_history[-6:]  # Last 3 exchanges
            conversation_parts = []
            for msg in recent_messages:
                if isinstance(msg, HumanMessage):
                    conversation_parts.append(f"User: {msg.content}")
                elif isinstance(msg, AIMessage):
                    conversation_parts.append(f"Assistant: {msg.content}")
            conversation_context = "\n".join(conversation_parts)

        # Create a prompt with conversation history and retrieved context
        language_instruction = ""
        if original_language == 'si':
            language_instruction = "\n\nIMPORTANT: The user asked in Sinhala. Please respond in the same language (Sinhala) using proper Sinhala script and formal language appropriate for policy discussions. The question was: '{}'".format(user_message)
        elif original_language == 'ta':
            language_instruction = "\n\nIMPORTANT: The user asked in Tamil. Please respond in the same language (Tamil) using proper Tamil script and formal language appropriate for policy discussions. Use Sri Lankan Tamil terminology and context. The question was: '{}'".format(user_message)
        elif original_language == 'singlish':
            language_instruction = "\n\nIMPORTANT: The user asked in Singlish (Romanized Sinhala - Sinhala words written in English letters). Please respond in proper Sinhala script using formal language appropriate for policy discussions. Translate their question and provide a comprehensive answer in Sinhala. The original question was: '{}'".format(user_message)
        elif original_language == 'romanized_tamil':
            language_instruction = "\n\nIMPORTANT: The user asked in Romanized Tamil (Tamil words written in English letters). Please respond in proper Tamil script using formal language appropriate for policy discussions. Use Sri Lankan Tamil terminology and context. Translate their question and provide a comprehensive answer in Tamil. The original question was: '{}'".format(user_message)

        prompt = f"""You are a helpful assistant for budget proposals in Sri Lanka. You can communicate in English, Sinhala, Tamil (Sri Lankan Tamil), and understand Singlish and Romanized Tamil.

FORMATTING RULES:
- DO NOT use asterisks (*) for formatting or emphasis
- DO NOT use markdown formatting like **bold** or *italic*
- Use plain text without any special formatting characters
- Keep responses clean and readable without formatting symbols

IMPORTANT: This website contains various budget proposals for Sri Lanka including:
- Maternity leave benefits proposals (multiple language versions)
- EPF (Employee Provident Fund) taxation removal proposals
- Industrial land expansion proposals
- Cigarette tax reform proposals
- Electricity tariff reforms
- Tax policy changes
- Economic growth initiatives
- Social protection measures
- Budget 2025 and 2026 proposals

Based on the following information from the budget proposals database:
{search_context}

{conversation_context}

Current user question: {processed_message}
Original user input: {user_message}
{language_instruction}

Guidelines:
- For general questions like "monada meh" (what is this), "help", or vague inquiries, provide a helpful overview of available budget proposals
- Never say "I couldn't process your request" - always provide useful information about budget proposals
- Be professional but approachable in any language
- Include specific details from the retrieved information when available
- When mentioning proposals, refer to them by topic (e.g., "maternity leave benefits proposal", "EPF tax removal proposal") - DO NOT include long document filenames
- If the search doesn't return relevant results, provide an overview of available proposals with examples
- For vague questions, proactively explain what's available and guide users to specific topics (EPF, electricity, maternity leave, cigarette taxes, etc.)
- Keep responses clear and informative
- Reference previous conversation context when relevant
- Maintain conversation continuity
- Be culturally sensitive when discussing Sri Lankan policies
- When responding in Sinhala, use appropriate formal language for policy discussions
- When responding in Tamil, use Sri Lankan Tamil dialect and formal language appropriate for policy discussions
- Always be helpful - turn any question into an opportunity to inform about budget proposals

Please provide a helpful response:"""

        # Generate response using the LLM directly
        response = llm.invoke(prompt)
        response_text = response.content.strip()
        # No need to translate response - Gemini handles language matching automatically
        # Sources removed - no longer extracting source documents

        # Add messages to memory (store original user message for context)
        memory.chat_memory.add_user_message(user_message)
        memory.chat_memory.add_ai_message(response_text)

        # Get updated conversation history for context
        chat_history = memory.chat_memory.messages

        return {
            "response": response_text,
            "confidence": "high",
            "session_id": session_id,
            "conversation_length": len(chat_history),
            "memory_used": True,
            "rag_used": True,
            "sources": [],
            "language_detected": original_language,
            "translation_used": needs_translation,
            "transliteration_used": transliteration_used,
            "ai_detection_used": ai_detection_used,
            "detection_confidence": confidence
        }
    except Exception as e:
        logger.error(f"Error generating response with RAG: {e}")
        # Provide error message in appropriate language
        error_message = "I'm sorry, I'm having trouble processing your request right now. Please try again later."
        return {
            "response": error_message,
            "confidence": "error",
            "session_id": session_id,
            "memory_used": False,
            "rag_used": False,
            "sources": [],
            "language_detected": original_language if 'original_language' in locals() else 'en',
            "translation_used": False,
            "transliteration_used": False,
            "ai_detection_used": False,
            "detection_confidence": 0.0
        }


def clear_session_memory(session_id: str) -> bool:
    """Clear memory for a specific session"""
    try:
        if session_id in conversation_memories:
            del conversation_memories[session_id]
            logger.info(f"Cleared memory for session: {session_id}")
            return True
        return False
    except Exception as e:
        logger.error(f"Error clearing memory: {e}")
        return False


@app.route('/api/chat', methods=['POST'])
def chat():
    """Enhanced chat endpoint with memory"""
    try:
        data = request.get_json()
        user_message = data.get('message', '').strip()
        session_id = data.get('session_id', 'default')
        if not user_message:
            return jsonify({
                "error": "Message is required"
            }), 400

        # Generate response with memory
        result = generate_response_with_rag(user_message, session_id)
        return jsonify({
            "response": result["response"],
            "confidence": result["confidence"],
            "session_id": session_id,
            "conversation_length": result.get("conversation_length", 0),
            "memory_used": result.get("memory_used", False),
            "rag_used": result.get("rag_used", False),
            "sources": [],
            "user_message": user_message,
            "language_detected": result.get("language_detected", "en"),
            "translation_used": result.get("translation_used", False),
            "transliteration_used": result.get("transliteration_used", False),
            "ai_detection_used": result.get("ai_detection_used", False),
            "detection_confidence": result.get("detection_confidence", 0.0)
        })
    except Exception as e:
        logger.error(f"Chat API error: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/chat/clear', methods=['POST'])
def clear_chat():
    """Clear chat memory for a session"""
    try:
        data = request.get_json()
        session_id = data.get('session_id', 'default')
        success = clear_session_memory(session_id)
        return jsonify({
            "success": success,
            "session_id": session_id,
            "message": "Chat memory cleared successfully" if success else "Session not found"
        })
    except Exception as e:
        logger.error(f"Clear chat error: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/chat/sessions', methods=['GET'])
def list_sessions():
    """List all active chat sessions"""
    try:
        sessions = []
        for session_id, memory in conversation_memories.items():
            messages = memory.chat_memory.messages
            sessions.append({
                "session_id": session_id,
                "message_count": len(messages),
                "last_activity": datetime.now().isoformat()  # Simplified for now
            })
        return jsonify({
            "sessions": sessions,
            "total_sessions": len(sessions)
        })
    except Exception as e:
        logger.error(f"List sessions error: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/chat/history/<session_id>', methods=['GET'])
def get_chat_history(session_id: str):
    """Get chat history for a specific session"""
    try:
        if session_id not in conversation_memories:
            return jsonify({
                "session_id": session_id,
                "history": [],
                "message_count": 0
            })

        memory = conversation_memories[session_id]
        messages = memory.chat_memory.messages
        history = []
        for msg in messages:
            if isinstance(msg, HumanMessage):
                history.append({
                    "type": "human",
                    "content": msg.content,
                    "timestamp": datetime.now().isoformat()
                })
            elif isinstance(msg, AIMessage):
                history.append({
                    "type": "ai",
                    "content": msg.content,
                    "timestamp": datetime.now().isoformat()
                })
        return jsonify({
            "session_id": session_id,
            "history": history,
            "message_count": len(history)
        })
    except Exception as e:
        logger.error(f"Get chat history error: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/chat/health', methods=['GET'])
def chat_health():
    """Health check for the enhanced chatbot"""
    try:
        # Test LangChain connection and vector database
        test_agent = create_agent("health_check")
        test_response = test_agent.invoke({"input": "Hello"})

        # Test vector database connection
        pc_index = get_pinecone_index()
        vector_db_status = "connected" if pc_index else "disconnected"

        return jsonify({
            "status": "healthy",
            "message": "Enhanced budget proposals chatbot with RAG is running",
            "langchain_status": "connected" if test_response else "disconnected",
            "vector_db_status": vector_db_status,
            "rag_enabled": True,
            "active_sessions": len(conversation_memories),
            "memory_enabled": True
        })
    except Exception as e:
        return jsonify({
            "status": "unhealthy",
            "message": f"Error: {str(e)}"
        }), 500


@app.route('/api/chat/debug/<session_id>', methods=['GET'])
def debug_session(session_id: str):
    """Debug endpoint to check session memory"""
    try:
        memory_exists = session_id in conversation_memories
        memory_info = {
            "session_id": session_id,
            "memory_exists": memory_exists,
            "total_sessions": len(conversation_memories),
            "session_keys": list(conversation_memories.keys())
        }
        if memory_exists:
            memory = conversation_memories[session_id]
            messages = memory.chat_memory.messages
            memory_info.update({
                "message_count": len(messages),
                "messages": [
                    {
                        "type": getattr(msg, 'type', 'unknown'),
                        "content": (getattr(msg, 'content', '')[:100] + "..."
                                    if len(getattr(msg, 'content', '')) > 100
                                    else getattr(msg, 'content', ''))
                    }
                    for msg in messages
                ]
            })
        return jsonify(memory_info)
    except Exception as e:
        logger.error(f"Debug session error: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/chat/suggestions', methods=['GET'])
def get_chat_suggestions():
    """Get suggested questions for the chatbot with multilingual support"""
    suggestions = [
        "What are the maternity leave benefits proposed? 🤱",
        "What are the industrial land expansion proposals? 🏭",
        "How do the cigarette tax proposals work? 💰",
        "What changes are proposed for electricity tariffs? ⚡",
        "Tell me about the EPF taxation removal proposals 💰",
        "What tax reforms are being suggested? 🏛️",
        "How will these proposals affect the economy? 📈",
        "What is the cost of implementing these proposals? 💵",
        "Can you compare the costs of different proposals? ⚖️",
        "What are the main benefits of these proposals? ✨",
        "Budget proposals gana kiyanna 📋",
        "EPF eka gana mokadda thiyenne? 💰",
        "Industrial land expansion kiyannako 🏭",
        "Electricity bill eka wenas wenawada? ⚡",
        "Maternity leave benefits kiyannako 🤱",
        "මේ budget proposals වල cost එක කීයද? 💵",
        "රජයේ ආර්ථික ප්‍රතිපත්ති ගැන කියන්න 🏛️"
    ]
    return jsonify({
        "suggestions": suggestions,
        "supported_languages": ["English", "Sinhala", "Singlish"]
    })


@app.route('/api/chat/available-pdfs', methods=['GET'])
def get_available_pdfs_endpoint():
    """Get list of available PDF files with short names for UI display"""
    try:
        available_pdfs = get_available_pdfs()

        # Create a list with both full names and short names
        pdf_list = []
        short_names = []
        for pdf in available_pdfs:
            short_name = get_short_document_name(pdf)
            pdf_list.append({
                "filename": pdf,
                "short_name": short_name,
                "type": "PDF" if pdf.endswith('.pdf') else "DOCX"
            })
            short_names.append(short_name)

        return jsonify({
            "available_pdfs": available_pdfs,
            "pdf_list": pdf_list,
            "short_names": short_names,  # Simple array for easy frontend use
            "count": len(available_pdfs),
            "pdf_directory": "Budget_Proposals copy-2/assets/pdfs"
        })
    except Exception as e:
        logger.error(f"Error getting available PDFs: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/chat/document-names', methods=['GET'])
def get_document_names():
    """Get document names with short names for UI display"""
    try:
        available_pdfs = get_available_pdfs()

        # Create a mapping of full names to short names
        document_mapping = {}
        for pdf in available_pdfs:
            document_mapping[pdf] = get_short_document_name(pdf)

        return jsonify({
            "document_mapping": document_mapping,
            "count": len(available_pdfs)
        })
    except Exception as e:
        logger.error(f"Error getting document names: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/chat/short-document-names', methods=['GET'])
def get_short_document_names():
    """Get just the short document names as a simple array for frontend display"""
    try:
        available_pdfs = get_available_pdfs()

        # Create a simple array of short names
        short_names = []
        for pdf in available_pdfs:
            short_names.append(get_short_document_name(pdf))

        return jsonify({
            "short_names": short_names,
            "count": len(short_names)
        })
    except Exception as e:
        logger.error(f"Error getting short document names: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/chat/document-buttons', methods=['GET'])
def get_document_buttons():
    """Get document names formatted specifically for UI buttons (simple strings only)"""
    try:
        available_pdfs = get_available_pdfs()

        # Create a simple array of just the short names as strings
        button_names = []
        for pdf in available_pdfs:
            button_names.append(get_short_document_name(pdf))

        # Return just the array of strings - no objects
        return jsonify(button_names)
    except Exception as e:
        logger.error(f"Error getting document buttons: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/api/chat/detect-language', methods=['POST'])
def detect_language():
    """Test language detection functionality"""
    try:
        data = request.get_json()
        text = data.get('text', '').strip()
        if not text:
            return jsonify({
                "error": "Text is required"
            }), 400

        processed_message, original_language, needs_translation, transliteration_used, ai_detection_used, confidence = simple_process_input(text)
        return jsonify({
            "original_text": text,
            "processed_text": processed_message,
            "language_detected": original_language,
            "translation_needed": needs_translation,
            "transliteration_used": transliteration_used,
            "ai_detection_used": ai_detection_used,
            "detection_confidence": confidence,
            "contains_sinhala": detect_sinhala_content(text),
            "is_singlish": detect_singlish(text)
        })
    except Exception as e:
        logger.error(f"Language detection error: {e}")
        return jsonify({"error": str(e)}), 500


@app.route('/', methods=['GET'])
def home():
    """Home endpoint with API documentation"""
    return jsonify({
        "message": "Multilingual Budget Proposals Chatbot API",
        "version": "2.1.0",
        "supported_languages": ["English", "Sinhala", "Tamil (Sri Lankan)", "Romanized Sinhala (Singlish)", "Romanized Tamil"],
        "features": ["RAG", "Memory", "Gemini-based transliteration and translation", "Pinecone Vector Store"],
        "pipeline": "User message → Language detection → Pinecone retrieval → Gemini (transliteration, translation, response) → Response",
        "endpoints": {
            "POST /api/chat": "Chat with memory, RAG, and multilingual support",
            "POST /api/chat/clear": "Clear chat memory",
            "GET /api/chat/sessions": "List active sessions",
            "GET /api/chat/history/<session_id>": "Get chat history",
            "GET /api/chat/health": "Health check",
            "GET /api/chat/suggestions": "Get suggested questions (multilingual)",
            "GET /api/chat/available-pdfs": "Get available PDF files with short names",
            "GET /api/chat/document-names": "Get document name mapping (full to short names)",
            "GET /api/chat/short-document-names": "Get simple array of short document names",
            "GET /api/chat/document-buttons": "Get document names as simple string array for UI buttons",
            "POST /api/chat/detect-language": "Test language detection"
        },
        "status": "running"
    })
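
# Example requests against a locally running instance (see app.run below, port 7860):
#   curl -X POST http://localhost:7860/api/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Tell me about the EPF taxation removal proposals", "session_id": "demo"}'
#   curl -X POST http://localhost:7860/api/chat/clear \
#        -H "Content-Type: application/json" -d '{"session_id": "demo"}'
#   curl http://localhost:7860/api/chat/health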


if __name__ == '__main__':
    app.run(debug=False, host='0.0.0.0', port=7860)