dpnayak251 committed on
Commit
5e0a86e
·
verified ·
1 Parent(s): 8c14250

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +86 -86
app.py CHANGED
@@ -249,10 +249,10 @@ def translate_internal(text: str, src_lang: str, tgt_lang: str) -> str:
249
  # API Functions
250
  # ============================================
251
 
252
- def translate(text: str, source_lang: str, target_lang: str) -> dict:
253
  """Translate text between Indian languages"""
254
  if not text or not text.strip():
255
- return {"error": "No text provided", "translation": ""}
256
  try:
257
  tokenizer, model, model_name = get_translation_model()
258
 
@@ -275,21 +275,21 @@ def translate(text: str, source_lang: str, target_lang: str) -> dict:
275
 
276
  translation = tokenizer.decode(generated[0], skip_special_tokens=True)
277
 
278
- return {
279
  "translation": translation,
280
  "source_lang": source_lang,
281
  "target_lang": target_lang,
282
  "model": model_name
283
- }
284
  except Exception as e:
285
- return {"error": str(e), "translation": ""}
286
- def check_grammar(text: str, language: str = "english") -> dict:
287
  """
288
  Check and correct grammar in text.
289
  For Indian languages: Translate to English -> Correct -> Translate back
290
  """
291
  if not text or not text.strip():
292
- return {"error": "No text provided", "corrected": "", "suggestions": []}
293
  try:
294
  original_text = text
295
  is_indian = is_indian_language(language)
@@ -345,22 +345,22 @@ def check_grammar(text: str, language: str = "english") -> dict:
345
  "type": "grammar"
346
  })
347
 
348
- return {
349
  "corrected": corrected,
350
  "original": original_text,
351
  "suggestions": suggestions,
352
  "has_errors": has_errors,
353
  "language": language
354
- }
355
  except Exception as e:
356
- return {"error": str(e), "corrected": text, "original": text, "has_errors": False, "suggestions": []}
357
- def check_spelling(text: str, language: str = "english") -> dict:
358
  """
359
  Check spelling in text.
360
  For Indian languages: Uses translation-based correction
361
  """
362
  if not text or not text.strip():
363
- return {"error": "No text provided", "corrected": "", "misspelled": []}
364
  try:
365
  original_text = text
366
  is_indian = is_indian_language(language)
@@ -401,13 +401,13 @@ def check_spelling(text: str, language: str = "english") -> dict:
401
  corrected_words.append(word)
402
 
403
  corrected = ' '.join(corrected_words)
404
- return {
405
  "corrected": corrected,
406
  "original": original_text,
407
  "misspelled": misspelled,
408
  "has_errors": len(misspelled) > 0,
409
  "language": language
410
- }
411
  except ImportError:
412
  pass
413
 
@@ -418,13 +418,13 @@ def check_spelling(text: str, language: str = "english") -> dict:
418
 
419
  has_errors = corrected.strip() != original_text.strip()
420
 
421
- return {
422
  "corrected": corrected if has_errors else original_text,
423
  "original": original_text,
424
  "misspelled": [],
425
  "has_errors": has_errors,
426
  "language": language
427
- }
428
  # Fallback: use grammar model
429
  tokenizer, model = get_grammar_model()
430
 
@@ -444,21 +444,21 @@ def check_spelling(text: str, language: str = "english") -> dict:
444
  corrected = tokenizer.decode(outputs[0], skip_special_tokens=True)
445
  has_errors = corrected.strip() != original_text.strip()
446
 
447
- return {
448
  "corrected": corrected if has_errors else original_text,
449
  "original": original_text,
450
  "has_errors": has_errors,
451
  "language": language
452
- }
453
  except Exception as e:
454
- return {"error": str(e), "corrected": text, "original": text, "has_errors": False}
455
- def rewrite_text(text: str, style: str = "formal", language: str = "english") -> dict:
456
  """
457
  Rewrite text in different styles.
458
  Uses translation-based approach for Indian languages.
459
  """
460
  if not text or not text.strip():
461
- return {"error": "No text provided", "rewritten": ""}
462
  try:
463
  original_text = text
464
  is_indian = is_indian_language(language)
@@ -510,26 +510,26 @@ def rewrite_text(text: str, style: str = "formal", language: str = "english") ->
510
  else:
511
  rewritten = rewritten_english
512
 
513
- return {
514
  "rewritten": rewritten,
515
  "original": original_text,
516
  "style": style,
517
  "language": language
518
- }
519
  except Exception as e:
520
- return {"error": str(e), "rewritten": text}
521
- def summarize_text(text: str, max_length: int = 150) -> dict:
522
  """Summarize text (works for all languages via translation)"""
523
  if not text or not text.strip():
524
- return {"error": "No text provided", "summary": ""}
525
  word_count = len(text.split())
526
  if word_count < 20:
527
- return {
528
  "summary": text,
529
  "original_length": word_count,
530
  "summary_length": word_count,
531
  "note": "Text too short to summarize"
532
- }
533
  try:
534
  script = detect_script(text)
535
  is_indian = script != "latin"
@@ -580,18 +580,18 @@ def summarize_text(text: str, max_length: int = 150) -> dict:
580
  else:
581
  summary = summary_english
582
 
583
- return {
584
  "summary": summary,
585
  "original_length": word_count,
586
  "summary_length": len(summary.split()),
587
  "detected_language": detected_lang
588
- }
589
  except Exception as e:
590
- return {"error": str(e), "summary": ""}
591
- def paraphrase(text: str, language: str = "english") -> dict:
592
  """Paraphrase text while keeping the meaning"""
593
  if not text or not text.strip():
594
- return {"error": "No text provided", "paraphrased": ""}
595
  try:
596
  original_text = text
597
  is_indian = is_indian_language(language)
@@ -651,26 +651,26 @@ def paraphrase(text: str, language: str = "english") -> dict:
651
  else:
652
  paraphrased = paraphrased_english
653
 
654
- return {
655
  "paraphrased": paraphrased,
656
  "original": original_text,
657
  "language": language
658
- }
659
  except Exception as e:
660
- return {"error": str(e), "paraphrased": text}
661
  # ============================================
662
  # LANGUAGE TOOL API FUNCTIONS (38 Tools)
663
  # Using lightweight models optimized for CPU Basic
664
  # ============================================
665
 
666
- def tool_grammar_checker(text: str, language: str = "english", tool_type: str = "grammar-checker") -> dict:
667
  """
668
  Grammar checker for language tool pages.
669
  Uses grammarly/coedit-large model for better results.
670
  Supports: grammar-checker, sentence-corrector, paragraph-corrector, text-corrector, writing-checker
671
  """
672
  if not text or not text.strip():
673
- return {"error": "No text provided", "result": ""}
674
  try:
675
  original_text = text
676
  is_indian = is_indian_language(language)
@@ -714,22 +714,22 @@ def tool_grammar_checker(text: str, language: str = "english", tool_type: str =
714
  else:
715
  corrected = corrected_english
716
 
717
- return {
718
  "result": corrected,
719
  "original": original_text,
720
  "tool": tool_type,
721
  "language": language,
722
  "has_corrections": corrected.strip() != original_text.strip()
723
- }
724
  except Exception as e:
725
- return {"error": str(e), "result": text, "tool": tool_type}
726
- def tool_spell_checker(text: str, language: str = "english", tool_type: str = "spell-checker") -> dict:
727
  """
728
  Spell checker for language tool pages.
729
  Uses lighter model for: spell-checker, punctuation-checker, autocorrect, spell-check-online
730
  """
731
  if not text or not text.strip():
732
- return {"error": "No text provided", "result": ""}
733
  try:
734
  original_text = text
735
  is_indian = is_indian_language(language)
@@ -769,23 +769,23 @@ def tool_spell_checker(text: str, language: str = "english", tool_type: str = "s
769
  else:
770
  corrected = corrected_english
771
 
772
- return {
773
  "result": corrected,
774
  "original": original_text,
775
  "tool": tool_type,
776
  "language": language,
777
  "has_corrections": corrected.strip() != original_text.strip()
778
- }
779
  except Exception as e:
780
- return {"error": str(e), "result": text, "tool": tool_type}
781
- def tool_writing_assistant(text: str, language: str = "english", tool_type: str = "rewrite", style: str = "formal") -> dict:
782
  """
783
  Writing assistant for language tool pages.
784
  Uses flan-t5-base for: essay-corrector, content-writer, article-rewriter, blog-writer,
785
  proofreader, readability-checker, letter-writer, editor, paraphrasing-tool
786
  """
787
  if not text or not text.strip():
788
- return {"error": "No text provided", "result": ""}
789
  try:
790
  original_text = text
791
  is_indian = is_indian_language(language)
@@ -836,30 +836,30 @@ def tool_writing_assistant(text: str, language: str = "english", tool_type: str
836
  else:
837
  result = result_english
838
 
839
- return {
840
  "result": result,
841
  "original": original_text,
842
  "tool": tool_type,
843
  "language": language,
844
  "style": style
845
- }
846
  except Exception as e:
847
- return {"error": str(e), "result": text, "tool": tool_type}
848
- def tool_summarizer(text: str, language: str = "english", max_length: int = 150) -> dict:
849
  """
850
  Summarizer for language tool pages.
851
  Uses flan-t5-base model.
852
  """
853
  if not text or not text.strip():
854
- return {"error": "No text provided", "result": ""}
855
  word_count = len(text.split())
856
  if word_count < 20:
857
- return {
858
  "result": text,
859
  "original_length": word_count,
860
  "summary_length": word_count,
861
  "note": "Text too short to summarize"
862
- }
863
  try:
864
  is_indian = is_indian_language(language)
865
  original_text = text
@@ -891,36 +891,36 @@ def tool_summarizer(text: str, language: str = "english", max_length: int = 150)
891
  else:
892
  summary = summary_english
893
 
894
- return {
895
  "result": summary,
896
  "original": original_text,
897
  "original_length": word_count,
898
  "summary_length": len(summary.split()),
899
  "language": language,
900
  "tool": "summarizer"
901
- }
902
  except Exception as e:
903
- return {"error": str(e), "result": text, "tool": "summarizer"}
904
- def tool_translator(text: str, source_lang: str = "english", target_lang: str = "hindi") -> dict:
905
  """
906
  Translator for language tool pages.
907
  Uses NLLB translation model - same as main translate function.
908
  """
909
  if not text or not text.strip():
910
- return {"error": "No text provided", "result": ""}
911
  try:
912
  translated = translate_internal(text, source_lang, target_lang)
913
 
914
- return {
915
  "result": translated,
916
  "original": text,
917
  "source_language": source_lang,
918
  "target_language": target_lang,
919
  "tool": "translator"
920
- }
921
  except Exception as e:
922
- return {"error": str(e), "result": text, "tool": "translator"}
923
- def tool_generic(text: str, language: str = "english", tool_type: str = "generic") -> dict:
924
  """
925
  Generic tool handler for tools that don't need heavy AI processing.
926
  Returns processed result based on tool type.
@@ -928,7 +928,7 @@ def tool_generic(text: str, language: str = "english", tool_type: str = "generic
928
  typing-tool, font-converter, unicode-converter, script-converter
929
  """
930
  if not text or not text.strip():
931
- return {"error": "No text provided", "result": ""}
932
  try:
933
  result_data = {
934
  "original": text,
@@ -975,15 +975,15 @@ def tool_generic(text: str, language: str = "english", tool_type: str = "generic
975
  return result_data
976
 
977
  except Exception as e:
978
- return {"error": str(e), "result": text, "tool": tool_type}
979
- def tool_transliterate(text: str, source_lang: str = "english", target_lang: str = "hindi", mode: str = "roman") -> dict:
980
  """
981
  Transliterate text between scripts.
982
  Modes: roman (English to target script), script (between Indian scripts)
983
  Uses translation model as a proxy for transliteration.
984
  """
985
  if not text or not text.strip():
986
- return {"error": "No text provided", "result": ""}
987
  try:
988
  # Use translation as proxy for transliteration
989
  # For roman typing: English -> target language
@@ -993,24 +993,24 @@ def tool_transliterate(text: str, source_lang: str = "english", target_lang: str
993
  # Script conversion: source script -> target script
994
  result = translate_internal(text, source_lang, target_lang)
995
 
996
- return {
997
  "result": result,
998
  "original": text,
999
  "source_language": source_lang,
1000
  "target_language": target_lang,
1001
  "mode": mode,
1002
  "tool": "transliterate"
1003
- }
1004
  except Exception as e:
1005
- return {"error": str(e), "result": text, "tool": "transliterate"}
1006
- def tool_dictionary(text: str, language: str = "english", mode: str = "meaning") -> dict:
1007
  """
1008
  Dictionary/Thesaurus tool.
1009
  Modes: meaning (dictionary), synonyms (thesaurus)
1010
  Uses AI to provide definitions and synonyms.
1011
  """
1012
  if not text or not text.strip():
1013
- return {"error": "No text provided", "result": ""}
1014
  try:
1015
  tokenizer, model, _ = get_text_generation_model()
1016
 
@@ -1040,23 +1040,23 @@ def tool_dictionary(text: str, language: str = "english", mode: str = "meaning")
1040
  if is_indian_language(language):
1041
  result = translate_internal(result, "english", language)
1042
 
1043
- return {
1044
  "result": result,
1045
  "original": text,
1046
  "language": language,
1047
  "mode": mode,
1048
  "tool": "dictionary" if mode == "meaning" else "thesaurus"
1049
- }
1050
  except Exception as e:
1051
- return {"error": str(e), "result": text, "tool": "dictionary"}
1052
- def tool_generate(text: str, language: str = "english", mode: str = "sentence", count: int = 2) -> dict:
1053
  """
1054
  Text generation tool for various purposes.
1055
  Modes: sentence (make sentences), content (write content), letter (write letter),
1056
  blog (write blog), caption (generate captions), vocabulary (word lists)
1057
  """
1058
  if not text or not text.strip():
1059
- return {"error": "No text provided", "result": ""}
1060
  try:
1061
  tokenizer, model, _ = get_text_generation_model()
1062
 
@@ -1099,23 +1099,23 @@ def tool_generate(text: str, language: str = "english", mode: str = "sentence",
1099
  else:
1100
  result = result_english
1101
 
1102
- return {
1103
  "result": result,
1104
  "original": text,
1105
  "language": language,
1106
  "mode": mode,
1107
  "count": count,
1108
  "tool": "generator"
1109
- }
1110
  except Exception as e:
1111
- return {"error": str(e), "result": text, "tool": "generator"}
1112
- def tool_analyze(text: str, language: str = "english", mode: str = "quality") -> dict:
1113
  """
1114
  Text analysis tool for quality and readability.
1115
  Modes: quality (writing quality score), email (email analysis), readability
1116
  """
1117
  if not text or not text.strip():
1118
- return {"error": "No text provided", "result": ""}
1119
  try:
1120
  original_text = text
1121
  is_indian = is_indian_language(language)
@@ -1174,7 +1174,7 @@ def tool_analyze(text: str, language: str = "english", mode: str = "quality") ->
1174
  else:
1175
  quality_level = "Needs Improvement"
1176
 
1177
- return {
1178
  "result": suggestion,
1179
  "original": original_text,
1180
  "language": language,
@@ -1188,9 +1188,9 @@ def tool_analyze(text: str, language: str = "english", mode: str = "quality") ->
1188
  "readability_score": round(readability_score),
1189
  "quality_level": quality_level
1190
  }
1191
- }
1192
  except Exception as e:
1193
- return {"error": str(e), "result": text, "tool": "analyzer"}
1194
 
1195
 
1196
  # ============================================
 
249
  # API Functions
250
  # ============================================
251
 
252
+ def translate(text: str, source_lang: str, target_lang: str) -> str:
253
  """Translate text between Indian languages"""
254
  if not text or not text.strip():
255
+ return json.dumps({"error": "No text provided", "translation": ""})
256
  try:
257
  tokenizer, model, model_name = get_translation_model()
258
 
 
275
 
276
  translation = tokenizer.decode(generated[0], skip_special_tokens=True)
277
 
278
+ return json.dumps({
279
  "translation": translation,
280
  "source_lang": source_lang,
281
  "target_lang": target_lang,
282
  "model": model_name
283
+ })
284
  except Exception as e:
285
+ return json.dumps({"error": str(e), "translation": ""})
286
+ def check_grammar(text: str, language: str = "english") -> str:
287
  """
288
  Check and correct grammar in text.
289
  For Indian languages: Translate to English -> Correct -> Translate back
290
  """
291
  if not text or not text.strip():
292
+ return json.dumps({"error": "No text provided", "corrected": "", "suggestions": []})
293
  try:
294
  original_text = text
295
  is_indian = is_indian_language(language)
 
345
  "type": "grammar"
346
  })
347
 
348
+ return json.dumps({
349
  "corrected": corrected,
350
  "original": original_text,
351
  "suggestions": suggestions,
352
  "has_errors": has_errors,
353
  "language": language
354
+ })
355
  except Exception as e:
356
+ return json.dumps({"error": str(e), "corrected": text, "original": text, "has_errors": False, "suggestions": []})
357
+ def check_spelling(text: str, language: str = "english") -> str:
358
  """
359
  Check spelling in text.
360
  For Indian languages: Uses translation-based correction
361
  """
362
  if not text or not text.strip():
363
+ return json.dumps({"error": "No text provided", "corrected": "", "misspelled": []})
364
  try:
365
  original_text = text
366
  is_indian = is_indian_language(language)
 
401
  corrected_words.append(word)
402
 
403
  corrected = ' '.join(corrected_words)
404
+ return json.dumps({
405
  "corrected": corrected,
406
  "original": original_text,
407
  "misspelled": misspelled,
408
  "has_errors": len(misspelled) > 0,
409
  "language": language
410
+ })
411
  except ImportError:
412
  pass
413
 
 
418
 
419
  has_errors = corrected.strip() != original_text.strip()
420
 
421
+ return json.dumps({
422
  "corrected": corrected if has_errors else original_text,
423
  "original": original_text,
424
  "misspelled": [],
425
  "has_errors": has_errors,
426
  "language": language
427
+ })
428
  # Fallback: use grammar model
429
  tokenizer, model = get_grammar_model()
430
 
 
444
  corrected = tokenizer.decode(outputs[0], skip_special_tokens=True)
445
  has_errors = corrected.strip() != original_text.strip()
446
 
447
+ return json.dumps({
448
  "corrected": corrected if has_errors else original_text,
449
  "original": original_text,
450
  "has_errors": has_errors,
451
  "language": language
452
+ })
453
  except Exception as e:
454
+ return json.dumps({"error": str(e), "corrected": text, "original": text, "has_errors": False})
455
+ def rewrite_text(text: str, style: str = "formal", language: str = "english") -> str:
456
  """
457
  Rewrite text in different styles.
458
  Uses translation-based approach for Indian languages.
459
  """
460
  if not text or not text.strip():
461
+ return json.dumps({"error": "No text provided", "rewritten": ""})
462
  try:
463
  original_text = text
464
  is_indian = is_indian_language(language)
 
510
  else:
511
  rewritten = rewritten_english
512
 
513
+ return json.dumps({
514
  "rewritten": rewritten,
515
  "original": original_text,
516
  "style": style,
517
  "language": language
518
+ })
519
  except Exception as e:
520
+ return json.dumps({"error": str(e), "rewritten": text})
521
+ def summarize_text(text: str, max_length: int = 150) -> str:
522
  """Summarize text (works for all languages via translation)"""
523
  if not text or not text.strip():
524
+ return json.dumps({"error": "No text provided", "summary": ""})
525
  word_count = len(text.split())
526
  if word_count < 20:
527
+ return json.dumps({
528
  "summary": text,
529
  "original_length": word_count,
530
  "summary_length": word_count,
531
  "note": "Text too short to summarize"
532
+ })
533
  try:
534
  script = detect_script(text)
535
  is_indian = script != "latin"
 
580
  else:
581
  summary = summary_english
582
 
583
+ return json.dumps({
584
  "summary": summary,
585
  "original_length": word_count,
586
  "summary_length": len(summary.split()),
587
  "detected_language": detected_lang
588
+ })
589
  except Exception as e:
590
+ return json.dumps({"error": str(e), "summary": ""})
591
+ def paraphrase(text: str, language: str = "english") -> str:
592
  """Paraphrase text while keeping the meaning"""
593
  if not text or not text.strip():
594
+ return json.dumps({"error": "No text provided", "paraphrased": ""})
595
  try:
596
  original_text = text
597
  is_indian = is_indian_language(language)
 
651
  else:
652
  paraphrased = paraphrased_english
653
 
654
+ return json.dumps({
655
  "paraphrased": paraphrased,
656
  "original": original_text,
657
  "language": language
658
+ })
659
  except Exception as e:
660
+ return json.dumps({"error": str(e), "paraphrased": text})
661
  # ============================================
662
  # LANGUAGE TOOL API FUNCTIONS (38 Tools)
663
  # Using lightweight models optimized for CPU Basic
664
  # ============================================
665
 
666
+ def tool_grammar_checker(text: str, language: str = "english", tool_type: str = "grammar-checker") -> str:
667
  """
668
  Grammar checker for language tool pages.
669
  Uses grammarly/coedit-large model for better results.
670
  Supports: grammar-checker, sentence-corrector, paragraph-corrector, text-corrector, writing-checker
671
  """
672
  if not text or not text.strip():
673
+ return json.dumps({"error": "No text provided", "result": ""})
674
  try:
675
  original_text = text
676
  is_indian = is_indian_language(language)
 
714
  else:
715
  corrected = corrected_english
716
 
717
+ return json.dumps({
718
  "result": corrected,
719
  "original": original_text,
720
  "tool": tool_type,
721
  "language": language,
722
  "has_corrections": corrected.strip() != original_text.strip()
723
+ })
724
  except Exception as e:
725
+ return json.dumps({"error": str(e), "result": text, "tool": tool_type})
726
+ def tool_spell_checker(text: str, language: str = "english", tool_type: str = "spell-checker") -> str:
727
  """
728
  Spell checker for language tool pages.
729
  Uses lighter model for: spell-checker, punctuation-checker, autocorrect, spell-check-online
730
  """
731
  if not text or not text.strip():
732
+ return json.dumps({"error": "No text provided", "result": ""})
733
  try:
734
  original_text = text
735
  is_indian = is_indian_language(language)
 
769
  else:
770
  corrected = corrected_english
771
 
772
+ return json.dumps({
773
  "result": corrected,
774
  "original": original_text,
775
  "tool": tool_type,
776
  "language": language,
777
  "has_corrections": corrected.strip() != original_text.strip()
778
+ })
779
  except Exception as e:
780
+ return json.dumps({"error": str(e), "result": text, "tool": tool_type})
781
+ def tool_writing_assistant(text: str, language: str = "english", tool_type: str = "rewrite", style: str = "formal") -> str:
782
  """
783
  Writing assistant for language tool pages.
784
  Uses flan-t5-base for: essay-corrector, content-writer, article-rewriter, blog-writer,
785
  proofreader, readability-checker, letter-writer, editor, paraphrasing-tool
786
  """
787
  if not text or not text.strip():
788
+ return json.dumps({"error": "No text provided", "result": ""})
789
  try:
790
  original_text = text
791
  is_indian = is_indian_language(language)
 
836
  else:
837
  result = result_english
838
 
839
+ return json.dumps({
840
  "result": result,
841
  "original": original_text,
842
  "tool": tool_type,
843
  "language": language,
844
  "style": style
845
+ })
846
  except Exception as e:
847
+ return json.dumps({"error": str(e), "result": text, "tool": tool_type})
848
+ def tool_summarizer(text: str, language: str = "english", max_length: int = 150) -> str:
849
  """
850
  Summarizer for language tool pages.
851
  Uses flan-t5-base model.
852
  """
853
  if not text or not text.strip():
854
+ return json.dumps({"error": "No text provided", "result": ""})
855
  word_count = len(text.split())
856
  if word_count < 20:
857
+ return json.dumps({
858
  "result": text,
859
  "original_length": word_count,
860
  "summary_length": word_count,
861
  "note": "Text too short to summarize"
862
+ })
863
  try:
864
  is_indian = is_indian_language(language)
865
  original_text = text
 
891
  else:
892
  summary = summary_english
893
 
894
+ return json.dumps({
895
  "result": summary,
896
  "original": original_text,
897
  "original_length": word_count,
898
  "summary_length": len(summary.split()),
899
  "language": language,
900
  "tool": "summarizer"
901
+ })
902
  except Exception as e:
903
+ return json.dumps({"error": str(e), "result": text, "tool": "summarizer"})
904
+ def tool_translator(text: str, source_lang: str = "english", target_lang: str = "hindi") -> str:
905
  """
906
  Translator for language tool pages.
907
  Uses NLLB translation model - same as main translate function.
908
  """
909
  if not text or not text.strip():
910
+ return json.dumps({"error": "No text provided", "result": ""})
911
  try:
912
  translated = translate_internal(text, source_lang, target_lang)
913
 
914
+ return json.dumps({
915
  "result": translated,
916
  "original": text,
917
  "source_language": source_lang,
918
  "target_language": target_lang,
919
  "tool": "translator"
920
+ })
921
  except Exception as e:
922
+ return json.dumps({"error": str(e), "result": text, "tool": "translator"})
923
+ def tool_generic(text: str, language: str = "english", tool_type: str = "generic") -> str:
924
  """
925
  Generic tool handler for tools that don't need heavy AI processing.
926
  Returns processed result based on tool type.
 
928
  typing-tool, font-converter, unicode-converter, script-converter
929
  """
930
  if not text or not text.strip():
931
+ return json.dumps({"error": "No text provided", "result": ""})
932
  try:
933
  result_data = {
934
  "original": text,
 
975
  return result_data
976
 
977
  except Exception as e:
978
+ return json.dumps({"error": str(e), "result": text, "tool": tool_type})
979
+ def tool_transliterate(text: str, source_lang: str = "english", target_lang: str = "hindi", mode: str = "roman") -> str:
980
  """
981
  Transliterate text between scripts.
982
  Modes: roman (English to target script), script (between Indian scripts)
983
  Uses translation model as a proxy for transliteration.
984
  """
985
  if not text or not text.strip():
986
+ return json.dumps({"error": "No text provided", "result": ""})
987
  try:
988
  # Use translation as proxy for transliteration
989
  # For roman typing: English -> target language
 
993
  # Script conversion: source script -> target script
994
  result = translate_internal(text, source_lang, target_lang)
995
 
996
+ return json.dumps({
997
  "result": result,
998
  "original": text,
999
  "source_language": source_lang,
1000
  "target_language": target_lang,
1001
  "mode": mode,
1002
  "tool": "transliterate"
1003
+ })
1004
  except Exception as e:
1005
+ return json.dumps({"error": str(e), "result": text, "tool": "transliterate"})
1006
+ def tool_dictionary(text: str, language: str = "english", mode: str = "meaning") -> str:
1007
  """
1008
  Dictionary/Thesaurus tool.
1009
  Modes: meaning (dictionary), synonyms (thesaurus)
1010
  Uses AI to provide definitions and synonyms.
1011
  """
1012
  if not text or not text.strip():
1013
+ return json.dumps({"error": "No text provided", "result": ""})
1014
  try:
1015
  tokenizer, model, _ = get_text_generation_model()
1016
 
 
1040
  if is_indian_language(language):
1041
  result = translate_internal(result, "english", language)
1042
 
1043
+ return json.dumps({
1044
  "result": result,
1045
  "original": text,
1046
  "language": language,
1047
  "mode": mode,
1048
  "tool": "dictionary" if mode == "meaning" else "thesaurus"
1049
+ })
1050
  except Exception as e:
1051
+ return json.dumps({"error": str(e), "result": text, "tool": "dictionary"})
1052
+ def tool_generate(text: str, language: str = "english", mode: str = "sentence", count: int = 2) -> str:
1053
  """
1054
  Text generation tool for various purposes.
1055
  Modes: sentence (make sentences), content (write content), letter (write letter),
1056
  blog (write blog), caption (generate captions), vocabulary (word lists)
1057
  """
1058
  if not text or not text.strip():
1059
+ return json.dumps({"error": "No text provided", "result": ""})
1060
  try:
1061
  tokenizer, model, _ = get_text_generation_model()
1062
 
 
1099
  else:
1100
  result = result_english
1101
 
1102
+ return json.dumps({
1103
  "result": result,
1104
  "original": text,
1105
  "language": language,
1106
  "mode": mode,
1107
  "count": count,
1108
  "tool": "generator"
1109
+ })
1110
  except Exception as e:
1111
+ return json.dumps({"error": str(e), "result": text, "tool": "generator"})
1112
+ def tool_analyze(text: str, language: str = "english", mode: str = "quality") -> str:
1113
  """
1114
  Text analysis tool for quality and readability.
1115
  Modes: quality (writing quality score), email (email analysis), readability
1116
  """
1117
  if not text or not text.strip():
1118
+ return json.dumps({"error": "No text provided", "result": ""})
1119
  try:
1120
  original_text = text
1121
  is_indian = is_indian_language(language)
 
1174
  else:
1175
  quality_level = "Needs Improvement"
1176
 
1177
+ return json.dumps({
1178
  "result": suggestion,
1179
  "original": original_text,
1180
  "language": language,
 
1188
  "readability_score": round(readability_score),
1189
  "quality_level": quality_level
1190
  }
1191
+ })
1192
  except Exception as e:
1193
+ return json.dumps({"error": str(e), "result": text, "tool": "analyzer"})
1194
 
1195
 
1196
  # ============================================