harshvardhan96 committed on
Commit 8e4e15e · 1 Parent(s): 1dc9349

created app.py

Files changed (1)
  1. app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
+ import tensorflow as tf
+ import numpy as np
+ import pickle
+ import gradio as gr
+ from tensorflow.keras.preprocessing import sequence
+
+ # Load the encoder model
+ enc_model = tf.keras.models.load_model('/kaggle/input/model-1/encoder_model.h5')
+
+ # Load the decoder model
+ dec_model = tf.keras.models.load_model('/kaggle/input/model-1/decoder_model.h5')
+
+ with open('/kaggle/input/tokenizer1/tokenizer.pkl', 'rb') as f:
+     tokenizer = pickle.load(f)
+
+ with open('/kaggle/input/tokenizer-params/tokenizer_params (1).pkl', 'rb') as f:
+     tokenizer_params = pickle.load(f)
+
+ maxlen_questions = tokenizer_params["maxlen_questions"]
+ maxlen_answers = tokenizer_params["maxlen_answers"]
+
+ def str_to_tokens(sentence: str):
+     words = sentence.lower().split()
+     tokens_list = list()
+
+     for word in words:
+         tokens_list.append(tokenizer.word_index[word])
+     return sequence.pad_sequences([tokens_list], maxlen=maxlen_questions, padding='post')
+
+ def chatbot_response(question):
+     states_values = enc_model.predict(str_to_tokens(question))
+     empty_target_seq = np.zeros((1, 1))
+     empty_target_seq[0, 0] = tokenizer.word_index['start']
+     stop_condition = False
+     decoded_translation = ''
+
+     while not stop_condition:
+         dec_outputs, h, c = dec_model.predict([empty_target_seq] + states_values)
+         sampled_word_index = np.argmax(dec_outputs[0, -1, :])
+         sampled_word = None
+
+         for word, index in tokenizer.word_index.items():
+             if sampled_word_index == index:
+                 decoded_translation += f' {word}'
+                 sampled_word = word
+
+         if sampled_word == 'end' or len(decoded_translation.split()) > maxlen_answers:
+             stop_condition = True
+
+         empty_target_seq = np.zeros((1, 1))
+         empty_target_seq[0, 0] = sampled_word_index
+         states_values = [h, c]
+
+     decoded_translation = decoded_translation.split(' end')[0]
+     return decoded_translation
+
+ # Gradio Interface
+ iface = gr.Interface(
+     fn=chatbot_response,
+     inputs=gr.inputs.Textbox(),
+     outputs=gr.outputs.Textbox(),
+     title="Chatbot",
+     description="Talk to the chatbot and it will respond!"
+ )
+
+ # Launch the Gradio interface on Hugging Face Spaces
+ iface.launch(share=True)
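
Note on the tokenization step above: str_to_tokens indexes tokenizer.word_index[word] directly, so any word outside the training vocabulary raises a KeyError before the encoder ever runs. A minimal sketch of a more tolerant variant is below; the name str_to_tokens_safe and its skip-unknown-words policy are illustrative assumptions, not part of this commit.

    from tensorflow.keras.preprocessing import sequence

    def str_to_tokens_safe(sentence, tokenizer, maxlen):
        # Sketch only: drop any word the tokenizer has never seen
        # instead of raising KeyError on out-of-vocabulary input.
        words = sentence.lower().split()
        token_ids = [tokenizer.word_index[w] for w in words if w in tokenizer.word_index]
        return sequence.pad_sequences([token_ids], maxlen=maxlen, padding='post')

In app.py this could be called as str_to_tokens_safe(question, tokenizer, maxlen_questions), leaving the rest of chatbot_response unchanged.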