kawre committed (verified)
Commit 05fce26 · 1 Parent(s): 23b51bf

Update app.py

Files changed (1)
  1. app.py +10 -15
app.py CHANGED
@@ -18,50 +18,45 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
 
         # --- Passo 1: Llama 3.1 ---
         result_main = client_main.text_generation(
-            inputs=full_prompt,
-            max_tokens=max_tokens,
+            prompt=full_prompt,
+            max_new_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p
         )
-        response_main = result_main[0]["generated_text"]
+        response_main = result_main.generated_text
 
         # --- Passo 2: FLAN-T5 ---
         result_aux1 = client_aux1.text_generation(
-            inputs=f"Reformule este texto de forma clara:\n{response_main}",
+            prompt=f"Reformule este texto de forma clara:\n{response_main}",
             max_new_tokens=max_tokens
         )
-        response_aux1 = result_aux1[0]["generated_text"]
+        response_aux1 = result_aux1.generated_text
 
         # --- Passo 3: BART ---
         result_aux2 = client_aux2.text_generation(
-            inputs=f"Resuma este texto em 3 frases:\n{response_aux1}",
+            prompt=f"Resuma este texto em 3 frases:\n{response_aux1}",
             max_new_tokens=150
         )
-        response_aux2 = result_aux2[0]["generated_text"]
+        response_aux2 = result_aux2.generated_text
 
     except Exception as e:
         response_aux2 = f"Erro ao gerar resposta: {e}"
 
     # Atualiza histórico no formato correto para o Gradio Chatbot
-    history.append({"role": "user", "content": message})
-    history.append({"role": "assistant", "content": response_aux2})
-
-    return history, history
-
-
-    # Atualiza histórico do chat
     history.append({"role": "user", "content": message})
     history.append({"role": "assistant", "content": response_aux2})
 
-    return response_aux2, history
+    return history, history
 
 # Interface Gradio
 with gr.Blocks() as demo:
     gr.Markdown("## 🤖 Chatbot em Cascata (Llama 3.1 + FLAN-T5 + BART)")
+
     system_message = gr.Textbox(
         value="Você é um chatbot amigável e prestativo.",
         label="System Message"
     )
+
     chatbot = gr.Chatbot()
     msg = gr.Textbox(label="Digite sua mensagem")
     max_tokens = gr.Slider(50, 2048, 512, step=50, label="Max Tokens")
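
For reference, a minimal standalone sketch of the call pattern this commit switches to, assuming huggingface_hub's InferenceClient (the model ID, prompt text, and history contents below are illustrative, not taken from the commit). Note that InferenceClient.text_generation returns a plain string by default; the attribute access result_main.generated_text used in the new code works when details=True is passed, so the sketch includes that flag.

    from huggingface_hub import InferenceClient

    # Illustrative model ID; the commit does not show how client_main is constructed.
    client_main = InferenceClient("meta-llama/Llama-3.1-8B-Instruct")

    # prompt= and max_new_tokens= are the keyword names the commit switches to.
    # details=True makes the call return an object exposing .generated_text,
    # matching the new access pattern; without it, a plain string is returned.
    result_main = client_main.text_generation(
        prompt="Explain machine learning in one sentence.",
        max_new_tokens=128,
        temperature=0.7,
        top_p=0.9,
        details=True,
    )
    response_main = result_main.generated_text

    # Chat history in the dict format the updated respond() appends to; returning
    # the same list twice mirrors the new `return history, history`.
    history = []
    history.append({"role": "user", "content": "Explain machine learning in one sentence."})
    history.append({"role": "assistant", "content": response_main})
    print(response_main)

The same pattern applies to the FLAN-T5 and BART steps in the diff; only the prompt text and max_new_tokens value change between steps.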