Update app.py
app.py CHANGED
@@ -13,28 +13,32 @@ client_aux2 = InferenceClient(token=HF_TOKEN, model="facebook/bart-large-cnn")
 # Main response function
 def respond(message, history, system_message, max_tokens, temperature, top_p):
     try:
-        # --- Step 1: Llama 3.1
-
+        # --- Step 1: Llama 3.1 via ProxyClientChat ---
+        chat = client_main.chat  # a chat object, not a callable
+        chat.clear_messages()  # clear the object's previous messages (optional)
+
+        # Add the history messages
+        chat.add_message("system", system_message)
         for h in history:
-
-
+            chat.add_message(h['role'], h['content'])
+        chat.add_message("user", message)

-
-
+        # Generate the response
+        response_main_obj = chat.send_message(
             max_new_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p
         )
-        response_main =
+        response_main = response_main_obj.content  # grab the generated text

-        # --- Step 2: FLAN-T5 (
+        # --- Step 2: FLAN-T5 (rewording) ---
         result_aux1 = client_aux1.text_generation(
             prompt=f"Reformule este texto de forma clara e concisa:\n{response_main}",
             max_new_tokens=max_tokens
         )
         response_aux1 = result_aux1.generated_text

-        # --- Step 3: BART (
+        # --- Step 3: BART (3-sentence summary) ---
         result_aux2 = client_aux2.text_generation(
             prompt=f"Resuma este texto em 3 frases:\n{response_aux1}",
             max_new_tokens=150
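The chat / add_message / send_message interface introduced in step 1 is not part of huggingface_hub's InferenceClient, so client_main is presumably a custom ProxyClientChat wrapper defined elsewhere in this Space. A minimal sketch of what such a wrapper might look like, assuming it delegates to InferenceClient.chat_completion (a real method; the class name, attributes, and kwarg translation below are guesses from the calls in the diff):

from huggingface_hub import InferenceClient

class ProxyClientChat:
    """Hypothetical wrapper sketch; the real class is not shown in this hunk."""

    def __init__(self, client: InferenceClient):
        self._client = client
        self._messages = []
        self.chat = self  # so `client_main.chat` hands back the chat object

    def clear_messages(self):
        self._messages = []

    def add_message(self, role, content):
        self._messages.append({"role": role, "content": content})

    def send_message(self, max_new_tokens=None, temperature=None, top_p=None):
        # chat_completion takes max_tokens, so the wrapper translates the name
        out = self._client.chat_completion(
            messages=self._messages,
            max_tokens=max_new_tokens,
            temperature=temperature,
            top_p=top_p,
        )
        msg = out.choices[0].message  # has .role and .content
        self._messages.append({"role": msg.role, "content": msg.content})
        return msg  # matches `response_main_obj.content` in the diff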
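One caveat for steps 2 and 3: if client_aux1 and client_aux2 are plain huggingface_hub.InferenceClient instances, as the client_aux2 = InferenceClient(...) line in the hunk header suggests, then text_generation returns a bare string by default, and .generated_text only exists on the detailed output object. A safer version of the step 2 call passes details=True:

# text_generation returns str by default; details=True makes it return a
# TextGenerationOutput, which is what carries the .generated_text attribute.
result_aux1 = client_aux1.text_generation(
    prompt=f"Reformule este texto de forma clara e concisa:\n{response_main}",
    max_new_tokens=max_tokens,
    details=True,
)
response_aux1 = result_aux1.generated_text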
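The respond signature and the h['role'] / h['content'] loop match what a Gradio ChatInterface with message-style history and extra inputs would pass, so a direct call can serve as a local smoke test. Sample values only; whether respond returns or yields its final string is not visible in this hunk, so the print below is an assumption:

# Hypothetical local smoke test for respond()
sample_history = [
    {"role": "user", "content": "Hi there"},
    {"role": "assistant", "content": "Hello! How can I help?"},
]
print(respond(
    message="Summarize what this app does.",
    history=sample_history,
    system_message="You are a helpful assistant.",
    max_tokens=256,
    temperature=0.7,
    top_p=0.9,
))  # assumes respond() returns the final text rather than yielding it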