Update app.py
app.py CHANGED
@@ -13,26 +13,29 @@ client_aux2 = InferenceClient(token=HF_TOKEN, model="facebook/bart-large-cnn")
 # Main response function
 def respond(message, history, system_message, max_tokens, temperature, top_p):
     try:
-        #
-
+        # --- Build conversation context ---
+        conversation_context = "\n".join(
+            [f"Usuário: {h['content']}" if h['role']=='user' else f"Assistente: {h['content']}" for h in history]
+        )
+        full_prompt = f"{system_message}\n{conversation_context}\nUsuário: {message}"
 
-        # --- Step 1: Llama 3.1
-        result_main = client_main.
-        prompt=full_prompt,
+        # --- Step 1: Llama 3.1 ---
+        result_main = client_main.text_generation(
+            prompt=full_prompt,
             max_new_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p
         )
-        response_main = result_main.
+        response_main = result_main.generated_text
 
-        # --- Step 2: FLAN-T5 ---
+        # --- Step 2: FLAN-T5 (rewrites the text) ---
         result_aux1 = client_aux1.text_generation(
-            prompt=f"Reformule este texto de forma clara:\n{response_main}",
+            prompt=f"Reformule este texto de forma clara e concisa:\n{response_main}",
             max_new_tokens=max_tokens
         )
         response_aux1 = result_aux1.generated_text
 
-        # --- Step 3: BART ---
+        # --- Step 3: BART (summarizes in 3 sentences) ---
         result_aux2 = client_aux2.text_generation(
             prompt=f"Resuma este texto em 3 frases:\n{response_aux1}",
             max_new_tokens=150

@@ -42,7 +45,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
     except Exception as e:
         response_aux2 = f"Erro ao gerar resposta: {e}"
 
-    # Update history in the format
+    # Update history in Gradio Chatbot format
     history.append({"role": "user", "content": message})
     history.append({"role": "assistant", "content": response_aux2})
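For reference, the commit only touches the body of respond(); the client setup and UI wiring live outside the hunks. The sketch below shows one way the full app.py plausibly fits together. It is a minimal reconstruction under stated assumptions, not the Space's actual code: the Llama 3.1 and FLAN-T5 repo ids, the environment-variable source of HF_TOKEN, and the Gradio Blocks wiring are all assumptions (only facebook/bart-large-cnn and the respond() signature are visible in the diff). Note also that current huggingface_hub versions return the generated string directly from InferenceClient.text_generation(), so the sketch drops the diff's .generated_text access, which would require details=True or an older client.

import os

import gradio as gr
from huggingface_hub import InferenceClient

# Assumption: the token is read from the Space's secrets/environment.
HF_TOKEN = os.environ.get("HF_TOKEN")

# Only client_aux2's model is visible in the diff; the other two repo ids
# below are placeholders for whichever Llama 3.1 and FLAN-T5 checkpoints
# the Space actually uses.
client_main = InferenceClient(token=HF_TOKEN, model="meta-llama/Llama-3.1-8B-Instruct")
client_aux1 = InferenceClient(token=HF_TOKEN, model="google/flan-t5-large")
client_aux2 = InferenceClient(token=HF_TOKEN, model="facebook/bart-large-cnn")

# Main response function, as in the diff, but using the string-returning
# text_generation API of current huggingface_hub.
def respond(message, history, system_message, max_tokens, temperature, top_p):
    try:
        # --- Build conversation context from prior role/content messages ---
        conversation_context = "\n".join(
            f"Usuário: {h['content']}" if h["role"] == "user" else f"Assistente: {h['content']}"
            for h in history
        )
        full_prompt = f"{system_message}\n{conversation_context}\nUsuário: {message}"

        # --- Step 1: Llama 3.1 drafts the answer ---
        response_main = client_main.text_generation(
            prompt=full_prompt,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
        )

        # --- Step 2: FLAN-T5 rewrites the draft ---
        response_aux1 = client_aux1.text_generation(
            prompt=f"Reformule este texto de forma clara e concisa:\n{response_main}",
            max_new_tokens=max_tokens,
        )

        # --- Step 3: BART condenses the rewrite ---
        # facebook/bart-large-cnn is a summarization checkpoint; if the
        # text-generation route is rejected by the Inference API,
        # client_aux2.summarization(response_aux1).summary_text is the
        # task-matched alternative.
        response_aux2 = client_aux2.text_generation(
            prompt=f"Resuma este texto em 3 frases:\n{response_aux1}",
            max_new_tokens=150,
        )
    except Exception as e:
        response_aux2 = f"Erro ao gerar resposta: {e}"

    # Update history in the role/content format used by gr.Chatbot(type="messages")
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": response_aux2})
    return history

# Assumed wiring: respond() appends to and returns the full history, which
# matches a Blocks layout with an explicit Chatbot rather than
# gr.ChatInterface (ChatInterface expects the function to return only the
# new reply, not the whole history).
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(label="Mensagem")
    system_message = gr.Textbox(value="Você é um assistente útil.", label="System message")
    max_tokens = gr.Slider(1, 2048, value=512, step=1, label="Max new tokens")
    temperature = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")

    msg.submit(
        respond,
        inputs=[msg, chatbot, system_message, max_tokens, temperature, top_p],
        outputs=chatbot,
    )

if __name__ == "__main__":
    demo.launch()

One design note on the pipeline itself: chaining three hosted models means three network round-trips per message, and the single try/except collapses a failure at any stage into one user-visible error string. That keeps the UI responsive, but it hides which stage failed; logging the exception per step would make the chain easier to debug.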