eberhenriquez94 committed
Commit d92e247 · verified · 1 Parent(s): 53888ba
Files changed (1)
  1. app.py +17 -11
app.py CHANGED
@@ -205,7 +205,7 @@ async def google_response(borrador):
 
 async def nvidia_response(borrador):
     try:
-        messages = [nvidia_system_message, {"role": "user", "content": borrador}]
+        messages = [{"role": "user", "content": borrador}]  # system role omitted
         completion = await asyncio.to_thread(
             nvidia_client.chat.completions.create,
             model="meta/llama-3.1-405b-instruct",
@@ -242,7 +242,15 @@ async def procesar_respuestas(borrador, chat_historial):
     yield chat_historial, "Listo", google_result, nvidia_result
 
 
-# User interface configuration (changes in submit_button.click and clear_button.click)
+async def predict(borrador):
+    chat_historial = []
+    results = []
+    async for chatbot, status, google_result, nvidia_result in procesar_respuestas(borrador, chat_historial):
+        results = [chatbot, status, google_result, nvidia_result]
+
+    return results
+
+# User interface configuration
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown("# Comparador de Modelos Jurídicos: Google Gemini vs NVIDIA")
     with gr.Row():
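Note: predict drains the async generator and returns only the final tuple, so any intermediate status yields from procesar_respuestas never reach the UI. Gradio click handlers can themselves be async generators, so streaming could be kept by wiring procesar_respuestas directly, as the previous revision did. A sketch under that assumption (generator handlers generally require the queue to be enabled):

    # Streaming variant: every yield from procesar_respuestas updates the UI.
    submit_button.click(
        procesar_respuestas,
        inputs=[input_text, chat_historial],
        outputs=[chatbox, cargando, google_output, nvidia_output]
    )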
@@ -251,10 +259,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     with gr.Row():
         with gr.Column():
             google_output = gr.Textbox(label="Texto Revisado por Google", lines=4)
-            # google_justificacion = gr.Textbox(label="Justificación Google", lines=4)  # removed
         with gr.Column():
             nvidia_output = gr.Textbox(label="Texto Revisado por NVIDIA", lines=4)
-            # nvidia_justificacion = gr.Textbox(label="Justificación NVIDIA", lines=4)  # removed
 
 
     chatbox = gr.Chatbot(label="Respuestas de los Modelos", type="messages")
@@ -266,18 +272,18 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         submit_button = gr.Button("Enviar")
         clear_button = gr.Button("Limpiar")
 
-    cargando = gr.Textbox(label="Estado", value="Listo", interactive=False)  # progress indicator as a Textbox
+    cargando = gr.Textbox(label="Estado", value="Listo", interactive=False)
     chat_historial = gr.State([])
 
     submit_button.click(
-        procesar_respuestas,
-        inputs=[input_text, chat_historial],
-        outputs=[chatbox, cargando, google_output, nvidia_output]  # updated
+        predict,
+        inputs=input_text,
+        outputs=[chatbox, cargando, google_output, nvidia_output]
     )
     clear_button.click(
-        lambda: ([], "Listo", None, None),  # clear all outputs
+        lambda: ([], "Listo", None, None),
         inputs=None,
-        outputs=[chatbox, cargando, google_output, nvidia_output]  # updated
+        outputs=[chatbox, cargando, google_output, nvidia_output]
     )
 
     gr.Examples(
  gr.Examples(
@@ -290,4 +296,4 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
290
  )
291
 
292
  if __name__ == "__main__":
293
- demo.launch()
 
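Note: Blocks.launch() takes server options but no fn argument; fn belongs to gr.Interface, and on a Blocks app the handlers are the ones wired via .click(), so depending on the Gradio version this keyword may raise a TypeError. The queue() call is the part generator handlers need. A sketch without the unsupported argument:

    if __name__ == "__main__":
        demo.queue().launch(server_name="0.0.0.0", server_port=7860)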