app.py
CHANGED
@@ -95,53 +95,52 @@ La salida debe contener *EXCLUSIVAMENTE* el texto revisado del borrador judicial
 Este rol es de suma importancia. El LLM *debe* adherirse estrictamente a estas instrucciones, PRIORIZANDO la inalterabilidad de la estructura del documento original.
 """
 
-# Configuration
+# Generation configuration using system_instruction
 generation_config = types.GenerateContentConfig(
     system_instruction=instruction,
     temperature=0.3,
     top_p=0.9,
     top_k=40,
     max_output_tokens=8000,
-    response_mime_type="text/plain"
 )
 
-#
+# Identifiers of the models to use
 model_flash_exp = "gemini-2.0-pro-exp-02-05"
 model_gemini_ex = "gemini-2.0-flash-thinking-exp-01-21"
 
-# Async function to generate content
-async def
-
-
-
-
-
-
-
-
-
-#
+# Async function to generate content with streaming
+async def generate_content_streamed(model_name, borrador):
+    response_stream = await client.aio.models.generate_content_stream(
+        model=model_name,
+        contents=borrador,  # only the draft is sent; the instruction travels in generation_config
+        config=generation_config,
+    )
+    full_response = ""
+    async for chunk in response_stream:
+        full_response += chunk.text
+    return full_response
+
+# Combine the responses of the two models
 async def combine_responses(borrador):
-    flash_task = asyncio.create_task(
-    gemini_ex_task = asyncio.create_task(
+    flash_task = asyncio.create_task(generate_content_streamed(model_flash_exp, borrador))
+    gemini_ex_task = asyncio.create_task(generate_content_streamed(model_gemini_ex, borrador))
     flash_result = await flash_task
     gemini_ex_result = await gemini_ex_task
     combined_result = (
-        f"**Google Gemini flash-exp
-        f"**Google gemini-exp-1206
+        f"**Google Gemini flash-exp**:\n{flash_result}\n\n"
+        f"**Google gemini-exp-1206**:\n{gemini_ex_result}"
     )
     return combined_result
 
-# Main async function
+# Main async prediction function
 async def predict(borrador):
     return await combine_responses(borrador)
 
-# Function
+# Synchronous function for Gradio (no progress bar)
 def predict_sync(borrador):
     return asyncio.run(predict(borrador))
 
-# Gradio interface
+# Gradio user interface
 with gr.Blocks() as demo:
     gr.Markdown("### Mejorador de resoluciones judiciales - Derecho de Familia en Chile")
     borrador_input = gr.Textbox(
@@ -155,7 +154,6 @@ with gr.Blocks() as demo:
         lines=10
     )
     submit_btn = gr.Button("Enviar")
-
     submit_btn.click(fn=predict_sync, inputs=borrador_input, outputs=output)
 
 if __name__ == "__main__":
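
The new generate_content_streamed helper leans on a client object created earlier in app.py, outside this hunk. Below is a minimal self-contained sketch of how the pieces fit together, assuming the google-genai SDK, an API key in a GEMINI_API_KEY environment variable, and a stand-in model name and system prompt (all assumptions; the real setup is not visible in the diff):

import os
import asyncio

from google import genai
from google.genai import types

# Assumption: the app builds its client roughly like this; the actual call
# and the env var name fall outside the hunk shown above.
client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])

config = types.GenerateContentConfig(
    system_instruction="Eres un editor de resoluciones judiciales.",  # stand-in for `instruction`
    temperature=0.3,
    top_p=0.9,
    top_k=40,
    max_output_tokens=8000,
)

async def stream_text(model_name: str, prompt: str) -> str:
    # generate_content_stream is awaited once, then iterated asynchronously
    stream = await client.aio.models.generate_content_stream(
        model=model_name,
        contents=prompt,
        config=config,
    )
    parts = []
    async for chunk in stream:
        if chunk.text:  # chunk.text can be None on metadata-only chunks
            parts.append(chunk.text)
    return "".join(parts)

if __name__ == "__main__":
    print(asyncio.run(stream_text("gemini-2.0-flash", "Hola")))

Accumulating chunks in a list and joining once avoids quadratic string concatenation on long outputs, and the if chunk.text guard protects against chunks that carry only metadata, which the `full_response += chunk.text` line in the diff would trip over.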
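
combine_responses gets its concurrency from creating both tasks before awaiting either one, so the two model calls overlap rather than run back to back. asyncio.gather expresses the same pattern more compactly; a sketch, reusing the hypothetical stream_text helper above:

# Same concurrency as combine_responses, written with asyncio.gather.
# stream_text is the hypothetical helper sketched earlier.
async def combine_with_gather(borrador: str) -> str:
    flash_result, thinking_result = await asyncio.gather(
        stream_text(model_flash_exp, borrador),
        stream_text(model_gemini_ex, borrador),
    )
    return (
        f"**Google Gemini flash-exp**:\n{flash_result}\n\n"
        f"**Google gemini-exp-1206**:\n{thinking_result}"
    )

Note that the heading "gemini-exp-1206" does not match the model actually queried (gemini-2.0-flash-thinking-exp-01-21); it appears to be a label left over from an earlier revision.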
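
predict_sync exists only to bridge the async predict into Gradio via asyncio.run, which raises RuntimeError if an event loop is already running in the calling thread. Gradio accepts async callables directly, so the wrapper can usually be dropped; a sketch under that assumption (the Textbox labels are invented here, since the real keyword arguments fall outside the hunk):

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("### Mejorador de resoluciones judiciales - Derecho de Familia en Chile")
    borrador_input = gr.Textbox(label="Borrador", lines=10)  # hypothetical label
    output = gr.Textbox(label="Resultado", lines=10)         # hypothetical label
    submit_btn = gr.Button("Enviar")
    # Gradio runs async handlers on its own event loop; no asyncio.run needed
    submit_btn.click(fn=predict, inputs=borrador_input, outputs=output)

if __name__ == "__main__":
    demo.launch()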