Update app.py
app.py CHANGED
@@ -357,11 +357,11 @@ def process_input(input_mode: str, reference_text: str, candidate_text: str, mod
 
     # Determine model provider
     if model_choice == "Gemini":
-        model_provider = GeminiProvider("gemini-
-    elif model_choice == "Llama-
-        model_provider = GroqProvider("
+        model_provider = GeminiProvider("gemini-2.5-flash")
+    elif model_choice == "Llama-4-17b":
+        model_provider = GroqProvider("meta-llama/llama-4-maverick-17b-128e-instruct")
     else: # Llama-3-8b
-        model_provider = GroqProvider("
+        model_provider = GroqProvider("qwen/qwen3-32b")
 
     # Check if model is available
     if not model_provider.available:

@@ -482,7 +482,7 @@ with gr.Blocks(title="LLM Evaluation Framework", theme=gr.themes.Soft()) as demo
 
     gr.Markdown("### ⚙️ Configuration")
     model_choice = gr.Radio(
-        ["Gemini", "Llama-
+        ["Gemini", "Llama-4-17b", "Qwen-3"],
         label="Select Model",
         value="Gemini",
         elem_id="model-choice"
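The call sites in the first hunk assume `GeminiProvider` and `GroqProvider` classes that take a model ID and expose an `available` flag; those definitions sit outside this diff. A minimal sketch of what such wrappers could look like, assuming (hypothetically) that `available` just reflects whether the relevant API key is set — the env-var names are guesses, not confirmed by the commit:

```python
# Sketch of the provider interface implied by the hunk above. The Space's
# real classes live elsewhere in app.py; the env-var names here are
# assumptions, not taken from the diff.
import os


class GeminiProvider:
    """Hypothetical wrapper around a Google Gemini model."""

    def __init__(self, model_name: str):
        self.model_name = model_name
        # `available` mirrors the `model_provider.available` check above:
        # usable only if the API key is configured in the environment.
        self.available = bool(os.environ.get("GOOGLE_API_KEY"))


class GroqProvider:
    """Hypothetical wrapper around a Groq-hosted model."""

    def __init__(self, model_name: str):
        self.model_name = model_name
        self.available = bool(os.environ.get("GROQ_API_KEY"))
```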
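The `gr.Radio` choices and the if/elif chain are coupled only by string equality, which is easy to let drift — note the stale `# Llama-3-8b` comment on the branch that now builds a Qwen provider. A table-driven dispatch is one way to keep both in a single place; this sketch copies the exact model IDs from the commit, while `PROVIDERS` and `make_provider` are illustrative names not defined in app.py (the provider classes are the ones app.py already references, sketched above):

```python
# Sketch: table-driven equivalent of the if/elif chain in the diff.
# Model IDs are copied verbatim from the commit.
PROVIDERS = {
    "Gemini": lambda: GeminiProvider("gemini-2.5-flash"),
    "Llama-4-17b": lambda: GroqProvider("meta-llama/llama-4-maverick-17b-128e-instruct"),
    "Qwen-3": lambda: GroqProvider("qwen/qwen3-32b"),
}


def make_provider(model_choice: str):
    # Unknown choices fall through to Qwen-3, matching the final else branch.
    return PROVIDERS.get(model_choice, PROVIDERS["Qwen-3"])()
```

The same keys can then feed the UI via `gr.Radio(list(PROVIDERS), ...)`, so adding a model means editing one table instead of two separate code sites.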