Update app.py
app.py CHANGED
@@ -435,11 +435,7 @@ metaprompt_explanations = {
 }
 
 models = [
-    #
-    "Qwen/Qwen2.5-72B-Instruct",
-    "Qwen/Qwen2.5-1.5B",
-
-    # Meta-Llama models
+    # Meta-Llama models (all support system)
     "meta-llama/Meta-Llama-3-70B-Instruct",
     "meta-llama/Meta-Llama-3-8B-Instruct",
     "meta-llama/Llama-3.1-70B-Instruct",
@@ -449,32 +445,16 @@ models = [
     "meta-llama/Llama-2-13b-chat-hf",
     "meta-llama/Llama-2-7b-chat-hf",
 
-    #
-    "microsoft/Phi-3.5-mini-instruct",
-    "microsoft/Phi-3-mini-4k-instruct",
-    "microsoft/DialoGPT-medium",
-
-    # HuggingFaceH4 models
-    "HuggingFaceH4/starchat2-15b-v0.1",
+    # HuggingFaceH4 models (support system)
     "HuggingFaceH4/zephyr-7b-beta",
     "HuggingFaceH4/zephyr-7b-alpha",
 
-    #
-    "
-    "
-    "mistralai/Mistral-7B-Instruct-v0.3",
-    "mistralai/Mistral-7B-Instruct-v0.2",
-
-    # NousResearch models
-    "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-    "NousResearch/Hermes-3-Llama-3.1-8B",
+    # Qwen models (support system)
+    "Qwen/Qwen2.5-72B-Instruct",
+    "Qwen/Qwen2.5-1.5B",
 
-    #
-    "
-    "google/gemma-1.1-2b-it",
-    "Ninja5000/DialoGPT-medium-TWEWYJoshua",
-    "nopeno600321/DialoGPT-medium-Loki",
-    "tiiuae/falcon-7b-instruct"
+    # Google models (supports system)
+    "google/gemma-1.1-2b-it"
 ]
 
 explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])
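For context, the "(support system)" notes in the added comments refer to models whose chat templates accept a system role. Below is a minimal sketch of how an entry from the trimmed `models` list might be queried with a system prompt via huggingface_hub's InferenceClient; the client setup, prompt text, and parameter values are assumptions for illustration and are not taken from app.py itself.

# Sketch (assumed usage): send a system + user message to one of the listed
# models through the Hugging Face Inference API. Requires HF_TOKEN in the
# environment or a logged-in huggingface_hub session.
from huggingface_hub import InferenceClient

client = InferenceClient(model="meta-llama/Meta-Llama-3-8B-Instruct")  # any entry from `models`

response = client.chat_completion(
    messages=[
        {"role": "system", "content": "You are a prompt-refinement assistant."},  # hypothetical system prompt
        {"role": "user", "content": "Rewrite this prompt to be clearer: ..."},
    ],
    max_tokens=256,
)
print(response.choices[0].message.content)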