Update app.py

app.py CHANGED
@@ -28,7 +28,16 @@ def add_new_eval(
 ):
     representation_name = model_name_textbox if revision_name_textbox == '' else revision_name_textbox
     results = run_probe(benchmark_type, representation_name, human_file, skempi_file, similarity_tasks, function_prediction_aspect, function_prediction_dataset, family_prediction_dataset)
-
+
+    for benchmark_type in results:
+        if benchmark_type == 'similarity':
+            save_similarity_output(results['similarity'], representation_name)
+        elif benchmark_type == 'function':
+            save_function_output(results['function'], representation_name)
+        elif benchmark_type == 'family':
+            save_family_output(results['family'], representation_name)
+        elif benchmark_type == 'affinity':
+            save_affinity_output(results['affinity'], representation_name)
 
 # Function to update leaderboard dynamically based on user selection
 def update_leaderboard(selected_methods, selected_metrics):
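Each branch above hands one slice of `results` to a type-specific saver. A minimal sketch of what such a saver might look like, assuming each benchmark returns a dict mapping metric names to scores and that rows accumulate in a CSV-backed leaderboard; the file path, column layout, and the CSV storage itself are assumptions for illustration, not taken from this Space:

```python
import csv
import os

# Hypothetical saver -- the real save_similarity_output in this Space may differ.
# Assumes `results` maps metric names to floats, e.g. {"correlation": 0.71}.
def save_similarity_output(results, representation_name,
                           path="similarity_results.csv"):
    file_exists = os.path.exists(path)
    with open(path, "a", newline="") as f:
        writer = csv.writer(f)
        if not file_exists:
            # One header row: the method column, then one column per metric
            writer.writerow(["method", *results.keys()])
        writer.writerow([representation_name, *results.values()])
```

One saver per benchmark type keeps the dispatch loop flat; note that the loop variable shadows the `benchmark_type` argument consumed by `run_probe` above, so a distinct name (e.g. `result_type`) would make the function easier to follow.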
@@ -94,7 +103,7 @@ with block:
     method_selector = gr.CheckboxGroup(choices=method_names, label="Select methods to visualize", interactive=True, value=method_names)
 
     # Button to draw the plot for the selected benchmark
-    plot_button = gr.Button("Plot
+    plot_button = gr.Button("Plot")
     plot_output = gr.Image(label="Plot")
 
     # Update metric selectors when benchmark type is chosen