Update app.py
app.py
CHANGED
@@ -56,77 +56,79 @@ with block:
         # table jmmmu bench
         with gr.TabItem("🏅 PROBE Leaderboard", elem_id="probe-benchmark-tab-table", id=1):

-
-
-
-
-
-
-
-
-
-
+            with gr.Row(show_progress=True):
+                method_names = pd.read_csv(CSV_RESULT_PATH)['method_name'].unique().tolist()
+                metric_names = pd.read_csv(CSV_RESULT_PATH).columns.tolist()
+                metrics_with_method = metric_names.copy()
+                metric_names.remove('method_name')  # Remove method_name from the metric options
+
+            # Leaderboard section with method and metric selectors
+            with gr.Row():
+                # Add method and metric selectors for leaderboard
+                leaderboard_method_selector = gr.CheckboxGroup(
+                    choices=method_names, label="Select method_names for Leaderboard", value=method_names, interactive=True
+                )
+                leaderboard_metric_selector = gr.CheckboxGroup(
+                    choices=metric_names, label="Select Metrics for Leaderboard", value=metric_names, interactive=True
+                )
+
+                # Display the filtered leaderboard
+                baseline_value = get_baseline_df(method_names, metric_names)
+                baseline_header = ["method_name"] + metric_names
+                baseline_datatype = ['markdown'] + ['number'] * len(metric_names)
+
+                data_component = gr.components.Dataframe(
+                    value=baseline_value,
+                    headers=baseline_header,
+                    type="pandas",
+                    datatype=baseline_datatype,
+                    interactive=False,
+                    visible=True,
+                )
+
+                # Update leaderboard when method/metric selection changes
+                leaderboard_method_selector.change(
+                    update_leaderboard,
+                    inputs=[leaderboard_method_selector, leaderboard_metric_selector],
+                    outputs=data_component
                 )
-                leaderboard_metric_selector
-
+                leaderboard_metric_selector.change(
+                    update_leaderboard,
+                    inputs=[leaderboard_method_selector, leaderboard_metric_selector],
+                    outputs=data_component
                 )

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
-            aspect_type_selector = gr.Dropdown(choices=[], label="Select Aspect Type", visible=False)
-            dataset_type_selector = gr.Dropdown(choices=[], label="Select Dataset Type", visible=False)
-            dataset_selector = gr.Dropdown(choices=[], label="Select Dataset", visible=False)
-            single_metric_selector = gr.Dropdown(choices=[], label="Select Metric", visible=False)
-
-            # CheckboxGroup for methods
-            method_selector = gr.CheckboxGroup(choices=method_names, label="Select methods to visualize", interactive=True, value=method_names)
-
-            # Button to draw the plot for the selected benchmark
-            plot_button = gr.Button("Plot")
-            plot_output = gr.Image(label="Plot")
-
-            # Update selectors when benchmark type changes
-            benchmark_type_selector.change(
-                update_metric_choices,
-                inputs=[benchmark_type_selector],
-                outputs=[x_metric_selector, y_metric_selector, aspect_type_selector, dataset_type_selector, dataset_selector, single_metric_selector]
-            )
-
-            plot_button.click(
-                benchmark_plot,
-                inputs=[benchmark_type_selector, method_selector, x_metric_selector, y_metric_selector, aspect_type_selector, dataset_type_selector, dataset_selector, single_metric_selector],
-                outputs=plot_output
-            )
+            with gr.Row(variant='panel', show_progress=True):
+                # Dropdown for benchmark type
+                benchmark_type_selector = gr.Dropdown(choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type")
+
+                # Dynamic selectors
+                x_metric_selector = gr.Dropdown(choices=[], label="Select X-axis Metric", visible=False)
+                y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
+                aspect_type_selector = gr.Dropdown(choices=[], label="Select Aspect Type", visible=False)
+                dataset_type_selector = gr.Dropdown(choices=[], label="Select Dataset Type", visible=False)
+                dataset_selector = gr.Dropdown(choices=[], label="Select Dataset", visible=False)
+                single_metric_selector = gr.Dropdown(choices=[], label="Select Metric", visible=False)
+
+                # CheckboxGroup for methods
+                method_selector = gr.CheckboxGroup(choices=method_names, label="Select methods to visualize", interactive=True, value=method_names)
+
+                # Button to draw the plot for the selected benchmark
+                plot_button = gr.Button("Plot")
+                plot_output = gr.Image(label="Plot")
+
+                # Update selectors when benchmark type changes
+                benchmark_type_selector.change(
+                    update_metric_choices,
+                    inputs=[benchmark_type_selector],
+                    outputs=[x_metric_selector, y_metric_selector, aspect_type_selector, dataset_type_selector, dataset_selector, single_metric_selector]
+                )
+
+                plot_button.click(
+                    benchmark_plot,
+                    inputs=[benchmark_type_selector, method_selector, x_metric_selector, y_metric_selector, aspect_type_selector, dataset_type_selector, dataset_selector, single_metric_selector],
+                    outputs=plot_output
+                )

         with gr.TabItem("📝 About", elem_id="probe-benchmark-tab-table", id=2):
             with gr.Row():
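For context, the new leaderboard wiring calls two helpers that this diff does not show, get_baseline_df and update_leaderboard. A minimal sketch of what they plausibly do, assuming the CSV at CSV_RESULT_PATH holds one row per method with a method_name column plus numeric metric columns; the bodies below are illustrative guesses, not the Space's actual code:

```python
import pandas as pd

CSV_RESULT_PATH = "results.csv"  # placeholder; the Space defines its own path

def get_baseline_df(method_names, metric_names):
    # Filter the results table to the selected methods and metric columns.
    df = pd.read_csv(CSV_RESULT_PATH)
    df = df[df["method_name"].isin(method_names)]
    return df[["method_name"] + metric_names]

def update_leaderboard(method_names, metric_names):
    # Both CheckboxGroup .change() events re-filter the same table.
    return get_baseline_df(method_names, metric_names)
```

Returning a DataFrame from a handler whose output is a gr.Dataframe replaces the displayed table, which is why both CheckboxGroup events can share the single data_component output.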
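The benchmark_type_selector.change(...) call fans out to six dropdowns, so update_metric_choices must return six values, one per entry in outputs. The usual Gradio pattern is to return gr.update(...) objects that set choices and visibility together; a sketch assuming a hypothetical shape for benchmark_specific_metrics (the real dictionary lives elsewhere in app.py):

```python
import gradio as gr

# Hypothetical shape; the real benchmark_specific_metrics is defined in app.py.
benchmark_specific_metrics = {
    "similarity": {"x_metrics": ["spearman"], "y_metrics": ["pearson"]},
    "function": {"dataset_types": ["GO"], "datasets": ["BP"], "single_metrics": ["F1"]},
}

def update_metric_choices(benchmark_type):
    opts = benchmark_specific_metrics.get(benchmark_type, {})
    # One gr.update(...) per component in outputs=[...], in the same order:
    # x, y, aspect type, dataset type, dataset, single metric.
    return [
        gr.update(choices=opts.get(key, []), visible=key in opts)
        for key in ("x_metrics", "y_metrics", "aspect_types",
                    "dataset_types", "datasets", "single_metrics")
    ]
```

Hiding the selectors that a given benchmark type does not use keeps the panel uncluttered while the outputs list stays fixed.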
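Finally, plot_button.click(...) routes every selector into benchmark_plot and shows the result in gr.Image, which accepts a file path, a NumPy array, or a PIL image. A sketch of the x-versus-y case only; the actual function presumably branches on benchmark_type and the remaining selectors:

```python
import matplotlib
matplotlib.use("Agg")  # render headlessly inside the Space
import matplotlib.pyplot as plt
import pandas as pd

CSV_RESULT_PATH = "results.csv"  # placeholder, as in the first sketch

def benchmark_plot(benchmark_type, methods, x_metric, y_metric,
                   aspect_type, dataset_type, dataset, single_metric):
    # Sketch: scatter the two selected metrics for the chosen methods.
    df = pd.read_csv(CSV_RESULT_PATH)
    df = df[df["method_name"].isin(methods)]
    fig, ax = plt.subplots()
    ax.scatter(df[x_metric], df[y_metric])
    for _, row in df.iterrows():
        ax.annotate(row["method_name"], (row[x_metric], row[y_metric]))
    ax.set_xlabel(x_metric)
    ax.set_ylabel(y_metric)
    ax.set_title(f"{benchmark_type} benchmark")
    fig.savefig("plot.png", bbox_inches="tight")
    plt.close(fig)
    return "plot.png"  # gr.Image(label="Plot") renders the saved file
```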