Update app.py
app.py CHANGED
@@ -63,14 +63,12 @@ with block:
     metric_names.remove('method_name') # Remove method_name from the metric options
 
     # Leaderboard section with method and metric selectors
-
-
-
-
-
-
-        choices=metric_names, label="Select Metrics for Leaderboard", value=metric_names, interactive=True
-    )
+    leaderboard_method_selector = gr.CheckboxGroup(
+        choices=method_names, label="Select method_names for Leaderboard", value=method_names, interactive=True
+    )
+    leaderboard_metric_selector = gr.CheckboxGroup(
+        choices=metric_names, label="Select Metrics for Leaderboard", value=metric_names, interactive=True
+    )
 
     # Display the filtered leaderboard
     baseline_value = get_baseline_df(method_names, metric_names)
@@ -103,20 +101,19 @@ with block:
     # Dropdown for benchmark type
     benchmark_type_selector = gr.Dropdown(choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type")
 
-
-
-
-
-
-
-
-    single_metric_selector = gr.Dropdown(choices=[], label="Select Metric", visible=False)
+    # Dynamic selectors
+    x_metric_selector = gr.Dropdown(choices=[], label="Select X-axis Metric", visible=False)
+    y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
+    aspect_type_selector = gr.Dropdown(choices=[], label="Select Aspect Type", visible=False)
+    dataset_type_selector = gr.Dropdown(choices=[], label="Select Dataset Type", visible=False)
+    dataset_selector = gr.Dropdown(choices=[], label="Select Dataset", visible=False)
+    single_metric_selector = gr.Dropdown(choices=[], label="Select Metric", visible=False)
 
     method_selector = gr.CheckboxGroup(choices=method_names, label="Select methods to visualize", interactive=True, value=method_names)
 
     # Button to draw the plot for the selected benchmark
-
-
+
+    plot_button = gr.Button("Plot")
 
     plot_output = gr.Image(label="Plot")
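The first hunk fills in the leaderboard controls: two gr.CheckboxGroup components that choose which methods and metrics feed get_baseline_df. The event wiring is outside this hunk, so the snippet below is only a minimal sketch of how such selectors are commonly hooked up in Gradio; the get_baseline_df stub, the sample method_names/metric_names, and the gr.Dataframe output are assumptions for illustration, not code from this Space.

```python
import gradio as gr
import pandas as pd

# Hypothetical sample data; the real Space derives these from its results files.
method_names = ["method_a", "method_b"]
metric_names = ["accuracy", "f1"]

def get_baseline_df(methods, metrics):
    # Stand-in for the app's get_baseline_df: return the leaderboard filtered
    # to the selected methods (rows) and metrics (columns).
    data = {"method_name": methods}
    data.update({m: [0.0] * len(methods) for m in metrics})
    return pd.DataFrame(data)

with gr.Blocks() as block:
    leaderboard_method_selector = gr.CheckboxGroup(
        choices=method_names, label="Select method_names for Leaderboard",
        value=method_names, interactive=True,
    )
    leaderboard_metric_selector = gr.CheckboxGroup(
        choices=metric_names, label="Select Metrics for Leaderboard",
        value=metric_names, interactive=True,
    )
    leaderboard_table = gr.Dataframe(value=get_baseline_df(method_names, metric_names))

    # Refresh the leaderboard whenever either selector changes.
    for selector in (leaderboard_method_selector, leaderboard_metric_selector):
        selector.change(
            fn=get_baseline_df,
            inputs=[leaderboard_method_selector, leaderboard_metric_selector],
            outputs=leaderboard_table,
        )

block.launch()
```

With value=method_names and value=metric_names, both groups start fully selected, so the table initially shows the complete leaderboard and narrows as boxes are unticked.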
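The second hunk adds the benchmark-specific controls: a set of initially hidden gr.Dropdown selectors plus a Plot button that renders into a gr.Image. How benchmark_type_selector populates and reveals those dropdowns is not part of this diff; the sketch below shows the usual Gradio pattern of returning gr.update(...) values from its change handler. The structure of benchmark_specific_metrics and the helper name update_metric_choices are assumptions made up for this example.

```python
import gradio as gr

# Hypothetical shape for benchmark_specific_metrics; the real dict maps each
# benchmark type to whatever metrics/datasets apply to it.
benchmark_specific_metrics = {
    "scatter_benchmark": {"x": ["metric_1", "metric_2"], "y": ["metric_3"]},
    "single_metric_benchmark": {"metrics": ["accuracy", "f1"]},
}

def update_metric_choices(benchmark_type):
    # Populate and show only the selectors relevant to the chosen benchmark
    # type; keep the others hidden.
    opts = benchmark_specific_metrics[benchmark_type]
    if "x" in opts:  # benchmark plotted as X vs. Y
        return (
            gr.update(choices=opts["x"], value=opts["x"][0], visible=True),
            gr.update(choices=opts["y"], value=opts["y"][0], visible=True),
            gr.update(visible=False),
        )
    return (  # benchmark with a single metric
        gr.update(visible=False),
        gr.update(visible=False),
        gr.update(choices=opts["metrics"], value=opts["metrics"][0], visible=True),
    )

with gr.Blocks() as block:
    benchmark_type_selector = gr.Dropdown(
        choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type"
    )
    x_metric_selector = gr.Dropdown(choices=[], label="Select X-axis Metric", visible=False)
    y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
    single_metric_selector = gr.Dropdown(choices=[], label="Select Metric", visible=False)

    benchmark_type_selector.change(
        fn=update_metric_choices,
        inputs=benchmark_type_selector,
        outputs=[x_metric_selector, y_metric_selector, single_metric_selector],
    )

block.launch()
```

Starting the dropdowns with visible=False matches the diff: nothing benchmark-specific is shown until a benchmark type is picked, and plot_button.click(...) would then read the visible selectors and write the rendered figure to plot_output.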