Refactor metrics
app.py CHANGED
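This commit trims the multi-class classification defaults to un-averaged metric names, adds a "Select metrics" subheader, points the metrics note at the `evaluate` metrics directory, and moves the `get_compatible_models` call inside the submission form.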
```diff
@@ -42,15 +42,9 @@ TASK_TO_ID = {
 TASK_TO_DEFAULT_METRICS = {
     "binary_classification": ["f1", "precision", "recall", "auc", "accuracy"],
     "multi_class_classification": [
-        "f1_macro",
-        "f1_micro",
-        "f1_weighted",
-        "precision_macro",
-        "precision_micro",
-        "precision_weighted",
-        "recall_macro",
-        "recall_micro",
-        "recall_weighted",
+        "f1",
+        "precision",
+        "recall",
         "accuracy",
     ],
     "entity_extraction": ["precision", "recall", "f1", "accuracy"],
```
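The refactor drops the nine pre-averaged variants in favor of bare metric names, leaving the averaging strategy to each metric's own defaults. Below is a minimal sketch, not part of the commit, of how such a lookup could be consumed with the `evaluate` library; the sample predictions, references, and the explicit `average` argument are illustrative assumptions.

```python
# Minimal sketch: resolve a task's default metrics and compute them with the
# `evaluate` library. Only the dict entry mirrors the diff above; the sample
# data and the explicit `average` argument are illustrative assumptions.
import evaluate

TASK_TO_DEFAULT_METRICS = {
    "multi_class_classification": ["f1", "precision", "recall", "accuracy"],
}

predictions = [0, 2, 1, 1]
references = [0, 1, 1, 2]

for name in TASK_TO_DEFAULT_METRICS["multi_class_classification"]:
    metric = evaluate.load(name)
    # f1/precision/recall need an averaging strategy on multi-class input;
    # accuracy takes no extra arguments.
    kwargs = {"average": "macro"} if name != "accuracy" else {}
    print(name, metric.compute(predictions=predictions, references=references, **kwargs))
```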
```diff
@@ -307,9 +301,7 @@ with st.expander("Advanced configuration"):
     col_mapping[answers_text_col] = "answers.text"
     col_mapping[answers_start_col] = "answers.answer_start"

-
-
-    compatible_models = get_compatible_models(selected_task, selected_dataset)
+    st.markdown("**Select metrics**")
     st.markdown("The following metrics will be computed")
     html_string = " ".join(
         [
```
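`get_compatible_models` itself is defined elsewhere in app.py and is not shown in this diff; its call site simply moves from here into the form in the next hunk. A hypothetical sketch of what such a helper could look like with `huggingface_hub` — the filtering strategy is an assumption, not the commit's actual implementation:

```python
# Hypothetical sketch of a get_compatible_models(task, dataset) helper; the
# real definition lives elsewhere in app.py. Treating "trained on the selected
# dataset" as the compatibility test is an assumption, and task-specific
# filtering is omitted for brevity.
from huggingface_hub import HfApi

def get_compatible_models(task: str, dataset: str) -> list[str]:
    api = HfApi()
    # List Hub models tagged with the selected dataset, most downloaded first.
    models = api.list_models(filter=f"dataset:{dataset}", sort="downloads", direction=-1)
    return [model.modelId for model in models]
```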
```diff
@@ -328,9 +320,13 @@ with st.form(key="form"):
     )
     st.info(
         "Note: user-selected metrics will be run with their default arguments from "
-        + "[here](https://github.com/huggingface/
+        + "[here](https://github.com/huggingface/evaluate/tree/main/metrics)"
     )

+with st.form(key="form"):
+
+    compatible_models = get_compatible_models(selected_task, selected_dataset)
+
     selected_models = st.multiselect("Select the models you wish to evaluate", compatible_models)
     print("Selected models:", selected_models)
```
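Moving the model picker inside `st.form` changes when Streamlit reruns the script: widgets in a form are batched and their values are only delivered when the submit button fires. A minimal sketch of the pattern, with placeholder model names and button label:

```python
# Minimal sketch of the st.form pattern: interacting with widgets inside the
# form does not trigger a rerun; all values arrive together on submit.
import streamlit as st

with st.form(key="form"):
    selected_models = st.multiselect(
        "Select the models you wish to evaluate",
        ["model-a", "model-b"],  # placeholder choices
    )
    submit_button = st.form_submit_button("Submit")

if submit_button:
    st.write("Selected models:", selected_models)
```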
```diff
@@ -348,7 +344,7 @@ with st.form(key="form"):

     if submit_button:
         if len(selected_models) > 0:
-            project_id = str(uuid.uuid4())
+            project_id = str(uuid.uuid4())
             payload = {
                 "username": AUTOTRAIN_USERNAME,
                 "proj_name": f"eval-project-{project_id}",
```
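On submit, each evaluation run gets a fresh UUID-based project name, so repeated submissions never collide. A self-contained sketch of the payload construction — the `AUTOTRAIN_USERNAME` value is a placeholder, and only the fields visible in the diff are shown:

```python
# Sketch of the payload assembled on submit; AUTOTRAIN_USERNAME is a
# placeholder here, and fields beyond those visible in the diff are omitted.
import uuid

AUTOTRAIN_USERNAME = "autoevaluator"  # placeholder value

project_id = str(uuid.uuid4())
payload = {
    "username": AUTOTRAIN_USERNAME,
    "proj_name": f"eval-project-{project_id}",
}
print(payload["proj_name"])  # e.g. eval-project-3f2b9c0e-...
```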