update read_evals and app.py
- app.py +14 -12
- src/leaderboard/read_evals.py +30 -19
app.py CHANGED

```diff
@@ -35,18 +35,19 @@ def restart_space():
     API.restart_space(repo_id=REPO_ID)
 
 
-try:
-    print(EVAL_REQUESTS_PATH)
-    snapshot_download(
-        repo_id=QUEUE_REPO,
-        local_dir=EVAL_REQUESTS_PATH,
-        repo_type="dataset",
-        tqdm_class=None,
-        etag_timeout=30,
-        token=TOKEN,
-    )
-except Exception:
-    restart_space()
+# try:
+#     print(EVAL_REQUESTS_PATH)
+#     snapshot_download(
+#         repo_id=QUEUE_REPO,
+#         local_dir=EVAL_REQUESTS_PATH,
+#         repo_type="dataset",
+#         tqdm_class=None,
+#         etag_timeout=30,
+#         token=TOKEN,
+#     )
+# except Exception:
+#     restart_space()
+
 try:
     print(EVAL_RESULTS_PATH)
     snapshot_download(
@@ -56,6 +57,7 @@ try:
         tqdm_class=None,
         etag_timeout=30,
         token=TOKEN,
+        force_download=True,
     )
 except Exception:
     restart_space()
```
src/leaderboard/read_evals.py CHANGED

```diff
@@ -14,23 +14,26 @@ from src.submission.check_validity import is_model_on_hub
 
 @dataclass
 class EvalResult:
-    """Represents one full evaluation. Built from a combination of the result and request file for a given run.
-    """
-    eval_name: str
-    full_model: str
-    org: str
+    """Represents one full evaluation. Built from a combination of the result and request file for a given run."""
+
+    eval_name: str # org_model_precision (uid)
+    full_model: str # org/model (path on hub)
+    org: str
     model: str
-    revision: str
+    revision: str # commit hash, "" if main
     results: dict
     precision: Precision = Precision.Unknown
-    model_type: ModelType = ModelType.Unknown
-    weight_type: WeightType = WeightType.Original
-    architecture: str = "Unknown"
+    model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
+    weight_type: WeightType = WeightType.Original # Original or Adapter
+    architecture: str = "Unknown"
     license: str = "?"
     likes: int = 0
     num_params: int = 0
-    date: str = ""
+    date: str = "" # submission date of request file
    still_on_hub: bool = False
+    base_model: str = None
+    training_codebase: str = None
+    training_data: str = None
 
     @classmethod
     def init_from_json_file(self, json_filepath):
@@ -40,6 +43,14 @@ class EvalResult:
 
         config = data.get("config")
 
+        additional_info = {
+            "license": config.get("license", None),
+            "num_params": config.get("params", None),
+            "base_model": config.get("base_model", None),
+            "training_codebase": config.get("training_codebase", None),
+            "training_data": config.get("training_data", None),
+        }
+
         # Precision
         precision = Precision.from_str(config.get("model_dtype"))
 
@@ -85,10 +96,11 @@ class EvalResult:
             org=org,
             model=model,
             results=results,
-            precision=precision,
-            revision= config.get("model_sha", ""),
+            precision=precision,
+            revision=config.get("model_sha", ""),
             still_on_hub=still_on_hub,
-            architecture=architecture
+            architecture=architecture,
+            **additional_info,
         )
 
     def update_with_request_file(self, requests_path):
@@ -105,7 +117,9 @@ class EvalResult:
             self.num_params = request.get("params", 0)
             self.date = request.get("submitted_time", "")
         except Exception:
-            print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
+            print(
+                f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}"
+            )
 
     def to_dict(self):
         """Converts the Eval Result to a dict compatible with our dataframe display"""
@@ -146,10 +160,7 @@ def get_request_file_for_model(requests_path, model_name, precision):
     for tmp_request_file in request_files:
         with open(tmp_request_file, "r") as f:
             req_content = json.load(f)
-            if (
-                req_content["status"] in ["FINISHED"]
-                and req_content["precision"] == precision.split(".")[-1]
-            ):
+            if req_content["status"] in ["FINISHED"] and req_content["precision"] == precision.split(".")[-1]:
                 request_file = tmp_request_file
     return request_file
 
@@ -188,7 +199,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
     results = []
     for v in eval_results.values():
         try:
-            v.to_dict()
+            v.to_dict() # we test if the dict version is complete
             results.append(v)
         except KeyError: # not all eval values present
             continue
```
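For reference, here is a hypothetical result file in the shape the updated `init_from_json_file` expects: the `config` block now carries the extra metadata (`license`, `params`, `base_model`, `training_codebase`, `training_data`) that is collected into `additional_info` and forwarded to the `EvalResult` constructor. All names and values below are illustrative, not taken from the repo:

```python
import json

# Illustrative result file for a made-up model; keys mirror what the parser reads.
result = {
    "config": {
        "model_name": "demo-org/demo-model",  # split into org/model
        "model_dtype": "torch.float16",       # parsed by Precision.from_str
        "model_sha": "abc1234",               # becomes EvalResult.revision
        "license": "apache-2.0",              # -> additional_info["license"]
        "params": 7,                          # -> additional_info["num_params"]
        "base_model": "demo-org/demo-base",   # -> additional_info["base_model"]
        "training_codebase": "github.com/demo-org/trainer",
        "training_data": "demo-org/demo-corpus",
    },
    "results": {"demo_task": {"acc": 0.5}},
}

with open("demo-org__demo-model__results.json", "w") as f:
    json.dump(result, f, indent=2)
```

Keys missing from `config` simply fall back to `None` via `config.get(..., None)`, so older result files without the new metadata still parse.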