import json
import os
from datetime import datetime, timezone

from huggingface_hub import snapshot_download

from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import (
    API,
    EVAL_REQUESTS_PATH,
    DYNAMIC_INFO_PATH,
    DYNAMIC_INFO_FILE_PATH,
    DYNAMIC_INFO_REPO,
    TOKEN,
    QUEUE_REPO,
    RATE_LIMIT_PERIOD,
    RATE_LIMIT_QUOTA,
)
from src.submission.check_validity import (
    already_submitted_models,
    check_model_card,
    get_model_size,
    get_model_tags,
    is_model_on_hub,
)

REQUESTED_MODELS = None
USERS_TO_SUBMISSION_DATES = None
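# Module-level caches, populated lazily on the first submission by
# already_submitted_models(); they back the duplicate-submission check below.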


def submit_eval_complete(
    model_name: str,
    revision_commit: str,
    model_api_url: str,
    model_api_key: str,
    online_api_model_name: str,
    runsh_file,
    adapter_file,
):
    """
    Complete an evaluation submission: validates the model information, the
    optional API credentials, and the optional inference files, then delegates
    to add_new_eval().
    """
    # Validate model information
    if not model_name or not model_name.strip():
        return styled_error("Please enter a model name.")

    if not revision_commit or not revision_commit.strip():
        revision_commit = "main"

    # Validate API information (if provided)
    if model_api_url and model_api_key and online_api_model_name:
        if not model_api_url.startswith(("http://", "https://")):
            return styled_error("API URL must start with http:// or https://")

    # Validate inference files (if provided)
    if runsh_file and adapter_file:
        max_size = 5 * 1024 * 1024  # 5 MB
        if os.path.getsize(runsh_file.name) > max_size:
            return styled_error("run.sh file size cannot exceed 5MB")
        if os.path.getsize(adapter_file.name) > max_size:
            return styled_error("model_adapter.py file size cannot exceed 5MB")

    # Delegate to add_new_eval with defaults for the fields this form does not expose
    try:
        return add_new_eval(
            model=model_name,
            model_api_url=model_api_url or "",
            model_api_key=model_api_key or "",
            model_api_name=online_api_model_name or "",
            base_model="",  # Can be set as needed
            revision=revision_commit,
            precision="float16",  # Default precision
            private="false",
            weight_type="Original",  # Default weight type
            model_type="",  # Can be set as needed
            runsh=runsh_file,
            adapter=adapter_file,
        )
    except Exception as e:
        return styled_error(f"Submission failed: {e}")
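

# A minimal sketch of how this entry point might be wired into a Gradio UI.
# Component names here are hypothetical; the actual app layout may differ.
#
#     import gradio as gr
#
#     with gr.Blocks() as demo:
#         model_name = gr.Textbox(label="Model name")
#         revision = gr.Textbox(label="Revision commit", value="main")
#         api_url = gr.Textbox(label="Model API URL")
#         api_key = gr.Textbox(label="Model API key")
#         api_model = gr.Textbox(label="Online API model name")
#         runsh = gr.File(label="run.sh")
#         adapter = gr.File(label="model_adapter.py")
#         output = gr.Markdown()
#         gr.Button("Submit").click(
#             submit_eval_complete,
#             inputs=[model_name, revision, api_url, api_key, api_model, runsh, adapter],
#             outputs=output,
#         )
#     demo.launch()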


def add_new_eval(
    model: str,
    model_api_url: str,
    model_api_key: str,
    model_api_name: str,
    base_model: str,
    revision: str,
    precision: str,
    private: str,
    weight_type: str,
    model_type: str,
    runsh,
    adapter,
):
    global REQUESTED_MODELS
    global USERS_TO_SUBMISSION_DATES
    if not REQUESTED_MODELS:
        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)

    user_name = ""
    model_path = model
    if "/" in model:
        user_name = model.split("/")[0]
        model_path = model.split("/")[1]

    precision = precision.split(" ")[0]
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    if model_type is None:
        model_type = ""
        # return styled_error("Please select a model type.")

    # Does the model actually exist?
    if revision == "":
        revision = "main"

    # Defaults for metadata that may not be resolvable
    architecture = "?"
    downloads = 0
    created_at = ""
    license = ""

    # Is the model on the hub?
    if len(model_api_url) == 0:
        # Is the model info correctly filled?
        try:
            model_info = API.model_info(repo_id=model, revision=revision)
        except Exception:
            return styled_error("Could not get your model information. Please fill it up properly.")
        model_size = get_model_size(model_info=model_info, precision=precision)

        modelcard_OK, error_msg = check_model_card(model)
        if not modelcard_OK:
            return styled_error(error_msg)

        tags = []
        likes = model_info.likes
    else:
        model_size = 0
        likes = 0
        tags = []
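
    # Hub metadata (size, likes, model card) is only resolved for hub-hosted
    # models; submissions served through an external API keep the defaults above.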

    # Seems good, creating the eval
    print("Adding new eval", runsh)

    max_size = 5 * 1024 * 1024  # 5 MB
    if (runsh is not None) and (adapter is not None):
        if os.path.getsize(runsh.name) > max_size:
            return styled_error("run.sh file size cannot exceed 5MB")
        if os.path.getsize(adapter.name) > max_size:
            return styled_error("model_adapter.py file size cannot exceed 5MB")
        # Inline the uploaded files' contents into the request entry
        with open(runsh.name, "r") as f:
            runsh = f.read()
        with open(adapter.name, "r") as f:
            adapter = f.read()
    else:
        runsh = ""
        adapter = ""

    eval_entry = {
        "model": model,
        "model_api_url": model_api_url,
        "model_api_key": model_api_key,
        "model_api_name": model_api_name,
        "base_model": base_model,
        "revision": revision,
        "precision": precision,
        "private": private,
        "weight_type": weight_type,
        "status": "PENDING",
        "submitted_time": current_time,
        "model_type": model_type,
        "params": model_size,
        "runsh": runsh,
        "adapter": adapter,
    }

    supplementary_info = {
        "likes": likes,
        "license": license,
        "still_on_hub": True,
        "tags": tags,
        "downloads": downloads,
        "created_at": created_at,
    }
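
    # For illustration, a serialized request entry looks roughly like this
    # (all values hypothetical):
    #
    #     {
    #         "model": "org/my-model",
    #         "revision": "main",
    #         "precision": "float16",
    #         "weight_type": "Original",
    #         "status": "PENDING",
    #         "submitted_time": "2024-01-01T00:00:00Z",
    #         ...
    #     }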

    # Check for duplicate submission
    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
        return styled_warning("This model has already been submitted.")

    print("Creating eval file")
    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
    os.makedirs(OUT_DIR, exist_ok=True)
    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"

    with open(out_path, "w") as f:
        f.write(json.dumps(eval_entry))

    print("Uploading eval file")
    API.upload_file(
        path_or_fileobj=out_path,
        path_in_repo=out_path.split("eval-queue/")[1],
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        commit_message=f"Add {model} to eval queue",
    )
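
    # Note: the read-modify-write below is not atomic; two concurrent
    # submissions could still race between snapshot_download and upload_file.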
    # We want to grab the latest version of the submission file to not accidentally overwrite it
    snapshot_download(
        repo_id=DYNAMIC_INFO_REPO, local_dir=DYNAMIC_INFO_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
    )
    with open(DYNAMIC_INFO_FILE_PATH) as f:
        all_supplementary_info = json.load(f)
    all_supplementary_info[model] = supplementary_info
    with open(DYNAMIC_INFO_FILE_PATH, "w") as f:
        json.dump(all_supplementary_info, f, indent=2)

    API.upload_file(
        path_or_fileobj=DYNAMIC_INFO_FILE_PATH,
        path_in_repo=DYNAMIC_INFO_FILE_PATH.split("/")[-1],
        repo_id=DYNAMIC_INFO_REPO,
        repo_type="dataset",
        commit_message=f"Add {model} to dynamic info queue",
    )

    # Remove the local file
    os.remove(out_path)

    return styled_message(
        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
    )