import os

from yaml import safe_load
from huggingface_hub import HfApi

# Select the task configuration: the TASK_CONFIG env var names a YAML file
# under tasks_config/ (defaults to pt_config.yaml).
TASK_CONFIG_NAME = os.getenv("TASK_CONFIG", "pt_config")
TASK_CONFIG_PATH = os.path.join('tasks_config', TASK_CONFIG_NAME + ".yaml")

with open(TASK_CONFIG_PATH, 'r', encoding='utf-8') as f:
    TASK_CONFIG = safe_load(f)
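
# A minimal sketch of what such a YAML file might contain, assuming a
# `config` mapping as read by get_config() below; the keys shown are
# illustrative, not an authoritative schema:
#
#   config:
#     LEADERBOARD_NAME: "Open PT LLM Leaderboard"
#     IS_PUBLIC: true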

def get_config(name, default):
    """Resolve a setting: environment variable first, then the `config`
    section of the task YAML, then the given default."""
    res = None
    if name in os.environ:
        res = os.environ[name]
    elif 'config' in TASK_CONFIG:
        res = TASK_CONFIG['config'].get(name, None)
    if res is None:
        return default
    return res
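
# Resolution order, illustrated: an exported environment variable wins over
# the YAML `config` section, which wins over the hard-coded default, e.g.
#
#   os.environ["LEADERBOARD_NAME"] = "My Leaderboard"
#   get_config("LEADERBOARD_NAME", "Open LLM Leaderboard")  # -> "My Leaderboard"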

def str2bool(v):
    """Interpret common truthy strings; anything else maps to False."""
    return str(v).lower() in ("yes", "true", "t", "1")
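
# e.g. str2bool("True") -> True, str2bool(1) -> True, str2bool("no") -> False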

# Clone / pull the lmeh (lm-evaluation-harness) eval data.
H4_TOKEN = get_config("H4_TOKEN", None)

LEADERBOARD_NAME = get_config("LEADERBOARD_NAME", "Open LLM Leaderboard")

REPO_ID = get_config("REPO_ID", "HuggingFaceH4/open_llm_leaderboard")
QUEUE_REPO = get_config("QUEUE_REPO", "open-llm-leaderboard/requests")
DYNAMIC_INFO_REPO = get_config("DYNAMIC_INFO_REPO", "open-llm-leaderboard/dynamic_model_information")
RESULTS_REPO = get_config("RESULTS_REPO", "open-llm-leaderboard/results")
RAW_RESULTS_REPO = get_config("RAW_RESULTS_REPO", None)

PRIVATE_QUEUE_REPO = QUEUE_REPO
PRIVATE_RESULTS_REPO = RESULTS_REPO
# PRIVATE_QUEUE_REPO = "open-llm-leaderboard/private-requests"
# PRIVATE_RESULTS_REPO = "open-llm-leaderboard/private-results"

IS_PUBLIC = str2bool(get_config("IS_PUBLIC", True))

# Cache directory for Hub downloads; fall back to the current directory if
# HF_HOME is not writable.
CACHE_PATH = get_config("HF_HOME", ".")
os.environ["HF_HOME"] = CACHE_PATH

if not os.access(CACHE_PATH, os.W_OK):
    print(f"No write access to HF_HOME: {CACHE_PATH}. Resetting to current directory.")
    CACHE_PATH = "."
    os.environ["HF_HOME"] = CACHE_PATH
else:
    print(f"Write access confirmed for HF_HOME: {CACHE_PATH}")

# Local working copies of the Hub repos above.
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
DYNAMIC_INFO_PATH = os.path.join(CACHE_PATH, "dynamic-info")
DYNAMIC_INFO_FILE_PATH = os.path.join(DYNAMIC_INFO_PATH, "model_infos.json")

EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private"
EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"

PATH_TO_COLLECTION = get_config("PATH_TO_COLLECTION", "open-llm-leaderboard/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03")

# Rate limit variables: at most RATE_LIMIT_QUOTA submissions per user within
# each RATE_LIMIT_PERIOD; HAS_HIGHER_RATE_LIMIT is a comma-separated list of
# users granted a higher quota.
RATE_LIMIT_PERIOD = int(get_config("RATE_LIMIT_PERIOD", 7))
RATE_LIMIT_QUOTA = int(get_config("RATE_LIMIT_QUOTA", 5))
HAS_HIGHER_RATE_LIMIT = get_config("HAS_HIGHER_RATE_LIMIT", "TheBloke").split(',')
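
# A minimal sketch of how these values are presumably consumed; the actual
# check lives elsewhere in the app, and the period unit (days) and the
# higher-quota multiplier are assumptions here:
#
#   from datetime import datetime, timedelta, timezone
#
#   def under_quota(user, submission_dates):
#       window_start = datetime.now(timezone.utc) - timedelta(days=RATE_LIMIT_PERIOD)
#       quota = RATE_LIMIT_QUOTA * (2 if user in HAS_HIGHER_RATE_LIMIT else 1)
#       return sum(d > window_start for d in submission_dates) < quota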

TRUST_REMOTE_CODE = str2bool(get_config("TRUST_REMOTE_CODE", False))

# Set if you want an extra field with the average eval results from the
# original HF Open LLM Leaderboard.
GET_ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS = str2bool(get_config("GET_ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS", False))
ORIGINAL_HF_LEADERBOARD_RESULTS_REPO = get_config("ORIGINAL_HF_LEADERBOARD_RESULTS_REPO", "open-llm-leaderboard/results")
ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, 'original_results')

SHOW_INCOMPLETE_EVALS = str2bool(get_config("SHOW_INCOMPLETE_EVALS", False))
REQUIRE_MODEL_CARD = str2bool(get_config("REQUIRE_MODEL_CARD", True))
REQUIRE_MODEL_LICENSE = str2bool(get_config("REQUIRE_MODEL_LICENSE", True))

API = HfApi(token=H4_TOKEN)
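
# A minimal sketch of pulling one of the repos above with this token.
# snapshot_download is a real huggingface_hub function, but this exact call
# is illustrative rather than what the rest of the app necessarily does:
#
#   from huggingface_hub import snapshot_download
#
#   snapshot_download(repo_id=QUEUE_REPO, repo_type="dataset",
#                     local_dir=EVAL_REQUESTS_PATH, token=H4_TOKEN)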