import os
import pandas as pd
from huggingface_hub import add_collection_item, delete_collection_item, get_collection, update_collection_item
from huggingface_hub.utils import HfHubHTTPError
from pandas import DataFrame
import numpy as np
import traceback

from src.display.utils import AutoEvalColumn, ModelType, NUMERIC_INTERVALS
from src.envs import H4_TOKEN, PATH_TO_COLLECTION
# Specific intervals for the collections
"""
intervals = {
    "1B": pd.Interval(0, 1.5, closed="right"),
    "3B": pd.Interval(2.5, 3.5, closed="neither"),
    "7B": pd.Interval(6, 8, closed="neither"),
    "13B": pd.Interval(10, 14, closed="neither"),
    "30B": pd.Interval(25, 35, closed="neither"),
    "65B": pd.Interval(60, 70, closed="neither"),
}
"""
intervals = {k: v for k, v in NUMERIC_INTERVALS.items() if "?" not in k}


def update_collections(df: DataFrame):
    """This function updates the Open LLM Leaderboard model collection with the latest best models for
    each size category and type.
    """
    print("Updating collections...")
    collection = get_collection(collection_slug=PATH_TO_COLLECTION, token=H4_TOKEN)
    params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")

    cur_best_models = []
    cur_best_scores = []
    cur_itens = []
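
    # scores_per_type tracks the best average score seen so far for each category, so
    # later (larger) size buckets only add models that beat the previous category leader;
    # types_to_consider defines the two selection passes per size interval: pretrained
    # models first, then the remaining types (ModelType.LA, fine-tuned and chat).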
    scores_per_type = {'pretrained': 0, 'other': 0, 'language': 0}
    types_to_consider = [('pretrained', [ModelType.PT]), ('other', [ModelType.LA, ModelType.FT, ModelType.chat])]

    for item in collection.items:
        try:
            delete_collection_item(
                collection_slug=PATH_TO_COLLECTION, item_object_id=item.item_object_id, token=H4_TOKEN
            )
        except HfHubHTTPError:
            traceback.print_exc()
            continue

    # filter quantized models
    # df = df[df[AutoEvalColumn.precision.name].isin(['bfloat16', 'float16', "?"])]
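
    # One pass per size interval: pick the best-scoring new model for each type group,
    # then make sure at least one Portuguese-language model is represented.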
    ix = 0
    for size in intervals:
        interval_scores = []
        interval_itens_languages = []
        interval_itens = []
        numeric_interval = pd.IntervalIndex([intervals[size]])
        mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
        size_df = df.loc[mask]

        for model_type, types in types_to_consider:
            type_emojis = []
            for type in types:
                if type.value.name == "":
                    continue
                type_emoji = [t[0] for t in type.value.symbol]
                type_emojis.extend(type_emoji)
            filtered_df = size_df[size_df[AutoEvalColumn.model_type_symbol.name].isin(type_emojis)]
            filtered_df = filtered_df[filtered_df[AutoEvalColumn.average.name].astype(float) > scores_per_type[model_type]]
            best_models = filtered_df.sort_values(AutoEvalColumn.average.name, ascending=False)
            print(type_emojis, size, list(best_models[AutoEvalColumn.dummy.name])[:10])
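
            # Only models above the running threshold for this category are candidates;
            # the loop below adds the first one not already selected and then stops, so
            # each type group contributes at most one model per size interval.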
            # We add them one by one to the leaderboard
            for i, row in best_models.iterrows():
                model = row[AutoEvalColumn.dummy.name]
                hf_path = row['hf_path']
                hf_path = hf_path if 'meta-llama/Meta-' not in hf_path else hf_path.replace("meta-llama/Meta-", "meta-llama/")
                if hf_path in cur_best_models:
                    continue
                score = row[AutoEvalColumn.average.name]
                language = row[AutoEvalColumn.main_language.name]
                if language == 'Portuguese':
                    note = f"Best Portuguese {type.to_str(' ')} model of around {size} on the leaderboard today! (Score: {score})"
                else:
                    note = f"Best {type.to_str(' ')} model of around {size} on the leaderboard today! (Score: {score})"
                try:
                    collection = add_collection_item(
                        PATH_TO_COLLECTION,
                        item_id=hf_path,
                        item_type="model",
                        exists_ok=True,
                        note=note,
                        token=H4_TOKEN,
                    )
                    ix += 1
                    item_object_id = collection.items[-1].item_object_id
                    cur_best_models.append(hf_path)
                    cur_best_scores.append(float(score))
                    interval_scores.append(float(score))
                    interval_itens_languages.append(language)
                    cur_itens.append(item_object_id)
                    interval_itens.append(item_object_id)
                    scores_per_type[model_type] = float(score)
                    break
                except HfHubHTTPError:
                    traceback.print_exc()
                    continue
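
        # Fallback: if no Portuguese-language model made it into this interval through
        # the type passes above, add the best Portuguese model regardless of type,
        # tracked under the separate 'language' threshold.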
        if 'Portuguese' not in interval_itens_languages:
            language = ['Portuguese']
            model_type = 'language'
            filtered_df = size_df[size_df[AutoEvalColumn.main_language.name].isin(language)]
            filtered_df = filtered_df[filtered_df[AutoEvalColumn.average.name].astype(float) > scores_per_type[model_type]]
            best_models = filtered_df.sort_values(AutoEvalColumn.average.name, ascending=False)
            print(language, size, list(best_models[AutoEvalColumn.dummy.name])[:10])

            # We add them one by one to the leaderboard
            for i, row in best_models.iterrows():
                model = row[AutoEvalColumn.dummy.name]
                hf_path = row['hf_path']
                hf_path = hf_path if 'meta-llama/Meta-' not in hf_path else hf_path.replace("meta-llama/Meta-", "meta-llama/")
                if hf_path in cur_best_models:
                    continue
                score = row[AutoEvalColumn.average.name]
                language = row[AutoEvalColumn.main_language.name]
                if language == 'Portuguese':
                    note = f"Best Portuguese {type.to_str(' ')} model of around {size} on the leaderboard today! (Score: {score})"
                else:
                    note = f"Best {type.to_str(' ')} model of around {size} on the leaderboard today! (Score: {score})"
                try:
                    collection = add_collection_item(
                        PATH_TO_COLLECTION,
                        item_id=hf_path,
                        item_type="model",
                        exists_ok=True,
                        note=note,
                        token=H4_TOKEN,
                    )
                    ix += 1
                    item_object_id = collection.items[-1].item_object_id
                    cur_best_models.append(hf_path)
                    cur_best_scores.append(float(score))
                    interval_scores.append(float(score))
                    interval_itens_languages.append(language)
                    cur_itens.append(item_object_id)
                    interval_itens.append(item_object_id)
                    scores_per_type[model_type] = float(score)
                    break
                except HfHubHTTPError:
                    traceback.print_exc()
                    continue

    # fix order:
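    # Reorder the items added during this run by their average score, moving each one
    # into place with update_collection_item so the collection is not left in the
    # size/type insertion order.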
    starting_idx = len(cur_best_models)
    k = 0
    for i in np.argsort(cur_best_scores):
        if i == k:
            continue
        else:
            try:
                # print(cur_best_models[i], interval_itens[i], starting_idx+k, interval_scores[i])
                update_collection_item(
                    collection_slug=PATH_TO_COLLECTION, item_object_id=cur_itens[i], position=starting_idx+k
                )
            except:
                traceback.print_exc()
                pass
        k += 1
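
    # Final cleanup: re-fetch the collection and drop any leftover items that were not
    # (re-)added during this run.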
    collection = get_collection(PATH_TO_COLLECTION, token=H4_TOKEN)
    for item in collection.items:
        if item.item_id not in cur_best_models:
            try:
                delete_collection_item(
                    collection_slug=PATH_TO_COLLECTION, item_object_id=item.item_object_id, token=H4_TOKEN
                )
            except HfHubHTTPError:
                traceback.print_exc()
                continue
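

# Hypothetical usage from the Space's refresh job (names below are illustrative, not
# part of this module): build the scored leaderboard DataFrame and pass it in.
#
#     from src.tools.collections import update_collections
#     df = get_leaderboard_df()  # assumed helper returning the results DataFrame
#     update_collections(df)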