trying again
app.py CHANGED
@@ -1,6 +1,4 @@
 import os
-os.system("wget https://raw.githubusercontent.com/Weyaxi/scrape-open-llm-leaderboard/main/openllm.py")
-from openllm import *
 import requests
 import pandas as pd
 from bs4 import BeautifulSoup
@@ -9,7 +7,6 @@ from huggingface_hub import HfApi, CommitOperationAdd, create_commit
 import gradio as gr
 import datetime
 from huggingface_hub.utils import HfHubHTTPError
-import time
 
 api = HfApi()
 
@@ -18,7 +15,6 @@ HF_TOKEN = os.getenv('HF_TOKEN')
 
 
 headers_models = ["🔢 Serial Number", "👤 Author Name", "📥 Total Downloads", "👍 Total Likes", "🤖 Number of Models",
-                  "🏆 Best Model On Open LLM Leaderboard", "🥇 Best Rank On Open LLM Leaderboard",
                   "📊 Average Downloads per Model", "📈 Average Likes per Model", "🚀 Most Downloaded Model",
                   "📈 Most Download Count", "❤️ Most Liked Model", "👍 Most Like Count", "🔥 Trending Model",
                   "👑 Best Rank at Trending Models", "🏷️ Type"]
@@ -74,15 +70,7 @@ def get_sum(df_for_sum_function):
     return {"Downloads": sum_downloads, "Likes": sum_likes}
 
 
-def get_openllm_leaderboard():
-    try:
-        data = get_json_format_data()
-        finished_models = get_datas(data)
-        df = pd.DataFrame(finished_models)
-        return df['Model'].tolist()
-    except Exception as e:  # something is wrong about the leaderboard so return empty list
-        print(e)
-        return []
+
 
 
 def get_ranking(model_list, target_org):
@@ -133,7 +121,6 @@ def group_models_by_author(all_things):
 
 def make_leaderboard(orgs, users, which_one, data):
     data_rows = []
-    open_llm_leaderboard = get_openllm_leaderboard() if which_one == "models" else None
 
     trend = get_trending_list(1, which_one)
     hepsi = [orgs, users]
@@ -152,15 +139,12 @@ def make_leaderboard(orgs, users, which_one, data):
         most_info = get_most(df)
 
         if which_one == "models":
-            open_llm_leaderboard_get_org = get_ranking(open_llm_leaderboard, org)
-
+
             data_rows.append({
                 "Author Name": org,
                 "Total Downloads": sum_info["Downloads"],
                 "Total Likes": sum_info["Likes"],
                 "Number of Models": num_things,
-                "Best Model On Open LLM Leaderboard": open_llm_leaderboard_get_org[1] if open_llm_leaderboard_get_org not in ["Not Found", "Error on Leaderboard"] else open_llm_leaderboard_get_org,
-                "Best Rank On Open LLM Leaderboard": open_llm_leaderboard_get_org[1] if open_llm_leaderboard_get_org not in ["Not Found", "Error on Leaderboard"] else open_llm_leaderboard_get_org,
                 "Average Downloads per Model": int(sum_info["Downloads"] / num_things) if num_things != 0 else 0,
                 "Average Likes per Model": int(sum_info["Likes"] / num_things) if num_things != 0 else 0,
                 "Most Downloaded Model": most_info["Most Download"]["id"],
@@ -204,6 +188,7 @@ def make_leaderboard(orgs, users, which_one, data):
             })
 
     leaderboard = pd.DataFrame(data_rows)
+
     temp = ["Total Downloads"] if which_one != "spaces" else ["Total Likes"]
 
     leaderboard = leaderboard.sort_values(by=temp, ascending=False)
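The hunk above keeps the aggregation pattern intact: rows are collected as dictionaries, turned into a DataFrame, and sorted by the metric relevant to the current tab. A minimal, self-contained sketch of that pattern (the author names and counts below are made-up sample data, not the app's schema):

```python
import pandas as pd

# Collect one dict per author, as make_leaderboard does with data_rows.
data_rows = [
    {"Author Name": "author-a", "Total Downloads": 1200, "Total Likes": 30},
    {"Author Name": "author-b", "Total Downloads": 800, "Total Likes": 75},
]

leaderboard = pd.DataFrame(data_rows)

# The app switches the sort key to ["Total Likes"] for spaces.
sort_key = ["Total Downloads"]
leaderboard = leaderboard.sort_values(by=sort_key, ascending=False)
print(leaderboard)
```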
@@ -293,8 +278,6 @@ INTRODUCTION_TEXT = f"""
 
 🛠️ The leaderboard's backend mainly runs on the [Hugging Face Hub API](https://huggingface.co/docs/huggingface_hub/v0.5.1/en/package_reference/hf_api).
 
-📒 **Note:** In the model's dataframe, there are some columns related to the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). This data is also retrieved through web scraping.
-
 📒 **Note:** In trending models/datasets/spaces, first 300 models/datasets/spaces is being retrieved from huggingface.
 
 ## 🔍 Searching Organizations and Users
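The introduction text kept in this hunk says the backend mainly runs on the Hugging Face Hub API and that only the first 300 trending models/datasets/spaces are retrieved. A rough sketch of that kind of bounded Hub query using huggingface_hub (the sort key here is an assumption; the app's own get_trending_list helper is not shown in this diff):

```python
from huggingface_hub import HfApi

api = HfApi()

# Ask the Hub API for a bounded list of models. Sorting by likes is an
# illustrative choice; the real helper may rank by a trending score instead.
for model in api.list_models(sort="likes", direction=-1, limit=300):
    print(model.id, model.likes, model.downloads)
```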
@@ -540,13 +523,13 @@ with gr.Blocks() as demo:
     search_bar_in_df = gr.Textbox(placeholder="🔍 Search for a author", show_label=False)
 
     with gr.TabItem("🏛️ Models", id=1):
-        columns_to_convert = ["Author Name", "Best Model On Open LLM Leaderboard", "Most Downloaded Model",
+        columns_to_convert = ["Author Name", "Most Downloaded Model",
                               "Most Liked Model", "Trending Model"]
         models_df = make_leaderboard(org_names_in_list, user_names_in_list, "models", group_models_by_author(all_models))
         models_df = models_df_to_clickable(models_df, columns_to_convert, "models")
 
         gr_models = gr.Dataframe(apply_headers(models_df, headers_models).head(400), headers=headers_models, interactive=True,
-                                 datatype=["str", "markdown", "str", "str", "str", "markdown", "str", "str", "str",
+                                 datatype=["str", "markdown", "str", "str", "str", "str", "str",
                                            "markdown", "str", "markdown", "str", "markdown", "str", "str"])
 
     with gr.TabItem("📊 Datasets", id=2):
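The gr.Dataframe call above declares several columns as "markdown" so that models_df_to_clickable can turn author and model names into links. A minimal illustration of that mechanism, with a made-up author link (the helper itself is not part of this diff):

```python
import gradio as gr
import pandas as pd

# Columns declared as "markdown" are rendered, so markdown links become clickable.
df = pd.DataFrame({
    "Author Name": ["[author-a](https://huggingface.co/author-a)"],
    "Total Downloads": [1200],
})

with gr.Blocks() as demo:
    gr.Dataframe(df, headers=list(df.columns), datatype=["markdown", "str"], interactive=True)

demo.launch()
```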