import requests
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import gradio as gr
import io
import os
import base64
import zipfile
from PIL import Image
from io import BytesIO
import tempfile
import sys
# --------------------------------------------------------------------
# PART 1: TINY DATA + PLOTS
# --------------------------------------------------------------------
# This dataframe is your "tiny" version of model performance data.
# Used for plotting & demonstration in the Gradio app.
data_full = [
    ['CultriX/Qwen2.5-14B-SLERPv7', 'https://huggingface.co/CultriX/Qwen2.5-14B-SLERPv7', 0.7205, 0.8272, 0.7541, 0.6581, 0.5, 0.729],
    ['djuna/Q2.5-Veltha-14B-0.5', 'https://huggingface.co/djuna/Q2.5-Veltha-14B-0.5', 0.7492, 0.8386, 0.7305, 0.598, 0.43, 0.7817],
    ['CultriX/Qwen2.5-14B-FinalMerge', 'https://huggingface.co/CultriX/Qwen2.5-14B-FinalMerge', 0.7248, 0.8277, 0.7113, 0.7052, 0.57, 0.7001],
    ['CultriX/Qwen2.5-14B-MultiCultyv2', 'https://huggingface.co/CultriX/Qwen2.5-14B-MultiCultyv2', 0.7295, 0.8359, 0.7363, 0.5767, 0.44, 0.7316],
    ['CultriX/Qwen2.5-14B-Brocav7', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav7', 0.7445, 0.8353, 0.7508, 0.6292, 0.46, 0.7629],
    ['CultriX/Qwen2.5-14B-Broca', 'https://huggingface.co/CultriX/Qwen2.5-14B-Broca', 0.7456, 0.8352, 0.748, 0.6034, 0.44, 0.7716],
    ['CultriX/Qwen2.5-14B-Brocav3', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav3', 0.7395, 0.8388, 0.7393, 0.6405, 0.47, 0.7659],
    ['CultriX/Qwen2.5-14B-Brocav4', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav4', 0.7432, 0.8377, 0.7444, 0.6277, 0.48, 0.758],
    ['CultriX/Qwen2.5-14B-Brocav2', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav2', 0.7492, 0.8302, 0.7508, 0.6377, 0.51, 0.7478],
    ['CultriX/Qwen2.5-14B-Brocav5', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav5', 0.7445, 0.8313, 0.7547, 0.6376, 0.5, 0.7304],
    ['CultriX/Qwen2.5-14B-Brocav6', 'https://huggingface.co/CultriX/Qwen2.5-14B-Brocav6', 0.7179, 0.8354, 0.7531, 0.6378, 0.49, 0.7524],
    ['CultriX/Qwenfinity-2.5-14B', 'https://huggingface.co/CultriX/Qwenfinity-2.5-14B', 0.7347, 0.8254, 0.7279, 0.7267, 0.56, 0.697],
    ['CultriX/Qwen2.5-14B-Emergedv2', 'https://huggingface.co/CultriX/Qwen2.5-14B-Emergedv2', 0.7137, 0.8335, 0.7363, 0.5836, 0.44, 0.7344],
    ['CultriX/Qwen2.5-14B-Unity', 'https://huggingface.co/CultriX/Qwen2.5-14B-Unity', 0.7063, 0.8343, 0.7423, 0.682, 0.57, 0.7498],
    ['CultriX/Qwen2.5-14B-MultiCultyv3', 'https://huggingface.co/CultriX/Qwen2.5-14B-MultiCultyv3', 0.7132, 0.8216, 0.7395, 0.6792, 0.55, 0.712],
    ['CultriX/Qwen2.5-14B-Emergedv3', 'https://huggingface.co/CultriX/Qwen2.5-14B-Emergedv3', 0.7436, 0.8312, 0.7519, 0.6585, 0.55, 0.7068],
    ['CultriX/SeQwence-14Bv1', 'https://huggingface.co/CultriX/SeQwence-14Bv1', 0.7278, 0.841, 0.7541, 0.6816, 0.52, 0.7539],
    ['CultriX/Qwen2.5-14B-Wernickev2', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev2', 0.7391, 0.8168, 0.7273, 0.622, 0.45, 0.7572],
    ['CultriX/Qwen2.5-14B-Wernickev3', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev3', 0.7357, 0.8148, 0.7245, 0.7023, 0.55, 0.7869],
    ['CultriX/Qwen2.5-14B-Wernickev4', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev4', 0.7355, 0.829, 0.7497, 0.6306, 0.48, 0.7635],
    ['CultriX/SeQwential-14B-v1', 'https://huggingface.co/CultriX/SeQwential-14B-v1', 0.7355, 0.8205, 0.7549, 0.6367, 0.48, 0.7626],
    ['CultriX/Qwen2.5-14B-Wernickev5', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev5', 0.7224, 0.8272, 0.7541, 0.679, 0.51, 0.7578],
    ['CultriX/Qwen2.5-14B-Wernickev6', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev6', 0.6994, 0.7549, 0.5816, 0.6991, 0.58, 0.7267],
    ['CultriX/Qwen2.5-14B-Wernickev7', 'https://huggingface.co/CultriX/Qwen2.5-14B-Wernickev7', 0.7147, 0.7599, 0.6097, 0.7056, 0.57, 0.7164],
    ['CultriX/Qwen2.5-14B-FinalMerge-tmp2', 'https://huggingface.co/CultriX/Qwen2.5-14B-FinalMerge-tmp2', 0.7255, 0.8192, 0.7535, 0.6671, 0.5, 0.7612],
    ['CultriX/Qwen2.5-14B-BrocaV8', 'https://huggingface.co/CultriX/Qwen2.5-14B-BrocaV8', 0.7415, 0.8396, 0.7334, 0.5785, 0.43, 0.7646],
    ['CultriX/Qwexit-2.5-14B-2024', 'https://huggingface.co/CultriX/Qwexit-2.5-14B-2024', 0.7253, 0.8174, 0.7456, 0.6688, 0.5300, 0.7027],
    ['CultriX/Qwen2.5-14B-BrocaV9', 'https://huggingface.co/CultriX/Qwen2.5-14B-BrocaV9', 0.7432, 0.8307, 0.7467, 0.6221, 0.5000, 0.7623],
    ['CultriX/Qwen2.5-14B-partialmergept1', 'https://huggingface.co/CultriX/Qwen2.5-14B-partialmergept1', 0.7389, 0.8370, 0.7451, 0.6715, 0.5700, 0.7308],
    ['CultriX/Qwen2.5-14B-partialmergept2', 'https://huggingface.co/CultriX/Qwen2.5-14B-partialmergept2', 0.7300, 0.8428, 0.7371, 0.5944, 0.4200, 0.7581],
    ['CultriX/model', 'https://huggingface.co/CultriX/model', 0.7010, 0.8320, 0.7194, 0.6158, 0.4700, 0.7385],
    ['CultriX/Qwen2.5-14B-BrocaFinal', 'https://huggingface.co/CultriX/Qwen2.5-14B-BrocaFinal', 0.6265, 0.7688, 0.7007, 0.7035, 0.5100, 0.7218],
    ['CultriX/Qwen2.5-14B-Hyperionv1', 'https://huggingface.co/CultriX/Qwen2.5-14B-Hyperionv1', 0.7300, 0.8477, 0.7448, 0.6063, 0.4400, 0.7651],
    ['CultriX/Qwen2.5-14B-Hyperionv3', 'https://huggingface.co/CultriX/Qwen2.5-14B-Hyperionv3', 0.7445, 0.8414, 0.7458, 0.6371, 0.4900, 0.7543],
    ['sometimesanotion/Lamarck-14B-v0.6', 'https://huggingface.com/sometimesanotion/Lamarck-14B-v0.6', 0.7446, 0.8294, 0.7368, 0.6008, 0.4300, 0.7423],
    ['CultriX/Qwen2.5-14B-Hyper', 'https://huggingface.com/CultriX/Qwen2.5-14B-Hyper', 0.7372, 0.8411, 0.7424, 0.5830, 0.4400, 0.7792],
    ['CultriX/Qwen2.5-14B-Hyperionv4', 'https://huggingface.co/CultriX/Qwen2.5-14B-Hyperionv4', 0.7305, 0.8359, 0.7454, 0.5827, 0.4600, 0.7797],
    ['CultriX/Qwen2.5-14B-Hyperionv5', 'https://huggingface.co/CultriX/Qwen2.5-14B-Hyperionv5', 0.7458, 0.8290, 0.7508, 0.6228, 0.5200, 0.7540],
    ['CultriX/Qwen2.5-14B-Hyperionv6', 'https://huggingface.co/CultriX/Qwen2.5-14B-Hyperionv6', 0.7430, 0.8308, 0.7353, 0.6184, 0.4500, 0.7665],
    ['CultriX/Qwen2.5-14B-Hyperionv7', 'https://huggingface.co/CultriX/Qwen2.5-14B-Hyperionv7', 0.7412, 0.8287, 0.7508, 0.6208, 0.4800, 0.7532],
    ['CultriX/Qwen2.5-14B-Ultima', 'https://huggingface.co/CultriX/Qwen2.5-14B-Ultima', 0.7413, 0.8335, 0.7487, 0.6156, 0.4500, 0.7601],
    ['sometimesanotion/Lamarck-14B-v0.7-rc4', 'https://huggingface.co/sometimesanotion/Lamarck-14B-v0.7-rc4', 0.7541, 0.8310, 0.7487, 0.6043, 0.4400, 0.7421],
    ['CultriX/Enhanced-TIES-Base-v1', 'https://huggingface.co/CultriX/Enhanced-TIES-Base-v1', 0.7497, 0.8376, 0.7424, 0.6168, 0.4700, 0.7544],
    ['CultriX/Qwen2.5-14B-Qwentangledv2', 'https://huggingface.co/CultriX/Qwen2.5-14B-Qwentangledv2', 0.7355, 0.8218, 0.7438, 0.6093, 0.4500, 0.7352],
    ['CultriX/Qwen2.5-14B-Optimav3', 'https://huggingface.co/CultriX/Qwen2.5-14B-Optimav3', 0.7482, 0.8216, 0.7424, 0.6186, 0.4800, 0.7675],
    ['CultriX/Qwen2.5-14B-Ultimav2', 'https://huggingface.co/CultriX/Qwen2.5-14B-Ultimav2', 0.7568, 0.8333, 0.7454, 0.6277, 0.4900, 0.7870],
    ['CultriX/Qwen2.5-14B-HyperSeek', 'https://huggingface.co/CultriX/Qwen2.5-14B-HyperSeek', 0.7445, 0.8414, 0.7458, 0.6371, 0.4900, 0.7543],
    ['CultriX/Qwen2.5-14B-HyperSeekv2', 'https://huggingface.co/CultriX/Qwen2.5-14B-HyperSeekv2', 0.7445, 0.8431, 0.7458, 0.6344, 0.5000, 0.7501],
    ['CultriX/Qwen2.5-14B-Hyperseek-h', 'https://huggingface.co/CultriX/Qwen2.5-14B-HyperSeek-h', 0.7445, 0.8414, 0.7458, 0.6371, 0.4900, 0.7543],
    ['CultriX/Qwen2.5-14B-HyperSeek', 'https://huggingface.co/CultriX/Qwen2.5-14B-HyperSeek', 0.7396, 0.8289, 0.7532, 0.6516, 0.4900, 0.7458],
    ['CultriX/Qwen2.5-DeepHyper', 'https://huggingface.co/CultriX/Qwen2.5-DeepHyper', 0.7558, 0.8283, 0.7330, 0.6962, 0.5900, 0.7191],
    ['CultriX/Qwen2.5-DeepHyper', 'https://huggingface.co/CultriX/Qwen2.5-DeepHyper', 0.7396, 0.8289, 0.7532, 0.6564, 0.5100, 0.7524],
    ['CultriX/MergeStage1', 'https://huggingface.co/CultriX/MergeStage1', 0.7559, 0.8291, 0.7519, 0.6256, 0.4800, 0.7383],
    ['CultriX/MergeStage3', 'https://huggingface.co/CultriX/MergeStage3', 0.7355, 0.8258, 0.7408, 0.6179, 0.4800, 0.7626],
    ['CultriX/MergeStage2', 'https://huggingface.co/CultriX/MergeStage2', 0.7468, 0.8242, 0.7497, 0.6156, 0.4900, 0.7424],
    ['CultriX/MergeStage3v2', 'https://huggingface.co/CultriX/MergeStage3v2', 0.7492, 0.8216, 0.7408, 0.6167, 0.4600, 0.7642],
    ['CultriX/MergeStag1v2', 'https://huggingface.co/CultriX/MergeStage1v2', 0.7430, 0.8121, 0.7424, 0.6042, 0.4400, 0.7701],
    ['CultriX/MergeStag2v2', 'https://huggingface.co/CultriX/MergeStage2v2', 0.7430, 0.8289, 0.7368, 0.6011, 0.4500, 0.7421],
    ['CultriX/MergeStag1v3', 'https://huggingface.co/CultriX/MergeStage1v3', 0.7216, 0.8458, 0.7281, 0.7202, 0.5500, 0.7362],
    ['CultriX/MergeStag2v3', 'https://huggingface.co/CultriX/MergeStage2v3', 0.7430, 0.8343, 0.7330, 0.6989, 0.5800, 0.7133],
    ['CultriX/MergeStag3v3', 'https://huggingface.co/CultriX/MergeStage3v3', 0.7430, 0.8097, 0.7467, 0.6162, 0.4600, 0.7833],
    ['CultriX/MergeStag3v4', 'https://huggingface.co/CultriX/MergeStage3v4', 0.7481, 0.8262, 0.7298, 0.6726, 0.5400, 0.7192],
    ['CultriX/MergeStag3v4v2', 'https://huggingface.co/CultriX/MergeStage3v4v2', 0.7353, 0.8258, 0.7337, 0.6669, 0.5200, 0.7329],
    ['CultriX/MergeStag4v2', 'https://huggingface.co/CultriX/MergeStage4v2', 0.7357, 0.8058, 0.7486, 0.6002, 0.4400, 0.7694],
    ['CultriX/MergeStag4v3', 'https://huggingface.co/CultriX/MergeStage4v3', 0.7413, 0.8314, 0.7457, 0.6529, 0.4800, 0.7456],
    ['deepseek-ai/DeepSeek-R1-Distill-Qwen-14B', 'https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B', 0.6355, 0.8191, 0.6956, 0.5615, 0.3800, 0.7030],
    ['suayptalha/Lamarckvergence-14B', 'https://huggingface.co/suayptalha/Lamarckvergence-14B', 0.7554, 0.8468, 0.7457, 0.6044, 0.4300, 0.7687],
    ['CultriX/Qwen2.5-14B-HyperMarck', 'https://huggingface.co/CultriX/Qwen2.5-14B-HyperMarck', 0.7457, 0.8225, 0.7337, 0.6473, 0.4900, 0.7192],
    ['CultriX/Qwen2.5-14B-HyperMarck-dl', 'https://huggingface.co/CultriX/Qwen2.5-14B-HyperMarck-dl', 0.7354, 0.8458, 0.7248, 0.7023, 0.5600, 0.7181],
    ['CultriX/Qwen2.5-14B-HyperMarck-dt', 'https://huggingface.co/CultriX/Qwen2.5-14B-HyperMarck-dt', 0.7300, 0.8405, 0.7248, 0.7017, 0.5600, 0.7226],
    ['CultriX/Qwen2.5-14B-HyperMarck', 'https://huggingface.co/CultriX/Qwen2.5-14B-HyperMarck', 0.7568, 0.8257, 0.7368, 0.6242, 0.4600, 0.7639],
    ['CultriX/Qwen2.5-14B-DeepSearchv2', 'https://huggingface.co/CultriX/Qwen2.5-14B-DeepSearchv2', 0.7000, 0.8340, 0.7218, 0.6329, 0.4800, 0.7646],
    ['CultriX/Qwen2.5-14B-CoreGeneralist', 'https://huggingface.co/CultriX/Qwen2.5-14B-CoreGeneralist', 0.7396, 0.8289, 0.7487, 0.6337, 0.4700, 0.7453],
    ['CultriX/Qwen2.5-14B-ReasoningMerge', 'https://huggingface.co/CultriX/Qwen2.5-14B-ReasoningMerge', 0.7452, 0.8364, 0.7216, 0.5982, 0.4500, 0.7705],
    ['CultriX/Qwen2.5-14B-GeneralReasoning', 'https://huggingface.co/CultriX/Qwen2.5-14B-GeneralReasoning', 0.7478, 0.8323, 0.7314, 0.6151, 0.4500, 0.7706],
    ['CultriX/Qwen2.5-14B-DeepResearch', 'https://huggingface.co/CultriX/Qwen2.5-14B-DeepResearch', 0.7568, 0.8207, 0.7435, 0.6184, 0.4800, 0.7369],
    ['CultriX/Qwen2.5-14B-ModelStock', 'https://huggingface.co/CultriX/Qwen2.5-14B-ModelStock', 0.7456, 0.8274, 0.7471, 0.6278, 0.4800, 0.7453],
    ['CultriX/Qwen2.5-14B-Qwenvergence', 'https://huggingface.co/CultriX/Qwen2.5-14B-Qwenvergence', 0.7458, 0.8405, 0.7247, 0.5920, 0.4500, 0.7544],
    ['CultriX/Qwen2.5-14B-Verged', 'https://huggingface.co/CultriX/Qwen2.5-14B-Verged', 0.7317, 0.8365, 0.7229, 0.6052, 0.4600, 0.7706],
    ['CultriX/Qwen3-8B-Hippocratesv1', 'https://huggingface.co/CultriX/Qwen2.5-14B-Verged', 0.6549, 0.7620, 0.7182, 0.5238, 0.3400, 0.6625]
]
columns = [
    "Model Configuration", "Model Link", "tinyArc", "tinyHellaswag",
    "tinyMMLU", "tinyTruthfulQA", "tinyTruthfulQA_mc1", "tinyWinogrande"
]
df_full = pd.DataFrame(data_full, columns=columns)
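
# Optional refactoring sketch (an addition, not used by the original functions
# below, which each repeat this sequence inline): render the current matplotlib
# figure to a PIL image plus a temp-file path in one place.
def fig_to_image_and_path():
    buf = io.BytesIO()
    plt.savefig(buf, format="png")
    plt.close()
    buf.seek(0)
    pil_image = Image.open(buf)
    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name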
def plot_average_scores():
    # Average only the benchmark columns, so helper columns added by other
    # plotting functions are not accidentally included.
    df_full["Average Score"] = df_full[columns[2:]].mean(axis=1)
    df_avg_sorted = df_full.sort_values(by="Average Score", ascending=False)
    plt.figure(figsize=(14, 10))
    plt.barh(df_avg_sorted["Model Configuration"], df_avg_sorted["Average Score"])
    plt.title("Average Performance of Models Across Tasks", fontsize=16)
    plt.xlabel("Average Score", fontsize=14)
    plt.ylabel("Model Configuration", fontsize=14)
    plt.gca().invert_yaxis()
    plt.grid(axis='x', linestyle='--', alpha=0.7)
    plt.tight_layout()
    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    img_buffer.seek(0)
    img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
    plt.close()
    pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def plot_task_performance():
    # Melt only the benchmark columns so helper columns added by other
    # plotting functions ("Average Score", "Total Scores") are ignored.
    df_full_melted = df_full.melt(
        id_vars=["Model Configuration", "Model Link"],
        value_vars=columns[2:],
        var_name="Task", value_name="Score"
    )
    plt.figure(figsize=(16, 12))
    for model in df_full["Model Configuration"]:
        model_data = df_full_melted[df_full_melted["Model Configuration"] == model]
        plt.plot(model_data["Task"], model_data["Score"], marker="o", label=model)
    plt.title("Performance of All Models Across Tasks", fontsize=16)
    plt.xlabel("Task", fontsize=14)
    plt.ylabel("Score", fontsize=14)
    plt.xticks(rotation=45)
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=9)
    plt.grid(axis='y', linestyle='--', alpha=0.7)
    plt.tight_layout()
    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    img_buffer.seek(0)
    img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
    plt.close()
    pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def plot_task_specific_top_models():
    # Index by model name so idxmax reports which model tops each task
    # (rather than a raw row index), over the benchmark columns only.
    per_task = df_full.set_index("Model Configuration")[columns[2:]]
    top_models = per_task.idxmax()
    top_scores = per_task.max()
    results = pd.DataFrame({"Top Model": top_models, "Score": top_scores}).reset_index().rename(columns={"index": "Task"})
    plt.figure(figsize=(14, 8))
    plt.bar(results["Task"], results["Score"])
    plt.title("Task-Specific Top Models", fontsize=16)
    plt.xlabel("Task", fontsize=14)
    plt.ylabel("Score", fontsize=14)
    plt.grid(axis="y", linestyle="--", alpha=0.7)
    plt.tight_layout()
    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    img_buffer.seek(0)
    img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
    plt.close()
    pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def plot_heatmap():
    # Add a column for the total scores across all benchmark tasks
    df_full["Total Scores"] = df_full[columns[2:]].sum(axis=1)
    heat_cols = list(columns[2:]) + ["Total Scores"]
    # Normalize each column individually for consistent coloring; selecting
    # heat_cols explicitly keeps the data, annotations, and tick labels aligned.
    normalized_data = df_full[heat_cols].apply(lambda x: (x - x.min()) / (x.max() - x.min()), axis=0)
    plt.figure(figsize=(14, 10))
    sns.heatmap(
        normalized_data,
        annot=df_full[heat_cols],  # Show actual values in annotations
        cmap="YlGnBu",
        xticklabels=heat_cols,
        yticklabels=df_full["Model Configuration"]
    )
    plt.title("Performance Heatmap", fontsize=16)
    plt.tight_layout()
    img_buffer = io.BytesIO()
    plt.savefig(img_buffer, format='png')
    img_buffer.seek(0)
    img_base64 = base64.b64encode(img_buffer.read()).decode('utf-8')
    plt.close()
    pil_image = Image.open(BytesIO(base64.b64decode(img_base64)))
    temp_image_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    pil_image.save(temp_image_file.name)
    return pil_image, temp_image_file.name
def scrape_mergekit_config(model_name):
    """
    For the *tiny* table's model links.
    Scrapes <pre> tags on the huggingface model page to find a YAML config.
    """
    df_row = df_full.loc[df_full["Model Configuration"] == model_name]
    if df_row.empty:
        return f"No data found for model {model_name}."
    model_link = df_row["Model Link"].values[0]
    response = requests.get(model_link, timeout=30)  # avoid hanging the worker on a slow page
    if response.status_code != 200:
        return f"Failed to fetch model page for {model_name}. Please check the link."
    soup = BeautifulSoup(response.text, "html.parser")
    yaml_config = soup.find("pre")  # Assume YAML is in <pre> tags
    if yaml_config:
        return yaml_config.text.strip()
    return f"No YAML configuration found for {model_name}."
def download_yaml(yaml_content, model_name):
    """
    Let users download the scraped YAML if it exists.
    """
    if "No YAML configuration found" in yaml_content or "Failed to fetch model page" in yaml_content:
        return None
    filename = f"{model_name.replace('/', '_')}_config.yaml"
    # gr.File has no `filename` keyword; write the YAML to a temp file and
    # return its path so the File component can serve it as a download.
    file_path = os.path.join(tempfile.gettempdir(), filename)
    with open(file_path, "w") as f:
        f.write(yaml_content)
    return file_path
def scrape_model_page(model_url):
    """
    Used for the "Live Scraping" text box in the Gradio UI.
    """
    try:
        response = requests.get(model_url, timeout=30)
        if response.status_code != 200:
            return f"Error: Unable to fetch the page (Status Code: {response.status_code})"
        soup = BeautifulSoup(response.text, "html.parser")
        yaml_config = soup.find("pre")
        yaml_text = yaml_config.text.strip() if yaml_config else "No YAML configuration found."
        metadata_section = soup.find("div", class_="metadata")
        metadata_text = metadata_section.text.strip() if metadata_section else "No metadata found."
        return f"**YAML Configuration:**\n{yaml_text}\n\n**Metadata:**\n{metadata_text}"
    except Exception as e:
        return f"Error: {str(e)}"
def display_scraped_model_data(model_url):
    """
    Helper for the "Live Scraping Features" section of the Gradio app.
    """
    return scrape_model_page(model_url)
def download_all_data():
    """
    Builds and returns a zip of:
    - the CSV of your 'tiny' data,
    - four plots (average performance, task performance, top models, heatmap),
    - any YAML configurations for the 'tiny' table's models (if found).
    """
    csv_buffer = io.StringIO()
    df_full.to_csv(csv_buffer, index=False)
    csv_data = csv_buffer.getvalue().encode('utf-8')
    average_plot_pil, average_plot_name = plot_average_scores()
    task_plot_pil, task_plot_name = plot_task_performance()
    top_models_plot_pil, top_models_plot_name = plot_task_specific_top_models()
    heatmap_plot_pil, heatmap_plot_name = plot_heatmap()
    plot_dict = {
        "average_performance": (average_plot_pil, average_plot_name),
        "task_performance": (task_plot_pil, task_plot_name),
        "top_models": (top_models_plot_pil, top_models_plot_name),
        "heatmap": (heatmap_plot_pil, heatmap_plot_name)
    }
    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, 'w') as zf:
        zf.writestr("model_scores.csv", csv_data)
        # Add the images under stable archive names rather than their temp-file paths
        for name, (pil_image, filename) in plot_dict.items():
            image_bytes = io.BytesIO()
            pil_image.save(image_bytes, format='PNG')
            image_bytes.seek(0)
            zf.writestr(f"{name}.png", image_bytes.read())
        # Also try scraping each model in the *tiny* dataset for a YAML config
        for model_name in df_full["Model Configuration"].to_list():
            yaml_content = scrape_mergekit_config(model_name)
            if ("No YAML configuration found" not in yaml_content) and ("Failed to fetch model page" not in yaml_content):
                zf.writestr(f"{model_name.replace('/', '_')}_config.yaml", yaml_content.encode())
    zip_buffer.seek(0)
    return zip_buffer, "analysis_data.zip"
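
# Gradio file outputs generally expect a filesystem path rather than an
# in-memory buffer; a thin wrapper like this (an assumed convenience helper,
# its UI wiring is not shown in this snippet) persists the zip first:
def download_all_data_as_file():
    zip_buffer, zip_name = download_all_data()
    zip_path = os.path.join(tempfile.gettempdir(), zip_name)
    with open(zip_path, "wb") as f:
        f.write(zip_buffer.getvalue())
    return zip_path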
# --------------------------------------------------------------------
# PART 2: THE "DATA START" SNIPPET (RANKS 44–105) + Parser
# --------------------------------------------------------------------
# This is your larger dataset, rank = 44..105
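
# Minimal parsing sketch (an assumption: the actual parser mentioned in the
# header is not visible in this excerpt, and `benchmark_data` itself is
# truncated below). It flattens the nested list of entry dicts into a DataFrame:
def parse_benchmark_data(pages):
    rows = []
    for page in pages:
        for entry in page:
            row = {"rank": entry["rank"], "name": entry["name"], "hf_url": entry["hf_url"]}
            row.update(entry["scores"])  # average, IFEval, BBH, MATH, GPQA, MUSR, MMLU_PRO, ...
            rows.append(row)
    return pd.DataFrame(rows)
# e.g.: df_bench = parse_benchmark_data(benchmark_data)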
benchmark_data = [
    [
        {
            "rank": 1,
            "name": "wanlige/li-14b-v0.4",
            "scores": {
                "average": 43.66,
                "IFEval": 81.33,
                "BBH": 50.38,
                "MATH": 55.74,
                "GPQA": 11.86,
                "MUSR": 16.35,
                "MMLU_PRO": 46.3,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/wanlige/li-14b-v0.4",
            "known_config": "null"
        },
        {
            "rank": 2,
            "name": "suayptalha/Lamarckvergence-14B",
            "scores": {
                "average": 43.32,
                "IFEval": 76.56,
                "BBH": 50.33,
                "MATH": 54,
                "GPQA": 15.1,
                "MUSR": 16.34,
                "MMLU_PRO": 47.59,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/suayptalha/Lamarckvergence-14B",
            "known_config": "null"
        },
        {
            "rank": 3,
            "name": "wanlige/li-14b-v0.4-slerp0.1",
            "scores": {
                "average": 42.91,
                "IFEval": 79.23,
                "BBH": 50.88,
                "MATH": 53.32,
                "GPQA": 14.54,
                "MUSR": 11.75,
                "MMLU_PRO": 47.71,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/wanlige/li-14b-v0.4-slerp0.1",
            "known_config": "null"
        },
        {
            "rank": 4,
            "name": "sthenno-com/miscii-14b-0218",
            "scores": {
                "average": 42.9,
                "IFEval": 76.56,
                "BBH": 50.64,
                "MATH": 51.44,
                "GPQA": 17.79,
                "MUSR": 13.21,
                "MMLU_PRO": 47.75,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/sthenno-com/miscii-14b-0218",
            "known_config": "null"
        },
        {
            "rank": 5,
            "name": "sthenno/tempesthenno-ppo-ckpt40",
            "scores": {
                "average": 42.74,
                "IFEval": 79.23,
                "BBH": 50.57,
                "MATH": 47.36,
                "GPQA": 17,
                "MUSR": 14.56,
                "MMLU_PRO": 47.69,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/sthenno/tempesthenno-ppo-ckpt40",
            "known_config": "null"
        },
        {
            "rank": 6,
            "name": "tanliboy/lambda-qwen2.5-14b-dpo-test",
            "scores": {
                "average": 42.62,
                "IFEval": 82.31,
                "BBH": 48.45,
                "MATH": 54.61,
                "GPQA": 14.99,
                "MUSR": 12.59,
                "MMLU_PRO": 42.75,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/tanliboy/lambda-qwen2.5-14b-dpo-test",
            "known_config": "null"
        },
        {
            "rank": 7,
            "name": "sthenno/tempesthenno-nuslerp-001",
            "scores": {
                "average": 42.59,
                "IFEval": 79.26,
                "BBH": 51.04,
                "MATH": 47.58,
                "GPQA": 16.44,
                "MUSR": 13.88,
                "MMLU_PRO": 47.3,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/sthenno/tempesthenno-nuslerp-001",
            "known_config": "null"
        },
        {
            "rank": 8,
            "name": "YOYO-AI/Qwen2.5-14B-1M-YOYO-V3",
            "scores": {
                "average": 42.56,
                "IFEval": 83.98,
                "BBH": 49.47,
                "MATH": 53.55,
                "GPQA": 10.51,
                "MUSR": 11.1,
                "MMLU_PRO": 46.74,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/YOYO-AI/Qwen2.5-14B-1M-YOYO-V3",
            "known_config": "null"
        },
        {
            "rank": 9,
            "name": "Goekdeniz-Guelmez/Josiefied-Qwen2.5-14B-Instruct-abliterated-v4",
            "scores": {
                "average": 42.55,
                "IFEval": 82.92,
                "BBH": 48.05,
                "MATH": 54.23,
                "GPQA": 12.3,
                "MUSR": 13.15,
                "MMLU_PRO": 44.65,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Goekdeniz-Guelmez/Josiefied-Qwen2.5-14B-Instruct-abliterated-v4",
            "known_config": "null"
        },
        {
            "rank": 10,
            "name": "djuna/Q2.5-Veltha-14B",
            "scores": {
                "average": 42.52,
                "IFEval": 82.92,
                "BBH": 49.75,
                "MATH": 47.89,
                "GPQA": 14.54,
                "MUSR": 12.26,
                "MMLU_PRO": 47.76,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/djuna/Q2.5-Veltha-14B",
            "known_config": "null"
        },
        {
            "rank": 11,
            "name": "arcee-ai/Virtuoso-Small-v2",
            "scores": {
                "average": 42.48,
                "IFEval": 82.73,
                "BBH": 50.95,
                "MATH": 46.6,
                "GPQA": 13.76,
                "MUSR": 14.28,
                "MMLU_PRO": 46.53,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/arcee-ai/Virtuoso-Small-v2",
            "known_config": "null"
        },
        {
            "rank": 12,
            "name": "YOYO-AI/Qwen2.5-14B-YOYO-V4-p1",
            "scores": {
                "average": 42.46,
                "IFEval": 82.03,
                "BBH": 50.25,
                "MATH": 53.32,
                "GPQA": 12.75,
                "MUSR": 11.73,
                "MMLU_PRO": 44.67,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/YOYO-AI/Qwen2.5-14B-YOYO-V4-p1",
            "known_config": "null"
        },
        {
            "rank": 13,
            "name": "jpacifico/Chocolatine-14B-Instruct-DPO-v1.3",
            "scores": {
                "average": 42.42,
                "IFEval": 70.4,
                "BBH": 54.85,
                "MATH": 56.19,
                "GPQA": 12.19,
                "MUSR": 12.29,
                "MMLU_PRO": 48.6,
                "Architecture": "Phi3ForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/jpacifico/Chocolatine-14B-Instruct-DPO-v1.3",
            "known_config": "null"
        },
        {
            "rank": 14,
            "name": "sthenno-com/miscii-14b-1028",
            "scores": {
                "average": 42.38,
                "IFEval": 82.37,
                "BBH": 49.26,
                "MATH": 50.3,
                "GPQA": 14.21,
                "MUSR": 12,
                "MMLU_PRO": 46.14,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/sthenno-com/miscii-14b-1028",
            "known_config": "null"
        },
        {
            "rank": 15,
            "name": "sthenno-com/miscii-14b-1225",
            "scores": {
                "average": 42.35,
                "IFEval": 78.78,
                "BBH": 50.91,
                "MATH": 45.17,
                "GPQA": 17,
                "MUSR": 14.77,
                "MMLU_PRO": 47.46,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/sthenno-com/miscii-14b-1225",
            "known_config": "null"
        },
        {
            "rank": 16,
            "name": "prithivMLmods/Sombrero-Opus-14B-Elite5",
            "scores": {
                "average": 42.32,
                "IFEval": 78.81,
                "BBH": 50.17,
                "MATH": 53.55,
                "GPQA": 11.52,
                "MUSR": 13.22,
                "MMLU_PRO": 46.67,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/prithivMLmods/Sombrero-Opus-14B-Elite5",
            "known_config": "null"
        },
        {
            "rank": 17,
            "name": "Lunzima/NQLSG-Qwen2.5-14B-MegaFusion-v8",
            "scores": {
                "average": 42.26,
                "IFEval": 73.84,
                "BBH": 49.31,
                "MATH": 41.69,
                "GPQA": 18.23,
                "MUSR": 21.96,
                "MMLU_PRO": 48.5,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/Lunzima/NQLSG-Qwen2.5-14B-MegaFusion-v8",
            "known_config": "null"
        },
        {
            "rank": 18,
            "name": "prithivMLmods/Equuleus-Opus-14B-Exp",
            "scores": {
                "average": 42.2,
                "IFEval": 70.01,
                "BBH": 48.62,
                "MATH": 45.85,
                "GPQA": 18.23,
                "MUSR": 21.9,
                "MMLU_PRO": 48.6,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/prithivMLmods/Equuleus-Opus-14B-Exp",
            "known_config": "null"
        },
        {
            "rank": 19,
            "name": "rombodawg/Rombos-LLM-V2.6-Qwen-14b",
            "scores": {
                "average": 42.2,
                "IFEval": 84.32,
                "BBH": 49.28,
                "MATH": 52.11,
                "GPQA": 11.19,
                "MUSR": 12.29,
                "MMLU_PRO": 44.01,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/rombodawg/Rombos-LLM-V2.6-Qwen-14b",
            "known_config": "null"
        },
        {
            "rank": 20,
            "name": "nbeerbower/EVA-abliterated-TIES-Qwen2.5-14B",
            "scores": {
                "average": 42.16,
                "IFEval": 78.36,
                "BBH": 48.52,
                "MATH": 50.45,
                "GPQA": 13.98,
                "MUSR": 14.88,
                "MMLU_PRO": 46.79,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/nbeerbower/EVA-abliterated-TIES-Qwen2.5-14B",
            "known_config": "null"
        },
        {
            "rank": 21,
            "name": "sometimesanotion/LamarckInfusion-14B-v1",
            "scores": {
                "average": 42.06,
                "IFEval": 71.98,
                "BBH": 50.35,
                "MATH": 41.69,
                "GPQA": 18.79,
                "MUSR": 20.9,
                "MMLU_PRO": 48.63,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/sometimesanotion/LamarckInfusion-14B-v1",
            "known_config": "null"
        },
        {
            "rank": 22,
            "name": "tensopolis/virtuoso-small-v2-tensopolis-v1",
            "scores": {
                "average": 41.99,
                "IFEval": 82.4,
                "BBH": 50.53,
                "MATH": 46.53,
                "GPQA": 12.53,
                "MUSR": 13.88,
                "MMLU_PRO": 46.07,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/tensopolis/virtuoso-small-v2-tensopolis-v1",
            "known_config": "null"
        },
        {
            "rank": 23,
            "name": "Quazim0t0/Fugazi14b",
            "scores": {
                "average": 41.94,
                "IFEval": 69.98,
                "BBH": 56.09,
                "MATH": 46.53,
                "GPQA": 13.53,
                "MUSR": 16.42,
                "MMLU_PRO": 49.08,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Quazim0t0/Fugazi14b",
            "known_config": "null"
        },
        {
            "rank": 24,
            "name": "1024m/QWEN-14B-B100",
            "scores": {
                "average": 41.92,
                "IFEval": 77.62,
                "BBH": 49.78,
                "MATH": 54.38,
                "GPQA": 13.42,
                "MUSR": 9.88,
                "MMLU_PRO": 46.43,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/1024m/QWEN-14B-B100",
            "known_config": "null"
        },
        {
            "rank": 25,
            "name": "Sakalti/Saka-14B",
            "scores": {
                "average": 41.91,
                "IFEval": 71.74,
                "BBH": 49.72,
                "MATH": 40.94,
                "GPQA": 19.46,
                "MUSR": 20.74,
                "MMLU_PRO": 48.84,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/Sakalti/Saka-14B",
            "known_config": "null"
        },
        {
            "rank": 26,
            "name": "prithivMLmods/Sombrero-Opus-14B-Elite6",
            "scores": {
                "average": 41.88,
                "IFEval": 72.26,
                "BBH": 49.6,
                "MATH": 40.79,
                "GPQA": 19.13,
                "MUSR": 20.74,
                "MMLU_PRO": 48.78,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/prithivMLmods/Sombrero-Opus-14B-Elite6",
            "known_config": "null"
        },
        {
            "rank": 27,
            "name": "YOYO-AI/Qwen2.5-14B-YOYO-latest-V2",
            "scores": {
                "average": 41.85,
                "IFEval": 77.71,
                "BBH": 47.3,
                "MATH": 51.59,
                "GPQA": 13.87,
                "MUSR": 13.68,
                "MMLU_PRO": 46.93,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/YOYO-AI/Qwen2.5-14B-YOYO-latest-V2",
            "known_config": "null"
        },
        {
            "rank": 28,
            "name": "Tsunami-th/Tsunami-1.0-14B-Instruct",
            "scores": {
                "average": 41.84,
                "IFEval": 78.29,
                "BBH": 49.15,
                "MATH": 45.85,
                "GPQA": 14.21,
                "MUSR": 16.34,
                "MMLU_PRO": 47.21,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Tsunami-th/Tsunami-1.0-14B-Instruct",
            "known_config": "null"
        },
        {
            "rank": 29,
            "name": "sthenno/tempesthenno-kto-0205-ckpt80",
            "scores": {
                "average": 41.79,
                "IFEval": 80.54,
                "BBH": 50.64,
                "MATH": 45.92,
                "GPQA": 13.09,
                "MUSR": 12.93,
                "MMLU_PRO": 47.62,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/sthenno/tempesthenno-kto-0205-ckpt80",
            "known_config": "null"
        },
        {
            "rank": 30,
            "name": "sometimesanotion/Lamarck-14B-v0.7-rc4",
            "scores": {
                "average": 41.79,
                "IFEval": 72.11,
                "BBH": 49.85,
                "MATH": 40.26,
                "GPQA": 18.57,
                "MUSR": 21.07,
                "MMLU_PRO": 48.89,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/sometimesanotion/Lamarck-14B-v0.7-rc4",
            "known_config": "null"
        },
        {
            "rank": 31,
            "name": "prithivMLmods/Porpoise-Opus-14B-Exp",
            "scores": {
                "average": 41.77,
                "IFEval": 70.98,
                "BBH": 49.95,
                "MATH": 40.41,
                "GPQA": 19.13,
                "MUSR": 21.3,
                "MMLU_PRO": 48.85,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/prithivMLmods/Porpoise-Opus-14B-Exp",
            "known_config": "null"
        },
        {
            "rank": 32,
            "name": "CombinHorizon/Josiefied-abliteratedV4-Qwen2.5-14B-Inst-BaseMerge-TIES",
            "scores": {
                "average": 41.77,
                "IFEval": 82.4,
                "BBH": 48.2,
                "MATH": 53.17,
                "GPQA": 9.96,
                "MUSR": 12.65,
                "MMLU_PRO": 44.21,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/CombinHorizon/Josiefied-abliteratedV4-Qwen2.5-14B-Inst-BaseMerge-TIES",
            "known_config": "null"
        },
        {
            "rank": 33,
            "name": "suayptalha/Lamarckvergence-14B",
            "scores": {
                "average": 43.32,
                "IFEval": 76.56,
                "BBH": 50.33,
                "MATH": 54,
                "GPQA": 15.1,
                "MUSR": 16.34,
                "MMLU_PRO": 47.59,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/suayptalha/Lamarckvergence-14B",
            "known_config": "null"
        },
        {
            "rank": 34,
            "name": "sthenno/tempesthenno-ppo-ckpt40",
            "scores": {
                "average": 42.74,
                "IFEval": 79.23,
                "BBH": 50.57,
                "MATH": 47.36,
                "GPQA": 17,
                "MUSR": 14.56,
                "MMLU_PRO": 47.69,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/sthenno/tempesthenno-ppo-ckpt40",
            "known_config": "null"
        },
        {
            "rank": 35,
            "name": "tanliboy/lambda-qwen2.5-14b-dpo-test",
            "scores": {
                "average": 42.62,
                "IFEval": 82.31,
                "BBH": 48.45,
                "MATH": 54.61,
                "GPQA": 14.99,
                "MUSR": 12.59,
                "MMLU_PRO": 42.75,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/tanliboy/lambda-qwen2.5-14b-dpo-test",
            "known_config": "null"
        },
        {
            "rank": 36,
            "name": "sthenno/tempesthenno-nuslerp-001",
            "scores": {
                "average": 42.59,
                "IFEval": 79.26,
                "BBH": 51.04,
                "MATH": 47.58,
                "GPQA": 16.44,
                "MUSR": 13.88,
                "MMLU_PRO": 47.3,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/sthenno/tempesthenno-nuslerp-001",
            "known_config": "null"
        },
        {
            "rank": 37,
            "name": "Goekdeniz-Guelmez/Josiefied-Qwen2.5-14B-Instruct-abliterated-v4",
            "scores": {
                "average": 42.55,
                "IFEval": 82.92,
                "BBH": 48.05,
                "MATH": 54.23,
                "GPQA": 12.3,
                "MUSR": 13.15,
                "MMLU_PRO": 44.65,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Goekdeniz-Guelmez/Josiefied-Qwen2.5-14B-Instruct-abliterated-v4",
            "known_config": "null"
        },
        {
            "rank": 38,
            "name": "djuna/Q2.5-Veltha-14B",
            "scores": {
                "average": 42.52,
                "IFEval": 82.92,
                "BBH": 49.75,
                "MATH": 47.89,
                "GPQA": 14.54,
                "MUSR": 12.26,
                "MMLU_PRO": 47.76,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/djuna/Q2.5-Veltha-14B",
            "known_config": "null"
        },
        {
            "rank": 39,
            "name": "arcee-ai/Virtuoso-Small-v2",
            "scores": {
                "average": 42.48,
                "IFEval": 82.73,
                "BBH": 50.95,
                "MATH": 46.6,
                "GPQA": 13.76,
                "MUSR": 14.28,
                "MMLU_PRO": 46.53,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/arcee-ai/Virtuoso-Small-v2",
            "known_config": "null"
        },
        {
            "rank": 40,
            "name": "jpacifico/Chocolatine-14B-Instruct-DPO-v1.3",
            "scores": {
                "average": 42.42,
                "IFEval": 70.4,
                "BBH": 54.85,
                "MATH": 56.19,
                "GPQA": 12.19,
                "MUSR": 12.29,
                "MMLU_PRO": 48.6,
                "Architecture": "Phi3ForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/jpacifico/Chocolatine-14B-Instruct-DPO-v1.3",
            "known_config": "null"
        },
        {
            "rank": 41,
            "name": "sthenno-com/miscii-14b-1028",
            "scores": {
                "average": 42.38,
                "IFEval": 82.37,
                "BBH": 49.26,
                "MATH": 50.3,
                "GPQA": 14.21,
                "MUSR": 12,
                "MMLU_PRO": 46.14,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/sthenno-com/miscii-14b-1028",
            "known_config": "null"
        },
        {
            "rank": 42,
            "name": "sthenno-com/miscii-14b-1225",
            "scores": {
                "average": 42.35,
                "IFEval": 78.78,
                "BBH": 50.91,
                "MATH": 45.17,
                "GPQA": 17,
                "MUSR": 14.77,
                "MMLU_PRO": 47.46,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/sthenno-com/miscii-14b-1225",
            "known_config": "null"
        },
        {
            "rank": 43,
            "name": "tensopolis/virtuoso-small-v2-tensopolis-v1",
            "scores": {
                "average": 42.34,
                "IFEval": 83.4,
                "BBH": 50.99,
                "MATH": 46.6,
                "GPQA": 12.98,
                "MUSR": 13.38,
                "MMLU_PRO": 46.67,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/tensopolis/virtuoso-small-v2-tensopolis-v1",
            "known_config": "null"
        },
        {
            "rank": 44,
            "name": "rombodawg/Rombos-LLM-V2.6-Qwen-14b",
            "scores": {
                "average": 42.2,
                "IFEval": 84.32,
                "BBH": 49.28,
                "MATH": 52.11,
                "GPQA": 11.19,
                "MUSR": 12.29,
                "MMLU_PRO": 44.01,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/rombodawg/Rombos-LLM-V2.6-Qwen-14b",
            "known_config": "null"
        },
        {
            "rank": 45,
            "name": "1024m/QWEN-14B-B100",
            "scores": {
                "average": 41.92,
                "IFEval": 77.62,
                "BBH": 49.78,
                "MATH": 54.38,
                "GPQA": 13.42,
                "MUSR": 9.88,
                "MMLU_PRO": 46.43,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/1024m/QWEN-14B-B100",
            "known_config": "null"
        },
        {
            "rank": 46,
            "name": "Sakalti/Saka-14B",
            "scores": {
                "average": 41.91,
                "IFEval": 71.74,
                "BBH": 49.72,
                "MATH": 40.94,
                "GPQA": 19.46,
                "MUSR": 20.74,
                "MMLU_PRO": 48.84,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/Sakalti/Saka-14B",
            "known_config": "null"
        },
        {
            "rank": 47,
            "name": "Tsunami-th/Tsunami-1.0-14B-Instruct",
            "scores": {
                "average": 41.84,
                "IFEval": 78.29,
                "BBH": 49.15,
                "MATH": 45.85,
                "GPQA": 14.21,
                "MUSR": 16.34,
                "MMLU_PRO": 47.21,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Tsunami-th/Tsunami-1.0-14B-Instruct",
            "known_config": "null"
        },
        {
            "rank": 48,
            "name": "sthenno/tempesthenno-kto-0205-ckpt80",
            "scores": {
                "average": 41.79,
                "IFEval": 80.54,
                "BBH": 50.64,
                "MATH": 45.92,
                "GPQA": 13.09,
                "MUSR": 12.93,
                "MMLU_PRO": 47.62,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/sthenno/tempesthenno-kto-0205-ckpt80",
            "known_config": "null"
        },
        {
            "rank": 49,
            "name": "sometimesanotion/Lamarck-14B-v0.7-rc4",
            "scores": {
                "average": 41.79,
                "IFEval": 72.11,
                "BBH": 49.85,
                "MATH": 40.26,
                "GPQA": 18.57,
                "MUSR": 21.07,
                "MMLU_PRO": 48.89,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/sometimesanotion/Lamarck-14B-v0.7-rc4",
            "known_config": "null"
        },
        {
            "rank": 50,
            "name": "CombinHorizon/Josiefied-abliteratedV4-Qwen2.5-14B-Inst-BaseMerge-TIES",
            "scores": {
                "average": 41.77,
                "IFEval": 82.4,
                "BBH": 48.2,
                "MATH": 53.17,
                "GPQA": 9.96,
                "MUSR": 12.65,
                "MMLU_PRO": 44.21,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/CombinHorizon/Josiefied-abliteratedV4-Qwen2.5-14B-Inst-BaseMerge-TIES",
            "known_config": "null"
        },
        {
            "rank": 51,
            "name": "suayptalha/Luminis-phi-4",
            "scores": {
                "average": 41.76,
                "IFEval": 69,
                "BBH": 55.8,
                "MATH": 46.37,
                "GPQA": 13.53,
                "MUSR": 16.68,
                "MMLU_PRO": 49.15,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/suayptalha/Luminis-phi-4",
            "known_config": "null"
        },
        {
            "rank": 52,
            "name": "huihui-ai/Qwen2.5-14B-Instruct-abliterated-v2",
            "scores": {
                "average": 41.75,
                "IFEval": 83.28,
                "BBH": 47.41,
                "MATH": 53.02,
                "GPQA": 11.19,
                "MUSR": 11.58,
                "MMLU_PRO": 44.02,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/huihui-ai/Qwen2.5-14B-Instruct-abliterated-v2",
            "known_config": "null"
        },
        {
            "rank": 53,
            "name": "djuna/Q2.5-Veltha-14B-0.5",
            "scores": {
                "average": 41.61,
                "IFEval": 77.96,
                "BBH": 50.32,
                "MATH": 43.73,
                "GPQA": 15.77,
                "MUSR": 14.17,
                "MMLU_PRO": 47.72,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/djuna/Q2.5-Veltha-14B-0.5",
            "known_config": "null"
        },
        {
            "rank": 54,
            "name": "Qwen/Qwen2.5-14B-Instruct-1M",
            "scores": {
                "average": 41.56,
                "IFEval": 84.14,
                "BBH": 45.66,
                "MATH": 53.02,
                "GPQA": 12.42,
                "MUSR": 11.35,
                "MMLU_PRO": 42.77,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Qwen/Qwen2.5-14B-Instruct-1M",
            "known_config": "null"
        },
        {
            "rank": 55,
            "name": "notbdq/Qwen2.5-14B-Instruct-1M-GRPO-Reasoning",
            "scores": {
                "average": 41.56,
                "IFEval": 84.14,
                "BBH": 45.66,
                "MATH": 53.02,
                "GPQA": 12.42,
                "MUSR": 11.35,
                "MMLU_PRO": 42.77,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/notbdq/Qwen2.5-14B-Instruct-1M-GRPO-Reasoning",
            "known_config": "null"
        },
        {
            "rank": 56,
            "name": "sometimesanotion/Qwenvergence-14B-v11",
            "scores": {
                "average": 41.52,
                "IFEval": 71.92,
                "BBH": 47.55,
                "MATH": 46.45,
                "GPQA": 16.33,
                "MUSR": 18.76,
                "MMLU_PRO": 48.08,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/sometimesanotion/Qwenvergence-14B-v11",
            "known_config": "null"
        },
        {
            "rank": 57,
            "name": "sometimesanotion/Qwenvergence-14B-v10",
            "scores": {
                "average": 41.48,
                "IFEval": 67.57,
                "BBH": 46.75,
                "MATH": 47.89,
                "GPQA": 17.23,
                "MUSR": 22.33,
                "MMLU_PRO": 47.1,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/sometimesanotion/Qwenvergence-14B-v10",
            "known_config": "null"
        },
        {
            "rank": 58,
            "name": "CombinHorizon/huihui-ai-abliteratedV2-Qwen2.5-14B-Inst-BaseMerge-TIES",
            "scores": {
                "average": 41.47,
                "IFEval": 81.76,
                "BBH": 47.77,
                "MATH": 54.76,
                "GPQA": 8.61,
                "MUSR": 12.45,
                "MMLU_PRO": 43.45,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/CombinHorizon/huihui-ai-abliteratedV2-Qwen2.5-14B-Inst-BaseMerge-TIES",
            "known_config": "null"
        },
        {
            "rank": 59,
            "name": "RDson/WomboCombo-R1-Coder-14B-Preview",
            "scores": {
                "average": 41.46,
                "IFEval": 62.86,
                "BBH": 48.15,
                "MATH": 59.89,
                "GPQA": 9.51,
                "MUSR": 22.01,
                "MMLU_PRO": 46.31,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/RDson/WomboCombo-R1-Coder-14B-Preview",
            "known_config": "null"
        },
        {
            "rank": 60,
            "name": "jpacifico/Chocolatine-2-14B-Instruct-v2.0b3",
            "scores": {
                "average": 41.43,
                "IFEval": 73.23,
                "BBH": 49.57,
                "MATH": 41.09,
                "GPQA": 17.23,
                "MUSR": 19.3,
                "MMLU_PRO": 48.19,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/jpacifico/Chocolatine-2-14B-Instruct-v2.0b3",
            "known_config": "null"
        },
        {
            "rank": 61,
            "name": "Quazim0t0/Nova-14b-sce",
            "scores": {
                "average": 41.41,
                "IFEval": 70.22,
                "BBH": 56.03,
                "MATH": 41.62,
                "GPQA": 15.1,
                "MUSR": 16.43,
                "MMLU_PRO": 49.03,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Quazim0t0/Nova-14b-sce",
            "known_config": "null"
        },
        {
            "rank": 62,
            "name": "v000000/Qwen2.5-14B-Gutenberg-Instruct-Slerpeno",
            "scores": {
                "average": 41.36,
                "IFEval": 81.97,
                "BBH": 48.45,
                "MATH": 53.25,
                "GPQA": 10.85,
                "MUSR": 10.05,
                "MMLU_PRO": 43.59,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/v000000/Qwen2.5-14B-Gutenberg-Instruct-Slerpeno",
            "known_config": "null"
        },
        {
            "rank": 63,
            "name": "Quazim0t0/NovaScotia-14b-stock",
            "scores": {
                "average": 41.35,
                "IFEval": 67.87,
                "BBH": 56.03,
                "MATH": 46.3,
                "GPQA": 13.2,
                "MUSR": 15.7,
                "MMLU_PRO": 48.99,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Quazim0t0/NovaScotia-14b-stock",
            "known_config": "null"
        },
        {
            "rank": 64,
            "name": "Quazim0t0/ODB-14b-sce",
            "scores": {
                "average": 41.34,
                "IFEval": 70.16,
                "BBH": 56.19,
                "MATH": 41.16,
                "GPQA": 14.99,
                "MUSR": 16.5,
                "MMLU_PRO": 49.02,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Quazim0t0/ODB-14b-sce",
            "known_config": "null"
        },
        {
            "rank": 65,
            "name": "LightningRodLabs/Flashlight-v1.1",
            "scores": {
                "average": 40.99,
                "IFEval": 67.21,
                "BBH": 55.43,
                "MATH": 53.25,
                "GPQA": 11.97,
                "MUSR": 9,
                "MMLU_PRO": 49.06,
                "Architecture": "Phi3ForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/LightningRodLabs/Flashlight-v1.1",
            "known_config": "null"
        },
        {
            "rank": 66,
            "name": "Quazim0t0/Mithril-14B-sce",
            "scores": {
                "average": 40.98,
                "IFEval": 69.58,
                "BBH": 55.93,
                "MATH": 38.22,
                "GPQA": 15.88,
                "MUSR": 17.37,
                "MMLU_PRO": 48.92,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Quazim0t0/Mithril-14B-sce",
            "known_config": "null"
        },
        {
            "rank": 67,
            "name": "Sakalti/ultiima-14B-v0.2",
            "scores": {
                "average": 40.96,
                "IFEval": 70.7,
                "BBH": 49.51,
                "MATH": 39.95,
                "GPQA": 17.67,
                "MUSR": 19.19,
                "MMLU_PRO": 48.75,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/Sakalti/ultiima-14B-v0.2",
            "known_config": "null"
        },
        {
            "rank": 68,
            "name": "bunnycore/Phi-4-ReasoningRP",
            "scores": {
                "average": 40.95,
                "IFEval": 67.36,
                "BBH": 55.88,
                "MATH": 45.69,
                "GPQA": 12.53,
                "MUSR": 15.14,
                "MMLU_PRO": 49.12,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/bunnycore/Phi-4-ReasoningRP",
            "known_config": "null"
        },
        {
            "rank": 69,
            "name": "dwikitheduck/gen-inst-1",
            "scores": {
                "average": 40.88,
                "IFEval": 77.5,
                "BBH": 48.32,
                "MATH": 45.54,
                "GPQA": 16.22,
                "MUSR": 12.27,
                "MMLU_PRO": 45.43,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/dwikitheduck/gen-inst-1",
            "known_config": "null"
        },
        {
            "rank": 70,
            "name": "v000000/Qwen2.5-14B-Gutenberg-1e-Delta",
            "scores": {
                "average": 40.88,
                "IFEval": 80.45,
                "BBH": 48.62,
                "MATH": 52.64,
                "GPQA": 10.51,
                "MUSR": 9.38,
                "MMLU_PRO": 43.67,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.77B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/v000000/Qwen2.5-14B-Gutenberg-1e-Delta",
            "known_config": "null"
        },
        {
            "rank": 60,
            "name": "hotmailuser/QwenSlerp2-14B",
            "scores": {
                "average": 40.86,
                "IFEval": 70.37,
                "BBH": 49.68,
                "MATH": 39.65,
                "GPQA": 17.45,
                "MUSR": 19.35,
                "MMLU_PRO": 48.66,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14.766B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/hotmailuser/QwenSlerp2-14B",
            "known_config": "null"
        },
        {
            "rank": 71,
            "name": "Quazim0t0/Loke-14B-sce",
            "scores": {
                "average": 40.86,
                "IFEval": 68.48,
                "BBH": 55.83,
                "MATH": 39.05,
                "GPQA": 15.32,
                "MUSR": 17.56,
                "MMLU_PRO": 48.9,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Quazim0t0/Loke-14B-sce",
            "known_config": "null"
        },
        {
            "rank": 72,
            "name": "Quazim0t0/mosaic-14b-sce",
            "scores": {
                "average": 40.83,
                "IFEval": 68.76,
                "BBH": 55.69,
                "MATH": 40.26,
                "GPQA": 14.99,
                "MUSR": 16.44,
                "MMLU_PRO": 48.85,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Quazim0t0/mosaic-14b-sce",
            "known_config": "null"
        },
        {
            "rank": 73,
            "name": "bunnycore/Phi-4-Model-Stock",
            "scores": {
                "average": 40.79,
                "IFEval": 68.79,
                "BBH": 55.32,
                "MATH": 42.98,
                "GPQA": 13.98,
                "MUSR": 15.12,
                "MMLU_PRO": 48.54,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/bunnycore/Phi-4-Model-Stock",
            "known_config": "null"
        },
        {
            "rank": 74,
            "name": "unsloth/phi-4",
            "scores": {
                "average": 40.73,
                "IFEval": 68.82,
                "BBH": 55.25,
                "MATH": 50,
                "GPQA": 11.52,
                "MUSR": 10.13,
                "MMLU_PRO": 48.65,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/unsloth/phi-4",
            "known_config": "null"
        },
        {
            "rank": 75,
            "name": "pankajmathur/orca_mini_phi-4",
            "scores": {
                "average": 40.68,
                "IFEval": 77.81,
                "BBH": 54.63,
                "MATH": 29.53,
                "GPQA": 16.55,
                "MUSR": 18.25,
                "MMLU_PRO": 47.28,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/pankajmathur/orca_mini_phi-4",
            "known_config": "null"
        },
        {
            "rank": 76,
            "name": "pankajmathur/orca_mini_v9_2_14B",
            "scores": {
                "average": 40.68,
                "IFEval": 77.81,
                "BBH": 54.63,
                "MATH": 29.53,
                "GPQA": 16.55,
                "MUSR": 18.25,
                "MMLU_PRO": 47.28,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/pankajmathur/orca_mini_v9_2_14B",
            "known_config": "null"
        },
        {
            "rank": 77,
            "name": "sometimesanotion/Lamarck-14B-v0.6-model_stock",
            "scores": {
                "average": 40.68,
                "IFEval": 67.9,
                "BBH": 46.49,
                "MATH": 42.45,
                "GPQA": 17.9,
                "MUSR": 22.68,
                "MMLU_PRO": 46.64,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/sometimesanotion/Lamarck-14B-v0.6-model_stock",
            "known_config": "null"
        },
        {
            "rank": 78,
            "name": "sometimesanotion/Qwenvergence-14B-v0.6-004-model_stock",
            "scores": {
                "average": 40.6,
                "IFEval": 68.6,
                "BBH": 46.37,
                "MATH": 40.94,
                "GPQA": 17.79,
                "MUSR": 23.35,
                "MMLU_PRO": 46.59,
                "Architecture": "Qwen2ForCausalLM",
                "Parameters": "14B",
                "Chat_Template": "No"
            },
            "hf_url": "https://huggingface.co/sometimesanotion/Qwenvergence-14B-v0.6-004-model_stock",
            "known_config": "null"
        },
        {
            "rank": 79,
            "name": "Quazim0t0/Oasis-14B-ties",
            "scores": {
                "average": 40.59,
                "IFEval": 69.37,
                "BBH": 55.75,
                "MATH": 37.54,
                "GPQA": 15.32,
                "MUSR": 16.63,
                "MMLU_PRO": 48.94,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/Quazim0t0/Oasis-14B-ties",
            "known_config": "null"
        },
        {
            "rank": 80,
            "name": "LightningRodLabs/Flashlight-v1.0",
            "scores": {
                "average": 40.57,
                "IFEval": 67.45,
                "BBH": 55.15,
                "MATH": 49.7,
                "GPQA": 12.3,
                "MUSR": 9.93,
                "MMLU_PRO": 48.91,
                "Architecture": "LlamaForCausalLM",
                "Parameters": "14.66B",
                "Chat_Template": "Yes"
            },
            "hf_url": "https://huggingface.co/LightningRodLabs/Flashlight-v1.0",
            "known_config": "null"
        },
| { | |
| "rank": 81, | |
| "name": "arcee-ai/Virtuoso-Small", | |
| "scores": { | |
| "average": 40.54, | |
| "IFEval": 79.35, | |
| "BBH": 50.4, | |
| "MATH": 40.94, | |
| "GPQA": 11.52, | |
| "MUSR": 14.44, | |
| "MMLU_PRO": 46.57, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "14.77B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/arcee-ai/Virtuoso-Small", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 82, | |
| "name": "Quazim0t0/GuiltySpark-14B-ties", | |
| "scores": { | |
| "average": 40.52, | |
| "IFEval": 68.54, | |
| "BBH": 55.72, | |
| "MATH": 38.37, | |
| "GPQA": 15.32, | |
| "MUSR": 16.3, | |
| "MMLU_PRO": 48.89, | |
| "Architecture": "LlamaForCausalLM", | |
| "Parameters": "14.66B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/Quazim0t0/GuiltySpark-14B-ties", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 83, | |
| "name": "ozone-ai/0x-lite", | |
| "scores": { | |
| "average": 40.48, | |
| "IFEval": 77.4, | |
| "BBH": 47.53, | |
| "MATH": 50.45, | |
| "GPQA": 9.28, | |
| "MUSR": 11.76, | |
| "MMLU_PRO": 46.49, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "14.77B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/ozone-ai/0x-lite", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 84, | |
| "name": "Quazim0t0/Casa-14b-sce", | |
| "scores": { | |
| "average": 40.41, | |
| "IFEval": 66.54, | |
| "BBH": 55.4, | |
| "MATH": 46.98, | |
| "GPQA": 11.07, | |
| "MUSR": 13.31, | |
| "MMLU_PRO": 49.17, | |
| "Architecture": "LlamaForCausalLM", | |
| "Parameters": "14.66B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/Quazim0t0/Casa-14b-sce", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 85, | |
| "name": "Sakalti/ultiima-14B-v0.3", | |
| "scores": { | |
| "average": 40.38, | |
| "IFEval": 70.4, | |
| "BBH": 48.45, | |
| "MATH": 39.65, | |
| "GPQA": 16.89, | |
| "MUSR": 18.73, | |
| "MMLU_PRO": 48.18, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "14.766B", | |
| "Chat_Template": "No" | |
| }, | |
| "hf_url": "https://huggingface.co/Sakalti/ultiima-14B-v0.3", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 86, | |
| "name": "ehristoforu/fp4-14b-v1-fix", | |
| "scores": { | |
| "average": 40.37, | |
| "IFEval": 67.42, | |
| "BBH": 54.33, | |
| "MATH": 42.07, | |
| "GPQA": 13.87, | |
| "MUSR": 16.18, | |
| "MMLU_PRO": 48.37, | |
| "Architecture": "LlamaForCausalLM", | |
| "Parameters": "14.66B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/ehristoforu/fp4-14b-v1-fix", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 87, | |
| "name": "FINGU-AI/Chocolatine-Fusion-14B", | |
| "scores": { | |
| "average": 40.36, | |
| "IFEval": 69.49, | |
| "BBH": 48.6, | |
| "MATH": 38.52, | |
| "GPQA": 16.22, | |
| "MUSR": 21.99, | |
| "MMLU_PRO": 47.35, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "8.367B", | |
| "Chat_Template": "No" | |
| }, | |
| "hf_url": "https://huggingface.co/FINGU-AI/Chocolatine-Fusion-14B", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 88, | |
| "name": "hotmailuser/QwenSlerp-14B", | |
| "scores": { | |
| "average": 40.35, | |
| "IFEval": 70.25, | |
| "BBH": 49.42, | |
| "MATH": 38.37, | |
| "GPQA": 18.34, | |
| "MUSR": 16.83, | |
| "MMLU_PRO": 48.89, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "14.766B", | |
| "Chat_Template": "No" | |
| }, | |
| "hf_url": "https://huggingface.co/hotmailuser/QwenSlerp-14B", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 89, | |
| "name": "Triangle104/Robo-Gutenberg_V1.0", | |
| "scores": { | |
| "average": 40.35, | |
| "IFEval": 60.08, | |
| "BBH": 50.29, | |
| "MATH": 45.62, | |
| "GPQA": 18.12, | |
| "MUSR": 19.2, | |
| "MMLU_PRO": 48.79, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "14.77B", | |
| "Chat_Template": "No" | |
| }, | |
| "hf_url": "https://huggingface.co/Triangle104/Robo-Gutenberg_V1.0", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 90, | |
| "name": "Quazim0t0/Adamant-14B-sce", | |
| "scores": { | |
| "average": 40.32, | |
| "IFEval": 68.58, | |
| "BBH": 54.97, | |
| "MATH": 39.88, | |
| "GPQA": 13.42, | |
| "MUSR": 16.51, | |
| "MMLU_PRO": 48.57, | |
| "Architecture": "LlamaForCausalLM", | |
| "Parameters": "14.66B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/Quazim0t0/Adamant-14B-sce", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 91, | |
| "name": "Quazim0t0/Phi4Basis-14B-sce", | |
| "scores": { | |
| "average": 40.31, | |
| "IFEval": 65.02, | |
| "BBH": 55.67, | |
| "MATH": 47.89, | |
| "GPQA": 10.51, | |
| "MUSR": 14.02, | |
| "MMLU_PRO": 48.78, | |
| "Architecture": "LlamaForCausalLM", | |
| "Parameters": "14.66B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/Quazim0t0/Phi4Basis-14B-sce", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 92, | |
| "name": "Quazim0t0/bloom-14b-stock", | |
| "scores": { | |
| "average": 40.29, | |
| "IFEval": 65.75, | |
| "BBH": 55.27, | |
| "MATH": 48.11, | |
| "GPQA": 10.85, | |
| "MUSR": 13.17, | |
| "MMLU_PRO": 48.59, | |
| "Architecture": "LlamaForCausalLM", | |
| "Parameters": "14.66B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/Quazim0t0/bloom-14b-stock", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 93, | |
| "name": "sometimesanotion/Qwen2.5-14B-Vimarckoso-v3-Prose01", | |
| "scores": { | |
| "average": 40.28, | |
| "IFEval": 68.72, | |
| "BBH": 47.71, | |
| "MATH": 39.95, | |
| "GPQA": 18.23, | |
| "MUSR": 19.56, | |
| "MMLU_PRO": 47.5, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "14B", | |
| "Chat_Template": "No" | |
| }, | |
| "hf_url": "https://huggingface.co/sometimesanotion/Qwen2.5-14B-Vimarckoso-v3-Prose01", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 94, | |
| "name": "Quazim0t0/Halo-14B-sce", | |
| "scores": { | |
| "average": 40.26, | |
| "IFEval": 67.54, | |
| "BBH": 55.27, | |
| "MATH": 42.9, | |
| "GPQA": 12.98, | |
| "MUSR": 14.24, | |
| "MMLU_PRO": 48.63, | |
| "Architecture": "LlamaForCausalLM", | |
| "Parameters": "14.66B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/Quazim0t0/Halo-14B-sce", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 95, | |
| "name": "prithivMLmods/Calcium-Opus-14B-Elite2", | |
| "scores": { | |
| "average": 40.25, | |
| "IFEval": 61.76, | |
| "BBH": 46.81, | |
| "MATH": 46.9, | |
| "GPQA": 16, | |
| "MUSR": 22.24, | |
| "MMLU_PRO": 47.79, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "14.766B", | |
| "Chat_Template": "No" | |
| }, | |
| "hf_url": "https://huggingface.co/prithivMLmods/Calcium-Opus-14B-Elite2", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 96, | |
| "name": "SicariusSicariiStuff/Impish_QWEN_14B-1M", | |
| "scores": { | |
| "average": 40.24, | |
| "IFEval": 78.68, | |
| "BBH": 47.22, | |
| "MATH": 39.65, | |
| "GPQA": 13.42, | |
| "MUSR": 17.52, | |
| "MMLU_PRO": 44.93, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "14.77B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/SicariusSicariiStuff/Impish_QWEN_14B-1M", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 97, | |
| "name": "bunnycore/Phi-4-Stock-Ex", | |
| "scores": { | |
| "average": 40.22, | |
| "IFEval": 65.75, | |
| "BBH": 55.2, | |
| "MATH": 40.86, | |
| "GPQA": 13.42, | |
| "MUSR": 17.46, | |
| "MMLU_PRO": 48.61, | |
| "Architecture": "LlamaForCausalLM", | |
| "Parameters": "14.66B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/bunnycore/Phi-4-Stock-Ex", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 98, | |
| "name": "sometimesanotion/Qwenvergence-14B-qv256", | |
| "scores": { | |
| "average": 40.12, | |
| "IFEval": 70.06, | |
| "BBH": 47.08, | |
| "MATH": 38.97, | |
| "GPQA": 17.11, | |
| "MUSR": 21.07, | |
| "MMLU_PRO": 46.42, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "14B", | |
| "Chat_Template": "No" | |
| }, | |
| "hf_url": "https://huggingface.co/sometimesanotion/Qwenvergence-14B-qv256", | |
| "known_config": "null" | |
| }, | |
| { | |
| "rank": 99, | |
| "name": "tensopolis/virtuoso-small-tensopolis-v2", | |
| "scores": { | |
| "average": 40.11, | |
| "IFEval": 80.2, | |
| "BBH": 50.23, | |
| "MATH": 38.75, | |
| "GPQA": 10.51, | |
| "MUSR": 14.84, | |
| "MMLU_PRO": 46.15, | |
| "Architecture": "Qwen2ForCausalLM", | |
| "Parameters": "14.77B", | |
| "Chat_Template": "Yes" | |
| }, | |
| "hf_url": "https://huggingface.co/tensopolis/virtuoso-small-tensopolis-v2", | |
| "known_config": "null" | |
| } | |
| ] | |
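| # Illustrative helper (a sketch, not part of the original app): flatten the | |
| # benchmark_data entries above into a pandas DataFrame so the larger dataset | |
| # can be sorted and filtered like the tiny table in Part 1. Assumes every | |
| # entry keeps the rank/name/scores/hf_url shape used in this list. | |
| def benchmark_data_to_df(entries): | |
|     rows = [] | |
|     for e in entries: | |
|         # Copy the flat fields, then merge in the per-benchmark scores. | |
|         row = {"rank": e["rank"], "name": e["name"], "hf_url": e["hf_url"]} | |
|         row.update(e["scores"]) | |
|         rows.append(row) | |
|     return pd.DataFrame(rows) | |
| # Example: benchmark_data_to_df(benchmark_data).sort_values("average", ascending=False) | |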
| def snippet_scrape_model_page(url): | |
| """ | |
| Equivalent scraping function for the larger dataset | |
| to look for <pre> YAML and a .metadata section. | |
| """ | |
| try: | |
| response = requests.get(url, timeout=30)  # timeout so a slow page can't hang the app | |
| if response.status_code != 200: | |
| return f"Error: Unable to fetch the page (Status Code: {response.status_code})" | |
| soup = BeautifulSoup(response.text, "html.parser") | |
| yaml_config = soup.find("pre") | |
| yaml_text = yaml_config.text.strip() if yaml_config else "No YAML configuration found." | |
| metadata_section = soup.find("div", class_="metadata") | |
| metadata_text = metadata_section.text.strip() if metadata_section else "No metadata found." | |
| return { | |
| "yaml_configuration": yaml_text, | |
| "metadata": metadata_text | |
| } | |
| except Exception as e: | |
| return f"Error: {str(e)}" | |
| def snippet_print_benchmark_and_config_info(model_info): | |
| """ | |
| Prints an overview for each model in the rank=44..105 dataset. | |
| If known_config is not None, prints it. Otherwise attempts to scrape. | |
| """ | |
| print(f"---\nModel Rank: {model_info['rank']}") | |
| print(f"Model Name: {model_info['name']}") | |
| print(f"Model average score across benchmarks in %: {model_info['scores']['average']}") | |
| print(f"Models average score on IFEval benchmarks in %: {model_info['scores']['IFEval']}") | |
| print(f"Models average score on BBH benchmarks in %: {model_info['scores']['BBH']}") | |
| print(f"Models average score on MATH benchmarks in %: {model_info['scores']['MATH']}") | |
| print(f"Models average score in GPQA benchmarks in %: {model_info['scores']['GPQA']}") | |
| print(f"Models average score in MUSR benchmarks in %: {model_info['scores']['MUSR']}") | |
| print(f"Models average score in MMLU_PRO benchmarks in %: {model_info['scores']['MMLU_PRO']}") | |
| # If there's a known_config, print it in YAML form and stop. | |
| # Note: entries in this dataset store the string "null" (not None) when | |
| # no config is known, so guard against both before indexing into it. | |
| if model_info["known_config"] not in (None, "null"): | |
| print("###") | |
| print("models:") | |
| for m in model_info["known_config"]["models"]: | |
| print(f" - model: {m['model']}") | |
| print(f"merge_method: {model_info['known_config']['merge_method']}") | |
| print(f"base_model: {model_info['known_config']['base_model']}") | |
| print(f"dtype: {model_info['known_config']['dtype']}") | |
| print("parameters:") | |
| t_vals = model_info["known_config"]["parameters"]["t"] | |
| print(f" t: {t_vals} # V shaped curve: Hermes for input & output, WizardMath in the middle layers") | |
| print("###") | |
| return | |
| # Otherwise, do scraping: | |
| scraped = snippet_scrape_model_page(model_info["hf_url"]) | |
| if isinstance(scraped, str): | |
| # A plain string here is an error message from the scraper. | |
| print("(No MergeKit configuration found or scraping error.)") | |
| print(scraped) | |
| return | |
| else: | |
| # Otherwise it's a dict with "yaml_configuration" and "metadata" keys. | |
| if "No YAML configuration found." in scraped["yaml_configuration"]: | |
| print("(No MergeKit configuration found.)\n") | |
| print("You can try the following Python script to scrape the model page:\n") | |
| print("#" * 70) | |
| print(f'''import requests | |
| from bs4 import BeautifulSoup | |
| def scrape_model_page(model_url): | |
| try: | |
| response = requests.get(model_url) | |
| if response.status_code != 200: | |
| return f"Error: Unable to fetch the page (Status Code: {{response.status_code}})" | |
| soup = BeautifulSoup(response.text, "html.parser") | |
| yaml_config = soup.find("pre") | |
| yaml_text = yaml_config.text.strip() if yaml_config else "No YAML configuration found." | |
| metadata_section = soup.find("div", class_="metadata") | |
| metadata_text = metadata_section.text.strip() if metadata_section else "No metadata found." | |
| return {{ | |
| "yaml_configuration": yaml_text, | |
| "metadata": metadata_text | |
| }} | |
| except Exception as e: | |
| return f"Error: {{str(e)}}" | |
| if __name__ == "__main__": | |
| model_url = "{model_info['hf_url']}" | |
| result = scrape_model_page(model_url) | |
| print(result)''') | |
| print("#" * 70) | |
| else: | |
| # Found some YAML | |
| print("###") | |
| print(scraped["yaml_configuration"]) | |
| print("###") | |
| def run_non_tiny_benchmarks(): | |
|     """ | |
|     Captures the stdout from printing each model in benchmark_data (ranks 44..105), | |
|     returning the entire output as a single string for Gradio to display. | |
|     """ | |
|     old_stdout = sys.stdout | |
|     buffer = io.StringIO() | |
|     sys.stdout = buffer | |
|     try: | |
|         for model in benchmark_data: | |
|             snippet_print_benchmark_and_config_info(model) | |
|     finally: | |
|         # Restore stdout even if printing a model entry raises. | |
|         sys.stdout = old_stdout | |
|     return buffer.getvalue() | |
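| # Illustrative alternative (a sketch, not wired into the UI): the standard | |
| # library's contextlib.redirect_stdout restores sys.stdout automatically, | |
| # avoiding the manual swap above. | |
| from contextlib import redirect_stdout | |
| def run_non_tiny_benchmarks_alt(): | |
|     buffer = io.StringIO() | |
|     with redirect_stdout(buffer): | |
|         for model in benchmark_data: | |
|             snippet_print_benchmark_and_config_info(model) | |
|     return buffer.getvalue() | |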
| # -------------------------------------------------------------------- | |
| # PART 3: The Gradio App | |
| # -------------------------------------------------------------------- | |
| with gr.Blocks() as demo: | |
| gr.Markdown("# Comprehensive Model Performance Analysis with Hugging Face Links") | |
| # The existing UI for the "tiny" data | |
| with gr.Row(): | |
| btn1 = gr.Button("Show Average Performance") | |
| img1 = gr.Image(type="pil", label="Average Performance Plot") | |
| img1_download = gr.File(label="Download Average Performance") | |
| btn1.click(plot_average_scores, outputs=[img1, img1_download]) | |
| with gr.Row(): | |
| btn2 = gr.Button("Show Task Performance") | |
| img2 = gr.Image(type="pil", label="Task Performance Plot") | |
| img2_download = gr.File(label="Download Task Performance") | |
| btn2.click(plot_task_performance, outputs=[img2, img2_download]) | |
| with gr.Row(): | |
| btn3 = gr.Button("Task-Specific Top Models") | |
| img3 = gr.Image(type="pil", label="Task-Specific Top Models Plot") | |
| img3_download = gr.File(label="Download Top Models") | |
| btn3.click(plot_task_specific_top_models, outputs=[img3, img3_download]) | |
| with gr.Row(): | |
| btn4 = gr.Button("Plot Performance Heatmap") | |
| heatmap_img = gr.Image(type="pil", label="Performance Heatmap") | |
| heatmap_download = gr.File(label="Download Heatmap") | |
| btn4.click(plot_heatmap, outputs=[heatmap_img, heatmap_download]) | |
| # Scraping & YAML handling for the *tiny* table | |
| with gr.Row(): | |
| model_selector = gr.Dropdown(choices=df_full["Model Configuration"].tolist(), label="Select a Model") | |
| with gr.Column(): | |
| scrape_btn = gr.Button("Scrape MergeKit Configuration") | |
| yaml_output = gr.Textbox(lines=10, placeholder="YAML Configuration will appear here.") | |
| scrape_btn.click(scrape_mergekit_config, inputs=model_selector, outputs=yaml_output) | |
| with gr.Column(): | |
| save_yaml_btn = gr.Button("Save MergeKit Configuration") | |
| yaml_download = gr.File(label="Download MergeKit Configuration") | |
| save_yaml_btn.click(download_yaml, inputs=[yaml_output, model_selector], outputs=yaml_download) | |
| # Download everything (CSV, plots, any found YAML) | |
| with gr.Row(): | |
| download_all_btn = gr.Button("Download Everything") | |
| all_downloads = gr.File(label="Download All Data") | |
| download_all_btn.click(download_all_data, outputs=all_downloads) | |
| # Live Scraping | |
| gr.Markdown("## Live Scraping Features") | |
| with gr.Row(): | |
| url_input = gr.Textbox(label="Enter Hugging Face Model URL", placeholder="https://huggingface.co/<model>") | |
| live_scrape_btn = gr.Button("Scrape Model Page") | |
| live_scrape_output = gr.Textbox(label="Scraped Data", lines=15) | |
| live_scrape_btn.click(display_scraped_model_data, inputs=url_input, outputs=live_scrape_output) | |
| # Non-Tiny Benchmarks | |
| gr.Markdown("## Non-Tiny Benchmark Parser (Ranks 44ΓÇô105)") | |
| with gr.Row(): | |
| parse_non_tiny_btn = gr.Button("Parse Non-Tiny Benchmarks") | |
| parse_non_tiny_output = gr.Textbox(label="Non-Tiny Benchmark Output", lines=30) | |
| parse_non_tiny_btn.click(fn=run_non_tiny_benchmarks, outputs=parse_non_tiny_output) | |
| demo.launch() | |
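| # Note (optional, standard Gradio options): demo.launch(share=True) creates a | |
| # temporary public link, and demo.launch(server_name="0.0.0.0") listens on | |
| # all interfaces for containerized deployments. | |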