import gradio as gr
import pandas as pd
import random
import time

from info.train_a_model import (
    LLM_BENCHMARKS_TEXT)
from info.submit import (
    SUBMIT_TEXT)
from info.deployment import (
    DEPLOY_TEXT)
from info.programs import (
    PROGRAMS_TEXT)
from info.citation import (
    CITATION_TEXT)
from src.processing import filter_benchmarks_table, make_clickable
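
# For reference only: a minimal sketch of what `filter_benchmarks_table` (imported above
# from src/processing.py) is assumed to do. The column names below are assumptions, not
# taken from this repo; the real implementation may differ.
#
# def filter_benchmarks_table(df, hw_selected, platform_selected, affiliation_selected,
#                             size_selected, precision_selected, type_selected):
#     mask = (df["Training Hardware"].isin(hw_selected)
#             & df["Training Infrastructure"].isin(platform_selected)
#             & df["Affiliation"].isin(affiliation_selected)
#             & df["Model Size (B)"].isin(size_selected)
#             & df["Precision"].isin(precision_selected)
#             & df["Model Type"].isin(type_selected))
#     return df[mask]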

demo = gr.Blocks()

with demo:
    gr.HTML("""<h1 align="center" id="space-title">🤖 Powered-by-Intel LLM Leaderboard 💻</h1>""")
    gr.Markdown("This leaderboard is designed to evaluate, score, and rank open-source large language \
                models that have been pre-trained or fine-tuned on Intel Hardware 🦾")
    gr.Markdown("Models submitted to the leaderboard are evaluated \
                on the Intel Developer Cloud ☁️")

    # TODO: Coming soon comparison tool
    # with gr.Accordion("🥊 Large Language Model Boxing Ring 🥊", open=False):
    #     with gr.Row():
    #         chat_a = gr.Chatbot()
    #         chat_b = gr.Chatbot()
    #     msg = gr.Textbox()
    #     gr.ClearButton([msg, chat_a])
    #
    #     def respond(message, chat_history):
    #         bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
    #         chat_history.append((message, bot_message))
    #         time.sleep(2)
    #         return "", chat_history
    #
    #     msg.submit(respond, inputs=[msg, chat_a], outputs=[msg, chat_a])

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏆 LLM Benchmark", elem_id="llm-benchmark-table", id=0):
            with gr.Row():
                with gr.Column():
                    filter_hw = gr.CheckboxGroup(choices=["Gaudi", "Xeon", "GPU Max", "Arc GPU", "Core Ultra"],
                                                 label="Select Training Platform*",
                                                 elem_id="compute_platforms",
                                                 value=["Gaudi", "Xeon", "GPU Max", "Arc GPU", "Core Ultra"])
                    filter_platform = gr.CheckboxGroup(choices=["Intel Developer Cloud", "AWS", "Azure", "GCP", "Local"],
                                                       label="Training Infrastructure*",
                                                       elem_id="training_infra",
                                                       value=["Intel Developer Cloud", "AWS", "Azure", "GCP", "Local"])
                    filter_affiliation = gr.CheckboxGroup(choices=["No Affiliation", "Intel Innovator", "Intel Student Ambassador", "Intel Software Liftoff", "Intel Labs", "Other"],
                                                          label="Intel Program Affiliation",
                                                          elem_id="program_affiliation",
                                                          value=["No Affiliation", "Intel Innovator", "Intel Student Ambassador", "Intel Software Liftoff", "Intel Labs", "Other"])
                with gr.Column():
                    filter_size = gr.CheckboxGroup(choices=[1, 3, 5, 7, 13, 35, 60, 70, 100],
                                                   label="Model Sizes (Billions of Parameters)",
                                                   elem_id="parameter_size",
                                                   value=[1, 3, 5, 7, 13, 35, 60, 70, 100])
                    filter_precision = gr.CheckboxGroup(choices=["fp8", "fp16", "bf16", "int8", "4bit"],
                                                        label="Model Precision",
                                                        elem_id="precision",
                                                        value=["fp8", "fp16", "bf16", "int8", "4bit"])
                    filter_type = gr.CheckboxGroup(choices=["pretrained", "fine-tuned", "chat-models", "merges/moerges"],
                                                   label="Model Types",
                                                   elem_id="model_types",
                                                   value=["pretrained", "fine-tuned", "chat-models", "merges/moerges"])

            initial_df = pd.read_csv("leaderboard_status_030424.csv")
            # Show the full leaderboard on load; the filter callbacks below narrow it down.
            gradio_df_display = gr.Dataframe(value=initial_df)

            def update_df(hw_selected, platform_selected, affiliation_selected, size_selected, precision_selected, type_selected):
                filtered_df = filter_benchmarks_table(df=initial_df, hw_selected=hw_selected, platform_selected=platform_selected,
                                                      affiliation_selected=affiliation_selected, size_selected=size_selected,
                                                      precision_selected=precision_selected, type_selected=type_selected)
                return filtered_df

            # Re-filter the table whenever any of the checkbox groups changes.
            filter_controls = [filter_hw, filter_platform, filter_affiliation, filter_size, filter_precision, filter_type]
            for filter_control in filter_controls:
                filter_control.change(fn=update_df,
                                      inputs=filter_controls,
                                      outputs=[gradio_df_display])

        with gr.TabItem("🧰 Train a Model", elem_id="getting-started", id=1):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
        with gr.TabItem("🚀 Deployment Tips", elem_id="deployment-tips", id=2):
            gr.Markdown(DEPLOY_TEXT, elem_classes="markdown-text")
        with gr.TabItem("👩‍💻 Developer Programs", elem_id="hardward-program", id=3):
            gr.Markdown(PROGRAMS_TEXT, elem_classes="markdown-text")
        with gr.TabItem("🖊️ Submit", elem_id="submit", id=4):
            gr.Markdown(SUBMIT_TEXT, elem_classes="markdown-text")
            with gr.Row():
                gr.Markdown("# Submit Model for Evaluation 🖊️", elem_classes="markdown-text")
            with gr.Row():
                with gr.Column():
                    model_name_textbox = gr.Textbox(label="Model name")
                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
                    model_type = gr.Dropdown(
                        choices=["pretrained", "fine-tuned", "chat models", "merges/moerges"],
                        label="Model type",
                        multiselect=False,
                        value="pretrained",
                        interactive=True,
                    )
                    hw_type = gr.Dropdown(
                        choices=["Gaudi", "Xeon", "GPU Max", "Arc GPU"],
                        label="Training Hardware",
                        multiselect=False,
                        value="Gaudi",  # default must be one of the choices above ("Gaudi2" was not)
                        interactive=True,
                    )
                    terms = gr.Checkbox(
                        label="Check if you have read and agreed to the terms and conditions associated with submitting \
                        a model to the leaderboard.",
                        value=False,
                        interactive=True,
                    )
                with gr.Column():
                    precision = gr.Dropdown(
                        choices=["fp8", "fp16", "bf16", "int8", "4bit"],
                        label="Precision",
                        multiselect=False,
                        value="fp16",
                        interactive=True,
                    )
                    weight_type = gr.Dropdown(
                        choices=["Original", "Adapter", "Delta"],
                        label="Weights type",
                        multiselect=False,
                        value="Original",
                        interactive=True,
                    )
                    training_infra = gr.Dropdown(
                        choices=["IDC", "AWS", "Azure", "GCP", "Local"],
                        label="Training Infrastructure",
                        multiselect=False,
                        value="IDC",
                        interactive=True,
                    )
                    affiliation = gr.Dropdown(
                        choices=["No Affiliation", "Innovator", "Student Ambassador", "Intel Liftoff", "Intel Labs", "Other"],
                        label="Affiliation with Intel",
                        multiselect=False,
                        value="No Affiliation",  # default must be one of the choices above ("Independent" was not)
                        interactive=True,
                    )
                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")

            # submit_button = gr.Button("Submit Eval")
            # submission_result = gr.Markdown()
            gr.Markdown("Community Submissions Coming soon!")
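
            # A minimal sketch of how community submissions could be wired up once the
            # button above is enabled. The handler name `add_new_eval` and its behavior
            # are assumptions for illustration, not part of this repo.
            #
            # def add_new_eval(model_name, revision, model_type, hw_type, precision,
            #                  weight_type, training_infra, affiliation, base_model, terms):
            #     ...  # validate the entry, queue the evaluation, and return a status string
            #
            # submit_button.click(fn=add_new_eval,
            #                     inputs=[model_name_textbox, revision_name_textbox, model_type,
            #                             hw_type, precision, weight_type, training_infra,
            #                             affiliation, base_model_name_textbox, terms],
            #                     outputs=[submission_result])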

    with gr.Accordion("📙 Citation", open=False):
        citation = gr.Textbox(value=CITATION_TEXT,
                              lines=6,
                              label="Use the following to cite this content")

demo.launch()
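
# To run this app locally (assuming the info/ and src/ modules and the leaderboard CSV
# ship with the repo), install the dependencies and start the script, e.g.:
#   pip install gradio pandas
#   python app.py          # "app.py" is the assumed filename for this Space's entry point
# Gradio serves the UI on http://localhost:7860 by default.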