import os

import gradio as gr
from huggingface_hub import HfApi

# ---------- Config ----------
OWNER = "AIEnergyScore"
COMPUTE_SPACE = f"{OWNER}/launch-computation-example"
TOKEN = os.environ.get("DEBUG")  # keep your existing env var
API = HfApi(token=TOKEN)

def preflight_status():
    # 1) Check token presence
    if not TOKEN:
        return ("❌ No HF token found in env var 'DEBUG'. "
                "Add a secret named DEBUG in the Space settings (a token with 'write' scope).")
    # 2) Check identity
    try:
        me = API.whoami(token=TOKEN)
        user_str = me.get("name") or me.get("username") or "unknown-user"
    except Exception as e:
        return f"❌ Token error: cannot authenticate ({e})."
    # 3) Check dataset access
    repo_id = "AIEnergyScore/tested_proprietary_models"
    try:
        API.repo_info(repo_id=repo_id, repo_type="dataset", token=TOKEN)
        # If this succeeds, you at least have read access; write failures will surface during upload.
        return f"✅ Connected as **{user_str}**. Dataset **{repo_id}** reachable."
    except Exception as e:
        return (f"⚠️ Auth OK as **{user_str}**, but cannot access dataset "
                f"**{repo_id}** ({e}). Make sure the token user has write access.")

# ---------- Upload to HF dataset (kept from your original) ----------
def add_docker_eval(zip_file):
    new_fid = os.path.basename(zip_file)
    if new_fid.endswith(".zip"):
        API.upload_file(
            path_or_fileobj=zip_file,
            repo_id="AIEnergyScore/tested_proprietary_models",
            path_in_repo="submitted_models/" + new_fid,
            repo_type="dataset",
            commit_message="Adding logs via submission Space.",
            token=TOKEN,
        )
        gr.Info(
            "Uploaded logs to the dataset! We will validate them and add them to the next version of the leaderboard."
        )
    else:
        gr.Info("You can only upload .zip files here!")

# ---------- Minimal UI ----------
GITHUB_DOCKER_URL = "https://github.com/huggingface/AIEnergyScore"
METHODOLOGY_URL = "https://huggingface.co/spaces/AIEnergyScore/README"

with gr.Blocks(title="AI Energy Score") as demo:
    # Header links (kept)
    gr.HTML("""
<style>
.header-link { color: black !important; }
@media (prefers-color-scheme: dark) { .header-link { color: white !important; } }
</style>
<div style="display:flex;justify-content:space-evenly;align-items:center;margin-bottom:20px;">
<a class="header-link" href="https://huggingface.co/spaces/AIEnergyScore/leaderboard" style="text-decoration:none;font-weight:bold;font-size:1.1em;font-family:'Inter',sans-serif;">Leaderboard</a>
<a class="header-link" href="https://huggingface.co/spaces/AIEnergyScore/Label" style="text-decoration:none;font-weight:bold;font-size:1.1em;font-family:'Inter',sans-serif;">Label Generator</a>
<a class="header-link" href="https://huggingface.github.io/AIEnergyScore/#faq" style="text-decoration:none;font-weight:bold;font-size:1.1em;font-family:'Inter',sans-serif;">FAQ</a>
<a class="header-link" href="https://huggingface.github.io/AIEnergyScore/#documentation" style="text-decoration:none;font-weight:bold;font-size:1.1em;font-family:'Inter',sans-serif;">Documentation</a>
<a class="header-link" href="https://huggingface.co/spaces/AIEnergyScore/README/discussions" style="text-decoration:none;font-weight:bold;font-size:1.1em;font-family:'Inter',sans-serif;">Community</a>
</div>
""")

    # Logo (kept)
    gr.HTML("""
<div style="margin-top:0px;">
<picture style="display:block;margin:0 auto;max-width:300px;">
<source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/spaces/AIEnergyScore/Leaderboard/resolve/main/logodark.png">
<img src="https://huggingface.co/spaces/AIEnergyScore/Leaderboard/resolve/main/logo.png" alt="Logo" style="display:block;margin:0 auto;max-width:300px;height:auto;">
</picture>
</div>
""")

    gr.Markdown("<div style='text-align:center;'><h2>Submission Portal</h2></div>")
    # Optional: uncomment to show the preflight connection check at the top of the page.
    # preflight_box = gr.Markdown(preflight_status())
    with gr.Row():
        # -------- Open Models --------
        with gr.Column():
            gr.Markdown("""
### 🌿 Open Models

If your model is hosted on the 🤗 Hub, please **start a new Discussion** and include:
- The **Hugging Face model link** (e.g., `org/model-name`)
- The **requested task type** (e.g., Text Generation)

> Requires a Hugging Face account.

➡️ **[Start a New Discussion](https://huggingface.co/spaces/AIEnergyScore/README/discussions)**
""")

        # -------- Closed Models --------
        with gr.Column():
            gr.Markdown(f"""
### 🔒 Closed Models

Run the benchmark **in your own environment** and upload the logs here.

1. Use our Docker setup
   • **[Docker & configs]({GITHUB_DOCKER_URL})**
   • **[Methodology / Docs]({METHODOLOGY_URL})**
2. When finished, upload the **ZIP file of logs** below.

**⚠️ By uploading the zip file, you agree to:**
- **Public Data Sharing:** We may publicly share the energy performance metrics derived from your submission (no proprietary configs disclosed).
- **Data Integrity:** Logs are accurate, unaltered, and produced per the specified procedures.
- **Model Representation:** The submitted run reflects your production-level model (quantization, etc.).
""")
    # Visible status box for user feedback
    status_box = gr.Markdown("")

    # Hidden file sink (kept pattern from your previous code)
    file_sink = gr.File(visible=False)

    upload_button = gr.UploadButton(
        "📁 Upload a ZIP file with logs", file_count="single", file_types=[".zip"], interactive=True
    )

    # Wrapper: call your uploader and also write user-visible status
    def handle_zip_and_upload(temp_path):
        if not temp_path:
            gr.Warning("No file selected.")
            return "❌ No file uploaded."
        if not TOKEN:
            gr.Warning("Missing HF token in env var 'DEBUG'.")
            return "❌ Upload blocked: missing token (DEBUG)."
        # Enforce .zip
        if not str(temp_path).lower().endswith(".zip"):
            gr.Warning("Only .zip files are accepted.")
            return "❌ Please upload a .zip file."
        try:
            # Your existing uploader: pushes to AIEnergyScore/tested_proprietary_models/submitted_models/
            add_docker_eval(temp_path)  # shows a toast on success/failure internally
            basename = os.path.basename(temp_path)
            return f"✅ Received and submitted: **{basename}**"
        except Exception as e:
            gr.Warning(f"Upload error: {e}")
            return f"❌ Upload failed: {e}"

    # IMPORTANT: bind inside the Blocks context
    upload_button.upload(
        fn=handle_zip_and_upload,
        inputs=upload_button,   # UploadButton passes the temp file path
        outputs=status_box,     # show the result here
    )
# Launch
if __name__ == "__main__":
    demo.launch()
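
# Optional local smoke test (a sketch, not part of the original Space code). It assumes
# this file is saved as app.py and that the DEBUG env var holds a Hugging Face token
# with write access to AIEnergyScore/tested_proprietary_models. Importing the module
# builds the UI without launching it, so the preflight check can be run from a shell
# before deploying:
#
#     DEBUG=hf_your_token python -c "from app import preflight_status; print(preflight_status())"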