# Source: Hugging Face Space file `src/about.py` (author: CodeGoat24, commit d577417).
# The original file-viewer page chrome ("raw / history blame / 2 kB") was scraping
# residue, not code; it has been converted to this comment so the file parses as Python.
from dataclasses import dataclass
from enum import Enum
@dataclass
class Task:
    """One leaderboard task: where to read it in the results JSON and how to display it."""

    benchmark: str  # task key in the results json file
    metric: str     # metric key in the results json file
    col_name: str   # column header shown in the leaderboard UI
# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    """Closed set of tasks displayed on the leaderboard.

    Each member wraps a Task(task_key_in_json, metric_key_in_json, display_name).
    """

    # For MMLongBench-Doc (https://arxiv.org/abs/2407.01523), we use ACC as the main metric.
    # NOTE(review): the benchmark key is "mmlongbench_doc" while the page branding below
    # says UniGenBench -- confirm this is intentional and not a template leftover.
    task0 = Task("mmlongbench_doc", "acc", "ACC")
NUM_FEWSHOT = 0 # Change with your few shot
# ---------------------------------------------------
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">πŸ₯‡ <a href="" target="_blank">UniGenBench</a> Leaderboard</h1>"""
# Links and conference info
LINKS_AND_INFO = """
<div align="center">
<p><a href="https://github.com/CodeGoat24/UnifiedReward" target="_blank">UnifiedReward Team</a></p> <br>
<a href=""><img src='https://img.shields.io/badge/arXiv-UniGenBench-blue' alt='Paper PDF'></a><a href="https://codegoat24.github.io/UnifiedReward/Pref-GRPO"><img src='https://img.shields.io/badge/Project-Website-orange' alt='Project Page'></a><a href="https://github.com/CodeGoat24/UniGenBench" target="_blank" rel="noopener noreferrer"><img alt="Code" src="https://img.shields.io/github/stars/CodeGoat24/UniGenBench.svg?style=social&amp;label=Official"></a>
</div>
"""
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
πŸ“š [UniGenBench]() is a unified benchmark for T2I generation that integrates diverse prompt themes with a comprehensive suite of fine-grained evaluation criteria.
πŸ”§ You can use the official [GitHub repo](https://github.com/CodeGoat24/UniGenBench) to evaluate your model on [UniGenBench]().
πŸ“ To add your own model to the leaderboard, please send an Email to yibinwang1121@163.com, then we will help with the evaluation and updating the leaderboard.
"""
# Label for the "copy citation" button in the UI.
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
# TODO(review): the citation snippet is truncated -- it contains only a closing
# brace. Paste the full BibTeX entry here once the paper is published.
CITATION_BUTTON_TEXT = r"""
}"""