|
|
from dataclasses import dataclass |
|
|
from enum import Enum |
|
|
|
|
|
@dataclass
class Task:
    """Metadata for one benchmark task displayed on the leaderboard."""

    # Benchmark identifier as used by the evaluation harness (e.g. "mmlongbench_doc").
    benchmark: str
    # Key of the metric reported for this benchmark (e.g. "acc").
    metric: str
    # Column header shown for this metric in the leaderboard table (e.g. "ACC").
    col_name: str
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Tasks(Enum):
    """Closed set of tasks tracked by this leaderboard.

    Each member's value is a :class:`Task` describing the benchmark name,
    the metric key, and the display column header.
    """

    # English UniGenBench split, reported as accuracy under the "ACC" column.
    task0 = Task(benchmark="mmlongbench_doc", metric="acc", col_name="ACC")
|
|
|
|
|
# Number of few-shot examples used at evaluation time (0 = zero-shot).
NUM_FEWSHOT: int = 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Page title (HTML) rendered at the top of the Space.
# Fix: the leading emoji was mojibake ("π₯" — the UTF-8 bytes of U+1F525 🔥
# mis-decoded as ISO-8859-7: F0→π, A5→₯, 9F/94 dropped); restored to 🔥.
TITLE = """<h1 align="center" id="space-title">🔥 <a href="https://github.com/CodeGoat24/UniGenBench" target="_blank">UniGenBench</a> Leaderboard (English)</h1> """
|
|
|
|
|
|
|
|
# Header block (HTML/Markdown) with project links, rendered below the title.
# Fix: several emoji were mojibake (UTF-8 emoji bytes mis-decoded as
# ISO-8859-7, leaving stray "π" characters). Restored to plausible
# equivalents (🏠 📄 🤗 🏆).
# NOTE(review): the exact original glyphs cannot be recovered from the
# corrupted text — confirm against the upstream UniGenBench repo.
LINKS_AND_INFO = """

<div align="center">

<p><a href="https://hunyuan.tencent.com/" target="_blank">Hunyuan</a>, Tencent</p> <br>




<a href="https://codegoat24.github.io/UnifiedReward/Pref-GRPO" target="_blank">🏠 Homepage</a> |

<a href="https://arxiv.org/pdf/2508.20751" target="_blank">📄 arXiv Paper</a> |

<a href="https://huggingface.co/datasets/CodeGoat24/UniGenBench/tree/main">🤗 Huggingface</a>




<a href="https://github.com/CodeGoat24/UniGenBench" target="_blank" rel="noopener noreferrer"><img alt="Code" src="https://img.shields.io/github/stars/CodeGoat24/UniGenBench.svg?style=social&label=Official"></a>




🏆
<a href="https://huggingface.co/spaces/CodeGoat24/UniGenBench_Leaderboard"><b>Leaderboard</b>(English)</a> |

<a href="https://huggingface.co/spaces/CodeGoat24/UniGenBench_Leaderboard_Chinese"><b>Leaderboard</b>(Chinese)</a> |

<a href="https://huggingface.co/spaces/CodeGoat24/UniGenBench_Leaderboard_English_Long"><b>Leaderboard</b>(English Long)</a> |

<a href="https://huggingface.co/spaces/CodeGoat24/UniGenBench_Leaderboard_Chinese_Long"><b>Leaderboard</b>(Chinese Long)</a> 🏆











</div>

"""
|
|
|
|
|
|
|
|
# Markdown introduction shown on the leaderboard's main tab.
# Fix: leading emoji were mojibake ("π§" is provably 🔧 — UTF-8 F0 9F 94 A7
# mis-decoded as ISO-8859-7 gives π + §; the bare "π" remnants are other
# emoji whose remaining bytes were control characters and got dropped).
# NOTE(review): 🌟 / 🎉 / 📝 are best-effort reconstructions — confirm
# against the upstream UniGenBench repo.
INTRODUCTION_TEXT = """

🌟 [UniGenBench](https://github.com/CodeGoat24/UniGenBench) is a unified benchmark for T2I generation that integrates diverse prompt themes with a comprehensive suite of fine-grained evaluation criteria.




🔧 You can use the official [GitHub repo](https://github.com/CodeGoat24/UniGenBench) to evaluate your model on [UniGenBench](https://github.com/CodeGoat24/UniGenBench).




🎉 We release **all generated images from the T2I models** evaluated in our UniGenBench on [UniGenBench-Eval-Images](https://huggingface.co/datasets/CodeGoat24/UniGenBench-Eval-Images). Feel free to use any evaluation model that is convenient and suitable for you to assess and compare the performance of your models.




📝 To add your own model to the leaderboard, please send an Email to [Yibin Wang](https://codegoat24.github.io/), then we will help with the evaluation and updating the leaderboard.

"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Label rendered above the citation snippet box in the UI.
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"

# BibTeX entries for the two UniGenBench papers; a raw string so any
# LaTeX-style backslashes would survive verbatim.
# NOTE(review): the second entry's key contains "&", which LaTeX's \cite
# typically rejects ("Misplaced alignment tab") — confirm the intended key
# against the upstream repo before changing it.
CITATION_BUTTON_TEXT = r"""

@article{UniGenBench++,

title={UniGenBench++: A Unified Semantic Evaluation Benchmark for Text-to-Image Generation},

author={Wang, Yibin and Li, Zhimin and Zang, Yuhang and Bu, Jiazi and Zhou, Yujie and Xin, Yi and He, Junjun and Wang, Chunyu and Lu, Qinglin and Jin, Cheng and Wang, Jiaqi},

journal={arXiv preprint arXiv:2510.18701},

year={2025}

}




@article{UniGenBench&Pref-GRPO,

title={Pref-GRPO: Pairwise Preference Reward-based GRPO for Stable Text-to-Image Reinforcement Learning},

author={Wang, Yibin and Li, Zhimin and Zang, Yuhang and Zhou, Yujie and Bu, Jiazi and Wang, Chunyu and Lu, Qinglin and Jin, Cheng and Wang, Jiaqi},

journal={arXiv preprint arXiv:2508.20751},

year={2025}

}

"""
|
|
|