from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str  # task key in the results JSON
    metric: str  # metric key in the results JSON
    col_name: str  # column name displayed on the leaderboard


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the results JSON, metric_key in the results JSON, column name to display on the leaderboard
    # For MMLongBench-Doc (https://arxiv.org/abs/2407.01523), we use ACC as the main metric
    task0 = Task("mmlongbench_doc", "acc", "ACC")
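    # Hypothetical extension: to display another metric, add a further member,
    # e.g. task1 = Task("mmlongbench_doc", "f1", "F1") ("f1" is an assumed key;
    # it must exist in your results JSON).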

NUM_FEWSHOT = 0  # Change to match your few-shot setting
# ---------------------------------------------------
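
# A minimal sketch (not part of the original file) of how the Task fields are
# typically consumed: `benchmark` and `metric` index into a results JSON, and
# `col_name` labels the leaderboard column. The JSON shape below is an
# assumption based on the standard Hugging Face leaderboard template.
def _example_extract_score(results: dict, task: Task) -> float:
    # e.g. results = {"results": {"mmlongbench_doc": {"acc": 0.42}}}
    return results["results"][task.benchmark][task.metric]

# Usage (illustrative numbers):
#   _example_extract_score({"results": {"mmlongbench_doc": {"acc": 0.42}}},
#                          Tasks.task0.value)  # -> 0.42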


# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">πŸ₯‡ <a href="https://github.com/CodeGoat24/UniGenBench" target="_blank">UniGenBench</a> Leaderboard (English)</h1> """

# Links and conference info
LINKS_AND_INFO = """
<div align="center">
<p><a href="https://hunyuan.tencent.com/" target="_blank">Hunyuan</a>, Tencent</p> <br>

<a href="https://codegoat24.github.io/UniGenBench" target="_blank">🏠 Homepage</a> | 
<a href="https://arxiv.org/pdf/2510.18701" target="_blank">πŸ“„ arXiv Paper</a> |
<a href="https://huggingface.co/datasets/CodeGoat24/UniGenBench/tree/main">😊 Huggingface</a>

<a href="https://github.com/CodeGoat24/UniGenBench" target="_blank" rel="noopener noreferrer"><img alt="Code" src="https://img.shields.io/github/stars/CodeGoat24/UniGenBench.svg?style=social&amp;label=Official"></a>

πŸ… <a href="https://huggingface.co/spaces/CodeGoat24/UniGenBench_Leaderboard"><b>Leaderboard</b>(English)</a> | 
<a href="https://huggingface.co/spaces/CodeGoat24/UniGenBench_Leaderboard_Chinese"><b>Leaderboard</b>(Chinese)</a> | 
<a href="https://huggingface.co/spaces/CodeGoat24/UniGenBench_Leaderboard_English_Long"><b>Leaderboard</b>(English Long)</a> | 
<a href="https://huggingface.co/spaces/CodeGoat24/UniGenBench_Leaderboard_Chinese_Long"><b>Leaderboard</b>(Chinese Long)</a> πŸ…



</div>
"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
πŸ“š [UniGenBench](https://github.com/CodeGoat24/UniGenBench) is a unified benchmark for text-to-image (T2I) generation that integrates diverse prompt themes with a comprehensive suite of fine-grained evaluation criteria.

πŸ”§ You can use the official [GitHub repo](https://github.com/CodeGoat24/UniGenBench) to evaluate your model on [UniGenBench](https://github.com/CodeGoat24/UniGenBench).

😊 We release **all generated images from the T2I models** evaluated on UniGenBench at [UniGenBench-Eval-Images](https://huggingface.co/datasets/CodeGoat24/UniGenBench-Eval-Images). Feel free to use any evaluation model that is convenient and suitable for you to assess and compare the performance of your models.

πŸ“ To add your own model to the leaderboard, please send an Email to [Yibin Wang](https://codegoat24.github.io/), then we will help with the evaluation and updating the leaderboard.

"""


CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@article{UniGenBench++,
  title={UniGenBench++: A Unified Semantic Evaluation Benchmark for Text-to-Image Generation},
  author={Wang, Yibin and Li, Zhimin and Zang, Yuhang and Bu, Jiazi and Zhou, Yujie and Xin, Yi and He, Junjun and Wang, Chunyu and Lu, Qinglin and Jin, Cheng and others},
  journal={arXiv preprint arXiv:2510.18701},
  year={2025}
}


@article{UniGenBench-Pref-GRPO,
  title={Pref-GRPO: Pairwise Preference Reward-based GRPO for Stable Text-to-Image Reinforcement Learning},
  author={Wang, Yibin and Li, Zhimin and Zang, Yuhang and Zhou, Yujie and Bu, Jiazi and Wang, Chunyu and Lu, Qinglin and Jin, Cheng and Wang, Jiaqi},
  journal={arXiv preprint arXiv:2508.20751},
  year={2025}
}
"""