Update constants.py
constants.py (CHANGED: +7 -13)
@@ -28,19 +28,13 @@ LEADERBORAD_INTRODUCTION = """
 [GitHub](https://github.com/PKU-YuanGroup/ChronoMagic-Bench) | [arXiv](https://arxiv.org/abs/2406.18522) | [Home Page](https://pku-yuangroup.github.io/ChronoMagic-Bench/) | [ChronoMagic-Pro](https://huggingface.co/datasets/BestWishYsh/ChronoMagic-Pro) | [ChronoMagic-ProH](https://huggingface.co/datasets/BestWishYsh/ChronoMagic-ProH)
 """
 
-SUBMIT_INTRODUCTION = """#
-
-
-
-
-
-
-2. Fill in 'MagicTime' in 'Revision Model Name' if you want to update your result (You can leave 'Model Name' blank).
-3. Select ‘Backbone Type’ (DiT or U-Net).
-4. Fill in 'https://github.com/x/x' in 'Model Link'.
-5. Upload `ChronoMagic-Bench-Input.json`.
-6. Click the 'Submit Eval' button.
-7. Click 'Refresh' to obtain the uploaded leaderboard.
+SUBMIT_INTRODUCTION = """# Submission Guidelines
+1. Fill in *'Model Name'* if it is your first time to submit your result **or** fill in *'Revision Model Name'* if you want to update your result.
+2. Select *‘Backbone Type’* (DiT or U-Net).
+3. Fill in your home page to *'Model Link'*.
+4. After evaluation, follow the guidance in the [github repository](https://github.com/PKU-YuanGroup/ChronoMagic-Bench) to obtain `ChronoMagic-Bench-Input.json` and upload it here.
+5. Click the 'Submit Eval' button.
+6. Click 'Refresh' to obtain the uploaded leaderboard.
 """
 
 TABLE_INTRODUCTION = """In the table below, we summarize each task performance of all the models.
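The strings edited here are plain Markdown, so the Space presumably renders them with Gradio components matching the wording of the guidelines ('Model Name', 'Backbone Type', 'Submit Eval', 'Refresh'). The snippet below is a minimal, hypothetical sketch of such a layout; it is not taken from this commit, and the app file, component names, and wiring are assumptions.

```python
# Hypothetical sketch only: how an app.py for this Space might surface the
# updated constants. The real UI code is not part of this commit.
import gradio as gr

from constants import SUBMIT_INTRODUCTION, TABLE_INTRODUCTION

with gr.Blocks() as demo:
    gr.Markdown(SUBMIT_INTRODUCTION)                          # numbered submission guidelines
    model_name = gr.Textbox(label="Model Name")               # first-time submission
    revision_name = gr.Textbox(label="Revision Model Name")   # updating a prior result
    backbone = gr.Radio(["DiT", "U-Net"], label="Backbone Type")
    model_link = gr.Textbox(label="Model Link")               # home page of the model
    input_json = gr.File(label="ChronoMagic-Bench-Input.json")
    submit_btn = gr.Button("Submit Eval")
    refresh_btn = gr.Button("Refresh")
    gr.Markdown(TABLE_INTRODUCTION)                           # leads into the results table

demo.launch()
```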