Jack Monas
committed on
Commit f28037a · 1 Parent(s): d01834d
rules
app.py CHANGED
@@ -14,7 +14,7 @@ def scoring_layout():
     col1, col2, col3 = st.columns(3)

     with col1:
-        st.markdown("
+        st.markdown("## Compression")
         st.markdown(
             """
             - **1st Place**: 10 points
@@ -24,7 +24,7 @@ def scoring_layout():
         )

     with col2:
-        st.markdown("
+        st.markdown("## Sampling")
         st.markdown(
             """
             - **1st Place**: 10 points
@@ -34,7 +34,7 @@ def scoring_layout():
         )

     with col3:
-        st.markdown("
+        st.markdown("## Evaluation")
         st.markdown(
             """
             - **1st Place**: 20 points
@@ -43,8 +43,7 @@ def scoring_layout():
             """
         )

-    st.markdown("
-    st.markdown("### Tie-Breakers")
+    st.markdown("## Tie-Breakers")
     st.write(
         "The overall winner will be the team with the highest total points. "
         "In the event of a tie, the following tie-breakers will be applied in order:\n\n"
@@ -62,6 +61,7 @@ def main():
         "Welcome to the World Model Challenge server. This platform hosts three challenges "
         "designed to advance research in world models for robotics: Compression, Sampling, and Evaluation."
     )
+    st.markdown("---")

     st.markdown("### Motivation")
     st.write(
@@ -70,7 +70,8 @@ def main():
         "learning to simulate complex real-world interactions from raw sensor data. We believe these learned simulators will enable "
         "robust evaluation and iterative improvement of robot policies without the constraints of a physical testbed."
     )
-
+    st.markdown("---")
+
     st.markdown("### The Challenges")

     st.markdown("#### Compression Challenge")
@@ -87,9 +88,11 @@ def main():
     st.write(
         "The Evaluation Challenge tackles the ultimate question: Can you predict a robot's performance in the real world without physically deploying it? In this challenge, you will be provided with many different policies for a specific task. Your task is to rank these policies according to their expected real-world performance. This ranking will be compared with the actual ranking of the policies."
     )
+    st.markdown("---")
+
     scoring_layout()

-    st.markdown("
+    st.markdown("## Overall Leaderboard")
     st.write(
         "The overall leaderboard, which shows the total points across all challenges, will go live on March 10th. "
         "In addition, each challenge—Compression, Sampling, and Evaluation—will have its own leaderboard on their respective Hugging Face submission servers. "