Change file structure
- app.py (+6, -8)
- background_task.py (+4, -6)
app.py
CHANGED

@@ -11,11 +11,13 @@ from apscheduler.schedulers.background import BackgroundScheduler
 
 
 DATASET_REPO_URL = "https://huggingface.co/datasets/CarlCochet/BotFightData"
-ELO_FILENAME = "
-ELO_DIR = "soccer_elo"
-ELO_FILE = os.path.join(ELO_DIR, ELO_FILENAME)
+ELO_FILENAME = "soccer_elo.csv"
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
+repo = Repository(
+    clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
+)
+
 block = gr.Blocks()
 matchmaking = Matchmaking()
 api = HfApi()
@@ -24,10 +26,6 @@ scheduler = BackgroundScheduler()
 scheduler.add_job(func=init_matchmaking, trigger="interval", seconds=15000)
 scheduler.start()
 
-repo = Repository(
-    local_dir=ELO_DIR, clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
-)
-
 
 def update_elos():
     matchmaking.read_history()
@@ -36,7 +34,7 @@ def update_elos():
 
 
 def get_elo_data() -> pd.DataFrame:
-    data = pd.read_csv(os.path.join(DATASET_REPO_URL, "resolve", "main",
+    data = pd.read_csv(os.path.join(DATASET_REPO_URL, "resolve", "main", ELO_FILENAME))
     return data
 
 
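On the app.py side, the commit drops the local clone constants (ELO_DIR, ELO_FILE) and reads the ELO table straight from the dataset repo over HTTPS. A minimal, self-contained sketch of that read path, reconstructed from the diff above (not the full file), using the repo URL and filename it defines:

import os

import pandas as pd

DATASET_REPO_URL = "https://huggingface.co/datasets/CarlCochet/BotFightData"
ELO_FILENAME = "soccer_elo.csv"


def get_elo_data() -> pd.DataFrame:
    # The Hub serves raw files at <repo_url>/resolve/main/<filename>;
    # os.path.join builds that URL here (joining with "/" on POSIX systems)
    # and pandas reads the CSV directly over HTTPS.
    data = pd.read_csv(os.path.join(DATASET_REPO_URL, "resolve", "main", ELO_FILENAME))
    return data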
background_task.py
CHANGED

@@ -6,13 +6,11 @@ from huggingface_hub import HfApi, Repository
 
 
 DATASET_REPO_URL = "https://huggingface.co/datasets/CarlCochet/BotFightData"
-ELO_FILENAME = "
-ELO_DIR = "soccer_elo"
-ELO_FILE = os.path.join(ELO_DIR, ELO_FILENAME)
+ELO_FILENAME = "soccer_elo.csv"
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
 repo = Repository(
-
+    clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
 )
 
 
@@ -114,7 +112,7 @@ class Matchmaking:
         data_dict["games_played"].append(model.games_played)
         df = pd.DataFrame(data_dict)
         print(df.head())
-        df.to_csv(
+        df.to_csv(ELO_FILENAME)
         repo.push_to_hub(commit_message="Update ELO")
         # df_matches = pd.DataFrame(self.matches)
         # date = datetime.now()
@@ -146,7 +144,7 @@ def get_models_list() -> list:
     """
     models = []
     models_names = []
-    data = pd.read_csv(os.path.join(DATASET_REPO_URL, "resolve", "main",
+    data = pd.read_csv(os.path.join(DATASET_REPO_URL, "resolve", "main", ELO_FILENAME))
     # models_on_hub = api.list_models(filter=["reinforcement-learning", env, "stable-baselines3"])
     models_on_hub = []
     for i, row in data.iterrows():
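On the background_task.py side, the updated ELO table is written to soccer_elo.csv and pushed back to the dataset repo via repo.push_to_hub. A minimal sketch of that write-and-push flow, with assumptions labeled: huggingface_hub's Repository normally takes a local_dir, which the committed code omits, so the ELO_DIR value here is carried over from the pre-change constants, and push_elo_table is a hypothetical helper name, not a function from the diff:

import os

import pandas as pd
from huggingface_hub import Repository

DATASET_REPO_URL = "https://huggingface.co/datasets/CarlCochet/BotFightData"
ELO_FILENAME = "soccer_elo.csv"
ELO_DIR = "soccer_elo"  # assumption: local clone dir, as in the pre-change constants
HF_TOKEN = os.environ.get("HF_TOKEN")

# Clone the dataset repo locally so push_to_hub has a working tree to commit from.
repo = Repository(local_dir=ELO_DIR, clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN)


def push_elo_table(df: pd.DataFrame) -> None:
    # Write the CSV inside the cloned repo, then commit and push it to the Hub.
    df.to_csv(os.path.join(ELO_DIR, ELO_FILENAME), index=False)
    repo.push_to_hub(commit_message="Update ELO")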