Spaces: Running on CPU Upgrade
Update app.py
Browse files
app.py
CHANGED
|
@@ -12,7 +12,6 @@ from textwrap import dedent
|
|
| 12 |
from apscheduler.schedulers.background import BackgroundScheduler
|
| 13 |
|
| 14 |
|
| 15 |
-
HF_TOKEN = os.environ.get("HF_TOKEN")
|
| 16 |
CONVERSION_SCRIPT = "convert_lora_to_gguf.py"
|
| 17 |
|
| 18 |
def process_model(peft_model_id: str, q_method: str, private_repo, oauth_token: gr.OAuthToken | None):
|
|
@@ -180,11 +179,11 @@ with gr.Blocks(css=css) as demo:
|
|
| 180 |
)
|
| 181 |
|
| 182 |
|
| 183 |
-
def
|
| 184 |
-
|
| 185 |
|
| 186 |
scheduler = BackgroundScheduler()
|
| 187 |
-
scheduler.add_job(
|
| 188 |
scheduler.start()
|
| 189 |
|
| 190 |
# Launch the interface
|
|
|
|
| 12 |
from apscheduler.schedulers.background import BackgroundScheduler
|
| 13 |
|
| 14 |
|
|
|
|
| 15 |
CONVERSION_SCRIPT = "convert_lora_to_gguf.py"
|
| 16 |
|
| 17 |
def process_model(peft_model_id: str, q_method: str, private_repo, oauth_token: gr.OAuthToken | None):
|
|
|
|
| 179 |
)
|
| 180 |
|
| 181 |
|
| 182 |
+
def refresh_llama_cpp():
    """Pull the latest llama.cpp sources so the bundled conversion script stays current.

    Runs as a periodic APScheduler job; failures are deliberately swallowed
    (best-effort refresh) — a transient git/network error must not kill the
    recurring job or spam the logs.
    """
    try:
        # Argument list + cwd= instead of a shell=True command string:
        # no shell layer, same effect as "cd llama.cpp && git pull".
        # Output is captured (and discarded) to keep the app log quiet.
        subprocess.run(["git", "pull"], cwd="llama.cpp", capture_output=True)
    except OSError:
        # e.g. llama.cpp directory or git binary missing — keep best-effort
        # semantics of the original shell invocation (which only set a
        # nonzero returncode in that case, never raised).
        pass
|
| 184 |
|
| 185 |
scheduler = BackgroundScheduler()
|
| 186 |
+
scheduler.add_job(refresh_llama_cpp, "interval", seconds=5*60)
|
| 187 |
scheduler.start()
|
| 188 |
|
| 189 |
# Launch the interface
|