yuqiang committed
Commit: 00373a8 · parent: 204ab08
add scene idx

Files changed:
- app.py +10 -10
- backend_api.py +7 -1
app.py CHANGED

@@ -56,18 +56,17 @@ def display_version_info():
     return version_info
 
 
-def run_simulation(scene, model, mode, prompt, history, request: gr.Request):
+def run_simulation(scene, episode, model, mode, prompt, history, request: gr.Request):
     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     scene_desc = SCENE_CONFIGS.get(scene, {}).get("description", scene)
     user_ip = request.client.host if request else "unknown"
     session_id = request.session_hash
-
     if not is_request_allowed(user_ip):
         log_submission(scene, prompt, model, user_ip, "IP blocked temporarily")
         raise gr.Error("Too many requests from this IP. Please wait and try again one minute later.")
 
     # Submit the task to the backend
-    submission_result = submit_to_backend(scene, prompt, mode, model, user_ip)
+    submission_result = submit_to_backend(scene, episode, prompt, mode, model, user_ip)
     if submission_result.get("status") != "pending":
         log_submission(scene, prompt, model, user_ip, "Submission failed")
         raise gr.Error(f"Submission failed: {submission_result.get('message', 'unknown issue')}")
@@ -324,18 +323,19 @@ with gr.Blocks(title="InternNav Model Inference Demo", css=custom_css) as demo:
         history_slots.append((slot, accordion, video, detail_md))
     gr.Examples(
         examples=[
-            ["demo1", "rdp", "vlnPE", "Walk past the left side of the bed and stop in the doorway."],
-            ["demo2", "rdp", "vlnPE", "Walk through the bathroom, past the sink and toilet. Stop in front of the counter with the two suitcase."],
-            ["demo3", "rdp", "vlnPE", "Do a U-turn. Walk forward through the kitchen, heading to the black door. Walk out of the door and take a right onto the deck. Walk out on to the deck and stop."],
-            ["demo4", "rdp", "vlnPE", "Walk out of bathroom and stand on white bath mat."],
-            ["demo5", "rdp", "vlnPE", "Walk straight through the double wood doors, follow the red carpet straight to the next doorway and stop where the carpet splits off."]
+            ["demo1", '1', "rdp", "vlnPE", "Walk past the left side of the bed and stop in the doorway."],
+            ["demo2", '1', "rdp", "vlnPE", "Walk through the bathroom, past the sink and toilet. Stop in front of the counter with the two suitcase."],
+            ["demo3", '1', "rdp", "vlnPE", "Do a U-turn. Walk forward through the kitchen, heading to the black door. Walk out of the door and take a right onto the deck. Walk out on to the deck and stop."],
+            ["demo4", '1', "rdp", "vlnPE", "Walk out of bathroom and stand on white bath mat."],
+            ["demo5", '1', "rdp", "vlnPE", "Walk straight through the double wood doors, follow the red carpet straight to the next doorway and stop where the carpet splits off."]
         ],
-        inputs=[scene_dropdown, model_dropdown, mode_dropdown, prompt_input],
+        inputs=[scene_dropdown, episode_dropdown, model_dropdown, mode_dropdown, prompt_input],
         label="Navigation Task Examples"
     )
+
     submit_btn.click(
         fn=run_simulation,
-        inputs=[scene_dropdown, model_dropdown, mode_dropdown, prompt_input, history_state],
+        inputs=[scene_dropdown, episode_dropdown, model_dropdown, mode_dropdown, prompt_input, history_state],
         outputs=[video_output, history_state],
         queue=True,
         api_name="run_simulation"
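The new episode input threads through Gradio positionally: the order of components in `inputs=` must match the parameter order of `run_simulation`, which is why `episode_dropdown` is inserted right after `scene_dropdown` in both the examples and the click handler. The sketch below is not part of the commit; it is a minimal, self-contained illustration of that wiring, with hypothetical dropdown choices and a Textbox standing in for the app's real video output.

import gradio as gr

def run_simulation(scene, episode, model, mode, prompt, history, request: gr.Request):
    # The real app submits a job to the backend; here we just echo the arguments
    # to show how each component value lands in its parameter slot.
    return f"{scene}/{episode} -> {model} ({mode}): {prompt}", history

with gr.Blocks() as demo:
    scene_dropdown = gr.Dropdown(["demo1", "demo2"], label="Scene")    # choices are illustrative
    episode_dropdown = gr.Dropdown(["1", "2"], label="Episode")        # the input this commit adds
    model_dropdown = gr.Dropdown(["rdp"], label="Model")
    mode_dropdown = gr.Dropdown(["vlnPE"], label="Mode")
    prompt_input = gr.Textbox(label="Instruction")
    history_state = gr.State([])
    video_output = gr.Textbox(label="Result")                          # stand-in for the real video component
    submit_btn = gr.Button("Run")
    submit_btn.click(
        fn=run_simulation,
        inputs=[scene_dropdown, episode_dropdown, model_dropdown, mode_dropdown, prompt_input, history_state],
        outputs=[video_output, history_state],
    )

# demo.launch()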
backend_api.py CHANGED

@@ -6,13 +6,19 @@ import json
 from typing import Optional
 from config import API_ENDPOINTS
 
-def submit_to_backend(scene: str, prompt: str, mode: str, model_type: str, user: str = "Gradio-user") -> dict:
+def submit_to_backend(scene: str, episode: str, prompt: str, mode: str, model_type: str, user: str = "Gradio-user") -> dict:
     job_id = str(uuid.uuid4())
+
+    scene_index = scene.split("_")[-1]
+    episode_index = episode.split("_")[-1]
+
     data = {
         "model_type": model_type,
         "instruction": prompt,
         "episode_type": scene,
         "mode": mode,
+        "scene_index": scene_index,
+        "episode_index": episode_index,
     }
     payload = {
         "user": user,
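For reference, `str.split("_")[-1]` returns the text after the last underscore, or the whole string when no underscore is present. The lines below are an illustrative check, not part of the commit; values such as "scene_12" are hypothetical, since the diff does not show the actual dropdown values passed in as scene and episode.

def derive_index(value: str) -> str:
    # Mirrors the derivation added in submit_to_backend above.
    return value.split("_")[-1]

assert derive_index("scene_12") == "12"     # trailing token after the last underscore
assert derive_index("episode_3") == "3"
assert derive_index("demo1") == "demo1"     # no underscore: the value passes through unchanged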