Update app.py
app.py CHANGED
@@ -30,13 +30,19 @@ prompt_list = [
     "Cause a deliberate error like divide by zero, then fix it in next step and print 10 / 2."
 ]
 
-#
-
+# Load model globally (CPU-safe to avoid startup CUDA errors)
+pipe = pipeline("text-generation", model="xingyaoww/CodeActAgent-Mistral-7b-v0.1", device_map=None, torch_dtype=torch.float16)  # float16 fallback for init; no 'auto'
+
+# Generator function with GPU decorator
+@spaces.GPU(duration=180)  # 180s for safety with multi-turn/model move
 def run_agent(user_content):
-    yield "
+    yield "Allocating GPU... (may queue if busy)\n\n"
+    yield "Moving model to GPU and initializing...\n\n"
 
-    #
-
+    # Move to GPU here (unrestricted power move)
+    device = torch.device('cuda')
+    pipe.model.to(device)
+    pipe.device = device
 
     # Initial messages
     messages = [
@@ -125,7 +131,7 @@ with gr.Blocks(title="Code Agent Simulator") as demo:
     gr.Markdown("# Code Agent Simulator on Hugging Face Spaces\nEnter a coding task prompt, and watch the agent simulate execution in real-time.")
 
     input_prompt = gr.Textbox(label="Enter your prompt", placeholder="e.g., Implement binary search...")
-    output_log = gr.Textbox(value="", lines=30, autoscroll=True, show_label=True, label="Simulation Log")
+    output_log = gr.Textbox(value="", lines=30, autoscroll=True, show_label=True, label="Simulation Log")
     run_button = gr.Button("Run Simulation")
 
     examples = gr.Examples(examples=prompt_list, inputs=[input_prompt])
@@ -133,6 +139,6 @@ with gr.Blocks(title="Code Agent Simulator") as demo:
     # On click, run the generator and stream to output
     run_button.click(fn=run_agent, inputs=input_prompt, outputs=output_log)
 
-# Launch (
+# Launch (disable SSR for stability, enable debug for logs)
 if __name__ == "__main__":
-    demo.queue().launch(ssr_mode=True)
+    demo.queue().launch(ssr_mode=False, debug=True)
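
For reference, the pattern this commit moves to is: load the transformers pipeline once at import time on CPU (ZeroGPU Spaces have no CUDA available during startup), request a GPU only inside the request handler via the @spaces.GPU decorator, move the model to CUDA there, and stream progress by making the handler a generator whose yields Gradio writes into the output Textbox. The sketch below shows that shape end to end under stated assumptions: the imports, the max_new_tokens=128 generation call, and the trimmed-down UI are added for illustration (the diff only shows the scaffolding around them), and each yield replaces the current Textbox contents rather than appending.

import gradio as gr
import spaces
import torch
from transformers import pipeline

# Load once at startup, on CPU: ZeroGPU provides no CUDA at import time.
pipe = pipeline(
    "text-generation",
    model="xingyaoww/CodeActAgent-Mistral-7b-v0.1",
    device_map=None,
    torch_dtype=torch.float16,
)

@spaces.GPU(duration=180)  # a GPU is attached only while this function runs
def run_agent(user_content):
    yield "Allocating GPU... (may queue if busy)\n\n"
    yield "Moving model to GPU and initializing...\n\n"

    # Move the already-loaded model onto the freshly attached GPU.
    device = torch.device("cuda")
    pipe.model.to(device)
    pipe.device = device

    # Hypothetical single-turn generation call, just to show the streaming path.
    result = pipe(user_content, max_new_tokens=128)[0]["generated_text"]
    yield result

with gr.Blocks(title="Code Agent Simulator") as demo:
    input_prompt = gr.Textbox(label="Enter your prompt")
    output_log = gr.Textbox(value="", lines=30, autoscroll=True, label="Simulation Log")
    run_button = gr.Button("Run Simulation")
    run_button.click(fn=run_agent, inputs=input_prompt, outputs=output_log)

if __name__ == "__main__":
    demo.queue().launch(ssr_mode=False, debug=True)

Deferring the .to('cuda') call until inside the decorated generator is what keeps the CPU-side load from triggering startup CUDA errors, and the explicit demo.queue() keeps streaming requests flowing through Gradio's queue; the ssr_mode=False, debug=True launch flags mirror the commit's own comment about trading SSR for stability and keeping logs visible.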