Update app.py
app.py CHANGED
@@ -139,41 +139,36 @@ with gr.Blocks(theme=Ocean(), css=css_hide_share) as demo:
 *Inspired by the tutorial [Object Detection and Visual Grounding with Qwen 2.5](https://pyimagesearch.com/2025/06/09/object-detection-and-visual-grounding-with-qwen-2-5/) on PyImageSearch.*
 """)

-
-
-
-
-
-
-
-
-
-generate_btn = gr.Button(value="Generate")
-
-with gr.Row():
-output_image = gr.Image(type="pil", label="Annotated image", height=500)
+with gr.Row():
+with gr.Column(scale=1):
+image_input = gr.Image(label="Upload an image", type="pil", height=400)
+prompt_input = gr.Textbox(label="Enter your prompt", placeholder="e.g., Detect all red cars in the image")
+category_input = gr.Textbox(label="Category", interactive=False)
+generate_btn = gr.Button(value="Generate")
+
+with gr.Column(scale=1):
+output_image = gr.Image(type="pil", label="Annotated image", height=400)
 output_textbox = gr.Textbox(label="Model response", lines=10)

-
-
-
-
-
-
-
-
-
-]
-
-
-
-
-
-
-
-)
+gr.Markdown("### Examples")
+example_prompts = [
+["examples/example_1.jpg", "Detect all objects in the image and return their locations and labels.", "Object Detection"],
+["examples/example_2.JPG", "Detect all the individual candies in the image and return their locations and labels.", "Object Detection"],
+["examples/example_1.jpg", "Count the number of red cars in the image.", "Object Counting"],
+["examples/example_2.JPG", "Count the number of blue candies in the image.", "Object Counting"],
+["examples/example_1.jpg", "Identify the red cars in this image, detect their key points and return their positions in the form of points.", "Visual Grounding + Keypoint Detection"],
+["examples/example_2.JPG", "Identify the blue candies in this image, detect their key points and return their positions in the form of points.", "Visual Grounding + Keypoint Detection"],
+["examples/example_1.jpg", "Detect the red car that is leading in this image and return its location and label.", "Visual Grounding + Object Detection"],
+["examples/example_2.JPG", "Detect the blue candy located at the top of the group in this image and return its location and label.", "Visual Grounding + Object Detection"],
+]
+
+gr.Examples(
+examples=example_prompts,
+inputs=[image_input, prompt_input, category_input],
+label="Click an example to populate the input"
+)

 generate_btn.click(fn=detect, inputs=[image_input, prompt_input], outputs=[output_image, output_textbox])

 if __name__ == "__main__":
-demo.launch()
+demo.launch()
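For context, here is a minimal sketch of how the added lines appear to fit together once this hunk is applied. The nesting and indentation, the placement of the Examples section at the gr.Blocks level, and the detect stub are assumptions made for illustration; in the actual app.py, detect, Ocean(), and css_hide_share are defined earlier in the file, and the example images ship with the Space under examples/.

import gradio as gr
from PIL import Image


# Hypothetical stand-in: the real detect() is defined earlier in app.py and runs
# the Qwen 2.5 VL model. It must return (annotated PIL image, model response text)
# to match the outputs wired to generate_btn.click below.
def detect(image: Image.Image, prompt: str):
    return image, f"(model response for: {prompt})"


# The real app passes theme=Ocean() and css=css_hide_share to gr.Blocks, as the
# hunk header shows; they are omitted here to keep the sketch self-contained.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(label="Upload an image", type="pil", height=400)
            prompt_input = gr.Textbox(
                label="Enter your prompt",
                placeholder="e.g., Detect all red cars in the image",
            )
            # Read-only field; it is filled in when an example row is clicked.
            category_input = gr.Textbox(label="Category", interactive=False)
            generate_btn = gr.Button(value="Generate")

        with gr.Column(scale=1):
            output_image = gr.Image(type="pil", label="Annotated image", height=400)
            output_textbox = gr.Textbox(label="Model response", lines=10)

    gr.Markdown("### Examples")
    # Two representative rows; the remaining rows from the diff follow the same
    # [image path, prompt, category] shape, with paths pointing at files in the Space.
    example_prompts = [
        ["examples/example_1.jpg", "Detect all objects in the image and return their locations and labels.", "Object Detection"],
        ["examples/example_2.JPG", "Count the number of blue candies in the image.", "Object Counting"],
    ]

    # Clicking an example populates the image, prompt, and category inputs.
    gr.Examples(
        examples=example_prompts,
        inputs=[image_input, prompt_input, category_input],
        label="Click an example to populate the input",
    )

    generate_btn.click(
        fn=detect,
        inputs=[image_input, prompt_input],
        outputs=[output_image, output_textbox],
    )

if __name__ == "__main__":
    demo.launch()

One detail visible in the diff: category_input is listed in the gr.Examples inputs but not in generate_btn.click, so the category text only annotates the example rows and is never passed to detect.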