Update app.py
app.py CHANGED
@@ -1,11 +1,12 @@
+from transformers import SegformerForSemanticSegmentation
+from transformers import SegformerImageProcessor
+from PIL import Image
 import gradio as gr
 import numpy as np
-from PIL import Image
 import random
-import matplotlib.pyplot as plt
+import cv2
 import torch
-
-from transformers import SegformerImageProcessor
+

 image_list = [
     "data/1.png",
@@ -14,6 +15,8 @@ image_list = [
     "data/4.png",
 ]

+model_path = ['deprem-ml/deprem_satellite_semantic_whu']
+
 def visualize_instance_seg_mask(mask):
     # Initialize image with zeros with the image resolution
     # of the segmentation mask and 3 channels
@@ -53,35 +56,9 @@ def Segformer_Segmentation(image_path, model_id):
     result = proccessor.post_process_semantic_segmentation(outputs)[0]
     result = np.array(result)
     result = visualize_instance_seg_mask(result)
-
-    for plot_index in range(2):
-        if plot_index == 0:
-            plot_image = test_image
-            title = "Original"
-        else:
-            plot_image = result
-            title = "Segmentation"
-
-        plt.subplot(1, 2, plot_index+1)
-        plt.imshow(plot_image)
-        plt.title(title)
-        plt.axis("off")
-        plt.savefig(output_save)
-
-    return output_save
-
-inputs = [
-    gr.inputs.Image(type="filepath", label="Input Image"),
-    gr.inputs.Dropdown(
-        choices=[
-            "deprem-ml/deprem_satellite_semantic_whu"
-        ],
-        label="Model ID",
-        default="deprem-ml/deprem_satellite_semantic_whu",
-    )
-]
+    cv2.imwrite(output_save, result*255)

-
+    return image_path, output_save

 examples = [[image_list[0], "deprem-ml/deprem_satellite_semantic_whu"],
             [image_list[1], "deprem-ml/deprem_satellite_semantic_whu"],
@@ -90,13 +67,24 @@ examples = [[image_list[0], "deprem-ml/deprem_satellite_semantic_whu"],

 title = "Deprem ML - Segformer Semantic Segmentation"

-
-
-
-
-
-
-
-)
+app = gr.Blocks()
+with app:
+    gr.HTML("<h1 style='text-align: center'>{}</h1>".format(title))
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown("Video")
+            input_video = gr.Image(type='filepath')
+            model_id = gr.Dropdown(value=model_path[0], choices=model_path)
+            input_video_button = gr.Button(value="Predict")
+
+        with gr.Column():
+            output_orijinal_image = gr.Image(type='filepath')
+
+        with gr.Column():
+            output_mask_image = gr.Image(type='filepath')
+
+
+    gr.Examples(examples, inputs=[input_video, model_id], outputs=[output_orijinal_image, output_mask_image], fn=Segformer_Segmentation, cache_examples=True)
+    input_video_button.click(Segformer_Segmentation, inputs=[input_video, model_id], outputs=[output_orijinal_image, output_mask_image])

-
+app.launch()
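
Note: both sides of the diff call a visualize_instance_seg_mask helper that is defined outside the changed hunks; only its first two comment lines are visible above. Based on those comments, it presumably resembles the sketch below. The per-label random coloring and the /255 normalization are assumptions, inferred from the `import random` at the top of the file and the `result*255` rescaling in the new code:

import random
import numpy as np

def visualize_instance_seg_mask(mask):
    # Initialize image with zeros with the image resolution
    # of the segmentation mask and 3 channels
    image = np.zeros((mask.shape[0], mask.shape[1], 3))
    # Assumption: pick a random RGB color for every label in the mask
    labels = np.unique(mask)
    label2color = {label: (random.randint(0, 255),
                           random.randint(0, 255),
                           random.randint(0, 255)) for label in labels}
    # Paint each pixel with the color of its label
    for h in range(image.shape[0]):
        for w in range(image.shape[1]):
            image[h, w, :] = label2color[mask[h, w]]
    # Scale to [0, 1]; the caller rescales with result*255 before cv2.imwrite
    image = image / 255
    return image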
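
For context, here is how the updated Segformer_Segmentation presumably reads once the visible context lines and additions are stitched together. Everything above the post-processing lines is an assumption: the diff does not show where proccessor, model, test_image, or output_save are defined, so the from_pretrained calls and the hard-coded output path below are illustrative only.

import cv2
import numpy as np
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

def Segformer_Segmentation(image_path, model_id):
    output_save = "output.png"  # assumed; the real path is set outside the hunks
    # Assumed loading/inference boilerplate (not shown in the diff)
    test_image = Image.open(image_path).convert("RGB")
    proccessor = SegformerImageProcessor.from_pretrained(model_id)
    model = SegformerForSemanticSegmentation.from_pretrained(model_id)
    inputs = proccessor(images=test_image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # The remaining lines appear verbatim in the diff: three as context,
    # two as additions replacing the old matplotlib plotting loop
    result = proccessor.post_process_semantic_segmentation(outputs)[0]
    result = np.array(result)
    result = visualize_instance_seg_mask(result)
    cv2.imwrite(output_save, result*255)
    return image_path, output_save

Returning both paths matches the new Blocks wiring: image_path feeds output_orijinal_image and output_save feeds output_mask_image, replacing the old single-file matplotlib figure.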