dd
Browse files
app.py
CHANGED
|
@@ -4,6 +4,8 @@ import torch
|
|
| 4 |
from PIL import Image
|
| 5 |
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
|
| 6 |
import numpy as np
|
|
|
|
|
|
|
| 7 |
|
| 8 |
|
| 9 |
def greet(url):
|
|
@@ -27,7 +29,15 @@ def greet(url):
|
|
| 27 |
predicted_semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
|
| 28 |
# we refer to the demo notebooks for visualization (see "Resources" section in the Mask2Former docs)
|
| 29 |
|
| 30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
|
| 32 |
url = "http://www.apparelnews.co.kr/upfiles/manage/202302/5d5f694177b26fc86e5db623bf7ae4b7.jpg"
|
| 33 |
#greet(url)
|
|
|
|
| 4 |
from PIL import Image
|
| 5 |
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
|
| 6 |
import numpy as np
|
| 7 |
+
import torchvision.transforms as T
|
| 8 |
+
from PIL import Image
|
| 9 |
|
| 10 |
|
| 11 |
def greet(url):
|
|
|
|
| 29 |
predicted_semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
|
| 30 |
# we refer to the demo notebooks for visualization (see "Resources" section in the Mask2Former docs)
|
| 31 |
|
| 32 |
+
# Convert predicted_semantic_map to an 8-bit unsigned integer
|
| 33 |
+
# Convert the image to unsigned 8-bit integers (scaled to values between 0 and 255)
|
| 34 |
+
predicted_semantic_map_scaled = (predicted_semantic_map - predicted_semantic_map.min()) / (predicted_semantic_map.max() - predicted_semantic_map.min()) * 255
|
| 35 |
+
predicted_semantic_map_uint8 = predicted_semantic_map_scaled.to(torch.uint8)
|
| 36 |
+
|
| 37 |
+
tensor_to_pil = T.ToPILImage()
|
| 38 |
+
image = tensor_to_pil(predicted_semantic_map_uint8)
|
| 39 |
+
|
| 40 |
+
return image
|
| 41 |
|
| 42 |
# Sample input image URL for the Mask2Former segmentation demo.
url = "http://www.apparelnews.co.kr/upfiles/manage/202302/5d5f694177b26fc86e5db623bf7ae4b7.jpg"
|
| 43 |
# Example invocation; left commented out so importing the module runs no inference.
#greet(url)
|