import gradio as gr
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import tensorflow as tf
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
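# Note: the imports above imply that the Space's requirements.txt should list
# gradio, transformers, tensorflow, matplotlib, numpy, and Pillow
# (exact package pins are an assumption, not taken from this repository).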
# Load the SegFormer feature extractor and the TF checkpoint fine-tuned on
# Cityscapes; both are downloaded from the Hugging Face Hub on first run.
feature_extractor = SegformerFeatureExtractor.from_pretrained(
    "nvidia/segformer-b3-finetuned-cityscapes-1024-1024"
)
model = TFSegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b3-finetuned-cityscapes-1024-1024"
)
def ade_palette():
    """ADE20K palette that maps each class to RGB values."""
    return [
        [255, 0, 0],
        [255, 94, 0],
        [255, 187, 0],
        [255, 228, 0],
        [171, 242, 0],
        [29, 219, 22],
        [0, 216, 255],
        [0, 84, 255],
        [1, 0, 255],
        [95, 0, 255],
        [255, 0, 221],
        [255, 0, 127],
        [0, 0, 0],
        [255, 255, 255],
        [255, 216, 216],
        [250, 224, 212],
        [250, 236, 197],
        [250, 244, 192],
        [228, 247, 186],
        [206, 251, 201],
        [212, 244, 250],
        [217, 229, 255],
        [218, 217, 255],
        [232, 217, 255],
        [255, 217, 250],
        [255, 217, 236],
        [246, 246, 246],
        [234, 234, 234],
        [255, 167, 167],
        [255, 193, 158],
        [255, 224, 140],
        [250, 237, 125],
        [206, 242, 121],
        [183, 240, 177],
        [178, 235, 244],
        [178, 204, 255],
        [181, 178, 255],
        [209, 178, 255],
        [255, 178, 245],
        [255, 178, 217],
        [213, 213, 213],
        [189, 189, 189],
        [241, 95, 95],
        [242, 150, 97],
        [242, 203, 97],
        [229, 216, 92],
        [188, 229, 92],
        [134, 229, 127],
        [92, 209, 229],
        [103, 153, 255],
        [107, 102, 255],
        [165, 102, 255],
        [243, 97, 220],
        [243, 97, 166],
        [166, 166, 166],
        [140, 140, 140],
        [93, 93, 93],
        [116, 116, 116],
        [217, 65, 140],
        [217, 65, 197],
        [128, 65, 217],
        [70, 65, 217],
        [67, 116, 217],
        [61, 183, 204],
        [71, 200, 62],
        [159, 201, 60],
        [196, 183, 59],
        [204, 166, 61],
        [204, 114, 61],
        [204, 61, 61],
        [152, 0, 0],
        [153, 56, 0],
        [153, 112, 0],
        [153, 138, 0],
        [107, 153, 0],
        [47, 157, 39],
        [0, 130, 153],
        [0, 51, 153],
        [5, 0, 153],
        [63, 0, 153],
        [153, 0, 133],
        [153, 0, 76],
        [76, 76, 76],
        [53, 53, 53],
        [25, 25, 25],
        [33, 33, 33],
        [102, 0, 51],
        [102, 0, 88],
        [42, 0, 102],
        [3, 0, 102],
        [0, 34, 102],
        [0, 87, 102],
        [34, 116, 28],
        [71, 102, 0],
        [102, 92, 0],
        [102, 75, 0],
        [102, 37, 0],
        [103, 0, 0],
    ]
# Read the class names (one label per line) used for the legend.
labels_list = []
with open(r"labels.txt", "r") as fp:
    for line in fp:
        labels_list.append(line.rstrip("\n"))

colormap = np.asarray(ade_palette())
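# labels.txt is assumed to contain one class name per line, in the order of the
# model's class ids. For this Cityscapes-finetuned checkpoint that would be the
# 19 Cityscapes classes (road, sidewalk, building, wall, fence, pole,
# traffic light, traffic sign, vegetation, terrain, sky, person, rider, car,
# truck, bus, train, motorcycle, bicycle); the exact file contents are an
# assumption, since the file itself is not shown here.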
def label_to_color_image(label):
    if label.ndim != 2:
        raise ValueError("Expect 2-D input label")
    if np.max(label) >= len(colormap):
        raise ValueError("label value too large.")
    return colormap[label]
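# For example, label_to_color_image(np.array([[0, 1], [2, 3]])) returns a
# (2, 2, 3) array holding the RGB color of each class index.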
def draw_plot(pred_img, seg):
    fig = plt.figure(figsize=(20, 15))
    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])

    # Left panel: the blended image.
    plt.subplot(grid_spec[0])
    plt.imshow(pred_img)
    plt.axis("off")

    # Right panel: a legend of the classes present in the segmentation map.
    LABEL_NAMES = np.asarray(labels_list)
    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)

    unique_labels = np.unique(seg.numpy().astype("uint8"))
    ax = plt.subplot(grid_spec[1])
    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
    ax.yaxis.tick_right()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0.0, labelsize=25)
    return fig
def segment_image(input_img):
    """Run semantic segmentation on an image and return a matplotlib figure."""
    input_img = Image.fromarray(input_img)

    inputs = feature_extractor(images=input_img, return_tensors="tf")
    outputs = model(**inputs)
    logits = outputs.logits

    # Logits come out channels-first; move channels last and upsample to the
    # original image size. `image.size` is (width, height), so it is reversed.
    logits = tf.transpose(logits, [0, 2, 3, 1])
    logits = tf.image.resize(logits, input_img.size[::-1])
    seg = tf.math.argmax(logits, axis=-1)[0]

    # Paint each predicted class with its palette color.
    color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)  # height, width, 3
    for label, color in enumerate(colormap):
        color_seg[seg.numpy() == label, :] = color

    # Blend the input image with the color mask.
    pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
    pred_img = pred_img.astype(np.uint8)

    fig = draw_plot(pred_img, seg)
    return fig
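# Quick sanity check without the Gradio UI, using one of the bundled example
# images (the output file name below is only illustrative):
#   fig = segment_image(np.array(Image.open("cityscape-1.jpg")))
#   fig.savefig("segmentation.png")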
# Note: `gr.Image(shape=...)` and string `allow_flagging` are Gradio 3.x API;
# newer Gradio releases no longer accept these arguments.
demo = gr.Interface(fn=segment_image,
                    inputs=gr.Image(shape=(400, 600)),
                    outputs=["plot"],
                    examples=["cityscape-1.jpg", "cityscape-2.jpg", "cityscape-3.jpg"],
                    allow_flagging="never")

demo.launch()