# layerdiff_eval/vis_utils.py
import os.path as osp
import json
from typing import List, Union
import random
import yaml
from einops import rearrange, reduce
import torch
import torchvision.transforms.functional as tv_functional
import gzip
import numpy as np
import cv2
from PIL import Image
from torchvision.transforms.functional import pil_to_tensor
class Colors:
# Ultralytics color palette https://ultralytics.com/
def __init__(self):
# hex = matplotlib.colors.TABLEAU_COLORS.values()
# hexs = ('FF1010', '10FF10', 'FFF010', '100FFF', 'c0c0c0', 'FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
# '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
hexs = [
'#4363d8',
'#9A6324',
'#808000',
'#469990',
'#000075',
'#e6194B',
'#f58231',
'#ffe119',
'#bfef45',
'#3cb44b',
'#42d4f4',
'#800000',
'#911eb4',
'#f032e6',
'#fabed4',
'#ffd8b1',
'#fffac8',
'#aaffc3',
'#dcbeff',
'#a9a9a9',
'#006400',
'#4169E1',
'#8B4513',
'#FA8072',
'#87CEEB',
'#FFD700',
'#ffffff',
'#000000',
]
        self.palette = [self.hex2rgb(c if c.startswith('#') else f'#{c}') for c in hexs]
self.n = len(self.palette)
def __call__(self, i, bgr=False):
c = self.palette[int(i) % self.n]
return (c[2], c[1], c[0]) if bgr else c
@staticmethod
def hex2rgb(h): # rgb order (PIL)
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
DEFAULT_COLOR_PALETTE = Colors()
def get_color(idx):
    # idx == -1 is a sentinel meaning "no id": return plain white (255)
    if idx == -1:
        return 255
    return DEFAULT_COLOR_PALETTE(idx)
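
# Illustrative sketch (not part of the original module): cycling through the
# 28-color palette; get_color(-1) is the white sentinel described above.
def _demo_palette():
    for idx in (-1, 0, 1, 27, 28):
        print(idx, get_color(idx))  # idx 28 wraps back to the first palette color
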
VALID_BODY_PARTS_V2 = [
'hair', 'headwear', 'face', 'eyes', 'eyewear', 'ears', 'earwear', 'nose', 'mouth',
'neck', 'neckwear', 'topwear', 'handwear', 'bottomwear', 'legwear', 'footwear',
'tail', 'wings', 'objects'
]
def seed_everything(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def load_image(imgp: str, mode="RGB", output_type='numpy'):
"""
return RGB image as output_type
"""
img = Image.open(imgp).convert(mode)
if output_type == 'numpy':
img = np.array(img)
if len(img.shape) == 2:
img = img[..., None]
return img
def bbox_intersection(xyxy, xyxy2):
x1, y1, x2, y2 = xyxy2
dx1, dy1, dx2, dy2 = xyxy
ix1, ix2 = max(x1, dx1), min(x2, dx2)
iy1, iy2 = max(y1, dy1), min(y2, dy2)
if ix2 >= ix1 and iy2 >= iy1:
return [ix1, iy1, ix2, iy2]
return None
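
# Illustrative sketch: bbox_intersection clips two xyxy boxes against each
# other and returns None when they do not overlap.
def _demo_bbox_intersection():
    assert bbox_intersection([0, 0, 10, 10], [5, 5, 20, 20]) == [5, 5, 10, 10]
    assert bbox_intersection([0, 0, 4, 4], [5, 5, 20, 20]) is None
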
_IMG2TENSOR_IMGTYPE = (Image.Image, np.ndarray, str)
_IMG2TENSOR_DIMORDER = ('bchw', 'chw', 'hwc')
def img2tensor(img: Union[Image.Image, np.ndarray, str, torch.Tensor], normalize = False, mean = 0., std = 255., dim_order: str = 'bchw', dtype=torch.float32, device: str = 'cpu', imread_mode='RGB'):
def _check_normalize_values(values, num_channels):
if isinstance(values, tuple):
values = list(values)
        elif isinstance(values, (int, float, np.generic)):  # scalars (np.ScalarType was removed in NumPy 2.0)
values = [values] * num_channels
else:
assert isinstance(values, (np.ndarray, list))
if len(values) > num_channels:
values = values[:num_channels]
assert len(values) == num_channels
return values
assert isinstance(img, _IMG2TENSOR_IMGTYPE)
assert dim_order in _IMG2TENSOR_DIMORDER
if isinstance(img, str):
img = load_image(img, mode=imread_mode)
if isinstance(img, Image.Image):
img = pil_to_tensor(img)
if dim_order == 'bchw':
img = img.unsqueeze(0)
elif dim_order == 'hwc':
img = img.permute((1, 2, 0))
else:
if img.ndim == 2:
img = img[..., None]
else:
assert img.ndim == 3
if dim_order == 'bchw':
img = rearrange(img, 'h w c -> c h w')[None, ...]
elif dim_order == 'chw':
img = rearrange(img, 'h w c -> c h w')
img = torch.from_numpy(np.ascontiguousarray(img))
img = img.to(device=device, dtype=dtype)
    if normalize:
        # NOTE: tv_functional.normalize expects channel-first input, so this
        # is only meaningful for dim_order 'bchw' or 'chw'.
if dim_order == 'bchw':
c = img.shape[1]
elif dim_order == 'chw':
c = img.shape[0]
else:
c = img.shape[2]
if mean is not None and std is not None:
mean = _check_normalize_values(mean, c)
std = _check_normalize_values(std, c)
img = tv_functional.normalize(img, mean=mean, std=std)
return img
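
# Illustrative sketch: img2tensor accepts a path, a PIL image, or an HxWxC
# numpy array and returns a float tensor in the requested layout.
# normalize=True only makes sense for the channel-first layouts.
def _demo_img2tensor():
    rgb = np.random.randint(0, 256, (64, 48, 3), dtype=np.uint8)
    t_bchw = img2tensor(rgb)                          # (1, 3, 64, 48) float32 in [0, 255]
    t_chw = img2tensor(rgb, dim_order='chw')          # (3, 64, 48)
    t_norm = img2tensor(rgb, normalize=True, mean=127.5, std=127.5)  # roughly [-1, 1]
    print(t_bchw.shape, t_chw.shape, float(t_norm.min()), float(t_norm.max()))
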
def optim_depth(part_dict_list, fullpage):
    '''
    Refine each part's 'depth' in place so that the per-pixel depth ordering
    agrees with the visible (color) ordering recovered by comparing each part
    against the flattened full page via SSIM.
    '''
    window = create_window(11, 1.5, 3)
    depth_map = np.full(fullpage.shape[:2], 2, dtype=np.float32)
    ssim_map = np.full(fullpage.shape[:2], 0., dtype=np.float32)
    depth_order_map = np.full(fullpage.shape[:2], -1, dtype=np.int16)
    color_order_map = depth_order_map.copy()
fullpage_torch = img2tensor(fullpage[..., :3])
    for ii, pd in enumerate(part_dict_list):
        x1, y1, x2, y2 = pd['xyxy']
        mask = pd['mask']
        region_torch = img2tensor(pd['img'][..., :3])
with torch.no_grad():
ssim_map_region = calculate_ssim_map(fullpage_torch[:, :, y1: y2, x1: x2], region_torch, window, 255, use_padding=True)
ssim_map_region = ssim_map_region.to(dtype=torch.float32, device='cpu')[0].numpy()
ssim_update_mask = np.bitwise_and(ssim_map_region > ssim_map[y1: y2, x1: x2], mask)
if np.any(ssim_update_mask):
upd_mask = ssim_update_mask.astype(np.int32)
color_order_map[y1: y2, x1: x2] = color_order_map[y1: y2, x1: x2] * (1-upd_mask) + upd_mask * np.full((y2 - y1, x2 - x1), ii, dtype=np.int16)
ssim_map[y1: y2, x1: x2] = ssim_map[y1: y2, x1: x2] * (1-upd_mask) + upd_mask * ssim_map_region
depth_update_mask = np.bitwise_and(pd['depth'] < depth_map[y1: y2, x1: x2], mask)
if np.any(depth_update_mask):
depth_map[y1: y2, x1: x2] = (1 - depth_update_mask) * depth_map[y1: y2, x1: x2] + depth_update_mask * pd['depth']
depth_order_map[y1: y2, x1: x2] = (1 - depth_update_mask) * depth_order_map[y1: y2, x1: x2] + depth_update_mask * np.full((y2 - y1, x2 - x1), ii, dtype=np.int16)
    # a single refinement pass; increase the range to iterate the adjustment further
    for _ in range(1):
for ii in range(len(part_dict_list)):
pd = part_dict_list[ii]
# if pd['tag'] in {'face', 'topwear', 'nose'}:
# continue
x1, y1, x2, y2 = pd['xyxy']
mask = pd['mask']
color_mask = color_order_map[y1: y2, x1: x2] == ii
if not np.any(color_mask):
continue
depth = pd['depth']
depth_region = depth_map[y1: y2, x1: x2]
max_shift = np.max((depth - depth_region) * color_mask * mask)
if max_shift == 0:
continue
max_shift += 0.001
            min_shift = np.min((depth - depth_region) * mask)  # unused; kept for the commented variant below
            shift_list = np.linspace(0., max_shift, num=20)
            # shift_list = np.concatenate([np.linspace(0, min_shift, num=20), shift_list])
score_map = depth[..., None] - shift_list[None, None] < depth_region[..., None]
score_map = reduce((score_map == color_mask[..., None]).astype(np.float32) * mask[..., None], 'h w c -> c', reduction='mean')
shift = shift_list[np.argmax(score_map)]
if shift > 0:
depth -= shift
depth_update_mask = np.bitwise_and(depth < depth_region, mask)
depth_map[y1: y2, x1: x2] = (1 - depth_update_mask) * depth_map[y1: y2, x1: x2] + depth_update_mask * depth
pd['depth'] = depth
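
# Illustrative sketch with a synthetic one-part scene (page and layout are
# made up): optim_depth only mutates part['depth'] in place.
def _demo_optim_depth():
    page = np.random.randint(0, 256, (64, 64, 4), dtype=np.uint8)
    part = {
        'img': page[8:40, 8:40].copy(),                      # RGBA crop of the page
        'mask': np.ones((32, 32), dtype=bool),
        'depth': np.full((32, 32), 0.5, dtype=np.float32),
        'xyxy': [8, 8, 40, 40],
        'tag': 'demo',
    }
    optim_depth([part], page)
    print(part['depth'].min(), part['depth'].max())          # refined in place
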
def load_parts(srcp, rotate=False):
    '''
    Load the flattened page, per-part RGBA layers, and per-part depth maps
    from directory `srcp`; returns (fullpage, infos, part_dict_list).
    rotate=True rotates everything 90 degrees clockwise.
    '''
srcimg = osp.join(srcp, 'src_img.png')
fullpage = np.array(Image.open(srcimg).convert('RGBA'))
infop = osp.join(srcp, 'info.json')
infos = json2dict(infop)
part_dict_list = []
tag2pd = {}
part_id = 0
min_sz = 12
    if rotate:
        fullpage = np.rot90(fullpage, 3)
for tag, partdict in infos['parts'].items():
img = Image.open(osp.join(srcp, tag + '.png')).convert('RGBA')
depthp = osp.join(srcp, tag + '_depth.png')
img = np.array(img)
        p_test = max(img.shape[:2]) // 10
        mask = img[..., -1] > 10
        # skip near-empty parts: require a few opaque pixels away from the bottom/right margin
        if np.sum(mask[:-p_test, :-p_test]) > 4:
            if rotate:
                img = np.rot90(img, 3)
                mask = np.rot90(mask, 3)
            xyxy = cv2.boundingRect(cv2.findNonZero(mask.astype(np.uint8)))  # (x, y, w, h)
            xyxy = np.array(xyxy)
            w, h = xyxy[2:]  # boundingRect returns width first, then height
            xyxy[2] += xyxy[0]  # convert (x, y, w, h) -> (x1, y1, x2, y2)
            xyxy[3] += xyxy[1]
            # pad boxes narrower/shorter than min_sz, preferring to move the top-left corner
            p = min_sz - w
            if p > 0:
                if xyxy[0] >= p:
                    xyxy[0] -= p
                else:
                    xyxy[2] += p
            p = min_sz - h
            if p > 0:
                if xyxy[1] >= p:
                    xyxy[1] -= p
                else:
                    xyxy[3] += p
x1, y1, x2, y2 = xyxy
depth = np.array(Image.open(depthp).convert('L'))
if rotate:
depth = np.rot90(depth, 3)
dmin, dmax = partdict['depth_min'], partdict['depth_max']
mask = mask[y1: y2, x1: x2].copy()
img = img[y1: y2, x1: x2].copy()
depth = depth[y1: y2, x1: x2].copy()
depth = np.array(depth, dtype=np.float32) / 255 * (dmax - dmin) + dmin
tag2pd[tag] = {'img': img, 'depth': depth, 'part_id': part_id, 'xyxy': xyxy, 'mask': mask, 'tag': tag}
part_dict_list.append(tag2pd[tag])
part_id += 1
return fullpage, infos, part_dict_list
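
# Illustrative sketch of the directory layout load_parts expects (the
# directory name is hypothetical; file names are inferred from the code):
#   sample_dir/
#       src_img.png        # flattened RGBA page
#       info.json          # {'parts': {tag: {'depth_min': ..., 'depth_max': ...}, ...}}
#       <tag>.png          # per-part RGBA layer, same size as src_img.png
#       <tag>_depth.png    # per-part 8-bit depth map
def _demo_load_parts(src_dir: str):
    fullpage, infos, parts = load_parts(src_dir)
    optim_depth(parts, fullpage)                              # refine depths in place
    return img_alpha_blending(parts, final_size=fullpage.shape[:2])
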
def json2dict(json_path: str):
    '''Load a dict from a .json, .json.gz, or .yaml file based on its extension.'''
    plower = json_path.lower()
    if plower.endswith('.gz'):
        with gzip.open(json_path, 'rt', encoding='utf8') as f:
            return json.load(f)
    if plower.endswith('.yaml'):
        with open(json_path, 'r') as f:
            # CSafeLoader requires libyaml; fall back to the pure-Python loader
            return yaml.load(f, getattr(yaml, 'CSafeLoader', yaml.SafeLoader))
    with open(json_path, 'r', encoding='utf8') as f:
        return json.load(f)
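
# Illustrative sketch (hypothetical file name): a round trip through the
# plain-JSON path; the .gz and .yaml paths dispatch on the extension alone.
def _demo_json2dict(tmp_path: str = 'demo_info.json'):
    with open(tmp_path, 'w', encoding='utf8') as f:
        json.dump({'parts': {}}, f)
    print(json2dict(tmp_path))
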
# The SSIM utilities below are vendored from
# https://github.com/One-sixth/ms_ssim_pytorch/blob/master/ssim.py
# (itself modified from https://github.com/VainF/pytorch-msssim/blob/master/pytorch_msssim/ssim.py)
import torch.jit
import torch.nn.functional as F
@torch.jit.script
def create_window(window_size: int = 11, sigma: float = 1.5, channel: int = 3):
'''
Create 1-D gauss kernel
:param window_size: the size of gauss kernel
:param sigma: sigma of normal distribution
:param channel: input channel
:return: 1D kernel
'''
coords = torch.arange(window_size, dtype=torch.float)
coords -= window_size // 2
g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
g /= g.sum()
g = g.reshape(1, 1, 1, -1).repeat(channel, 1, 1, 1)
return g
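
# Illustrative sketch: the "window" is a per-channel 1-D Gaussian stored as a
# (channel, 1, 1, window_size) tensor; _gaussian_filter applies it twice
# (rows, then columns) to get a separable 2-D blur.
def _demo_window():
    w = create_window(11, 1.5, 3)
    print(w.shape)            # torch.Size([3, 1, 1, 11])
    print(float(w[0].sum()))  # ~1.0: each kernel is normalized
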
@torch.jit.script
def _gaussian_filter(x, window_1d, use_padding: bool):
'''
Blur input with 1-D kernel
:param x: batch of tensors to be blured
:param window_1d: 1-D gauss kernel
:param use_padding: padding image before conv
:return: blured tensors
'''
C = x.shape[1]
padding = 0
if use_padding:
window_size = window_1d.shape[3]
padding = window_size // 2
out = F.conv2d(x, window_1d, stride=1, padding=(0, padding), groups=C)
out = F.conv2d(out, window_1d.transpose(2, 3), stride=1, padding=(padding, 0), groups=C)
return out
@torch.jit.script
def calculate_ssim_map(X, Y, window, data_range: float, use_padding: bool=True):
'''
Calculate ssim index for X and Y
:param X: images
:param Y: images
:param window: 1-D gauss kernel
:param data_range: value range of input images. (usually 1.0 or 255)
:param use_padding: padding image before conv
:return:
'''
K1 = 0.01
K2 = 0.03
compensation = 1.0
C1 = (K1 * data_range) ** 2
C2 = (K2 * data_range) ** 2
mu1 = _gaussian_filter(X, window, use_padding)
mu2 = _gaussian_filter(Y, window, use_padding)
sigma1_sq = _gaussian_filter(X * X, window, use_padding)
sigma2_sq = _gaussian_filter(Y * Y, window, use_padding)
sigma12 = _gaussian_filter(X * Y, window, use_padding)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = compensation * (sigma1_sq - mu1_sq)
sigma2_sq = compensation * (sigma2_sq - mu2_sq)
sigma12 = compensation * (sigma12 - mu1_mu2)
cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
    # Clamp negative cs_map values, which would otherwise make ms_ssim produce NaN.
cs_map = F.relu(cs_map)
ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map
    ssim_val = ssim_map.mean(dim=1)  # reduce along the channel dim only, keeping the (N, H, W) map
return ssim_val
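
# Illustrative sketch: identical images give an SSIM map of ~1 everywhere;
# the result keeps the spatial dimensions, unlike the scalar ssim() below.
def _demo_ssim_map():
    x = torch.rand(1, 3, 32, 32) * 255
    w = create_window(11, 1.5, 3)
    m = calculate_ssim_map(x, x, w, data_range=255.0, use_padding=True)
    print(m.shape, float(m.mean()))  # torch.Size([1, 32, 32]), ~1.0
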
@torch.jit.script
def ssim(X, Y, window, data_range: float, use_padding: bool=False):
'''
Calculate ssim index for X and Y
:param X: images
:param Y: images
:param window: 1-D gauss kernel
:param data_range: value range of input images. (usually 1.0 or 255)
:param use_padding: padding image before conv
:return:
'''
K1 = 0.01
K2 = 0.03
compensation = 1.0
C1 = (K1 * data_range) ** 2
C2 = (K2 * data_range) ** 2
mu1 = _gaussian_filter(X, window, use_padding)
mu2 = _gaussian_filter(Y, window, use_padding)
sigma1_sq = _gaussian_filter(X * X, window, use_padding)
sigma2_sq = _gaussian_filter(Y * Y, window, use_padding)
sigma12 = _gaussian_filter(X * Y, window, use_padding)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = compensation * (sigma1_sq - mu1_sq)
sigma2_sq = compensation * (sigma2_sq - mu2_sq)
sigma12 = compensation * (sigma12 - mu1_mu2)
cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
    # Clamp negative cs_map values, which would otherwise make ms_ssim produce NaN.
cs_map = F.relu(cs_map)
ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map
ssim_val = ssim_map.mean(dim=(1, 2, 3)) # reduce along CHW
cs = cs_map.mean(dim=(1, 2, 3))
return ssim_val, cs
@torch.jit.script
def ms_ssim(X, Y, window, data_range: float, weights, use_padding: bool=False, eps: float=1e-8):
'''
interface of ms-ssim
:param X: a batch of images, (N,C,H,W)
:param Y: a batch of images, (N,C,H,W)
:param window: 1-D gauss kernel
:param data_range: value range of input images. (usually 1.0 or 255)
:param weights: weights for different levels
:param use_padding: padding image before conv
    :param eps: small constant to avoid NaN gradients.
:return:
'''
weights = weights[:, None]
levels = weights.shape[0]
vals = []
for i in range(levels):
ss, cs = ssim(X, Y, window=window, data_range=data_range, use_padding=use_padding)
if i < levels-1:
vals.append(cs)
X = F.avg_pool2d(X, kernel_size=2, stride=2, ceil_mode=True)
Y = F.avg_pool2d(Y, kernel_size=2, stride=2, ceil_mode=True)
else:
vals.append(ss)
vals = torch.stack(vals, dim=0)
    # Workaround: when c = a ** b and a is 0, c.backward() makes a.grad inf.
    vals = vals.clamp_min(eps)
    # The original ms-ssim formulation.
    ms_ssim_val = torch.prod(vals[:-1] ** weights[:-1] * vals[-1:] ** weights[-1:], dim=0)
    # Alternative: ms_ssim_val = torch.prod(vals ** weights, dim=0)
    # The upstream author found the original formulation trained better, so it is kept here.
    return ms_ssim_val
return ms_ssim_val
class SSIMCriteria(torch.jit.ScriptModule):
__constants__ = ['data_range', 'use_padding']
def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False):
'''
:param window_size: the size of gauss kernel
:param window_sigma: sigma of normal distribution
:param data_range: value range of input images. (usually 1.0 or 255)
:param channel: input channels (default: 3)
:param use_padding: padding image before conv
'''
super().__init__()
assert window_size % 2 == 1, 'Window size must be odd.'
window = create_window(window_size, window_sigma, channel)
self.register_buffer('window', window)
self.data_range = data_range
self.use_padding = use_padding
@torch.jit.script_method
def forward(self, X, Y):
r = ssim(X, Y, window=self.window, data_range=self.data_range, use_padding=self.use_padding)
return r[0]
class MS_SSIM(torch.jit.ScriptModule):
__constants__ = ['data_range', 'use_padding', 'eps']
def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False, weights=None, levels=None, eps=1e-8):
'''
class for ms-ssim
:param window_size: the size of gauss kernel
:param window_sigma: sigma of normal distribution
:param data_range: value range of input images. (usually 1.0 or 255)
:param channel: input channels
:param use_padding: padding image before conv
:param weights: weights for different levels. (default [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
:param levels: number of downsampling
        :param eps: small constant; avoids inf gradients when a zero base is raised to a power.
'''
super().__init__()
assert window_size % 2 == 1, 'Window size must be odd.'
self.data_range = data_range
self.use_padding = use_padding
self.eps = eps
window = create_window(window_size, window_sigma, channel)
self.register_buffer('window', window)
if weights is None:
weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
weights = torch.tensor(weights, dtype=torch.float)
if levels is not None:
weights = weights[:levels]
weights = weights / weights.sum()
self.register_buffer('weights', weights)
@torch.jit.script_method
def forward(self, X, Y):
return ms_ssim(X, Y, window=self.window, data_range=self.data_range, weights=self.weights,
use_padding=self.use_padding, eps=self.eps)
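
# Illustrative sketch: both scripted modules take (N, C, H, W) batches.
# MS_SSIM with the default 5 levels needs inputs larger than about
# (window_size - 1) * 2**4 = 160 px per side.
def _demo_criteria():
    x = torch.rand(2, 3, 192, 192) * 255
    y = (x + 10 * torch.randn_like(x)).clamp(0, 255)
    print(SSIMCriteria(use_padding=True)(x, y))  # per-image SSIM, shape (2,)
    print(MS_SSIM()(x, y))                       # per-image MS-SSIM, shape (2,)
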
def img_alpha_blending(
drawables: List[np.ndarray],
xyxy=None,
output_type='numpy',
final_size=None,
max_depth_val=255,
premultiplied=True,
):
    '''
    Composite a list of drawables onto one canvas. Each drawable is an RGB/RGBA
    array, or a dict with 'img' plus optional 'xyxy' placement and per-pixel
    'depth' (smaller = nearer) used for ordering.
    final_size: (h, w); ignored when xyxy is given.
    Note: RGB is treated as premultiplied by alpha except in the full-canvas
    path, where premultiplied=False multiplies it first.
    '''
if isinstance(drawables, (np.ndarray, dict)):
drawables = [drawables]
# infer final scene size
if xyxy is not None:
final_size = [xyxy[3] - xyxy[1], xyxy[2] - xyxy[0]]
x1, y1, x2, y2 = xyxy
elif final_size is None:
d = drawables[0]
if isinstance(d, dict):
d = d['img']
final_size = d.shape[:2]
final_rgb = np.zeros((final_size[0], final_size[1], 3), dtype=np.float32)
final_alpha = np.zeros_like(final_rgb[..., [0]])
final_depth = None
for drawable_img in drawables:
dxyxy = None
depth = None
if isinstance(drawable_img, dict):
            depth = drawable_img.get('depth', None)
if depth is not None:
if depth.ndim == 2:
depth = depth[..., None]
if final_depth is None:
final_depth = np.full_like(final_alpha, fill_value=max_depth_val)
if 'xyxy' in drawable_img:
dxyxy = drawable_img['xyxy']
dx1, dy1, dx2, dy2 = dxyxy
drawable_img = drawable_img['img']
if dxyxy is not None:
if dx1 < 0:
drawable_img = drawable_img[:, -dx1:]
if depth is not None:
depth = depth[:, -dx1:]
dx1 = 0
if dy1 < 0:
drawable_img = drawable_img[-dy1:]
if depth is not None:
depth = depth[-dy1:]
dy1 = 0
if drawable_img.ndim == 3 and drawable_img.shape[-1] == 3:
drawable_alpha = np.ones_like(drawable_img[..., [-1]])
else:
drawable_alpha = drawable_img[..., [-1]] / 255
drawable_img = drawable_img[..., :3]
if xyxy is not None:
if dxyxy is None:
drawable_img = drawable_img[y1: y2, x1: x2]
else:
intersection = bbox_intersection(xyxy, dxyxy)
if intersection is None:
continue
ix1, iy1, ix2, iy2 = intersection
drawable_alpha = drawable_alpha[iy1-dy1: iy2-dy1, ix1-dx1: ix2-dx1]
final_alpha[iy1-y1: iy2-y1, ix1-x1: ix2-x1] += drawable_alpha
drawable_img = drawable_img[iy1-dy1: iy2-dy1, ix1-dx1: ix2-dx1]
final_rgb[iy1-y1: iy2-y1, ix1-x1: ix2-x1] = final_rgb[iy1-y1: iy2-y1, ix1-x1: ix2-x1] * (1-drawable_alpha) + drawable_img
continue
if dxyxy is None:
if depth is not None:
update_mask = (final_depth > depth).astype(np.uint8)
final_depth = update_mask * depth + (1-update_mask) * final_depth
final_rgb = update_mask * (final_rgb * (1-drawable_alpha) + drawable_img) + \
(1 - update_mask) * (drawable_img * (1-final_alpha) + final_rgb)
final_alpha = np.clip(final_alpha + drawable_alpha, 0, 1)
else:
final_alpha += drawable_alpha
final_alpha = np.clip(final_alpha, 0, 1)
if not premultiplied:
drawable_img = drawable_img * drawable_alpha
final_rgb = final_rgb * (1 - drawable_alpha) + drawable_img
else:
if depth is not None:
update_mask = (final_depth[dy1: dy2, dx1: dx2] > depth).astype(np.uint8)
update_mask = update_mask * (drawable_alpha > 0.1)
final_depth[dy1: dy2, dx1: dx2] = update_mask * depth + (1-update_mask) * final_depth[dy1: dy2, dx1: dx2]
final_rgb[dy1: dy2, dx1: dx2] = update_mask * (final_rgb[dy1: dy2, dx1: dx2] * (1-drawable_alpha) + drawable_img) + \
(1 - update_mask) * (drawable_img * (1-final_alpha[dy1: dy2, dx1: dx2]) + final_rgb[dy1: dy2, dx1: dx2])
final_alpha[dy1: dy2, dx1: dx2] = np.clip(final_alpha[dy1: dy2, dx1: dx2] + drawable_alpha, 0, 1)
else:
final_alpha[dy1: dy2, dx1: dx2] += drawable_alpha
final_alpha = np.clip(final_alpha, 0, 1)
final_rgb[dy1: dy2, dx1: dx2] = final_rgb[dy1: dy2, dx1: dx2] * (1-drawable_alpha) + drawable_img
final_alpha = np.clip(final_alpha, 0, 1) * 255
final = np.concatenate([final_rgb, final_alpha], axis=2)
final = np.clip(final, 0, 255).astype(np.uint8)
output_type = output_type.lower()
if output_type == 'pil':
final = Image.fromarray(final)
elif output_type == 'dict':
final = {
'img': final
}
if final_depth is not None:
final['depth'] = final_depth
return final
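
# Illustrative sketch: compositing an offset RGBA patch onto a full-canvas
# layer. Dict drawables may carry 'xyxy' placement and per-pixel 'depth';
# RGB values are assumed premultiplied in the dict/xyxy paths.
def _demo_alpha_blending():
    base = np.zeros((64, 64, 4), dtype=np.float32)
    base[..., 3] = 255                                   # opaque black background
    patch = np.full((16, 16, 4), 200, dtype=np.float32)  # premultiplied RGBA
    out = img_alpha_blending([base, {'img': patch, 'xyxy': [8, 8, 24, 24]}])
    print(out.shape, out.dtype)                          # (64, 64, 4) uint8
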
def rgba_to_rgb_fixbg(img: Union[np.ndarray, Image.Image], background_color=255):
    '''Flatten an RGBA image onto a solid background color (int or RGB tuple).'''
    if isinstance(img, Image.Image):
        img = np.array(img)
    assert img.ndim == 3
    if img.shape[-1] == 3:
        return img
    if isinstance(background_color, int):
        bg = np.full_like(img[..., :3], fill_value=background_color)
    else:
        bg = np.empty_like(img[..., :3])
        bg[...] = np.array(background_color)[:3].astype(np.uint8)
    # img_alpha_blending treats RGB as premultiplied here; pass
    # premultiplied=False if the RGBA input uses straight alpha.
    return img_alpha_blending([bg, img])[..., :3].copy()
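
# Illustrative sketch: flattening a fully transparent RGBA image shows the
# default white background and a custom RGB tuple.
def _demo_fixbg():
    rgba = np.zeros((8, 8, 4), dtype=np.uint8)            # fully transparent
    print(rgba_to_rgb_fixbg(rgba)[0, 0])                  # -> [255 255 255]
    print(rgba_to_rgb_fixbg(rgba, (0, 128, 0))[0, 0])     # -> [  0 128   0]
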