# useful_code/dataset_code/vae_decode_hv.py
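# Decode precomputed HunyuanVideo VAE latents (saved as .pt files) back into RGB
# frames and export them as an MP4. The commented-out blocks further down are
# alternative inspection paths: single-frame decodes and FramePack-style inputs.
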
import os
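# Ask huggingface/diffusers to load checkpoint shards in parallel.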
os.environ["HF_ENABLE_PARALLEL_LOADING"] = "yes"
import torch
from diffusers import AutoencoderKLHunyuanVideo
from diffusers.video_processor import VideoProcessor
from diffusers.utils import export_to_video
device = "cuda"
pretrained_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo"
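
# Load the HunyuanVideo VAE in fp32, freeze it, and enable tiled decoding to keep peak VRAM down.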
vae = AutoencoderKLHunyuanVideo.from_pretrained(
    pretrained_model_name_or_path,
    subfolder="vae",
    torch_dtype=torch.float32,
).to(device)
vae.eval()
vae.requires_grad_(False)
vae.enable_tiling()
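
# HunyuanVideo's VAE compresses 8x spatially; use that ratio as the VideoProcessor's scale factor.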
vae_scale_factor_spatial = vae.spatial_compression_ratio
video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
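
# Load a precomputed latent dict from disk; 'vae_latent' is expected to be a [C, T, H, W] tensor.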
latents = torch.load('/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-drone/latents_stride1/9F82nRgRthI_0046499_0046799_281_384_640.pt', map_location='cpu', weights_only=False)
# Undo the scaling applied at encode time before decoding.
vae_latents = latents['vae_latent'] / vae.config.scaling_factor
# vae_latents = vae_latents.to(device=device, dtype=vae.dtype)[:, :9, :, :]

# Add a batch dim, match the VAE's device/dtype, decode, and export the frames as an MP4.
video = vae.decode(vae_latents.unsqueeze(0).to(device=vae.device, dtype=vae.dtype), return_dict=False)[0]
video = video_processor.postprocess_video(video, output_type="pil")
export_to_video(video[0], "output_fp_hv_33.mp4", fps=30)
# video[0][0].save("1_0.png")
# video[0][-1].save("2_0.png")
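
# Sketch (assumption, mirroring the commented-out slice near the top): decode only the
# first few latent frames for a quick, low-memory spot check; the frame count (9) and
# the output filename are arbitrary choices, not part of the original pipeline.
# Uncomment to run.
# short_latents = vae_latents[:, :9, :, :].unsqueeze(0).to(device=vae.device, dtype=vae.dtype)
# short_video = vae.decode(short_latents, return_dict=False)[0]
# short_video = video_processor.postprocess_video(short_video, output_type="pil")
# export_to_video(short_video[0], "output_fp_hv_first9.mp4", fps=30)

# Below: decode the first and last latent frames individually and save them for comparison
# with the frames saved from the full decode above.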
# first_vae_latents = latents['vae_latent'][:, 0, :, :].unsqueeze(1) / vae.config.scaling_factor
# first_vae_latents = first_vae_latents.to(device=device, dtype=vae.dtype)
# first_image = vae.decode(first_vae_latents.unsqueeze(0), return_dict=False)[0]
# first_image = video_processor.postprocess_video(first_image, output_type="pil")[0][0]
# first_image.save("1_1.png")
# last_vae_latents = latents['vae_latent'][:, -1, :, :].unsqueeze(1) / vae.config.scaling_factor
# last_vae_latents = last_vae_latents.to(device=device, dtype=vae.dtype)
# last_image = vae.decode(last_vae_latents.unsqueeze(0), return_dict=False)[0]
# last_image = video_processor.postprocess_video(last_image, output_type="pil")[0][0]
# last_image.save("2_1.png")
# print(f"Max memory: {torch.cuda.max_memory_allocated() / 1024**3:.3f} GB")
# import sys
# sys.path.append("/mnt/bn/yufan-dev-my/ysh/Codes/Efficient/fp_train/dance_forcing/utils")
# from utils_framepack import get_framepack_input_i2v
# (
#     model_input,                  # torch.Size([2, 16, 9, 60, 104])
#     indices_latents,              # torch.Size([2, 9])
#     latents_clean,                # torch.Size([2, 16, 2, 60, 104])
#     indices_clean_latents,        # torch.Size([2, 2])
#     latents_history_2x,           # torch.Size([2, 16, 2, 60, 104])
#     indices_latents_history_2x,   # torch.Size([2, 2])
#     latents_history_4x,           # torch.Size([2, 16, 16, 60, 104])
#     indices_latents_history_4x,   # torch.Size([2, 16])
#     section_to_video_idx,
# ) = get_framepack_input_i2v(
#     vae_latents=latents['vae_latent'].unsqueeze(0),
#     latent_window_size=9,
#     vanilla_sampling=True,
#     is_local_flf2v=True,
#     dtype=torch.bfloat16,
# )
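# Concatenate the first and last entries of model_input along the time axis and decode the result.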
# vae_latents_1 = torch.cat([model_input[0:1], model_input[-1:]], dim = 2)
# vae_latents_1 = vae_latents_1.to(vae.device, dtype=vae.dtype) / vae.config.scaling_factor
# video = vae.decode(vae_latents_1, return_dict=False)[0]
# video = video_processor.postprocess_video(video, output_type="pil")
# export_to_video(video[0], "output_fp_f1_test_1.mp4", fps=30)
# def remove_front_padding(tensor, dim=1):
#     non_zero_indices = torch.any(tensor != 0, dim=tuple(i for i in range(tensor.ndim) if i != dim))
#     first_non_zero = torch.argmax(non_zero_indices.float())
#     slices = [slice(None)] * tensor.ndim
#     slices[dim] = slice(first_non_zero.item(), None)
#     return tensor[tuple(slices)]
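# Stitch the last section's 4x/2x history, clean, and window latents along time,
# strip any front padding, then decode the reconstructed sequence.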
# vae_latents_1 = remove_front_padding(torch.cat([latents_history_4x[-1:], latents_history_2x[-1:], latents_clean[-1:][:, :, 0:1,], model_input[-1:], latents_clean[-1:][:, :, 1:,]], dim = 2), dim = 2)
# vae_latents_1 = vae_latents_1.to(vae.device, dtype=vae.dtype) / vae.config.scaling_factor
# video = vae.decode(vae_latents_1, return_dict=False)[0]
# video = video_processor.postprocess_video(video, output_type="pil")
# export_to_video(video[0], "output_fp_f1_test_2.mp4", fps=30)
# import pdb;pdb.set_trace()