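"""Distributed feature pre-extraction for FramePack-style HunyuanVideo training.

For every video listed in the input CSV, each process encodes:
  * VAE latents of the clip (AutoencoderKLHunyuanVideo),
  * Llama and CLIP text embeddings of the prompt,
  * SigLIP image embeddings of the first frame,
and saves them as one ``<uttid>_<num_frames>_<height>_<width>.pt`` file in
``--output_latent_folder``. Samples whose output file already exists are skipped,
so an interrupted run can simply be restarted.
"""
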
import argparse
import os

import torch
import torch.distributed as dist
import torchvision.transforms as transforms
from accelerate import Accelerator
from diffusers import AutoencoderKLHunyuanVideo
from diffusers.training_utils import free_memory
from diffusers.video_processor import VideoProcessor
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
    CLIPTextModel,
    CLIPTokenizer,
    LlamaModel,
    LlamaTokenizerFast,
    SiglipImageProcessor,
    SiglipVisionModel,
)

from dummy_dataloader_official import BucketedFeatureDataset, BucketedSampler, collate_fn
from utils_framepack import encode_image, encode_prompt
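

# The helpers below assume the script is started by a distributed launcher
# (e.g. torchrun or ``accelerate launch``) that sets LOCAL_RANK for every process.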
def setup_distributed_env():
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))


def cleanup_distributed_env():
    dist.destroy_process_group()


def main(
    rank,
    world_size,
    global_rank,
    stride,
    batch_size,
    dataloader_num_workers,
    csv_file,
    video_folder,
    output_latent_folder,
    pretrained_model_name_or_path,
    siglip_model_name_or_path,
):
    weight_dtype = torch.bfloat16
    device = rank
    seed = 42

    # Load the tokenizers
    tokenizer_one = LlamaTokenizerFast.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="tokenizer",
    )
    tokenizer_two = CLIPTokenizer.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="tokenizer_2",
    )
    feature_extractor = SiglipImageProcessor.from_pretrained(
        siglip_model_name_or_path,
        subfolder="feature_extractor",
    )
    vae = AutoencoderKLHunyuanVideo.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="vae",
        torch_dtype=torch.float32,
    )
    vae_scale_factor_spatial = vae.spatial_compression_ratio
    video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
    text_encoder_one = LlamaModel.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="text_encoder",
        torch_dtype=weight_dtype,
    )
    text_encoder_two = CLIPTextModel.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="text_encoder_2",
        torch_dtype=weight_dtype,
    )
    image_encoder = SiglipVisionModel.from_pretrained(
        siglip_model_name_or_path,
        subfolder="image_encoder",
        torch_dtype=weight_dtype,
    )

    vae.requires_grad_(False)
    text_encoder_one.requires_grad_(False)
    text_encoder_two.requires_grad_(False)
    image_encoder.requires_grad_(False)
    vae.eval()
    text_encoder_one.eval()
    text_encoder_two.eval()
    image_encoder.eval()
    vae = vae.to(device)
    text_encoder_one = text_encoder_one.to(device)
    text_encoder_two = text_encoder_two.to(device)
    image_encoder = image_encoder.to(device)
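
    # Build the bucketed dataset and sampler; accelerator.prepare() then shards the
    # dataloader across the launched processes.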
    dataset = BucketedFeatureDataset(csv_file=csv_file, video_folder=video_folder, stride=stride, force_rebuild=True)
    sampler = BucketedSampler(dataset, batch_size=batch_size, drop_last=True, shuffle=True, seed=seed)
    dataloader = DataLoader(
        dataset,
        batch_sampler=sampler,
        collate_fn=collate_fn,
        num_workers=dataloader_num_workers,
        # pin_memory=True,
        prefetch_factor=2 if dataloader_num_workers != 0 else None,
        # persistent_workers=True if dataloader_num_workers > 0 else False,
    )
    print(len(dataset), len(dataloader))

    accelerator = Accelerator()
    dataloader = accelerator.prepare(dataloader)
    print(f"Dataset size: {len(dataset)}, Dataloader batches: {len(dataloader)}")
    print(f"Process index: {accelerator.process_index}, World size: {accelerator.num_processes}")
    sampler.set_epoch(0)

    if rank == 0:
        pbar = tqdm(total=len(dataloader), desc="Processing")
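
    # Per-batch loop: drop samples whose feature file already exists on disk, so a
    # re-run of the script resumes where it left off.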
    for idx, batch in enumerate(dataloader):
        free_memory()
        valid_indices = []
        valid_uttids = []
        valid_num_frames = []
        valid_heights = []
        valid_widths = []
        valid_videos = []
        valid_prompts = []
        valid_first_frames_images = []
        for i, (uttid, num_frame, height, width) in enumerate(
            zip(
                batch["uttid"],
                batch["video_metadata"]["num_frames"],
                batch["video_metadata"]["height"],
                batch["video_metadata"]["width"],
            )
        ):
            os.makedirs(output_latent_folder, exist_ok=True)
            output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
            if not os.path.exists(output_path):
                valid_indices.append(i)
                valid_uttids.append(uttid)
                valid_num_frames.append(num_frame)
                valid_heights.append(height)
                valid_widths.append(width)
                valid_videos.append(batch["videos"][i])
                valid_prompts.append(batch["prompts"][i])
                valid_first_frames_images.append(batch["first_frames_images"][i])
            else:
                print(f"skipping {uttid}")

        if not valid_indices:
            print("skipping entire batch!")
            if rank == 0:
                pbar.update(1)
                pbar.set_postfix({"batch": idx})
            continue

        # Rebuild the batch from the samples that still need to be processed.
        del batch
        free_memory()
        batch = {
            "uttid": valid_uttids,
            "video_metadata": {
                "num_frames": valid_num_frames,
                "height": valid_heights,
                "width": valid_widths,
            },
            "videos": torch.stack(valid_videos),
            "prompts": valid_prompts,
            "first_frames_images": torch.stack(valid_first_frames_images),
        }
        if len(batch["uttid"]) == 0:
            print("All samples in this batch are already processed, skipping!")
            continue
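
        # Encode everything that is still missing: VAE latents of the frames,
        # Llama/CLIP prompt embeddings, and SigLIP features of the first frame.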
        with torch.no_grad():
            # VAE latents of the whole clip (permute to the layout the VAE expects)
            pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
            vae_latents = vae.encode(pixel_values).latent_dist.sample()
            vae_latents = vae_latents * vae.config.scaling_factor

            # Encode prompts with the Llama and CLIP text encoders
            prompts = batch["prompts"]
            prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = encode_prompt(
                tokenizer=tokenizer_one,
                text_encoder=text_encoder_one,
                tokenizer_2=tokenizer_two,
                text_encoder_2=text_encoder_two,
                prompt=prompts,
                device=device,
            )

            # Prepare the first-frame images and encode them with SigLIP
            image_tensor = batch["first_frames_images"]
            images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]
            image = video_processor.preprocess(image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1])
            image_embeds = encode_image(
                feature_extractor,
                image_encoder,
                image,
                device=device,
                dtype=weight_dtype,
            )
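
        # Write one .pt file per sample; the filename encodes uttid, frame count, and resolution.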
        for uttid, num_frame, height, width, cur_vae_latent, cur_prompt_embed, cur_pooled_prompt_embed, cur_prompt_attention_mask, cur_image_embed in zip(
            batch["uttid"],
            batch["video_metadata"]["num_frames"],
            batch["video_metadata"]["height"],
            batch["video_metadata"]["width"],
            vae_latents,
            prompt_embeds,
            pooled_prompt_embeds,
            prompt_attention_mask,
            image_embeds,
        ):
            output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
            temp_to_save = {
                "vae_latent": cur_vae_latent.cpu().detach(),
                "prompt_embed": cur_prompt_embed.cpu().detach(),
                "pooled_prompt_embeds": cur_pooled_prompt_embed.cpu().detach(),
                "prompt_attention_mask": cur_prompt_attention_mask.cpu().detach(),
                "image_embeds": cur_image_embed.cpu().detach(),
            }
            torch.save(temp_to_save, output_path)
            print(f"save latent to: {output_path}")

        if rank == 0:
            pbar.update(1)
            pbar.set_postfix({"batch": idx})
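
        # Release per-batch tensors before the next iteration.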
        del pixel_values, prompts, image_tensor, images, vae_latents, image_embeds
        del prompt_embeds, pooled_prompt_embeds, prompt_attention_mask, temp_to_save
        del batch, valid_indices, valid_uttids, valid_num_frames, valid_heights
        del valid_widths, valid_videos, valid_prompts, valid_first_frames_images
        free_memory()

    cleanup_distributed_env()
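

def load_precomputed_features(path):
    """Hypothetical helper, not called by this script: a sketch of how a training
    dataloader could read back one feature file written by ``main()``. The keys
    match the ``temp_to_save`` dict above."""
    features = torch.load(path, map_location="cpu")
    return (
        features["vae_latent"],
        features["prompt_embed"],
        features["pooled_prompt_embeds"],
        features["prompt_attention_mask"],
        features["image_embeds"],
    )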


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Precompute VAE latents, prompt embeddings, and first-frame image embeddings for training.")
    parser.add_argument("--stride", type=int, default=2, help="Frame stride passed to the dataset")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size for processing")
    parser.add_argument("--dataloader_num_workers", type=int, default=0, help="Number of workers for data loading")
    parser.add_argument("--csv_file", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/train/sekai-game-drone_updated.csv", help="Path to the dataset CSV file")
    parser.add_argument("--video_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/sekai-game-drone", help="Path to the folder containing the source videos")
    parser.add_argument("--output_latent_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/Sekai-Project/sekai-game-drone/latents", help="Folder to store output latents")
    parser.add_argument("--pretrained_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo", help="Pretrained HunyuanVideo model path")
    parser.add_argument("--siglip_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl", help="SigLIP image-encoder model path")
    args = parser.parse_args()

    setup_distributed_env()
    global_rank = dist.get_rank()
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.cuda.current_device()
    world_size = dist.get_world_size()

    main(
        rank=device,
        world_size=world_size,
        global_rank=global_rank,
        stride=args.stride,
        batch_size=args.batch_size,
        dataloader_num_workers=args.dataloader_num_workers,
        csv_file=args.csv_file,
        video_folder=args.video_folder,
        output_latent_folder=args.output_latent_folder,
        pretrained_model_name_or_path=args.pretrained_model_name_or_path,
        siglip_model_name_or_path=args.siglip_model_name_or_path,
    )
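
# Example launch (illustrative; the script filename, GPU count, and paths are placeholders):
#   torchrun --nproc_per_node=8 precompute_features.py \
#       --batch_size 1 \
#       --csv_file /path/to/metadata.csv \
#       --video_folder /path/to/videos \
#       --output_latent_folder /path/to/latents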