|
|
import math |
|
|
import random |
|
|
from typing import Any, Dict, List, Optional, Tuple, Union |
|
|
|
|
|
import torch |
|
|
import torch.nn.functional as F |
|
|
from einops import rearrange, repeat |
|
|
|
|
|
from diffusers.training_utils import compute_density_for_timestep_sampling |
|
|
|
|
|
|
|
|
DEFAULT_PROMPT_TEMPLATE = { |
|
|
"template": ( |
|
|
"<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: " |
|
|
"1. The main content and theme of the video." |
|
|
"2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects." |
|
|
"3. Actions, events, behaviors temporal relationships, physical movement changes of the objects." |
|
|
"4. background environment, light, style and atmosphere." |
|
|
"5. camera angles, movements, and transitions used in the video:<|eot_id|>" |
|
|
"<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>" |
|
|
), |
|
|
"crop_start": 95, |
|
|
} |
|
|
|
|
|
def get_config_value(args, name): |
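    """Fetch `name` from `args`, falling back to `args.training_config`.

    Raises AttributeError if the attribute exists on neither object.
    """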
|
|
if hasattr(args, name): |
|
|
return getattr(args, name) |
|
|
elif hasattr(args, 'training_config') and hasattr(args.training_config, name): |
|
|
return getattr(args.training_config, name) |
|
|
else: |
|
|
raise AttributeError(f"Neither args nor args.training_config has attribute '{name}'") |
|
|
|
|
|
|
|
|
def _get_llama_prompt_embeds( |
|
|
tokenizer, |
|
|
text_encoder, |
|
|
prompt: Union[str, List[str]], |
|
|
prompt_template: Dict[str, Any], |
|
|
num_videos_per_prompt: int = 1, |
|
|
device: Optional[torch.device] = None, |
|
|
dtype: Optional[torch.dtype] = None, |
|
|
max_sequence_length: int = 256, |
|
|
num_hidden_layers_to_skip: int = 2, |
|
|
) -> Tuple[torch.Tensor, torch.Tensor]: |
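    """Encode `prompt` with the Llama text encoder.

    Each prompt is wrapped in `prompt_template`, tokenized, and run through
    `text_encoder` with `output_hidden_states=True`; the hidden state located
    `num_hidden_layers_to_skip` layers before the final one is used as the
    embedding. The first `crop_start` template tokens are then cropped off and the
    result is repeated `num_videos_per_prompt` times along the batch dimension.

    Returns:
        Tuple of (prompt_embeds, prompt_attention_mask).
    """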
|
|
    device = device or text_encoder.device
    dtype = dtype or text_encoder.dtype
|
|
|
|
|
prompt = [prompt] if isinstance(prompt, str) else prompt |
|
|
batch_size = len(prompt) |
|
|
|
|
|
prompt = [prompt_template["template"].format(p) for p in prompt] |
|
|
|
|
|
crop_start = prompt_template.get("crop_start", None) |
|
|
if crop_start is None: |
|
|
prompt_template_input = tokenizer( |
|
|
prompt_template["template"], |
|
|
padding="max_length", |
|
|
return_tensors="pt", |
|
|
return_length=False, |
|
|
return_overflowing_tokens=False, |
|
|
return_attention_mask=False, |
|
|
) |
|
|
crop_start = prompt_template_input["input_ids"].shape[-1] |
|
|
|
|
|
        crop_start -= 2  # drop the <|eot_id|> token and the "{}" placeholder from the template-only token count
|
|
|
|
|
max_sequence_length += crop_start |
|
|
text_inputs = tokenizer( |
|
|
prompt, |
|
|
max_length=max_sequence_length, |
|
|
padding="max_length", |
|
|
truncation=True, |
|
|
return_tensors="pt", |
|
|
return_length=False, |
|
|
return_overflowing_tokens=False, |
|
|
return_attention_mask=True, |
|
|
) |
|
|
text_input_ids = text_inputs.input_ids.to(device=device) |
|
|
prompt_attention_mask = text_inputs.attention_mask.to(device=device) |
|
|
|
|
|
prompt_embeds = text_encoder( |
|
|
input_ids=text_input_ids, |
|
|
attention_mask=prompt_attention_mask, |
|
|
output_hidden_states=True, |
|
|
).hidden_states[-(num_hidden_layers_to_skip + 1)] |
|
|
prompt_embeds = prompt_embeds.to(dtype=dtype) |
|
|
|
|
|
if crop_start is not None and crop_start > 0: |
|
|
prompt_embeds = prompt_embeds[:, crop_start:] |
|
|
prompt_attention_mask = prompt_attention_mask[:, crop_start:] |
|
|
|
|
|
|
|
|
_, seq_len, _ = prompt_embeds.shape |
|
|
prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) |
|
|
prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) |
|
|
prompt_attention_mask = prompt_attention_mask.repeat(1, num_videos_per_prompt) |
|
|
prompt_attention_mask = prompt_attention_mask.view(batch_size * num_videos_per_prompt, seq_len) |
|
|
|
|
|
return prompt_embeds, prompt_attention_mask |
|
|
|
|
|
|
|
|
|
|
|
def _get_clip_prompt_embeds( |
|
|
tokenizer_2, |
|
|
text_encoder_2, |
|
|
prompt: Union[str, List[str]], |
|
|
num_videos_per_prompt: int = 1, |
|
|
device: Optional[torch.device] = None, |
|
|
dtype: Optional[torch.dtype] = None, |
|
|
max_sequence_length: int = 77, |
|
|
) -> torch.Tensor: |
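    """Encode `prompt` with the CLIP text encoder and return its pooled output,
    repeated `num_videos_per_prompt` times along the batch dimension."""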
|
|
    device = device or text_encoder_2.device
    dtype = dtype or text_encoder_2.dtype
|
|
|
|
|
prompt = [prompt] if isinstance(prompt, str) else prompt |
|
|
batch_size = len(prompt) |
|
|
|
|
|
text_inputs = tokenizer_2( |
|
|
prompt, |
|
|
padding="max_length", |
|
|
max_length=max_sequence_length, |
|
|
truncation=True, |
|
|
return_tensors="pt", |
|
|
) |
|
|
|
|
|
text_input_ids = text_inputs.input_ids |
|
|
untruncated_ids = tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids |
|
|
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): |
|
|
        removed_text = tokenizer_2.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
        print(
            f"The following part of the input was truncated because CLIP can only handle sequences up to {max_sequence_length} tokens: {removed_text}"
        )
|
|
|
|
|
prompt_embeds = text_encoder_2(text_input_ids.to(device), output_hidden_states=False).pooler_output |
|
|
|
|
|
|
|
|
prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt) |
|
|
prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, -1) |
|
|
|
|
|
return prompt_embeds |
|
|
|
|
|
|
|
|
|
|
|
def encode_prompt( |
|
|
tokenizer, |
|
|
text_encoder, |
|
|
tokenizer_2, |
|
|
text_encoder_2, |
|
|
prompt: Union[str, List[str]], |
|
|
prompt_2: Union[str, List[str]] = None, |
|
|
prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE, |
|
|
num_videos_per_prompt: int = 1, |
|
|
prompt_embeds: Optional[torch.Tensor] = None, |
|
|
pooled_prompt_embeds: Optional[torch.Tensor] = None, |
|
|
prompt_attention_mask: Optional[torch.Tensor] = None, |
|
|
device: Optional[torch.device] = None, |
|
|
dtype: Optional[torch.dtype] = None, |
|
|
max_sequence_length: int = 256, |
|
|
): |
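    """Compute the dual text conditioning (Llama + CLIP) for the video model.

    `prompt_embeds`/`prompt_attention_mask` come from the Llama encoder via
    `_get_llama_prompt_embeds`, and `pooled_prompt_embeds` comes from the CLIP
    encoder via `_get_clip_prompt_embeds` (using `prompt_2`, which defaults to
    `prompt`). Precomputed embeddings are returned unchanged.
    """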
|
|
if prompt_embeds is None: |
|
|
prompt_embeds, prompt_attention_mask = _get_llama_prompt_embeds( |
|
|
tokenizer, |
|
|
text_encoder, |
|
|
prompt, |
|
|
prompt_template, |
|
|
num_videos_per_prompt, |
|
|
device=device, |
|
|
dtype=dtype, |
|
|
max_sequence_length=max_sequence_length, |
|
|
) |
|
|
|
|
|
if pooled_prompt_embeds is None: |
|
|
if prompt_2 is None: |
|
|
prompt_2 = prompt |
|
|
pooled_prompt_embeds = _get_clip_prompt_embeds( |
|
|
tokenizer_2, |
|
|
text_encoder_2, |
|
|
            prompt_2,
|
|
num_videos_per_prompt, |
|
|
device=device, |
|
|
dtype=dtype, |
|
|
max_sequence_length=77, |
|
|
) |
|
|
|
|
|
return prompt_embeds, pooled_prompt_embeds, prompt_attention_mask |
|
|
|
|
|
|
|
|
def encode_image( |
|
|
feature_extractor, |
|
|
image_encoder, |
|
|
image: torch.Tensor, |
|
|
device: Optional[torch.device] = None, |
|
|
dtype: Optional[torch.dtype] = None, |
|
|
): |
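    """Encode pixel values in [-1, 1] with `image_encoder`.

    The image is rescaled to [0, 1], preprocessed by `feature_extractor`, and the
    encoder's `last_hidden_state` is returned, cast to `dtype`.
    """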
|
|
    device = device or image_encoder.device
|
|
    image = (image + 1) / 2.0  # map pixel values from [-1, 1] to [0, 1] for the feature extractor
|
|
image = feature_extractor(images=image, return_tensors="pt", do_rescale=False).to( |
|
|
device=device, dtype=image_encoder.dtype |
|
|
) |
|
|
image_embeds = image_encoder(**image).last_hidden_state |
|
|
return image_embeds.to(dtype=dtype) |
|
|
|
|
|
|
|
|
def get_framepack_input_t2v( |
|
|
vae, |
|
|
pixel_values, |
|
|
latent_window_size: int = 9, |
|
|
vanilla_sampling: bool = False, |
|
|
dtype: Optional[torch.dtype] = None, |
|
|
is_keep_x0=False, |
|
|
): |
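    """Prepare FramePack training inputs for text-to-video.

    Pixel values are VAE-encoded, trimmed so the latent frame count is a multiple of
    `latent_window_size`, and split into sections. For each section the target
    latents and the "clean" context latents (1x/2x/4x temporal resolutions, zero
    padded where no history exists) are collected together with their index tensors,
    then concatenated along the batch dimension. With `is_keep_x0=True`, later
    sections are additionally conditioned on the first latent frame. Only
    `vanilla_sampling=True` is implemented.

    Returns the batched target/clean latents and indices plus `section_to_video_idx`,
    which maps every section back to its source video in the batch.
    """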
|
|
|
|
|
latent_f = (pixel_values.shape[2] - 1) // 4 + 1 |
|
|
|
|
|
|
|
|
|
|
|
total_latent_sections = math.floor(latent_f / latent_window_size) |
|
|
if total_latent_sections < 1: |
|
|
        min_frames_needed = (latent_window_size - 1) * 4 + 1
        raise ValueError(
            f"Not enough frames for FramePack: {pixel_values.shape[2]} frames ({latent_f} latent frames), minimum required: {min_frames_needed} frames ({latent_window_size} latent frames)"
|
|
) |
|
|
|
|
|
|
|
|
latent_f_aligned = total_latent_sections * latent_window_size |
|
|
|
|
|
|
|
|
frame_count_aligned = (latent_f_aligned - 1) * 4 + 1 |
|
|
if frame_count_aligned != pixel_values.shape[2]: |
|
|
print( |
|
|
f"Frame count mismatch: required={frame_count_aligned} != actual={pixel_values.shape[2]}, trimming to {frame_count_aligned}" |
|
|
) |
|
|
pixel_values = pixel_values[ |
|
|
:, :, :frame_count_aligned, :, : |
|
|
] |
|
|
|
|
|
latent_f = latent_f_aligned |
|
|
|
|
|
|
|
|
pixel_values = pixel_values.to(device=vae.device, dtype=vae.dtype) |
|
|
latents = vae.encode(pixel_values).latent_dist.sample() |
|
|
latents = latents * vae.config.scaling_factor |
|
|
latents = latents.to(dtype=dtype) |
|
|
|
|
|
all_target_latents = [] |
|
|
all_target_latent_indices = [] |
|
|
all_clean_latents = [] |
|
|
all_clean_latent_indices = [] |
|
|
all_clean_latents_2x = [] |
|
|
all_clean_latent_2x_indices = [] |
|
|
all_clean_latents_4x = [] |
|
|
all_clean_latent_4x_indices = [] |
|
|
section_to_video_idx = [] |
|
|
|
|
|
if vanilla_sampling: |
|
|
|
|
|
if is_keep_x0: |
|
|
for b in range(latents.shape[0]): |
|
|
video_lat = latents[b : b + 1] |
|
|
|
|
|
for section_index in range(total_latent_sections): |
|
|
target_start_f = section_index * latent_window_size |
|
|
target_end_f = target_start_f + latent_window_size |
|
|
start_latent = video_lat[:, :, 0:1, :, :] |
|
|
target_latents = video_lat[:, :, target_start_f:target_end_f, :, :] |
|
|
|
|
|
|
|
|
if section_index == 0: |
|
|
clean_latents_total_count = 2 + 2 + 16 |
|
|
else: |
|
|
clean_latents_total_count = 1 + 2 + 16 |
|
|
history_latents = torch.zeros( |
|
|
size=( |
|
|
1, |
|
|
16, |
|
|
clean_latents_total_count, |
|
|
video_lat.shape[-2], |
|
|
video_lat.shape[-1], |
|
|
), |
|
|
device=video_lat.device, |
|
|
dtype=video_lat.dtype, |
|
|
) |
|
|
|
|
|
history_start_f = 0 |
|
|
video_start_f = target_start_f - clean_latents_total_count |
|
|
copy_count = clean_latents_total_count |
|
|
|
|
|
if video_start_f < 0: |
|
|
history_start_f = -video_start_f |
|
|
copy_count = clean_latents_total_count - history_start_f |
|
|
video_start_f = 0 |
|
|
if copy_count > 0: |
|
|
history_latents[:, :, history_start_f:] = video_lat[ |
|
|
:, :, video_start_f : video_start_f + copy_count, :, : |
|
|
] |
|
|
|
|
|
|
|
|
if section_index == 0: |
|
|
indices = torch.arange(0, sum([16, 2, 2, latent_window_size])).unsqueeze(0) |
|
|
( |
|
|
clean_latent_4x_indices, |
|
|
clean_latent_2x_indices, |
|
|
clean_latent_indices, |
|
|
latent_indices, |
|
|
) = indices.split([16, 2, 2, latent_window_size], dim=1) |
|
|
clean_latents_4x, clean_latents_2x, clean_latents = history_latents.split([16, 2, 2], dim=2) |
|
|
else: |
|
|
indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0) |
|
|
( |
|
|
clean_latent_indices_start, |
|
|
clean_latent_4x_indices, |
|
|
clean_latent_2x_indices, |
|
|
clean_latent_1x_indices, |
|
|
latent_indices, |
|
|
) = indices.split([1, 16, 2, 1, latent_window_size], dim=1) |
|
|
clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1) |
|
|
|
|
|
clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents.split([16, 2, 1], dim=2) |
|
|
clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2) |
|
|
|
|
|
all_target_latents.append(target_latents) |
|
|
all_target_latent_indices.append(latent_indices) |
|
|
all_clean_latents.append(clean_latents) |
|
|
all_clean_latent_indices.append(clean_latent_indices) |
|
|
all_clean_latents_2x.append(clean_latents_2x) |
|
|
all_clean_latent_2x_indices.append(clean_latent_2x_indices) |
|
|
all_clean_latents_4x.append(clean_latents_4x) |
|
|
all_clean_latent_4x_indices.append(clean_latent_4x_indices) |
|
|
section_to_video_idx.append(b) |
|
|
else: |
|
|
for b in range(latents.shape[0]): |
|
|
video_lat = latents[b : b + 1] |
|
|
|
|
|
for section_index in range(total_latent_sections): |
|
|
target_start_f = section_index * latent_window_size |
|
|
target_end_f = target_start_f + latent_window_size |
|
|
target_latents = video_lat[:, :, target_start_f:target_end_f, :, :] |
|
|
|
|
|
|
|
|
clean_latents_total_count = 2 + 2 + 16 |
|
|
history_latents = torch.zeros( |
|
|
size=( |
|
|
1, |
|
|
16, |
|
|
clean_latents_total_count, |
|
|
video_lat.shape[-2], |
|
|
video_lat.shape[-1], |
|
|
), |
|
|
device=video_lat.device, |
|
|
dtype=video_lat.dtype, |
|
|
) |
|
|
|
|
|
history_start_f = 0 |
|
|
video_start_f = target_start_f - clean_latents_total_count |
|
|
copy_count = clean_latents_total_count |
|
|
|
|
|
if video_start_f < 0: |
|
|
history_start_f = -video_start_f |
|
|
copy_count = clean_latents_total_count - history_start_f |
|
|
video_start_f = 0 |
|
|
if copy_count > 0: |
|
|
history_latents[:, :, history_start_f:] = video_lat[ |
|
|
:, :, video_start_f : video_start_f + copy_count, :, : |
|
|
] |
|
|
|
|
|
|
|
|
indices = torch.arange(0, sum([16, 2, 2, latent_window_size])).unsqueeze(0) |
|
|
( |
|
|
clean_latent_4x_indices, |
|
|
clean_latent_2x_indices, |
|
|
clean_latent_indices, |
|
|
latent_indices, |
|
|
) = indices.split([16, 2, 2, latent_window_size], dim=1) |
|
|
clean_latents_4x, clean_latents_2x, clean_latents = history_latents.split([16, 2, 2], dim=2) |
|
|
|
|
|
all_target_latents.append(target_latents) |
|
|
all_target_latent_indices.append(latent_indices) |
|
|
all_clean_latents.append(clean_latents) |
|
|
all_clean_latent_indices.append(clean_latent_indices) |
|
|
all_clean_latents_2x.append(clean_latents_2x) |
|
|
all_clean_latent_2x_indices.append(clean_latent_2x_indices) |
|
|
all_clean_latents_4x.append(clean_latents_4x) |
|
|
all_clean_latent_4x_indices.append(clean_latent_4x_indices) |
|
|
section_to_video_idx.append(b) |
|
|
    else:
        # Only the vanilla (forward-in-time) sectioning is implemented for t2v; fail loudly
        # instead of falling through to torch.cat on empty lists below.
        raise NotImplementedError("get_framepack_input_t2v currently only supports vanilla_sampling=True.")
|
|
|
|
|
|
|
|
batched_target_latents = torch.cat(all_target_latents, dim=0) |
|
|
batched_target_latent_indices = torch.cat(all_target_latent_indices, dim=0) |
|
|
batched_clean_latents = torch.cat(all_clean_latents, dim=0) |
|
|
batched_clean_latent_indices = torch.cat(all_clean_latent_indices, dim=0) |
|
|
batched_clean_latents_2x = torch.cat(all_clean_latents_2x, dim=0) |
|
|
batched_clean_latent_2x_indices = torch.cat(all_clean_latent_2x_indices, dim=0) |
|
|
batched_clean_latents_4x = torch.cat(all_clean_latents_4x, dim=0) |
|
|
batched_clean_latent_4x_indices = torch.cat(all_clean_latent_4x_indices, dim=0) |
|
|
|
|
|
return ( |
|
|
batched_target_latents, |
|
|
batched_target_latent_indices, |
|
|
batched_clean_latents, |
|
|
batched_clean_latent_indices, |
|
|
batched_clean_latents_2x, |
|
|
batched_clean_latent_2x_indices, |
|
|
batched_clean_latents_4x, |
|
|
batched_clean_latent_4x_indices, |
|
|
section_to_video_idx, |
|
|
) |
|
|
|
|
|
|
|
|
def get_framepack_input_i2v( |
|
|
vae, |
|
|
pixel_values, |
|
|
latent_window_size: int = 9, |
|
|
vanilla_sampling: bool = False, |
|
|
dtype: Optional[torch.dtype] = None, |
|
|
): |
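    """Prepare FramePack training inputs for image-to-video.

    Like `get_framepack_input_t2v`, but the first latent frame (the conditioning
    image) is always kept as a clean "start" latent. With `vanilla_sampling=True`
    the sections are walked forward in time; otherwise the reversed FramePack
    ordering with latent padding is used. Returns the same tuple layout as the
    t2v variant.
    """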
|
|
|
|
|
latent_f = (pixel_values.shape[2] - 1) // 4 + 1 |
|
|
|
|
|
|
|
|
total_latent_sections = math.floor((latent_f - 1) / latent_window_size) |
|
|
if total_latent_sections < 1: |
|
|
min_frames_needed = latent_window_size * 4 + 1 |
|
|
raise ValueError( |
|
|
f"Not enough frames for FramePack: {pixel_values.shape[2]} frames ({latent_f} latent frames), minimum required: {min_frames_needed} frames ({latent_window_size + 1} latent frames)" |
|
|
) |
|
|
|
|
|
|
|
|
latent_f_aligned = total_latent_sections * latent_window_size + 1 |
|
|
|
|
|
|
|
|
frame_count_aligned = (latent_f_aligned - 1) * 4 + 1 |
|
|
if frame_count_aligned != pixel_values.shape[2]: |
|
|
print( |
|
|
f"Frame count mismatch: required={frame_count_aligned} != actual={pixel_values.shape[2]}, trimming to {frame_count_aligned}" |
|
|
) |
|
|
pixel_values = pixel_values[ |
|
|
:, :, :frame_count_aligned, :, : |
|
|
] |
|
|
|
|
|
latent_f = latent_f_aligned |
|
|
|
|
|
|
|
|
pixel_values = pixel_values.to(device=vae.device, dtype=vae.dtype) |
|
|
latents = vae.encode(pixel_values).latent_dist.sample() |
|
|
latents = latents * vae.config.scaling_factor |
|
|
latents = latents.to(dtype=dtype) |
|
|
|
|
|
all_target_latents = [] |
|
|
all_target_latent_indices = [] |
|
|
all_clean_latents = [] |
|
|
all_clean_latent_indices = [] |
|
|
all_clean_latents_2x = [] |
|
|
all_clean_latent_2x_indices = [] |
|
|
all_clean_latents_4x = [] |
|
|
all_clean_latent_4x_indices = [] |
|
|
section_to_video_idx = [] |
|
|
|
|
|
if vanilla_sampling: |
|
|
|
|
|
for b in range(latents.shape[0]): |
|
|
video_lat = latents[b : b + 1] |
|
|
|
|
|
for section_index in range(total_latent_sections): |
|
|
target_start_f = section_index * latent_window_size + 1 |
|
|
target_end_f = target_start_f + latent_window_size |
|
|
target_latents = video_lat[:, :, target_start_f:target_end_f, :, :] |
|
|
start_latent = video_lat[:, :, 0:1, :, :] |
|
|
|
|
|
|
|
|
clean_latents_total_count = 1 + 2 + 16 |
|
|
history_latents = torch.zeros( |
|
|
size=( |
|
|
1, |
|
|
16, |
|
|
clean_latents_total_count, |
|
|
video_lat.shape[-2], |
|
|
video_lat.shape[-1], |
|
|
), |
|
|
device=video_lat.device, |
|
|
dtype=video_lat.dtype, |
|
|
) |
|
|
|
|
|
history_start_f = 0 |
|
|
video_start_f = target_start_f - clean_latents_total_count |
|
|
copy_count = clean_latents_total_count |
|
|
|
|
|
if video_start_f < 0: |
|
|
history_start_f = -video_start_f |
|
|
copy_count = clean_latents_total_count - history_start_f |
|
|
video_start_f = 0 |
|
|
if copy_count > 0: |
|
|
history_latents[:, :, history_start_f:] = video_lat[ |
|
|
:, :, video_start_f : video_start_f + copy_count, :, : |
|
|
] |
|
|
|
|
|
|
|
|
indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0) |
|
|
( |
|
|
clean_latent_indices_start, |
|
|
clean_latent_4x_indices, |
|
|
clean_latent_2x_indices, |
|
|
clean_latent_1x_indices, |
|
|
latent_indices, |
|
|
) = indices.split([1, 16, 2, 1, latent_window_size], dim=1) |
|
|
clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1) |
|
|
|
|
|
clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents.split([16, 2, 1], dim=2) |
|
|
clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2) |
|
|
|
|
|
all_target_latents.append(target_latents) |
|
|
all_target_latent_indices.append(latent_indices) |
|
|
all_clean_latents.append(clean_latents) |
|
|
all_clean_latent_indices.append(clean_latent_indices) |
|
|
all_clean_latents_2x.append(clean_latents_2x) |
|
|
all_clean_latent_2x_indices.append(clean_latent_2x_indices) |
|
|
all_clean_latents_4x.append(clean_latents_4x) |
|
|
all_clean_latent_4x_indices.append(clean_latent_4x_indices) |
|
|
section_to_video_idx.append(b) |
|
|
else: |
|
|
|
|
|
latent_paddings = list(reversed(range(total_latent_sections))) |
|
|
|
|
|
        if total_latent_sections > 4:
            # For long videos, flatten the padding sequence to [3, 2, 2, ..., 2, 1, 0]
            # instead of counting all the way down, following the FramePack heuristic.
            latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0]
|
|
|
|
|
for b in range(latents.shape[0]): |
|
|
video_lat = latents[ |
|
|
b : b + 1 |
|
|
] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
history_latents = torch.zeros( |
|
|
( |
|
|
1, |
|
|
video_lat.shape[1], |
|
|
1 + 2 + 16, |
|
|
video_lat.shape[3], |
|
|
video_lat.shape[4], |
|
|
), |
|
|
dtype=video_lat.dtype, |
|
|
).to(video_lat.device) |
|
|
|
|
|
latent_f_index = latent_f - latent_window_size |
|
|
section_index = total_latent_sections - 1 |
|
|
|
|
|
for latent_padding in latent_paddings: |
|
|
is_last_section = ( |
|
|
section_index == 0 |
|
|
) |
|
|
latent_padding_size = latent_padding * latent_window_size |
|
|
if is_last_section: |
|
|
assert latent_f_index == 1, "Last section should be starting from frame 1" |
|
|
|
|
|
|
|
|
indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0) |
|
|
( |
|
|
clean_latent_indices_pre, |
|
|
blank_indices, |
|
|
latent_indices, |
|
|
clean_latent_indices_post, |
|
|
clean_latent_2x_indices, |
|
|
clean_latent_4x_indices, |
|
|
) = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1) |
|
|
|
|
|
|
|
|
clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1) |
|
|
|
|
|
|
|
|
clean_latents_pre = video_lat[:, :, 0:1, :, :] |
|
|
clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[ |
|
|
:, :, : 1 + 2 + 16, :, : |
|
|
].split([1, 2, 16], dim=2) |
|
|
clean_latents = torch.cat( |
|
|
[clean_latents_pre, clean_latents_post], dim=2 |
|
|
) |
|
|
|
|
|
|
|
|
target_latents = video_lat[:, :, latent_f_index : latent_f_index + latent_window_size, :, :] |
|
|
|
|
|
all_target_latents.append(target_latents) |
|
|
all_target_latent_indices.append(latent_indices) |
|
|
all_clean_latents.append(clean_latents) |
|
|
all_clean_latent_indices.append(clean_latent_indices) |
|
|
all_clean_latents_2x.append(clean_latents_2x) |
|
|
all_clean_latent_2x_indices.append(clean_latent_2x_indices) |
|
|
all_clean_latents_4x.append(clean_latents_4x) |
|
|
all_clean_latent_4x_indices.append(clean_latent_4x_indices) |
|
|
section_to_video_idx.append(b) |
|
|
|
|
|
if is_last_section: |
|
|
|
|
|
generated_latents_for_history = video_lat[:, :, : latent_window_size + 1, :, :] |
|
|
else: |
|
|
|
|
|
generated_latents_for_history = target_latents |
|
|
|
|
|
history_latents = torch.cat([generated_latents_for_history, history_latents], dim=2) |
|
|
|
|
|
section_index -= 1 |
|
|
latent_f_index -= latent_window_size |
|
|
|
|
|
|
|
|
batched_target_latents = torch.cat(all_target_latents, dim=0) |
|
|
batched_target_latent_indices = torch.cat(all_target_latent_indices, dim=0) |
|
|
batched_clean_latents = torch.cat(all_clean_latents, dim=0) |
|
|
batched_clean_latent_indices = torch.cat(all_clean_latent_indices, dim=0) |
|
|
batched_clean_latents_2x = torch.cat(all_clean_latents_2x, dim=0) |
|
|
batched_clean_latent_2x_indices = torch.cat(all_clean_latent_2x_indices, dim=0) |
|
|
batched_clean_latents_4x = torch.cat(all_clean_latents_4x, dim=0) |
|
|
batched_clean_latent_4x_indices = torch.cat(all_clean_latent_4x_indices, dim=0) |
|
|
|
|
|
return ( |
|
|
batched_target_latents, |
|
|
batched_target_latent_indices, |
|
|
batched_clean_latents, |
|
|
batched_clean_latent_indices, |
|
|
batched_clean_latents_2x, |
|
|
batched_clean_latent_2x_indices, |
|
|
batched_clean_latents_4x, |
|
|
batched_clean_latent_4x_indices, |
|
|
section_to_video_idx, |
|
|
) |
|
|
|
|
|
|
|
|
def get_pyramid_input( |
|
|
args, |
|
|
scheduler, |
|
|
latents, |
|
|
pyramid_stage_num=3, |
|
|
pyramid_sample_ratios=[1, 2, 1], |
|
|
pyramid_sample_mode="efficient", |
|
|
pyramid_stream_inference_steps=[10, 10, 10], |
|
|
stream_chunk_size=5, |
|
|
): |
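    """Build pyramid-flow training inputs from clean latents.

    A resolution pyramid of the latents (and matching noise) is built by repeated
    2x bilinear downsampling. Depending on `pyramid_sample_mode`, timesteps and
    sigmas are then sampled per stage: "efficient" packs the stages along the batch
    dimension, "full" samples every stage for every video, "diffusion_forcing"
    samples an independent timestep per chunk of `stream_chunk_size` latent frames,
    and "stream_sample" follows the staggered schedule from `get_stream_sample`.

    Returns:
        (noisy_latents_list, sigmas_list, timesteps_list, targets_list), where each
        target is the flow-matching difference `start_point - end_point`.
    """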
|
|
assert pyramid_stage_num == len(pyramid_sample_ratios) |
|
|
if pyramid_sample_mode not in ["efficient", "full", "diffusion_forcing", "stream_sample"]: |
|
|
raise ValueError( |
|
|
f"Invalid pyramid_sample_mode: {pyramid_sample_mode}. Must be one of ['efficient', 'full', 'diffusion_forcing', 'dance_forcing']." |
|
|
) |
|
|
|
|
|
|
|
|
pyramid_latent_list = [] |
|
|
pyramid_latent_list.append(latents) |
|
|
num_frames, height, width = latents.shape[-3], latents.shape[-2], latents.shape[-1] |
|
|
for _ in range(pyramid_stage_num - 1): |
|
|
height //= 2 |
|
|
width //= 2 |
|
|
latents = rearrange(latents, "b c t h w -> (b t) c h w") |
|
|
latents = torch.nn.functional.interpolate(latents, size=(height, width), mode="bilinear") |
|
|
latents = rearrange(latents, "(b t) c h w -> b c t h w", t=num_frames) |
|
|
pyramid_latent_list.append(latents) |
|
|
pyramid_latent_list = list(reversed(pyramid_latent_list)) |
|
|
|
|
|
|
|
|
noise = torch.randn_like(pyramid_latent_list[-1]) |
|
|
device = noise.device |
|
|
dtype = pyramid_latent_list[-1].dtype |
|
|
latent_frame_num = noise.shape[2] |
|
|
input_video_num = noise.shape[0] |
|
|
|
|
|
height, width = noise.shape[-2], noise.shape[-1] |
|
|
noise_list = [noise] |
|
|
cur_noise = noise |
|
|
for i_s in range(pyramid_stage_num - 1): |
|
|
height //= 2 |
|
|
width //= 2 |
|
|
cur_noise = rearrange(cur_noise, "b c t h w -> (b t) c h w") |
|
|
        cur_noise = F.interpolate(cur_noise, size=(height, width), mode="bilinear") * 2  # 2x bilinear downsampling averages 4 values and halves the noise std; rescale to keep unit variance
|
|
cur_noise = rearrange(cur_noise, "(b t) c h w -> b c t h w", t=latent_frame_num) |
|
|
noise_list.append(cur_noise) |
|
|
noise_list = list(reversed(noise_list)) |
|
|
|
|
|
|
|
|
if pyramid_sample_mode == "efficient": |
|
|
        assert input_video_num % int(sum(pyramid_sample_ratios)) == 0, (
            f"In 'efficient' mode the number of videos ({input_video_num}) must be divisible by sum(pyramid_sample_ratios) ({int(sum(pyramid_sample_ratios))})"
        )
|
|
|
|
|
bsz = input_video_num // int(sum(pyramid_sample_ratios)) |
|
|
column_size = int(sum(pyramid_sample_ratios)) |
|
|
column_to_stage = {} |
|
|
i_sum = 0 |
|
|
for i_s, column_num in enumerate(pyramid_sample_ratios): |
|
|
for index in range(i_sum, i_sum + column_num): |
|
|
column_to_stage[index] = i_s |
|
|
i_sum += column_num |
|
|
|
|
|
|
|
|
noisy_latents_list = [] |
|
|
sigmas_list = [] |
|
|
targets_list = [] |
|
|
timesteps_list = [] |
|
|
training_steps = scheduler.config.num_train_timesteps |
|
|
for index in range(column_size): |
|
|
i_s = column_to_stage[index] |
|
|
clean_latent = pyramid_latent_list[i_s][index::column_size] |
|
|
last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1][index::column_size] |
|
|
start_sigma = scheduler.start_sigmas[i_s] |
|
|
end_sigma = scheduler.end_sigmas[i_s] |
|
|
|
|
|
if i_s == 0: |
|
|
start_point = noise_list[i_s][index::column_size] |
|
|
else: |
|
|
|
|
|
last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w") |
|
|
last_clean_latent = F.interpolate( |
|
|
last_clean_latent, |
|
|
size=( |
|
|
last_clean_latent.shape[-2] * 2, |
|
|
last_clean_latent.shape[-1] * 2, |
|
|
), |
|
|
mode="nearest", |
|
|
) |
|
|
last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num) |
|
|
start_point = start_sigma * noise_list[i_s][index::column_size] + (1 - start_sigma) * last_clean_latent |
|
|
|
|
|
if i_s == pyramid_stage_num - 1: |
|
|
end_point = clean_latent |
|
|
else: |
|
|
end_point = end_sigma * noise_list[i_s][index::column_size] + (1 - end_sigma) * clean_latent |
|
|
|
|
|
|
|
|
|
|
|
u = compute_density_for_timestep_sampling( |
|
|
weighting_scheme=get_config_value(args, 'weighting_scheme'), |
|
|
batch_size=bsz, |
|
|
logit_mean=get_config_value(args, 'logit_mean'), |
|
|
logit_std=get_config_value(args, 'logit_std'), |
|
|
mode_scale=get_config_value(args, 'mode_scale'), |
|
|
) |
|
|
indices = (u * training_steps).long() |
|
|
indices = indices.clamp(0, training_steps - 1) |
|
|
timesteps = scheduler.timesteps_per_stage[i_s][indices].to(device=device) |
|
|
|
|
|
|
|
|
|
|
|
sigmas = scheduler.sigmas_per_stage[i_s][indices].to(device=device) |
|
|
while len(sigmas.shape) < start_point.ndim: |
|
|
sigmas = sigmas.unsqueeze(-1) |
|
|
|
|
|
noisy_latents = sigmas * start_point + (1 - sigmas) * end_point |
|
|
|
|
|
|
|
|
noisy_latents_list.append([noisy_latents.to(dtype)]) |
|
|
sigmas_list.append(sigmas.to(dtype)) |
|
|
timesteps_list.append(timesteps.to(dtype)) |
|
|
targets_list.append(start_point - end_point) |
|
|
elif pyramid_sample_mode == "full": |
|
|
|
|
|
bsz = input_video_num |
|
|
|
|
|
|
|
|
noisy_latents_list = [] |
|
|
sigmas_list = [] |
|
|
targets_list = [] |
|
|
timesteps_list = [] |
|
|
training_steps = scheduler.config.num_train_timesteps |
|
|
for i_s, cur_sample_ratio in zip(range(pyramid_stage_num), pyramid_sample_ratios): |
|
|
clean_latent = pyramid_latent_list[i_s] |
|
|
last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1] |
|
|
start_sigma = scheduler.start_sigmas[i_s] |
|
|
end_sigma = scheduler.end_sigmas[i_s] |
|
|
|
|
|
if i_s == 0: |
|
|
start_point = noise_list[i_s] |
|
|
else: |
|
|
|
|
|
last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w") |
|
|
last_clean_latent = F.interpolate( |
|
|
last_clean_latent, |
|
|
size=( |
|
|
last_clean_latent.shape[-2] * 2, |
|
|
last_clean_latent.shape[-1] * 2, |
|
|
), |
|
|
mode="nearest", |
|
|
) |
|
|
last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num) |
|
|
start_point = start_sigma * noise_list[i_s] + (1 - start_sigma) * last_clean_latent |
|
|
|
|
|
if i_s == pyramid_stage_num - 1: |
|
|
end_point = clean_latent |
|
|
else: |
|
|
end_point = end_sigma * noise_list[i_s] + (1 - end_sigma) * clean_latent |
|
|
|
|
|
for _ in range(cur_sample_ratio): |
|
|
|
|
|
|
|
|
u = compute_density_for_timestep_sampling( |
|
|
weighting_scheme=get_config_value(args, 'weighting_scheme'), |
|
|
batch_size=bsz, |
|
|
logit_mean=get_config_value(args, 'logit_mean'), |
|
|
logit_std=get_config_value(args, 'logit_std'), |
|
|
mode_scale=get_config_value(args, 'mode_scale'), |
|
|
) |
|
|
indices = (u * training_steps).long() |
|
|
indices = indices.clamp(0, training_steps - 1) |
|
|
timesteps = scheduler.timesteps_per_stage[i_s][indices].to(device=device) |
|
|
|
|
|
|
|
|
|
|
|
sigmas = scheduler.sigmas_per_stage[i_s][indices].to(device=device) |
|
|
while len(sigmas.shape) < start_point.ndim: |
|
|
sigmas = sigmas.unsqueeze(-1) |
|
|
|
|
|
noisy_latents = sigmas * start_point + (1 - sigmas) * end_point |
|
|
|
|
|
|
|
|
noisy_latents_list.append(noisy_latents.to(dtype)) |
|
|
sigmas_list.append(sigmas.to(dtype)) |
|
|
timesteps_list.append(timesteps.to(dtype)) |
|
|
targets_list.append(start_point - end_point) |
|
|
elif pyramid_sample_mode == "diffusion_forcing": |
|
|
|
|
|
bsz = input_video_num |
|
|
latent_chunk_num = latent_frame_num // stream_chunk_size |
|
|
assert latent_frame_num % stream_chunk_size == 0 |
|
|
|
|
|
|
|
|
noisy_latents_list = [] |
|
|
sigmas_list = [] |
|
|
targets_list = [] |
|
|
timesteps_list = [] |
|
|
training_steps = scheduler.config.num_train_timesteps |
|
|
for i_s, cur_sample_ratio in zip(range(pyramid_stage_num), pyramid_sample_ratios): |
|
|
clean_latent = pyramid_latent_list[i_s] |
|
|
last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1] |
|
|
start_sigma = scheduler.start_sigmas[i_s] |
|
|
end_sigma = scheduler.end_sigmas[i_s] |
|
|
|
|
|
if i_s == 0: |
|
|
start_point = noise_list[i_s] |
|
|
else: |
|
|
|
|
|
last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w") |
|
|
last_clean_latent = F.interpolate( |
|
|
last_clean_latent, |
|
|
size=( |
|
|
last_clean_latent.shape[-2] * 2, |
|
|
last_clean_latent.shape[-1] * 2, |
|
|
), |
|
|
mode="nearest", |
|
|
) |
|
|
last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num) |
|
|
start_point = start_sigma * noise_list[i_s] + (1 - start_sigma) * last_clean_latent |
|
|
|
|
|
if i_s == pyramid_stage_num - 1: |
|
|
end_point = clean_latent |
|
|
else: |
|
|
end_point = end_sigma * noise_list[i_s] + (1 - end_sigma) * clean_latent |
|
|
|
|
|
for _ in range(cur_sample_ratio): |
|
|
|
|
|
|
|
|
u = compute_density_for_timestep_sampling( |
|
|
weighting_scheme=get_config_value(args, 'weighting_scheme'), |
|
|
batch_size=bsz * latent_chunk_num, |
|
|
logit_mean=get_config_value(args, 'logit_mean'), |
|
|
logit_std=get_config_value(args, 'logit_std'), |
|
|
mode_scale=get_config_value(args, 'mode_scale'), |
|
|
) |
|
|
indices = (u * training_steps).long() |
|
|
indices = indices.clamp(0, training_steps - 1) |
|
|
|
|
|
timesteps = scheduler.timesteps_per_stage[i_s][indices].to(device=device) |
|
|
timesteps = timesteps.view(bsz, latent_chunk_num) |
|
|
sigmas = scheduler.sigmas_per_stage[i_s][indices].to(device=device) |
|
|
sigmas = sigmas.view(bsz, latent_chunk_num) |
|
|
|
|
|
chunk_index = ( |
|
|
torch.arange(latent_frame_num, device=device).unsqueeze(0).expand(bsz, -1) // stream_chunk_size |
|
|
) |
|
|
chunk_index = chunk_index.clamp(max=latent_chunk_num - 1) |
|
|
sigmas = torch.gather(sigmas, 1, chunk_index) |
|
|
timesteps = torch.gather(timesteps, 1, chunk_index) |
|
|
|
|
|
|
|
|
|
|
|
sigmas = ( |
|
|
sigmas.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) |
|
|
) |
|
|
noisy_latents = sigmas * start_point + (1 - sigmas) * end_point |
|
|
|
|
|
|
|
|
noisy_latents_list.append(noisy_latents.to(dtype)) |
|
|
sigmas_list.append(sigmas.to(dtype)) |
|
|
timesteps_list.append(timesteps.to(dtype)) |
|
|
targets_list.append(start_point - end_point) |
|
|
elif pyramid_sample_mode == "stream_sample": |
|
bsz = input_video_num |
|
|
|
|
|
|
|
|
( |
|
|
training_num_steps_to_be_saved, |
|
|
training_all_timesteps_stage_ids, |
|
|
training_all_progressive_timesteps, |
|
|
progressive_timesteps_stages, |
|
|
) = get_stream_sample( |
|
|
scheduler=scheduler, |
|
|
max_latent_frame_num=latent_frame_num, |
|
|
stream_chunk_size=stream_chunk_size, |
|
|
pyramid_stage_num=pyramid_stage_num, |
|
|
pyramid_stream_inference_steps=pyramid_stream_inference_steps, |
|
|
) |
|
|
timestep_to_stage = { |
|
|
float(t.item()): int(stage.item()) |
|
|
for t, stage in zip(training_all_progressive_timesteps[0], training_all_timesteps_stage_ids[0]) |
|
|
} |
|
|
|
|
|
while True: |
|
|
initialization = random.choice([True, False]) |
|
|
termination = random.choice([True, False]) |
|
|
if not (initialization and termination): |
|
|
break |
|
|
|
|
|
stage_i = random.randint(0, training_num_steps_to_be_saved - 1) |
|
|
timesteps = progressive_timesteps_stages[stage_i].clone().repeat(bsz, 1) |
|
|
if initialization: |
|
|
timesteps = timesteps[:, -latent_frame_num:] |
|
|
elif termination: |
|
|
timesteps = timesteps[:, :latent_frame_num] |
|
|
|
|
|
|
|
|
sigmas, stage_latent_mapping = get_sigmas_from_pyramid_timesteps(scheduler, timesteps, timestep_to_stage) |
|
|
|
|
|
|
|
|
timesteps = timesteps.to(device) |
|
|
sigmas = sigmas.to(device) |
|
|
|
|
|
|
|
|
stage_point_list = [] |
|
|
for i_s in range(pyramid_stage_num): |
|
|
clean_latent = pyramid_latent_list[i_s] |
|
|
last_clean_latent = None if i_s == 0 else pyramid_latent_list[i_s - 1] |
|
|
start_sigma = scheduler.start_sigmas[i_s] |
|
|
end_sigma = scheduler.end_sigmas[i_s] |
|
|
|
|
|
if i_s == 0: |
|
|
start_point = noise_list[i_s] |
|
|
else: |
|
|
|
|
|
last_clean_latent = rearrange(last_clean_latent, "b c t h w -> (b t) c h w") |
|
|
last_clean_latent = F.interpolate( |
|
|
last_clean_latent, |
|
|
size=( |
|
|
last_clean_latent.shape[-2] * 2, |
|
|
last_clean_latent.shape[-1] * 2, |
|
|
), |
|
|
mode="nearest", |
|
|
) |
|
|
last_clean_latent = rearrange(last_clean_latent, "(b t) c h w -> b c t h w", t=latent_frame_num) |
|
|
start_point = start_sigma * noise_list[i_s] + (1 - start_sigma) * last_clean_latent |
|
|
|
|
|
if i_s == pyramid_stage_num - 1: |
|
|
end_point = clean_latent |
|
|
else: |
|
|
end_point = end_sigma * noise_list[i_s] + (1 - end_sigma) * clean_latent |
|
|
|
|
|
stage_point_list.append((start_point, end_point)) |
|
|
|
|
|
noisy_latents_list = [] |
|
|
targets_list = [] |
|
|
sigmas_list = [] |
|
|
timesteps_list = [] |
|
|
temp_noisy_latents_list = [] |
|
|
temp_targets_list = [] |
|
|
|
|
|
unique_elements = list(map(int, torch.unique(stage_latent_mapping))) |
|
|
for cur_stage in reversed(unique_elements): |
|
|
stage_indices = torch.nonzero(stage_latent_mapping == cur_stage, as_tuple=True) |
|
|
start_index = stage_indices[1][0].item() |
|
|
end_index = start_index + stream_chunk_size |
|
|
|
|
|
start_point, end_point = stage_point_list[cur_stage] |
|
|
start_point_slice = start_point[:, :, start_index:end_index, :, :] |
|
|
end_point_slice = end_point[:, :, start_index:end_index, :, :] |
|
|
|
|
|
sigmas_slice = sigmas[:, :, start_index:end_index, :, :] |
|
|
noisy_latents = sigmas_slice * start_point_slice + (1 - sigmas_slice) * end_point_slice |
|
|
target = start_point_slice - end_point_slice |
|
|
|
|
|
temp_noisy_latents_list.append(noisy_latents.to(dtype)) |
|
|
temp_targets_list.append(target) |
|
|
|
|
|
noisy_latents_list.append(temp_noisy_latents_list) |
|
|
targets_list.append(temp_targets_list) |
|
|
sigmas_list.append(sigmas.to(dtype)) |
|
|
timesteps_list.append(timesteps.to(dtype=dtype)) |
|
|
|
|
|
return noisy_latents_list, sigmas_list, timesteps_list, targets_list |
|
|
|
|
|
|
|
|
def get_sigmas_from_pyramid_timesteps(scheduler, timesteps, timestep_to_stage): |
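    """Map per-frame pyramid timesteps back to their sigmas.

    `timestep_to_stage` assigns each timestep value to a pyramid stage, and the
    matching sigma is looked up in `scheduler.sigmas_per_stage`. Returns the sigmas
    reshaped to (b, 1, f, 1, 1) for broadcasting, plus the per-frame stage mapping.
    """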
|
|
|
|
|
flat_timesteps = timesteps.flatten() |
|
|
stage_latent_mapping = torch.tensor( |
|
|
[timestep_to_stage.get(float(t.item()), -1) for t in flat_timesteps], |
|
|
device=timesteps.device, |
|
|
).view(timesteps.shape) |
|
|
|
|
|
|
|
|
sigmas = torch.full_like(timesteps, -1.0) |
|
|
for i in range(timesteps.shape[0]): |
|
|
for j in range(timesteps.shape[1]): |
|
|
temp_stage_mapping = int(stage_latent_mapping[i, j]) |
|
|
target_value = timesteps[i, j] |
|
|
temp_indice = ( |
|
|
( |
|
|
torch.isclose( |
|
|
scheduler.timesteps_per_stage[temp_stage_mapping], |
|
|
target_value.clone().detach().to(scheduler.timesteps_per_stage[temp_stage_mapping].dtype), |
|
|
) |
|
|
) |
|
|
.nonzero(as_tuple=True)[0] |
|
|
.item() |
|
|
) |
|
|
sigmas[i, j] = scheduler.sigmas_per_stage[temp_stage_mapping][temp_indice] |
|
|
sigmas = sigmas.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) |
|
|
|
|
|
return sigmas, stage_latent_mapping |
|
|
|
|
|
|
|
|
def get_stream_sample( |
|
|
scheduler, |
|
|
max_latent_frame_num, |
|
|
stream_chunk_size, |
|
|
pyramid_stage_num=3, |
|
|
pyramid_stream_inference_steps=[10, 10, 10], |
|
|
): |
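    """Build the staggered timestep schedule used by the "stream_sample" mode.

    The per-stage inference timesteps are concatenated and flipped, then regrouped so
    that each saved step assigns one timestep to every chunk of `stream_chunk_size`
    latent frames, with different chunks sitting at different denoising levels.

    Returns:
        (num_steps_to_be_saved, all_timesteps_stage_ids, all_progressive_timesteps,
        progressive_timesteps_stages).
    """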
|
|
max_inference_steps = sum(pyramid_stream_inference_steps) |
|
|
|
|
|
|
|
|
all_progressive_timesteps_list = [] |
|
|
timestep_stage_list = [] |
|
|
for stage_idx in range(pyramid_stage_num): |
|
|
scheduler.set_timesteps(pyramid_stream_inference_steps[stage_idx], stage_idx) |
|
|
temp_timesteps = scheduler.timesteps |
|
|
all_progressive_timesteps_list.append(temp_timesteps) |
|
|
timestep_stage_list.append( |
|
|
torch.full_like(temp_timesteps, fill_value=stage_idx) |
|
|
) |
|
|
all_progressive_timesteps = torch.cat(all_progressive_timesteps_list).unsqueeze(0).flip(1) |
|
|
all_timesteps_stage_ids = torch.cat(timestep_stage_list).unsqueeze(0).flip(1) |
|
|
|
|
|
|
|
|
|
|
|
assert max_latent_frame_num % stream_chunk_size == 0, ( |
|
|
f"num_frames should be multiple of stream_chunk_size, {max_latent_frame_num} % {stream_chunk_size} != 0" |
|
|
) |
|
|
assert max_inference_steps % (max_latent_frame_num // stream_chunk_size) == 0, ( |
|
|
f"max_inference_steps should be multiple of max_latent_frame_num // stream_chunk_size, {max_inference_steps} % {max_latent_frame_num // stream_chunk_size} != 0" |
|
|
) |
|
|
num_steps_to_be_saved = max_inference_steps // ( |
|
|
max_latent_frame_num // stream_chunk_size |
|
|
) |
|
|
|
|
|
|
|
|
progressive_timesteps_stages = [ |
|
|
repeat( |
|
|
all_progressive_timesteps[:, (num_steps_to_be_saved - 1) - s :: num_steps_to_be_saved], |
|
|
"b f -> b f c", |
|
|
c=stream_chunk_size, |
|
|
).flatten(1, 2) |
|
|
for s in range(num_steps_to_be_saved) |
|
|
] |
|
|
|
|
|
return num_steps_to_be_saved, all_timesteps_stage_ids, all_progressive_timesteps, progressive_timesteps_stages |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
import argparse |
|
|
|
|
|
parser = argparse.ArgumentParser(description="Simple example of a training script.") |
|
|
parser.add_argument( |
|
|
"--weighting_scheme", |
|
|
type=str, |
|
|
default="logit_normal", |
|
|
choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"], |
|
|
        help='Weighting scheme used for timestep sampling; defaults to "logit_normal".',
|
|
) |
|
|
parser.add_argument( |
|
|
"--logit_mean", |
|
|
type=float, |
|
|
default=0.0, |
|
|
help="mean to use when using the `'logit_normal'` weighting scheme.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--logit_std", |
|
|
type=float, |
|
|
default=1.0, |
|
|
help="std to use when using the `'logit_normal'` weighting scheme.", |
|
|
) |
|
|
parser.add_argument( |
|
|
"--mode_scale", |
|
|
type=float, |
|
|
default=1.29, |
|
|
help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.", |
|
|
) |
|
|
args = parser.parse_args() |
|
|
|
|
|
device = "cuda" |
|
|
|
|
|
import sys |
|
|
|
|
|
sys.path.append("../") |
|
|
from scheduler.scheduling_flow_matching_pyramid import PyramidFlowMatchEulerDiscreteScheduler |
|
|
|
|
|
stages = [1, 2, 4] |
|
|
timestep_shift = 1.0 |
|
|
stage_range = [0, 1 / 3, 2 / 3, 1] |
|
|
scheduler_gamma = 1 / 3 |
|
|
scheduler = PyramidFlowMatchEulerDiscreteScheduler( |
|
|
shift=timestep_shift, |
|
|
stages=len(stages), |
|
|
stage_range=stage_range, |
|
|
gamma=scheduler_gamma, |
|
|
) |
|
|
print( |
|
|
f"The start sigmas and end sigmas of each stage is Start: {scheduler.start_sigmas}, End: {scheduler.end_sigmas}, Ori_start: {scheduler.ori_start_sigmas}" |
|
|
) |
|
|
|
|
|
|
|
|
from diffusers import AutoencoderKLHunyuanVideo |
|
pixel_values = torch.randn([2, 3, 241, 384, 640], device=device).clamp(-1, 1) |
|
|
pixel_values = pixel_values.to(torch.bfloat16) |
|
|
vae = AutoencoderKLHunyuanVideo.from_pretrained( |
|
|
"/mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/", |
|
|
subfolder="vae", |
|
|
        torch_dtype=torch.bfloat16,
|
|
).to(device) |
|
|
vae.requires_grad_(False) |
|
|
vae.eval() |
|
|
|
|
|
( |
|
|
model_input, |
|
|
indices_latents, |
|
|
latents_clean, |
|
|
indices_clean_latents, |
|
|
latents_history_2x, |
|
|
indices_latents_history_2x, |
|
|
latents_history_4x, |
|
|
indices_latents_history_4x, |
|
|
section_to_video_idx, |
|
|
) = get_framepack_input_i2v( |
|
|
vae=vae, |
|
|
pixel_values=pixel_values, |
|
|
latent_window_size=12, |
|
|
vanilla_sampling=False, |
|
|
dtype=torch.bfloat16, |
|
|
) |
|
|
|
|
|
print(indices_latents, "\n", indices_clean_latents, "\n", indices_latents_history_2x, "\n", indices_latents_history_4x) |