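"""Bucketed video dataloading utilities.

Clips listed in a CSV are snapped to (num_frames, height, width) buckets so
that ``BucketedSampler`` can emit batches of identically shaped videos, which
``collate_fn`` then stacks without padding.
"""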
import os
import pickle
import random
from collections import defaultdict

import pandas as pd
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset, Sampler

from video_reader import PyVideoReader
from diffusers.utils import export_to_video
from diffusers.training_utils import free_memory

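# Candidate (height, width) buckets for the 640-pixel base resolution,
# ordered from widest to tallest; every side is a multiple of 64.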
resolution_bucket_options = {
    640: [
        (768, 320),
        (768, 384),
        (640, 384),
        (768, 512),
        (576, 448),
        (512, 512),
        (448, 576),
        (512, 768),
        (384, 640),
        (384, 768),
        (320, 768),
    ],
}

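# Allowed frame counts per temporal stride, in descending order; at stride 1
# every count has the form 4k + 1 (e.g. 321 = 4 * 80 + 1), matching the
# lengths commonly used with 4x temporally downsampled video VAEs.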
length_bucket_options = {
    1: [321, 301, 281, 261, 241, 221, 193, 181, 161, 141, 121, 101, 81, 61, 41, 21],
    2: [193, 177, 161, 156, 145, 133, 129, 121, 113, 109, 97, 85, 81, 73, 65, 61, 49, 37, 25],
}

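# Pick the bucket whose aspect ratio best matches the source clip's. The
# metric |h * bucket_w - w * bucket_h| vanishes exactly when
# h / w == bucket_h / bucket_w, and ties go to the later (taller) entry.
# For example, a 720x1280 (16:9) clip maps to (384, 640).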
def find_nearest_resolution_bucket(h, w, resolution=640):
    min_metric = float('inf')
    best_bucket = None
    for bucket_h, bucket_w in resolution_bucket_options[resolution]:
        metric = abs(h * bucket_w - w * bucket_h)
        if metric <= min_metric:
            min_metric = metric
            best_bucket = (bucket_h, bucket_w)
    return best_bucket

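# Snap a frame count down to the largest bucket that fits, e.g. 100 -> 81 at
# stride 1. Counts below the smallest bucket are returned unchanged.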
def find_nearest_length_bucket(length, stride=1):
    buckets = length_bucket_options[stride]
    min_bucket = min(buckets)
    if length < min_bucket:
        return length
    valid_buckets = [bucket for bucket in buckets if bucket <= length]
    return max(valid_buckets)

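# Decode f_prime frames of `video_path` at the given temporal stride,
# center-crop to the target aspect ratio, and resize to (h_prime, w_prime).
# Returns a float tensor in [-1, 1] of shape (f_prime, C, h_prime, w_prime).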
def read_cut_crop_and_resize(video_path, f_prime, h_prime, w_prime, stride=1, start_frame=None, end_frame=None):
    vr = PyVideoReader(video_path, threads=0)
    total_frames = len(vr)

    # Frame-window selection (a minimal reconstruction of the elided logic):
    # start at start_frame (or 0) and recompute the exclusive end so that
    # exactly f_prime frames are sampled at the requested stride.
    if start_frame is None:
        start_frame = 0
    end_frame = start_frame + f_prime * stride
    assert start_frame + (f_prime - 1) * stride < total_frames

    frame_indices = list(range(start_frame, end_frame, stride))
    assert len(frame_indices) == f_prime
    frames = torch.from_numpy(vr.get_batch(frame_indices)).float()

    # Rescale uint8 pixels from [0, 255] to [-1, 1], then (F, H, W, C) -> (F, C, H, W).
    frames = (frames / 127.5) - 1
    video = frames.permute(0, 3, 1, 2)

    _, _, h, w = video.shape
    aspect_ratio_original = h / w
    aspect_ratio_target = h_prime / w_prime

    # Center-crop to the target aspect ratio before resizing.
    if aspect_ratio_original >= aspect_ratio_target:
        # Source is taller than the target: crop the height.
        new_h = int(w * aspect_ratio_target)
        top = (h - new_h) // 2
        bottom = top + new_h
        left = 0
        right = w
    else:
        # Source is wider than the target: crop the width.
        new_w = int(h / aspect_ratio_target)
        left = (w - new_w) // 2
        right = left + new_w
        top = 0
        bottom = h

    cropped_video = video[:, :, top:bottom, left:right]
    resized_video = torchvision.transforms.functional.resize(cropped_video, (h_prime, w_prime))
    return resized_video

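# Write a stack of [-1, 1] frames to disk as a video and free the buffers;
# useful for spot-checking what the dataset actually feeds the model.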
def save_frames(frame_raw, fps=24, video_path="1.mp4"):
    save_list = []
    for frame in frame_raw:
        # Map [-1, 1] back to [0, 255]; clamp before the uint8 cast to guard
        # against slight overshoot.
        frame = (frame + 1) / 2 * 255
        frame = torchvision.transforms.ToPILImage()(frame.clamp(0, 255).to(torch.uint8)).convert("RGB")
        save_list.append(frame)
    frame = None
    del frame
    export_to_video(save_list, video_path, fps=fps)

    save_list = None
    del save_list
    free_memory()

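# Dataset backed by a CSV of clip metadata. Each clip is assigned to a
# (num_frames, height, width) bucket at construction time, and the resulting
# index is pickled next to the videos so later runs skip the CSV scan.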
class BucketedFeatureDataset(Dataset):
    def __init__(self, csv_file, video_folder, stride=1, cache_file=None, force_rebuild=False):
        self.csv_file = csv_file
        self.video_folder = video_folder
        self.stride = stride

        if cache_file is None:
            cache_file = os.path.join(video_folder, f"dataset_cache_stride{stride}.pkl")

        if force_rebuild or not os.path.exists(cache_file):
            print("Building metadata cache...")
            self._build_metadata()
            self._save_cache(cache_file)
        else:
            print("Loading cached metadata...")
            with open(cache_file, "rb") as f:
                cached_data = pickle.load(f)
            if cached_data.get("stride", 1) != stride:
                print(f"Stride mismatch in cache (cached: {cached_data.get('stride', 1)}, current: {stride}). Rebuilding...")
                self._build_metadata()
                self._save_cache(cache_file)
            else:
                self.samples = cached_data["samples"]
                self.buckets = cached_data["buckets"]
                print(f"Loaded {len(self.samples)} samples from cache")

    def _save_cache(self, cache_file):
        print("Saving metadata cache...")
        cached_data = {
            "samples": self.samples,
            "buckets": self.buckets,
            "stride": self.stride,
        }
        with open(cache_file, "wb") as f:
            pickle.dump(cached_data, f)
        print(f"Cached {len(self.samples)} samples with stride={self.stride}")

    def _build_metadata(self):
        self.df = pd.read_csv(self.csv_file)

        self.samples = []
        self.buckets = defaultdict(list)
        sample_idx = 0

        print(f"Processing {len(self.df)} records from CSV with stride={self.stride}...")
        for i, row in self.df.iterrows():
            if i % 10000 == 0:
                print(f"Processed {i}/{len(self.df)} records")

            uttid = row["id"]
            video_file = row["video path"]
            video_path = os.path.join(self.video_folder, video_file)
            start_frame = row["start_frame"]
            end_frame = row["end_frame"]
            segment_id = row["segment_id"]
            num_frame = end_frame - start_frame

            width = row["new_width"]
            height = row["new_height"]
            fps = row["new_fps"]

            uttid = f"{uttid}_{start_frame}_{end_frame}"
            prompt = row["prompt"]

            # Frames that survive temporal subsampling (ceil division), then
            # snap the clip to its resolution and length buckets.
            effective_num_frame = (num_frame + self.stride - 1) // self.stride
            bucket_height, bucket_width = find_nearest_resolution_bucket(height, width, resolution=640)
            bucket_num_frame = find_nearest_length_bucket(effective_num_frame, stride=self.stride)
            bucket_key = (bucket_num_frame, bucket_height, bucket_width)

            sample_info = {
                "uttid": uttid,
                "bucket_key": bucket_key,
                "video_path": video_path,
                "prompt": prompt,
                "fps": fps,
                "stride": self.stride,
                "effective_num_frame": effective_num_frame,
                "num_frame": num_frame,
                "height": height,
                "width": width,
                "bucket_num_frame": bucket_num_frame,
                "bucket_height": bucket_height,
                "bucket_width": bucket_width,
                "start_frame": start_frame,
                "end_frame": end_frame,
            }

            self.samples.append(sample_info)
            self.buckets[bucket_key].append(sample_idx)
            sample_idx += 1

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        # Retry on decode failure so a single corrupt file does not kill the
        # epoch; resample within the same bucket so the batch keeps a
        # consistent shape.
        while True:
            sample_info = self.samples[idx]
            try:
                video_data = read_cut_crop_and_resize(
                    video_path=sample_info["video_path"],
                    f_prime=sample_info["bucket_num_frame"],
                    h_prime=sample_info["bucket_height"],
                    w_prime=sample_info["bucket_width"],
                    stride=self.stride,
                    start_frame=sample_info["start_frame"],
                    end_frame=sample_info["end_frame"],
                )
                break
            except Exception as e:
                print(f"Error loading {sample_info['video_path']} ({e}), retrying...")
                idx = random.choice(self.buckets[sample_info["bucket_key"]])

        return {
            "uttid": sample_info["uttid"],
            "bucket_key": sample_info["bucket_key"],
            "video_metadata": {
                "num_frames": sample_info["bucket_num_frame"],
                "height": sample_info["bucket_height"],
                "width": sample_info["bucket_width"],
                "fps": sample_info["fps"],
                "stride": self.stride,
                "effective_num_frame": sample_info["effective_num_frame"],
            },
            "videos": video_data,
            "prompts": sample_info["prompt"],
            # First frame rescaled to [0, 255] for image-conditioning use.
            "first_frames_images": (video_data[0] + 1) / 2 * 255,
        }

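# Sampler that yields whole batches (for DataLoader's `batch_sampler=`), each
# drawn from a single bucket so all videos in a batch share one shape.
# Buckets are interleaved in random order; `set_epoch` reseeds the shuffle.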
class BucketedSampler(Sampler):
    def __init__(self, dataset, batch_size, drop_last=False, shuffle=False, seed=42):
        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.shuffle = shuffle
        self.seed = seed
        self.generator = torch.Generator()
        self.buckets = dataset.buckets
        self._epoch = 0

    def set_epoch(self, epoch):
        self._epoch = epoch

    def __iter__(self):
        # Reseed per epoch so iteration is deterministic but varies across
        # epochs when shuffling.
        if self.shuffle:
            self.generator.manual_seed(self.seed + self._epoch)
        else:
            self.generator.manual_seed(self.seed)

        bucket_iterators = {}
        bucket_batches = {}

        # Slice each bucket's (optionally shuffled) indices into batches.
        for bucket_key, sample_indices in self.buckets.items():
            indices = sample_indices.copy()
            if self.shuffle:
                perm = torch.randperm(len(indices), generator=self.generator).tolist()
                indices = [sample_indices[i] for i in perm]

            batches = []
            for i in range(0, len(indices), self.batch_size):
                batch = indices[i : i + self.batch_size]
                if len(batch) == self.batch_size or not self.drop_last:
                    batches.append(batch)

            if batches:
                bucket_batches[bucket_key] = batches
                bucket_iterators[bucket_key] = iter(batches)

        # Interleave buckets: pick a random non-exhausted bucket, emit its
        # next batch, and drop the bucket once it runs out.
        remaining_buckets = list(bucket_iterators.keys())

        while remaining_buckets:
            idx = torch.randint(len(remaining_buckets), (1,), generator=self.generator).item()
            bucket_key = remaining_buckets[idx]

            bucket_iter = bucket_iterators[bucket_key]

            try:
                batch = next(bucket_iter)
                # Sanity check: every sample must belong to the bucket it was
                # drawn from.
                for sample_idx in batch:
                    sample_bucket = self.dataset.samples[sample_idx]["bucket_key"]
                    if sample_bucket != bucket_key:
                        print(f"❌ BUCKET MISMATCH! Expected {bucket_key}, got {sample_bucket} for sample {sample_idx}")
                yield batch
            except StopIteration:
                remaining_buckets.remove(bucket_key)

    def __len__(self):
        total_batches = 0
        for sample_indices in self.buckets.values():
            num_batches = len(sample_indices) // self.batch_size
            if not self.drop_last and len(sample_indices) % self.batch_size != 0:
                num_batches += 1
            total_batches += num_batches
        return total_batches

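# Recursively collate a list of sample dicts: nested dicts are merged key by
# key, tensors are stacked (safe because a batch never mixes buckets), and
# anything else (strings, tuples) is kept as a plain list.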
def collate_fn(batch):
    def collate_dict(data_list):
        if isinstance(data_list[0], dict):
            return {key: collate_dict([d[key] for d in data_list]) for key in data_list[0]}
        elif isinstance(data_list[0], torch.Tensor):
            return torch.stack(data_list)
        else:
            return data_list

    return {key: collate_dict([d[key] for d in batch]) for key in batch[0]}

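# Smoke test: build the dataset, wrap the loader with Accelerate, and iterate
# one epoch while checking that every batch is dimensionally consistent.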
if __name__ == "__main__":
    from accelerate import Accelerator

    csv_file = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final/test_prompt_filtered"
    video_folder = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final"
    stride = 1
    batch_size = 64
    num_train_epochs = 1
    seed = 0
    output_dir = "accelerate_checkpoints"
    # Discovered checkpoints (unused in this smoke test).
    checkpoint_dirs = (
        [
            d
            for d in os.listdir(output_dir)
            if d.startswith("checkpoint-") and os.path.isdir(os.path.join(output_dir, d))
        ]
        if os.path.exists(output_dir)
        else []
    )

    dataset = BucketedFeatureDataset(csv_file=csv_file, video_folder=video_folder, stride=stride)
    sampler = BucketedSampler(dataset, batch_size=batch_size, drop_last=True, shuffle=False, seed=seed)
    dataloader = DataLoader(dataset, batch_sampler=sampler, collate_fn=collate_fn, num_workers=8)

    print(len(dataset), len(dataloader))
    accelerator = Accelerator()
    dataloader = accelerator.prepare(dataloader)
    print(f"Dataset size: {len(dataset)}, Dataloader batches: {len(dataloader)}")
    print(f"Process index: {accelerator.process_index}, World size: {accelerator.num_processes}")

    step = 0
    global_step = 0
    first_epoch = 0
    num_update_steps_per_epoch = len(dataloader)

    print("Testing dataloader...")
    step = global_step
    for epoch in range(first_epoch, num_train_epochs):
        sampler.set_epoch(epoch)
        skip_steps = 0
        printed_skip_log = False
        for i, batch in enumerate(dataloader):
            # When resuming mid-epoch, fast-forward past already-seen batches.
            if epoch == first_epoch and skip_steps < (global_step % num_update_steps_per_epoch):
                skip_steps += 1
                continue
            if epoch == first_epoch and not printed_skip_log:
                print(f"Skip {skip_steps} steps in epoch {epoch}")
                printed_skip_log = True

            uttid = batch["uttid"]
            bucket_key = batch["bucket_key"]
            num_frame = batch["video_metadata"]["num_frames"]
            height = batch["video_metadata"]["height"]
            width = batch["video_metadata"]["width"]

            video_data = batch["videos"]
            prompt = batch["prompts"]
            first_frames_images = batch["first_frames_images"]
            first_frames_images = [
                torchvision.transforms.ToPILImage()(x.clamp(0, 255).to(torch.uint8)) for x in first_frames_images
            ]

            if accelerator.process_index == 0:
                print(f"  Step {step}:")
                print(f"  Batch {i}:")
                print(f"    Batch size: {len(uttid)}")
                print(f"    Uttids: {uttid}")
                print(f"    Dimensions - frames: {num_frame[0]}, height: {height[0]}, width: {width[0]}")
                print(f"    Bucket key: {bucket_key[0]}")
                print(f"    Videos shape: {video_data.shape}")
                print(f"    Caption: {prompt}")

            assert all(nf == num_frame[0] for nf in num_frame), "Frame numbers not consistent in batch"
            assert all(h == height[0] for h in height), "Heights not consistent in batch"
            assert all(w == width[0] for w in width), "Widths not consistent in batch"

            print("    ✓ Batch dimensions are consistent")

            step += 1