# dummy_dataloader_official.py
import os
import json
import pickle
import random
import numpy as np
import pandas as pd
from collections import defaultdict
import torch
import torchvision
from torch.utils.data import DataLoader, Dataset, Sampler
from video_reader import PyVideoReader
from diffusers.utils import export_to_video
from diffusers.training_utils import free_memory
# 5: (21, 41, 61, 81, 101)
# 6: (25, 49, 73, 97, 121)
# 7: (29, 57, 85, 113, 141)
# 8: (33, 65, 97, 129, 161)
# 9: (37, 73, 109, 145, 181)
# 10: (41, 81, 121, 161, 201)
# 11: (45, 89, 133, 177, 221)
# 12: (49, 97, 145, 193, 241)
# 1: (21 - 1) * 4 + 1 = 81, 162
# 2: (22 - 1) * 4 + 1 = 85, 170
# 3: (23 - 1) * 4 + 1 = 89, 178
# 4: (24 - 1) * 4 + 1 = 93, 186
# 5: (25 - 1) * 4 + 1 = 97, 194
# 6: (26 - 1) * 4 + 1 = 101, 202
# 7: (27 - 1) * 4 + 1 = 105, 210
# 8: (28 - 1) * 4 + 1 = 109, 218
# 9: (29 - 1) * 4 + 1 = 113, 226
# 10: (30 - 1) * 4 + 1 = 117, 234
# 11: (31 - 1) * 4 + 1 = 121, 242
# 12: (32 - 1) * 4 + 1 = 125, 250
# 13: (33 - 1) * 4 + 1 = 129, 258
# 14: (34 - 1) * 4 + 1 = 133, 266
# 15: (35 - 1) * 4 + 1 = 137, 274
# 16: (36 - 1) * 4 + 1 = 141, 282
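# The two comment tables above appear to encode a 4x temporal VAE compression:
# pixel_frames = (latent_frames - 1) * 4 + 1, with the second column in the
# lower table being twice the first (possibly the stride-2 count). A minimal
# sketch of that mapping, kept as an assumption; nothing in this file uses it:
def latent_to_pixel_frames(latent_frames, temporal_compression=4):
    # e.g. latent_to_pixel_frames(33) == 129, matching the "13:" row above
    return (latent_frames - 1) * temporal_compression + 1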
# Candidate (height, width) buckets, keyed by the nominal bucket resolution.
resolution_bucket_options = {
640: [
(768, 320),
(768, 384),
(640, 384),
(768, 512),
(576, 448),
(512, 512),
(448, 576),
(512, 768),
(384, 640),
(384, 768),
(320, 768),
],
}
# Candidate clip-length buckets (frame counts), keyed by temporal stride.
length_bucket_options = {
1: [321, 301, 281, 261, 241, 221, 193, 181, 161, 141, 121, 101, 81, 61, 41, 21],
2: [193, 177, 161, 156, 145, 133, 129, 121, 113, 109, 97, 85, 81, 73, 65, 61, 49, 37, 25],
}
def find_nearest_resolution_bucket(h, w, resolution=640):
    """Return the (H, W) bucket whose aspect ratio is closest to h/w, using a cross-multiplied metric; ties go to the later bucket."""
min_metric = float('inf')
best_bucket = None
for (bucket_h, bucket_w) in resolution_bucket_options[resolution]:
metric = abs(h * bucket_w - w * bucket_h)
if metric <= min_metric:
min_metric = metric
best_bucket = (bucket_h, bucket_w)
return best_bucket
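# Sanity check of the metric above (cheap, safe to run at import time): a
# 720x1280 landscape clip minimizes |h*bucket_w - w*bucket_h| at (384, 640).
assert find_nearest_resolution_bucket(720, 1280) == (384, 640)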
def find_nearest_length_bucket(length, stride=1):
    """Return the largest bucket <= length for this stride; clips shorter than every bucket keep their raw length."""
buckets = length_bucket_options[stride]
min_bucket = min(buckets)
if length < min_bucket:
return length
valid_buckets = [bucket for bucket in buckets if bucket <= length]
return max(valid_buckets)
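# Sanity check: a 200-frame stride-1 clip snaps down to the largest bucket that
# fits (193), while a 15-frame clip is shorter than every bucket and keeps its
# raw length.
assert find_nearest_length_bucket(200, stride=1) == 193
assert find_nearest_length_bucket(15, stride=1) == 15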
def read_cut_crop_and_resize(video_path, f_prime, h_prime, w_prime, stride=1, start_frame=None, end_frame=None):
vr = PyVideoReader(video_path, threads=0) # 0 means auto (let ffmpeg pick the optimal number)
total_frames = len(vr)
    # Sample every `stride`-th frame in [start_frame, end_frame); truncate to
    # f_prime so a segment longer than its length bucket still decodes cleanly.
    frame_indices = list(range(start_frame, end_frame, stride))[:f_prime]
    assert len(frame_indices) == f_prime, f"expected {f_prime} frames, got {len(frame_indices)}"
frames = torch.from_numpy(vr.get_batch(frame_indices)).float()
    frames = (frames / 127.5) - 1  # scale uint8 pixel values to [-1, 1]
    video = frames.permute(0, 3, 1, 2)  # (F, H, W, C) -> (F, C, H, W)
    num_frames, channels, h, w = video.shape
aspect_ratio_original = h / w
aspect_ratio_target = h_prime / w_prime
    if aspect_ratio_original >= aspect_ratio_target:
        # Source is relatively taller than the target: center-crop the height.
        new_h = int(w * aspect_ratio_target)
        top = (h - new_h) // 2
        bottom = top + new_h
        left = 0
        right = w
    else:
        # Source is relatively wider than the target: center-crop the width.
        new_w = int(h / aspect_ratio_target)
        left = (w - new_w) // 2
        right = left + new_w
        top = 0
        bottom = h
# Crop the video
cropped_video = video[:, :, top:bottom, left:right]
# Resize the cropped video
resized_video = torchvision.transforms.functional.resize(cropped_video, (h_prime, w_prime))
return resized_video
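# Example usage (hypothetical file): decode frames [0, 121) at stride 1 and
# center-crop/resize into the (384, 640) bucket; the result is a float tensor
# of shape [121, 3, 384, 640] with values in [-1, 1]:
#   clip = read_cut_crop_and_resize("clip.mp4", f_prime=121, h_prime=384,
#                                   w_prime=640, start_frame=0, end_frame=121)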
def save_frames(frame_raw, fps=24, video_path="1.mp4"):
save_list = []
for frame in frame_raw:
frame = (frame + 1) / 2 * 255
        frame = torchvision.transforms.ToPILImage()(frame.to(torch.uint8)).convert("RGB")
save_list.append(frame)
    del frame
    export_to_video(save_list, video_path, fps=fps)
    del save_list
free_memory()
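# Example usage (hypothetical tensor), pairing the two helpers above to eyeball
# a decoded clip:
#   save_frames(clip, fps=24, video_path="debug.mp4")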
class BucketedFeatureDataset(Dataset):
    """CSV-driven video dataset that assigns every clip to a (frames, height, width) bucket."""
def __init__(self, csv_file, video_folder, stride=1, cache_file=None, force_rebuild=False):
self.csv_file = csv_file
self.video_folder = video_folder
self.stride = stride
if cache_file is None:
cache_file = os.path.join(video_folder, f"dataset_cache_stride{stride}.pkl")
if force_rebuild or not os.path.exists(cache_file):
print("Building metadata cache...")
self._build_metadata()
self._save_cache(cache_file)
else:
print("Loading cached metadata...")
with open(cache_file, "rb") as f:
cached_data = pickle.load(f)
if cached_data.get("stride", 1) != stride:
print(f"Stride mismatch in cache (cached: {cached_data.get('stride', 1)}, current: {stride}). Rebuilding...")
self._build_metadata()
self._save_cache(cache_file)
else:
self.samples = cached_data["samples"]
self.buckets = cached_data["buckets"]
print(f"Loaded {len(self.samples)} samples from cache")
def _save_cache(self, cache_file):
print("Saving metadata cache...")
cached_data = {
"samples": self.samples,
"buckets": self.buckets,
"stride": self.stride
}
with open(cache_file, "wb") as f:
pickle.dump(cached_data, f)
print(f"Cached {len(self.samples)} samples with stride={self.stride}")
    # (An earlier _build_metadata variant globbed *.mp4 files and parsed
    # "uttid_numframe_height_width.mp4" filenames; superseded by the CSV-driven
    # version below.)
def _build_metadata(self):
self.df = pd.read_csv(self.csv_file)
self.samples = []
self.buckets = defaultdict(list)
sample_idx = 0
print(f"Processing {len(self.df)} records from CSV with stride={self.stride}...")
for i, row in self.df.iterrows():
if i % 10000 == 0:
print(f"Processed {i}/{len(self.df)} records")
uttid = row['id']
video_file = row['video path']
video_path = os.path.join(self.video_folder, video_file)
start_frame = row["start_frame"]
end_frame = row["end_frame"]
segment_id = row["segment_id"]
num_frame = end_frame - start_frame
width = row["new_width"]
height = row["new_height"]
fps = row["new_fps"]
uttid = f"{uttid}_{start_frame}_{end_frame}"
prompt = row["prompt"]
            # (Earlier variants loaded the caption JSON via "annotation path" and
            # skipped clips shorter than 121 frames.)
            # Effective frame count at this stride: ceil(num_frame / stride).
            effective_num_frame = (num_frame + self.stride - 1) // self.stride
bucket_height, bucket_width = find_nearest_resolution_bucket(height, width, resolution=640)
bucket_num_frame = find_nearest_length_bucket(effective_num_frame, stride=self.stride)
bucket_key = (bucket_num_frame, bucket_height, bucket_width)
sample_info = {
"uttid": uttid,
"bucket_key": bucket_key,
"video_path": video_path,
"prompt": prompt,
"fps": fps,
"stride": self.stride,
"effective_num_frame": effective_num_frame,
"num_frame": num_frame,
"height": height,
"width": width,
"bucket_num_frame": bucket_num_frame,
"bucket_height": bucket_height,
"bucket_width": bucket_width,
"start_frame": start_frame,
"end_frame": end_frame,
}
self.samples.append(sample_info)
self.buckets[bucket_key].append(sample_idx)
sample_idx += 1
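        # Worked example of the bucketing above (hypothetical stride=2 record):
        # a 720x1280 segment with start_frame=0, end_frame=300 has
        # num_frame=300, samples ceil(300 / 2) = 150 frames, snaps to length
        # bucket 145 and resolution bucket (384, 640), so
        # bucket_key == (145, 384, 640).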
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
        # Retry with a random sample if decoding fails, so one corrupt file
        # cannot stall training. The replacement may land in a different bucket;
        # the sampler's consistency check flags that case at iteration time.
        while True:
            sample_info = self.samples[idx]
            try:
                video_data = read_cut_crop_and_resize(
                    video_path=sample_info["video_path"],
                    f_prime=sample_info["bucket_num_frame"],
                    h_prime=sample_info["bucket_height"],
                    w_prime=sample_info["bucket_width"],
                    stride=self.stride,
                    start_frame=sample_info["start_frame"],
                    end_frame=sample_info["end_frame"],
                )
                break
            except Exception as e:
                print(f"Error loading {sample_info['video_path']} ({e}), retrying with a random sample...")
                idx = random.randint(0, len(self.samples) - 1)
return {
"uttid": sample_info["uttid"],
"bucket_key": sample_info["bucket_key"],
"video_metadata": {
"num_frames": sample_info["bucket_num_frame"],
"height": sample_info["bucket_height"],
"width": sample_info["bucket_width"],
"fps": sample_info["fps"],
"stride": self.stride,
"effective_num_frame": sample_info["effective_num_frame"],
},
"videos": video_data,
"prompts": sample_info["prompt"],
"first_frames_images": (video_data[0] + 1) / 2 * 255,
}
class BucketedSampler(Sampler):
    """Batch sampler that draws every batch from a single bucket, so batches are shape-homogeneous."""
def __init__(self, dataset, batch_size, drop_last=False, shuffle=False, seed=42):
self.dataset = dataset
self.batch_size = batch_size
self.drop_last = drop_last
self.shuffle = shuffle
self.seed = seed
self.generator = torch.Generator()
self.buckets = dataset.buckets
self._epoch = 0
def set_epoch(self, epoch):
self._epoch = epoch
def __iter__(self):
if self.shuffle:
self.generator.manual_seed(self.seed + self._epoch)
else:
self.generator.manual_seed(self.seed)
bucket_iterators = {}
bucket_batches = {}
for bucket_key, sample_indices in self.buckets.items():
indices = sample_indices.copy()
if self.shuffle:
indices = torch.randperm(len(indices), generator=self.generator).tolist()
indices = [sample_indices[i] for i in indices]
batches = []
for i in range(0, len(indices), self.batch_size):
batch = indices[i : i + self.batch_size]
if len(batch) == self.batch_size or not self.drop_last:
batches.append(batch)
if batches:
bucket_batches[bucket_key] = batches
bucket_iterators[bucket_key] = iter(batches)
remaining_buckets = list(bucket_iterators.keys())
while remaining_buckets:
idx = torch.randint(len(remaining_buckets), (1,), generator=self.generator).item()
bucket_key = remaining_buckets[idx]
bucket_iter = bucket_iterators[bucket_key]
try:
batch = next(bucket_iter)
for sample_idx in batch:
sample_bucket = self.dataset.samples[sample_idx]['bucket_key']
if sample_bucket != bucket_key:
print(f"❌ BUCKET MISMATCH! Expected {bucket_key}, got {sample_bucket} for sample {sample_idx}")
yield batch
except StopIteration:
remaining_buckets.remove(bucket_key)
def __len__(self):
total_batches = 0
for sample_indices in self.buckets.values():
num_batches = len(sample_indices) // self.batch_size
if not self.drop_last and len(sample_indices) % self.batch_size != 0:
num_batches += 1
total_batches += num_batches
return total_batches
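# Sketch of how the sampler is meant to be consumed (hypothetical CSV/folder,
# not executed here). Every yielded batch is drawn from a single bucket, so all
# of its samples share (num_frames, height, width) and stack cleanly:
#   ds = BucketedFeatureDataset(csv_file="toy.csv", video_folder="toy_videos")
#   for batch_indices in BucketedSampler(ds, batch_size=4, shuffle=True):
#       assert len({ds.samples[j]["bucket_key"] for j in batch_indices}) == 1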
def collate_fn(batch):
def collate_dict(data_list):
if isinstance(data_list[0], dict):
return {
key: collate_dict([d[key] for d in data_list])
for key in data_list[0]
}
elif isinstance(data_list[0], torch.Tensor):
return torch.stack(data_list)
else:
return data_list
return {
key: collate_dict([d[key] for d in batch])
for key in batch[0]
}
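# The recursion above stacks tensors and passes everything else through as
# lists: e.g. two samples whose "videos" are both [49, 3, 384, 640] collate to
# a single [2, 49, 3, 384, 640] tensor inside the nested dict, while string
# fields such as "prompts" become a plain list of length 2.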
if __name__ == "__main__":
from accelerate import Accelerator
    csv_file = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final/test_prompt_filtered"
    video_folder = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final"
stride = 1
batch_size = 64
num_train_epochs = 1
seed = 0
output_dir = "accelerate_checkpoints"
    # Collected for checkpoint-resume logic; unused in this standalone smoke test.
    checkpoint_dirs = (
[
d
for d in os.listdir(output_dir)
if d.startswith("checkpoint-") and os.path.isdir(os.path.join(output_dir, d))
]
if os.path.exists(output_dir)
else []
)
dataset = BucketedFeatureDataset(csv_file=csv_file, video_folder=video_folder, stride=stride)
sampler = BucketedSampler(dataset, batch_size=batch_size, drop_last=True, shuffle=False, seed=seed)
dataloader = DataLoader(dataset, batch_sampler=sampler, collate_fn=collate_fn, num_workers=8)
print(len(dataset), len(dataloader))
accelerator = Accelerator()
dataloader = accelerator.prepare(dataloader)
print(f"Dataset size: {len(dataset)}, Dataloader batches: {len(dataloader)}")
print(f"Process index: {accelerator.process_index}, World size: {accelerator.num_processes}")
    global_step = 0
    first_epoch = 0
    num_update_steps_per_epoch = len(dataloader)
    print("Testing dataloader...")
    step = global_step
for epoch in range(first_epoch, num_train_epochs):
sampler.set_epoch(epoch)
skip_steps = 0
printed_skip_log = False
for i, batch in enumerate(dataloader):
if epoch == first_epoch and skip_steps < (global_step % num_update_steps_per_epoch):
skip_steps += 1
continue
if epoch == first_epoch and not printed_skip_log:
print(f"Skip {skip_steps} steps in epoch {epoch}")
printed_skip_log = True
# Get metadata
uttid = batch["uttid"]
bucket_key = batch["bucket_key"]
num_frame = batch["video_metadata"]["num_frames"]
height = batch["video_metadata"]["height"]
width = batch["video_metadata"]["width"]
# Get feature
video_data = batch["videos"]
prompt = batch["prompts"]
first_frames_images = batch["first_frames_images"]
first_frames_images = [torchvision.transforms.ToPILImage()(x.to(torch.uint8)) for x in first_frames_images]
# import pdb;pdb.set_trace()
# save_frames(video_data[0].squeeze(0), video_path="1.mp4")
if accelerator.process_index == 0:
# print info
print(f" Step {step}:")
print(f" Batch {i}:")
print(f" Batch size: {len(uttid)}")
print(f" Uttids: {uttid}")
print(f" Dimensions - frames: {num_frame[0]}, height: {height[0]}, width: {width[0]}")
print(f" Bucket key: {bucket_key[0]}")
print(f" Videos shape: {video_data.shape}")
                print(f"    Caption: {prompt}")
# verify
assert all(nf == num_frame[0] for nf in num_frame), "Frame numbers not consistent in batch"
assert all(h == height[0] for h in height), "Heights not consistent in batch"
assert all(w == width[0] for w in width), "Widths not consistent in batch"
print(" ✓ Batch dimensions are consistent")
step += 1