import glob
import os
import time

import torch
import torch.multiprocessing as mp
from diffusers import AutoencoderKLHunyuanVideo
from diffusers.utils import export_to_video
from diffusers.video_processor import VideoProcessor
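
# Opt in to parallel loading of checkpoint shards to speed up from_pretrained().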
os.environ["HF_ENABLE_PARALLEL_LOADING"] = "yes"


def process_files_on_gpu(gpu_id, file_list, pretrained_model_path, output_folder):
    """Process the given list of latent files on the specified GPU."""
    device = f"cuda:{gpu_id}"
    vae = AutoencoderKLHunyuanVideo.from_pretrained(
        pretrained_model_path,
        subfolder="vae",
        torch_dtype=torch.float32,
    ).to(device)
    vae.eval()
    vae.requires_grad_(False)
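    # Tiled decoding splits the work into overlapping tiles so large clips fit in VRAM.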
    vae.enable_tiling()

    vae_scale_factor_spatial = vae.spatial_compression_ratio
    video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)

    for i, pt_file in enumerate(file_list):
        try:
            print(f"GPU {gpu_id} - processing ({i+1}/{len(file_list)}): {os.path.basename(pt_file)}")
            latents = torch.load(pt_file, map_location='cpu', weights_only=False)
            vae_latents = latents['vae_latent'] / vae.config.scaling_factor
            vae_latents = vae_latents.to(device=device, dtype=vae.dtype)
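
            # decode() expects a batch dimension; postprocess_video() then converts
            # the decoded tensor into per-sample lists of PIL frames.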
            video = vae.decode(vae_latents.unsqueeze(0), return_dict=False)[0]
            video = video_processor.postprocess_video(video, output_type="pil")

            base_name = os.path.splitext(os.path.basename(pt_file))[0]
            output_path = os.path.join(output_folder, f"{base_name}.mp4")

            export_to_video(video[0], output_path, fps=30)
            print(f"GPU {gpu_id} - saved: {output_path}")
            del latents, vae_latents, video
            torch.cuda.empty_cache()

        except Exception as e:
            print(f"GPU {gpu_id} - error processing {pt_file}: {str(e)}")
            continue

    print(f"GPU {gpu_id} - finished processing all assigned files!")


def main():
    pretrained_model_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo"
    input_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/dummy_fp_offload_latents"
    output_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/dummy_fp_offload_latents/decoded_videos"

    os.makedirs(output_folder, exist_ok=True)

    pt_files = glob.glob(os.path.join(input_folder, "*.pt"))
    print(f"Found {len(pt_files)} .pt files")

    if len(pt_files) == 0:
        print("No .pt files found!")
        return

    num_gpus = min(8, torch.cuda.device_count())
    if num_gpus == 0:
        print("No CUDA devices available!")
        return
    print(f"Using {num_gpus} GPUs for parallel processing")
    files_per_gpu = len(pt_files) // num_gpus
    file_chunks = []

    for i in range(num_gpus):
        start_idx = i * files_per_gpu
        if i == num_gpus - 1:
            end_idx = len(pt_files)
        else:
            end_idx = (i + 1) * files_per_gpu

        file_chunks.append(pt_files[start_idx:end_idx])
        print(f"GPU {i} will process {len(file_chunks[i])} files")

    start_time = time.time()
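
    # Launch one worker process per GPU; each decodes its chunk independently.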
    processes = []
    for gpu_id in range(num_gpus):
        if len(file_chunks[gpu_id]) > 0:
            p = mp.Process(
                target=process_files_on_gpu,
                args=(gpu_id, file_chunks[gpu_id], pretrained_model_path, output_folder)
            )
            p.start()
            processes.append(p)

    for p in processes:
        p.join()

    end_time = time.time()
    print(f"\nAll files processed! Total time: {end_time - start_time:.2f} seconds")


if __name__ == "__main__":
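    # 'spawn' is required: a forked child process cannot re-initialize CUDA.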
    mp.set_start_method('spawn', force=True)
    main()